Index: head/sys/amd64/amd64/bpf_jit_machdep.c
===================================================================
--- head/sys/amd64/amd64/bpf_jit_machdep.c (revision 328217)
+++ head/sys/amd64/amd64/bpf_jit_machdep.c (revision 328218)
@@ -1,671 +1,671 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2002-2003 NetGroup, Politecnico di Torino (Italy)
* Copyright (C) 2005-2017 Jung-uk Kim <jkim@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Politecnico di Torino nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifdef _KERNEL
#include "opt_bpf.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <net/if.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#else
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/param.h>
#endif
#include <sys/types.h>
#include <net/bpf.h>
#include <net/bpf_jitter.h>
#include <amd64/amd64/bpf_jit_machdep.h>
/*
* Emit routine to update the jump table.
*/
static void
emit_length(bpf_bin_stream *stream, __unused u_int value, u_int len)
{
if (stream->refs != NULL)
(stream->refs)[stream->bpf_pc] += len;
stream->cur_ip += len;
}
/*
* Emit routine to output the actual binary code.
*/
static void
emit_code(bpf_bin_stream *stream, u_int value, u_int len)
{
switch (len) {
case 1:
stream->ibuf[stream->cur_ip] = (u_char)value;
stream->cur_ip++;
break;
case 2:
*((u_short *)(void *)(stream->ibuf + stream->cur_ip)) =
(u_short)value;
stream->cur_ip += 2;
break;
case 4:
*((u_int *)(void *)(stream->ibuf + stream->cur_ip)) = value;
stream->cur_ip += 4;
break;
}
return;
}
/*
* Scan the filter program and find possible optimization.
*/
static int
bpf_jit_optimize(struct bpf_insn *prog, u_int nins)
{
int flags;
u_int i;
/* Do we return immediately? */
if (BPF_CLASS(prog[0].code) == BPF_RET)
return (BPF_JIT_FRET);
for (flags = 0, i = 0; i < nins; i++) {
switch (prog[i].code) {
case BPF_LD|BPF_W|BPF_ABS:
case BPF_LD|BPF_H|BPF_ABS:
case BPF_LD|BPF_B|BPF_ABS:
case BPF_LD|BPF_W|BPF_IND:
case BPF_LD|BPF_H|BPF_IND:
case BPF_LD|BPF_B|BPF_IND:
case BPF_LDX|BPF_MSH|BPF_B:
flags |= BPF_JIT_FPKT;
break;
case BPF_LD|BPF_MEM:
case BPF_LDX|BPF_MEM:
case BPF_ST:
case BPF_STX:
flags |= BPF_JIT_FMEM;
break;
case BPF_LD|BPF_W|BPF_LEN:
case BPF_LDX|BPF_W|BPF_LEN:
flags |= BPF_JIT_FLEN;
break;
case BPF_JMP|BPF_JA:
case BPF_JMP|BPF_JGT|BPF_K:
case BPF_JMP|BPF_JGE|BPF_K:
case BPF_JMP|BPF_JEQ|BPF_K:
case BPF_JMP|BPF_JSET|BPF_K:
case BPF_JMP|BPF_JGT|BPF_X:
case BPF_JMP|BPF_JGE|BPF_X:
case BPF_JMP|BPF_JEQ|BPF_X:
case BPF_JMP|BPF_JSET|BPF_X:
flags |= BPF_JIT_FJMP;
break;
}
if (flags == BPF_JIT_FLAG_ALL)
break;
}
return (flags);
}
/*
* Function that does the real stuff.
*/
bpf_filter_func
bpf_jit_compile(struct bpf_insn *prog, u_int nins, size_t *size)
{
bpf_bin_stream stream;
struct bpf_insn *ins;
int flags, fret, fpkt, fmem, fjmp, flen;
u_int i, pass;
/*
* NOTE: Do not modify the name of this variable, as it's used by
* the macros to emit code.
*/
emit_func emitm;
flags = bpf_jit_optimize(prog, nins);
fret = (flags & BPF_JIT_FRET) != 0;
fpkt = (flags & BPF_JIT_FPKT) != 0;
fmem = (flags & BPF_JIT_FMEM) != 0;
fjmp = (flags & BPF_JIT_FJMP) != 0;
flen = (flags & BPF_JIT_FLEN) != 0;
if (fret)
nins = 1;
memset(&stream, 0, sizeof(stream));
/* Allocate the reference table for the jumps. */
if (fjmp) {
#ifdef _KERNEL
- stream.refs = mallocarray(nins + 1, sizeof(u_int), M_BPFJIT,
+ stream.refs = malloc((nins + 1) * sizeof(u_int), M_BPFJIT,
M_NOWAIT | M_ZERO);
#else
stream.refs = calloc(nins + 1, sizeof(u_int));
#endif
if (stream.refs == NULL)
return (NULL);
}
/*
* The first pass will emit the lengths of the instructions
* to create the reference table.
*/
emitm = emit_length;
for (pass = 0; pass < 2; pass++) {
ins = prog;
/* Create the procedure header. */
if (fmem) {
PUSH(RBP);
MOVrq(RSP, RBP);
SUBib(BPF_MEMWORDS * sizeof(uint32_t), RSP);
}
if (flen)
MOVrd2(ESI, R9D);
if (fpkt) {
MOVrq2(RDI, R8);
MOVrd(EDX, EDI);
}
for (i = 0; i < nins; i++) {
stream.bpf_pc++;
switch (ins->code) {
default:
#ifdef _KERNEL
return (NULL);
#else
abort();
#endif
case BPF_RET|BPF_K:
MOVid(ins->k, EAX);
if (fmem)
LEAVE();
RET();
break;
case BPF_RET|BPF_A:
if (fmem)
LEAVE();
RET();
break;
case BPF_LD|BPF_W|BPF_ABS:
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
JAb(12);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int32_t), ECX);
if (fmem) {
JAEb(4);
ZEROrd(EAX);
LEAVE();
} else {
JAEb(3);
ZEROrd(EAX);
}
RET();
MOVrq3(R8, RCX);
MOVobd(RCX, RSI, EAX);
BSWAP(EAX);
break;
case BPF_LD|BPF_H|BPF_ABS:
ZEROrd(EAX);
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
JAb(12);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int16_t), ECX);
if (fmem) {
JAEb(2);
LEAVE();
} else
JAEb(1);
RET();
MOVrq3(R8, RCX);
MOVobw(RCX, RSI, AX);
SWAP_AX();
break;
case BPF_LD|BPF_B|BPF_ABS:
ZEROrd(EAX);
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
if (fmem) {
JBb(2);
LEAVE();
} else
JBb(1);
RET();
MOVrq3(R8, RCX);
MOVobb(RCX, RSI, AL);
break;
case BPF_LD|BPF_W|BPF_LEN:
MOVrd3(R9D, EAX);
break;
case BPF_LDX|BPF_W|BPF_LEN:
MOVrd3(R9D, EDX);
break;
case BPF_LD|BPF_W|BPF_IND:
CMPrd(EDI, EDX);
JAb(27);
MOVid(ins->k, ESI);
MOVrd(EDI, ECX);
SUBrd(EDX, ECX);
CMPrd(ESI, ECX);
JBb(14);
ADDrd(EDX, ESI);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int32_t), ECX);
if (fmem) {
JAEb(4);
ZEROrd(EAX);
LEAVE();
} else {
JAEb(3);
ZEROrd(EAX);
}
RET();
MOVrq3(R8, RCX);
MOVobd(RCX, RSI, EAX);
BSWAP(EAX);
break;
case BPF_LD|BPF_H|BPF_IND:
ZEROrd(EAX);
CMPrd(EDI, EDX);
JAb(27);
MOVid(ins->k, ESI);
MOVrd(EDI, ECX);
SUBrd(EDX, ECX);
CMPrd(ESI, ECX);
JBb(14);
ADDrd(EDX, ESI);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int16_t), ECX);
if (fmem) {
JAEb(2);
LEAVE();
} else
JAEb(1);
RET();
MOVrq3(R8, RCX);
MOVobw(RCX, RSI, AX);
SWAP_AX();
break;
case BPF_LD|BPF_B|BPF_IND:
ZEROrd(EAX);
CMPrd(EDI, EDX);
JAEb(13);
MOVid(ins->k, ESI);
MOVrd(EDI, ECX);
SUBrd(EDX, ECX);
CMPrd(ESI, ECX);
if (fmem) {
JAb(2);
LEAVE();
} else
JAb(1);
RET();
MOVrq3(R8, RCX);
ADDrd(EDX, ESI);
MOVobb(RCX, RSI, AL);
break;
case BPF_LDX|BPF_MSH|BPF_B:
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
if (fmem) {
JBb(4);
ZEROrd(EAX);
LEAVE();
} else {
JBb(3);
ZEROrd(EAX);
}
RET();
ZEROrd(EDX);
MOVrq3(R8, RCX);
MOVobb(RCX, RSI, DL);
ANDib(0x0f, DL);
SHLib(2, EDX);
break;
case BPF_LD|BPF_IMM:
MOVid(ins->k, EAX);
break;
case BPF_LDX|BPF_IMM:
MOVid(ins->k, EDX);
break;
case BPF_LD|BPF_MEM:
MOVid(ins->k * sizeof(uint32_t), ESI);
MOVobd(RSP, RSI, EAX);
break;
case BPF_LDX|BPF_MEM:
MOVid(ins->k * sizeof(uint32_t), ESI);
MOVobd(RSP, RSI, EDX);
break;
case BPF_ST:
/*
* XXX this command and the following could
* be optimized if the previous instruction
* was already of this type
*/
MOVid(ins->k * sizeof(uint32_t), ESI);
MOVomd(EAX, RSP, RSI);
break;
case BPF_STX:
MOVid(ins->k * sizeof(uint32_t), ESI);
MOVomd(EDX, RSP, RSI);
break;
case BPF_JMP|BPF_JA:
JUMP(ins->k);
break;
case BPF_JMP|BPF_JGT|BPF_K:
case BPF_JMP|BPF_JGE|BPF_K:
case BPF_JMP|BPF_JEQ|BPF_K:
case BPF_JMP|BPF_JSET|BPF_K:
case BPF_JMP|BPF_JGT|BPF_X:
case BPF_JMP|BPF_JGE|BPF_X:
case BPF_JMP|BPF_JEQ|BPF_X:
case BPF_JMP|BPF_JSET|BPF_X:
if (ins->jt == ins->jf) {
JUMP(ins->jt);
break;
}
switch (ins->code) {
case BPF_JMP|BPF_JGT|BPF_K:
CMPid(ins->k, EAX);
JCC(JA, JBE);
break;
case BPF_JMP|BPF_JGE|BPF_K:
CMPid(ins->k, EAX);
JCC(JAE, JB);
break;
case BPF_JMP|BPF_JEQ|BPF_K:
CMPid(ins->k, EAX);
JCC(JE, JNE);
break;
case BPF_JMP|BPF_JSET|BPF_K:
TESTid(ins->k, EAX);
JCC(JNE, JE);
break;
case BPF_JMP|BPF_JGT|BPF_X:
CMPrd(EDX, EAX);
JCC(JA, JBE);
break;
case BPF_JMP|BPF_JGE|BPF_X:
CMPrd(EDX, EAX);
JCC(JAE, JB);
break;
case BPF_JMP|BPF_JEQ|BPF_X:
CMPrd(EDX, EAX);
JCC(JE, JNE);
break;
case BPF_JMP|BPF_JSET|BPF_X:
TESTrd(EDX, EAX);
JCC(JNE, JE);
break;
}
break;
case BPF_ALU|BPF_ADD|BPF_X:
ADDrd(EDX, EAX);
break;
case BPF_ALU|BPF_SUB|BPF_X:
SUBrd(EDX, EAX);
break;
case BPF_ALU|BPF_MUL|BPF_X:
MOVrd(EDX, ECX);
MULrd(EDX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_DIV|BPF_X:
case BPF_ALU|BPF_MOD|BPF_X:
TESTrd(EDX, EDX);
if (fmem) {
JNEb(4);
ZEROrd(EAX);
LEAVE();
} else {
JNEb(3);
ZEROrd(EAX);
}
RET();
MOVrd(EDX, ECX);
ZEROrd(EDX);
DIVrd(ECX);
if (BPF_OP(ins->code) == BPF_MOD)
MOVrd(EDX, EAX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_AND|BPF_X:
ANDrd(EDX, EAX);
break;
case BPF_ALU|BPF_OR|BPF_X:
ORrd(EDX, EAX);
break;
case BPF_ALU|BPF_XOR|BPF_X:
XORrd(EDX, EAX);
break;
case BPF_ALU|BPF_LSH|BPF_X:
MOVrd(EDX, ECX);
SHL_CLrb(EAX);
break;
case BPF_ALU|BPF_RSH|BPF_X:
MOVrd(EDX, ECX);
SHR_CLrb(EAX);
break;
case BPF_ALU|BPF_ADD|BPF_K:
ADD_EAXi(ins->k);
break;
case BPF_ALU|BPF_SUB|BPF_K:
SUB_EAXi(ins->k);
break;
case BPF_ALU|BPF_MUL|BPF_K:
MOVrd(EDX, ECX);
MOVid(ins->k, EDX);
MULrd(EDX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_DIV|BPF_K:
case BPF_ALU|BPF_MOD|BPF_K:
MOVrd(EDX, ECX);
ZEROrd(EDX);
MOVid(ins->k, ESI);
DIVrd(ESI);
if (BPF_OP(ins->code) == BPF_MOD)
MOVrd(EDX, EAX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_AND|BPF_K:
ANDid(ins->k, EAX);
break;
case BPF_ALU|BPF_OR|BPF_K:
ORid(ins->k, EAX);
break;
case BPF_ALU|BPF_XOR|BPF_K:
XORid(ins->k, EAX);
break;
case BPF_ALU|BPF_LSH|BPF_K:
SHLib((ins->k) & 0xff, EAX);
break;
case BPF_ALU|BPF_RSH|BPF_K:
SHRib((ins->k) & 0xff, EAX);
break;
case BPF_ALU|BPF_NEG:
NEGd(EAX);
break;
case BPF_MISC|BPF_TAX:
MOVrd(EAX, EDX);
break;
case BPF_MISC|BPF_TXA:
MOVrd(EDX, EAX);
break;
}
ins++;
}
if (pass > 0)
continue;
*size = stream.cur_ip;
#ifdef _KERNEL
/*
* We cannot use malloc(9) because DMAP is mapped as NX.
*/
stream.ibuf = (void *)kmem_malloc(kernel_arena, *size,
M_NOWAIT);
if (stream.ibuf == NULL)
break;
#else
stream.ibuf = mmap(NULL, *size, PROT_READ | PROT_WRITE,
MAP_ANON, -1, 0);
if (stream.ibuf == MAP_FAILED) {
stream.ibuf = NULL;
break;
}
#endif
/*
* Modify the reference table to contain the offsets and
* not the lengths of the instructions.
*/
if (fjmp)
for (i = 1; i < nins + 1; i++)
stream.refs[i] += stream.refs[i - 1];
/* Reset the counters. */
stream.cur_ip = 0;
stream.bpf_pc = 0;
/* The second pass creates the actual code. */
emitm = emit_code;
}
/*
* The reference table is needed only during compilation,
* now we can free it.
*/
if (fjmp)
#ifdef _KERNEL
free(stream.refs, M_BPFJIT);
#else
free(stream.refs);
#endif
#ifndef _KERNEL
if (stream.ibuf != NULL &&
mprotect(stream.ibuf, *size, PROT_READ | PROT_EXEC) != 0) {
munmap(stream.ibuf, *size);
stream.ibuf = NULL;
}
#endif
return ((bpf_filter_func)(void *)stream.ibuf);
}
void
bpf_jit_free(void *func, size_t size)
{
#ifdef _KERNEL
kmem_free(kernel_arena, (vm_offset_t)func, size);
#else
munmap(func, size);
#endif
}
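A note on the two-pass scheme used above: the first pass routes every emit
through emit_length(), which only records how many bytes each BPF instruction
will occupy (stream.refs), and between the passes those per-instruction
lengths are summed into cumulative offsets so that the second pass, which uses
emit_code(), can resolve jump targets. Below is a minimal standalone sketch of
that bookkeeping; the instruction lengths and the displacement arithmetic at
the end are illustrative only and are not the exact encoding produced by the
JUMP/JCC macros.

#include <stdio.h>

/*
 * Pass 1 records the emitted length of each instruction in refs[i + 1]
 * (mirroring emit_length(), where bpf_pc has already been advanced); the
 * table is then converted to cumulative offsets so a forward jump can be
 * resolved as a byte displacement between two table entries.
 */
int
main(void)
{
        unsigned int insn_len[] = { 5, 7, 3, 2, 6 };    /* hypothetical lengths */
        unsigned int nins = sizeof(insn_len) / sizeof(insn_len[0]);
        unsigned int refs[6] = { 0 };
        unsigned int i;

        /* Pass 1: accumulate per-instruction lengths. */
        for (i = 0; i < nins; i++)
                refs[i + 1] += insn_len[i];
        /* Between passes: turn lengths into offsets (as done for stream.refs). */
        for (i = 1; i < nins + 1; i++)
                refs[i] += refs[i - 1];
        for (i = 0; i < nins + 1; i++)
                printf("refs[%u] = %u\n", i, refs[i]);
        /* A jump from instruction 1 to instruction 4 skips refs[4] - refs[2] bytes. */
        printf("jump 1 -> 4 skips %u bytes\n", refs[4] - refs[2]);
        return (0);
}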
Index: head/sys/arm/xscale/ixp425/if_npe.c
===================================================================
--- head/sys/arm/xscale/ixp425/if_npe.c (revision 328217)
+++ head/sys/arm/xscale/ixp425/if_npe.c (revision 328218)
@@ -1,1782 +1,1781 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2006-2008 Sam Leffler. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Intel XScale NPE Ethernet driver.
*
* This driver handles the two ports present on the IXP425.
* Packet processing is done by the Network Processing Engines
* (NPE's) that work together with a MAC and PHY. The MAC
* is also mapped to the XScale cpu; the PHY is accessed via
* the MAC. NPE-XScale communication happens through h/w
* queues managed by the Q Manager block.
*
* The code here replaces the ethAcc, ethMii, and ethDB classes
* in the Intel Access Library (IAL) and the OS-specific driver.
*
* XXX add vlan support
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>
#include <net/if_var.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>
#include <machine/armreg.h>
#include "miibus_if.h"
/*
* XXX: For the main bus dma tag. Can go away if the new method to get the
* dma tag from the parent got MFC'd into RELENG_6.
*/
extern struct ixp425_softc *ixp425_softc;
struct npebuf {
struct npebuf *ix_next; /* chain to next buffer */
void *ix_m; /* backpointer to mbuf */
bus_dmamap_t ix_map; /* bus dma map for associated data */
struct npehwbuf *ix_hw; /* associated h/w block */
uint32_t ix_neaddr; /* phys address of ix_hw */
};
struct npedma {
const char* name;
int nbuf; /* # npebuf's allocated */
bus_dma_tag_t mtag; /* bus dma tag for mbuf data */
struct npehwbuf *hwbuf; /* NPE h/w buffers */
bus_dma_tag_t buf_tag; /* tag+map for NPE buffers */
bus_dmamap_t buf_map;
bus_addr_t buf_phys; /* phys addr of buffers */
struct npebuf *buf; /* s/w buffers (1-1 w/ h/w) */
};
struct npe_softc {
/* XXX mii requires this be first; do not move! */
struct ifnet *sc_ifp; /* ifnet pointer */
struct mtx sc_mtx; /* basically a perimeter lock */
device_t sc_dev;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh; /* MAC register window */
device_t sc_mii; /* child miibus */
bus_space_handle_t sc_miih; /* MII register window */
int sc_npeid;
struct ixpnpe_softc *sc_npe; /* NPE support */
int sc_debug; /* DPRINTF* control */
int sc_tickinterval;
struct callout tick_ch; /* Tick callout */
int npe_watchdog_timer;
struct npedma txdma;
struct npebuf *tx_free; /* list of free tx buffers */
struct npedma rxdma;
bus_addr_t buf_phys; /* XXX for returning a value */
int rx_qid; /* rx qid */
int rx_freeqid; /* rx free buffers qid */
int tx_qid; /* tx qid */
int tx_doneqid; /* tx completed qid */
struct ifmib_iso_8802_3 mibdata;
bus_dma_tag_t sc_stats_tag; /* bus dma tag for stats block */
struct npestats *sc_stats;
bus_dmamap_t sc_stats_map;
bus_addr_t sc_stats_phys; /* phys addr of sc_stats */
struct npestats sc_totals; /* accumulated sc_stats */
};
/*
* Static configuration for IXP425. The tx and
* rx free Q id's are fixed by the NPE microcode. The
* rx Q id's are programmed to be separate to simplify
* multi-port processing. It may be better to handle
* all traffic through one Q (as done by the Intel drivers).
*
* Note that the PHY's are accessible only from MAC B on the
* IXP425 and from MAC C on other devices. This and other
* platform-specific assumptions are handled with hints.
*/
static const struct {
uint32_t macbase;
uint32_t miibase;
int phy; /* phy id */
uint8_t rx_qid;
uint8_t rx_freeqid;
uint8_t tx_qid;
uint8_t tx_doneqid;
} npeconfig[NPE_MAX] = {
[NPE_A] = {
.macbase = IXP435_MAC_A_HWBASE,
.miibase = IXP425_MAC_C_HWBASE,
.phy = 2,
.rx_qid = 4,
.rx_freeqid = 26,
.tx_qid = 23,
.tx_doneqid = 31
},
[NPE_B] = {
.macbase = IXP425_MAC_B_HWBASE,
.miibase = IXP425_MAC_B_HWBASE,
.phy = 0,
.rx_qid = 4,
.rx_freeqid = 27,
.tx_qid = 24,
.tx_doneqid = 31
},
[NPE_C] = {
.macbase = IXP425_MAC_C_HWBASE,
.miibase = IXP425_MAC_B_HWBASE,
.phy = 1,
.rx_qid = 12,
.rx_freeqid = 28,
.tx_qid = 25,
.tx_doneqid = 31
},
};
static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */
static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}
static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}
#define NPE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define NPE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
static devclass_t npe_devclass;
static int override_npeid(device_t, const char *resname, int *val);
static int npe_activate(device_t dev);
static void npe_deactivate(device_t dev);
static int npe_ifmedia_update(struct ifnet *ifp);
static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void npe_txdone(int qid, void *arg);
static int npe_rxbuf_init(struct npe_softc *, struct npebuf *,
struct mbuf *);
static int npe_rxdone(int qid, void *arg);
static void npeinit(void *);
static void npestart_locked(struct ifnet *);
static void npestart(struct ifnet *);
static void npestop(struct npe_softc *);
static void npewatchdog(struct npe_softc *);
static int npeioctl(struct ifnet * ifp, u_long, caddr_t);
static int npe_setrxqosentry(struct npe_softc *, int classix,
int trafclass, int qid);
static int npe_setportaddress(struct npe_softc *, const uint8_t mac[]);
static int npe_setfirewallmode(struct npe_softc *, int onoff);
static int npe_updatestats(struct npe_softc *);
#if 0
static int npe_getstats(struct npe_softc *);
static uint32_t npe_getimageid(struct npe_softc *);
static int npe_setloopback(struct npe_softc *, int ena);
#endif
/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;
static SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0,
"IXP4XX NPE driver parameters");
static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RWTUN, &npe_debug,
0, "IXP4XX NPE network interface debug msgs");
#define DPRINTF(sc, fmt, ...) do { \
if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__); \
} while (0)
#define DPRINTFn(n, sc, fmt, ...) do { \
if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3; /* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RDTUN, &npe_tickinterval,
0, "periodic work interval (secs)");
static int npe_rxbuf = 64; /* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RDTUN, &npe_rxbuf,
0, "rx buffers allocated");
static int npe_txbuf = 128; /* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RDTUN, &npe_txbuf,
0, "tx buffers allocated");
static int
unit2npeid(int unit)
{
static const int npeidmap[2][3] = {
/* on 425 A is for HSS, B & C are for Ethernet */
{ NPE_B, NPE_C, -1 }, /* IXP425 */
/* 435 only has A & C, order C then A */
{ NPE_C, NPE_A, -1 }, /* IXP435 */
};
/* XXX check feature register instead */
return (unit < 3 ? npeidmap[
(cpu_ident() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}
static int
npe_probe(device_t dev)
{
static const char *desc[NPE_MAX] = {
[NPE_A] = "IXP NPE-A",
[NPE_B] = "IXP NPE-B",
[NPE_C] = "IXP NPE-C"
};
int unit = device_get_unit(dev);
int npeid;
if (unit > 2 ||
(ixp4xx_read_feature_bits() &
(unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
return EINVAL;
npeid = -1;
if (!override_npeid(dev, "npeid", &npeid))
npeid = unit2npeid(unit);
if (npeid == -1) {
device_printf(dev, "unit %d not supported\n", unit);
return EINVAL;
}
device_set_desc(dev, desc[npeid]);
return 0;
}
static int
npe_attach(device_t dev)
{
struct npe_softc *sc = device_get_softc(dev);
struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct ifnet *ifp;
int error;
u_char eaddr[6];
sc->sc_dev = dev;
sc->sc_iot = sa->sc_iot;
NPE_LOCK_INIT(sc);
callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
sc->sc_debug = npe_debug;
sc->sc_tickinterval = npe_tickinterval;
ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "cannot allocate ifnet\n");
error = EIO; /* XXX */
goto out;
}
/* NB: must be setup prior to invoking mii code */
sc->sc_ifp = ifp;
error = npe_activate(dev);
if (error) {
device_printf(dev, "cannot activate npe\n");
goto out;
}
npe_getmac(sc, eaddr);
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_start = npestart;
ifp->if_ioctl = npeioctl;
ifp->if_init = npeinit;
IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
IFQ_SET_READY(&ifp->if_snd);
ifp->if_linkmib = &sc->mibdata;
ifp->if_linkmiblen = sizeof(sc->mibdata);
sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
/* device supports oversized vlan frames */
ifp->if_capabilities |= IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats");
ether_ifattach(ifp, eaddr);
return 0;
out:
if (ifp != NULL)
if_free(ifp);
NPE_LOCK_DESTROY(sc);
npe_deactivate(dev);
return error;
}
static int
npe_detach(device_t dev)
{
struct npe_softc *sc = device_get_softc(dev);
struct ifnet *ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
npestop(sc);
if (ifp != NULL) {
ether_ifdetach(ifp);
if_free(ifp);
}
NPE_LOCK_DESTROY(sc);
npe_deactivate(dev);
return 0;
}
/*
* Compute and install the multicast filter.
*/
static void
npe_setmcast(struct npe_softc *sc)
{
struct ifnet *ifp = sc->sc_ifp;
uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
int i;
if (ifp->if_flags & IFF_PROMISC) {
memset(mask, 0, ETHER_ADDR_LEN);
memset(addr, 0, ETHER_ADDR_LEN);
} else if (ifp->if_flags & IFF_ALLMULTI) {
static const uint8_t allmulti[ETHER_ADDR_LEN] =
{ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
memcpy(mask, allmulti, ETHER_ADDR_LEN);
memcpy(addr, allmulti, ETHER_ADDR_LEN);
} else {
uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
struct ifmultiaddr *ifma;
const uint8_t *mac;
memset(clr, 0, ETHER_ADDR_LEN);
memset(set, 0xff, ETHER_ADDR_LEN);
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
for (i = 0; i < ETHER_ADDR_LEN; i++) {
clr[i] |= mac[i];
set[i] &= mac[i];
}
}
if_maddr_runlock(ifp);
for (i = 0; i < ETHER_ADDR_LEN; i++) {
mask[i] = set[i] | ~clr[i];
addr[i] = set[i];
}
}
/*
* Write the mask and address registers.
*/
for (i = 0; i < ETHER_ADDR_LEN; i++) {
WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
WR4(sc, NPE_MAC_ADDR(i), addr[i]);
}
}
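npe_setmcast() above collapses the interface's multicast list into a single
address/mask pair for the MAC's hardware filter: a mask bit is set only where
every listed address agrees, and the address register holds the agreed-upon
bit values. A standalone worked example of that fold, using two made-up group
addresses rather than anything taken from a live interface:

#include <stdio.h>
#include <string.h>

#define ETHER_ADDR_LEN  6

/*
 * Fold a list of multicast addresses into the address/mask pair written to
 * NPE_MAC_ADDR / NPE_MAC_ADDR_MASK: a mask bit of 1 means "must match the
 * address register", 0 means "don't care".
 */
int
main(void)
{
        const unsigned char macs[2][ETHER_ADDR_LEN] = {
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, /* hypothetical groups */
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
        };
        unsigned char clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
        unsigned char mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
        int i, j;

        memset(clr, 0, ETHER_ADDR_LEN);         /* OR of all addresses */
        memset(set, 0xff, ETHER_ADDR_LEN);      /* AND of all addresses */
        for (j = 0; j < 2; j++)
                for (i = 0; i < ETHER_ADDR_LEN; i++) {
                        clr[i] |= macs[j][i];
                        set[i] &= macs[j][i];
                }
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                mask[i] = set[i] | ~clr[i];     /* 1 where every address agrees */
                addr[i] = set[i];               /* the agreed-upon bit values */
                printf("byte %d: addr %02x mask %02x\n", i, addr[i], mask[i]);
        }
        return (0);
}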
static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct npe_softc *sc;
if (error != 0)
return;
sc = (struct npe_softc *)arg;
sc->buf_phys = segs[0].ds_addr;
}
static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
const char *name, int nbuf, int maxseg)
{
int error, i;
memset(dma, 0, sizeof(*dma));
dma->name = name;
dma->nbuf = nbuf;
/* DMA tag for mapped mbufs */
error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
MCLBYTES, maxseg, MCLBYTES, 0,
busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
if (error != 0) {
device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
"error %u\n", dma->name, error);
return error;
}
/* DMA tag and map for the NPE buffers */
error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
nbuf * sizeof(struct npehwbuf), 1,
nbuf * sizeof(struct npehwbuf), 0,
busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
if (error != 0) {
device_printf(sc->sc_dev,
"unable to create %s npebuf dma tag, error %u\n",
dma->name, error);
return error;
}
if (bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&dma->buf_map) != 0) {
device_printf(sc->sc_dev,
"unable to allocate memory for %s h/w buffers, error %u\n",
dma->name, error);
return error;
}
/* XXX M_TEMP */
- dma->buf = mallocarray(nbuf, sizeof(struct npebuf), M_TEMP,
- M_NOWAIT | M_ZERO);
+ dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
if (dma->buf == NULL) {
device_printf(sc->sc_dev,
"unable to allocate memory for %s s/w buffers\n",
dma->name);
return error;
}
if (bus_dmamap_load(dma->buf_tag, dma->buf_map,
dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0) != 0) {
device_printf(sc->sc_dev,
"unable to map memory for %s h/w buffers, error %u\n",
dma->name, error);
return error;
}
dma->buf_phys = sc->buf_phys;
for (i = 0; i < dma->nbuf; i++) {
struct npebuf *npe = &dma->buf[i];
struct npehwbuf *hw = &dma->hwbuf[i];
/* calculate offset to shared area */
npe->ix_neaddr = dma->buf_phys +
((uintptr_t)hw - (uintptr_t)dma->hwbuf);
KASSERT((npe->ix_neaddr & 0x1f) == 0,
("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
&npe->ix_map);
if (error != 0) {
device_printf(sc->sc_dev,
"unable to create dmamap for %s buffer %u, "
"error %u\n", dma->name, i, error);
return error;
}
npe->ix_hw = hw;
}
bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
return 0;
}
static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
int i;
if (dma->hwbuf != NULL) {
for (i = 0; i < dma->nbuf; i++) {
struct npebuf *npe = &dma->buf[i];
bus_dmamap_destroy(dma->mtag, npe->ix_map);
}
bus_dmamap_unload(dma->buf_tag, dma->buf_map);
bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
}
if (dma->buf != NULL)
free(dma->buf, M_TEMP);
if (dma->buf_tag)
bus_dma_tag_destroy(dma->buf_tag);
if (dma->mtag)
bus_dma_tag_destroy(dma->mtag);
memset(dma, 0, sizeof(*dma));
}
static int
override_addr(device_t dev, const char *resname, int *base)
{
int unit = device_get_unit(dev);
const char *resval;
/* XXX warn for wrong hint type */
if (resource_string_value("npe", unit, resname, &resval) != 0)
return 0;
switch (resval[0]) {
case 'A':
*base = IXP435_MAC_A_HWBASE;
break;
case 'B':
*base = IXP425_MAC_B_HWBASE;
break;
case 'C':
*base = IXP425_MAC_C_HWBASE;
break;
default:
device_printf(dev, "Warning, bad value %s for "
"npe.%d.%s ignored\n", resval, unit, resname);
return 0;
}
if (bootverbose)
device_printf(dev, "using npe.%d.%s=%s override\n",
unit, resname, resval);
return 1;
}
static int
override_npeid(device_t dev, const char *resname, int *npeid)
{
int unit = device_get_unit(dev);
const char *resval;
/* XXX warn for wrong hint type */
if (resource_string_value("npe", unit, resname, &resval) != 0)
return 0;
switch (resval[0]) {
case 'A': *npeid = NPE_A; break;
case 'B': *npeid = NPE_B; break;
case 'C': *npeid = NPE_C; break;
default:
device_printf(dev, "Warning, bad value %s for "
"npe.%d.%s ignored\n", resval, unit, resname);
return 0;
}
if (bootverbose)
device_printf(dev, "using npe.%d.%s=%s override\n",
unit, resname, resval);
return 1;
}
static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
int unit = device_get_unit(dev);
int resval;
if (resource_int_value("npe", unit, resname, &resval) != 0)
return 0;
if (!(min <= resval && resval <= max)) {
device_printf(dev, "Warning, bad value %d for npe.%d.%s "
"ignored (value must be [%d-%d])\n", resval, unit,
resname, min, max);
return 0;
}
if (bootverbose)
device_printf(dev, "using npe.%d.%s=%d override\n",
unit, resname, resval);
*val = resval;
return 1;
}
static void
npe_mac_reset(struct npe_softc *sc)
{
/*
* Reset MAC core.
*/
WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
DELAY(NPE_MAC_RESET_DELAY);
/* configure MAC to generate MDC clock */
WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}
static int
npe_activate(device_t dev)
{
struct npe_softc *sc = device_get_softc(dev);
int error, i, macbase, miibase, phy;
/*
* Setup NPE ID, MAC, and MII bindings. We allow override
* via hints to handle unexpected board configs.
*/
if (!override_npeid(dev, "npeid", &sc->sc_npeid))
sc->sc_npeid = unit2npeid(device_get_unit(dev));
sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
if (sc->sc_npe == NULL) {
device_printf(dev, "cannot attach ixpnpe\n");
return EIO; /* XXX */
}
/* MAC */
if (!override_addr(dev, "mac", &macbase))
macbase = npeconfig[sc->sc_npeid].macbase;
if (bootverbose)
device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) {
device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
macbase, IXP425_REG_SIZE);
return ENOMEM;
}
/* PHY */
if (!override_unit(dev, "phy", &phy, 0, MII_NPHY - 1))
phy = npeconfig[sc->sc_npeid].phy;
if (!override_addr(dev, "mii", &miibase))
miibase = npeconfig[sc->sc_npeid].miibase;
if (bootverbose)
device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
if (miibase != macbase) {
/*
* PHY is mapped through a different MAC, setup an
* additional mapping for frobbing the PHY registers.
*/
if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) {
device_printf(dev,
"cannot map MII registers 0x%x:0x%x\n",
miibase, IXP425_REG_SIZE);
return ENOMEM;
}
} else
sc->sc_miih = sc->sc_ioh;
/*
* Load NPE firmware and start it running.
*/
error = ixpnpe_init(sc->sc_npe);
if (error != 0) {
device_printf(dev, "cannot init NPE (error %d)\n", error);
return error;
}
/* attach PHY */
error = mii_attach(dev, &sc->sc_mii, sc->sc_ifp, npe_ifmedia_update,
npe_ifmedia_status, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(dev, "attaching PHYs failed\n");
return error;
}
error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
if (error != 0)
return error;
error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
if (error != 0)
return error;
/* setup statistics block */
error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
sizeof(struct npestats), 1, sizeof(struct npestats), 0,
busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
if (error != 0) {
device_printf(sc->sc_dev, "unable to create stats tag, "
"error %u\n", error);
return error;
}
if (bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
BUS_DMA_NOWAIT, &sc->sc_stats_map) != 0) {
device_printf(sc->sc_dev,
"unable to allocate memory for stats block, error %u\n",
error);
return error;
}
if (bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0) != 0) {
device_printf(sc->sc_dev,
"unable to load memory for stats block, error %u\n",
error);
return error;
}
sc->sc_stats_phys = sc->buf_phys;
/*
* Setup h/w rx/tx queues. There are four q's:
* rx inbound q of rx'd frames
* rx_free pool of ixpbuf's for receiving frames
* tx outbound q of frames to send
* tx_done q of tx frames that have been processed
*
* The NPE handles the actual tx/rx process and the q manager
* handles the queues. The driver just writes entries to the
* q manager mailbox's and gets callbacks when there are rx'd
* frames to process or tx'd frames to reap. These callbacks
* are controlled by the q configurations; e.g. we get a
* callback when tx_done has 2 or more frames to process and
* when the rx q has at least one frame. These settings can be
* changed at the time the q is configured.
*/
sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
IX_QMGR_Q_SOURCE_ID_NOT_E, (qconfig_hand_t *)npe_rxdone, sc);
sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
/*
* Setup the NPE to direct all traffic to rx_qid.
* When QoS is enabled in the firmware there are
* 8 traffic classes; otherwise just 4.
*/
for (i = 0; i < 8; i++)
npe_setrxqosentry(sc, i, 0, sc->rx_qid);
/* disable firewall mode just in case (should be off) */
npe_setfirewallmode(sc, 0);
sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
if (tx_doneqid == -1) {
ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
tx_doneqid = sc->tx_doneqid;
}
KASSERT(npes[sc->sc_npeid] == NULL,
("npe %u already setup", sc->sc_npeid));
npes[sc->sc_npeid] = sc;
return 0;
}
static void
npe_deactivate(device_t dev)
{
struct npe_softc *sc = device_get_softc(dev);
npes[sc->sc_npeid] = NULL;
/* XXX disable q's */
if (sc->sc_npe != NULL) {
ixpnpe_stop(sc->sc_npe);
ixpnpe_detach(sc->sc_npe);
}
if (sc->sc_stats != NULL) {
bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
sc->sc_stats_map);
}
if (sc->sc_stats_tag != NULL)
bus_dma_tag_destroy(sc->sc_stats_tag);
npe_dma_destroy(sc, &sc->txdma);
npe_dma_destroy(sc, &sc->rxdma);
bus_generic_detach(sc->sc_dev);
if (sc->sc_mii != NULL)
device_delete_child(sc->sc_dev, sc->sc_mii);
}
/*
* Change media according to request.
*/
static int
npe_ifmedia_update(struct ifnet *ifp)
{
struct npe_softc *sc = ifp->if_softc;
struct mii_data *mii;
mii = device_get_softc(sc->sc_mii);
NPE_LOCK(sc);
mii_mediachg(mii);
/* XXX push state ourself? */
NPE_UNLOCK(sc);
return (0);
}
/*
* Notify the world which media we're using.
*/
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct npe_softc *sc = ifp->if_softc;
struct mii_data *mii;
mii = device_get_softc(sc->sc_mii);
NPE_LOCK(sc);
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
NPE_UNLOCK(sc);
}
static void
npe_addstats(struct npe_softc *sc)
{
#define NPEADD(x) sc->sc_totals.x += be32toh(ns->x)
#define MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0)
struct ifnet *ifp = sc->sc_ifp;
struct npestats *ns = sc->sc_stats;
MIBADD(dot3StatsAlignmentErrors);
MIBADD(dot3StatsFCSErrors);
MIBADD(dot3StatsInternalMacReceiveErrors);
NPEADD(RxOverrunDiscards);
NPEADD(RxLearnedEntryDiscards);
NPEADD(RxLargeFramesDiscards);
NPEADD(RxSTPBlockedDiscards);
NPEADD(RxVLANTypeFilterDiscards);
NPEADD(RxVLANIdFilterDiscards);
NPEADD(RxInvalidSourceDiscards);
NPEADD(RxBlackListDiscards);
NPEADD(RxWhiteListDiscards);
NPEADD(RxUnderflowEntryDiscards);
MIBADD(dot3StatsSingleCollisionFrames);
MIBADD(dot3StatsMultipleCollisionFrames);
MIBADD(dot3StatsDeferredTransmissions);
MIBADD(dot3StatsLateCollisions);
MIBADD(dot3StatsExcessiveCollisions);
MIBADD(dot3StatsInternalMacTransmitErrors);
MIBADD(dot3StatsCarrierSenseErrors);
NPEADD(TxLargeFrameDiscards);
NPEADD(TxVLANIdFilterDiscards);
sc->mibdata.dot3StatsFrameTooLongs +=
be32toh(ns->RxLargeFramesDiscards)
+ be32toh(ns->TxLargeFrameDiscards);
sc->mibdata.dot3StatsMissedFrames +=
be32toh(ns->RxOverrunDiscards)
+ be32toh(ns->RxUnderflowEntryDiscards);
if_inc_counter(ifp, IFCOUNTER_OERRORS,
be32toh(ns->dot3StatsInternalMacTransmitErrors) +
be32toh(ns->dot3StatsCarrierSenseErrors) +
be32toh(ns->TxVLANIdFilterDiscards));
if_inc_counter(ifp, IFCOUNTER_IERRORS,
be32toh(ns->dot3StatsFCSErrors) +
be32toh(ns->dot3StatsInternalMacReceiveErrors) +
be32toh(ns->RxOverrunDiscards) +
be32toh(ns->RxUnderflowEntryDiscards));
if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
be32toh(ns->dot3StatsSingleCollisionFrames) +
be32toh(ns->dot3StatsMultipleCollisionFrames));
#undef NPEADD
#undef MIBADD
}
static void
npe_tick(void *xsc)
{
#define ACK (NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
struct npe_softc *sc = xsc;
struct mii_data *mii = device_get_softc(sc->sc_mii);
uint32_t msg[2];
NPE_ASSERT_LOCKED(sc);
/*
* NB: to avoid sleeping with the softc lock held we
* split the NPE msg processing into two parts. The
* request for statistics is sent w/o waiting for a
* reply and then on the next tick we retrieve the
* results. This works because npe_tick is the only
* code that talks via the mailbox's (except at setup).
* This likely can be handled better.
*/
if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
BUS_DMASYNC_POSTREAD);
npe_addstats(sc);
}
npe_updatestats(sc);
mii_tick(mii);
npewatchdog(sc);
/* schedule next poll */
callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}
static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}
static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
/* NB: the unicast address appears to be loaded from EEPROM on reset */
eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}
struct txdone {
struct npebuf *head;
struct npebuf **tail;
int count;
};
static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
struct ifnet *ifp = sc->sc_ifp;
NPE_LOCK(sc);
*td->tail = sc->tx_free;
sc->tx_free = td->head;
/*
* We're no longer busy, so clear the busy flag and call the
* start routine to xmit more packets.
*/
if_inc_counter(ifp, IFCOUNTER_OPACKETS, td->count);
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->npe_watchdog_timer = 0;
npestart_locked(ifp);
NPE_UNLOCK(sc);
}
/*
* Q manager callback on tx done queue. Reap mbufs
* and return tx buffers to the free list. Finally
* restart output. Note the microcode has only one
* txdone q wired into it so we must use the NPE ID
* returned with each npehwbuf to decide where to
* send buffers.
*/
static void
npe_txdone(int qid, void *arg)
{
#define P2V(a, dma) \
&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
struct npe_softc *sc0 = arg;
struct npe_softc *sc;
struct npebuf *npe;
struct txdone *td, q[NPE_MAX];
uint32_t entry;
q[NPE_A].tail = &q[NPE_A].head; q[NPE_A].count = 0;
q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
/* XXX max # at a time? */
while (ixpqmgr_qread(qid, &entry) == 0) {
DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
__func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));
sc = npes[NPE_QM_Q_NPE(entry)];
npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
m_freem(npe->ix_m);
npe->ix_m = NULL;
td = &q[NPE_QM_Q_NPE(entry)];
*td->tail = npe;
td->tail = &npe->ix_next;
td->count++;
}
if (q[NPE_A].count)
npe_txdone_finish(npes[NPE_A], &q[NPE_A]);
if (q[NPE_B].count)
npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
if (q[NPE_C].count)
npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}
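The P2V() macro above inverts the ix_neaddr computation done in
npe_dma_setup(): the queue manager hands back only the 32-bit bus address of
an npehwbuf, and the driver recovers the corresponding software npebuf by
dividing the offset from the start of the contiguous hwbuf block by the
hardware buffer size. A small sketch of that round trip; the structures and
the base address here are stand-ins, not the real npehwbuf layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fakehwbuf { uint32_t word[8]; };         /* stand-in for struct npehwbuf */
struct fakebuf { struct fakehwbuf *hw; };       /* stand-in for struct npebuf */

int
main(void)
{
        struct fakehwbuf hwbuf[4];
        struct fakebuf buf[4];
        uint32_t buf_phys = 0x20000000;         /* pretend bus address of hwbuf[0] */
        uint32_t neaddr;
        int i;

        for (i = 0; i < 4; i++) {
                buf[i].hw = &hwbuf[i];
                /* Forward mapping, as in npe_dma_setup(): s/w buf -> bus address. */
                neaddr = buf_phys +
                    (uint32_t)((uintptr_t)&hwbuf[i] - (uintptr_t)hwbuf);
                /* Reverse mapping, as in P2V(): bus address -> s/w buf. */
                assert(&buf[(neaddr - buf_phys) / sizeof(struct fakehwbuf)] ==
                    &buf[i]);
                printf("buf %d at bus address 0x%x\n", i, (unsigned)neaddr);
        }
        return (0);
}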
static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
bus_dma_segment_t segs[1];
struct npedma *dma = &sc->rxdma;
struct npehwbuf *hw;
int error, nseg;
if (m == NULL) {
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return ENOBUFS;
}
KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
("ext_size %d", m->m_ext.ext_size));
m->m_pkthdr.len = m->m_len = 1536;
/* backload payload and align ip hdr */
m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
bus_dmamap_unload(dma->mtag, npe->ix_map);
error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
segs, &nseg, 0);
if (error != 0) {
m_freem(m);
return error;
}
hw = npe->ix_hw;
hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
/* NB: NPE requires length be a multiple of 64 */
/* NB: buffer length is shifted in word */
hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
hw->ix_ne[0].next = 0;
bus_dmamap_sync(dma->buf_tag, dma->buf_map,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
npe->ix_m = m;
/* Flush the memory in the mbuf */
bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
return 0;
}
/*
* RX q processing for a specific NPE. Claim entries
* from the hardware queue and pass the frames up the
* stack. Pass the rx buffers to the free list.
*/
static int
npe_rxdone(int qid, void *arg)
{
#define P2V(a, dma) \
&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
struct npe_softc *sc = arg;
struct npedma *dma = &sc->rxdma;
uint32_t entry;
int rx_npkts = 0;
while (ixpqmgr_qread(qid, &entry) == 0) {
struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
struct mbuf *m;
bus_dmamap_sync(dma->buf_tag, dma->buf_map,
BUS_DMASYNC_POSTREAD);
DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
__func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
/*
* Allocate a new mbuf to replenish the rx buffer.
* If doing so fails we drop the rx'd frame so we
* can reuse the previous mbuf. When we're able to
* allocate a new mbuf dispatch the mbuf w/ rx'd
* data up the stack and replace it with the newly
* allocated one.
*/
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m != NULL) {
struct mbuf *mrx = npe->ix_m;
struct npehwbuf *hw = npe->ix_hw;
struct ifnet *ifp = sc->sc_ifp;
/* Flush mbuf memory for rx'd data */
bus_dmamap_sync(dma->mtag, npe->ix_map,
BUS_DMASYNC_POSTREAD);
/* set m_len etc. per rx frame size */
mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
mrx->m_pkthdr.len = mrx->m_len;
mrx->m_pkthdr.rcvif = ifp;
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
ifp->if_input(ifp, mrx);
rx_npkts++;
} else {
/* discard frame and re-use mbuf */
m = npe->ix_m;
}
if (npe_rxbuf_init(sc, npe, m) == 0) {
/* return npe buf to rx free list */
ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
} else {
/* XXX should not happen */
}
}
return rx_npkts;
#undef P2V
}
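The rx path packs two lengths into each descriptor's 32-bit len word:
npe_rxbuf_init() stores the buffer length in the upper 16 bits when a buffer
is posted to the free queue, and npe_rxdone() reads the received frame length
back out of the lower 16 bits. A tiny sketch of that packing with made-up
values (the htobe32/be32toh byte swapping done by the driver is omitted):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t len_word;
        uint16_t buflen = 1536;                 /* what npe_rxbuf_init() programs */
        uint16_t framelen;

        len_word = (uint32_t)buflen << 16;      /* as handed to the rx free q */
        len_word |= 0x05ea;                     /* pretend the NPE received 1514 bytes */
        framelen = len_word & 0xffff;           /* what npe_rxdone() extracts */
        printf("buffer %u bytes, frame %u bytes\n",
            (unsigned)(len_word >> 16), (unsigned)framelen);
        return (0);
}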
#ifdef DEVICE_POLLING
static int
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
struct npe_softc *sc = ifp->if_softc;
int rx_npkts = 0;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
rx_npkts = npe_rxdone(sc->rx_qid, sc);
npe_txdone(sc->tx_doneqid, sc); /* XXX polls both NPE's */
}
return rx_npkts;
}
#endif /* DEVICE_POLLING */
static void
npe_startxmit(struct npe_softc *sc)
{
struct npedma *dma = &sc->txdma;
int i;
NPE_ASSERT_LOCKED(sc);
sc->tx_free = NULL;
for (i = 0; i < dma->nbuf; i++) {
struct npebuf *npe = &dma->buf[i];
if (npe->ix_m != NULL) {
/* NB: should not happen */
device_printf(sc->sc_dev,
"%s: free mbuf at entry %u\n", __func__, i);
m_freem(npe->ix_m);
}
npe->ix_m = NULL;
npe->ix_next = sc->tx_free;
sc->tx_free = npe;
}
}
static void
npe_startrecv(struct npe_softc *sc)
{
struct npedma *dma = &sc->rxdma;
struct npebuf *npe;
int i;
NPE_ASSERT_LOCKED(sc);
for (i = 0; i < dma->nbuf; i++) {
npe = &dma->buf[i];
npe_rxbuf_init(sc, npe, npe->ix_m);
/* set npe buf on rx free list */
ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
}
}
/*
* Reset and initialize the chip
*/
static void
npeinit_locked(void *xsc)
{
struct npe_softc *sc = xsc;
struct ifnet *ifp = sc->sc_ifp;
NPE_ASSERT_LOCKED(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/
/*
* Reset MAC core.
*/
npe_mac_reset(sc);
/* disable transmitter and receiver in the MAC */
WR4(sc, NPE_MAC_RX_CNTRL1,
RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
WR4(sc, NPE_MAC_TX_CNTRL1,
RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
/*
* Set the MAC core registers.
*/
WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1); /* clock ratio: for ixp4xx */
WR4(sc, NPE_MAC_TX_CNTRL2, 0xf); /* max retries */
WR4(sc, NPE_MAC_RANDOM_SEED, 0x8); /* LFSR back-off seed */
/* thresholds determined by NPE firmware FS */
WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30);
WR4(sc, NPE_MAC_BUF_SIZE_TX, 0x8); /* tx fifo threshold (bytes) */
WR4(sc, NPE_MAC_TX_DEFER, 0x15); /* for single deferral */
WR4(sc, NPE_MAC_RX_DEFER, 0x16); /* deferral on inter-frame gap*/
WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8); /* for 2-part deferral */
WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7); /* for 2-part deferral */
WR4(sc, NPE_MAC_SLOT_TIME, 0x80); /* assumes MII mode */
WR4(sc, NPE_MAC_TX_CNTRL1,
NPE_TX_CNTRL1_RETRY /* retry failed xmits */
| NPE_TX_CNTRL1_FCS_EN /* append FCS */
| NPE_TX_CNTRL1_2DEFER /* 2-part deferral */
| NPE_TX_CNTRL1_PAD_EN); /* pad runt frames */
/* XXX pad strip? */
/* ena pause frame handling */
WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
WR4(sc, NPE_MAC_RX_CNTRL2, 0);
npe_setmac(sc, IF_LLADDR(ifp));
npe_setportaddress(sc, IF_LLADDR(ifp));
npe_setmcast(sc);
npe_startxmit(sc);
npe_startrecv(sc);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->npe_watchdog_timer = 0; /* just in case */
/* enable transmitter and receiver in the MAC */
WR4(sc, NPE_MAC_RX_CNTRL1,
RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
WR4(sc, NPE_MAC_TX_CNTRL1,
RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);
callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}
static void
npeinit(void *xsc)
{
struct npe_softc *sc = xsc;
NPE_LOCK(sc);
npeinit_locked(sc);
NPE_UNLOCK(sc);
}
/*
* Dequeue packets and place on the h/w transmit queue.
*/
static void
npestart_locked(struct ifnet *ifp)
{
struct npe_softc *sc = ifp->if_softc;
struct npebuf *npe;
struct npehwbuf *hw;
struct mbuf *m, *n;
struct npedma *dma = &sc->txdma;
bus_dma_segment_t segs[NPE_MAXSEG];
int nseg, len, error, i;
uint32_t next;
NPE_ASSERT_LOCKED(sc);
/* XXX can this happen? */
if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
return;
while (sc->tx_free != NULL) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
if (m == NULL) {
/* XXX? */
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
return;
}
npe = sc->tx_free;
bus_dmamap_unload(dma->mtag, npe->ix_map);
error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
m, segs, &nseg, 0);
if (error == EFBIG) {
n = m_collapse(m, M_NOWAIT, NPE_MAXSEG);
if (n == NULL) {
if_printf(ifp, "%s: too many fragments %u\n",
__func__, nseg);
m_freem(m);
return; /* XXX? */
}
m = n;
error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
m, segs, &nseg, 0);
}
if (error != 0 || nseg == 0) {
if_printf(ifp, "%s: error %u nseg %u\n",
__func__, error, nseg);
m_freem(m);
return; /* XXX? */
}
sc->tx_free = npe->ix_next;
bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);
/*
* Tap off here if there is a bpf listener.
*/
BPF_MTAP(ifp, m);
npe->ix_m = m;
hw = npe->ix_hw;
len = m->m_pkthdr.len;
next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
for (i = 0; i < nseg; i++) {
hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
hw->ix_ne[i].next = htobe32(next);
len = 0; /* zero for segments > 1 */
next += sizeof(hw->ix_ne[0]);
}
hw->ix_ne[i-1].next = 0; /* zero last in chain */
bus_dmamap_sync(dma->buf_tag, dma->buf_map,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
__func__, sc->tx_qid, npe->ix_neaddr,
hw->ix_ne[0].data, hw->ix_ne[0].len);
/* stick it on the tx q */
/* XXX add vlan priority */
ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);
sc->npe_watchdog_timer = 5;
}
if (sc->tx_free == NULL)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}
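For a multi-segment mbuf, npestart_locked() above builds a chain of
descriptors: every entry carries its segment length in the upper half of the
len word, only the first entry carries the total frame length in the lower
half, and entries are linked through bus addresses with the final next pointer
zeroed. A host-side sketch of that chain layout, using a stand-in structure,
fake bus addresses, and made-up segment lengths (byte swapping omitted):

#include <stdint.h>
#include <stdio.h>

struct fake_ne {                        /* stand-in for one hw->ix_ne[] entry */
        uint32_t data;                  /* segment bus address */
        uint32_t len;                   /* (seg_len << 16) | total_len (first only) */
        uint32_t next;                  /* bus address of next entry, 0 at end */
};

int
main(void)
{
        uint32_t seglen[3] = { 600, 500, 414 };
        struct fake_ne ne[3];
        uint32_t neaddr = 0x30000000;   /* pretend bus address of ne[0] */
        uint32_t next = neaddr + sizeof(ne[0]);
        uint32_t len = seglen[0] + seglen[1] + seglen[2];
        int i;

        for (i = 0; i < 3; i++) {
                ne[i].data = 0x40000000 + i * 0x1000;   /* fake segment addresses */
                ne[i].len = (seglen[i] << 16) | len;
                ne[i].next = next;
                len = 0;                /* total length only in the first entry */
                next += sizeof(ne[0]);
        }
        ne[2].next = 0;                 /* terminate the chain, as in the driver */
        for (i = 0; i < 3; i++)
                printf("ne[%d]: len 0x%08x next 0x%08x\n",
                    i, (unsigned)ne[i].len, (unsigned)ne[i].next);
        return (0);
}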
void
npestart(struct ifnet *ifp)
{
struct npe_softc *sc = ifp->if_softc;
NPE_LOCK(sc);
npestart_locked(ifp);
NPE_UNLOCK(sc);
}
static void
npe_stopxmit(struct npe_softc *sc)
{
struct npedma *dma = &sc->txdma;
int i;
NPE_ASSERT_LOCKED(sc);
/* XXX qmgr */
for (i = 0; i < dma->nbuf; i++) {
struct npebuf *npe = &dma->buf[i];
if (npe->ix_m != NULL) {
bus_dmamap_unload(dma->mtag, npe->ix_map);
m_freem(npe->ix_m);
npe->ix_m = NULL;
}
}
}
static void
npe_stoprecv(struct npe_softc *sc)
{
struct npedma *dma = &sc->rxdma;
int i;
NPE_ASSERT_LOCKED(sc);
/* XXX qmgr */
for (i = 0; i < dma->nbuf; i++) {
struct npebuf *npe = &dma->buf[i];
if (npe->ix_m != NULL) {
bus_dmamap_unload(dma->mtag, npe->ix_map);
m_freem(npe->ix_m);
npe->ix_m = NULL;
}
}
}
/*
* Turn off interrupts, and stop the nic.
*/
void
npestop(struct npe_softc *sc)
{
struct ifnet *ifp = sc->sc_ifp;
/* disable transmitter and receiver in the MAC */
WR4(sc, NPE_MAC_RX_CNTRL1,
RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
WR4(sc, NPE_MAC_TX_CNTRL1,
RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
sc->npe_watchdog_timer = 0;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
callout_stop(&sc->tick_ch);
npe_stopxmit(sc);
npe_stoprecv(sc);
/* XXX go into loopback & drain q's? */
/* XXX but beware of disabling tx above */
/*
* The MAC core rx/tx disable may leave the MAC hardware in an
* unpredictable state. A hw reset is executed before resetting
* all the MAC parameters to a known value.
*/
WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
DELAY(NPE_MAC_RESET_DELAY);
WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}
void
npewatchdog(struct npe_softc *sc)
{
NPE_ASSERT_LOCKED(sc);
if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
return;
device_printf(sc->sc_dev, "watchdog timeout\n");
if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
npeinit_locked(sc);
}
static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct npe_softc *sc = ifp->if_softc;
struct mii_data *mii;
struct ifreq *ifr = (struct ifreq *)data;
int error = 0;
#ifdef DEVICE_POLLING
int mask;
#endif
switch (cmd) {
case SIOCSIFFLAGS:
NPE_LOCK(sc);
if ((ifp->if_flags & IFF_UP) == 0 &&
ifp->if_drv_flags & IFF_DRV_RUNNING) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
npestop(sc);
} else {
/* reinitialize card on any parameter change */
npeinit_locked(sc);
}
NPE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
/* update multicast filter list. */
NPE_LOCK(sc);
npe_setmcast(sc);
NPE_UNLOCK(sc);
error = 0;
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
mii = device_get_softc(sc->sc_mii);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
break;
#ifdef DEVICE_POLLING
case SIOCSIFCAP:
mask = ifp->if_capenable ^ ifr->ifr_reqcap;
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
error = ether_poll_register(npe_poll, ifp);
if (error)
return error;
NPE_LOCK(sc);
/* disable callbacks XXX txdone is shared */
ixpqmgr_notify_disable(sc->rx_qid);
ixpqmgr_notify_disable(sc->tx_doneqid);
ifp->if_capenable |= IFCAP_POLLING;
NPE_UNLOCK(sc);
} else {
error = ether_poll_deregister(ifp);
/* NB: always enable qmgr callbacks */
NPE_LOCK(sc);
/* enable qmgr callbacks */
ixpqmgr_notify_enable(sc->rx_qid,
IX_QMGR_Q_SOURCE_ID_NOT_E);
ixpqmgr_notify_enable(sc->tx_doneqid,
IX_QMGR_Q_SOURCE_ID_NOT_E);
ifp->if_capenable &= ~IFCAP_POLLING;
NPE_UNLOCK(sc);
}
}
break;
#endif
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
return error;
}
/*
* Setup a traffic class -> rx queue mapping.
*/
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
uint32_t msg[2];
msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
static int
npe_setportaddress(struct npe_softc *sc, const uint8_t mac[ETHER_ADDR_LEN])
{
uint32_t msg[2];
msg[0] = (NPE_SETPORTADDRESS << 24)
| (sc->sc_npeid << 20)
| (mac[0] << 8)
| (mac[1] << 0);
msg[1] = (mac[2] << 24)
| (mac[3] << 16)
| (mac[4] << 8)
| (mac[5] << 0);
return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
static int
npe_setfirewallmode(struct npe_softc *sc, int onoff)
{
uint32_t msg[2];
/* XXX honor onoff */
msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
msg[1] = 0;
return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
/*
* Update and reset the statistics in the NPE.
*/
static int
npe_updatestats(struct npe_softc *sc)
{
uint32_t msg[2];
msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
msg[1] = sc->sc_stats_phys; /* physical address of stat block */
return ixpnpe_sendmsg_async(sc->sc_npe, msg);
}
#if 0
/*
* Get the current statistics block.
*/
static int
npe_getstats(struct npe_softc *sc)
{
uint32_t msg[2];
msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
msg[1] = sc->sc_stats_phys; /* physical address of stat block */
return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
/*
* Query the image id of the loaded firmware.
*/
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
uint32_t msg[2];
msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
msg[1] = 0;
return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}
/*
* Enable/disable loopback.
*/
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
uint32_t msg[2];
msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
msg[1] = 0;
return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
#endif
static void
npe_child_detached(device_t dev, device_t child)
{
struct npe_softc *sc;
sc = device_get_softc(dev);
if (child == sc->sc_mii)
sc->sc_mii = NULL;
}
/*
* MII bus support routines.
*/
#define MII_RD4(sc, reg) bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
#define MII_WR4(sc, reg, v) \
bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
uint32_t v;
/* NB: registers are known to be sequential */
v = (MII_RD4(sc, reg+0) & 0xff) << 0;
v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
return v;
}
static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
/* NB: registers are known to be sequential */
MII_WR4(sc, reg+0, cmd & 0xff);
MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}
static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
uint32_t v;
int i;
/* NB: typically this takes 25-30 trips */
for (i = 0; i < 1000; i++) {
v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
if ((v & NPE_MII_GO) == 0)
return 1;
DELAY(1);
}
device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
__func__, v);
return 0; /* NB: timeout */
}
static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
struct npe_softc *sc = device_get_softc(dev);
uint32_t v;
v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
if (npe_mii_mdio_wait(sc))
v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
else
v = 0xffff | NPE_MII_READ_FAIL;
return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}
static int
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct npe_softc *sc = device_get_softc(dev);
uint32_t v;
v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
| data | NPE_MII_WRITE
| NPE_MII_GO;
npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
/* XXX complain about timeout */
(void) npe_mii_mdio_wait(sc);
return (0);
}
static void
npe_miibus_statchg(device_t dev)
{
struct npe_softc *sc = device_get_softc(dev);
struct mii_data *mii = device_get_softc(sc->sc_mii);
uint32_t tx1, rx1;
/* sync MAC duplex state (NB: the TX DUPLEX bit selects half duplex when set) */
tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
} else {
tx1 |= NPE_TX_CNTRL1_DUPLEX;
rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
}
WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}
static device_method_t npe_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, npe_probe),
DEVMETHOD(device_attach, npe_attach),
DEVMETHOD(device_detach, npe_detach),
/* Bus interface */
DEVMETHOD(bus_child_detached, npe_child_detached),
/* MII interface */
DEVMETHOD(miibus_readreg, npe_miibus_readreg),
DEVMETHOD(miibus_writereg, npe_miibus_writereg),
DEVMETHOD(miibus_statchg, npe_miibus_statchg),
{ 0, 0 }
};
static driver_t npe_driver = {
"npe",
npe_methods,
sizeof(struct npe_softc),
};
DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);
Index: head/sys/arm64/arm64/busdma_bounce.c
===================================================================
--- head/sys/arm64/arm64/busdma_bounce.c (revision 328217)
+++ head/sys/arm64/arm64/busdma_bounce.c (revision 328218)
@@ -1,1333 +1,1333 @@
/*-
* Copyright (c) 1997, 1998 Justin T. Gibbs.
* Copyright (c) 2015-2016 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Andrew Turner
* under sponsorship of the FreeBSD Foundation.
*
* Portions of this software were developed by Semihalf
* under sponsorship of the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <arm64/include/bus_dma_impl.h>
#define MAX_BPAGES 4096
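/*
 * Per-tag bounce flags, as used below: BF_COULD_BOUNCE marks a tag whose
 * transfers may need bounce pages (restricted lowaddr or alignment > 1),
 * BF_MIN_ALLOC_COMP records that the initial bounce-page allocation was
 * performed, BF_KMEM_ALLOC marks dmamem obtained from kmem_alloc_attr()/
 * kmem_alloc_contig() rather than malloc(9), and BF_COHERENT marks an
 * IO-coherent tag for which cache maintenance is skipped.
 */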
enum {
BF_COULD_BOUNCE = 0x01,
BF_MIN_ALLOC_COMP = 0x02,
BF_KMEM_ALLOC = 0x04,
BF_COHERENT = 0x10,
};
struct bounce_zone;
struct bus_dma_tag {
struct bus_dma_tag_common common;
int map_count;
int bounce_flags;
bus_dma_segment_t *segments;
struct bounce_zone *bounce_zone;
};
struct bounce_page {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
vm_page_t datapage; /* physical page of client data */
vm_offset_t dataoffs; /* page offset of client data */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
int busdma_swi_pending;
struct bounce_zone {
STAILQ_ENTRY(bounce_zone) links;
STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
int total_bpages;
int free_bpages;
int reserved_bpages;
int active_bpages;
int total_bounced;
int total_deferred;
int map_count;
bus_size_t alignment;
bus_addr_t lowaddr;
char zoneid[8];
char lowaddrid[20];
struct sysctl_ctx_list sysctl_tree;
struct sysctl_oid *sysctl_tree_top;
};
static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
"Total bounce pages");
struct sync_list {
vm_offset_t vaddr; /* kva of client data */
bus_addr_t paddr; /* physical address */
vm_page_t pages; /* starting page of client data */
bus_size_t datacount; /* client data count */
};
struct bus_dmamap {
struct bp_list bpages;
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
struct memdesc mem;
bus_dmamap_callback_t *callback;
void *callback_arg;
STAILQ_ENTRY(bus_dmamap) links;
u_int flags;
#define DMAMAP_COULD_BOUNCE (1 << 0)
#define DMAMAP_FROM_DMAMEM (1 << 1)
int sync_count;
struct sync_list slist[];
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int flags);
/*
* Allocate a device specific dma_tag.
*/
static int
bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat)
{
bus_dma_tag_t newtag;
int error;
*dmat = NULL;
error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
sizeof (struct bus_dma_tag), (void **)&newtag);
if (error != 0)
return (error);
newtag->common.impl = &bus_dma_bounce_impl;
newtag->map_count = 0;
newtag->segments = NULL;
if ((flags & BUS_DMA_COHERENT) != 0)
newtag->bounce_flags |= BF_COHERENT;
if (parent != NULL) {
if ((newtag->common.filter != NULL ||
(parent->bounce_flags & BF_COULD_BOUNCE) != 0))
newtag->bounce_flags |= BF_COULD_BOUNCE;
/* Copy some flags from the parent */
newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
}
if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
newtag->common.alignment > 1)
newtag->bounce_flags |= BF_COULD_BOUNCE;
if (((newtag->bounce_flags & BF_COULD_BOUNCE) != 0) &&
(flags & BUS_DMA_ALLOCNOW) != 0) {
struct bounce_zone *bz;
/* Must bounce */
if ((error = alloc_bounce_zone(newtag)) != 0) {
free(newtag, M_DEVBUF);
return (error);
}
bz = newtag->bounce_zone;
if (ptoa(bz->total_bpages) < maxsize) {
int pages;
pages = atop(maxsize) - bz->total_bpages;
/* Add pages to our bounce pool */
if (alloc_bounce_pages(newtag, pages) < pages)
error = ENOMEM;
}
/* Performed initial allocation */
newtag->bounce_flags |= BF_MIN_ALLOC_COMP;
} else
error = 0;
if (error != 0)
free(newtag, M_DEVBUF);
else
*dmat = newtag;
CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
__func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
error);
return (error);
}
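/*
 * Illustrative only (hypothetical driver values, not from this file): a
 * tag created through the MI wrapper with, e.g.,
 *
 *	bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXPHYS, 1, MAXPHYS, 0, NULL, NULL, &sc->sc_dmat);
 *
 * reaches this routine with lowaddr below Maxmem on a large-memory
 * machine, so BF_COULD_BOUNCE is set and bounce pages may be allocated
 * at map creation or load time.
 */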
static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
bus_dma_tag_t dmat_copy, parent;
int error;
error = 0;
dmat_copy = dmat;
if (dmat != NULL) {
if (dmat->map_count != 0) {
error = EBUSY;
goto out;
}
while (dmat != NULL) {
parent = (bus_dma_tag_t)dmat->common.parent;
atomic_subtract_int(&dmat->common.ref_count, 1);
if (dmat->common.ref_count == 0) {
if (dmat->segments != NULL)
free(dmat->segments, M_DEVBUF);
free(dmat, M_DEVBUF);
/*
* Last reference count, so
* release our reference
* count on our parent.
*/
dmat = parent;
} else
dmat = NULL;
}
}
out:
CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
return (error);
}
static bus_dmamap_t
alloc_dmamap(bus_dma_tag_t dmat, int flags)
{
u_long mapsize;
bus_dmamap_t map;
mapsize = sizeof(*map);
mapsize += sizeof(struct sync_list) * dmat->common.nsegments;
map = malloc(mapsize, M_DEVBUF, flags | M_ZERO);
if (map == NULL)
return (NULL);
/* Initialize the new map */
STAILQ_INIT(&map->bpages);
return (map);
}
/*
* Allocate a handle for mapping from kva/uva/physical
* address space into bus device space.
*/
static int
bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
struct bounce_zone *bz;
int error, maxpages, pages;
error = 0;
if (dmat->segments == NULL) {
- dmat->segments = (bus_dma_segment_t *)mallocarray(
- dmat->common.nsegments, sizeof(bus_dma_segment_t),
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->common.nsegments,
M_DEVBUF, M_NOWAIT);
if (dmat->segments == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
return (ENOMEM);
}
}
*mapp = alloc_dmamap(dmat, M_NOWAIT);
if (*mapp == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
return (ENOMEM);
}
/*
* Bouncing might be required if the driver asks for an active
* exclusion region, a data alignment that is stricter than 1, and/or
* an active address boundary.
*/
if (dmat->bounce_flags & BF_COULD_BOUNCE) {
/* Must bounce */
if (dmat->bounce_zone == NULL) {
if ((error = alloc_bounce_zone(dmat)) != 0) {
free(*mapp, M_DEVBUF);
return (error);
}
}
bz = dmat->bounce_zone;
(*mapp)->flags = DMAMAP_COULD_BOUNCE;
/*
* Attempt to add pages to our pool on a per-instance
* basis up to a sane limit.
*/
if (dmat->common.alignment > 1)
maxpages = MAX_BPAGES;
else
maxpages = MIN(MAX_BPAGES, Maxmem -
atop(dmat->common.lowaddr));
if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0 ||
(bz->map_count > 0 && bz->total_bpages < maxpages)) {
pages = MAX(atop(dmat->common.maxsize), 1);
pages = MIN(maxpages - bz->total_bpages, pages);
pages = MAX(pages, 1);
if (alloc_bounce_pages(dmat, pages) < pages)
error = ENOMEM;
if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP)
== 0) {
if (error == 0) {
dmat->bounce_flags |=
BF_MIN_ALLOC_COMP;
}
} else
error = 0;
}
bz->map_count++;
}
if (error == 0)
dmat->map_count++;
else
free(*mapp, M_DEVBUF);
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, error);
return (error);
}
/*
* Destroy a handle for mapping from kva/uva/physical
* address space into bus device space.
*/
static int
bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
/* Check we are destroying the correct map type */
if ((map->flags & DMAMAP_FROM_DMAMEM) != 0)
panic("bounce_bus_dmamap_destroy: Invalid map freed\n");
if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
return (EBUSY);
}
if (dmat->bounce_zone) {
KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
("%s: Bounce zone when cannot bounce", __func__));
dmat->bounce_zone->map_count--;
}
free(map, M_DEVBUF);
dmat->map_count--;
CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
return (0);
}
/*
* Allocate a piece of memory that can be efficiently mapped into
* bus device space based on the constraints listed in the dma tag.
* A dmamap for use with dmamap_load is also allocated.
*/
static int
bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
bus_dmamap_t *mapp)
{
/*
* XXX ARM64TODO:
* This bus_dma implementation requires an IO-Coherent architecture.
* If IO-Coherency is not guaranteed, the BUS_DMA_COHERENT flag has
* to be implemented using non-cacheable memory.
*/
vm_memattr_t attr;
int mflags;
if (flags & BUS_DMA_NOWAIT)
mflags = M_NOWAIT;
else
mflags = M_WAITOK;
if (dmat->segments == NULL) {
dmat->segments = (bus_dma_segment_t *)malloc(
sizeof(bus_dma_segment_t) * dmat->common.nsegments,
M_DEVBUF, mflags);
if (dmat->segments == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
return (ENOMEM);
}
}
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
if (flags & BUS_DMA_NOCACHE)
attr = VM_MEMATTR_UNCACHEABLE;
else if ((flags & BUS_DMA_COHERENT) != 0 &&
(dmat->bounce_flags & BF_COHERENT) == 0)
/*
* If we have a non-coherent tag and are trying to allocate
* a coherent block of memory, it needs to be uncached.
*/
attr = VM_MEMATTR_UNCACHEABLE;
else
attr = VM_MEMATTR_DEFAULT;
/*
* Create the map, but don't set the could bounce flag as
* this allocation should never bounce.
*/
*mapp = alloc_dmamap(dmat, mflags);
if (*mapp == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
return (ENOMEM);
}
(*mapp)->flags = DMAMAP_FROM_DMAMEM;
/*
* Allocate the buffer from the malloc(9) allocator if...
* - It's small enough to fit into a single power of two sized bucket.
* - The alignment is less than or equal to the maximum size.
* - The low address requirement is fulfilled.
* else allocate non-contiguous pages if...
* - The number of pages that could get allocated doesn't exceed
* nsegments, even when the maximum segment size is less than
* PAGE_SIZE.
* - The alignment constraint isn't larger than a page boundary.
* - There are no boundary-crossing constraints.
* else allocate a block of contiguous pages because one or more of the
* constraints is something that only the contig allocator can fulfill.
*
* NOTE: The (dmat->common.alignment <= dmat->common.maxsize) check
* below is just a quick hack. The exact alignment guarantees
* of malloc(9) need to be nailed down, and the code below
* should be rewritten to take that into account.
*
* In the meantime warn the user if malloc gets it wrong.
*/
if ((dmat->common.maxsize <= PAGE_SIZE) &&
(dmat->common.alignment <= dmat->common.maxsize) &&
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
} else if (dmat->common.nsegments >=
howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
dmat->common.alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
*vaddr = (void *)kmem_alloc_attr(kernel_arena,
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
} else {
*vaddr = (void *)kmem_alloc_contig(kernel_arena,
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
dmat->common.boundary, attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
}
if (*vaddr == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
free(*mapp, M_DEVBUF);
return (ENOMEM);
} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
dmat->map_count++;
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, 0);
return (0);
}
/*
* Free a piece of memory and its associated dmamap that were allocated
* via bus_dmamem_alloc. Make the same choice for free/contigfree.
*/
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
/*
* Check that the map came from bounce_bus_dmamem_alloc; the
* BF_KMEM_ALLOC flag should be clear if malloc() was used and
* set if kmem_alloc_attr()/kmem_alloc_contig() was used.
*/
if ((map->flags & DMAMAP_FROM_DMAMEM) == 0)
panic("bus_dmamem_free: Invalid map freed\n");
if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
free(vaddr, M_DEVBUF);
else
kmem_free(kernel_arena, (vm_offset_t)vaddr,
dmat->common.maxsize);
free(map, M_DEVBUF);
dmat->map_count--;
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
dmat->bounce_flags);
}
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->common.maxsegsz);
if (bus_dma_run_filter(&dmat->common, curaddr)) {
sgsize = MIN(sgsize,
PAGE_SIZE - (curaddr & PAGE_MASK));
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
vm_offset_t vaddr;
vm_offset_t vendaddr;
bus_addr_t paddr;
bus_size_t sg_len;
if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
"alignment= %d", dmat->common.lowaddr,
ptoa((vm_paddr_t)Maxmem),
dmat->common.boundary, dmat->common.alignment);
CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
vaddr = (vm_offset_t)buf;
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {
sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
if (pmap == kernel_pmap)
paddr = pmap_kextract(vaddr);
else
paddr = pmap_extract(pmap, vaddr);
if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
sg_len = roundup2(sg_len,
dmat->common.alignment);
map->pagesneeded++;
}
vaddr += sg_len;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
/* Reserve Necessary Bounce Pages */
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Add a single contiguous physical range to the segment list.
*/
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->common.boundary - 1);
if (dmat->common.boundary > 0) {
baddr = (curaddr + dmat->common.boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
seg = *segp;
if (seg == -1) {
seg = 0;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
(dmat->common.boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
*segp = seg;
return (sgsize);
}
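/*
 * Worked example of the boundary clamping above (illustrative numbers
 * only): with boundary = 0x10000 and curaddr = 0x1fff0, baddr computes
 * to 0x20000, so a requested sgsize of 0x40 is clamped to 0x10 and the
 * caller carries the remainder into a new segment past the boundary.
 */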
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
static int
bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
int *segp)
{
struct sync_list *sl;
bus_size_t sgsize;
bus_addr_t curaddr, sl_end;
int error;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
sl = map->slist + map->sync_count - 1;
sl_end = 0;
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->common.maxsegsz);
if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
} else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
if (map->sync_count > 0)
sl_end = sl->paddr + sl->datacount;
if (map->sync_count == 0 || curaddr != sl_end) {
if (++map->sync_count > dmat->common.nsegments)
break;
sl++;
sl->vaddr = 0;
sl->paddr = curaddr;
sl->datacount = sgsize;
sl->pages = PHYS_TO_VM_PAGE(curaddr);
KASSERT(sl->pages != NULL,
("%s: page at PA:0x%08lx is not in "
"vm_page_array", __func__, curaddr));
} else
sl->datacount += sgsize;
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Utility function to load a linear buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
static int
bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
struct sync_list *sl;
bus_size_t sgsize, max_sgsize;
bus_addr_t curaddr, sl_pend;
vm_offset_t kvaddr, vaddr, sl_vend;
int error;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
sl = map->slist + map->sync_count - 1;
vaddr = (vm_offset_t)buf;
sl_pend = 0;
sl_vend = 0;
while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
if (pmap == kernel_pmap) {
curaddr = pmap_kextract(vaddr);
kvaddr = vaddr;
} else {
curaddr = pmap_extract(pmap, vaddr);
kvaddr = 0;
}
/*
* Compute the segment size, and adjust counts.
*/
max_sgsize = MIN(buflen, dmat->common.maxsegsz);
sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
sgsize = MIN(sgsize, max_sgsize);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
sgsize);
} else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
sgsize = MIN(sgsize, max_sgsize);
if (map->sync_count > 0) {
sl_pend = sl->paddr + sl->datacount;
sl_vend = sl->vaddr + sl->datacount;
}
if (map->sync_count == 0 ||
(kvaddr != 0 && kvaddr != sl_vend) ||
(curaddr != sl_pend)) {
if (++map->sync_count > dmat->common.nsegments)
goto cleanup;
sl++;
sl->vaddr = kvaddr;
sl->paddr = curaddr;
if (kvaddr != 0) {
sl->pages = NULL;
} else {
sl->pages = PHYS_TO_VM_PAGE(curaddr);
KASSERT(sl->pages != NULL,
("%s: page at PA:0x%08lx is not "
"in vm_page_array", __func__,
curaddr));
}
sl->datacount = sgsize;
} else
sl->datacount += sgsize;
} else {
sgsize = MIN(sgsize, max_sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
vaddr += sgsize;
buflen -= sgsize;
}
cleanup:
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
if ((map->flags & DMAMAP_COULD_BOUNCE) == 0)
return;
map->mem = *mem;
map->dmat = dmat;
map->callback = callback;
map->callback_arg = callback_arg;
}
static bus_dma_segment_t *
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
if (segs == NULL)
segs = dmat->segments;
return (segs);
}
/*
* Release the mapping held by map.
*/
static void
bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
}
map->sync_count = 0;
}
static void
dma_preread_safe(vm_offset_t va, vm_size_t size)
{
/*
* Write back any partial cachelines immediately before and
* after the DMA region.
*/
if (va & (dcache_line_size - 1))
cpu_dcache_wb_range(va, 1);
if ((va + size) & (dcache_line_size - 1))
cpu_dcache_wb_range(va + size, 1);
cpu_dcache_inv_range(va, size);
}
static void
dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
{
uint32_t len, offset;
vm_page_t m;
vm_paddr_t pa;
vm_offset_t va, tempva;
bus_size_t size;
offset = sl->paddr & PAGE_MASK;
m = sl->pages;
size = sl->datacount;
pa = sl->paddr;
for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
tempva = 0;
if (sl->vaddr == 0) {
len = min(PAGE_SIZE - offset, size);
tempva = pmap_quick_enter_page(m);
va = tempva | offset;
KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
("unexpected vm_page_t phys: 0x%16lx != 0x%16lx",
VM_PAGE_TO_PHYS(m) | offset, pa));
} else {
len = sl->datacount;
va = sl->vaddr;
}
switch (op) {
case BUS_DMASYNC_PREWRITE:
case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
cpu_dcache_wb_range(va, len);
break;
case BUS_DMASYNC_PREREAD:
/*
* An mbuf may start in the middle of a cacheline. There
* will be no cpu writes to the beginning of that line
* (which contains the mbuf header) while dma is in
* progress. Handle that case by doing a writeback of
* just the first cacheline before invalidating the
* overall buffer. Any mbuf in a chain may have this
* misalignment. Buffers which are not mbufs bounce if
* they are not aligned to a cacheline.
*/
dma_preread_safe(va, len);
break;
case BUS_DMASYNC_POSTREAD:
case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
cpu_dcache_inv_range(va, len);
break;
default:
panic("unsupported combination of sync operations: "
"0x%08x\n", op);
}
if (tempva != 0)
pmap_quick_remove_page(tempva);
}
}
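/*
 * Bounce/cache maintenance for a map, below: PREWRITE copies client data
 * into the bounce buffer (with a cache write-back on non-coherent tags),
 * PREREAD write-back-invalidates the bounce buffer, POSTREAD invalidates
 * it and copies the data back out, and non-bounced non-coherent ranges
 * are passed to dma_dcache_sync() above.
 */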
static void
bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dmasync_op_t op)
{
struct bounce_page *bpage;
struct sync_list *sl, *end;
vm_offset_t datavaddr, tempvaddr;
if (op == BUS_DMASYNC_POSTWRITE)
return;
if ((op & BUS_DMASYNC_POSTREAD) != 0) {
/*
* Wait for any DMA operations to complete before the bcopy.
*/
dsb(sy);
}
if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
"performing bounce", __func__, dmat, dmat->common.flags,
op);
if ((op & BUS_DMASYNC_PREWRITE) != 0) {
while (bpage != NULL) {
tempvaddr = 0;
datavaddr = bpage->datavaddr;
if (datavaddr == 0) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
datavaddr = tempvaddr | bpage->dataoffs;
}
bcopy((void *)datavaddr,
(void *)bpage->vaddr, bpage->datacount);
if (tempvaddr != 0)
pmap_quick_remove_page(tempvaddr);
if ((dmat->bounce_flags & BF_COHERENT) == 0)
cpu_dcache_wb_range(bpage->vaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
} else if ((op & BUS_DMASYNC_PREREAD) != 0) {
while (bpage != NULL) {
if ((dmat->bounce_flags & BF_COHERENT) == 0)
cpu_dcache_wbinv_range(bpage->vaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
}
if ((op & BUS_DMASYNC_POSTREAD) != 0) {
while (bpage != NULL) {
if ((dmat->bounce_flags & BF_COHERENT) == 0)
cpu_dcache_inv_range(bpage->vaddr,
bpage->datacount);
tempvaddr = 0;
datavaddr = bpage->datavaddr;
if (datavaddr == 0) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
datavaddr = tempvaddr | bpage->dataoffs;
}
bcopy((void *)bpage->vaddr,
(void *)datavaddr, bpage->datacount);
if (tempvaddr != 0)
pmap_quick_remove_page(tempvaddr);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
}
}
/*
* Cache maintenance for normal (non-COHERENT non-bounce) buffers.
*/
if (map->sync_count != 0) {
sl = &map->slist[0];
end = &map->slist[map->sync_count];
CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
"performing sync", __func__, dmat, op);
for ( ; sl != end; ++sl)
dma_dcache_sync(sl, op);
}
if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
/*
* Wait for the bcopy to complete before any DMA operations.
*/
dsb(sy);
}
}
static void
init_bounce_pages(void *dummy __unused)
{
total_bpages = 0;
STAILQ_INIT(&bounce_zone_list);
STAILQ_INIT(&bounce_map_waitinglist);
STAILQ_INIT(&bounce_map_callbacklist);
mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
return (&bz->sysctl_tree);
}
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
return (bz->sysctl_tree_top);
}
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
struct bounce_zone *bz;
/* Check to see if we already have a suitable zone */
STAILQ_FOREACH(bz, &bounce_zone_list, links) {
if ((dmat->common.alignment <= bz->alignment) &&
(dmat->common.lowaddr >= bz->lowaddr)) {
dmat->bounce_zone = bz;
return (0);
}
}
if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
M_NOWAIT | M_ZERO)) == NULL)
return (ENOMEM);
STAILQ_INIT(&bz->bounce_page_list);
bz->free_bpages = 0;
bz->reserved_bpages = 0;
bz->active_bpages = 0;
bz->lowaddr = dmat->common.lowaddr;
bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
bz->map_count = 0;
snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
busdma_zonecount++;
snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
dmat->bounce_zone = bz;
sysctl_ctx_init(&bz->sysctl_tree);
bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
CTLFLAG_RD, 0, "");
if (bz->sysctl_tree_top == NULL) {
sysctl_ctx_free(&bz->sysctl_tree);
return (0); /* XXX error code? */
}
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
"Total bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
"Free bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
"Reserved bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
"Active bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
"Total bounce requests");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
"Total bounce requests that were deferred");
SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"alignment", CTLFLAG_RD, &bz->alignment, "");
return (0);
}
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
struct bounce_zone *bz;
int count;
bz = dmat->bounce_zone;
count = 0;
while (numpages > 0) {
struct bounce_page *bpage;
bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (bpage == NULL)
break;
bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
if (bpage->vaddr == 0) {
free(bpage, M_DEVBUF);
break;
}
bpage->busaddr = pmap_kextract(bpage->vaddr);
mtx_lock(&bounce_lock);
STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
total_bpages++;
bz->total_bpages++;
bz->free_bpages++;
mtx_unlock(&bounce_lock);
count++;
numpages--;
}
return (count);
}
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
struct bounce_zone *bz;
int pages;
mtx_assert(&bounce_lock, MA_OWNED);
bz = dmat->bounce_zone;
pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
return (map->pagesneeded - (map->pagesreserved + pages));
bz->free_bpages -= pages;
bz->reserved_bpages += pages;
map->pagesreserved += pages;
pages = map->pagesneeded - map->pagesreserved;
return (pages);
}
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
("add_bounce_page: bad map %p", map));
bz = dmat->bounce_zone;
if (map->pagesneeded == 0)
panic("add_bounce_page: map doesn't need any pages");
map->pagesneeded--;
if (map->pagesreserved == 0)
panic("add_bounce_page: map doesn't need any pages");
map->pagesreserved--;
mtx_lock(&bounce_lock);
bpage = STAILQ_FIRST(&bz->bounce_page_list);
if (bpage == NULL)
panic("add_bounce_page: free page list is empty");
STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
bz->reserved_bpages--;
bz->active_bpages++;
mtx_unlock(&bounce_lock);
if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
/* Page offset needs to be preserved. */
bpage->vaddr |= addr & PAGE_MASK;
bpage->busaddr |= addr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->datapage = PHYS_TO_VM_PAGE(addr);
bpage->dataoffs = addr & PAGE_MASK;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
}
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
struct bus_dmamap *map;
struct bounce_zone *bz;
bz = dmat->bounce_zone;
bpage->datavaddr = 0;
bpage->datacount = 0;
if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
/*
* Reset the bounce page to start at offset 0. Other uses
* of this bounce page may need to store a full page of
* data and/or assume it starts on a page boundary.
*/
bpage->vaddr &= ~PAGE_MASK;
bpage->busaddr &= ~PAGE_MASK;
}
mtx_lock(&bounce_lock);
STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
bz->free_bpages++;
bz->active_bpages--;
if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
busdma_swi_pending = 1;
bz->total_deferred++;
swi_sched(vm_ih, 0);
}
}
mtx_unlock(&bounce_lock);
}
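/*
 * Deferred loads: a map that cannot reserve enough bounce pages is queued
 * on bounce_map_waitinglist (see _bus_dmamap_reserve_pages()); once pages
 * are freed, free_bounce_page() moves it to bounce_map_callbacklist and
 * schedules this software interrupt to retry the load.
 */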
void
busdma_swi(void)
{
bus_dma_tag_t dmat;
struct bus_dmamap *map;
mtx_lock(&bounce_lock);
while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load_mem(map->dmat, map, &map->mem,
map->callback, map->callback_arg, BUS_DMA_WAITOK);
(dmat->common.lockfunc)(dmat->common.lockfuncarg,
BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}
mtx_unlock(&bounce_lock);
}
struct bus_dma_impl bus_dma_bounce_impl = {
.tag_create = bounce_bus_dma_tag_create,
.tag_destroy = bounce_bus_dma_tag_destroy,
.map_create = bounce_bus_dmamap_create,
.map_destroy = bounce_bus_dmamap_destroy,
.mem_alloc = bounce_bus_dmamem_alloc,
.mem_free = bounce_bus_dmamem_free,
.load_phys = bounce_bus_dmamap_load_phys,
.load_buffer = bounce_bus_dmamap_load_buffer,
.load_ma = bus_dmamap_load_ma_triv,
.map_waitok = bounce_bus_dmamap_waitok,
.map_complete = bounce_bus_dmamap_complete,
.map_unload = bounce_bus_dmamap_unload,
.map_sync = bounce_bus_dmamap_sync
};
Index: head/sys/cam/cam_queue.c
===================================================================
--- head/sys/cam/cam_queue.c (revision 328217)
+++ head/sys/cam/cam_queue.c (revision 328218)
@@ -1,410 +1,410 @@
/*-
* CAM request queue management functions.
*
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1997 Justin T. Gibbs.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_debug.h>
static MALLOC_DEFINE(M_CAMQ, "CAM queue", "CAM queue buffers");
static MALLOC_DEFINE(M_CAMDEVQ, "CAM dev queue", "CAM dev queue buffers");
static MALLOC_DEFINE(M_CAMCCBQ, "CAM ccb queue", "CAM ccb queue buffers");
static __inline int
queue_cmp(cam_pinfo **queue_array, int i, int j);
static __inline void
swap(cam_pinfo **queue_array, int i, int j);
static void heap_up(cam_pinfo **queue_array, int new_index);
static void heap_down(cam_pinfo **queue_array, int index,
int last_index);
struct camq *
camq_alloc(int size)
{
struct camq *camq;
camq = (struct camq *)malloc(sizeof(*camq), M_CAMQ, M_NOWAIT);
if (camq != NULL) {
if (camq_init(camq, size) != 0) {
free(camq, M_CAMQ);
camq = NULL;
}
}
return (camq);
}
int
camq_init(struct camq *camq, int size)
{
bzero(camq, sizeof(*camq));
camq->array_size = size;
if (camq->array_size != 0) {
camq->queue_array = (cam_pinfo**)malloc(size*sizeof(cam_pinfo*),
M_CAMQ, M_NOWAIT);
if (camq->queue_array == NULL) {
printf("camq_init: - cannot malloc array!\n");
return (1);
}
/*
* Heap algorithms like everything numbered from 1, so
* offset our pointer into the heap array by one element.
*/
camq->queue_array--;
}
return (0);
}
/*
* Free a camq structure. This should only be called if a controller
* driver fails somehow during its attach routine or is unloaded and has
* obtained a camq structure. The XPT should ensure that the queue
* is empty before calling this routine.
*/
void
camq_free(struct camq *queue)
{
if (queue != NULL) {
camq_fini(queue);
free(queue, M_CAMQ);
}
}
void
camq_fini(struct camq *queue)
{
if (queue->queue_array != NULL) {
/*
* Heap algorithms like everything numbered from 1, so
* our pointer into the heap array is offset by one element.
*/
queue->queue_array++;
free(queue->queue_array, M_CAMQ);
}
}
u_int32_t
camq_resize(struct camq *queue, int new_size)
{
cam_pinfo **new_array;
KASSERT(new_size >= queue->entries, ("camq_resize: "
"New queue size can't accommodate queued entries (%d < %d).",
new_size, queue->entries));
- new_array = (cam_pinfo **)mallocarray(new_size, sizeof(cam_pinfo *),
+ new_array = (cam_pinfo **)malloc(new_size * sizeof(cam_pinfo *),
M_CAMQ, M_NOWAIT);
if (new_array == NULL) {
/* Couldn't satisfy request */
return (CAM_RESRC_UNAVAIL);
}
/*
* Heap algorithms like everything numbered from 1, so
* remember that our pointer into the heap array is offset
* by one element.
*/
if (queue->queue_array != NULL) {
queue->queue_array++;
bcopy(queue->queue_array, new_array,
queue->entries * sizeof(cam_pinfo *));
free(queue->queue_array, M_CAMQ);
}
queue->queue_array = new_array-1;
queue->array_size = new_size;
return (CAM_REQ_CMP);
}
/*
* camq_insert: Given an array of cam_pinfo* elements with
* the Heap(1, num_elements) property and array_size - num_elements >= 1,
* output Heap(1, num_elements+1) including new_entry in the array.
*/
void
camq_insert(struct camq *queue, cam_pinfo *new_entry)
{
KASSERT(queue->entries < queue->array_size,
("camq_insert: Attempt to insert into a full queue (%d >= %d)",
queue->entries, queue->array_size));
queue->entries++;
queue->queue_array[queue->entries] = new_entry;
new_entry->index = queue->entries;
if (queue->entries != 0)
heap_up(queue->queue_array, queue->entries);
}
/*
* camq_remove: Given an array of cam_pinfo* elements with the
* Heap(1, num_elements) property and an index such that 1 <= index <=
* num_elements, remove that entry and restore the Heap(1, num_elements-1)
* property.
*/
cam_pinfo *
camq_remove(struct camq *queue, int index)
{
cam_pinfo *removed_entry;
if (index <= 0 || index > queue->entries)
panic("%s: Attempt to remove out-of-bounds index %d "
"from queue %p of size %d", __func__, index, queue,
queue->entries);
removed_entry = queue->queue_array[index];
if (queue->entries != index) {
queue->queue_array[index] = queue->queue_array[queue->entries];
queue->queue_array[index]->index = index;
heap_down(queue->queue_array, index, queue->entries - 1);
}
removed_entry->index = CAM_UNQUEUED_INDEX;
queue->entries--;
return (removed_entry);
}
/*
* camq_change_priority: Given an array of cam_pinfo* elements with the
* Heap(1, num_entries) property, an index such that 1 <= index <= num_elements,
* and a new priority for the element at index, change the priority of
* element index and restore the Heap(1, num_elements) property.
*/
void
camq_change_priority(struct camq *queue, int index, u_int32_t new_priority)
{
if (new_priority > queue->queue_array[index]->priority) {
queue->queue_array[index]->priority = new_priority;
heap_down(queue->queue_array, index, queue->entries);
} else {
/* new_priority <= old_priority */
queue->queue_array[index]->priority = new_priority;
heap_up(queue->queue_array, index);
}
}
struct cam_devq *
cam_devq_alloc(int devices, int openings)
{
struct cam_devq *devq;
devq = (struct cam_devq *)malloc(sizeof(*devq), M_CAMDEVQ, M_NOWAIT);
if (devq == NULL) {
printf("cam_devq_alloc: - cannot malloc!\n");
return (NULL);
}
if (cam_devq_init(devq, devices, openings) != 0) {
free(devq, M_CAMDEVQ);
return (NULL);
}
return (devq);
}
int
cam_devq_init(struct cam_devq *devq, int devices, int openings)
{
bzero(devq, sizeof(*devq));
mtx_init(&devq->send_mtx, "CAM queue lock", NULL, MTX_DEF);
if (camq_init(&devq->send_queue, devices) != 0)
return (1);
devq->send_openings = openings;
devq->send_active = 0;
return (0);
}
void
cam_devq_free(struct cam_devq *devq)
{
camq_fini(&devq->send_queue);
mtx_destroy(&devq->send_mtx);
free(devq, M_CAMDEVQ);
}
u_int32_t
cam_devq_resize(struct cam_devq *camq, int devices)
{
u_int32_t retval;
retval = camq_resize(&camq->send_queue, devices);
return (retval);
}
struct cam_ccbq *
cam_ccbq_alloc(int openings)
{
struct cam_ccbq *ccbq;
ccbq = (struct cam_ccbq *)malloc(sizeof(*ccbq), M_CAMCCBQ, M_NOWAIT);
if (ccbq == NULL) {
printf("cam_ccbq_alloc: - cannot malloc!\n");
return (NULL);
}
if (cam_ccbq_init(ccbq, openings) != 0) {
free(ccbq, M_CAMCCBQ);
return (NULL);
}
return (ccbq);
}
void
cam_ccbq_free(struct cam_ccbq *ccbq)
{
if (ccbq) {
cam_ccbq_fini(ccbq);
free(ccbq, M_CAMCCBQ);
}
}
u_int32_t
cam_ccbq_resize(struct cam_ccbq *ccbq, int new_size)
{
int delta;
delta = new_size - (ccbq->dev_active + ccbq->dev_openings);
ccbq->total_openings += delta;
ccbq->dev_openings += delta;
new_size = imax(64, 1 << fls(new_size + new_size / 2));
if (new_size > ccbq->queue.array_size)
return (camq_resize(&ccbq->queue, new_size));
else
return (CAM_REQ_CMP);
}
int
cam_ccbq_init(struct cam_ccbq *ccbq, int openings)
{
bzero(ccbq, sizeof(*ccbq));
if (camq_init(&ccbq->queue,
imax(64, 1 << fls(openings + openings / 2))) != 0)
return (1);
ccbq->total_openings = openings;
ccbq->dev_openings = openings;
return (0);
}
void
cam_ccbq_fini(struct cam_ccbq *ccbq)
{
camq_fini(&ccbq->queue);
}
/*
* Heap routines for manipulating CAM queues.
*/
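/*
 * The queues are kept as 1-based binary heaps: the parent of the element
 * at index i lives at i >> 1 and its children at 2*i and 2*i + 1, which
 * is why camq_init()/camq_fini() offset queue_array by one element.
 */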
/*
* queue_cmp: Given an array of cam_pinfo* elements and indexes i
* and j, return less than 0, 0, or greater than 0 if i is less than,
* equal to, or greater than j respectively.
*/
static __inline int
queue_cmp(cam_pinfo **queue_array, int i, int j)
{
if (queue_array[i]->priority == queue_array[j]->priority)
return ( queue_array[i]->generation
- queue_array[j]->generation );
else
return ( queue_array[i]->priority
- queue_array[j]->priority );
}
/*
* swap: Given an array of cam_pinfo* elements and indexes i and j,
* exchange elements i and j.
*/
static __inline void
swap(cam_pinfo **queue_array, int i, int j)
{
cam_pinfo *temp_qentry;
temp_qentry = queue_array[j];
queue_array[j] = queue_array[i];
queue_array[i] = temp_qentry;
queue_array[j]->index = j;
queue_array[i]->index = i;
}
/*
* heap_up: Given an array of cam_pinfo* elements with the
* Heap(1, new_index-1) property and a new element in location
* new_index, output Heap(1, new_index).
*/
static void
heap_up(cam_pinfo **queue_array, int new_index)
{
int child;
int parent;
child = new_index;
while (child != 1) {
parent = child >> 1;
if (queue_cmp(queue_array, parent, child) <= 0)
break;
swap(queue_array, parent, child);
child = parent;
}
}
/*
* heap_down: Given an array of cam_pinfo* elements with the
* Heap(index + 1, num_entries) property with index containing
* an unsorted entry, output Heap(index, num_entries).
*/
static void
heap_down(cam_pinfo **queue_array, int index, int num_entries)
{
int child;
int parent;
parent = index;
child = parent << 1;
for (; child <= num_entries; child = parent << 1) {
if (child < num_entries) {
/* child+1 is the right child of parent */
if (queue_cmp(queue_array, child + 1, child) < 0)
child++;
}
/* child is now the least child of parent */
if (queue_cmp(queue_array, parent, child) <= 0)
break;
swap(queue_array, child, parent);
parent = child;
}
}
Index: head/sys/cam/ctl/ctl_frontend.c
===================================================================
--- head/sys/cam/ctl/ctl_frontend.c (revision 328217)
+++ head/sys/cam/ctl/ctl_frontend.c (revision 328218)
@@ -1,391 +1,391 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2003 Silicon Graphics International Corp.
* Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.c#4 $
*/
/*
* CAM Target Layer front end interface code
*
* Author: Ken Merry <ken@FreeBSD.org>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_backend.h>
/* XXX KDM move defines from ctl_ioctl.h to somewhere else */
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
extern struct ctl_softc *control_softc;
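/*
 * Frontend drivers register themselves here: registration rejects a
 * duplicate name, runs the frontend's optional init() hook and links the
 * frontend onto the softc's fe_list; individual ports are then added
 * with ctl_port_register().
 */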
int
ctl_frontend_register(struct ctl_frontend *fe)
{
struct ctl_softc *softc = control_softc;
struct ctl_frontend *fe_tmp;
int error;
KASSERT(softc != NULL, ("CTL is not initialized"));
/* Sanity check, make sure this isn't a duplicate registration. */
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(fe_tmp, &softc->fe_list, links) {
if (strcmp(fe_tmp->name, fe->name) == 0) {
mtx_unlock(&softc->ctl_lock);
return (-1);
}
}
mtx_unlock(&softc->ctl_lock);
STAILQ_INIT(&fe->port_list);
/* Call the frontend's initialization routine. */
if (fe->init != NULL) {
if ((error = fe->init()) != 0) {
printf("%s frontend init error: %d\n",
fe->name, error);
return (error);
}
}
mtx_lock(&softc->ctl_lock);
softc->num_frontends++;
STAILQ_INSERT_TAIL(&softc->fe_list, fe, links);
mtx_unlock(&softc->ctl_lock);
return (0);
}
int
ctl_frontend_deregister(struct ctl_frontend *fe)
{
struct ctl_softc *softc = control_softc;
int error;
/* Call the frontend's shutdown routine.*/
if (fe->shutdown != NULL) {
if ((error = fe->shutdown()) != 0) {
printf("%s frontend shutdown error: %d\n",
fe->name, error);
return (error);
}
}
mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->fe_list, fe, ctl_frontend, links);
softc->num_frontends--;
mtx_unlock(&softc->ctl_lock);
return (0);
}
struct ctl_frontend *
ctl_frontend_find(char *frontend_name)
{
struct ctl_softc *softc = control_softc;
struct ctl_frontend *fe;
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(fe, &softc->fe_list, links) {
if (strcmp(fe->name, frontend_name) == 0) {
mtx_unlock(&softc->ctl_lock);
return (fe);
}
}
mtx_unlock(&softc->ctl_lock);
return (NULL);
}
int
ctl_port_register(struct ctl_port *port)
{
struct ctl_softc *softc = control_softc;
struct ctl_port *tport, *nport;
void *pool;
int port_num;
int retval;
KASSERT(softc != NULL, ("CTL is not initialized"));
port->ctl_softc = softc;
mtx_lock(&softc->ctl_lock);
if (port->targ_port >= 0)
port_num = port->targ_port;
else
port_num = ctl_ffz(softc->ctl_port_mask,
softc->port_min, softc->port_max);
if ((port_num < 0) ||
(ctl_set_mask(softc->ctl_port_mask, port_num) < 0)) {
mtx_unlock(&softc->ctl_lock);
return (1);
}
softc->num_ports++;
mtx_unlock(&softc->ctl_lock);
/*
* Initialize the initiator and portname mappings
*/
port->max_initiators = CTL_MAX_INIT_PER_PORT;
- port->wwpn_iid = mallocarray(port->max_initiators,
- sizeof(*port->wwpn_iid), M_CTL, M_NOWAIT | M_ZERO);
+ port->wwpn_iid = malloc(sizeof(*port->wwpn_iid) * port->max_initiators,
+ M_CTL, M_NOWAIT | M_ZERO);
if (port->wwpn_iid == NULL) {
retval = ENOMEM;
goto error;
}
/*
* We add 20 to whatever the caller requests, so he doesn't get
* burned by queueing things back to the pending sense queue. In
* theory, there should probably only be one outstanding item, at
* most, on the pending sense queue for a LUN. We'll clear the
* pending sense queue on the next command, whether or not it is
* a REQUEST SENSE.
*/
retval = ctl_pool_create(softc, port->port_name,
port->num_requested_ctl_io + 20, &pool);
if (retval != 0) {
free(port->wwpn_iid, M_CTL);
error:
port->targ_port = -1;
mtx_lock(&softc->ctl_lock);
ctl_clear_mask(softc->ctl_port_mask, port_num);
mtx_unlock(&softc->ctl_lock);
return (retval);
}
port->targ_port = port_num;
port->ctl_pool_ref = pool;
if (port->options.stqh_first == NULL)
STAILQ_INIT(&port->options);
port->stats.item = port_num;
mtx_init(&port->port_lock, "CTL port", NULL, MTX_DEF);
mtx_lock(&softc->ctl_lock);
STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links);
for (tport = NULL, nport = STAILQ_FIRST(&softc->port_list);
nport != NULL && nport->targ_port < port_num;
tport = nport, nport = STAILQ_NEXT(tport, links)) {
}
if (tport)
STAILQ_INSERT_AFTER(&softc->port_list, tport, port, links);
else
STAILQ_INSERT_HEAD(&softc->port_list, port, links);
softc->ctl_ports[port->targ_port] = port;
mtx_unlock(&softc->ctl_lock);
return (retval);
}
int
ctl_port_deregister(struct ctl_port *port)
{
struct ctl_softc *softc = port->ctl_softc;
struct ctl_io_pool *pool = (struct ctl_io_pool *)port->ctl_pool_ref;
int i;
if (port->targ_port == -1)
return (1);
mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->port_list, port, ctl_port, links);
STAILQ_REMOVE(&port->frontend->port_list, port, ctl_port, fe_links);
softc->num_ports--;
ctl_clear_mask(softc->ctl_port_mask, port->targ_port);
softc->ctl_ports[port->targ_port] = NULL;
mtx_unlock(&softc->ctl_lock);
ctl_pool_free(pool);
ctl_free_opts(&port->options);
ctl_lun_map_deinit(port);
free(port->port_devid, M_CTL);
port->port_devid = NULL;
free(port->target_devid, M_CTL);
port->target_devid = NULL;
free(port->init_devid, M_CTL);
port->init_devid = NULL;
for (i = 0; i < port->max_initiators; i++)
free(port->wwpn_iid[i].name, M_CTL);
free(port->wwpn_iid, M_CTL);
mtx_destroy(&port->port_lock);
return (0);
}
void
ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn,
int wwpn_valid, uint64_t wwpn)
{
struct scsi_vpd_id_descriptor *desc;
int len, proto;
if (port->port_type == CTL_PORT_FC)
proto = SCSI_PROTO_FC << 4;
else if (port->port_type == CTL_PORT_SAS)
proto = SCSI_PROTO_SAS << 4;
else if (port->port_type == CTL_PORT_ISCSI)
proto = SCSI_PROTO_ISCSI << 4;
else
proto = SCSI_PROTO_SPI << 4;
if (wwnn_valid) {
port->wwnn = wwnn;
free(port->target_devid, M_CTL);
len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN;
port->target_devid = malloc(sizeof(struct ctl_devid) + len,
M_CTL, M_WAITOK | M_ZERO);
port->target_devid->len = len;
desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data;
desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET |
SVPD_ID_TYPE_NAA;
desc->length = CTL_WWPN_LEN;
scsi_u64to8b(port->wwnn, desc->identifier);
}
if (wwpn_valid) {
port->wwpn = wwpn;
free(port->port_devid, M_CTL);
len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN;
port->port_devid = malloc(sizeof(struct ctl_devid) + len,
M_CTL, M_WAITOK | M_ZERO);
port->port_devid->len = len;
desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data;
desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
SVPD_ID_TYPE_NAA;
desc->length = CTL_WWPN_LEN;
scsi_u64to8b(port->wwpn, desc->identifier);
}
}
void
ctl_port_online(struct ctl_port *port)
{
struct ctl_softc *softc = port->ctl_softc;
struct ctl_lun *lun;
const char *value;
uint32_t l;
if (port->lun_enable != NULL) {
if (port->lun_map) {
for (l = 0; l < port->lun_map_size; l++) {
if (ctl_lun_map_from_port(port, l) ==
UINT32_MAX)
continue;
port->lun_enable(port->targ_lun_arg, l);
}
} else {
STAILQ_FOREACH(lun, &softc->lun_list, links)
port->lun_enable(port->targ_lun_arg, lun->lun);
}
}
if (port->port_online != NULL)
port->port_online(port->onoff_arg);
mtx_lock(&softc->ctl_lock);
if (softc->is_single == 0) {
value = ctl_get_opt(&port->options, "ha_shared");
if (value != NULL && strcmp(value, "on") == 0)
port->status |= CTL_PORT_STATUS_HA_SHARED;
else
port->status &= ~CTL_PORT_STATUS_HA_SHARED;
}
port->status |= CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
ctl_isc_announce_port(port);
}
void
ctl_port_offline(struct ctl_port *port)
{
struct ctl_softc *softc = port->ctl_softc;
struct ctl_lun *lun;
uint32_t l;
if (port->port_offline != NULL)
port->port_offline(port->onoff_arg);
if (port->lun_disable != NULL) {
if (port->lun_map) {
for (l = 0; l < port->lun_map_size; l++) {
if (ctl_lun_map_from_port(port, l) ==
UINT32_MAX)
continue;
port->lun_disable(port->targ_lun_arg, l);
}
} else {
STAILQ_FOREACH(lun, &softc->lun_list, links)
port->lun_disable(port->targ_lun_arg, lun->lun);
}
}
mtx_lock(&softc->ctl_lock);
port->status &= ~CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
ctl_isc_announce_port(port);
}
/*
* vim: ts=8
*/
Index: head/sys/compat/ndis/subr_ndis.c
===================================================================
--- head/sys/compat/ndis/subr_ndis.c (revision 328217)
+++ head/sys/compat/ndis/subr_ndis.c (revision 328218)
@@ -1,3376 +1,3376 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2003
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* This file implements a translation layer between the BSD networking
* infrastructure and Windows(R) NDIS network driver modules. A Windows
* NDIS driver calls into several functions in the NDIS.SYS Windows
* kernel module and exports a table of functions designed to be called
* by the NDIS subsystem. Using the PE loader, we can patch our own
* versions of the NDIS routines into a given Windows driver module and
* convince the driver that it is in fact running on Windows.
*
* We provide a table of all our implemented NDIS routines which is patched
* into the driver object code. All our exported routines must use the
* _stdcall calling convention, since that's what the Windows object code
* expects.
*/
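/*
 * A minimal sketch of how that table is consumed (illustrative only;
 * all names below -- ndis_functbl, image_patch_table, windrv_wrap() --
 * are the ones used later in this file, see ndis_libinit()):
 *
 *	for (patch = ndis_functbl; patch->ipt_func != NULL; patch++)
 *		windrv_wrap((funcptr)patch->ipt_func,
 *		    (funcptr *)&patch->ipt_wrap,
 *		    patch->ipt_argcnt, patch->ipt_ftype);
 *
 * The wrapped entry points (ipt_wrap) are what the PE loader patches
 * into the Windows driver image.
 */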
#include <sys/ctype.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/timespec.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/mount.h>
#include <sys/sysproto.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/stdarg.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_ioctl.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
#include <dev/if_ndis/if_ndisvar.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
static char ndis_filepath[MAXPATHLEN];
SYSCTL_STRING(_hw, OID_AUTO, ndis_filepath, CTLFLAG_RW, ndis_filepath,
MAXPATHLEN, "Path used by NdisOpenFile() to search for files");
static void NdisInitializeWrapper(ndis_handle *,
driver_object *, void *, void *);
static ndis_status NdisMRegisterMiniport(ndis_handle,
ndis_miniport_characteristics *, int);
static ndis_status NdisAllocateMemoryWithTag(void **,
uint32_t, uint32_t);
static ndis_status NdisAllocateMemory(void **,
uint32_t, uint32_t, ndis_physaddr);
static void NdisFreeMemory(void *, uint32_t, uint32_t);
static ndis_status NdisMSetAttributesEx(ndis_handle, ndis_handle,
uint32_t, uint32_t, ndis_interface_type);
static void NdisOpenConfiguration(ndis_status *,
ndis_handle *, ndis_handle);
static void NdisOpenConfigurationKeyByIndex(ndis_status *,
ndis_handle, uint32_t, unicode_string *, ndis_handle *);
static void NdisOpenConfigurationKeyByName(ndis_status *,
ndis_handle, unicode_string *, ndis_handle *);
static ndis_status ndis_encode_parm(ndis_miniport_block *,
struct sysctl_oid *, ndis_parm_type, ndis_config_parm **);
static ndis_status ndis_decode_parm(ndis_miniport_block *,
ndis_config_parm *, char *);
static void NdisReadConfiguration(ndis_status *, ndis_config_parm **,
ndis_handle, unicode_string *, ndis_parm_type);
static void NdisWriteConfiguration(ndis_status *, ndis_handle,
unicode_string *, ndis_config_parm *);
static void NdisCloseConfiguration(ndis_handle);
static void NdisAllocateSpinLock(ndis_spin_lock *);
static void NdisFreeSpinLock(ndis_spin_lock *);
static void NdisAcquireSpinLock(ndis_spin_lock *);
static void NdisReleaseSpinLock(ndis_spin_lock *);
static void NdisDprAcquireSpinLock(ndis_spin_lock *);
static void NdisDprReleaseSpinLock(ndis_spin_lock *);
static void NdisInitializeReadWriteLock(ndis_rw_lock *);
static void NdisAcquireReadWriteLock(ndis_rw_lock *,
uint8_t, ndis_lock_state *);
static void NdisReleaseReadWriteLock(ndis_rw_lock *, ndis_lock_state *);
static uint32_t NdisReadPciSlotInformation(ndis_handle, uint32_t,
uint32_t, void *, uint32_t);
static uint32_t NdisWritePciSlotInformation(ndis_handle, uint32_t,
uint32_t, void *, uint32_t);
static void NdisWriteErrorLogEntry(ndis_handle, ndis_error_code, uint32_t, ...);
static void ndis_map_cb(void *, bus_dma_segment_t *, int, int);
static void NdisMStartBufferPhysicalMapping(ndis_handle,
ndis_buffer *, uint32_t, uint8_t, ndis_paddr_unit *, uint32_t *);
static void NdisMCompleteBufferPhysicalMapping(ndis_handle,
ndis_buffer *, uint32_t);
static void NdisMInitializeTimer(ndis_miniport_timer *, ndis_handle,
ndis_timer_function, void *);
static void NdisInitializeTimer(ndis_timer *,
ndis_timer_function, void *);
static void NdisSetTimer(ndis_timer *, uint32_t);
static void NdisMSetPeriodicTimer(ndis_miniport_timer *, uint32_t);
static void NdisMCancelTimer(ndis_timer *, uint8_t *);
static void ndis_timercall(kdpc *, ndis_miniport_timer *,
void *, void *);
static void NdisMQueryAdapterResources(ndis_status *, ndis_handle,
ndis_resource_list *, uint32_t *);
static ndis_status NdisMRegisterIoPortRange(void **,
ndis_handle, uint32_t, uint32_t);
static void NdisMDeregisterIoPortRange(ndis_handle,
uint32_t, uint32_t, void *);
static void NdisReadNetworkAddress(ndis_status *, void **,
uint32_t *, ndis_handle);
static ndis_status NdisQueryMapRegisterCount(uint32_t, uint32_t *);
static ndis_status NdisMAllocateMapRegisters(ndis_handle,
uint32_t, uint8_t, uint32_t, uint32_t);
static void NdisMFreeMapRegisters(ndis_handle);
static void ndis_mapshared_cb(void *, bus_dma_segment_t *, int, int);
static void NdisMAllocateSharedMemory(ndis_handle, uint32_t,
uint8_t, void **, ndis_physaddr *);
static void ndis_asyncmem_complete(device_object *, void *);
static ndis_status NdisMAllocateSharedMemoryAsync(ndis_handle,
uint32_t, uint8_t, void *);
static void NdisMFreeSharedMemory(ndis_handle, uint32_t,
uint8_t, void *, ndis_physaddr);
static ndis_status NdisMMapIoSpace(void **, ndis_handle,
ndis_physaddr, uint32_t);
static void NdisMUnmapIoSpace(ndis_handle, void *, uint32_t);
static uint32_t NdisGetCacheFillSize(void);
static void *NdisGetRoutineAddress(unicode_string *);
static uint32_t NdisMGetDmaAlignment(ndis_handle);
static ndis_status NdisMInitializeScatterGatherDma(ndis_handle,
uint8_t, uint32_t);
static void NdisUnchainBufferAtFront(ndis_packet *, ndis_buffer **);
static void NdisUnchainBufferAtBack(ndis_packet *, ndis_buffer **);
static void NdisAllocateBufferPool(ndis_status *,
ndis_handle *, uint32_t);
static void NdisFreeBufferPool(ndis_handle);
static void NdisAllocateBuffer(ndis_status *, ndis_buffer **,
ndis_handle, void *, uint32_t);
static void NdisFreeBuffer(ndis_buffer *);
static uint32_t NdisBufferLength(ndis_buffer *);
static void NdisQueryBuffer(ndis_buffer *, void **, uint32_t *);
static void NdisQueryBufferSafe(ndis_buffer *, void **,
uint32_t *, uint32_t);
static void *NdisBufferVirtualAddress(ndis_buffer *);
static void *NdisBufferVirtualAddressSafe(ndis_buffer *, uint32_t);
static void NdisAdjustBufferLength(ndis_buffer *, int);
static uint32_t NdisInterlockedIncrement(uint32_t *);
static uint32_t NdisInterlockedDecrement(uint32_t *);
static void NdisInitializeEvent(ndis_event *);
static void NdisSetEvent(ndis_event *);
static void NdisResetEvent(ndis_event *);
static uint8_t NdisWaitEvent(ndis_event *, uint32_t);
static ndis_status NdisUnicodeStringToAnsiString(ansi_string *,
unicode_string *);
static ndis_status
NdisAnsiStringToUnicodeString(unicode_string *, ansi_string *);
static ndis_status NdisMPciAssignResources(ndis_handle,
uint32_t, ndis_resource_list **);
static ndis_status NdisMRegisterInterrupt(ndis_miniport_interrupt *,
ndis_handle, uint32_t, uint32_t, uint8_t,
uint8_t, ndis_interrupt_mode);
static void NdisMDeregisterInterrupt(ndis_miniport_interrupt *);
static void NdisMRegisterAdapterShutdownHandler(ndis_handle, void *,
ndis_shutdown_handler);
static void NdisMDeregisterAdapterShutdownHandler(ndis_handle);
static uint32_t NDIS_BUFFER_TO_SPAN_PAGES(ndis_buffer *);
static void NdisGetBufferPhysicalArraySize(ndis_buffer *,
uint32_t *);
static void NdisQueryBufferOffset(ndis_buffer *,
uint32_t *, uint32_t *);
static uint32_t NdisReadPcmciaAttributeMemory(ndis_handle,
uint32_t, void *, uint32_t);
static uint32_t NdisWritePcmciaAttributeMemory(ndis_handle,
uint32_t, void *, uint32_t);
static list_entry *NdisInterlockedInsertHeadList(list_entry *,
list_entry *, ndis_spin_lock *);
static list_entry *NdisInterlockedRemoveHeadList(list_entry *,
ndis_spin_lock *);
static list_entry *NdisInterlockedInsertTailList(list_entry *,
list_entry *, ndis_spin_lock *);
static uint8_t
NdisMSynchronizeWithInterrupt(ndis_miniport_interrupt *,
void *, void *);
static void NdisGetCurrentSystemTime(uint64_t *);
static void NdisGetSystemUpTime(uint32_t *);
static uint32_t NdisGetVersion(void);
static void NdisInitializeString(unicode_string *, char *);
static void NdisInitAnsiString(ansi_string *, char *);
static void NdisInitUnicodeString(unicode_string *, uint16_t *);
static void NdisFreeString(unicode_string *);
static ndis_status NdisMRemoveMiniport(ndis_handle *);
static void NdisTerminateWrapper(ndis_handle, void *);
static void NdisMGetDeviceProperty(ndis_handle, device_object **,
device_object **, device_object **, cm_resource_list *,
cm_resource_list *);
static void NdisGetFirstBufferFromPacket(ndis_packet *,
ndis_buffer **, void **, uint32_t *, uint32_t *);
static void NdisGetFirstBufferFromPacketSafe(ndis_packet *,
ndis_buffer **, void **, uint32_t *, uint32_t *, uint32_t);
static int ndis_find_sym(linker_file_t, char *, char *, caddr_t *);
static void NdisOpenFile(ndis_status *, ndis_handle *, uint32_t *,
unicode_string *, ndis_physaddr);
static void NdisMapFile(ndis_status *, void **, ndis_handle);
static void NdisUnmapFile(ndis_handle);
static void NdisCloseFile(ndis_handle);
static uint8_t NdisSystemProcessorCount(void);
static void NdisGetCurrentProcessorCounts(uint32_t *, uint32_t *, uint32_t *);
static void NdisMIndicateStatusComplete(ndis_handle);
static void NdisMIndicateStatus(ndis_handle, ndis_status,
void *, uint32_t);
static uint8_t ndis_intr(kinterrupt *, void *);
static void ndis_intrhand(kdpc *, ndis_miniport_interrupt *, void *, void *);
static funcptr ndis_findwrap(funcptr);
static void NdisCopyFromPacketToPacket(ndis_packet *,
uint32_t, uint32_t, ndis_packet *, uint32_t, uint32_t *);
static void NdisCopyFromPacketToPacketSafe(ndis_packet *,
uint32_t, uint32_t, ndis_packet *, uint32_t, uint32_t *, uint32_t);
static void NdisIMCopySendPerPacketInfo(ndis_packet *, ndis_packet *);
static ndis_status NdisMRegisterDevice(ndis_handle,
unicode_string *, unicode_string *, driver_dispatch **,
void **, ndis_handle *);
static ndis_status NdisMDeregisterDevice(ndis_handle);
static ndis_status
NdisMQueryAdapterInstanceName(unicode_string *, ndis_handle);
static void NdisMRegisterUnloadHandler(ndis_handle, void *);
static void dummy(void);
/*
* Some really old drivers do not properly check the return value
* from NdisAllocatePacket() and NdisAllocateBuffer() and will
* sometimes allocate a few more buffers/packets than they originally
* requested when they created the pool. To prevent this from being
* a problem, we allocate a few extra buffers/packets beyond what
* the driver asks for. This #define controls how many.
*/
#define NDIS_POOL_EXTRA 16
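/*
 * For example (illustrative only): a driver that creates a pool with
 * descnum = 64 actually gets 64 + NDIS_POOL_EXTRA = 80 descriptors;
 * see NdisAllocatePacketPool() below, which sets
 * p->np_cnt = descnum + NDIS_POOL_EXTRA.
 */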
int
ndis_libinit()
{
image_patch_table *patch;
strcpy(ndis_filepath, "/compat/ndis");
patch = ndis_functbl;
while (patch->ipt_func != NULL) {
windrv_wrap((funcptr)patch->ipt_func,
(funcptr *)&patch->ipt_wrap,
patch->ipt_argcnt, patch->ipt_ftype);
patch++;
}
return (0);
}
int
ndis_libfini()
{
image_patch_table *patch;
patch = ndis_functbl;
while (patch->ipt_func != NULL) {
windrv_unwrap(patch->ipt_wrap);
patch++;
}
return (0);
}
static funcptr
ndis_findwrap(func)
funcptr func;
{
image_patch_table *patch;
patch = ndis_functbl;
while (patch->ipt_func != NULL) {
if ((funcptr)patch->ipt_func == func)
return ((funcptr)patch->ipt_wrap);
patch++;
}
return (NULL);
}
/*
* This routine does the messy Windows Driver Model device attachment
* stuff on behalf of NDIS drivers. We register our own AddDevice
* routine here.
*/
static void
NdisInitializeWrapper(wrapper, drv, path, unused)
ndis_handle *wrapper;
driver_object *drv;
void *path;
void *unused;
{
/*
* As of yet, I haven't come up with a compelling
* reason to define a private NDIS wrapper structure,
* so we use a pointer to the driver object as the
* wrapper handle. The driver object has the miniport
* characteristics struct for this driver hung off it
* via IoAllocateDriverObjectExtension(), and that's
* really all the private data we need.
*/
*wrapper = drv;
/*
* If this was really Windows, we'd be registering dispatch
* routines for the NDIS miniport module here, but we're
* not Windows so all we really need to do is set up an
* AddDevice function that'll be invoked when a new device
* instance appears.
*/
drv->dro_driverext->dre_adddevicefunc = NdisAddDevice;
}
static void
NdisTerminateWrapper(handle, syspec)
ndis_handle handle;
void *syspec;
{
/* Nothing to see here, move along. */
}
static ndis_status
NdisMRegisterMiniport(handle, characteristics, len)
ndis_handle handle;
ndis_miniport_characteristics *characteristics;
int len;
{
ndis_miniport_characteristics *ch = NULL;
driver_object *drv;
drv = (driver_object *)handle;
/*
* We need to save the NDIS miniport characteristics
* somewhere. This data is per-driver, not per-device
* (all devices handled by the same driver have the
* same characteristics) so we hook it onto the driver
* object using IoAllocateDriverObjectExtension().
* The extra extension info is automagically deleted when
* the driver is unloaded (see windrv_unload()).
*/
if (IoAllocateDriverObjectExtension(drv, (void *)1,
sizeof(ndis_miniport_characteristics), (void **)&ch) !=
STATUS_SUCCESS) {
return (NDIS_STATUS_RESOURCES);
}
bzero((char *)ch, sizeof(ndis_miniport_characteristics));
bcopy((char *)characteristics, (char *)ch, len);
if (ch->nmc_version_major < 5 || ch->nmc_version_minor < 1) {
ch->nmc_shutdown_handler = NULL;
ch->nmc_canceltxpkts_handler = NULL;
ch->nmc_pnpevent_handler = NULL;
}
return (NDIS_STATUS_SUCCESS);
}
static ndis_status
NdisAllocateMemoryWithTag(vaddr, len, tag)
void **vaddr;
uint32_t len;
uint32_t tag;
{
void *mem;
mem = ExAllocatePoolWithTag(NonPagedPool, len, tag);
if (mem == NULL) {
return (NDIS_STATUS_RESOURCES);
}
*vaddr = mem;
return (NDIS_STATUS_SUCCESS);
}
static ndis_status
NdisAllocateMemory(vaddr, len, flags, highaddr)
void **vaddr;
uint32_t len;
uint32_t flags;
ndis_physaddr highaddr;
{
void *mem;
mem = ExAllocatePoolWithTag(NonPagedPool, len, 0);
if (mem == NULL)
return (NDIS_STATUS_RESOURCES);
*vaddr = mem;
return (NDIS_STATUS_SUCCESS);
}
static void
NdisFreeMemory(vaddr, len, flags)
void *vaddr;
uint32_t len;
uint32_t flags;
{
if (len == 0)
return;
ExFreePool(vaddr);
}
static ndis_status
NdisMSetAttributesEx(adapter_handle, adapter_ctx, hangsecs,
flags, iftype)
ndis_handle adapter_handle;
ndis_handle adapter_ctx;
uint32_t hangsecs;
uint32_t flags;
ndis_interface_type iftype;
{
ndis_miniport_block *block;
/*
* Save the adapter context, we need it for calling
* the driver's internal functions.
*/
block = (ndis_miniport_block *)adapter_handle;
block->nmb_miniportadapterctx = adapter_ctx;
block->nmb_checkforhangsecs = hangsecs;
block->nmb_flags = flags;
return (NDIS_STATUS_SUCCESS);
}
static void
NdisOpenConfiguration(status, cfg, wrapctx)
ndis_status *status;
ndis_handle *cfg;
ndis_handle wrapctx;
{
*cfg = wrapctx;
*status = NDIS_STATUS_SUCCESS;
}
static void
NdisOpenConfigurationKeyByName(status, cfg, subkey, subhandle)
ndis_status *status;
ndis_handle cfg;
unicode_string *subkey;
ndis_handle *subhandle;
{
*subhandle = cfg;
*status = NDIS_STATUS_SUCCESS;
}
static void
NdisOpenConfigurationKeyByIndex(status, cfg, idx, subkey, subhandle)
ndis_status *status;
ndis_handle cfg;
uint32_t idx;
unicode_string *subkey;
ndis_handle *subhandle;
{
*status = NDIS_STATUS_FAILURE;
}
static ndis_status
ndis_encode_parm(block, oid, type, parm)
ndis_miniport_block *block;
struct sysctl_oid *oid;
ndis_parm_type type;
ndis_config_parm **parm;
{
ndis_config_parm *p;
ndis_parmlist_entry *np;
unicode_string *us;
ansi_string as;
int base = 0;
uint32_t val;
char tmp[32];
np = ExAllocatePoolWithTag(NonPagedPool,
sizeof(ndis_parmlist_entry), 0);
if (np == NULL)
return (NDIS_STATUS_RESOURCES);
InsertHeadList((&block->nmb_parmlist), (&np->np_list));
*parm = p = &np->np_parm;
switch(type) {
case ndis_parm_string:
/* See if this might be a number. */
val = strtoul((char *)oid->oid_arg1, NULL, 10);
us = &p->ncp_parmdata.ncp_stringdata;
p->ncp_type = ndis_parm_string;
if (val) {
snprintf(tmp, 32, "%x", val);
RtlInitAnsiString(&as, tmp);
} else {
RtlInitAnsiString(&as, (char *)oid->oid_arg1);
}
if (RtlAnsiStringToUnicodeString(us, &as, TRUE)) {
ExFreePool(np);
return (NDIS_STATUS_RESOURCES);
}
break;
case ndis_parm_int:
if (strncmp((char *)oid->oid_arg1, "0x", 2) == 0)
base = 16;
else
base = 10;
p->ncp_type = ndis_parm_int;
p->ncp_parmdata.ncp_intdata =
strtol((char *)oid->oid_arg1, NULL, base);
break;
case ndis_parm_hexint:
#ifdef notdef
if (strncmp((char *)oid->oid_arg1, "0x", 2) == 0)
base = 16;
else
base = 10;
#endif
base = 16;
p->ncp_type = ndis_parm_hexint;
p->ncp_parmdata.ncp_intdata =
strtoul((char *)oid->oid_arg1, NULL, base);
break;
default:
return (NDIS_STATUS_FAILURE);
break;
}
return (NDIS_STATUS_SUCCESS);
}
static void
NdisReadConfiguration(status, parm, cfg, key, type)
ndis_status *status;
ndis_config_parm **parm;
ndis_handle cfg;
unicode_string *key;
ndis_parm_type type;
{
char *keystr = NULL;
ndis_miniport_block *block;
struct ndis_softc *sc;
struct sysctl_oid *oidp;
struct sysctl_ctx_entry *e;
ansi_string as;
block = (ndis_miniport_block *)cfg;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
/*
device_printf(sc->ndis_dev, "NdisReadConfiguration sc=%p\n", sc);
*/
if (key->us_len == 0 || key->us_buf == NULL) {
*status = NDIS_STATUS_FAILURE;
return;
}
if (RtlUnicodeStringToAnsiString(&as, key, TRUE)) {
*status = NDIS_STATUS_RESOURCES;
return;
}
keystr = as.as_buf;
/*
* See if registry key is already in a list of known keys
* included with the driver.
*/
TAILQ_FOREACH(e, device_get_sysctl_ctx(sc->ndis_dev), link) {
oidp = e->entry;
if (strcasecmp(oidp->oid_name, keystr) == 0) {
if (strcmp((char *)oidp->oid_arg1, "UNSET") == 0) {
RtlFreeAnsiString(&as);
*status = NDIS_STATUS_FAILURE;
return;
}
*status = ndis_encode_parm(block, oidp, type, parm);
RtlFreeAnsiString(&as);
return;
}
}
/*
* If the key didn't match, add it to the list of dynamically
* created ones. Sometimes, drivers refer to registry keys
* that aren't documented in their .INF files. These keys
* are supposed to be created by some sort of utility or
* control panel snap-in that comes with the driver software.
* Sometimes it's useful to be able to manipulate these.
* If the driver requests the key in the form of a string,
* make its default value an empty string, otherwise default
* it to "0".
*/
if (type == ndis_parm_int || type == ndis_parm_hexint)
ndis_add_sysctl(sc, keystr, "(dynamic integer key)",
"UNSET", CTLFLAG_RW);
else
ndis_add_sysctl(sc, keystr, "(dynamic string key)",
"UNSET", CTLFLAG_RW);
RtlFreeAnsiString(&as);
*status = NDIS_STATUS_FAILURE;
}
static ndis_status
ndis_decode_parm(block, parm, val)
ndis_miniport_block *block;
ndis_config_parm *parm;
char *val;
{
unicode_string *ustr;
ansi_string as;
switch(parm->ncp_type) {
case ndis_parm_string:
ustr = &parm->ncp_parmdata.ncp_stringdata;
if (RtlUnicodeStringToAnsiString(&as, ustr, TRUE))
return (NDIS_STATUS_RESOURCES);
bcopy(as.as_buf, val, as.as_len);
RtlFreeAnsiString(&as);
break;
case ndis_parm_int:
sprintf(val, "%d", parm->ncp_parmdata.ncp_intdata);
break;
case ndis_parm_hexint:
sprintf(val, "%xu", parm->ncp_parmdata.ncp_intdata);
break;
default:
return (NDIS_STATUS_FAILURE);
break;
}
return (NDIS_STATUS_SUCCESS);
}
static void
NdisWriteConfiguration(status, cfg, key, parm)
ndis_status *status;
ndis_handle cfg;
unicode_string *key;
ndis_config_parm *parm;
{
ansi_string as;
char *keystr = NULL;
ndis_miniport_block *block;
struct ndis_softc *sc;
struct sysctl_oid *oidp;
struct sysctl_ctx_entry *e;
char val[256];
block = (ndis_miniport_block *)cfg;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
if (RtlUnicodeStringToAnsiString(&as, key, TRUE)) {
*status = NDIS_STATUS_RESOURCES;
return;
}
keystr = as.as_buf;
/* Decode the parameter into a string. */
bzero(val, sizeof(val));
*status = ndis_decode_parm(block, parm, val);
if (*status != NDIS_STATUS_SUCCESS) {
RtlFreeAnsiString(&as);
return;
}
/* See if the key already exists. */
TAILQ_FOREACH(e, device_get_sysctl_ctx(sc->ndis_dev), link) {
oidp = e->entry;
if (strcasecmp(oidp->oid_name, keystr) == 0) {
/* Found it, set the value. */
strcpy((char *)oidp->oid_arg1, val);
RtlFreeAnsiString(&as);
return;
}
}
/* Not found, add a new key with the specified value. */
ndis_add_sysctl(sc, keystr, "(dynamically set key)",
val, CTLFLAG_RW);
RtlFreeAnsiString(&as);
*status = NDIS_STATUS_SUCCESS;
}
static void
NdisCloseConfiguration(cfg)
ndis_handle cfg;
{
list_entry *e;
ndis_parmlist_entry *pe;
ndis_miniport_block *block;
ndis_config_parm *p;
block = (ndis_miniport_block *)cfg;
while (!IsListEmpty(&block->nmb_parmlist)) {
e = RemoveHeadList(&block->nmb_parmlist);
pe = CONTAINING_RECORD(e, ndis_parmlist_entry, np_list);
p = &pe->np_parm;
if (p->ncp_type == ndis_parm_string)
RtlFreeUnicodeString(&p->ncp_parmdata.ncp_stringdata);
ExFreePool(e);
}
}
/*
* Initialize a Windows spinlock.
*/
static void
NdisAllocateSpinLock(lock)
ndis_spin_lock *lock;
{
KeInitializeSpinLock(&lock->nsl_spinlock);
lock->nsl_kirql = 0;
}
/*
* Destroy a Windows spinlock. This is a no-op for now. There are two reasons
* for this. One is that it's sort of superfluous: we don't have to do anything
* special to deallocate the spinlock. The other is that there are some buggy
* drivers which call NdisFreeSpinLock() _after_ calling NdisFreeMemory() on
* the block of memory in which the spinlock resides. (Yes, ADMtek, I'm
* talking to you.)
*/
static void
NdisFreeSpinLock(lock)
ndis_spin_lock *lock;
{
#ifdef notdef
KeInitializeSpinLock(&lock->nsl_spinlock);
lock->nsl_kirql = 0;
#endif
}
/*
* Acquire a spinlock from IRQL <= DISPATCH_LEVEL.
*/
static void
NdisAcquireSpinLock(lock)
ndis_spin_lock *lock;
{
KeAcquireSpinLock(&lock->nsl_spinlock, &lock->nsl_kirql);
}
/*
* Release a spinlock from IRQL == DISPATCH_LEVEL.
*/
static void
NdisReleaseSpinLock(lock)
ndis_spin_lock *lock;
{
KeReleaseSpinLock(&lock->nsl_spinlock, lock->nsl_kirql);
}
/*
* Acquire a spinlock when already running at IRQL == DISPATCH_LEVEL.
*/
static void
NdisDprAcquireSpinLock(lock)
ndis_spin_lock *lock;
{
KeAcquireSpinLockAtDpcLevel(&lock->nsl_spinlock);
}
/*
* Release a spinlock without leaving IRQL == DISPATCH_LEVEL.
*/
static void
NdisDprReleaseSpinLock(lock)
ndis_spin_lock *lock;
{
KeReleaseSpinLockFromDpcLevel(&lock->nsl_spinlock);
}
static void
NdisInitializeReadWriteLock(lock)
ndis_rw_lock *lock;
{
KeInitializeSpinLock(&lock->nrl_spinlock);
bzero((char *)&lock->nrl_rsvd, sizeof(lock->nrl_rsvd));
}
static void
NdisAcquireReadWriteLock(ndis_rw_lock *lock, uint8_t writeacc,
ndis_lock_state *state)
{
if (writeacc == TRUE) {
KeAcquireSpinLock(&lock->nrl_spinlock, &state->nls_oldirql);
lock->nrl_rsvd[0]++;
} else
lock->nrl_rsvd[1]++;
}
static void
NdisReleaseReadWriteLock(lock, state)
ndis_rw_lock *lock;
ndis_lock_state *state;
{
if (lock->nrl_rsvd[0]) {
lock->nrl_rsvd[0]--;
KeReleaseSpinLock(&lock->nrl_spinlock, state->nls_oldirql);
} else
lock->nrl_rsvd[1]--;
}
static uint32_t
NdisReadPciSlotInformation(adapter, slot, offset, buf, len)
ndis_handle adapter;
uint32_t slot;
uint32_t offset;
void *buf;
uint32_t len;
{
ndis_miniport_block *block;
uint32_t i;
char *dest;
device_t dev;
block = (ndis_miniport_block *)adapter;
dest = buf;
if (block == NULL)
return (0);
dev = block->nmb_physdeviceobj->do_devext;
/*
* I have a test system consisting of a Sun w2100z
* dual 2.4GHz Opteron machine and an Atheros 802.11a/b/g
* "Aries" miniPCI NIC. (The NIC is installed in the
* machine using a miniPCI to PCI bus adapter card.)
* When running in SMP mode, I found that
* performing a large number of consecutive calls to
* NdisReadPciSlotInformation() would result in a
* sudden system reset (or in some cases a freeze).
* My suspicion is that the multiple reads are somehow
* triggering a fatal PCI bus error that leads to a
* machine check. The 1us delay in the loop below
* seems to prevent this problem.
*/
for (i = 0; i < len; i++) {
DELAY(1);
dest[i] = pci_read_config(dev, i + offset, 1);
}
return (len);
}
static uint32_t
NdisWritePciSlotInformation(adapter, slot, offset, buf, len)
ndis_handle adapter;
uint32_t slot;
uint32_t offset;
void *buf;
uint32_t len;
{
ndis_miniport_block *block;
uint32_t i;
char *dest;
device_t dev;
block = (ndis_miniport_block *)adapter;
dest = buf;
if (block == NULL)
return (0);
dev = block->nmb_physdeviceobj->do_devext;
for (i = 0; i < len; i++) {
DELAY(1);
pci_write_config(dev, i + offset, dest[i], 1);
}
return (len);
}
/*
* The errorlog routine uses a variable argument list, so we
* have to declare it this way.
*/
#define ERRMSGLEN 512
static void
NdisWriteErrorLogEntry(ndis_handle adapter, ndis_error_code code,
uint32_t numerrors, ...)
{
ndis_miniport_block *block;
va_list ap;
int i, error;
char *str = NULL;
uint16_t flags;
device_t dev;
driver_object *drv;
struct ndis_softc *sc;
struct ifnet *ifp;
unicode_string us;
ansi_string as = { 0, 0, NULL };
block = (ndis_miniport_block *)adapter;
dev = block->nmb_physdeviceobj->do_devext;
drv = block->nmb_deviceobj->do_drvobj;
sc = device_get_softc(dev);
ifp = NDISUSB_GET_IFNET(sc);
if (ifp != NULL && ifp->if_flags & IFF_DEBUG) {
error = pe_get_message((vm_offset_t)drv->dro_driverstart,
code, &str, &i, &flags);
if (error == 0) {
if (flags & MESSAGE_RESOURCE_UNICODE) {
RtlInitUnicodeString(&us, (uint16_t *)str);
if (RtlUnicodeStringToAnsiString(&as,
&us, TRUE) == STATUS_SUCCESS)
str = as.as_buf;
else
str = NULL;
}
}
}
device_printf(dev, "NDIS ERROR: %x (%s)\n", code,
str == NULL ? "unknown error" : str);
if (ifp != NULL && ifp->if_flags & IFF_DEBUG) {
device_printf(dev, "NDIS NUMERRORS: %x\n", numerrors);
va_start(ap, numerrors);
for (i = 0; i < numerrors; i++)
device_printf(dev, "argptr: %p\n",
va_arg(ap, void *));
va_end(ap);
}
if (as.as_len)
RtlFreeAnsiString(&as);
}
static void
ndis_map_cb(arg, segs, nseg, error)
void *arg;
bus_dma_segment_t *segs;
int nseg;
int error;
{
struct ndis_map_arg *ctx;
int i;
if (error)
return;
ctx = arg;
for (i = 0; i < nseg; i++) {
ctx->nma_fraglist[i].npu_physaddr.np_quad = segs[i].ds_addr;
ctx->nma_fraglist[i].npu_len = segs[i].ds_len;
}
ctx->nma_cnt = nseg;
}
static void
NdisMStartBufferPhysicalMapping(ndis_handle adapter, ndis_buffer *buf,
uint32_t mapreg, uint8_t writedev, ndis_paddr_unit *addrarray,
uint32_t *arraysize)
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ndis_map_arg nma;
bus_dmamap_t map;
int error;
if (adapter == NULL)
return;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
if (mapreg > sc->ndis_mmapcnt)
return;
map = sc->ndis_mmaps[mapreg];
nma.nma_fraglist = addrarray;
error = bus_dmamap_load(sc->ndis_mtag, map,
MmGetMdlVirtualAddress(buf), MmGetMdlByteCount(buf), ndis_map_cb,
(void *)&nma, BUS_DMA_NOWAIT);
if (error)
return;
bus_dmamap_sync(sc->ndis_mtag, map,
writedev ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
*arraysize = nma.nma_cnt;
}
static void
NdisMCompleteBufferPhysicalMapping(adapter, buf, mapreg)
ndis_handle adapter;
ndis_buffer *buf;
uint32_t mapreg;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
bus_dmamap_t map;
if (adapter == NULL)
return;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
if (mapreg > sc->ndis_mmapcnt)
return;
map = sc->ndis_mmaps[mapreg];
bus_dmamap_sync(sc->ndis_mtag, map,
BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->ndis_mtag, map);
}
/*
* This is an older (?) timer init routine which doesn't
* accept a miniport context handle. Serialized miniports should
* never call this function.
*/
static void
NdisInitializeTimer(timer, func, ctx)
ndis_timer *timer;
ndis_timer_function func;
void *ctx;
{
KeInitializeTimer(&timer->nt_ktimer);
KeInitializeDpc(&timer->nt_kdpc, func, ctx);
KeSetImportanceDpc(&timer->nt_kdpc, KDPC_IMPORTANCE_LOW);
}
static void
ndis_timercall(dpc, timer, sysarg1, sysarg2)
kdpc *dpc;
ndis_miniport_timer *timer;
void *sysarg1;
void *sysarg2;
{
/*
* Since we're called as a DPC, we should be running
* at DISPATCH_LEVEL here. This means to acquire the
* spinlock, we can use KeAcquireSpinLockAtDpcLevel()
* rather than KeAcquireSpinLock().
*/
if (NDIS_SERIALIZED(timer->nmt_block))
KeAcquireSpinLockAtDpcLevel(&timer->nmt_block->nmb_lock);
MSCALL4(timer->nmt_timerfunc, dpc, timer->nmt_timerctx,
sysarg1, sysarg2);
if (NDIS_SERIALIZED(timer->nmt_block))
KeReleaseSpinLockFromDpcLevel(&timer->nmt_block->nmb_lock);
}
/*
* For a long time I wondered why there were two NDIS timer initialization
* routines, and why this one needed an NDIS_MINIPORT_TIMER and the
* MiniportAdapterHandle. The NDIS_MINIPORT_TIMER has its own callout
* function and context pointers separate from those in the DPC, which
* allows for another level of indirection: when the timer fires, we
* can have our own timer function invoked, and from there we can call
* the driver's function. But why go to all that trouble? Then it hit
* me: for serialized miniports, the timer callouts are not re-entrant.
* By trapping the callouts and having access to the MiniportAdapterHandle,
* we can protect the driver callouts by acquiring the NDIS serialization
* lock. This is essential for allowing serialized miniports to work
* correctly on SMP systems. On UP hosts, setting IRQL to DISPATCH_LEVEL
* is enough to prevent other threads from pre-empting you, but with
* SMP, you must acquire a lock as well, otherwise the other CPU is
* free to clobber you.
*/
static void
NdisMInitializeTimer(timer, handle, func, ctx)
ndis_miniport_timer *timer;
ndis_handle handle;
ndis_timer_function func;
void *ctx;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
block = (ndis_miniport_block *)handle;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
/* Save the driver's funcptr and context */
timer->nmt_timerfunc = func;
timer->nmt_timerctx = ctx;
timer->nmt_block = handle;
/*
* Set up the timer so it will call our intermediate DPC.
* Be sure to use the wrapped entry point, since
* ntoskrnl_run_dpc() expects to invoke a function with
* Microsoft calling conventions.
*/
KeInitializeTimer(&timer->nmt_ktimer);
KeInitializeDpc(&timer->nmt_kdpc,
ndis_findwrap((funcptr)ndis_timercall), timer);
timer->nmt_ktimer.k_dpc = &timer->nmt_kdpc;
}
/*
* In Windows, there's both an NdisMSetTimer() and an NdisSetTimer(),
* but the former is just a macro wrapper around the latter.
*/
static void
NdisSetTimer(timer, msecs)
ndis_timer *timer;
uint32_t msecs;
{
/*
* KeSetTimer() wants the period in
* hundred nanosecond intervals.
*/
KeSetTimer(&timer->nt_ktimer,
((int64_t)msecs * -10000), &timer->nt_kdpc);
}
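/*
 * Worked example (illustrative only): a driver asking for a 5 ms
 * timeout passes msecs = 5, which becomes 5 * -10000 = -50000, i.e.
 * a due time of 50000 hundred-nanosecond units (5 ms); the negative
 * value tells KeSetTimer() the time is relative to now.
 */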
static void
NdisMSetPeriodicTimer(timer, msecs)
ndis_miniport_timer *timer;
uint32_t msecs;
{
KeSetTimerEx(&timer->nmt_ktimer,
((int64_t)msecs * -10000), msecs, &timer->nmt_kdpc);
}
/*
* Technically, this is really NdisCancelTimer(), but we also
* (ab)use it for NdisMCancelTimer(), since in our implementation
* we don't need the extra info in the ndis_miniport_timer
* structure just to cancel a timer.
*/
static void
NdisMCancelTimer(timer, cancelled)
ndis_timer *timer;
uint8_t *cancelled;
{
*cancelled = KeCancelTimer(&timer->nt_ktimer);
}
static void
NdisMQueryAdapterResources(status, adapter, list, buflen)
ndis_status *status;
ndis_handle adapter;
ndis_resource_list *list;
uint32_t *buflen;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
int rsclen;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
rsclen = sizeof(ndis_resource_list) +
(sizeof(cm_partial_resource_desc) * (sc->ndis_rescnt - 1));
if (*buflen < rsclen) {
*buflen = rsclen;
*status = NDIS_STATUS_INVALID_LENGTH;
return;
}
bcopy((char *)block->nmb_rlist, (char *)list, rsclen);
*status = NDIS_STATUS_SUCCESS;
}
static ndis_status
NdisMRegisterIoPortRange(offset, adapter, port, numports)
void **offset;
ndis_handle adapter;
uint32_t port;
uint32_t numports;
{
struct ndis_miniport_block *block;
struct ndis_softc *sc;
if (adapter == NULL)
return (NDIS_STATUS_FAILURE);
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
if (sc->ndis_res_io == NULL)
return (NDIS_STATUS_FAILURE);
/* Don't let the device map more ports than we have. */
if (rman_get_size(sc->ndis_res_io) < numports)
return (NDIS_STATUS_INVALID_LENGTH);
*offset = (void *)rman_get_start(sc->ndis_res_io);
return (NDIS_STATUS_SUCCESS);
}
static void
NdisMDeregisterIoPortRange(adapter, port, numports, offset)
ndis_handle adapter;
uint32_t port;
uint32_t numports;
void *offset;
{
}
static void
NdisReadNetworkAddress(status, addr, addrlen, adapter)
ndis_status *status;
void **addr;
uint32_t *addrlen;
ndis_handle adapter;
{
struct ndis_softc *sc;
struct ifnet *ifp;
ndis_miniport_block *block;
uint8_t empty[] = { 0, 0, 0, 0, 0, 0 };
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = NDISUSB_GET_IFNET(sc);
if (ifp == NULL) {
*status = NDIS_STATUS_FAILURE;
return;
}
if (ifp->if_addr == NULL ||
bcmp(IF_LLADDR(sc->ifp), empty, ETHER_ADDR_LEN) == 0)
*status = NDIS_STATUS_FAILURE;
else {
*addr = IF_LLADDR(sc->ifp);
*addrlen = ETHER_ADDR_LEN;
*status = NDIS_STATUS_SUCCESS;
}
}
static ndis_status
NdisQueryMapRegisterCount(bustype, cnt)
uint32_t bustype;
uint32_t *cnt;
{
*cnt = 8192;
return (NDIS_STATUS_SUCCESS);
}
static ndis_status
NdisMAllocateMapRegisters(ndis_handle adapter, uint32_t dmachannel,
uint8_t dmasize, uint32_t physmapneeded, uint32_t maxmap)
{
struct ndis_softc *sc;
ndis_miniport_block *block;
int error, i, nseg = NDIS_MAXSEG;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
- sc->ndis_mmaps = mallocarray(physmapneeded, sizeof(bus_dmamap_t),
+ sc->ndis_mmaps = malloc(sizeof(bus_dmamap_t) * physmapneeded,
M_DEVBUF, M_NOWAIT|M_ZERO);
if (sc->ndis_mmaps == NULL)
return (NDIS_STATUS_RESOURCES);
error = bus_dma_tag_create(sc->ndis_parent_tag, ETHER_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
NULL, maxmap * nseg, nseg, maxmap, BUS_DMA_ALLOCNOW,
NULL, NULL, &sc->ndis_mtag);
if (error) {
free(sc->ndis_mmaps, M_DEVBUF);
return (NDIS_STATUS_RESOURCES);
}
for (i = 0; i < physmapneeded; i++)
bus_dmamap_create(sc->ndis_mtag, 0, &sc->ndis_mmaps[i]);
sc->ndis_mmapcnt = physmapneeded;
return (NDIS_STATUS_SUCCESS);
}
static void
NdisMFreeMapRegisters(adapter)
ndis_handle adapter;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
int i;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
for (i = 0; i < sc->ndis_mmapcnt; i++)
bus_dmamap_destroy(sc->ndis_mtag, sc->ndis_mmaps[i]);
free(sc->ndis_mmaps, M_DEVBUF);
bus_dma_tag_destroy(sc->ndis_mtag);
}
static void
ndis_mapshared_cb(arg, segs, nseg, error)
void *arg;
bus_dma_segment_t *segs;
int nseg;
int error;
{
ndis_physaddr *p;
if (error || nseg > 1)
return;
p = arg;
p->np_quad = segs[0].ds_addr;
}
/*
* This maps to bus_dmamem_alloc().
*/
static void
NdisMAllocateSharedMemory(ndis_handle adapter, uint32_t len, uint8_t cached,
void **vaddr, ndis_physaddr *paddr)
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ndis_shmem *sh;
int error;
if (adapter == NULL)
return;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
sh = malloc(sizeof(struct ndis_shmem), M_DEVBUF, M_NOWAIT|M_ZERO);
if (sh == NULL)
return;
InitializeListHead(&sh->ndis_list);
/*
* When performing shared memory allocations, create a tag
* with a lowaddr limit that restricts physical memory mappings
* so that they all fall within the first 1GB of memory.
* At least one device/driver combination (Linksys Instant
* Wireless PCI Card V2.7, Broadcom 802.11b) seems to have
* problems with performing DMA operations with physical
* addresses that lie above the 1GB mark. I don't know if this
* is a hardware limitation or if the addresses are being
* truncated within the driver, but this seems to be the only
* way to make these cards work reliably in systems with more
* than 1GB of physical memory.
*/
error = bus_dma_tag_create(sc->ndis_parent_tag, 64,
0, NDIS_BUS_SPACE_SHARED_MAXADDR, BUS_SPACE_MAXADDR, NULL,
NULL, len, 1, len, BUS_DMA_ALLOCNOW, NULL, NULL,
&sh->ndis_stag);
if (error) {
free(sh, M_DEVBUF);
return;
}
error = bus_dmamem_alloc(sh->ndis_stag, vaddr,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sh->ndis_smap);
if (error) {
bus_dma_tag_destroy(sh->ndis_stag);
free(sh, M_DEVBUF);
return;
}
error = bus_dmamap_load(sh->ndis_stag, sh->ndis_smap, *vaddr,
len, ndis_mapshared_cb, (void *)paddr, BUS_DMA_NOWAIT);
if (error) {
bus_dmamem_free(sh->ndis_stag, *vaddr, sh->ndis_smap);
bus_dma_tag_destroy(sh->ndis_stag);
free(sh, M_DEVBUF);
return;
}
/*
* Save the physical address along with the source address.
* The AirGo MIMO driver will call NdisMFreeSharedMemory()
* with a bogus virtual address sometimes, but with a valid
* physical address. To keep this from causing trouble, we
* use the physical address as a sanity check in case
* searching based on the virtual address fails.
*/
NDIS_LOCK(sc);
sh->ndis_paddr.np_quad = paddr->np_quad;
sh->ndis_saddr = *vaddr;
InsertHeadList((&sc->ndis_shlist), (&sh->ndis_list));
NDIS_UNLOCK(sc);
}
struct ndis_allocwork {
uint32_t na_len;
uint8_t na_cached;
void *na_ctx;
io_workitem *na_iw;
};
static void
ndis_asyncmem_complete(dobj, arg)
device_object *dobj;
void *arg;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ndis_allocwork *w;
void *vaddr;
ndis_physaddr paddr;
ndis_allocdone_handler donefunc;
w = arg;
block = (ndis_miniport_block *)dobj->do_devext;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
vaddr = NULL;
paddr.np_quad = 0;
donefunc = sc->ndis_chars->nmc_allocate_complete_func;
NdisMAllocateSharedMemory(block, w->na_len,
w->na_cached, &vaddr, &paddr);
MSCALL5(donefunc, block, vaddr, &paddr, w->na_len, w->na_ctx);
IoFreeWorkItem(w->na_iw);
free(w, M_DEVBUF);
}
static ndis_status
NdisMAllocateSharedMemoryAsync(ndis_handle adapter, uint32_t len,
uint8_t cached, void *ctx)
{
ndis_miniport_block *block;
struct ndis_allocwork *w;
io_workitem *iw;
io_workitem_func ifw;
if (adapter == NULL)
return (NDIS_STATUS_FAILURE);
block = adapter;
iw = IoAllocateWorkItem(block->nmb_deviceobj);
if (iw == NULL)
return (NDIS_STATUS_FAILURE);
w = malloc(sizeof(struct ndis_allocwork), M_TEMP, M_NOWAIT);
if (w == NULL)
return (NDIS_STATUS_FAILURE);
w->na_cached = cached;
w->na_len = len;
w->na_ctx = ctx;
w->na_iw = iw;
ifw = (io_workitem_func)ndis_findwrap((funcptr)ndis_asyncmem_complete);
IoQueueWorkItem(iw, ifw, WORKQUEUE_DELAYED, w);
return (NDIS_STATUS_PENDING);
}
static void
NdisMFreeSharedMemory(ndis_handle adapter, uint32_t len, uint8_t cached,
void *vaddr, ndis_physaddr paddr)
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ndis_shmem *sh = NULL;
list_entry *l;
if (vaddr == NULL || adapter == NULL)
return;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
/* Sanity check: is list empty? */
if (IsListEmpty(&sc->ndis_shlist))
return;
NDIS_LOCK(sc);
l = sc->ndis_shlist.nle_flink;
while (l != &sc->ndis_shlist) {
sh = CONTAINING_RECORD(l, struct ndis_shmem, ndis_list);
if (sh->ndis_saddr == vaddr)
break;
/*
* Check the physaddr too, just in case the driver lied
* about the virtual address.
*/
if (sh->ndis_paddr.np_quad == paddr.np_quad)
break;
l = l->nle_flink;
}
if (sh == NULL) {
NDIS_UNLOCK(sc);
printf("NDIS: buggy driver tried to free "
"invalid shared memory: vaddr: %p paddr: 0x%jx\n",
vaddr, (uintmax_t)paddr.np_quad);
return;
}
RemoveEntryList(&sh->ndis_list);
NDIS_UNLOCK(sc);
bus_dmamap_unload(sh->ndis_stag, sh->ndis_smap);
bus_dmamem_free(sh->ndis_stag, sh->ndis_saddr, sh->ndis_smap);
bus_dma_tag_destroy(sh->ndis_stag);
free(sh, M_DEVBUF);
}
static ndis_status
NdisMMapIoSpace(vaddr, adapter, paddr, len)
void **vaddr;
ndis_handle adapter;
ndis_physaddr paddr;
uint32_t len;
{
if (adapter == NULL)
return (NDIS_STATUS_FAILURE);
*vaddr = MmMapIoSpace(paddr.np_quad, len, 0);
if (*vaddr == NULL)
return (NDIS_STATUS_FAILURE);
return (NDIS_STATUS_SUCCESS);
}
static void
NdisMUnmapIoSpace(adapter, vaddr, len)
ndis_handle adapter;
void *vaddr;
uint32_t len;
{
MmUnmapIoSpace(vaddr, len);
}
static uint32_t
NdisGetCacheFillSize(void)
{
return (128);
}
static void *
NdisGetRoutineAddress(ustr)
unicode_string *ustr;
{
ansi_string astr;
if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE))
return (NULL);
return (ndis_get_routine_address(ndis_functbl, astr.as_buf));
}
static uint32_t
NdisMGetDmaAlignment(handle)
ndis_handle handle;
{
return (16);
}
/*
* NDIS has two methods for dealing with NICs that support DMA.
* One is to just pass packets to the driver and let it call
* NdisMStartBufferPhysicalMapping() to map each buffer in the packet
* all by itself, and the other is to let the NDIS library handle the
* buffer mapping internally, and hand the driver an already populated
* scatter/gather fragment list. If the driver calls
* NdisMInitializeScatterGatherDma(), it wants to use the latter
* method.
*/
static ndis_status
NdisMInitializeScatterGatherDma(ndis_handle adapter, uint8_t is64,
uint32_t maxphysmap)
{
struct ndis_softc *sc;
ndis_miniport_block *block;
int error;
if (adapter == NULL)
return (NDIS_STATUS_FAILURE);
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
/* Don't do this twice. */
if (sc->ndis_sc == 1)
return (NDIS_STATUS_SUCCESS);
error = bus_dma_tag_create(sc->ndis_parent_tag, ETHER_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
MCLBYTES * NDIS_MAXSEG, NDIS_MAXSEG, MCLBYTES, BUS_DMA_ALLOCNOW,
NULL, NULL, &sc->ndis_ttag);
sc->ndis_sc = 1;
return (NDIS_STATUS_SUCCESS);
}
void
NdisAllocatePacketPool(status, pool, descnum, protrsvdlen)
ndis_status *status;
ndis_handle *pool;
uint32_t descnum;
uint32_t protrsvdlen;
{
ndis_packet_pool *p;
ndis_packet *packets;
int i;
p = ExAllocatePoolWithTag(NonPagedPool, sizeof(ndis_packet_pool), 0);
if (p == NULL) {
*status = NDIS_STATUS_RESOURCES;
return;
}
p->np_cnt = descnum + NDIS_POOL_EXTRA;
p->np_protrsvd = protrsvdlen;
p->np_len = sizeof(ndis_packet) + protrsvdlen;
packets = ExAllocatePoolWithTag(NonPagedPool, p->np_cnt *
p->np_len, 0);
if (packets == NULL) {
ExFreePool(p);
*status = NDIS_STATUS_RESOURCES;
return;
}
p->np_pktmem = packets;
for (i = 0; i < p->np_cnt; i++)
InterlockedPushEntrySList(&p->np_head,
(struct slist_entry *)&packets[i]);
#ifdef NDIS_DEBUG_PACKETS
p->np_dead = 0;
KeInitializeSpinLock(&p->np_lock);
KeInitializeEvent(&p->np_event, EVENT_TYPE_NOTIFY, TRUE);
#endif
*pool = p;
*status = NDIS_STATUS_SUCCESS;
}
void
NdisAllocatePacketPoolEx(status, pool, descnum, oflowdescnum, protrsvdlen)
ndis_status *status;
ndis_handle *pool;
uint32_t descnum;
uint32_t oflowdescnum;
uint32_t protrsvdlen;
{
return (NdisAllocatePacketPool(status, pool,
descnum + oflowdescnum, protrsvdlen));
}
uint32_t
NdisPacketPoolUsage(pool)
ndis_handle pool;
{
ndis_packet_pool *p;
p = (ndis_packet_pool *)pool;
return (p->np_cnt - ExQueryDepthSList(&p->np_head));
}
void
NdisFreePacketPool(pool)
ndis_handle pool;
{
ndis_packet_pool *p;
int usage;
#ifdef NDIS_DEBUG_PACKETS
uint8_t irql;
#endif
p = (ndis_packet_pool *)pool;
#ifdef NDIS_DEBUG_PACKETS
KeAcquireSpinLock(&p->np_lock, &irql);
#endif
usage = NdisPacketPoolUsage(pool);
#ifdef NDIS_DEBUG_PACKETS
if (usage) {
p->np_dead = 1;
KeResetEvent(&p->np_event);
KeReleaseSpinLock(&p->np_lock, irql);
KeWaitForSingleObject(&p->np_event, 0, 0, FALSE, NULL);
} else
KeReleaseSpinLock(&p->np_lock, irql);
#endif
ExFreePool(p->np_pktmem);
ExFreePool(p);
}
void
NdisAllocatePacket(status, packet, pool)
ndis_status *status;
ndis_packet **packet;
ndis_handle pool;
{
ndis_packet_pool *p;
ndis_packet *pkt;
#ifdef NDIS_DEBUG_PACKETS
uint8_t irql;
#endif
p = (ndis_packet_pool *)pool;
#ifdef NDIS_DEBUG_PACKETS
KeAcquireSpinLock(&p->np_lock, &irql);
if (p->np_dead) {
KeReleaseSpinLock(&p->np_lock, irql);
printf("NDIS: tried to allocate packet from dead pool %p\n",
pool);
*status = NDIS_STATUS_RESOURCES;
return;
}
#endif
pkt = (ndis_packet *)InterlockedPopEntrySList(&p->np_head);
#ifdef NDIS_DEBUG_PACKETS
KeReleaseSpinLock(&p->np_lock, irql);
#endif
if (pkt == NULL) {
*status = NDIS_STATUS_RESOURCES;
return;
}
bzero((char *)pkt, sizeof(ndis_packet));
/* Save pointer to the pool. */
pkt->np_private.npp_pool = pool;
/* Set the oob offset pointer. Lots of things expect this. */
pkt->np_private.npp_packetooboffset = offsetof(ndis_packet, np_oob);
/*
* We must initialize the packet flags correctly in order
* for the NDIS_SET_PACKET_MEDIA_SPECIFIC_INFO() and
* NDIS_GET_PACKET_MEDIA_SPECIFIC_INFO() macros to work
* correctly.
*/
pkt->np_private.npp_ndispktflags = NDIS_PACKET_ALLOCATED_BY_NDIS;
pkt->np_private.npp_validcounts = FALSE;
*packet = pkt;
*status = NDIS_STATUS_SUCCESS;
}
void
NdisFreePacket(packet)
ndis_packet *packet;
{
ndis_packet_pool *p;
#ifdef NDIS_DEBUG_PACKETS
uint8_t irql;
#endif
p = (ndis_packet_pool *)packet->np_private.npp_pool;
#ifdef NDIS_DEBUG_PACKETS
KeAcquireSpinLock(&p->np_lock, &irql);
#endif
InterlockedPushEntrySList(&p->np_head, (slist_entry *)packet);
#ifdef NDIS_DEBUG_PACKETS
if (p->np_dead) {
if (ExQueryDepthSList(&p->np_head) == p->np_cnt)
KeSetEvent(&p->np_event, IO_NO_INCREMENT, FALSE);
}
KeReleaseSpinLock(&p->np_lock, irql);
#endif
}
static void
NdisUnchainBufferAtFront(packet, buf)
ndis_packet *packet;
ndis_buffer **buf;
{
ndis_packet_private *priv;
if (packet == NULL || buf == NULL)
return;
priv = &packet->np_private;
priv->npp_validcounts = FALSE;
if (priv->npp_head == priv->npp_tail) {
*buf = priv->npp_head;
priv->npp_head = priv->npp_tail = NULL;
} else {
*buf = priv->npp_head;
priv->npp_head = (*buf)->mdl_next;
}
}
static void
NdisUnchainBufferAtBack(packet, buf)
ndis_packet *packet;
ndis_buffer **buf;
{
ndis_packet_private *priv;
ndis_buffer *tmp;
if (packet == NULL || buf == NULL)
return;
priv = &packet->np_private;
priv->npp_validcounts = FALSE;
if (priv->npp_head == priv->npp_tail) {
*buf = priv->npp_head;
priv->npp_head = priv->npp_tail = NULL;
} else {
*buf = priv->npp_tail;
tmp = priv->npp_head;
while (tmp->mdl_next != priv->npp_tail)
tmp = tmp->mdl_next;
priv->npp_tail = tmp;
tmp->mdl_next = NULL;
}
}
/*
* The NDIS "buffer" is really an MDL (memory descriptor list)
* which is used to describe a buffer in a way that allows it
* to be mapped into different contexts. We have to be careful how
* we handle them: in some versions of Windows, the NdisFreeBuffer()
* routine is an actual function in the NDIS API, but in others
* it's just a macro wrapper around IoFreeMdl(). There's really
* no way to use the 'descnum' parameter to count how many
* "buffers" are allocated since in order to use IoFreeMdl() to
* dispose of a buffer, we have to use IoAllocateMdl() to allocate
* them, and IoAllocateMdl() just grabs them out of the heap.
*/
static void
NdisAllocateBufferPool(status, pool, descnum)
ndis_status *status;
ndis_handle *pool;
uint32_t descnum;
{
/*
* The only thing we can really do here is verify that descnum
* is a reasonable value, but I really don't know what to check
* it against.
*/
*pool = NonPagedPool;
*status = NDIS_STATUS_SUCCESS;
}
static void
NdisFreeBufferPool(pool)
ndis_handle pool;
{
}
static void
NdisAllocateBuffer(status, buffer, pool, vaddr, len)
ndis_status *status;
ndis_buffer **buffer;
ndis_handle pool;
void *vaddr;
uint32_t len;
{
ndis_buffer *buf;
buf = IoAllocateMdl(vaddr, len, FALSE, FALSE, NULL);
if (buf == NULL) {
*status = NDIS_STATUS_RESOURCES;
return;
}
MmBuildMdlForNonPagedPool(buf);
*buffer = buf;
*status = NDIS_STATUS_SUCCESS;
}
static void
NdisFreeBuffer(buf)
ndis_buffer *buf;
{
IoFreeMdl(buf);
}
/* Aw c'mon. */
static uint32_t
NdisBufferLength(buf)
ndis_buffer *buf;
{
return (MmGetMdlByteCount(buf));
}
/*
* Get the virtual address and length of a buffer.
* Note: the vaddr argument is optional.
*/
static void
NdisQueryBuffer(buf, vaddr, len)
ndis_buffer *buf;
void **vaddr;
uint32_t *len;
{
if (vaddr != NULL)
*vaddr = MmGetMdlVirtualAddress(buf);
*len = MmGetMdlByteCount(buf);
}
/* Same as above -- we don't care about the priority. */
static void
NdisQueryBufferSafe(buf, vaddr, len, prio)
ndis_buffer *buf;
void **vaddr;
uint32_t *len;
uint32_t prio;
{
if (vaddr != NULL)
*vaddr = MmGetMdlVirtualAddress(buf);
*len = MmGetMdlByteCount(buf);
}
/* Damnit Microsoft!! How many ways can you do the same thing?! */
static void *
NdisBufferVirtualAddress(buf)
ndis_buffer *buf;
{
return (MmGetMdlVirtualAddress(buf));
}
static void *
NdisBufferVirtualAddressSafe(buf, prio)
ndis_buffer *buf;
uint32_t prio;
{
return (MmGetMdlVirtualAddress(buf));
}
static void
NdisAdjustBufferLength(buf, len)
ndis_buffer *buf;
int len;
{
MmGetMdlByteCount(buf) = len;
}
static uint32_t
NdisInterlockedIncrement(addend)
uint32_t *addend;
{
atomic_add_long((u_long *)addend, 1);
return (*addend);
}
static uint32_t
NdisInterlockedDecrement(addend)
uint32_t *addend;
{
atomic_subtract_long((u_long *)addend, 1);
return (*addend);
}
static uint32_t
NdisGetVersion(void)
{
return (0x00050001);
}
static void
NdisInitializeEvent(event)
ndis_event *event;
{
/*
* NDIS events are always notification
* events, and should be initialized to the
* not signaled state.
*/
KeInitializeEvent(&event->ne_event, EVENT_TYPE_NOTIFY, FALSE);
}
static void
NdisSetEvent(event)
ndis_event *event;
{
KeSetEvent(&event->ne_event, IO_NO_INCREMENT, FALSE);
}
static void
NdisResetEvent(event)
ndis_event *event;
{
KeResetEvent(&event->ne_event);
}
static uint8_t
NdisWaitEvent(event, msecs)
ndis_event *event;
uint32_t msecs;
{
int64_t duetime;
uint32_t rval;
duetime = ((int64_t)msecs * -10000);
rval = KeWaitForSingleObject(event,
0, 0, TRUE, msecs ? & duetime : NULL);
if (rval == STATUS_TIMEOUT)
return (FALSE);
return (TRUE);
}
static ndis_status
NdisUnicodeStringToAnsiString(dstr, sstr)
ansi_string *dstr;
unicode_string *sstr;
{
uint32_t rval;
rval = RtlUnicodeStringToAnsiString(dstr, sstr, FALSE);
if (rval == STATUS_INSUFFICIENT_RESOURCES)
return (NDIS_STATUS_RESOURCES);
if (rval)
return (NDIS_STATUS_FAILURE);
return (NDIS_STATUS_SUCCESS);
}
static ndis_status
NdisAnsiStringToUnicodeString(dstr, sstr)
unicode_string *dstr;
ansi_string *sstr;
{
uint32_t rval;
rval = RtlAnsiStringToUnicodeString(dstr, sstr, FALSE);
if (rval == STATUS_INSUFFICIENT_RESOURCES)
return (NDIS_STATUS_RESOURCES);
if (rval)
return (NDIS_STATUS_FAILURE);
return (NDIS_STATUS_SUCCESS);
}
static ndis_status
NdisMPciAssignResources(adapter, slot, list)
ndis_handle adapter;
uint32_t slot;
ndis_resource_list **list;
{
ndis_miniport_block *block;
if (adapter == NULL || list == NULL)
return (NDIS_STATUS_FAILURE);
block = (ndis_miniport_block *)adapter;
*list = block->nmb_rlist;
return (NDIS_STATUS_SUCCESS);
}
static uint8_t
ndis_intr(iobj, arg)
kinterrupt *iobj;
void *arg;
{
struct ndis_softc *sc;
uint8_t is_our_intr = FALSE;
int call_isr = 0;
ndis_miniport_interrupt *intr;
sc = arg;
intr = sc->ndis_block->nmb_interrupt;
if (intr == NULL || sc->ndis_block->nmb_miniportadapterctx == NULL)
return (FALSE);
if (sc->ndis_block->nmb_interrupt->ni_isrreq == TRUE)
MSCALL3(intr->ni_isrfunc, &is_our_intr, &call_isr,
sc->ndis_block->nmb_miniportadapterctx);
else {
MSCALL1(sc->ndis_chars->nmc_disable_interrupts_func,
sc->ndis_block->nmb_miniportadapterctx);
call_isr = 1;
}
if (call_isr)
IoRequestDpc(sc->ndis_block->nmb_deviceobj, NULL, sc);
return (is_our_intr);
}
static void
ndis_intrhand(dpc, intr, sysarg1, sysarg2)
kdpc *dpc;
ndis_miniport_interrupt *intr;
void *sysarg1;
void *sysarg2;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
ndis_handle adapter;
block = intr->ni_block;
adapter = block->nmb_miniportadapterctx;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
if (NDIS_SERIALIZED(sc->ndis_block))
KeAcquireSpinLockAtDpcLevel(&block->nmb_lock);
MSCALL1(intr->ni_dpcfunc, adapter);
/* If there's a MiniportEnableInterrupt() routine, call it. */
if (sc->ndis_chars->nmc_enable_interrupts_func != NULL)
MSCALL1(sc->ndis_chars->nmc_enable_interrupts_func, adapter);
if (NDIS_SERIALIZED(sc->ndis_block))
KeReleaseSpinLockFromDpcLevel(&block->nmb_lock);
/*
* Set the completion event if we've drained all
* pending interrupts.
*/
KeAcquireSpinLockAtDpcLevel(&intr->ni_dpccountlock);
intr->ni_dpccnt--;
if (intr->ni_dpccnt == 0)
KeSetEvent(&intr->ni_dpcevt, IO_NO_INCREMENT, FALSE);
KeReleaseSpinLockFromDpcLevel(&intr->ni_dpccountlock);
}
static ndis_status
NdisMRegisterInterrupt(ndis_miniport_interrupt *intr, ndis_handle adapter,
uint32_t ivec, uint32_t ilevel, uint8_t reqisr, uint8_t shared,
ndis_interrupt_mode imode)
{
ndis_miniport_block *block;
ndis_miniport_characteristics *ch;
struct ndis_softc *sc;
int error;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ch = IoGetDriverObjectExtension(block->nmb_deviceobj->do_drvobj,
(void *)1);
intr->ni_rsvd = ExAllocatePoolWithTag(NonPagedPool,
sizeof(struct mtx), 0);
if (intr->ni_rsvd == NULL)
return (NDIS_STATUS_RESOURCES);
intr->ni_block = adapter;
intr->ni_isrreq = reqisr;
intr->ni_shared = shared;
intr->ni_dpccnt = 0;
intr->ni_isrfunc = ch->nmc_isr_func;
intr->ni_dpcfunc = ch->nmc_interrupt_func;
KeInitializeEvent(&intr->ni_dpcevt, EVENT_TYPE_NOTIFY, TRUE);
KeInitializeDpc(&intr->ni_dpc,
ndis_findwrap((funcptr)ndis_intrhand), intr);
KeSetImportanceDpc(&intr->ni_dpc, KDPC_IMPORTANCE_LOW);
error = IoConnectInterrupt(&intr->ni_introbj,
ndis_findwrap((funcptr)ndis_intr), sc, NULL,
ivec, ilevel, 0, imode, shared, 0, FALSE);
if (error != STATUS_SUCCESS)
return (NDIS_STATUS_FAILURE);
block->nmb_interrupt = intr;
return (NDIS_STATUS_SUCCESS);
}
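/*
* Tear down the interrupt registered above: detach the interrupt from
* the miniport block, disconnect the ISR and wait for any outstanding
* interrupt DPCs to drain before returning.
*/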
static void
NdisMDeregisterInterrupt(intr)
ndis_miniport_interrupt *intr;
{
ndis_miniport_block *block;
uint8_t irql;
block = intr->ni_block;
/* Should really be KeSynchronizeExecution() */
KeAcquireSpinLock(intr->ni_introbj->ki_lock, &irql);
block->nmb_interrupt = NULL;
KeReleaseSpinLock(intr->ni_introbj->ki_lock, irql);
/*
KeFlushQueuedDpcs();
*/
/* Disconnect our ISR */
IoDisconnectInterrupt(intr->ni_introbj);
KeWaitForSingleObject(&intr->ni_dpcevt, 0, 0, FALSE, NULL);
KeResetEvent(&intr->ni_dpcevt);
}
static void
NdisMRegisterAdapterShutdownHandler(adapter, shutdownctx, shutdownfunc)
ndis_handle adapter;
void *shutdownctx;
ndis_shutdown_handler shutdownfunc;
{
ndis_miniport_block *block;
ndis_miniport_characteristics *chars;
struct ndis_softc *sc;
if (adapter == NULL)
return;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
chars = sc->ndis_chars;
chars->nmc_shutdown_handler = shutdownfunc;
chars->nmc_rsvd0 = shutdownctx;
}
static void
NdisMDeregisterAdapterShutdownHandler(adapter)
ndis_handle adapter;
{
ndis_miniport_block *block;
ndis_miniport_characteristics *chars;
struct ndis_softc *sc;
if (adapter == NULL)
return;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
chars = sc->ndis_chars;
chars->nmc_shutdown_handler = NULL;
chars->nmc_rsvd0 = NULL;
}
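/*
* Return the number of physical pages spanned by an NDIS buffer (MDL).
* A zero-length buffer is counted as spanning a single page.
*/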
static uint32_t
NDIS_BUFFER_TO_SPAN_PAGES(buf)
ndis_buffer *buf;
{
if (buf == NULL)
return (0);
if (MmGetMdlByteCount(buf) == 0)
return (1);
return (SPAN_PAGES(MmGetMdlVirtualAddress(buf),
MmGetMdlByteCount(buf)));
}
static void
NdisGetBufferPhysicalArraySize(buf, pages)
ndis_buffer *buf;
uint32_t *pages;
{
if (buf == NULL)
return;
*pages = NDIS_BUFFER_TO_SPAN_PAGES(buf);
}
static void
NdisQueryBufferOffset(buf, off, len)
ndis_buffer *buf;
uint32_t *off;
uint32_t *len;
{
if (buf == NULL)
return;
*off = MmGetMdlByteOffset(buf);
*len = MmGetMdlByteCount(buf);
}
void
NdisMSleep(usecs)
uint32_t usecs;
{
ktimer timer;
/*
* During system bootstrap (i.e. cold == 1), we aren't
* allowed to sleep, so we have to do a hard DELAY()
* instead.
*/
if (cold)
DELAY(usecs);
else {
KeInitializeTimer(&timer);
KeSetTimer(&timer, ((int64_t)usecs * -10), NULL);
KeWaitForSingleObject(&timer, 0, 0, FALSE, NULL);
}
}
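/*
* Copy len bytes of PCMCIA attribute memory into buf. Attribute memory
* is only valid at even addresses, hence the doubling of the offset.
*/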
static uint32_t
NdisReadPcmciaAttributeMemory(handle, offset, buf, len)
ndis_handle handle;
uint32_t offset;
void *buf;
uint32_t len;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
bus_space_handle_t bh;
bus_space_tag_t bt;
char *dest;
uint32_t i;
if (handle == NULL)
return (0);
block = (ndis_miniport_block *)handle;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
dest = buf;
bh = rman_get_bushandle(sc->ndis_res_am);
bt = rman_get_bustag(sc->ndis_res_am);
for (i = 0; i < len; i++)
dest[i] = bus_space_read_1(bt, bh, (offset + i) * 2);
return (i);
}
static uint32_t
NdisWritePcmciaAttributeMemory(handle, offset, buf, len)
ndis_handle handle;
uint32_t offset;
void *buf;
uint32_t len;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
bus_space_handle_t bh;
bus_space_tag_t bt;
char *src;
uint32_t i;
if (handle == NULL)
return (0);
block = (ndis_miniport_block *)handle;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
src = buf;
bh = rman_get_bushandle(sc->ndis_res_am);
bt = rman_get_bustag(sc->ndis_res_am);
for (i = 0; i < len; i++)
bus_space_write_1(bt, bh, (offset + i) * 2, src[i]);
return (i);
}
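/*
* Interlocked doubly-linked list helpers: insert or remove entries at
* the head or tail of a list while holding the supplied NDIS spinlock.
*/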
static list_entry *
NdisInterlockedInsertHeadList(head, entry, lock)
list_entry *head;
list_entry *entry;
ndis_spin_lock *lock;
{
list_entry *flink;
KeAcquireSpinLock(&lock->nsl_spinlock, &lock->nsl_kirql);
flink = head->nle_flink;
entry->nle_flink = flink;
entry->nle_blink = head;
flink->nle_blink = entry;
head->nle_flink = entry;
KeReleaseSpinLock(&lock->nsl_spinlock, lock->nsl_kirql);
return (flink);
}
static list_entry *
NdisInterlockedRemoveHeadList(head, lock)
list_entry *head;
ndis_spin_lock *lock;
{
list_entry *flink;
list_entry *entry;
KeAcquireSpinLock(&lock->nsl_spinlock, &lock->nsl_kirql);
entry = head->nle_flink;
flink = entry->nle_flink;
head->nle_flink = flink;
flink->nle_blink = head;
KeReleaseSpinLock(&lock->nsl_spinlock, lock->nsl_kirql);
return (entry);
}
static list_entry *
NdisInterlockedInsertTailList(head, entry, lock)
list_entry *head;
list_entry *entry;
ndis_spin_lock *lock;
{
list_entry *blink;
KeAcquireSpinLock(&lock->nsl_spinlock, &lock->nsl_kirql);
blink = head->nle_blink;
entry->nle_flink = head;
entry->nle_blink = blink;
blink->nle_flink = entry;
head->nle_blink = entry;
KeReleaseSpinLock(&lock->nsl_spinlock, lock->nsl_kirql);
return (blink);
}
static uint8_t
NdisMSynchronizeWithInterrupt(intr, syncfunc, syncctx)
ndis_miniport_interrupt *intr;
void *syncfunc;
void *syncctx;
{
return (KeSynchronizeExecution(intr->ni_introbj, syncfunc, syncctx));
}
static void
NdisGetCurrentSystemTime(tval)
uint64_t *tval;
{
ntoskrnl_time(tval);
}
/*
* Return the number of milliseconds since the system booted.
*/
static void
NdisGetSystemUpTime(tval)
uint32_t *tval;
{
struct timespec ts;
nanouptime(&ts);
*tval = ts.tv_nsec / 1000000 + ts.tv_sec * 1000;
}
static void
NdisInitializeString(dst, src)
unicode_string *dst;
char *src;
{
ansi_string as;
RtlInitAnsiString(&as, src);
RtlAnsiStringToUnicodeString(dst, &as, TRUE);
}
static void
NdisFreeString(str)
unicode_string *str;
{
RtlFreeUnicodeString(str);
}
static ndis_status
NdisMRemoveMiniport(adapter)
ndis_handle *adapter;
{
return (NDIS_STATUS_SUCCESS);
}
static void
NdisInitAnsiString(dst, src)
ansi_string *dst;
char *src;
{
RtlInitAnsiString(dst, src);
}
static void
NdisInitUnicodeString(dst, src)
unicode_string *dst;
uint16_t *src;
{
RtlInitUnicodeString(dst, src);
}
static void
NdisMGetDeviceProperty(adapter, phydevobj,
funcdevobj, nextdevobj, resources, transresources)
ndis_handle adapter;
device_object **phydevobj;
device_object **funcdevobj;
device_object **nextdevobj;
cm_resource_list *resources;
cm_resource_list *transresources;
{
ndis_miniport_block *block;
block = (ndis_miniport_block *)adapter;
if (phydevobj != NULL)
*phydevobj = block->nmb_physdeviceobj;
if (funcdevobj != NULL)
*funcdevobj = block->nmb_deviceobj;
if (nextdevobj != NULL)
*nextdevobj = block->nmb_nextdeviceobj;
}
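/*
* Return the first buffer (MDL) of a packet along with its virtual
* address and length, plus the total length of the entire buffer chain.
*/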
static void
NdisGetFirstBufferFromPacket(packet, buf, firstva, firstlen, totlen)
ndis_packet *packet;
ndis_buffer **buf;
void **firstva;
uint32_t *firstlen;
uint32_t *totlen;
{
ndis_buffer *tmp;
tmp = packet->np_private.npp_head;
*buf = tmp;
if (tmp == NULL) {
*firstva = NULL;
*firstlen = *totlen = 0;
} else {
*firstva = MmGetMdlVirtualAddress(tmp);
*firstlen = *totlen = MmGetMdlByteCount(tmp);
for (tmp = tmp->mdl_next; tmp != NULL; tmp = tmp->mdl_next)
*totlen += MmGetMdlByteCount(tmp);
}
}
static void
NdisGetFirstBufferFromPacketSafe(packet, buf, firstva, firstlen, totlen, prio)
ndis_packet *packet;
ndis_buffer **buf;
void **firstva;
uint32_t *firstlen;
uint32_t *totlen;
uint32_t prio;
{
NdisGetFirstBufferFromPacket(packet, buf, firstva, firstlen, totlen);
}
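/*
* Look up a symbol derived from a filename in a KLD module: strip any
* trailing .ko, map dots to underscores, lower-case the rest and append
* the given suffix before consulting the module's symbol table.
*/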
static int
ndis_find_sym(lf, filename, suffix, sym)
linker_file_t lf;
char *filename;
char *suffix;
caddr_t *sym;
{
char *fullsym;
char *suf;
u_int i;
fullsym = ExAllocatePoolWithTag(NonPagedPool, MAXPATHLEN, 0);
if (fullsym == NULL)
return (ENOMEM);
bzero(fullsym, MAXPATHLEN);
strncpy(fullsym, filename, MAXPATHLEN);
if (strlen(filename) < 4) {
ExFreePool(fullsym);
return (EINVAL);
}
/* If the filename has a .ko suffix, strip it off. */
suf = fullsym + (strlen(filename) - 3);
if (strcmp(suf, ".ko") == 0)
*suf = '\0';
for (i = 0; i < strlen(fullsym); i++) {
if (fullsym[i] == '.')
fullsym[i] = '_';
else
fullsym[i] = tolower(fullsym[i]);
}
strcat(fullsym, suffix);
*sym = linker_file_lookup_symbol(lf, fullsym, 0);
ExFreePool(fullsym);
if (*sym == 0)
return (ENOENT);
return (0);
}
struct ndis_checkmodule {
char *afilename;
ndis_fh *fh;
};
/*
* See if a single module contains the symbols for a specified file.
*/
static int
NdisCheckModule(linker_file_t lf, void *context)
{
struct ndis_checkmodule *nc;
caddr_t kldstart, kldend;
nc = (struct ndis_checkmodule *)context;
if (ndis_find_sym(lf, nc->afilename, "_start", &kldstart))
return (0);
if (ndis_find_sym(lf, nc->afilename, "_end", &kldend))
return (0);
nc->fh->nf_vp = lf;
nc->fh->nf_map = NULL;
nc->fh->nf_type = NDIS_FH_TYPE_MODULE;
nc->fh->nf_maplen = (kldend - kldstart) & 0xFFFFFFFF;
return (1);
}
/* NdisOpenFile() can also return NDIS_STATUS_RESOURCES/NDIS_STATUS_ERROR_READING_FILE. */
static void
NdisOpenFile(status, filehandle, filelength, filename, highestaddr)
ndis_status *status;
ndis_handle *filehandle;
uint32_t *filelength;
unicode_string *filename;
ndis_physaddr highestaddr;
{
ansi_string as;
char *afilename = NULL;
struct thread *td = curthread;
struct nameidata nd;
int flags, error;
struct vattr vat;
struct vattr *vap = &vat;
ndis_fh *fh;
char *path;
struct ndis_checkmodule nc;
if (RtlUnicodeStringToAnsiString(&as, filename, TRUE)) {
*status = NDIS_STATUS_RESOURCES;
return;
}
afilename = strdup(as.as_buf, M_DEVBUF);
RtlFreeAnsiString(&as);
fh = ExAllocatePoolWithTag(NonPagedPool, sizeof(ndis_fh), 0);
if (fh == NULL) {
free(afilename, M_DEVBUF);
*status = NDIS_STATUS_RESOURCES;
return;
}
fh->nf_name = afilename;
/*
* During system bootstrap, it's impossible to load files
* from the rootfs since it's not mounted yet. We therefore
* offer the possibility of opening files that have been
* preloaded as modules instead. Both choices will work
* when kldloading a module from multiuser, but only the
* module option will work during bootstrap. The module
* loading option works by using the ndiscvt(8) utility
* to convert the arbitrary file into a .ko using objcopy(1).
* This file will contain two special symbols: filename_start
* and filename_end. All we have to do is traverse the KLD
* list in search of those symbols and we've found the file
* data. As an added bonus, ndiscvt(8) will also generate
* a normal .o file which can be linked statically with
* the kernel. This means that the symbols will actually reside
* in the kernel's symbol table, but that doesn't matter to
* us since the kernel appears to us as just another module.
*/
nc.afilename = afilename;
nc.fh = fh;
if (linker_file_foreach(NdisCheckModule, &nc)) {
*filelength = fh->nf_maplen;
*filehandle = fh;
*status = NDIS_STATUS_SUCCESS;
return;
}
if (TAILQ_EMPTY(&mountlist)) {
ExFreePool(fh);
*status = NDIS_STATUS_FILE_NOT_FOUND;
printf("NDIS: could not find file %s in linker list\n",
afilename);
printf("NDIS: and no filesystems mounted yet, "
"aborting NdisOpenFile()\n");
free(afilename, M_DEVBUF);
return;
}
path = ExAllocatePoolWithTag(NonPagedPool, MAXPATHLEN, 0);
if (path == NULL) {
ExFreePool(fh);
free(afilename, M_DEVBUF);
*status = NDIS_STATUS_RESOURCES;
return;
}
snprintf(path, MAXPATHLEN, "%s/%s", ndis_filepath, afilename);
/* Some threads don't have a current working directory. */
pwd_ensure_dirs();
NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
flags = FREAD;
error = vn_open(&nd, &flags, 0, NULL);
if (error) {
*status = NDIS_STATUS_FILE_NOT_FOUND;
ExFreePool(fh);
printf("NDIS: open file %s failed: %d\n", path, error);
ExFreePool(path);
free(afilename, M_DEVBUF);
return;
}
ExFreePool(path);
NDFREE(&nd, NDF_ONLY_PNBUF);
/* Get the file size. */
VOP_GETATTR(nd.ni_vp, vap, td->td_ucred);
VOP_UNLOCK(nd.ni_vp, 0);
fh->nf_vp = nd.ni_vp;
fh->nf_map = NULL;
fh->nf_type = NDIS_FH_TYPE_VFS;
*filehandle = fh;
*filelength = fh->nf_maplen = vap->va_size & 0xFFFFFFFF;
*status = NDIS_STATUS_SUCCESS;
}
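/*
* Map an opened file into memory. Module-backed files are used in place
* via their filename_start symbol; VFS-backed files are read into a
* freshly allocated buffer.
*/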
static void
NdisMapFile(status, mappedbuffer, filehandle)
ndis_status *status;
void **mappedbuffer;
ndis_handle filehandle;
{
ndis_fh *fh;
struct thread *td = curthread;
linker_file_t lf;
caddr_t kldstart;
int error;
ssize_t resid;
struct vnode *vp;
if (filehandle == NULL) {
*status = NDIS_STATUS_FAILURE;
return;
}
fh = (ndis_fh *)filehandle;
if (fh->nf_vp == NULL) {
*status = NDIS_STATUS_FAILURE;
return;
}
if (fh->nf_map != NULL) {
*status = NDIS_STATUS_ALREADY_MAPPED;
return;
}
if (fh->nf_type == NDIS_FH_TYPE_MODULE) {
lf = fh->nf_vp;
if (ndis_find_sym(lf, fh->nf_name, "_start", &kldstart)) {
*status = NDIS_STATUS_FAILURE;
return;
}
fh->nf_map = kldstart;
*status = NDIS_STATUS_SUCCESS;
*mappedbuffer = fh->nf_map;
return;
}
fh->nf_map = ExAllocatePoolWithTag(NonPagedPool, fh->nf_maplen, 0);
if (fh->nf_map == NULL) {
*status = NDIS_STATUS_RESOURCES;
return;
}
vp = fh->nf_vp;
error = vn_rdwr(UIO_READ, vp, fh->nf_map, fh->nf_maplen, 0,
UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
if (error)
*status = NDIS_STATUS_FAILURE;
else {
*status = NDIS_STATUS_SUCCESS;
*mappedbuffer = fh->nf_map;
}
}
static void
NdisUnmapFile(filehandle)
ndis_handle filehandle;
{
ndis_fh *fh;
fh = (ndis_fh *)filehandle;
if (fh->nf_map == NULL)
return;
if (fh->nf_type == NDIS_FH_TYPE_VFS)
ExFreePool(fh->nf_map);
fh->nf_map = NULL;
}
static void
NdisCloseFile(filehandle)
ndis_handle filehandle;
{
struct thread *td = curthread;
ndis_fh *fh;
struct vnode *vp;
if (filehandle == NULL)
return;
fh = (ndis_fh *)filehandle;
if (fh->nf_map != NULL) {
if (fh->nf_type == NDIS_FH_TYPE_VFS)
ExFreePool(fh->nf_map);
fh->nf_map = NULL;
}
if (fh->nf_vp == NULL)
return;
if (fh->nf_type == NDIS_FH_TYPE_VFS) {
vp = fh->nf_vp;
vn_close(vp, FREAD, td->td_ucred, td);
}
fh->nf_vp = NULL;
free(fh->nf_name, M_DEVBUF);
ExFreePool(fh);
}
static uint8_t
NdisSystemProcessorCount()
{
return (mp_ncpus);
}
static void
NdisGetCurrentProcessorCounts(idle_count, kernel_and_user, index)
uint32_t *idle_count;
uint32_t *kernel_and_user;
uint32_t *index;
{
struct pcpu *pcpu;
pcpu = pcpu_find(curthread->td_oncpu);
*index = pcpu->pc_cpuid;
*idle_count = pcpu->pc_cp_time[CP_IDLE];
*kernel_and_user = pcpu->pc_cp_time[CP_INTR];
}
typedef void (*ndis_statusdone_handler)(ndis_handle);
typedef void (*ndis_status_handler)(ndis_handle, ndis_status,
void *, uint32_t);
static void
NdisMIndicateStatusComplete(adapter)
ndis_handle adapter;
{
ndis_miniport_block *block;
ndis_statusdone_handler statusdonefunc;
block = (ndis_miniport_block *)adapter;
statusdonefunc = block->nmb_statusdone_func;
MSCALL1(statusdonefunc, adapter);
}
static void
NdisMIndicateStatus(adapter, status, sbuf, slen)
ndis_handle adapter;
ndis_status status;
void *sbuf;
uint32_t slen;
{
ndis_miniport_block *block;
ndis_status_handler statusfunc;
block = (ndis_miniport_block *)adapter;
statusfunc = block->nmb_status_func;
MSCALL4(statusfunc, adapter, status, sbuf, slen);
}
/*
* The DDK documentation says that you should use IoQueueWorkItem()
* instead of ExQueueWorkItem(). The problem is, IoQueueWorkItem()
* is fundamentally incompatible with NdisScheduleWorkItem(), which
* depends on the API semantics of ExQueueWorkItem(). In our world,
* ExQueueWorkItem() is implemented on top of IoAllocateQueueItem()
* anyway.
*
* There are actually three distinct APIs here. NdisScheduleWorkItem()
* takes a pointer to an NDIS_WORK_ITEM. ExQueueWorkItem() takes a pointer
* to a WORK_QUEUE_ITEM. And finally, IoQueueWorkItem() takes a pointer
* to an opaque work item thingie which you get from IoAllocateWorkItem().
* An NDIS_WORK_ITEM is not the same as a WORK_QUEUE_ITEM. However,
* the NDIS_WORK_ITEM has some opaque storage at the end of it, and we
* (ab)use this storage as a WORK_QUEUE_ITEM, which is what we submit
* to ExQueueWorkItem().
*
* Got all that? (Sheesh.)
*/
ndis_status
NdisScheduleWorkItem(work)
ndis_work_item *work;
{
work_queue_item *wqi;
wqi = (work_queue_item *)work->nwi_wraprsvd;
ExInitializeWorkItem(wqi,
(work_item_func)work->nwi_func, work->nwi_ctx);
ExQueueWorkItem(wqi, WORKQUEUE_DELAYED);
return (NDIS_STATUS_SUCCESS);
}
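/*
* Copy up to reqlen bytes from the source packet's buffer chain,
* starting at soff, into the destination packet's chain, starting at
* doff, walking the MDLs on both sides; the number of bytes actually
* copied is returned in *cpylen.
*/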
static void
NdisCopyFromPacketToPacket(dpkt, doff, reqlen, spkt, soff, cpylen)
ndis_packet *dpkt;
uint32_t doff;
uint32_t reqlen;
ndis_packet *spkt;
uint32_t soff;
uint32_t *cpylen;
{
ndis_buffer *src, *dst;
char *sptr, *dptr;
int resid, copied, len, scnt, dcnt;
*cpylen = 0;
src = spkt->np_private.npp_head;
dst = dpkt->np_private.npp_head;
sptr = MmGetMdlVirtualAddress(src);
dptr = MmGetMdlVirtualAddress(dst);
scnt = MmGetMdlByteCount(src);
dcnt = MmGetMdlByteCount(dst);
while (soff) {
if (MmGetMdlByteCount(src) > soff) {
sptr += soff;
scnt = MmGetMdlByteCount(src) - soff;
break;
}
soff -= MmGetMdlByteCount(src);
src = src->mdl_next;
if (src == NULL)
return;
sptr = MmGetMdlVirtualAddress(src);
}
while (doff) {
if (MmGetMdlByteCount(dst) > doff) {
dptr += doff;
dcnt = MmGetMdlByteCount(dst) - doff;
break;
}
doff -= MmGetMdlByteCount(dst);
dst = dst->mdl_next;
if (dst == NULL)
return;
dptr = MmGetMdlVirtualAddress(dst);
}
resid = reqlen;
copied = 0;
while (1) {
if (resid < scnt)
len = resid;
else
len = scnt;
if (dcnt < len)
len = dcnt;
bcopy(sptr, dptr, len);
copied += len;
resid -= len;
if (resid == 0)
break;
dcnt -= len;
if (dcnt == 0) {
dst = dst->mdl_next;
if (dst == NULL)
break;
dptr = MmGetMdlVirtualAddress(dst);
dcnt = MmGetMdlByteCount(dst);
}
scnt -= len;
if (scnt == 0) {
src = src->mdl_next;
if (src == NULL)
break;
sptr = MmGetMdlVirtualAddress(src);
scnt = MmGetMdlByteCount(src);
}
}
*cpylen = copied;
}
static void
NdisCopyFromPacketToPacketSafe(dpkt, doff, reqlen, spkt, soff, cpylen, prio)
ndis_packet *dpkt;
uint32_t doff;
uint32_t reqlen;
ndis_packet *spkt;
uint32_t soff;
uint32_t *cpylen;
uint32_t prio;
{
NdisCopyFromPacketToPacket(dpkt, doff, reqlen, spkt, soff, cpylen);
}
static void
NdisIMCopySendPerPacketInfo(dpkt, spkt)
ndis_packet *dpkt;
ndis_packet *spkt;
{
memcpy(&dpkt->np_ext, &spkt->np_ext, sizeof(ndis_packet_extension));
}
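/*
* Create a device object for the miniport. Note that the symbolic link
* name and dispatch table arguments are currently ignored.
*/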
static ndis_status
NdisMRegisterDevice(handle, devname, symname, majorfuncs, devobj, devhandle)
ndis_handle handle;
unicode_string *devname;
unicode_string *symname;
driver_dispatch *majorfuncs[];
void **devobj;
ndis_handle *devhandle;
{
uint32_t status;
device_object *dobj;
status = IoCreateDevice(handle, 0, devname,
FILE_DEVICE_UNKNOWN, 0, FALSE, &dobj);
if (status == STATUS_SUCCESS) {
*devobj = dobj;
*devhandle = dobj;
}
return (status);
}
static ndis_status
NdisMDeregisterDevice(handle)
ndis_handle handle;
{
IoDeleteDevice(handle);
return (NDIS_STATUS_SUCCESS);
}
static ndis_status
NdisMQueryAdapterInstanceName(name, handle)
unicode_string *name;
ndis_handle handle;
{
ndis_miniport_block *block;
device_t dev;
ansi_string as;
block = (ndis_miniport_block *)handle;
dev = block->nmb_physdeviceobj->do_devext;
RtlInitAnsiString(&as, __DECONST(char *, device_get_nameunit(dev)));
if (RtlAnsiStringToUnicodeString(name, &as, TRUE))
return (NDIS_STATUS_RESOURCES);
return (NDIS_STATUS_SUCCESS);
}
static void
NdisMRegisterUnloadHandler(handle, func)
ndis_handle handle;
void *func;
{
}
static void
dummy()
{
printf("NDIS dummy called...\n");
}
/*
* Note: a couple of entries in this table specify the
* number of arguments as "foo + 1". These are routines
* that accept a 64-bit argument, passed by value. On
* x86, these arguments consume two longwords on the stack,
* so we lie and say there's one additional argument so
* that the wrapping routines will do the right thing.
*/
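/*
* NdisOpenFile(), for example, takes its highestaddr argument (an
* ndis_physaddr) by value and is therefore listed as "5 + 1" below.
*/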
image_patch_table ndis_functbl[] = {
IMPORT_SFUNC(NdisCopyFromPacketToPacket, 6),
IMPORT_SFUNC(NdisCopyFromPacketToPacketSafe, 7),
IMPORT_SFUNC(NdisIMCopySendPerPacketInfo, 2),
IMPORT_SFUNC(NdisScheduleWorkItem, 1),
IMPORT_SFUNC(NdisMIndicateStatusComplete, 1),
IMPORT_SFUNC(NdisMIndicateStatus, 4),
IMPORT_SFUNC(NdisSystemProcessorCount, 0),
IMPORT_SFUNC(NdisGetCurrentProcessorCounts, 3),
IMPORT_SFUNC(NdisUnchainBufferAtBack, 2),
IMPORT_SFUNC(NdisGetFirstBufferFromPacket, 5),
IMPORT_SFUNC(NdisGetFirstBufferFromPacketSafe, 6),
IMPORT_SFUNC(NdisGetBufferPhysicalArraySize, 2),
IMPORT_SFUNC(NdisMGetDeviceProperty, 6),
IMPORT_SFUNC(NdisInitAnsiString, 2),
IMPORT_SFUNC(NdisInitUnicodeString, 2),
IMPORT_SFUNC(NdisWriteConfiguration, 4),
IMPORT_SFUNC(NdisAnsiStringToUnicodeString, 2),
IMPORT_SFUNC(NdisTerminateWrapper, 2),
IMPORT_SFUNC(NdisOpenConfigurationKeyByName, 4),
IMPORT_SFUNC(NdisOpenConfigurationKeyByIndex, 5),
IMPORT_SFUNC(NdisMRemoveMiniport, 1),
IMPORT_SFUNC(NdisInitializeString, 2),
IMPORT_SFUNC(NdisFreeString, 1),
IMPORT_SFUNC(NdisGetCurrentSystemTime, 1),
IMPORT_SFUNC(NdisGetRoutineAddress, 1),
IMPORT_SFUNC(NdisGetSystemUpTime, 1),
IMPORT_SFUNC(NdisGetVersion, 0),
IMPORT_SFUNC(NdisMSynchronizeWithInterrupt, 3),
IMPORT_SFUNC(NdisMAllocateSharedMemoryAsync, 4),
IMPORT_SFUNC(NdisInterlockedInsertHeadList, 3),
IMPORT_SFUNC(NdisInterlockedInsertTailList, 3),
IMPORT_SFUNC(NdisInterlockedRemoveHeadList, 2),
IMPORT_SFUNC(NdisInitializeWrapper, 4),
IMPORT_SFUNC(NdisMRegisterMiniport, 3),
IMPORT_SFUNC(NdisAllocateMemoryWithTag, 3),
IMPORT_SFUNC(NdisAllocateMemory, 4 + 1),
IMPORT_SFUNC(NdisMSetAttributesEx, 5),
IMPORT_SFUNC(NdisCloseConfiguration, 1),
IMPORT_SFUNC(NdisReadConfiguration, 5),
IMPORT_SFUNC(NdisOpenConfiguration, 3),
IMPORT_SFUNC(NdisAcquireSpinLock, 1),
IMPORT_SFUNC(NdisReleaseSpinLock, 1),
IMPORT_SFUNC(NdisDprAcquireSpinLock, 1),
IMPORT_SFUNC(NdisDprReleaseSpinLock, 1),
IMPORT_SFUNC(NdisAllocateSpinLock, 1),
IMPORT_SFUNC(NdisInitializeReadWriteLock, 1),
IMPORT_SFUNC(NdisAcquireReadWriteLock, 3),
IMPORT_SFUNC(NdisReleaseReadWriteLock, 2),
IMPORT_SFUNC(NdisFreeSpinLock, 1),
IMPORT_SFUNC(NdisFreeMemory, 3),
IMPORT_SFUNC(NdisReadPciSlotInformation, 5),
IMPORT_SFUNC(NdisWritePciSlotInformation, 5),
IMPORT_SFUNC_MAP(NdisImmediateReadPciSlotInformation,
NdisReadPciSlotInformation, 5),
IMPORT_SFUNC_MAP(NdisImmediateWritePciSlotInformation,
NdisWritePciSlotInformation, 5),
IMPORT_CFUNC(NdisWriteErrorLogEntry, 0),
IMPORT_SFUNC(NdisMStartBufferPhysicalMapping, 6),
IMPORT_SFUNC(NdisMCompleteBufferPhysicalMapping, 3),
IMPORT_SFUNC(NdisMInitializeTimer, 4),
IMPORT_SFUNC(NdisInitializeTimer, 3),
IMPORT_SFUNC(NdisSetTimer, 2),
IMPORT_SFUNC(NdisMCancelTimer, 2),
IMPORT_SFUNC_MAP(NdisCancelTimer, NdisMCancelTimer, 2),
IMPORT_SFUNC(NdisMSetPeriodicTimer, 2),
IMPORT_SFUNC(NdisMQueryAdapterResources, 4),
IMPORT_SFUNC(NdisMRegisterIoPortRange, 4),
IMPORT_SFUNC(NdisMDeregisterIoPortRange, 4),
IMPORT_SFUNC(NdisReadNetworkAddress, 4),
IMPORT_SFUNC(NdisQueryMapRegisterCount, 2),
IMPORT_SFUNC(NdisMAllocateMapRegisters, 5),
IMPORT_SFUNC(NdisMFreeMapRegisters, 1),
IMPORT_SFUNC(NdisMAllocateSharedMemory, 5),
IMPORT_SFUNC(NdisMMapIoSpace, 4 + 1),
IMPORT_SFUNC(NdisMUnmapIoSpace, 3),
IMPORT_SFUNC(NdisGetCacheFillSize, 0),
IMPORT_SFUNC(NdisMGetDmaAlignment, 1),
IMPORT_SFUNC(NdisMInitializeScatterGatherDma, 3),
IMPORT_SFUNC(NdisAllocatePacketPool, 4),
IMPORT_SFUNC(NdisAllocatePacketPoolEx, 5),
IMPORT_SFUNC(NdisAllocatePacket, 3),
IMPORT_SFUNC(NdisFreePacket, 1),
IMPORT_SFUNC(NdisFreePacketPool, 1),
IMPORT_SFUNC_MAP(NdisDprAllocatePacket, NdisAllocatePacket, 3),
IMPORT_SFUNC_MAP(NdisDprFreePacket, NdisFreePacket, 1),
IMPORT_SFUNC(NdisAllocateBufferPool, 3),
IMPORT_SFUNC(NdisAllocateBuffer, 5),
IMPORT_SFUNC(NdisQueryBuffer, 3),
IMPORT_SFUNC(NdisQueryBufferSafe, 4),
IMPORT_SFUNC(NdisBufferVirtualAddress, 1),
IMPORT_SFUNC(NdisBufferVirtualAddressSafe, 2),
IMPORT_SFUNC(NdisBufferLength, 1),
IMPORT_SFUNC(NdisFreeBuffer, 1),
IMPORT_SFUNC(NdisFreeBufferPool, 1),
IMPORT_SFUNC(NdisInterlockedIncrement, 1),
IMPORT_SFUNC(NdisInterlockedDecrement, 1),
IMPORT_SFUNC(NdisInitializeEvent, 1),
IMPORT_SFUNC(NdisSetEvent, 1),
IMPORT_SFUNC(NdisResetEvent, 1),
IMPORT_SFUNC(NdisWaitEvent, 2),
IMPORT_SFUNC(NdisUnicodeStringToAnsiString, 2),
IMPORT_SFUNC(NdisMPciAssignResources, 3),
IMPORT_SFUNC(NdisMFreeSharedMemory, 5 + 1),
IMPORT_SFUNC(NdisMRegisterInterrupt, 7),
IMPORT_SFUNC(NdisMDeregisterInterrupt, 1),
IMPORT_SFUNC(NdisMRegisterAdapterShutdownHandler, 3),
IMPORT_SFUNC(NdisMDeregisterAdapterShutdownHandler, 1),
IMPORT_SFUNC(NDIS_BUFFER_TO_SPAN_PAGES, 1),
IMPORT_SFUNC(NdisQueryBufferOffset, 3),
IMPORT_SFUNC(NdisAdjustBufferLength, 2),
IMPORT_SFUNC(NdisPacketPoolUsage, 1),
IMPORT_SFUNC(NdisMSleep, 1),
IMPORT_SFUNC(NdisUnchainBufferAtFront, 2),
IMPORT_SFUNC(NdisReadPcmciaAttributeMemory, 4),
IMPORT_SFUNC(NdisWritePcmciaAttributeMemory, 4),
IMPORT_SFUNC(NdisOpenFile, 5 + 1),
IMPORT_SFUNC(NdisMapFile, 3),
IMPORT_SFUNC(NdisUnmapFile, 1),
IMPORT_SFUNC(NdisCloseFile, 1),
IMPORT_SFUNC(NdisMRegisterDevice, 6),
IMPORT_SFUNC(NdisMDeregisterDevice, 1),
IMPORT_SFUNC(NdisMQueryAdapterInstanceName, 2),
IMPORT_SFUNC(NdisMRegisterUnloadHandler, 2),
IMPORT_SFUNC(ndis_timercall, 4),
IMPORT_SFUNC(ndis_asyncmem_complete, 2),
IMPORT_SFUNC(ndis_intr, 2),
IMPORT_SFUNC(ndis_intrhand, 4),
/*
* This last entry is a catch-all for any function we haven't
* implemented yet. The PE import list patching routine will
* use it for any function that doesn't have an explicit match
* in this table.
*/
{ NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
/* End of list. */
{ NULL, NULL, NULL }
};
Index: head/sys/dev/aacraid/aacraid.c
===================================================================
--- head/sys/dev/aacraid/aacraid.c (revision 328217)
+++ head/sys/dev/aacraid/aacraid.c (revision 328218)
@@ -1,3864 +1,3864 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2000 Michael Smith
* Copyright (c) 2001 Scott Long
* Copyright (c) 2000 BSDi
* Copyright (c) 2001-2010 Adaptec, Inc.
* Copyright (c) 2010-2012 PMC-Sierra, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
*/
#define AAC_DRIVERNAME "aacraid"
#include "opt_aacraid.h"
/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/ioccom.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/aacraid/aacraid_reg.h>
#include <sys/aac_ioctl.h>
#include <dev/aacraid/aacraid_debug.h>
#include <dev/aacraid/aacraid_var.h>
#ifndef FILTER_HANDLED
#define FILTER_HANDLED 0x02
#endif
static void aac_add_container(struct aac_softc *sc,
struct aac_mntinforesp *mir, int f,
u_int32_t uid);
static void aac_get_bus_info(struct aac_softc *sc);
static void aac_container_bus(struct aac_softc *sc);
static void aac_daemon(void *arg);
static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
int pages, int nseg, int nseg_new);
/* Command Processing */
static void aac_timeout(struct aac_softc *sc);
static void aac_command_thread(struct aac_softc *sc);
static int aac_sync_fib(struct aac_softc *sc, u_int32_t command,
u_int32_t xferstate, struct aac_fib *fib,
u_int16_t datasize);
/* Command Buffer Management */
static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
int nseg, int error);
static int aac_alloc_commands(struct aac_softc *sc);
static void aac_free_commands(struct aac_softc *sc);
static void aac_unmap_command(struct aac_command *cm);
/* Hardware Interface */
static int aac_alloc(struct aac_softc *sc);
static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
int error);
static int aac_check_firmware(struct aac_softc *sc);
static void aac_define_int_mode(struct aac_softc *sc);
static int aac_init(struct aac_softc *sc);
static int aac_find_pci_capability(struct aac_softc *sc, int cap);
static int aac_setup_intr(struct aac_softc *sc);
static int aac_check_config(struct aac_softc *sc);
/* PMC SRC interface */
static int aac_src_get_fwstatus(struct aac_softc *sc);
static void aac_src_qnotify(struct aac_softc *sc, int qbit);
static int aac_src_get_istatus(struct aac_softc *sc);
static void aac_src_clear_istatus(struct aac_softc *sc, int mask);
static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
u_int32_t arg0, u_int32_t arg1,
u_int32_t arg2, u_int32_t arg3);
static int aac_src_get_mailbox(struct aac_softc *sc, int mb);
static void aac_src_access_devreg(struct aac_softc *sc, int mode);
static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
static int aac_src_get_outb_queue(struct aac_softc *sc);
static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
struct aac_interface aacraid_src_interface = {
aac_src_get_fwstatus,
aac_src_qnotify,
aac_src_get_istatus,
aac_src_clear_istatus,
aac_src_set_mailbox,
aac_src_get_mailbox,
aac_src_access_devreg,
aac_src_send_command,
aac_src_get_outb_queue,
aac_src_set_outb_queue
};
/* PMC SRCv interface */
static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
u_int32_t arg0, u_int32_t arg1,
u_int32_t arg2, u_int32_t arg3);
static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
struct aac_interface aacraid_srcv_interface = {
aac_src_get_fwstatus,
aac_src_qnotify,
aac_src_get_istatus,
aac_src_clear_istatus,
aac_srcv_set_mailbox,
aac_srcv_get_mailbox,
aac_src_access_devreg,
aac_src_send_command,
aac_src_get_outb_queue,
aac_src_set_outb_queue
};
/* Debugging and Diagnostics */
static struct aac_code_lookup aac_cpu_variant[] = {
{"i960JX", CPUI960_JX},
{"i960CX", CPUI960_CX},
{"i960HX", CPUI960_HX},
{"i960RX", CPUI960_RX},
{"i960 80303", CPUI960_80303},
{"StrongARM SA110", CPUARM_SA110},
{"PPC603e", CPUPPC_603e},
{"XScale 80321", CPU_XSCALE_80321},
{"MIPS 4KC", CPU_MIPS_4KC},
{"MIPS 5KC", CPU_MIPS_5KC},
{"Unknown StrongARM", CPUARM_xxx},
{"Unknown PowerPC", CPUPPC_xxx},
{NULL, 0},
{"Unknown processor", 0}
};
static struct aac_code_lookup aac_battery_platform[] = {
{"required battery present", PLATFORM_BAT_REQ_PRESENT},
{"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT},
{"optional battery present", PLATFORM_BAT_OPT_PRESENT},
{"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT},
{"no battery support", PLATFORM_BAT_NOT_SUPPORTED},
{NULL, 0},
{"unknown battery platform", 0}
};
static void aac_describe_controller(struct aac_softc *sc);
static char *aac_describe_code(struct aac_code_lookup *table,
u_int32_t code);
/* Management Interface */
static d_open_t aac_open;
static d_ioctl_t aac_ioctl;
static d_poll_t aac_poll;
#if __FreeBSD_version >= 702000
static void aac_cdevpriv_dtor(void *arg);
#else
static d_close_t aac_close;
#endif
static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
static void aac_request_aif(struct aac_softc *sc);
static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int aac_return_aif(struct aac_softc *sc,
struct aac_fib_context *ctx, caddr_t uptr);
static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void aac_ioctl_event(struct aac_softc *sc,
struct aac_event *event, void *arg);
static int aac_reset_adapter(struct aac_softc *sc);
static int aac_get_container_info(struct aac_softc *sc,
struct aac_fib *fib, int cid,
struct aac_mntinforesp *mir,
u_int32_t *uid);
static u_int32_t
aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
static struct cdevsw aacraid_cdevsw = {
.d_version = D_VERSION,
.d_flags = D_NEEDGIANT,
.d_open = aac_open,
#if __FreeBSD_version < 702000
.d_close = aac_close,
#endif
.d_ioctl = aac_ioctl,
.d_poll = aac_poll,
.d_name = "aacraid",
};
MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
/*
* Device Interface
*/
/*
* Initialize the controller and softc
*/
int
aacraid_attach(struct aac_softc *sc)
{
int error, unit;
struct aac_fib *fib;
struct aac_mntinforesp mir;
int count = 0, i = 0;
u_int32_t uid;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
sc->hint_flags = device_get_flags(sc->aac_dev);
/*
* Initialize per-controller queues.
*/
aac_initq_free(sc);
aac_initq_ready(sc);
aac_initq_busy(sc);
/* mark controller as suspended until we get ourselves organised */
sc->aac_state |= AAC_STATE_SUSPEND;
/*
* Check that the firmware on the card is supported.
*/
sc->msi_enabled = FALSE;
if ((error = aac_check_firmware(sc)) != 0)
return(error);
/*
* Initialize locks
*/
mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
TAILQ_INIT(&sc->aac_container_tqh);
TAILQ_INIT(&sc->aac_ev_cmfree);
#if __FreeBSD_version >= 800000
/* Initialize the clock daemon callout. */
callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
#endif
/*
* Initialize the adapter.
*/
if ((error = aac_alloc(sc)) != 0)
return(error);
if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
aac_define_int_mode(sc);
if ((error = aac_init(sc)) != 0)
return(error);
}
/*
* Allocate and connect our interrupt.
*/
if ((error = aac_setup_intr(sc)) != 0)
return(error);
/*
* Print a little information about the controller.
*/
aac_describe_controller(sc);
/*
* Make the control device.
*/
unit = device_get_unit(sc->aac_dev);
sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
0640, "aacraid%d", unit);
sc->aac_dev_t->si_drv1 = sc;
/* Create the AIF thread */
if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
&sc->aifthread, 0, 0, "aacraid%daif", unit))
panic("Could not create AIF thread");
/* Register the shutdown method to only be called post-dump */
if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
device_printf(sc->aac_dev,
"shutdown event registration failed\n");
/* Find containers */
mtx_lock(&sc->aac_io_lock);
aac_alloc_sync_fib(sc, &fib);
/* loop over possible containers */
do {
if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
continue;
if (i == 0)
count = mir.MntRespCount;
aac_add_container(sc, &mir, 0, uid);
i++;
} while ((i < count) && (i < AAC_MAX_CONTAINERS));
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
/* Register with CAM for the containers */
TAILQ_INIT(&sc->aac_sim_tqh);
aac_container_bus(sc);
/* Register with CAM for the non-DASD devices */
if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
aac_get_bus_info(sc);
/* poke the bus to actually attach the child devices */
bus_generic_attach(sc->aac_dev);
/* mark the controller up */
sc->aac_state &= ~AAC_STATE_SUSPEND;
/* enable interrupts now */
AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
#if __FreeBSD_version >= 800000
mtx_lock(&sc->aac_io_lock);
callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
mtx_unlock(&sc->aac_io_lock);
#else
{
struct timeval tv;
tv.tv_sec = 60;
tv.tv_usec = 0;
sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
}
#endif
return(0);
}
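/*
* Periodic housekeeping: push the current host time to the controller
* via a SendHostTime FIB, then reschedule ourselves.
*/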
static void
aac_daemon(void *arg)
{
struct aac_softc *sc;
struct timeval tv;
struct aac_command *cm;
struct aac_fib *fib;
sc = arg;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
#if __FreeBSD_version >= 800000
mtx_assert(&sc->aac_io_lock, MA_OWNED);
if (callout_pending(&sc->aac_daemontime) ||
callout_active(&sc->aac_daemontime) == 0)
return;
#else
mtx_lock(&sc->aac_io_lock);
#endif
getmicrotime(&tv);
if (!aacraid_alloc_command(sc, &cm)) {
fib = cm->cm_fib;
cm->cm_timestamp = time_uptime;
cm->cm_datalen = 0;
cm->cm_flags |= AAC_CMD_WAIT;
fib->Header.Size =
sizeof(struct aac_fib_header) + sizeof(u_int32_t);
fib->Header.XferState =
AAC_FIBSTATE_HOSTOWNED |
AAC_FIBSTATE_INITIALISED |
AAC_FIBSTATE_EMPTY |
AAC_FIBSTATE_FROMHOST |
AAC_FIBSTATE_REXPECTED |
AAC_FIBSTATE_NORM |
AAC_FIBSTATE_ASYNC |
AAC_FIBSTATE_FAST_RESPONSE;
fib->Header.Command = SendHostTime;
*(uint32_t *)fib->data = tv.tv_sec;
aacraid_map_command_sg(cm, NULL, 0, 0);
aacraid_release_command(cm);
}
#if __FreeBSD_version >= 800000
callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
#else
mtx_unlock(&sc->aac_io_lock);
tv.tv_sec = 30 * 60;
tv.tv_usec = 0;
sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
#endif
}
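/*
* Queue an event callback for later delivery; only "command freed"
* events are currently supported.
*/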
void
aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
{
switch (event->ev_type & AAC_EVENT_MASK) {
case AAC_EVENT_CMFREE:
TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
break;
default:
device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
event->ev_type);
break;
}
return;
}
/*
* Request information of container #cid
*/
static int
aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
struct aac_mntinforesp *mir, u_int32_t *uid)
{
struct aac_command *cm;
struct aac_fib *fib;
struct aac_mntinfo *mi;
struct aac_cnt_config *ccfg;
int rval;
if (sync_fib == NULL) {
if (aacraid_alloc_command(sc, &cm)) {
device_printf(sc->aac_dev,
"Warning, no free command available\n");
return (-1);
}
fib = cm->cm_fib;
} else {
fib = sync_fib;
}
mi = (struct aac_mntinfo *)&fib->data[0];
/* 4KB sector support? 64-bit LBA? */
if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
mi->Command = VM_NameServeAllBlk;
else if (sc->flags & AAC_FLAGS_LBA_64BIT)
mi->Command = VM_NameServe64;
else
mi->Command = VM_NameServe;
mi->MntType = FT_FILESYS;
mi->MntCount = cid;
if (sync_fib) {
if (aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof(struct aac_mntinfo))) {
device_printf(sc->aac_dev, "Error probing container %d\n", cid);
return (-1);
}
} else {
cm->cm_timestamp = time_uptime;
cm->cm_datalen = 0;
fib->Header.Size =
sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
fib->Header.XferState =
AAC_FIBSTATE_HOSTOWNED |
AAC_FIBSTATE_INITIALISED |
AAC_FIBSTATE_EMPTY |
AAC_FIBSTATE_FROMHOST |
AAC_FIBSTATE_REXPECTED |
AAC_FIBSTATE_NORM |
AAC_FIBSTATE_ASYNC |
AAC_FIBSTATE_FAST_RESPONSE;
fib->Header.Command = ContainerCommand;
if (aacraid_wait_command(cm) != 0) {
device_printf(sc->aac_dev, "Error probing container %d\n", cid);
aacraid_release_command(cm);
return (-1);
}
}
bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
/* UID */
*uid = cid;
if (mir->MntTable[0].VolType != CT_NONE &&
!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
}
ccfg = (struct aac_cnt_config *)&fib->data[0];
bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
ccfg->Command = VM_ContainerConfig;
ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
ccfg->CTCommand.param[0] = cid;
if (sync_fib) {
rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof(struct aac_cnt_config));
if (rval == 0 && ccfg->Command == ST_OK &&
ccfg->CTCommand.param[0] == CT_OK &&
mir->MntTable[0].VolType != CT_PASSTHRU)
*uid = ccfg->CTCommand.param[1];
} else {
fib->Header.Size =
sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
fib->Header.XferState =
AAC_FIBSTATE_HOSTOWNED |
AAC_FIBSTATE_INITIALISED |
AAC_FIBSTATE_EMPTY |
AAC_FIBSTATE_FROMHOST |
AAC_FIBSTATE_REXPECTED |
AAC_FIBSTATE_NORM |
AAC_FIBSTATE_ASYNC |
AAC_FIBSTATE_FAST_RESPONSE;
fib->Header.Command = ContainerCommand;
rval = aacraid_wait_command(cm);
if (rval == 0 && ccfg->Command == ST_OK &&
ccfg->CTCommand.param[0] == CT_OK &&
mir->MntTable[0].VolType != CT_PASSTHRU)
*uid = ccfg->CTCommand.param[1];
aacraid_release_command(cm);
}
}
return (0);
}
/*
* Create a device to represent a new container
*/
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
u_int32_t uid)
{
struct aac_container *co;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/*
* Check container volume type for validity. Note that many of
* the possible types may never show up.
*/
if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
M_NOWAIT | M_ZERO);
if (co == NULL) {
panic("Out of memory?!");
}
co->co_found = f;
bcopy(&mir->MntTable[0], &co->co_mntobj,
sizeof(struct aac_mntobj));
co->co_uid = uid;
TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
}
}
/*
* Allocate resources associated with (sc)
*/
static int
aac_alloc(struct aac_softc *sc)
{
bus_size_t maxsize;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/*
* Create DMA tag for mapping buffers into controller-addressable space.
*/
if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
(sc->flags & AAC_FLAGS_SG_64BIT) ?
BUS_SPACE_MAXADDR :
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sc->aac_max_sectors << 9, /* maxsize */
sc->aac_sg_tablesize, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, /* lockfunc */
&sc->aac_io_lock, /* lockfuncarg */
&sc->aac_buffer_dmat)) {
device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
return (ENOMEM);
}
/*
* Create DMA tag for mapping FIBs into controller-addressable space.
*/
if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
sizeof(struct aac_fib_xporthdr) + 31);
else
maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
(sc->flags & AAC_FLAGS_4GB_WINDOW) ?
BUS_SPACE_MAXADDR_32BIT :
0x7fffffff, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
maxsize, /* maxsize */
1, /* nsegments */
maxsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* No locking needed */
&sc->aac_fib_dmat)) {
device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
return (ENOMEM);
}
/*
* Create DMA tag for the common structure and allocate it.
*/
maxsize = sizeof(struct aac_common);
maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
(sc->flags & AAC_FLAGS_4GB_WINDOW) ?
BUS_SPACE_MAXADDR_32BIT :
0x7fffffff, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
maxsize, /* maxsize */
1, /* nsegments */
maxsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* No locking needed */
&sc->aac_common_dmat)) {
device_printf(sc->aac_dev,
"can't allocate common structure DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
device_printf(sc->aac_dev, "can't allocate common structure\n");
return (ENOMEM);
}
(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
sc->aac_common, maxsize,
aac_common_map, sc, 0);
bzero(sc->aac_common, maxsize);
/* Allocate some FIBs and associated command structs */
TAILQ_INIT(&sc->aac_fibmap_tqh);
sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
M_AACRAIDBUF, M_WAITOK|M_ZERO);
mtx_lock(&sc->aac_io_lock);
while (sc->total_fibs < sc->aac_max_fibs) {
if (aac_alloc_commands(sc) != 0)
break;
}
mtx_unlock(&sc->aac_io_lock);
if (sc->total_fibs == 0)
return (ENOMEM);
return (0);
}
/*
* Free all of the resources associated with (sc)
*
* Should not be called if the controller is active.
*/
void
aacraid_free(struct aac_softc *sc)
{
int i;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* remove the control device */
if (sc->aac_dev_t != NULL)
destroy_dev(sc->aac_dev_t);
/* throw away any FIB buffers, discard the FIB DMA tag */
aac_free_commands(sc);
if (sc->aac_fib_dmat)
bus_dma_tag_destroy(sc->aac_fib_dmat);
free(sc->aac_commands, M_AACRAIDBUF);
/* destroy the common area */
if (sc->aac_common) {
bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
sc->aac_common_dmamap);
}
if (sc->aac_common_dmat)
bus_dma_tag_destroy(sc->aac_common_dmat);
/* disconnect the interrupt handler */
for (i = 0; i < AAC_MAX_MSIX; ++i) {
if (sc->aac_intr[i])
bus_teardown_intr(sc->aac_dev,
sc->aac_irq[i], sc->aac_intr[i]);
if (sc->aac_irq[i])
bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
sc->aac_irq_rid[i], sc->aac_irq[i]);
else
break;
}
if (sc->msi_enabled)
pci_release_msi(sc->aac_dev);
/* destroy data-transfer DMA tag */
if (sc->aac_buffer_dmat)
bus_dma_tag_destroy(sc->aac_buffer_dmat);
/* destroy the parent DMA tag */
if (sc->aac_parent_dmat)
bus_dma_tag_destroy(sc->aac_parent_dmat);
/* release the register window mapping */
if (sc->aac_regs_res0 != NULL)
bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
sc->aac_regs_rid0, sc->aac_regs_res0);
if (sc->aac_regs_res1 != NULL)
bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
sc->aac_regs_rid1, sc->aac_regs_res1);
}
/*
* Disconnect from the controller completely, in preparation for unload.
*/
int
aacraid_detach(device_t dev)
{
struct aac_softc *sc;
struct aac_container *co;
struct aac_sim *sim;
int error;
sc = device_get_softc(dev);
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
#if __FreeBSD_version >= 800000
callout_drain(&sc->aac_daemontime);
#else
untimeout(aac_daemon, (void *)sc, sc->timeout_id);
#endif
/* Remove the child containers */
while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
free(co, M_AACRAIDBUF);
}
/* Remove the CAM SIMs */
while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
error = device_delete_child(dev, sim->sim_dev);
if (error)
return (error);
free(sim, M_AACRAIDBUF);
}
if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
sc->aifflags |= AAC_AIFFLAGS_EXIT;
wakeup(sc->aifthread);
tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
}
if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
panic("Cannot shutdown AIF thread");
if ((error = aacraid_shutdown(dev)))
return(error);
EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
aacraid_free(sc);
mtx_destroy(&sc->aac_io_lock);
return(0);
}
/*
* Bring the controller down to a dormant state and detach all child devices.
*
* This function is called before detach or system shutdown.
*
* Note that we can assume that the bioq on the controller is empty, as we won't
* allow shutdown if any device is open.
*/
int
aacraid_shutdown(device_t dev)
{
struct aac_softc *sc;
struct aac_fib *fib;
struct aac_close_command *cc;
sc = device_get_softc(dev);
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
sc->aac_state |= AAC_STATE_SUSPEND;
/*
* Send a Container shutdown followed by a HostShutdown FIB to the
* controller to convince it that we don't want to talk to it anymore.
* We've been closed and all I/O completed already
*/
device_printf(sc->aac_dev, "shutting down controller...");
mtx_lock(&sc->aac_io_lock);
aac_alloc_sync_fib(sc, &fib);
cc = (struct aac_close_command *)&fib->data[0];
bzero(cc, sizeof(struct aac_close_command));
cc->Command = VM_CloseAll;
cc->ContainerId = 0xfffffffe;
if (aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof(struct aac_close_command)))
printf("FAILED.\n");
else
printf("done\n");
AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
return(0);
}
/*
* Bring the controller to a quiescent state, ready for system suspend.
*/
int
aacraid_suspend(device_t dev)
{
struct aac_softc *sc;
sc = device_get_softc(dev);
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
sc->aac_state |= AAC_STATE_SUSPEND;
AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
return(0);
}
/*
* Bring the controller back to a state ready for operation.
*/
int
aacraid_resume(device_t dev)
{
struct aac_softc *sc;
sc = device_get_softc(dev);
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
sc->aac_state &= ~AAC_STATE_SUSPEND;
AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
return(0);
}
/*
* Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
*/
void
aacraid_new_intr_type1(void *arg)
{
struct aac_msix_ctx *ctx;
struct aac_softc *sc;
int vector_no;
struct aac_command *cm;
struct aac_fib *fib;
u_int32_t bellbits, bellbits_shifted, index, handle;
int isFastResponse, isAif, noMoreAif, mode;
ctx = (struct aac_msix_ctx *)arg;
sc = ctx->sc;
vector_no = ctx->vector_no;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_lock(&sc->aac_io_lock);
if (sc->msi_enabled) {
mode = AAC_INT_MODE_MSI;
if (vector_no == 0) {
bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
if (bellbits & 0x40000)
mode |= AAC_INT_MODE_AIF;
else if (bellbits & 0x1000)
mode |= AAC_INT_MODE_SYNC;
}
} else {
mode = AAC_INT_MODE_INTX;
bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
bellbits = AAC_DB_RESPONSE_SENT_NS;
AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
} else {
bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
if (bellbits_shifted & AAC_DB_AIF_PENDING)
mode |= AAC_INT_MODE_AIF;
else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
mode |= AAC_INT_MODE_SYNC;
}
/* ODR readback, Prep #238630 */
AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
}
if (mode & AAC_INT_MODE_SYNC) {
if (sc->aac_sync_cm) {
cm = sc->aac_sync_cm;
cm->cm_flags |= AAC_CMD_COMPLETED;
/* is there a completion handler? */
if (cm->cm_complete != NULL) {
cm->cm_complete(cm);
} else {
/* assume that someone is sleeping on this command */
wakeup(cm);
}
sc->flags &= ~AAC_QUEUE_FRZN;
sc->aac_sync_cm = NULL;
}
mode = 0;
}
if (mode & AAC_INT_MODE_AIF) {
if (mode & AAC_INT_MODE_INTX) {
aac_request_aif(sc);
mode = 0;
}
}
if (mode) {
/* handle async. status */
index = sc->aac_host_rrq_idx[vector_no];
for (;;) {
isFastResponse = isAif = noMoreAif = 0;
/* remove toggle bit (31) */
handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
/* check fast response bit (30) */
if (handle & 0x40000000)
isFastResponse = 1;
/* check AIF bit (23) */
else if (handle & 0x00800000)
isAif = TRUE;
handle &= 0x0000ffff;
if (handle == 0)
break;
cm = sc->aac_commands + (handle - 1);
fib = cm->cm_fib;
sc->aac_rrq_outstanding[vector_no]--;
if (isAif) {
noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
if (!noMoreAif)
aac_handle_aif(sc, fib);
aac_remove_busy(cm);
aacraid_release_command(cm);
} else {
if (isFastResponse) {
fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
*((u_int32_t *)(fib->data)) = ST_OK;
cm->cm_flags |= AAC_CMD_FASTRESP;
}
aac_remove_busy(cm);
aac_unmap_command(cm);
cm->cm_flags |= AAC_CMD_COMPLETED;
/* is there a completion handler? */
if (cm->cm_complete != NULL) {
cm->cm_complete(cm);
} else {
/* assume that someone is sleeping on this command */
wakeup(cm);
}
sc->flags &= ~AAC_QUEUE_FRZN;
}
sc->aac_common->ac_host_rrq[index++] = 0;
if (index == (vector_no + 1) * sc->aac_vector_cap)
index = vector_no * sc->aac_vector_cap;
sc->aac_host_rrq_idx[vector_no] = index;
if ((isAif && !noMoreAif) || sc->aif_pending)
aac_request_aif(sc);
}
}
if (mode & AAC_INT_MODE_AIF) {
aac_request_aif(sc);
AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
mode = 0;
}
/* see if we can start some more I/O */
if ((sc->flags & AAC_QUEUE_FRZN) == 0)
aacraid_startio(sc);
mtx_unlock(&sc->aac_io_lock);
}
/*
* Handle notification of one or more FIBs coming from the controller.
*/
static void
aac_command_thread(struct aac_softc *sc)
{
int retval;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_lock(&sc->aac_io_lock);
sc->aifflags = AAC_AIFFLAGS_RUNNING;
while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
retval = 0;
if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
/*
* First see if any FIBs need to be allocated. This needs
* to be called without the driver lock because contigmalloc
* will grab Giant, which would result in an LOR.
*/
if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
aac_alloc_commands(sc);
sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
aacraid_startio(sc);
}
/*
* While we're here, check to see if any commands are stuck.
* This is pretty low-priority, so it's ok if it doesn't
* always fire.
*/
if (retval == EWOULDBLOCK)
aac_timeout(sc);
/* Check the hardware printf message buffer */
if (sc->aac_common->ac_printf[0] != 0)
aac_print_printf(sc);
}
sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
mtx_unlock(&sc->aac_io_lock);
wakeup(sc->aac_dev);
aac_kthread_exit(0);
}
/*
* Submit a command to the controller, return when it completes.
* XXX This is very dangerous! If the card has gone out to lunch, we could
* be stuck here forever. At the same time, signals are not caught
* because there is a risk that a signal could wakeup the sleep before
* the card has a chance to complete the command. Since there is no way
* to cancel a command that is in progress, we can't protect against the
* card completing a command late and spamming the command and data
* memory. So, we are held hostage until the command completes.
*/
int
aacraid_wait_command(struct aac_command *cm)
{
struct aac_softc *sc;
int error;
sc = cm->cm_sc;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* Put the command on the ready queue and get things going */
aac_enqueue_ready(cm);
aacraid_startio(sc);
error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
return(error);
}
/*
* Command Buffer Management
*/
/*
* Allocate a command.
*/
int
aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
struct aac_command *cm;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
if ((cm = aac_dequeue_free(sc)) == NULL) {
if (sc->total_fibs < sc->aac_max_fibs) {
sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
wakeup(sc->aifthread);
}
return (EBUSY);
}
*cmp = cm;
return(0);
}
/*
* Release a command back to the freelist.
*/
void
aacraid_release_command(struct aac_command *cm)
{
struct aac_event *event;
struct aac_softc *sc;
sc = cm->cm_sc;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* (re)initialize the command/FIB */
cm->cm_sgtable = NULL;
cm->cm_flags = 0;
cm->cm_complete = NULL;
cm->cm_ccb = NULL;
cm->cm_passthr_dmat = 0;
cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
cm->cm_fib->Header.Unused = 0;
cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
/*
* These are duplicated in aac_start to cover the case where an
* intermediate stage may have destroyed them. They're left
* initialized here for debugging purposes only.
*/
cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
cm->cm_fib->Header.Handle = 0;
aac_enqueue_free(cm);
/*
* Dequeue all events so that there's no risk of events getting
* stranded.
*/
while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
event->ev_callback(sc, event, event->ev_arg);
}
}
/*
* Map helper for command/FIB allocation.
*/
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
uint64_t *fibphys;
fibphys = (uint64_t *)arg;
*fibphys = segs[0].ds_addr;
}
/*
* Allocate and initialize commands/FIBs for this adapter.
*/
static int
aac_alloc_commands(struct aac_softc *sc)
{
struct aac_command *cm;
struct aac_fibmap *fm;
uint64_t fibphys;
int i, error;
u_int32_t maxsize;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_assert(&sc->aac_io_lock, MA_OWNED);
if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
return (ENOMEM);
fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
if (fm == NULL)
return (ENOMEM);
mtx_unlock(&sc->aac_io_lock);
/* allocate the FIBs in DMAable memory and load them */
if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
device_printf(sc->aac_dev,
"Not enough contiguous memory available.\n");
free(fm, M_AACRAIDBUF);
mtx_lock(&sc->aac_io_lock);
return (ENOMEM);
}
maxsize = sc->aac_max_fib_size + 31;
if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
maxsize += sizeof(struct aac_fib_xporthdr);
/* Ignore errors since this doesn't bounce */
(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
sc->aac_max_fibs_alloc * maxsize,
aac_map_command_helper, &fibphys, 0);
mtx_lock(&sc->aac_io_lock);
/* initialize constant fields in the command structure */
bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
cm = sc->aac_commands + sc->total_fibs;
fm->aac_commands = cm;
cm->cm_sc = sc;
cm->cm_fib = (struct aac_fib *)
((u_int8_t *)fm->aac_fibs + i * maxsize);
cm->cm_fibphys = fibphys + i * maxsize;
if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
u_int64_t fibphys_aligned;
fibphys_aligned =
(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
cm->cm_fib = (struct aac_fib *)
((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
cm->cm_fibphys = fibphys_aligned;
} else {
u_int64_t fibphys_aligned;
fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
cm->cm_fib = (struct aac_fib *)
((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
cm->cm_fibphys = fibphys_aligned;
}
cm->cm_index = sc->total_fibs;
if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
&cm->cm_datamap)) != 0)
break;
if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
aacraid_release_command(cm);
sc->total_fibs++;
}
if (i > 0) {
TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
return (0);
}
bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
free(fm, M_AACRAIDBUF);
return (ENOMEM);
}
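/*
 * Illustrative sketch (not part of this driver): the 32-byte alignment
 * arithmetic used by aac_alloc_commands() above when carving individual
 * FIBs out of the DMA block.  For the new comm. type1 interface a
 * struct aac_fib_xporthdr is reserved in front of the FIB before
 * aligning.  example_align_fib() is a hypothetical helper.
 */
static uint64_t
example_align_fib(uint64_t fibphys, int reserve_xporthdr)
{
	if (reserve_xporthdr)
		fibphys += sizeof(struct aac_fib_xporthdr);
	/* round up to the next 32-byte boundary, e.g. 0x1021 -> 0x1040 */
	return ((fibphys + 31) & ~(uint64_t)31);
}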
/*
* Free FIBs owned by this adapter.
*/
static void
aac_free_commands(struct aac_softc *sc)
{
struct aac_fibmap *fm;
struct aac_command *cm;
int i;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
/*
* We check against total_fibs to handle partially
* allocated blocks.
*/
for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
cm = fm->aac_commands + i;
bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
}
bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
free(fm, M_AACRAIDBUF);
}
}
/*
* Command-mapping helper function - populate this command's s/g table.
*/
void
aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
struct aac_softc *sc;
struct aac_command *cm;
struct aac_fib *fib;
int i;
cm = (struct aac_command *)arg;
sc = cm->cm_sc;
fib = cm->cm_fib;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* copy into the FIB */
if (cm->cm_sgtable != NULL) {
if (fib->Header.Command == RawIo2) {
struct aac_raw_io2 *raw;
struct aac_sge_ieee1212 *sg;
u_int32_t min_size = PAGE_SIZE, cur_size;
int conformable = TRUE;
raw = (struct aac_raw_io2 *)&fib->data[0];
sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
raw->sgeCnt = nseg;
for (i = 0; i < nseg; i++) {
cur_size = segs[i].ds_len;
sg[i].addrHigh = 0;
*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
sg[i].length = cur_size;
sg[i].flags = 0;
if (i == 0) {
raw->sgeFirstSize = cur_size;
} else if (i == 1) {
raw->sgeNominalSize = cur_size;
min_size = cur_size;
} else if ((i+1) < nseg &&
cur_size != raw->sgeNominalSize) {
conformable = FALSE;
if (cur_size < min_size)
min_size = cur_size;
}
}
/* not conformable: evaluate required sg elements */
if (!conformable) {
int j, err_found, nseg_new = nseg;
for (i = min_size / PAGE_SIZE; i >= 1; --i) {
err_found = FALSE;
nseg_new = 2;
for (j = 1; j < nseg - 1; ++j) {
if (sg[j].length % (i*PAGE_SIZE)) {
err_found = TRUE;
break;
}
nseg_new += (sg[j].length / (i*PAGE_SIZE));
}
if (!err_found)
break;
}
if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
!(sc->hint_flags & 4))
nseg = aac_convert_sgraw2(sc,
raw, i, nseg, nseg_new);
} else {
raw->flags |= RIO2_SGL_CONFORMANT;
}
/* update the FIB size for the s/g count */
fib->Header.Size += nseg *
sizeof(struct aac_sge_ieee1212);
} else if (fib->Header.Command == RawIo) {
struct aac_sg_tableraw *sg;
sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
sg->SgCount = nseg;
for (i = 0; i < nseg; i++) {
sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
sg->SgEntryRaw[i].Next = 0;
sg->SgEntryRaw[i].Prev = 0;
sg->SgEntryRaw[i].Flags = 0;
}
/* update the FIB size for the s/g count */
fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
struct aac_sg_table *sg;
sg = cm->cm_sgtable;
sg->SgCount = nseg;
for (i = 0; i < nseg; i++) {
sg->SgEntry[i].SgAddress = segs[i].ds_addr;
sg->SgEntry[i].SgByteCount = segs[i].ds_len;
}
/* update the FIB size for the s/g count */
fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
} else {
struct aac_sg_table64 *sg;
sg = (struct aac_sg_table64 *)cm->cm_sgtable;
sg->SgCount = nseg;
for (i = 0; i < nseg; i++) {
sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
}
/* update the FIB size for the s/g count */
fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
}
}
/* Fix up the address values in the FIB. Use the command array index
* instead of a pointer since these fields are only 32 bits. Shift
* the SenderFibAddress over to make room for the fast response bit
* and for the AIF bit
*/
cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
/* save a pointer to the command for speedy reverse-lookup */
cm->cm_fib->Header.Handle += cm->cm_index + 1;
if (cm->cm_passthr_dmat == 0) {
if (cm->cm_flags & AAC_CMD_DATAIN)
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
BUS_DMASYNC_PREREAD);
if (cm->cm_flags & AAC_CMD_DATAOUT)
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
BUS_DMASYNC_PREWRITE);
}
cm->cm_flags |= AAC_CMD_MAPPED;
if (sc->flags & AAC_FLAGS_SYNC_MODE) {
u_int32_t wait = 0;
aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
} else if (cm->cm_flags & AAC_CMD_WAIT) {
aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
} else {
int count = 10000000L;
while (AAC_SEND_COMMAND(sc, cm) != 0) {
if (--count == 0) {
aac_unmap_command(cm);
sc->flags |= AAC_QUEUE_FRZN;
aac_requeue_ready(cm);
}
DELAY(5); /* wait 5 usec. */
}
}
}
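/*
 * Illustrative sketch (not part of this driver): the SenderFibAddress
 * encoding set up at the end of aacraid_map_command_sg() above.  The
 * command index is shifted left by two so the controller can hand it
 * back with the two low flag bits (fast response, AIF) folded in.
 * example_* names are hypothetical.
 */
static uint32_t
example_encode_sender_addr(uint32_t cm_index)
{
	return (cm_index << 2);		/* bits 0-1 reserved for flags */
}

static uint32_t
example_decode_cm_index(uint32_t sender_addr)
{
	return (sender_addr >> 2);	/* strip the two flag bits */
}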
static int
aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
int pages, int nseg, int nseg_new)
{
struct aac_sge_ieee1212 *sge;
int i, j, pos;
u_int32_t addr_low;
- sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
+ sge = mallocarray(nseg_new, sizeof(struct aac_sge_ieee1212),
M_AACRAIDBUF, M_NOWAIT|M_ZERO);
if (sge == NULL)
return nseg;
for (i = 1, pos = 1; i < nseg - 1; ++i) {
for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
sge[pos].addrLow = addr_low;
sge[pos].addrHigh = raw->sge[i].addrHigh;
if (addr_low < raw->sge[i].addrLow)
sge[pos].addrHigh++;
sge[pos].length = pages * PAGE_SIZE;
sge[pos].flags = 0;
pos++;
}
}
sge[pos] = raw->sge[nseg-1];
for (i = 1; i < nseg_new; ++i)
raw->sge[i] = sge[i];
free(sge, M_AACRAIDBUF);
raw->sgeCnt = nseg_new;
raw->flags |= RIO2_SGL_CONFORMANT;
raw->sgeNominalSize = pages * PAGE_SIZE;
return nseg_new;
}
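/*
 * Illustrative sketch (not part of this driver): how the conformable
 * check in aacraid_map_command_sg()/aac_convert_sgraw2() above settles
 * on a chunk size.  A RawIo2 SGL is conformant when all middle elements
 * share one nominal size; otherwise a multiple of PAGE_SIZE that evenly
 * divides every middle element is chosen and the element count is
 * recomputed.  example_sgraw2_count() is hypothetical; 'len' holds the
 * middle element lengths only (first and last element are kept as-is).
 */
static int
example_sgraw2_count(uint32_t *len, int nmid, int pages)
{
	uint32_t chunk = pages * PAGE_SIZE;
	int i, nseg_new = 2;		/* first and last element */

	for (i = 0; i < nmid; i++) {
		if (len[i] % chunk != 0)
			return (-1);	/* this chunk size does not fit */
		nseg_new += len[i] / chunk;
	}
	return (nseg_new);
}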
/*
* Unmap a command from controller-visible space.
*/
static void
aac_unmap_command(struct aac_command *cm)
{
struct aac_softc *sc;
sc = cm->cm_sc;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
if (!(cm->cm_flags & AAC_CMD_MAPPED))
return;
if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
if (cm->cm_flags & AAC_CMD_DATAIN)
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
BUS_DMASYNC_POSTREAD);
if (cm->cm_flags & AAC_CMD_DATAOUT)
bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
}
cm->cm_flags &= ~AAC_CMD_MAPPED;
}
/*
* Hardware Interface
*/
/*
* Initialize the adapter.
*/
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
struct aac_softc *sc;
sc = (struct aac_softc *)arg;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
sc->aac_common_busaddr = segs[0].ds_addr;
}
static int
aac_check_firmware(struct aac_softc *sc)
{
u_int32_t code, major, minor, maxsize;
u_int32_t options = 0, atu_size = 0, status, waitCount;
time_t then;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* check if flash update is running */
if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
then = time_uptime;
do {
code = AAC_GET_FWSTATUS(sc);
if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
device_printf(sc->aac_dev,
"FATAL: controller not coming ready, "
"status %x\n", code);
return(ENXIO);
}
} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
/*
* Delay 10 seconds. Because right now FW is doing a soft reset,
* do not read scratch pad register at this time
*/
waitCount = 10 * 10000;
while (waitCount) {
DELAY(100); /* delay 100 microseconds */
waitCount--;
}
}
/*
* Wait for the adapter to come ready.
*/
then = time_uptime;
do {
code = AAC_GET_FWSTATUS(sc);
if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
device_printf(sc->aac_dev,
"FATAL: controller not coming ready, "
"status %x\n", code);
return(ENXIO);
}
} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
/*
* Retrieve the firmware version numbers. Dell PERC2/QC cards with
* firmware version 1.x are not compatible with this driver.
*/
if (sc->flags & AAC_FLAGS_PERC2QC) {
if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
NULL, NULL)) {
device_printf(sc->aac_dev,
"Error reading firmware version\n");
return (EIO);
}
/* These numbers are stored as ASCII! */
major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
if (major == 1) {
device_printf(sc->aac_dev,
"Firmware version %d.%d is not supported.\n",
major, minor);
return (EINVAL);
}
}
/*
* Retrieve the capabilities/supported options word so we know what
* work-arounds to enable. Some firmware revs don't support this
* command.
*/
if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
if (status != AAC_SRB_STS_INVALID_REQUEST) {
device_printf(sc->aac_dev,
"RequestAdapterInfo failed\n");
return (EIO);
}
} else {
options = AAC_GET_MAILBOX(sc, 1);
atu_size = AAC_GET_MAILBOX(sc, 2);
sc->supported_options = options;
if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
(sc->flags & AAC_FLAGS_NO4GB) == 0)
sc->flags |= AAC_FLAGS_4GB_WINDOW;
if (options & AAC_SUPPORTED_NONDASD)
sc->flags |= AAC_FLAGS_ENABLE_CAM;
if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
&& (sizeof(bus_addr_t) > 4)
&& (sc->hint_flags & 0x1)) {
device_printf(sc->aac_dev,
"Enabling 64-bit address support\n");
sc->flags |= AAC_FLAGS_SG_64BIT;
}
if (sc->aac_if.aif_send_command) {
if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
}
if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
sc->flags |= AAC_FLAGS_ARRAY_64BIT;
}
if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
device_printf(sc->aac_dev, "Communication interface not supported!\n");
return (ENXIO);
}
if (sc->hint_flags & 2) {
device_printf(sc->aac_dev,
"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
sc->flags |= AAC_FLAGS_SYNC_MODE;
} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
device_printf(sc->aac_dev,
"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
sc->flags |= AAC_FLAGS_SYNC_MODE;
}
/* Check for broken hardware that does a lower number of commands */
sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
/* Remap mem. resource, if required */
if (atu_size > rman_get_size(sc->aac_regs_res0)) {
bus_release_resource(
sc->aac_dev, SYS_RES_MEMORY,
sc->aac_regs_rid0, sc->aac_regs_res0);
sc->aac_regs_res0 = bus_alloc_resource_anywhere(
sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
atu_size, RF_ACTIVE);
if (sc->aac_regs_res0 == NULL) {
sc->aac_regs_res0 = bus_alloc_resource_any(
sc->aac_dev, SYS_RES_MEMORY,
&sc->aac_regs_rid0, RF_ACTIVE);
if (sc->aac_regs_res0 == NULL) {
device_printf(sc->aac_dev,
"couldn't allocate register window\n");
return (ENXIO);
}
}
sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
}
/* Read preferred settings */
sc->aac_max_fib_size = sizeof(struct aac_fib);
sc->aac_max_sectors = 128; /* 64KB */
sc->aac_max_aif = 1;
if (sc->flags & AAC_FLAGS_SG_64BIT)
sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
- sizeof(struct aac_blockwrite64))
/ sizeof(struct aac_sg_entry64);
else
sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
- sizeof(struct aac_blockwrite))
/ sizeof(struct aac_sg_entry);
if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
options = AAC_GET_MAILBOX(sc, 1);
sc->aac_max_fib_size = (options & 0xFFFF);
sc->aac_max_sectors = (options >> 16) << 1;
options = AAC_GET_MAILBOX(sc, 2);
sc->aac_sg_tablesize = (options >> 16);
options = AAC_GET_MAILBOX(sc, 3);
sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
sc->aac_max_fibs = (options & 0xFFFF);
options = AAC_GET_MAILBOX(sc, 4);
sc->aac_max_aif = (options & 0xFFFF);
options = AAC_GET_MAILBOX(sc, 5);
sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
}
maxsize = sc->aac_max_fib_size + 31;
if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
maxsize += sizeof(struct aac_fib_xporthdr);
if (maxsize > PAGE_SIZE) {
sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
maxsize = PAGE_SIZE;
}
sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
sc->flags |= AAC_FLAGS_RAW_IO;
device_printf(sc->aac_dev, "Enable Raw I/O\n");
}
if ((sc->flags & AAC_FLAGS_RAW_IO) &&
(sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
sc->flags |= AAC_FLAGS_LBA_64BIT;
device_printf(sc->aac_dev, "Enable 64-bit array\n");
}
#ifdef AACRAID_DEBUG
aacraid_get_fw_debug_buffer(sc);
#endif
return (0);
}
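/*
 * Illustrative sketch (not part of this driver): the FIB-per-page
 * arithmetic at the end of aac_check_firmware() above.  Each FIB needs
 * up to 31 bytes of slack for 32-byte alignment (plus a transport
 * header on new comm. type1), and the per-FIB footprint is clamped to
 * one page.  Worked example: a 3072-byte FIB gives maxsize = 3103 and
 * PAGE_SIZE / 3103 = 1 FIB per 4 KB page.  example_* is hypothetical.
 */
static u_int
example_fibs_per_page(u_int max_fib_size, int new_comm_type1)
{
	u_int maxsize = max_fib_size + 31;

	if (new_comm_type1)
		maxsize += sizeof(struct aac_fib_xporthdr);
	if (maxsize > PAGE_SIZE)
		maxsize = PAGE_SIZE;
	return (PAGE_SIZE / maxsize);
}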
static int
aac_init(struct aac_softc *sc)
{
struct aac_adapter_init *ip;
int i, error;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* reset rrq index */
sc->aac_fibs_pushed_no = 0;
for (i = 0; i < sc->aac_max_msix; i++)
sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
/*
* Fill in the init structure. This tells the adapter about the
* physical location of various important shared data structures.
*/
ip = &sc->aac_common->ac_init;
ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
sc->flags |= AAC_FLAGS_RAW_IO;
}
ip->NoOfMSIXVectors = sc->aac_max_msix;
ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
offsetof(struct aac_common, ac_fibs);
ip->AdapterFibsVirtualAddress = 0;
ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
ip->AdapterFibAlign = sizeof(struct aac_fib);
ip->PrintfBufferAddress = sc->aac_common_busaddr +
offsetof(struct aac_common, ac_printf);
ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
/*
* The adapter assumes that pages are 4K in size, except on some
* broken firmware versions that do the page->byte conversion twice,
* therefore 'assuming' that this value is in 16MB units (2^24).
* Round up since the granularity is so high.
*/
ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
ip->HostPhysMemPages =
(ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
}
ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
}
ip->MaxNumAif = sc->aac_max_aif;
ip->HostRRQ_AddrLow =
sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
/* always 32-bit address */
ip->HostRRQ_AddrHigh = 0;
if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
device_printf(sc->aac_dev, "Power Management enabled\n");
}
ip->MaxIoCommands = sc->aac_max_fibs;
ip->MaxIoSize = sc->aac_max_sectors << 9;
ip->MaxFibSize = sc->aac_max_fib_size;
/*
* Do controller-type-specific initialisation
*/
AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
/*
* Give the init structure to the controller.
*/
if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
sc->aac_common_busaddr +
offsetof(struct aac_common, ac_init), 0, 0, 0,
NULL, NULL)) {
device_printf(sc->aac_dev,
"error establishing init structure\n");
error = EIO;
goto out;
}
/*
* Check configuration issues
*/
if ((error = aac_check_config(sc)) != 0)
goto out;
error = 0;
out:
return(error);
}
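/*
 * Illustrative sketch (not part of this driver): the HostPhysMemPages
 * computation from aac_init() above, as a worked example.  With 8 GB of
 * RAM and AAC_PAGE_SIZE = 4096:
 *
 *	pages = 8589934592 / 4096 = 2097152
 *	broken memmap: (2097152 + 4096) / 4096 = 513
 *
 * i.e. firmware that does the page->byte conversion twice effectively
 * treats the value as ~16 MB (2^24) units, so the driver pre-divides
 * and rounds up.  example_* is hypothetical.
 */
static u_long
example_host_phys_mem_pages(u_long mem_bytes, int broken_memmap)
{
	u_long pages = mem_bytes / AAC_PAGE_SIZE;

	if (broken_memmap)
		pages = (pages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	return (pages);
}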
static void
aac_define_int_mode(struct aac_softc *sc)
{
device_t dev;
int cap, msi_count, error = 0;
uint32_t val;
dev = sc->aac_dev;
/* max. vectors from AAC_MONKER_GETCOMMPREF */
if (sc->aac_max_msix == 0) {
sc->aac_max_msix = 1;
sc->aac_vector_cap = sc->aac_max_fibs;
return;
}
/* OS capability */
msi_count = pci_msix_count(dev);
if (msi_count > AAC_MAX_MSIX)
msi_count = AAC_MAX_MSIX;
if (msi_count > sc->aac_max_msix)
msi_count = sc->aac_max_msix;
if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
"will try MSI\n", msi_count, error);
pci_release_msi(dev);
} else {
sc->msi_enabled = TRUE;
device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
msi_count);
}
if (!sc->msi_enabled) {
msi_count = 1;
if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
device_printf(dev, "alloc msi failed - err=%d; "
"will use INTx\n", error);
pci_release_msi(dev);
} else {
sc->msi_enabled = TRUE;
device_printf(dev, "using MSI interrupts\n");
}
}
if (sc->msi_enabled) {
/* now read controller capability from PCI config. space */
cap = aac_find_pci_capability(sc, PCIY_MSIX);
val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
if (!(val & AAC_PCI_MSI_ENABLE)) {
pci_release_msi(dev);
sc->msi_enabled = FALSE;
}
}
if (!sc->msi_enabled) {
device_printf(dev, "using legacy interrupts\n");
sc->aac_max_msix = 1;
} else {
AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
if (sc->aac_max_msix > msi_count)
sc->aac_max_msix = msi_count;
}
sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
}
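/*
 * Illustrative sketch (not part of this driver): the per-vector command
 * budget computed at the end of aac_define_int_mode() above.  The
 * numbers are made up: with 504 FIBs spread over 8 MSI-X vectors, each
 * vector may carry at most 504 / 8 = 63 outstanding commands
 * (aac_vector_cap).
 */
static int
example_vector_cap(int max_fibs, int max_msix)
{
	return (max_fibs / max_msix);	/* integer division, remainder unused */
}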
static int
aac_find_pci_capability(struct aac_softc *sc, int cap)
{
device_t dev;
uint32_t status;
uint8_t ptr;
dev = sc->aac_dev;
status = pci_read_config(dev, PCIR_STATUS, 2);
if (!(status & PCIM_STATUS_CAPPRESENT))
return (0);
status = pci_read_config(dev, PCIR_HDRTYPE, 1);
switch (status & PCIM_HDRTYPE) {
case 0:
case 1:
ptr = PCIR_CAP_PTR;
break;
case 2:
ptr = PCIR_CAP_PTR_2;
break;
default:
return (0);
break;
}
ptr = pci_read_config(dev, ptr, 1);
while (ptr != 0) {
int next, val;
next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
val = pci_read_config(dev, ptr + PCICAP_ID, 1);
if (val == cap)
return (ptr);
ptr = next;
}
return (0);
}
static int
aac_setup_intr(struct aac_softc *sc)
{
int i, msi_count, rid;
struct resource *res;
void *tag;
msi_count = sc->aac_max_msix;
rid = (sc->msi_enabled ? 1:0);
for (i = 0; i < msi_count; i++, rid++) {
if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE)) == NULL) {
device_printf(sc->aac_dev,"can't allocate interrupt\n");
return (EINVAL);
}
sc->aac_irq_rid[i] = rid;
sc->aac_irq[i] = res;
if (aac_bus_setup_intr(sc->aac_dev, res,
INTR_MPSAFE | INTR_TYPE_BIO, NULL,
aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
device_printf(sc->aac_dev, "can't set up interrupt\n");
return (EINVAL);
}
sc->aac_msix[i].vector_no = i;
sc->aac_msix[i].sc = sc;
sc->aac_intr[i] = tag;
}
return (0);
}
static int
aac_check_config(struct aac_softc *sc)
{
struct aac_fib *fib;
struct aac_cnt_config *ccfg;
struct aac_cf_status_hdr *cf_shdr;
int rval;
mtx_lock(&sc->aac_io_lock);
aac_alloc_sync_fib(sc, &fib);
ccfg = (struct aac_cnt_config *)&fib->data[0];
bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
ccfg->Command = VM_ContainerConfig;
ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof (struct aac_cnt_config));
cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
if (rval == 0 && ccfg->Command == ST_OK &&
ccfg->CTCommand.param[0] == CT_OK) {
if (cf_shdr->action <= CFACT_PAUSE) {
bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
ccfg->Command = VM_ContainerConfig;
ccfg->CTCommand.command = CT_COMMIT_CONFIG;
rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof (struct aac_cnt_config));
if (rval == 0 && ccfg->Command == ST_OK &&
ccfg->CTCommand.param[0] == CT_OK) {
/* successful completion */
rval = 0;
} else {
/* auto commit aborted due to error(s) */
rval = -2;
}
} else {
/* auto commit aborted due to adapter indicating
config. issues too dangerous to auto commit */
rval = -3;
}
} else {
/* error */
rval = -1;
}
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
return(rval);
}
/*
* Send a synchronous command to the controller and wait for a result.
* Indicate if the controller completed the command with an error status.
*/
int
aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
u_int32_t *sp, u_int32_t *r1)
{
time_t then;
u_int32_t status;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* populate the mailbox */
AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
/* ensure the sync command doorbell flag is cleared */
if (!sc->msi_enabled)
AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
/* then set it to signal the adapter */
AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
/* spin waiting for the command to complete */
then = time_uptime;
do {
if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
return(EIO);
}
} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
/* clear the completion flag */
AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
/* get the command status */
status = AAC_GET_MAILBOX(sc, 0);
if (sp != NULL)
*sp = status;
/* return parameter */
if (r1 != NULL)
*r1 = AAC_GET_MAILBOX(sc, 1);
if (status != AAC_SRB_STS_SUCCESS)
return (-1);
}
return(0);
}
static int
aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
struct aac_fib *fib, u_int16_t datasize)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_assert(&sc->aac_io_lock, MA_OWNED);
if (datasize > AAC_FIB_DATASIZE)
return(EINVAL);
/*
* Set up the sync FIB
*/
fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
AAC_FIBSTATE_INITIALISED |
AAC_FIBSTATE_EMPTY;
fib->Header.XferState |= xferstate;
fib->Header.Command = command;
fib->Header.StructType = AAC_FIBTYPE_TFIB;
fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
fib->Header.SenderSize = sizeof(struct aac_fib);
fib->Header.SenderFibAddress = 0; /* Not needed */
fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
offsetof(struct aac_common, ac_sync_fib);
/*
* Give the FIB to the controller, wait for a response.
*/
if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
return(EIO);
}
return (0);
}
/*
* Check for commands that have been outstanding for a suspiciously long time,
* and complain about them.
*/
static void
aac_timeout(struct aac_softc *sc)
{
struct aac_command *cm;
time_t deadline;
int timedout;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/*
* Traverse the busy command list, bitch about late commands once
* only.
*/
timedout = 0;
deadline = time_uptime - AAC_CMD_TIMEOUT;
TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
if (cm->cm_timestamp < deadline) {
device_printf(sc->aac_dev,
"COMMAND %p TIMEOUT AFTER %d SECONDS\n",
cm, (int)(time_uptime-cm->cm_timestamp));
AAC_PRINT_FIB(sc, cm->cm_fib);
timedout++;
}
}
if (timedout)
aac_reset_adapter(sc);
aacraid_print_queues(sc);
}
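/*
 * Illustrative sketch (not part of this driver): the deadline test used
 * by aac_timeout() above.  A command is considered late when it was
 * queued more than AAC_CMD_TIMEOUT seconds ago.  example_* is
 * hypothetical.
 */
static int
example_command_is_late(time_t now, time_t timestamp)
{
	return (timestamp < now - AAC_CMD_TIMEOUT);
}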
/*
* Interface Function Vectors
*/
/*
* Read the current firmware status word.
*/
static int
aac_src_get_fwstatus(struct aac_softc *sc)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
}
/*
* Notify the controller of a change in a given queue
*/
static void
aac_src_qnotify(struct aac_softc *sc, int qbit)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
}
/*
* Get the interrupt reason bits
*/
static int
aac_src_get_istatus(struct aac_softc *sc)
{
int val;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
if (sc->msi_enabled) {
val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
if (val & AAC_MSI_SYNC_STATUS)
val = AAC_DB_SYNC_COMMAND;
else
val = 0;
} else {
val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
}
return(val);
}
/*
* Clear some interrupt reason bits
*/
static void
aac_src_clear_istatus(struct aac_softc *sc, int mask)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
if (sc->msi_enabled) {
if (mask == AAC_DB_SYNC_COMMAND)
AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
} else {
AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
}
}
/*
* Populate the mailbox and set the command word
*/
static void
aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
}
static void
aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
}
/*
* Fetch the immediate command status word
*/
static int
aac_src_get_mailbox(struct aac_softc *sc, int mb)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
}
static int
aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
}
/*
* Set/clear interrupt masks
*/
static void
aac_src_access_devreg(struct aac_softc *sc, int mode)
{
u_int32_t val;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
switch (mode) {
case AAC_ENABLE_INTERRUPT:
AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
AAC_INT_ENABLE_TYPE1_INTX));
break;
case AAC_DISABLE_INTERRUPT:
AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
break;
case AAC_ENABLE_MSIX:
/* set bit 6 */
val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
val |= 0x40;
AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
/* unmask int. */
val = PMC_ALL_INTERRUPT_BITS;
AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
break;
case AAC_DISABLE_MSIX:
/* reset bit 6 */
val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
val &= ~0x40;
AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
break;
case AAC_CLEAR_AIF_BIT:
/* set bit 5 */
val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
val |= 0x20;
AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
break;
case AAC_CLEAR_SYNC_BIT:
/* set bit 4 */
val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
val |= 0x10;
AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
break;
case AAC_ENABLE_INTX:
/* set bit 7 */
val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
val |= 0x80;
AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
/* unmask int. */
val = PMC_ALL_INTERRUPT_BITS;
AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
val & (~(PMC_GLOBAL_INT_BIT2)));
break;
default:
break;
}
}
/*
* New comm. interface: Send command functions
*/
static int
aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
{
struct aac_fib_xporthdr *pFibX;
u_int32_t fibsize, high_addr;
u_int64_t address;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
sc->aac_max_msix > 1) {
u_int16_t vector_no, first_choice = 0xffff;
vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
do {
vector_no += 1;
if (vector_no == sc->aac_max_msix)
vector_no = 1;
if (sc->aac_rrq_outstanding[vector_no] <
sc->aac_vector_cap)
break;
if (0xffff == first_choice)
first_choice = vector_no;
else if (vector_no == first_choice)
break;
} while (1);
if (vector_no == first_choice)
vector_no = 0;
sc->aac_rrq_outstanding[vector_no]++;
if (sc->aac_fibs_pushed_no == 0xffffffff)
sc->aac_fibs_pushed_no = 0;
else
sc->aac_fibs_pushed_no++;
cm->cm_fib->Header.Handle += (vector_no << 16);
}
if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
/* Calculate the fibsize field: FIB size in 128-byte units, minus one */
fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
/* Fill new FIB header */
address = cm->cm_fibphys;
high_addr = (u_int32_t)(address >> 32);
if (high_addr == 0L) {
cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
cm->cm_fib->Header.u.TimeStamp = 0L;
} else {
cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
}
cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
} else {
/* Calculate the fibsize field incl. the transport header, in 128-byte units minus one */
fibsize = (sizeof(struct aac_fib_xporthdr) +
cm->cm_fib->Header.Size + 127) / 128 - 1;
/* Fill XPORT header */
pFibX = (struct aac_fib_xporthdr *)
((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
pFibX->Handle = cm->cm_fib->Header.Handle;
pFibX->HostAddress = cm->cm_fibphys;
pFibX->Size = cm->cm_fib->Header.Size;
address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
high_addr = (u_int32_t)(address >> 32);
}
if (fibsize > 31)
fibsize = 31;
aac_enqueue_busy(cm);
if (high_addr) {
AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
} else {
AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
}
return 0;
}
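/*
 * Illustrative sketch (not part of this driver): the inbound-queue
 * encoding used by aac_src_send_command() above.  The FIB size is
 * expressed in 128-byte units minus one (capped at 31) and, because the
 * FIB physical address is 32-byte aligned, that value can simply be
 * added into the low five bits of the address before it is written to
 * the inbound queue register.  example_* is hypothetical.
 */
static uint32_t
example_ique_word(uint64_t fibphys, uint16_t header_size)
{
	uint32_t fibsize = (header_size + 127) / 128 - 1;

	if (fibsize > 31)
		fibsize = 31;
	/* the low 5 bits of fibphys are zero due to 32-byte alignment */
	return ((uint32_t)fibphys + fibsize);
}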
/*
* New comm. interface: get, set outbound queue index
*/
static int
aac_src_get_outb_queue(struct aac_softc *sc)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
return(-1);
}
static void
aac_src_set_outb_queue(struct aac_softc *sc, int index)
{
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
}
/*
* Debugging and Diagnostics
*/
/*
* Print some information about the controller.
*/
static void
aac_describe_controller(struct aac_softc *sc)
{
struct aac_fib *fib;
struct aac_adapter_info *info;
char *adapter_type = "Adaptec RAID controller";
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_lock(&sc->aac_io_lock);
aac_alloc_sync_fib(sc, &fib);
if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
fib->data[0] = 0;
if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
else {
struct aac_supplement_adapter_info *supp_info;
supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
adapter_type = (char *)supp_info->AdapterTypeText;
sc->aac_feature_bits = supp_info->FeatureBits;
sc->aac_support_opt2 = supp_info->SupportedOptions2;
}
}
device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
adapter_type,
AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
fib->data[0] = 0;
if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
return;
}
/* save the kernel revision structure for later use */
info = (struct aac_adapter_info *)&fib->data[0];
sc->aac_revision = info->KernelRevision;
if (bootverbose) {
device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
"(%dMB cache, %dMB execution), %s\n",
aac_describe_code(aac_cpu_variant, info->CpuVariant),
info->ClockSpeed, info->TotalMem / (1024 * 1024),
info->BufferMem / (1024 * 1024),
info->ExecutionMem / (1024 * 1024),
aac_describe_code(aac_battery_platform,
info->batteryPlatform));
device_printf(sc->aac_dev,
"Kernel %d.%d-%d, Build %d, S/N %6X\n",
info->KernelRevision.external.comp.major,
info->KernelRevision.external.comp.minor,
info->KernelRevision.external.comp.dash,
info->KernelRevision.buildNumber,
(u_int32_t)(info->SerialNumber & 0xffffff));
device_printf(sc->aac_dev, "Supported Options=%b\n",
sc->supported_options,
"\20"
"\1SNAPSHOT"
"\2CLUSTERS"
"\3WCACHE"
"\4DATA64"
"\5HOSTTIME"
"\6RAID50"
"\7WINDOW4GB"
"\10SCSIUPGD"
"\11SOFTERR"
"\12NORECOND"
"\13SGMAP64"
"\14ALARM"
"\15NONDASD"
"\16SCSIMGT"
"\17RAIDSCSI"
"\21ADPTINFO"
"\22NEWCOMM"
"\23ARRAY64BIT"
"\24HEATSENSOR");
}
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
}
/*
* Look up a text description of a numeric error code and return a pointer to
* same.
*/
static char *
aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
{
int i;
for (i = 0; table[i].string != NULL; i++)
if (table[i].code == code)
return(table[i].string);
return(table[i + 1].string);
}
/*
* Management Interface
*/
static int
aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
struct aac_softc *sc;
sc = dev->si_drv1;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
#if __FreeBSD_version >= 702000
device_busy(sc->aac_dev);
devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
#endif
return 0;
}
static int
aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
union aac_statrequest *as;
struct aac_softc *sc;
int error = 0;
as = (union aac_statrequest *)arg;
sc = dev->si_drv1;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
switch (cmd) {
case AACIO_STATS:
switch (as->as_item) {
case AACQ_FREE:
case AACQ_READY:
case AACQ_BUSY:
bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
sizeof(struct aac_qstat));
break;
default:
error = ENOENT;
break;
}
break;
case FSACTL_SENDFIB:
case FSACTL_SEND_LARGE_FIB:
arg = *(caddr_t*)arg;
case FSACTL_LNX_SENDFIB:
case FSACTL_LNX_SEND_LARGE_FIB:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
error = aac_ioctl_sendfib(sc, arg);
break;
case FSACTL_SEND_RAW_SRB:
arg = *(caddr_t*)arg;
case FSACTL_LNX_SEND_RAW_SRB:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
error = aac_ioctl_send_raw_srb(sc, arg);
break;
case FSACTL_AIF_THREAD:
case FSACTL_LNX_AIF_THREAD:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
error = EINVAL;
break;
case FSACTL_OPEN_GET_ADAPTER_FIB:
arg = *(caddr_t*)arg;
case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
error = aac_open_aif(sc, arg);
break;
case FSACTL_GET_NEXT_ADAPTER_FIB:
arg = *(caddr_t*)arg;
case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
error = aac_getnext_aif(sc, arg);
break;
case FSACTL_CLOSE_GET_ADAPTER_FIB:
arg = *(caddr_t*)arg;
case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
error = aac_close_aif(sc, arg);
break;
case FSACTL_MINIPORT_REV_CHECK:
arg = *(caddr_t*)arg;
case FSACTL_LNX_MINIPORT_REV_CHECK:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
error = aac_rev_check(sc, arg);
break;
case FSACTL_QUERY_DISK:
arg = *(caddr_t*)arg;
case FSACTL_LNX_QUERY_DISK:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
error = aac_query_disk(sc, arg);
break;
case FSACTL_DELETE_DISK:
case FSACTL_LNX_DELETE_DISK:
/*
* We don't trust userland to tell us when to delete a
* container; rather, we rely on an AIF coming from the
* controller.
*/
error = 0;
break;
case FSACTL_GET_PCI_INFO:
arg = *(caddr_t*)arg;
case FSACTL_LNX_GET_PCI_INFO:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
error = aac_get_pci_info(sc, arg);
break;
case FSACTL_GET_FEATURES:
arg = *(caddr_t*)arg;
case FSACTL_LNX_GET_FEATURES:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
error = aac_supported_features(sc, arg);
break;
default:
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
error = EINVAL;
break;
}
return(error);
}
static int
aac_poll(struct cdev *dev, int poll_events, struct thread *td)
{
struct aac_softc *sc;
struct aac_fib_context *ctx;
int revents;
sc = dev->si_drv1;
revents = 0;
mtx_lock(&sc->aac_io_lock);
if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
revents |= poll_events & (POLLIN | POLLRDNORM);
break;
}
}
}
mtx_unlock(&sc->aac_io_lock);
if (revents == 0) {
if (poll_events & (POLLIN | POLLRDNORM))
selrecord(td, &sc->rcv_select);
}
return (revents);
}
static void
aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
{
switch (event->ev_type) {
case AAC_EVENT_CMFREE:
mtx_assert(&sc->aac_io_lock, MA_OWNED);
if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
aacraid_add_event(sc, event);
return;
}
free(event, M_AACRAIDBUF);
wakeup(arg);
break;
default:
break;
}
}
/*
* Send a FIB supplied from userspace
*/
static int
aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
{
struct aac_command *cm;
int size, error;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
cm = NULL;
/*
* Get a command
*/
mtx_lock(&sc->aac_io_lock);
if (aacraid_alloc_command(sc, &cm)) {
struct aac_event *event;
event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
M_NOWAIT | M_ZERO);
if (event == NULL) {
error = EBUSY;
mtx_unlock(&sc->aac_io_lock);
goto out;
}
event->ev_type = AAC_EVENT_CMFREE;
event->ev_callback = aac_ioctl_event;
event->ev_arg = &cm;
aacraid_add_event(sc, event);
msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
}
mtx_unlock(&sc->aac_io_lock);
/*
* Fetch the FIB header, then re-copy to get data as well.
*/
if ((error = copyin(ufib, cm->cm_fib,
sizeof(struct aac_fib_header))) != 0)
goto out;
size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
if (size > sc->aac_max_fib_size) {
device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
size, sc->aac_max_fib_size);
size = sc->aac_max_fib_size;
}
if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
goto out;
cm->cm_fib->Header.Size = size;
cm->cm_timestamp = time_uptime;
cm->cm_datalen = 0;
/*
* Pass the FIB to the controller, wait for it to complete.
*/
mtx_lock(&sc->aac_io_lock);
error = aacraid_wait_command(cm);
mtx_unlock(&sc->aac_io_lock);
if (error != 0) {
device_printf(sc->aac_dev,
"aacraid_wait_command return %d\n", error);
goto out;
}
/*
* Copy the FIB and data back out to the caller.
*/
size = cm->cm_fib->Header.Size;
if (size > sc->aac_max_fib_size) {
device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
size, sc->aac_max_fib_size);
size = sc->aac_max_fib_size;
}
error = copyout(cm->cm_fib, ufib, size);
out:
if (cm != NULL) {
mtx_lock(&sc->aac_io_lock);
aacraid_release_command(cm);
mtx_unlock(&sc->aac_io_lock);
}
return(error);
}
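/*
 * Illustrative sketch (not part of this driver): the two-stage copyin
 * pattern used by aac_ioctl_sendfib() above - fetch only the fixed
 * header first to learn the full length, clamp it to the driver
 * maximum, then copy the whole object.  example_* is hypothetical.
 */
static int
example_copyin_fib(caddr_t ufib, struct aac_fib *kfib, int max_size)
{
	int error, size;

	if ((error = copyin(ufib, kfib, sizeof(struct aac_fib_header))) != 0)
		return (error);
	size = kfib->Header.Size + sizeof(struct aac_fib_header);
	if (size > max_size)
		size = max_size;	/* never copy past the kernel buffer */
	return (copyin(ufib, kfib, size));
}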
/*
* Send a passthrough FIB supplied from userspace
*/
static int
aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
{
struct aac_command *cm;
struct aac_fib *fib;
struct aac_srb *srbcmd;
struct aac_srb *user_srb = (struct aac_srb *)arg;
void *user_reply;
int error, transfer_data = 0;
bus_dmamap_t orig_map = 0;
u_int32_t fibsize = 0;
u_int64_t srb_sg_address;
u_int32_t srb_sg_bytecount;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
cm = NULL;
mtx_lock(&sc->aac_io_lock);
if (aacraid_alloc_command(sc, &cm)) {
struct aac_event *event;
event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
M_NOWAIT | M_ZERO);
if (event == NULL) {
error = EBUSY;
mtx_unlock(&sc->aac_io_lock);
goto out;
}
event->ev_type = AAC_EVENT_CMFREE;
event->ev_callback = aac_ioctl_event;
event->ev_arg = &cm;
aacraid_add_event(sc, event);
msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
}
mtx_unlock(&sc->aac_io_lock);
cm->cm_data = NULL;
/* save original dma map */
orig_map = cm->cm_datamap;
fib = cm->cm_fib;
srbcmd = (struct aac_srb *)fib->data;
if ((error = copyin((void *)&user_srb->data_len, &fibsize,
sizeof (u_int32_t)) != 0))
goto out;
if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
error = EINVAL;
goto out;
}
if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0))
goto out;
srbcmd->function = 0; /* SRBF_ExecuteScsi */
srbcmd->retry_limit = 0; /* obsolete */
/* only one sg element from userspace supported */
if (srbcmd->sg_map.SgCount > 1) {
error = EINVAL;
goto out;
}
/* check fibsize */
if (fibsize == (sizeof(struct aac_srb) +
srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
struct aac_sg_entry sg;
if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
goto out;
srb_sg_bytecount = sg.SgByteCount;
srb_sg_address = (u_int64_t)sg.SgAddress;
} else if (fibsize == (sizeof(struct aac_srb) +
srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
#ifdef __LP64__
struct aac_sg_entry64 *sgp =
(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
struct aac_sg_entry64 sg;
if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
goto out;
srb_sg_bytecount = sg.SgByteCount;
srb_sg_address = sg.SgAddress;
if (srb_sg_address > 0xffffffffull &&
!(sc->flags & AAC_FLAGS_SG_64BIT))
#endif
{
error = EINVAL;
goto out;
}
} else {
error = EINVAL;
goto out;
}
user_reply = (char *)arg + fibsize;
srbcmd->data_len = srb_sg_bytecount;
if (srbcmd->sg_map.SgCount == 1)
transfer_data = 1;
if (transfer_data) {
/*
* Create DMA tag for the passthr. data buffer and allocate it.
*/
if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
(sc->flags & AAC_FLAGS_SG_64BIT) ?
BUS_SPACE_MAXADDR_32BIT :
0x7fffffff, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
srb_sg_bytecount, /* size */
sc->aac_sg_tablesize, /* nsegments */
srb_sg_bytecount, /* maxsegsize */
0, /* flags */
NULL, NULL, /* No locking needed */
&cm->cm_passthr_dmat)) {
error = ENOMEM;
goto out;
}
if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
BUS_DMA_NOWAIT, &cm->cm_datamap)) {
error = ENOMEM;
goto out;
}
/* fill some cm variables */
cm->cm_datalen = srb_sg_bytecount;
if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
cm->cm_flags |= AAC_CMD_DATAIN;
if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
cm->cm_flags |= AAC_CMD_DATAOUT;
if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
if ((error = copyin((void *)(uintptr_t)srb_sg_address,
cm->cm_data, cm->cm_datalen)) != 0)
goto out;
/* sync required for bus_dmamem_alloc() alloc. mem.? */
bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
BUS_DMASYNC_PREWRITE);
}
}
/* build the FIB */
fib->Header.Size = sizeof(struct aac_fib_header) +
sizeof(struct aac_srb);
fib->Header.XferState =
AAC_FIBSTATE_HOSTOWNED |
AAC_FIBSTATE_INITIALISED |
AAC_FIBSTATE_EMPTY |
AAC_FIBSTATE_FROMHOST |
AAC_FIBSTATE_REXPECTED |
AAC_FIBSTATE_NORM |
AAC_FIBSTATE_ASYNC;
fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
ScsiPortCommandU64 : ScsiPortCommand;
cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
/* send command */
if (transfer_data) {
bus_dmamap_load(cm->cm_passthr_dmat,
cm->cm_datamap, cm->cm_data,
cm->cm_datalen,
aacraid_map_command_sg, cm, 0);
} else {
aacraid_map_command_sg(cm, NULL, 0, 0);
}
/* wait for completion */
mtx_lock(&sc->aac_io_lock);
while (!(cm->cm_flags & AAC_CMD_COMPLETED))
msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
mtx_unlock(&sc->aac_io_lock);
/* copy data */
if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
if ((error = copyout(cm->cm_data,
(void *)(uintptr_t)srb_sg_address,
cm->cm_datalen)) != 0)
goto out;
/* sync required for bus_dmamem_alloc() allocated mem.? */
bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
BUS_DMASYNC_POSTREAD);
}
/* status */
error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
out:
if (cm && cm->cm_data) {
if (transfer_data)
bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
cm->cm_datamap = orig_map;
}
if (cm && cm->cm_passthr_dmat)
bus_dma_tag_destroy(cm->cm_passthr_dmat);
if (cm) {
mtx_lock(&sc->aac_io_lock);
aacraid_release_command(cm);
mtx_unlock(&sc->aac_io_lock);
}
return(error);
}
/*
* Request an AIF from the controller (new comm. type1)
*/
static void
aac_request_aif(struct aac_softc *sc)
{
struct aac_command *cm;
struct aac_fib *fib;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
if (aacraid_alloc_command(sc, &cm)) {
sc->aif_pending = 1;
return;
}
sc->aif_pending = 0;
/* build the FIB */
fib = cm->cm_fib;
fib->Header.Size = sizeof(struct aac_fib);
fib->Header.XferState =
AAC_FIBSTATE_HOSTOWNED |
AAC_FIBSTATE_INITIALISED |
AAC_FIBSTATE_EMPTY |
AAC_FIBSTATE_FROMHOST |
AAC_FIBSTATE_REXPECTED |
AAC_FIBSTATE_NORM |
AAC_FIBSTATE_ASYNC;
/* set AIF marker */
fib->Header.Handle = 0x00800000;
fib->Header.Command = AifRequest;
((struct aac_aif_command *)fib->data)->command = AifReqEvent;
aacraid_map_command_sg(cm, NULL, 0, 0);
}
#if __FreeBSD_version >= 702000
/*
* cdevpriv interface private destructor.
*/
static void
aac_cdevpriv_dtor(void *arg)
{
struct aac_softc *sc;
sc = arg;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_lock(&Giant);
device_unbusy(sc->aac_dev);
mtx_unlock(&Giant);
}
#else
static int
aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
struct aac_softc *sc;
sc = dev->si_drv1;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
return 0;
}
#endif
/*
* Handle an AIF sent to us by the controller; queue it for later reference.
* If the queue fills up, then drop the older entries.
*/
static void
aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
{
struct aac_aif_command *aif;
struct aac_container *co, *co_next;
struct aac_fib_context *ctx;
struct aac_fib *sync_fib;
struct aac_mntinforesp mir;
int next, current, found;
int count = 0, changed = 0, i = 0;
u_int32_t channel, uid;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
aif = (struct aac_aif_command*)&fib->data[0];
aacraid_print_aif(sc, aif);
/* Is it an event that we should care about? */
switch (aif->command) {
case AifCmdEventNotify:
switch (aif->data.EN.type) {
case AifEnAddContainer:
case AifEnDeleteContainer:
/*
* A container was added or deleted, but the message
* doesn't tell us anything else! Re-enumerate the
* containers and sort things out.
*/
aac_alloc_sync_fib(sc, &sync_fib);
do {
/*
* Ask the controller for its containers one at
* a time.
* XXX What if the controller's list changes
* midway through this enumeration?
* XXX This should be done async.
*/
if (aac_get_container_info(sc, sync_fib, i,
&mir, &uid) != 0)
continue;
if (i == 0)
count = mir.MntRespCount;
/*
* Check the container against our list.
* co->co_found was already set to 0 in a
* previous run.
*/
if ((mir.Status == ST_OK) &&
(mir.MntTable[0].VolType != CT_NONE)) {
found = 0;
TAILQ_FOREACH(co,
&sc->aac_container_tqh,
co_link) {
if (co->co_mntobj.ObjectId ==
mir.MntTable[0].ObjectId) {
co->co_found = 1;
found = 1;
break;
}
}
/*
* If the container matched, continue
* in the list.
*/
if (found) {
i++;
continue;
}
/*
* This is a new container. Do all the
* appropriate things to set it up.
*/
aac_add_container(sc, &mir, 1, uid);
changed = 1;
}
i++;
} while ((i < count) && (i < AAC_MAX_CONTAINERS));
aac_release_sync_fib(sc);
/*
* Go through our list of containers and see which ones
* were not marked 'found'. Since the controller didn't
* list them they must have been deleted. Do the
* appropriate steps to destroy the device. Also reset
* the co->co_found field.
*/
co = TAILQ_FIRST(&sc->aac_container_tqh);
while (co != NULL) {
if (co->co_found == 0) {
co_next = TAILQ_NEXT(co, co_link);
TAILQ_REMOVE(&sc->aac_container_tqh, co,
co_link);
free(co, M_AACRAIDBUF);
changed = 1;
co = co_next;
} else {
co->co_found = 0;
co = TAILQ_NEXT(co, co_link);
}
}
/* Attach the newly created containers */
if (changed) {
if (sc->cam_rescan_cb != NULL)
sc->cam_rescan_cb(sc, 0,
AAC_CAM_TARGET_WILDCARD);
}
break;
case AifEnEnclosureManagement:
switch (aif->data.EN.data.EEE.eventType) {
case AIF_EM_DRIVE_INSERTION:
case AIF_EM_DRIVE_REMOVAL:
channel = aif->data.EN.data.EEE.unitID;
if (sc->cam_rescan_cb != NULL)
sc->cam_rescan_cb(sc,
((channel>>24) & 0xF) + 1,
(channel & 0xFFFF));
break;
}
break;
case AifEnAddJBOD:
case AifEnDeleteJBOD:
case AifRawDeviceRemove:
channel = aif->data.EN.data.ECE.container;
if (sc->cam_rescan_cb != NULL)
sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
AAC_CAM_TARGET_WILDCARD);
break;
default:
break;
}
default:
break;
}
/* Copy the AIF data to the AIF queue for ioctl retrieval */
current = sc->aifq_idx;
next = (current + 1) % AAC_AIFQ_LENGTH;
if (next == 0)
sc->aifq_filled = 1;
bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
/* modify AIF contexts */
if (sc->aifq_filled) {
for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
if (next == ctx->ctx_idx)
ctx->ctx_wrap = 1;
else if (current == ctx->ctx_idx && ctx->ctx_wrap)
ctx->ctx_idx = next;
}
}
sc->aifq_idx = next;
/* On the off chance that someone is sleeping for an aif... */
if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
wakeup(sc->aac_aifq);
/* Wakeup any poll()ers */
selwakeuppri(&sc->rcv_select, PRIBIO);
return;
}
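/*
 * Illustrative sketch (not part of this driver): the AIF ring index
 * arithmetic used by aac_handle_aif() above and aac_return_aif() below.
 * The writer advances aifq_idx modulo AAC_AIFQ_LENGTH; a reader context
 * is empty when its index has caught up with the writer and it has not
 * been lapped (ctx_wrap == 0).  example_* names are hypothetical.
 */
static int
example_aifq_next(int current)
{
	return ((current + 1) % AAC_AIFQ_LENGTH);
}

static int
example_aifq_empty(int reader_idx, int writer_idx, int wrapped)
{
	return (reader_idx == writer_idx && !wrapped);
}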
/*
* Return the Revision of the driver to userspace and check to see if the
* userspace app is possibly compatible. This is extremely bogus since
* our driver doesn't follow Adaptec's versioning system. Cheat by just
* returning what the card reported.
*/
static int
aac_rev_check(struct aac_softc *sc, caddr_t udata)
{
struct aac_rev_check rev_check;
struct aac_rev_check_resp rev_check_resp;
int error = 0;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/*
* Copyin the revision struct from userspace
*/
if ((error = copyin(udata, (caddr_t)&rev_check,
sizeof(struct aac_rev_check))) != 0) {
return error;
}
fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
rev_check.callingRevision.buildNumber);
/*
* Doctor up the response struct.
*/
rev_check_resp.possiblyCompatible = 1;
rev_check_resp.adapterSWRevision.external.comp.major =
AAC_DRIVER_MAJOR_VERSION;
rev_check_resp.adapterSWRevision.external.comp.minor =
AAC_DRIVER_MINOR_VERSION;
rev_check_resp.adapterSWRevision.external.comp.type =
AAC_DRIVER_TYPE;
rev_check_resp.adapterSWRevision.external.comp.dash =
AAC_DRIVER_BUGFIX_LEVEL;
rev_check_resp.adapterSWRevision.buildNumber =
AAC_DRIVER_BUILD;
return(copyout((caddr_t)&rev_check_resp, udata,
sizeof(struct aac_rev_check_resp)));
}
/*
* Pass the fib context to the caller
*/
static int
aac_open_aif(struct aac_softc *sc, caddr_t arg)
{
struct aac_fib_context *fibctx, *ctx;
int error = 0;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
if (fibctx == NULL)
return (ENOMEM);
mtx_lock(&sc->aac_io_lock);
/* all elements are already 0, add to queue */
if (sc->fibctx == NULL)
sc->fibctx = fibctx;
else {
for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
;
ctx->next = fibctx;
fibctx->prev = ctx;
}
/* evaluate unique value */
fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
ctx = sc->fibctx;
while (ctx != fibctx) {
if (ctx->unique == fibctx->unique) {
fibctx->unique++;
ctx = sc->fibctx;
} else {
ctx = ctx->next;
}
}
error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
mtx_unlock(&sc->aac_io_lock);
if (error)
aac_close_aif(sc, (caddr_t)ctx);
return error;
}
/*
* Close the caller's fib context
*/
static int
aac_close_aif(struct aac_softc *sc, caddr_t arg)
{
struct aac_fib_context *ctx;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_lock(&sc->aac_io_lock);
for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
if (ctx->unique == *(uint32_t *)&arg) {
if (ctx == sc->fibctx)
sc->fibctx = NULL;
else {
ctx->prev->next = ctx->next;
if (ctx->next)
ctx->next->prev = ctx->prev;
}
break;
}
}
if (ctx)
free(ctx, M_AACRAIDBUF);
mtx_unlock(&sc->aac_io_lock);
return 0;
}
/*
* Pass the caller the next AIF in their queue
*/
static int
aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
{
struct get_adapter_fib_ioctl agf;
struct aac_fib_context *ctx;
int error;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_lock(&sc->aac_io_lock);
if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
if (agf.AdapterFibContext == ctx->unique)
break;
}
if (!ctx) {
mtx_unlock(&sc->aac_io_lock);
return (EFAULT);
}
error = aac_return_aif(sc, ctx, agf.AifFib);
if (error == EAGAIN && agf.Wait) {
fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
sc->aac_state |= AAC_STATE_AIF_SLEEPER;
while (error == EAGAIN) {
mtx_unlock(&sc->aac_io_lock);
error = tsleep(sc->aac_aifq, PRIBIO |
PCATCH, "aacaif", 0);
mtx_lock(&sc->aac_io_lock);
if (error == 0)
error = aac_return_aif(sc, ctx, agf.AifFib);
}
sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
}
}
mtx_unlock(&sc->aac_io_lock);
return(error);
}
/*
* Hand the next AIF off the top of the queue out to userspace.
*/
static int
aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
{
int current, error;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
current = ctx->ctx_idx;
if (current == sc->aifq_idx && !ctx->ctx_wrap) {
/* empty */
return (EAGAIN);
}
error =
copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
if (error)
device_printf(sc->aac_dev,
"aac_return_aif: copyout returned %d\n", error);
else {
ctx->ctx_wrap = 0;
ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
}
return(error);
}
static int
aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
{
struct aac_pci_info {
u_int32_t bus;
u_int32_t slot;
} pciinf;
int error;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
pciinf.bus = pci_get_bus(sc->aac_dev);
pciinf.slot = pci_get_slot(sc->aac_dev);
error = copyout((caddr_t)&pciinf, uptr,
sizeof(struct aac_pci_info));
return (error);
}
static int
aac_supported_features(struct aac_softc *sc, caddr_t uptr)
{
struct aac_features f;
int error;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
if ((error = copyin(uptr, &f, sizeof (f))) != 0)
return (error);
/*
* When the FSACTL_GET_FEATURES ioctl arrives with featuresState all
* zero, the driver returns the current state of all supported
* features; the data field is not valid in that case.
* When the ioctl arrives with a specific bit set in featuresState,
* the driver returns the current state of that feature, along with
* any data associated with it in the data field, or performs
* whatever action the data field indicates.
*/
if (f.feat.fValue == 0) {
f.feat.fBits.largeLBA =
(sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
f.feat.fBits.JBODSupport = 1;
/* TODO: In the future, add other features state here as well */
} else {
if (f.feat.fBits.largeLBA)
f.feat.fBits.largeLBA =
(sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
/* TODO: Add other features state and data in the future */
}
error = copyout(&f, uptr, sizeof (f));
return (error);
}
/*
* Give the userland some information about the container. The AAC arch
* expects the driver to be a SCSI passthrough type driver, so it expects
* the containers to have b:t:l numbers. Fake it.
*/
static int
aac_query_disk(struct aac_softc *sc, caddr_t uptr)
{
struct aac_query_disk query_disk;
struct aac_container *co;
int error, id;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_lock(&sc->aac_io_lock);
error = copyin(uptr, (caddr_t)&query_disk,
sizeof(struct aac_query_disk));
if (error) {
mtx_unlock(&sc->aac_io_lock);
return (error);
}
id = query_disk.ContainerNumber;
if (id == -1) {
mtx_unlock(&sc->aac_io_lock);
return (EINVAL);
}
TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
if (co->co_mntobj.ObjectId == id)
break;
}
if (co == NULL) {
query_disk.Valid = 0;
query_disk.Locked = 0;
query_disk.Deleted = 1; /* XXX is this right? */
} else {
query_disk.Valid = 1;
query_disk.Locked = 1;
query_disk.Deleted = 0;
query_disk.Bus = device_get_unit(sc->aac_dev);
query_disk.Target = 0;
query_disk.Lun = 0;
query_disk.UnMapped = 0;
}
error = copyout((caddr_t)&query_disk, uptr,
sizeof(struct aac_query_disk));
mtx_unlock(&sc->aac_io_lock);
return (error);
}
static void
aac_container_bus(struct aac_softc *sc)
{
struct aac_sim *sim;
device_t child;
sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
M_AACRAIDBUF, M_NOWAIT | M_ZERO);
if (sim == NULL) {
device_printf(sc->aac_dev,
"No memory to add container bus\n");
panic("Out of memory?!");
}
child = device_add_child(sc->aac_dev, "aacraidp", -1);
if (child == NULL) {
device_printf(sc->aac_dev,
"device_add_child failed for container bus\n");
free(sim, M_AACRAIDBUF);
panic("Out of memory?!");
}
sim->TargetsPerBus = AAC_MAX_CONTAINERS;
sim->BusNumber = 0;
sim->BusType = CONTAINER_BUS;
sim->InitiatorBusId = -1;
sim->aac_sc = sc;
sim->sim_dev = child;
sim->aac_cam = NULL;
device_set_ivars(child, sim);
device_set_desc(child, "Container Bus");
TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
/*
device_set_desc(child, aac_describe_code(aac_container_types,
mir->MntTable[0].VolType));
*/
bus_generic_attach(sc->aac_dev);
}
static void
aac_get_bus_info(struct aac_softc *sc)
{
struct aac_fib *fib;
struct aac_ctcfg *c_cmd;
struct aac_ctcfg_resp *c_resp;
struct aac_vmioctl *vmi;
struct aac_vmi_businf_resp *vmi_resp;
struct aac_getbusinf businfo;
struct aac_sim *caminf;
device_t child;
int i, error;
mtx_lock(&sc->aac_io_lock);
aac_alloc_sync_fib(sc, &fib);
c_cmd = (struct aac_ctcfg *)&fib->data[0];
bzero(c_cmd, sizeof(struct aac_ctcfg));
c_cmd->Command = VM_ContainerConfig;
c_cmd->cmd = CT_GET_SCSI_METHOD;
c_cmd->param = 0;
error = aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof(struct aac_ctcfg));
if (error) {
device_printf(sc->aac_dev, "Error %d sending "
"VM_ContainerConfig command\n", error);
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
return;
}
c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
if (c_resp->Status != ST_OK) {
device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
c_resp->Status);
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
return;
}
sc->scsi_method_id = c_resp->param;
vmi = (struct aac_vmioctl *)&fib->data[0];
bzero(vmi, sizeof(struct aac_vmioctl));
vmi->Command = VM_Ioctl;
vmi->ObjType = FT_DRIVE;
vmi->MethId = sc->scsi_method_id;
vmi->ObjId = 0;
vmi->IoctlCmd = GetBusInfo;
error = aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof(struct aac_vmi_businf_resp));
if (error) {
device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
error);
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
return;
}
vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
if (vmi_resp->Status != ST_OK) {
device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
vmi_resp->Status);
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
return;
}
bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
aac_release_sync_fib(sc);
mtx_unlock(&sc->aac_io_lock);
for (i = 0; i < businfo.BusCount; i++) {
if (businfo.BusValid[i] != AAC_BUS_VALID)
continue;
caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
M_AACRAIDBUF, M_NOWAIT | M_ZERO);
if (caminf == NULL) {
device_printf(sc->aac_dev,
"No memory to add passthrough bus %d\n", i);
break;
}
child = device_add_child(sc->aac_dev, "aacraidp", -1);
if (child == NULL) {
device_printf(sc->aac_dev,
"device_add_child failed for passthrough bus %d\n",
i);
free(caminf, M_AACRAIDBUF);
break;
}
caminf->TargetsPerBus = businfo.TargetsPerBus;
caminf->BusNumber = i+1;
caminf->BusType = PASSTHROUGH_BUS;
caminf->InitiatorBusId = businfo.InitiatorBusId[i];
caminf->aac_sc = sc;
caminf->sim_dev = child;
caminf->aac_cam = NULL;
device_set_ivars(child, caminf);
device_set_desc(child, "SCSI Passthrough Bus");
TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
}
}
/*
* Check to see if the kernel is up and running. If we are in a
* BlinkLED state, return the BlinkLED code.
*/
static u_int32_t
aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
{
u_int32_t ret;
ret = AAC_GET_FWSTATUS(sc);
if (ret & AAC_UP_AND_RUNNING)
ret = 0;
else if (ret & AAC_KERNEL_PANIC && bled)
*bled = (ret >> 16) & 0xff;
return (ret);
}
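/*
 * Illustrative sketch only, not part of the driver: how a caller might
 * interpret the value returned by aac_check_adapter_health().  The
 * up/panic masks and the 8-bit BlinkLED code in bits 16-23 follow the
 * logic above; the printf reporting is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static void
report_health_sketch(uint32_t fwstatus, uint32_t up_mask, uint32_t panic_mask)
{
	if (fwstatus & up_mask)
		printf("adapter up and running\n");
	else if (fwstatus & panic_mask)
		printf("firmware panic, BlinkLED code 0x%02x\n",
		    (fwstatus >> 16) & 0xff);
	else
		printf("adapter not up (status 0x%08x)\n", fwstatus);
}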
/*
* After an IOP reset the card basically has to be re-initialized as if
* it were coming up from a cold boot, and the driver is responsible for
* any I/O that was outstanding to the adapter at the time of the IOP
* RESET. The init code is kept modular so it can be called from
* multiple places, which prepares the driver for an IOP RESET.
*/
static int
aac_reset_adapter(struct aac_softc *sc)
{
struct aac_command *cm;
struct aac_fib *fib;
struct aac_pause_command *pc;
u_int32_t status, reset_mask, waitCount, max_msix_orig;
int msi_enabled_orig;
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
mtx_assert(&sc->aac_io_lock, MA_OWNED);
if (sc->aac_state & AAC_STATE_RESET) {
device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
return (EINVAL);
}
sc->aac_state |= AAC_STATE_RESET;
/* disable interrupt */
AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
/*
* Abort all pending commands:
* a) on the controller
*/
while ((cm = aac_dequeue_busy(sc)) != NULL) {
cm->cm_flags |= AAC_CMD_RESET;
/* is there a completion handler? */
if (cm->cm_complete != NULL) {
cm->cm_complete(cm);
} else {
/* assume that someone is sleeping on this
* command
*/
wakeup(cm);
}
}
/* b) in the waiting queues */
while ((cm = aac_dequeue_ready(sc)) != NULL) {
cm->cm_flags |= AAC_CMD_RESET;
/* is there a completion handler? */
if (cm->cm_complete != NULL) {
cm->cm_complete(cm);
} else {
/* assume that someone is sleeping on this
* command
*/
wakeup(cm);
}
}
/* flush drives */
if (aac_check_adapter_health(sc, NULL) == 0) {
mtx_unlock(&sc->aac_io_lock);
(void) aacraid_shutdown(sc->aac_dev);
mtx_lock(&sc->aac_io_lock);
}
/* execute IOP reset */
if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
/* We need to wait for 5 seconds before accessing the MU again;
* 5 * 10000 iterations * 100us = 5,000,000us = 5s
*/
waitCount = 5 * 10000;
while (waitCount) {
DELAY(100); /* delay 100 microseconds */
waitCount--;
}
} else if ((aacraid_sync_command(sc,
AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
/* call IOP_RESET for older firmware */
if ((aacraid_sync_command(sc,
AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {
if (status == AAC_SRB_STS_INVALID_REQUEST)
device_printf(sc->aac_dev, "IOP_RESET not supported\n");
else
/* probably timeout */
device_printf(sc->aac_dev, "IOP_RESET failed\n");
/* unwind aac_shutdown() */
aac_alloc_sync_fib(sc, &fib);
pc = (struct aac_pause_command *)&fib->data[0];
pc->Command = VM_ContainerConfig;
pc->Type = CT_PAUSE_IO;
pc->Timeout = 1;
pc->Min = 1;
pc->NoRescan = 1;
(void) aac_sync_fib(sc, ContainerCommand, 0, fib,
sizeof (struct aac_pause_command));
aac_release_sync_fib(sc);
goto finish;
}
} else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
/*
* We need to wait for 5 seconds before accessing the doorbell
* again; 5 * 10000 iterations * 100us = 5,000,000us = 5s
*/
waitCount = 5 * 10000;
while (waitCount) {
DELAY(100); /* delay 100 microseconds */
waitCount--;
}
}
/*
* Initialize the adapter.
*/
max_msix_orig = sc->aac_max_msix;
msi_enabled_orig = sc->msi_enabled;
sc->msi_enabled = FALSE;
if (aac_check_firmware(sc) != 0)
goto finish;
if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
sc->aac_max_msix = max_msix_orig;
if (msi_enabled_orig) {
sc->msi_enabled = msi_enabled_orig;
AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
}
mtx_unlock(&sc->aac_io_lock);
aac_init(sc);
mtx_lock(&sc->aac_io_lock);
}
finish:
sc->aac_state &= ~AAC_STATE_RESET;
AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
aacraid_startio(sc);
return (0);
}
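/*
 * Illustrative sketch only, not part of the driver: the two 5-second
 * busy-waits above expressed as a helper.  delay_us() is a hypothetical
 * stand-in for the kernel's DELAY(); the arithmetic (5 * 10000 iterations
 * of 100us = 5,000,000us = 5s) matches the waitCount loops in
 * aac_reset_adapter().
 */
static void
busy_wait_seconds(unsigned int seconds, void (*delay_us)(unsigned int))
{
	unsigned int iters = seconds * 10000;	/* 10000 * 100us = 1 second */

	while (iters-- > 0)
		delay_us(100);			/* 100 microseconds per step */
}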
Index: head/sys/dev/advansys/advansys.c
===================================================================
--- head/sys/dev/advansys/advansys.c (revision 328217)
+++ head/sys/dev/advansys/advansys.c (revision 328218)
@@ -1,1404 +1,1404 @@
/*-
* Generic driver for the Advanced Systems Inc. SCSI controllers
* Product specific probe and attach routines can be found in:
*
* i386/isa/adv_isa.c ABP5140, ABP542, ABP5150, ABP842, ABP852
* pci/adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
* ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
* ABP970, ABP970U
*
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1996-2000 Justin Gibbs.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*-
* Ported from:
* advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
*
* Copyright (c) 1995-1997 Advanced System Products, Inc.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistributions of source
* code retain the above copyright notice and this comment without
* modification.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <dev/advansys/advansys.h>
static void adv_action(struct cam_sim *sim, union ccb *ccb);
static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
int nsegments, int error);
static void adv_intr_locked(struct adv_softc *adv);
static void adv_poll(struct cam_sim *sim);
static void adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv);
static void adv_destroy_ccb_info(struct adv_softc *adv,
struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
struct adv_ccb_info *cinfo;
if (!dumping)
mtx_assert(&adv->lock, MA_OWNED);
if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
} else {
cinfo = adv_alloc_ccb_info(adv);
}
return (cinfo);
}
static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
if (!dumping)
mtx_assert(&adv->lock, MA_OWNED);
cinfo->state = ACCB_FREE;
SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
}
static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
if (adv->state == 0)
xpt_freeze_simq(adv->sim, /*count*/1);
adv->state |= state;
}
static __inline void
adv_clear_state(struct adv_softc *adv, union ccb* ccb)
{
if (adv->state != 0)
adv_clear_state_really(adv, ccb);
}
static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
if (!dumping)
mtx_assert(&adv->lock, MA_OWNED);
if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
int openings;
openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
if (openings >= adv->openings_needed) {
adv->state &= ~ADV_RESOURCE_SHORTAGE;
adv->openings_needed = 0;
}
}
if ((adv->state & ADV_IN_TIMEOUT) != 0) {
struct adv_ccb_info *cinfo;
cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
struct ccb_hdr *ccb_h;
/*
* We now traverse our list of pending CCBs
* and reinstate their timeouts.
*/
ccb_h = LIST_FIRST(&adv->pending_ccbs);
while (ccb_h != NULL) {
cinfo = ccb_h->ccb_cinfo_ptr;
callout_reset_sbt(&cinfo->timer,
SBT_1MS * ccb_h->timeout, 0,
adv_timeout, ccb_h, 0);
ccb_h = LIST_NEXT(ccb_h, sim_links.le);
}
adv->state &= ~ADV_IN_TIMEOUT;
device_printf(adv->dev, "No longer in timeout\n");
}
}
if (adv->state == 0)
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t* physaddr;
physaddr = (bus_addr_t*)arg;
*physaddr = segs->ds_addr;
}
static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
struct adv_softc *adv;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));
adv = (struct adv_softc *)cam_sim_softc(sim);
mtx_assert(&adv->lock, MA_OWNED);
switch (ccb->ccb_h.func_code) {
/* Common cases first */
case XPT_SCSI_IO: /* Execute the requested I/O operation */
{
struct ccb_hdr *ccb_h;
struct ccb_scsiio *csio;
struct adv_ccb_info *cinfo;
int error;
ccb_h = &ccb->ccb_h;
csio = &ccb->csio;
cinfo = adv_get_ccb_info(adv);
if (cinfo == NULL)
panic("XXX Handle CCB info error!!!");
ccb_h->ccb_cinfo_ptr = cinfo;
cinfo->ccb = ccb;
error = bus_dmamap_load_ccb(adv->buffer_dmat,
cinfo->dmamap,
ccb,
adv_execute_ccb,
csio, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the controller
* queue until our mapping is returned.
*/
adv_set_state(adv, ADV_BUSDMA_BLOCK);
}
break;
}
case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
case XPT_ABORT: /* Abort the specified CCB */
/* XXX Implement */
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
#define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS)
case XPT_SET_TRAN_SETTINGS:
{
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_spi *spi;
struct ccb_trans_settings *cts;
target_bit_vector targ_mask;
struct adv_transinfo *tconf;
u_int update_type;
cts = &ccb->cts;
targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
update_type = 0;
/*
* The user must specify which type of settings he wishes
* to change.
*/
if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) {
tconf = &adv->tinfo[cts->ccb_h.target_id].current;
update_type |= ADV_TRANS_GOAL;
} else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
tconf = &adv->tinfo[cts->ccb_h.target_id].user;
update_type |= ADV_TRANS_USER;
} else {
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
scsi = &cts->proto_specific.scsi;
spi = &cts->xport_specific.spi;
if ((update_type & ADV_TRANS_GOAL) != 0) {
if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
adv->disc_enable |= targ_mask;
else
adv->disc_enable &= ~targ_mask;
adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
adv->disc_enable);
}
if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
adv->cmd_qng_enabled |= targ_mask;
else
adv->cmd_qng_enabled &= ~targ_mask;
}
}
if ((update_type & ADV_TRANS_USER) != 0) {
if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
if ((spi->flags & CTS_SPI_VALID_DISC) != 0)
adv->user_disc_enable |= targ_mask;
else
adv->user_disc_enable &= ~targ_mask;
}
if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
adv->user_cmd_qng_enabled |= targ_mask;
else
adv->user_cmd_qng_enabled &= ~targ_mask;
}
}
/*
* If the user specifies either the sync rate, or offset,
* but not both, the unspecified parameter defaults to its
* current value in transfer negotiations.
*/
if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
|| ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
/*
* If the user provided a sync rate but no offset,
* use the current offset.
*/
if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
spi->sync_offset = tconf->offset;
/*
* If the user provided an offset but no sync rate,
* use the current sync rate.
*/
if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
spi->sync_period = tconf->period;
adv_period_offset_to_sdtr(adv, &spi->sync_period,
&spi->sync_offset,
cts->ccb_h.target_id);
adv_set_syncrate(adv, /*struct cam_path */NULL,
cts->ccb_h.target_id, spi->sync_period,
spi->sync_offset, update_type);
}
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_GET_TRAN_SETTINGS:
/* Get default/user set transfer settings for the target */
{
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_spi *spi;
struct ccb_trans_settings *cts;
struct adv_transinfo *tconf;
target_bit_vector target_mask;
cts = &ccb->cts;
target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
scsi = &cts->proto_specific.scsi;
spi = &cts->xport_specific.spi;
cts->protocol = PROTO_SCSI;
cts->protocol_version = SCSI_REV_2;
cts->transport = XPORT_SPI;
cts->transport_version = 2;
scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
tconf = &adv->tinfo[cts->ccb_h.target_id].current;
if ((adv->disc_enable & target_mask) != 0)
spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
if ((adv->cmd_qng_enabled & target_mask) != 0)
scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
} else {
tconf = &adv->tinfo[cts->ccb_h.target_id].user;
if ((adv->user_disc_enable & target_mask) != 0)
spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
if ((adv->user_cmd_qng_enabled & target_mask) != 0)
scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
}
spi->sync_period = tconf->period;
spi->sync_offset = tconf->offset;
spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
spi->valid = CTS_SPI_VALID_SYNC_RATE
| CTS_SPI_VALID_SYNC_OFFSET
| CTS_SPI_VALID_BUS_WIDTH
| CTS_SPI_VALID_DISC;
scsi->valid = CTS_SCSI_VALID_TQ;
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_CALC_GEOMETRY:
{
int extended;
extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
cam_calc_geometry(&ccb->ccg, extended);
xpt_done(ccb);
break;
}
case XPT_RESET_BUS: /* Reset the specified SCSI bus */
{
adv_stop_execution(adv);
adv_reset_bus(adv, /*initiate_reset*/TRUE);
adv_start_execution(adv);
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_TERM_IO: /* Terminate the I/O process */
/* XXX Implement */
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
case XPT_PATH_INQ: /* Path routing inquiry */
{
struct ccb_pathinq *cpi = &ccb->cpi;
cpi->version_num = 1; /* XXX??? */
cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
cpi->target_sprt = 0;
cpi->hba_misc = 0;
cpi->hba_eng_cnt = 0;
cpi->max_target = 7;
cpi->max_lun = 7;
cpi->initiator_id = adv->scsi_id;
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 3300;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->ccb_h.status = CAM_REQ_CMP;
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_2;
xpt_done(ccb);
break;
}
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
}
/*
* Currently, the output of bus_dmamap_load suits our needs just
* fine, but should it change, we'd need to do something here.
*/
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
int nsegments, int error)
{
struct ccb_scsiio *csio;
struct ccb_hdr *ccb_h;
struct cam_sim *sim;
struct adv_softc *adv;
struct adv_ccb_info *cinfo;
struct adv_scsi_q scsiq;
struct adv_sg_head sghead;
csio = (struct ccb_scsiio *)arg;
ccb_h = &csio->ccb_h;
sim = xpt_path_sim(ccb_h->path);
adv = (struct adv_softc *)cam_sim_softc(sim);
cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;
if (!dumping)
mtx_assert(&adv->lock, MA_OWNED);
/*
* Setup our done routine to release the simq on
* the next ccb that completes.
*/
if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
adv->state |= ADV_BUSDMA_BLOCK_CLEARED;
if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
/* XXX Need phystovirt!!!! */
/* How about pmap_kenter??? */
scsiq.cdbptr = csio->cdb_io.cdb_ptr;
} else {
scsiq.cdbptr = csio->cdb_io.cdb_ptr;
}
} else {
scsiq.cdbptr = csio->cdb_io.cdb_bytes;
}
/*
* Build up the request
*/
scsiq.q1.status = 0;
scsiq.q1.q_no = 0;
scsiq.q1.cntl = 0;
scsiq.q1.sg_queue_cnt = 0;
scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
scsiq.q1.target_lun = ccb_h->target_lun;
scsiq.q1.sense_len = csio->sense_len;
scsiq.q1.extra_bytes = 0;
scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
ccb_h->target_lun);
scsiq.q2.flag = 0;
scsiq.q2.cdb_len = csio->cdb_len;
if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
scsiq.q2.tag_code = csio->tag_action;
else
scsiq.q2.tag_code = 0;
scsiq.q2.vm_id = 0;
if (nsegments != 0) {
bus_dmasync_op_t op;
scsiq.q1.data_addr = dm_segs->ds_addr;
scsiq.q1.data_cnt = dm_segs->ds_len;
if (nsegments > 1) {
scsiq.q1.cntl |= QC_SG_HEAD;
sghead.entry_cnt
= sghead.entry_to_copy
= nsegments;
sghead.res = 0;
sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
scsiq.sg_head = &sghead;
} else {
scsiq.sg_head = NULL;
}
if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
op = BUS_DMASYNC_PREREAD;
else
op = BUS_DMASYNC_PREWRITE;
bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
} else {
scsiq.q1.data_addr = 0;
scsiq.q1.data_cnt = 0;
scsiq.sg_head = NULL;
}
/*
* Last time we need to check if this SCB needs to
* be aborted.
*/
if (ccb_h->status != CAM_REQ_INPROG) {
if (nsegments != 0)
bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
adv_clear_state(adv, (union ccb *)csio);
adv_free_ccb_info(adv, cinfo);
xpt_done((union ccb *)csio);
return;
}
if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
/* Temporary resource shortage */
adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
if (nsegments != 0)
bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
csio->ccb_h.status = CAM_REQUEUE_REQ;
adv_clear_state(adv, (union ccb *)csio);
adv_free_ccb_info(adv, cinfo);
xpt_done((union ccb *)csio);
return;
}
cinfo->state |= ACCB_ACTIVE;
ccb_h->status |= CAM_SIM_QUEUED;
LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
/* Schedule our timeout */
callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0,
adv_timeout, csio, 0);
}
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
int error;
struct adv_ccb_info *cinfo;
cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
cinfo->state = ACCB_FREE;
callout_init_mtx(&cinfo->timer, &adv->lock, 0);
error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
&cinfo->dmamap);
if (error != 0) {
device_printf(adv->dev, "Unable to allocate CCB info "
"dmamap - error %d\n", error);
return (NULL);
}
adv->ccb_infos_allocated++;
return (cinfo);
}
static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
callout_drain(&cinfo->timer);
bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
}
void
adv_timeout(void *arg)
{
union ccb *ccb;
struct adv_softc *adv;
struct adv_ccb_info *cinfo, *cinfo2;
ccb = (union ccb *)arg;
adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
mtx_assert(&adv->lock, MA_OWNED);
xpt_print_path(ccb->ccb_h.path);
printf("Timed out\n");
/* Have we been taken care of already?? */
if (cinfo == NULL || cinfo->state == ACCB_FREE) {
return;
}
adv_stop_execution(adv);
if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
struct ccb_hdr *ccb_h;
/*
* In order to simplify the recovery process, we ask the XPT
* layer to halt the queue of new transactions and we traverse
* the list of pending CCBs and remove their timeouts. This
* means that the driver attempts to clear only one error
* condition at a time. In general, timeouts that occur
* close together are related anyway, so there is no benefit
* in attempting to handle errors in parallel. Timeouts will
* be reinstated when the recovery process ends.
*/
adv_set_state(adv, ADV_IN_TIMEOUT);
/* This CCB is the CCB representing our recovery actions */
cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;
ccb_h = LIST_FIRST(&adv->pending_ccbs);
while (ccb_h != NULL) {
cinfo2 = ccb_h->ccb_cinfo_ptr;
callout_stop(&cinfo2->timer);
ccb_h = LIST_NEXT(ccb_h, sim_links.le);
}
/* XXX Should send a BDR */
/* Attempt an abort as our first tact */
xpt_print_path(ccb->ccb_h.path);
printf("Attempting abort\n");
adv_abort_ccb(adv, ccb->ccb_h.target_id,
ccb->ccb_h.target_lun, ccb,
CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
callout_reset(&cinfo->timer, 2 * hz, adv_timeout, ccb);
} else {
/* Our attempt to perform an abort failed, go for a reset */
xpt_print_path(ccb->ccb_h.path);
printf("Resetting bus\n");
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
adv_reset_bus(adv, /*initiate_reset*/TRUE);
}
adv_start_execution(adv);
}
struct adv_softc *
adv_alloc(device_t dev, struct resource *res, long offset)
{
struct adv_softc *adv = device_get_softc(dev);
/*
* Allocate a storage area for us
*/
LIST_INIT(&adv->pending_ccbs);
SLIST_INIT(&adv->free_ccb_infos);
adv->dev = dev;
adv->res = res;
adv->reg_off = offset;
mtx_init(&adv->lock, "adv", NULL, MTX_DEF);
return(adv);
}
void
adv_free(struct adv_softc *adv)
{
switch (adv->init_level) {
case 6:
{
struct adv_ccb_info *cinfo;
while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
adv_destroy_ccb_info(adv, cinfo);
}
bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
}
case 5:
bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
adv->sense_dmamap);
case 4:
bus_dma_tag_destroy(adv->sense_dmat);
case 3:
bus_dma_tag_destroy(adv->buffer_dmat);
case 2:
bus_dma_tag_destroy(adv->parent_dmat);
case 1:
if (adv->ccb_infos != NULL)
free(adv->ccb_infos, M_DEVBUF);
case 0:
mtx_destroy(&adv->lock);
break;
}
}
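/*
 * Illustrative sketch only, not part of the driver: the init_level
 * teardown pattern used by adv_free() above.  Each case intentionally
 * falls through so that a partially initialised softc is unwound from
 * whatever stage it reached back down to zero.  The stage numbers and
 * comments are hypothetical placeholders.
 */
static void
teardown_by_level_sketch(int init_level)
{
	switch (init_level) {
	case 3:
		/* release resources acquired in stage 3 */
		/* FALLTHROUGH */
	case 2:
		/* release resources acquired in stage 2 */
		/* FALLTHROUGH */
	case 1:
		/* release resources acquired in stage 1 */
		/* FALLTHROUGH */
	case 0:
		/* nothing allocated yet */
		break;
	}
}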
int
adv_init(struct adv_softc *adv)
{
struct adv_eeprom_config eeprom_config;
int checksum, i;
int max_sync;
u_int16_t config_lsw;
u_int16_t config_msw;
mtx_lock(&adv->lock);
adv_lib_init(adv);
/*
* Stop script execution.
*/
adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
adv_stop_execution(adv);
if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
mtx_unlock(&adv->lock);
device_printf(adv->dev,
"Unable to halt adapter. Initialization failed\n");
return (1);
}
ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
mtx_unlock(&adv->lock);
device_printf(adv->dev,
"Unable to set program counter. Initialization failed\n");
return (1);
}
config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
config_msw &= ~ADV_CFG_MSW_CLR_MASK;
/*
* XXX The Linux code flags this as an error,
* but what should we report to the user???
* It seems that clearing the config register
* makes this error recoverable.
*/
ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
}
/* Suck in the configuration from the EEProm */
checksum = adv_get_eeprom_config(adv, &eeprom_config);
if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
/*
* XXX The Linux code sets a warning level for this
* condition, yet nothing of meaning is printed to
* the user. What does this mean???
*/
if (adv->chip_version == 3) {
if (eeprom_config.cfg_lsw != config_lsw)
eeprom_config.cfg_lsw = config_lsw;
if (eeprom_config.cfg_msw != config_msw) {
eeprom_config.cfg_msw = config_msw;
}
}
}
if (checksum == eeprom_config.chksum) {
/* Range/Sanity checking */
if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
}
if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
}
if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
}
if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
}
adv->max_openings = eeprom_config.max_total_qng;
adv->user_disc_enable = eeprom_config.disc_enable;
adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
adv->control = eeprom_config.cntl;
for (i = 0; i <= ADV_MAX_TID; i++) {
u_int8_t sync_data;
if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)
sync_data = 0;
else
sync_data = eeprom_config.sdtr_data[i];
adv_sdtr_to_period_offset(adv,
sync_data,
&adv->tinfo[i].user.period,
&adv->tinfo[i].user.offset,
i);
}
config_lsw = eeprom_config.cfg_lsw;
eeprom_config.cfg_msw = config_msw;
} else {
u_int8_t sync_data;
device_printf(adv->dev, "Warning EEPROM Checksum mismatch. "
"Using default device parameters\n");
/* Set reasonable defaults since we can't read the EEPROM */
adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
adv->disc_enable = TARGET_BIT_VECTOR_SET;
adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
adv->scsi_id = 7;
adv->control = 0xFFFF;
if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
/* Default to no Ultra to support the 3030 */
adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
for (i = 0; i <= ADV_MAX_TID; i++) {
adv_sdtr_to_period_offset(adv, sync_data,
&adv->tinfo[i].user.period,
&adv->tinfo[i].user.offset,
i);
}
config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;
}
config_msw &= ~ADV_CFG_MSW_CLR_MASK;
config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
&& (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)
/* 25ns or 10MHz */
max_sync = 25;
else
/* Unlimited */
max_sync = 0;
for (i = 0; i <= ADV_MAX_TID; i++) {
if (adv->tinfo[i].user.period < max_sync)
adv->tinfo[i].user.period = max_sync;
}
if (adv_test_external_lram(adv) == 0) {
if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
eeprom_config.max_total_qng =
ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
eeprom_config.max_tag_qng =
ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
} else {
eeprom_config.cfg_msw |= 0x0800;
config_msw |= 0x0800;
eeprom_config.max_total_qng =
ADV_MAX_PCI_INRAM_TOTAL_QNG;
eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
}
adv->max_openings = eeprom_config.max_total_qng;
}
ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);
#if 0
/*
* Don't write the eeprom data back for now.
* I'd rather not mess up the user's card. We also don't
* fully sanitize the eeprom settings above for the write-back
* to be 100% correct.
*/
if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
device_printf(adv->dev,
"WARNING! Failure writing to EEPROM.\n");
#endif
adv_set_chip_scsiid(adv, adv->scsi_id);
if (adv_init_lram_and_mcode(adv)) {
mtx_unlock(&adv->lock);
return (1);
}
adv->disc_enable = adv->user_disc_enable;
adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
for (i = 0; i <= ADV_MAX_TID; i++) {
/*
* Start off in async mode.
*/
adv_set_syncrate(adv, /*struct cam_path */NULL,
i, /*period*/0, /*offset*/0,
ADV_TRANS_CUR);
/*
* Enable the use of tagged commands on all targets.
* This allows the kernel driver to make up its own mind
* about when to tag queue instead of having the
* firmware try to second-guess the tag_code settings.
*/
adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
adv->max_openings);
}
adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
device_printf(adv->dev,
"AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
(adv->type & ADV_ULTRA) && (max_sync == 0)
? "Ultra SCSI" : "SCSI",
adv->scsi_id, adv->max_openings);
mtx_unlock(&adv->lock);
return (0);
}
void
adv_intr(void *arg)
{
struct adv_softc *adv;
adv = arg;
mtx_lock(&adv->lock);
adv_intr_locked(adv);
mtx_unlock(&adv->lock);
}
void
adv_intr_locked(struct adv_softc *adv)
{
u_int16_t chipstat;
u_int16_t saved_ram_addr;
u_int8_t ctrl_reg;
u_int8_t saved_ctrl_reg;
u_int8_t host_flag;
if (!dumping)
mtx_assert(&adv->lock, MA_OWNED);
chipstat = ADV_INW(adv, ADV_CHIP_STATUS);
/* Is it for us? */
if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
return;
ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
ADV_CC_TEST));
if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
device_printf(adv->dev, "Detected Bus Reset\n");
adv_reset_bus(adv, /*initiate_reset*/FALSE);
return;
}
if ((chipstat & ADV_CSW_INT_PENDING) != 0) {
saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
host_flag | ADV_HOST_FLAG_IN_ISR);
adv_ack_interrupt(adv);
if ((chipstat & ADV_CSW_HALTED) != 0
&& (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
adv_isr_chip_halted(adv);
saved_ctrl_reg &= ~ADV_CC_HALT;
} else {
adv_run_doneq(adv);
}
ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
panic("adv_intr: Unable to set LRAM addr");
#endif
adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}
static void
adv_run_doneq(struct adv_softc *adv)
{
struct adv_q_done_info scsiq;
u_int doneq_head;
u_int done_qno;
doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
+ ADV_SCSIQ_B_FWD);
while (done_qno != ADV_QLINK_END) {
union ccb* ccb;
struct adv_ccb_info *cinfo;
u_int done_qaddr;
u_int sg_queue_cnt;
done_qaddr = ADV_QNO_TO_QADDR(done_qno);
/* Pull status from this request */
sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
adv->max_dma_count);
/* Mark it as free */
adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
scsiq.q_status & ~(QS_READY|QS_ABORTED));
/* Process request based on retrieved info */
if ((scsiq.cntl & QC_SG_HEAD) != 0) {
u_int i;
/*
* S/G based request. Free all of the queue
* structures that contained S/G information.
*/
for (i = 0; i < sg_queue_cnt; i++) {
done_qno = adv_read_lram_8(adv, done_qaddr
+ ADV_SCSIQ_B_FWD);
#ifdef DIAGNOSTIC
if (done_qno == ADV_QLINK_END) {
panic("adv_qdone: Corrupted SG "
"list encountered");
}
#endif
done_qaddr = ADV_QNO_TO_QADDR(done_qno);
/* Mark SG queue as free */
adv_write_lram_8(adv, done_qaddr
+ ADV_SCSIQ_B_STATUS, QS_FREE);
}
} else
sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
if (adv->cur_active < (sg_queue_cnt + 1))
panic("adv_qdone: Attempting to free more "
"queues than are active");
#endif
adv->cur_active -= sg_queue_cnt + 1;
if ((scsiq.q_status != QS_DONE)
&& (scsiq.q_status & QS_ABORTED) == 0)
panic("adv_qdone: completed scsiq with unknown status");
scsiq.remain_bytes += scsiq.extra_bytes;
if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
(scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
scsiq.d3.done_stat = QD_NO_ERROR;
scsiq.d3.host_stat = QHSTA_NO_ERROR;
}
}
cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
ccb = cinfo->ccb;
ccb->csio.resid = scsiq.remain_bytes;
adv_done(adv, ccb,
scsiq.d3.done_stat, scsiq.d3.host_stat,
scsiq.d3.scsi_stat, scsiq.q_no);
doneq_head = done_qno;
done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
}
adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
u_int host_stat, u_int scsi_status, u_int q_no)
{
struct adv_ccb_info *cinfo;
if (!dumping)
mtx_assert(&adv->lock, MA_OWNED);
cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
LIST_REMOVE(&ccb->ccb_h, sim_links.le);
callout_stop(&cinfo->timer);
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
op = BUS_DMASYNC_POSTREAD;
else
op = BUS_DMASYNC_POSTWRITE;
bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
}
switch (done_stat) {
case QD_NO_ERROR:
if (host_stat == QHSTA_NO_ERROR) {
ccb->ccb_h.status = CAM_REQ_CMP;
break;
}
xpt_print_path(ccb->ccb_h.path);
printf("adv_done - queue done without error, "
"but host status non-zero(%x)\n", host_stat);
/*FALLTHROUGH*/
case QD_WITH_ERROR:
switch (host_stat) {
case QHSTA_M_TARGET_STATUS_BUSY:
case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
/*
* Assume that if we were a tagged transaction
* the target reported queue full. Otherwise,
* report busy. The firmware really should just
* pass the original status back up to us even
* if it thinks the target was in error for
* returning this status as no other transactions
* from this initiator are in effect, but this
* ignores multi-initiator setups and there is
* evidence that the firmware gets its per-device
* transaction counts screwed up occasionally.
*/
ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
&& host_stat != QHSTA_M_TARGET_STATUS_BUSY)
scsi_status = SCSI_STATUS_QUEUE_FULL;
else
scsi_status = SCSI_STATUS_BUSY;
adv_abort_ccb(adv, ccb->ccb_h.target_id,
ccb->ccb_h.target_lun,
/*ccb*/NULL, CAM_REQUEUE_REQ,
/*queued_only*/TRUE);
/*FALLTHROUGH*/
case QHSTA_M_NO_AUTO_REQ_SENSE:
case QHSTA_NO_ERROR:
ccb->csio.scsi_status = scsi_status;
switch (scsi_status) {
case SCSI_STATUS_CHECK_COND:
case SCSI_STATUS_CMD_TERMINATED:
ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
/* Structure copy */
ccb->csio.sense_data =
adv->sense_buffers[q_no - 1];
/* FALLTHROUGH */
case SCSI_STATUS_BUSY:
case SCSI_STATUS_RESERV_CONFLICT:
case SCSI_STATUS_QUEUE_FULL:
case SCSI_STATUS_COND_MET:
case SCSI_STATUS_INTERMED:
case SCSI_STATUS_INTERMED_COND_MET:
ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
break;
case SCSI_STATUS_OK:
ccb->ccb_h.status |= CAM_REQ_CMP;
break;
}
break;
case QHSTA_M_SEL_TIMEOUT:
ccb->ccb_h.status = CAM_SEL_TIMEOUT;
break;
case QHSTA_M_DATA_OVER_RUN:
ccb->ccb_h.status = CAM_DATA_RUN_ERR;
break;
case QHSTA_M_UNEXPECTED_BUS_FREE:
ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
break;
case QHSTA_M_BAD_BUS_PHASE_SEQ:
ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
break;
case QHSTA_M_BAD_CMPL_STATUS_IN:
/* No command complete after a status message */
ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
break;
case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
case QHSTA_M_WTM_TIMEOUT:
case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
/* The SCSI bus hung in a phase */
ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
adv_reset_bus(adv, /*initiate_reset*/TRUE);
break;
case QHSTA_M_AUTO_REQ_SENSE_FAIL:
ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
break;
case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
case QHSTA_D_HOST_ABORT_FAILED:
case QHSTA_D_EXE_SCSI_Q_FAILED:
case QHSTA_D_ASPI_NO_BUF_POOL:
case QHSTA_M_BAD_TAG_CODE:
case QHSTA_D_LRAM_CMP_ERROR:
case QHSTA_M_MICRO_CODE_ERROR_HALT:
default:
panic("%s: Unhandled Host status error %x",
device_get_nameunit(adv->dev), host_stat);
/* NOTREACHED */
}
break;
case QD_ABORTED_BY_HOST:
/* Don't clobber any more-explicit error codes we've set */
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
ccb->ccb_h.status = CAM_REQ_ABORTED;
break;
default:
xpt_print_path(ccb->ccb_h.path);
printf("adv_done - queue done with unknown status %x:%x\n",
done_stat, host_stat);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
break;
}
adv_clear_state(adv, ccb);
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
&& (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
ccb->ccb_h.status |= CAM_DEV_QFRZN;
}
adv_free_ccb_info(adv, cinfo);
/*
* Null this out so that we catch driver bugs that cause a
* ccb to be completed twice.
*/
ccb->ccb_h.ccb_cinfo_ptr = NULL;
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
}
/*
* Function to poll for command completion when
* interrupts are disabled (crash dumps)
*/
static void
adv_poll(struct cam_sim *sim)
{
adv_intr_locked(cam_sim_softc(sim));
}
/*
* Attach all the sub-devices we can find
*/
int
adv_attach(adv)
struct adv_softc *adv;
{
struct ccb_setasync csa;
struct cam_devq *devq;
int max_sg;
/*
* Allocate an array of ccb mapping structures. We put the
* index of the ccb_info structure into the queue representing
* a transaction and use it for mapping the queue to the
* upper level SCSI transaction it represents.
*/
- adv->ccb_infos = mallocarray(adv->max_openings, sizeof(*adv->ccb_infos),
+ adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings,
M_DEVBUF, M_NOWAIT);
if (adv->ccb_infos == NULL)
return (ENOMEM);
adv->init_level++;
/*
* Create our DMA tags. These tags define the kinds of device
* accessible memory allocations and memory mappings we will
* need to perform during normal operation.
*
* Unless we need to further restrict the allocation, we rely
* on the restrictions of the parent dmat, hence the common
* use of MAXADDR and MAXSIZE.
*
* The ASC boards use chains of "queues" (the transactional
* resources on the board) to represent long S/G lists.
* The first queue represents the command and holds a
* single address and data pair. The queues that follow
* can each hold ADV_SG_LIST_PER_Q entries. Given the
* total number of queues, we can express the largest
* transaction we can map. We reserve a few queues for
* error recovery. Take those into account as well.
*
* There is a way to take an interrupt to download the
* next batch of S/G entries if there are more than 255
* of them (the counter in the queue structure is a u_int8_t).
* We don't use this feature, so limit the S/G list size
* accordingly.
*/
max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
if (max_sg > 255)
max_sg = 255;
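/*
 * Illustrative sketch only, not part of the driver: the S/G sizing rule
 * above with hypothetical numbers.  With, say, 240 openings, 2 reserved
 * recovery queues and 7 S/G entries per follow-on queue,
 * (240 - 2 - 1) * 7 = 1659, which the u_int8_t per-queue counter then
 * caps at 255.  The constants are examples, not the real ADV_* values.
 */
static int
adv_max_sg_sketch(int openings, int reserved_queues, int sg_per_queue)
{
	int max_sg = (openings - reserved_queues - 1) * sg_per_queue;

	return (max_sg > 255 ? 255 : max_sg);
}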
/* DMA tag for mapping buffers into device visible space. */
if (bus_dma_tag_create(
/* parent */ adv->parent_dmat,
/* alignment */ 1,
/* boundary */ 0,
/* lowaddr */ BUS_SPACE_MAXADDR,
/* highaddr */ BUS_SPACE_MAXADDR,
/* filter */ NULL,
/* filterarg */ NULL,
/* maxsize */ ADV_MAXPHYS,
/* nsegments */ max_sg,
/* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
/* flags */ BUS_DMA_ALLOCNOW,
/* lockfunc */ busdma_lock_mutex,
/* lockarg */ &adv->lock,
&adv->buffer_dmat) != 0) {
return (ENXIO);
}
adv->init_level++;
/* DMA tag for our sense buffers */
if (bus_dma_tag_create(
/* parent */ adv->parent_dmat,
/* alignment */ 1,
/* boundary */ 0,
/* lowaddr */ BUS_SPACE_MAXADDR,
/* highaddr */ BUS_SPACE_MAXADDR,
/* filter */ NULL,
/* filterarg */ NULL,
/* maxsize */ sizeof(struct scsi_sense_data) *
adv->max_openings,
/* nsegments */ 1,
/* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
/* flags */ 0,
/* lockfunc */ busdma_lock_mutex,
/* lockarg */ &adv->lock,
&adv->sense_dmat) != 0) {
return (ENXIO);
}
adv->init_level++;
/* Allocation for our sense buffers */
if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
return (ENOMEM);
}
adv->init_level++;
/* And permanently map them */
bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
adv->sense_buffers,
sizeof(struct scsi_sense_data)*adv->max_openings,
adv_map, &adv->sense_physbase, /*flags*/0);
adv->init_level++;
/*
* Fire up the chip
*/
if (adv_start_chip(adv) != 1) {
device_printf(adv->dev,
"Unable to start on board processor. Aborting.\n");
return (ENXIO);
}
/*
* Create the device queue for our SIM.
*/
devq = cam_simq_alloc(adv->max_openings);
if (devq == NULL)
return (ENOMEM);
/*
* Construct our SIM entry.
*/
adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv,
device_get_unit(adv->dev), &adv->lock, 1, adv->max_openings, devq);
if (adv->sim == NULL)
return (ENOMEM);
/*
* Register the bus.
*/
mtx_lock(&adv->lock);
if (xpt_bus_register(adv->sim, adv->dev, 0) != CAM_SUCCESS) {
cam_sim_free(adv->sim, /*free devq*/TRUE);
mtx_unlock(&adv->lock);
return (ENXIO);
}
if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
!= CAM_REQ_CMP) {
xpt_bus_deregister(cam_sim_path(adv->sim));
cam_sim_free(adv->sim, /*free devq*/TRUE);
mtx_unlock(&adv->lock);
return (ENXIO);
}
xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
csa.callback = advasync;
csa.callback_arg = adv;
xpt_action((union ccb *)&csa);
mtx_unlock(&adv->lock);
return (0);
}
MODULE_DEPEND(adv, cam, 1, 1, 1);
Index: head/sys/dev/ath/if_ath_rx_edma.c
===================================================================
--- head/sys/dev/ath/if_ath_rx_edma.c (revision 328217)
+++ head/sys/dev/ath/if_ath_rx_edma.c (revision 328218)
@@ -1,1009 +1,1010 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
/*
* This is needed for register operations which are performed
* by the driver - eg, calls to ath_hal_gettsf32().
*
* It's also required for any AH_DEBUG checks in here, eg the
* module dependencies.
*/
#include "opt_ah.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#include <dev/ath/if_ath_descdma.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#include <dev/ath/if_ath_rx_edma.h>
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
/*
* some general macros
*/
#define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1)
#define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1)
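/*
 * Illustrative sketch only, not part of the driver: the INCR/DECR macros
 * above rely on the ring size being a power of two, so masking with
 * (size - 1) is equivalent to a modulo that also handles the wrap from
 * (size - 1) back to 0 and, for DECR on an unsigned index, from 0 back
 * to (size - 1).
 */
#include <assert.h>

static void
ring_wrap_sketch(void)
{
	unsigned int idx = 7;
	const unsigned int sz = 8;	/* must be a power of two */

	idx++; idx &= (sz - 1);		/* INCR: 7 -> 0 */
	assert(idx == 0);
	idx--; idx &= (sz - 1);		/* DECR: 0 -> 7 via unsigned wrap */
	assert(idx == 7);
}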
MALLOC_DECLARE(M_ATHDEV);
/*
* XXX TODO:
*
* + Make sure the FIFO is correctly flushed and reinitialised
* through a reset;
* + Verify multi-descriptor frames work!
* + There's a "memory use after free" which needs to be tracked down
* and fixed ASAP. I've seen this in the legacy path too, so it
* may be a generic RX path issue.
*/
/*
* XXX shuffle the function orders so these pre-declarations aren't
* required!
*/
static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int nbufs);
static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static void ath_edma_recv_proc_queue(struct ath_softc *sc,
HAL_RX_QUEUE qtype, int dosched);
static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
HAL_RX_QUEUE qtype, int dosched);
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
struct ath_hal *ah = sc->sc_ah;
ATH_RX_LOCK(sc);
ath_hal_stoppcurecv(ah);
ath_hal_setrxfilter(ah, 0);
/*
*
*/
if (ath_hal_stopdmarecv(ah) == AH_TRUE)
sc->sc_rx_stopped = 1;
/*
* Give the various bus FIFOs (not EDMA descriptor FIFO)
* time to finish flushing out data.
*/
DELAY(3000);
/* Flush RX pending for each queue */
/* XXX should generic-ify this */
if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
}
if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
}
ATH_RX_UNLOCK(sc);
}
/*
* Re-initialise the FIFO given the current buffer contents.
* Specifically, walk from head -> tail, pushing the FIFO contents
* back into the FIFO.
*/
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
struct ath_buf *bf;
int i, j;
ATH_RX_LOCK_ASSERT(sc);
i = re->m_fifo_head;
for (j = 0; j < re->m_fifo_depth; j++) {
bf = re->m_fifo[i];
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Q%d: pos=%i, addr=0x%jx\n",
__func__,
qtype,
i,
(uintmax_t)bf->bf_daddr);
ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
INCR(i, re->m_fifolen);
}
/* Ensure this worked out right */
if (i != re->m_fifo_tail) {
device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
__func__,
i,
re->m_fifo_tail);
}
}
/*
* Start receive.
*/
static int
ath_edma_startrecv(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
ATH_RX_LOCK(sc);
/*
* Sanity check - are we being called whilst RX
* isn't stopped? If so, we may end up pushing
* too many entries into the RX FIFO and
* badness occurs.
*/
/* Enable RX FIFO */
ath_hal_rxena(ah);
/*
* In theory the hardware has been initialised, right?
*/
if (sc->sc_rx_resetted == 1) {
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Re-initing HP FIFO\n", __func__);
ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Re-initing LP FIFO\n", __func__);
ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
sc->sc_rx_resetted = 0;
} else {
device_printf(sc->sc_dev,
"%s: called without resetting chip?\n",
__func__);
}
/* Add up to m_fifolen entries in each queue */
/*
* These must occur after the above write so the FIFO buffers
* are pushed/tracked in the same order as the hardware will
* process them.
*
* XXX TODO: is this really necessary? We should've stopped
* the hardware already and reinitialised it, so it's a no-op.
*/
ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);
ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);
ath_mode_init(sc);
ath_hal_startpcurecv(ah);
/*
* We're now doing RX DMA!
*/
sc->sc_rx_stopped = 0;
ATH_RX_UNLOCK(sc);
return (0);
}
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int dosched)
{
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_edma_recv_proc_queue(sc, qtype, dosched);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
static void
ath_edma_recv_flush(struct ath_softc *sc)
{
DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt++;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
/*
* Flush any active frames from FIFO -> deferred list
*/
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);
/*
* Process what's in the deferred queue
*/
/*
* XXX: If we read the tsf/channoise here and then pass it in,
* we could restore the power state before processing
* the deferred queue.
*/
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt--;
ATH_PCU_UNLOCK(sc);
}
/*
* Process frames from the current queue into the deferred queue.
*/
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int dosched)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
struct ath_rx_status *rs;
struct ath_desc *ds;
struct ath_buf *bf;
struct mbuf *m;
struct ath_hal *ah = sc->sc_ah;
uint64_t tsf;
uint16_t nf;
int npkts = 0;
tsf = ath_hal_gettsf64(ah);
nf = ath_hal_getchannoise(ah, sc->sc_curchan);
sc->sc_stats.ast_rx_noise = nf;
ATH_RX_LOCK(sc);
#if 1
if (sc->sc_rx_resetted == 1) {
/*
* XXX We shouldn't ever be scheduled if
* receive has been stopped - so complain
* loudly!
*/
device_printf(sc->sc_dev,
"%s: sc_rx_resetted=1! Bad!\n",
__func__);
ATH_RX_UNLOCK(sc);
return;
}
#endif
do {
bf = re->m_fifo[re->m_fifo_head];
/* This shouldn't occur! */
if (bf == NULL) {
device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
__func__,
qtype);
break;
}
m = bf->bf_m;
ds = bf->bf_desc;
/*
* Sync descriptor memory - this also syncs the buffer for us.
* EDMA descriptors are in cached memory.
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
rs = &bf->bf_status.ds_rxstat;
bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
NULL, rs);
if (bf->bf_rxstatus == HAL_EINPROGRESS)
break;
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG */
/*
* Completed descriptor.
*/
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Q%d: completed!\n", __func__, qtype);
npkts++;
/*
* We've been synced already, so unmap.
*/
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
/*
* Remove the FIFO entry and place it on the completion
* queue.
*/
re->m_fifo[re->m_fifo_head] = NULL;
TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);
/* Bump the descriptor FIFO stats */
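/*
* Note: INCR() advances the index and wraps it at m_fifolen, so
* m_fifo_head, m_fifo_tail and m_fifo_depth together treat m_fifo
* as a circular buffer of FIFO slots.
*/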
INCR(re->m_fifo_head, re->m_fifolen);
re->m_fifo_depth--;
/* XXX check it doesn't fall below 0 */
} while (re->m_fifo_depth > 0);
/* Append some more fresh frames to the FIFO */
if (dosched)
ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);
ATH_RX_UNLOCK(sc);
/* rx signal state monitoring */
ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
"ath edma rx proc: npkts=%d\n",
npkts);
return;
}
/*
* Flush the deferred queue.
*
* This destructively flushes the deferred queue - it doesn't
* call the wireless stack on each mbuf.
*/
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
struct ath_buf *bf;
ATH_RX_LOCK_ASSERT(sc);
/* Free in one set, inside the lock */
while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
}
while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
}
}
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int dosched)
{
int ngood = 0;
uint64_t tsf;
struct ath_buf *bf, *next;
struct ath_rx_status *rs;
int16_t nf;
ath_bufhead rxlist;
struct mbuf *m;
TAILQ_INIT(&rxlist);
nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
/*
* XXX TODO: the NF/TSF should be stamped on the bufs themselves,
* otherwise we may end up adding in the wrong values if this
* is delayed too far.
*/
tsf = ath_hal_gettsf64(sc->sc_ah);
/* Copy the list over */
ATH_RX_LOCK(sc);
TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
ATH_RX_UNLOCK(sc);
/* Handle the completed descriptors */
/*
* XXX is this SAFE call needed? The ath_buf entries
* aren't modified by ath_rx_pkt, right?
*/
TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
/*
* Skip the RX descriptor status - start at the data offset
*/
m_adj(bf->bf_m, sc->sc_rx_statuslen);
/* Handle the frame */
rs = &bf->bf_status.ds_rxstat;
m = bf->bf_m;
bf->bf_m = NULL;
if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
ngood++;
}
if (ngood) {
sc->sc_lastrx = tsf;
}
ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
"ath edma rx deferred proc: ngood=%d\n",
ngood);
/* Free in one set, inside the lock */
ATH_RX_LOCK(sc);
while (! TAILQ_EMPTY(&rxlist)) {
bf = TAILQ_FIRST(&rxlist);
TAILQ_REMOVE(&rxlist, bf, bf_list);
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
}
ATH_RX_UNLOCK(sc);
return (ngood);
}
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
struct ath_softc *sc = (struct ath_softc *) arg;
#ifdef IEEE80211_SUPPORT_SUPERG
struct ieee80211com *ic = &sc->sc_ic;
#endif
DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
__func__,
npending);
ATH_PCU_LOCK(sc);
if (sc->sc_inreset_cnt > 0) {
device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
__func__);
ATH_PCU_UNLOCK(sc);
return;
}
sc->sc_rxproc_cnt++;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);
/*
* XXX: If we read the tsf/channoise here and then pass it in,
* we could restore the power state before processing
* the deferred queue.
*/
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_ff_age_all(ic, 100);
#endif
if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt--;
ATH_PCU_UNLOCK(sc);
}
/*
* Allocate an RX mbuf for the given ath_buf and initialise
* it for EDMA.
*
* + Allocate a 4KB mbuf;
* + Setup the DMA map for the given buffer;
* + Return that.
*/
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
struct mbuf *m;
int error;
int len;
ATH_RX_LOCK_ASSERT(sc);
m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
if (! m)
return (ENOBUFS); /* XXX ?*/
/* XXX warn/enforce alignment */
len = m->m_ext.ext_size;
#if 0
device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
__func__,
m,
len,
mtod(m, char *));
#endif
m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
/*
* Populate ath_buf fields.
*/
bf->bf_desc = mtod(m, struct ath_desc *);
bf->bf_lastds = bf->bf_desc; /* XXX only really for TX? */
bf->bf_m = m;
/*
* Zero the descriptor and ensure it makes it out to the
* bounce buffer if one is required.
*
* XXX PREWRITE will copy the whole buffer; we only needed it
* to sync the first 32 DWORDS. Oh well.
*/
memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);
/*
* Create DMA mapping.
*/
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev, "%s: failed; error=%d\n",
__func__,
error);
m_freem(m);
return (error);
}
/*
* Set daddr to the physical mapping page.
*/
bf->bf_daddr = bf->bf_segs[0].ds_addr;
/*
* Prepare for the upcoming read.
*
* We need to both sync some data into the buffer (the zero'ed
* descriptor payload) and also prepare for the read that's going
* to occur.
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Finish! */
return (0);
}
/*
* Allocate a RX buffer.
*/
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
struct ath_buf *bf;
int error;
ATH_RX_LOCK_ASSERT(sc);
/* Allocate buffer */
bf = TAILQ_FIRST(&sc->sc_rxbuf);
/* XXX shouldn't happen upon startup? */
if (bf == NULL) {
device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
__func__);
return (NULL);
}
/* Remove it from the free list */
TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
/* Assign RX mbuf to it */
error = ath_edma_rxbuf_init(sc, bf);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: bf=%p, rxbuf alloc failed! error=%d\n",
__func__,
bf,
error);
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
return (NULL);
}
return (bf);
}
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{
ATH_RX_LOCK_ASSERT(sc);
/*
* Only unload the frame if we haven't consumed
* the mbuf via ath_rx_pkt().
*/
if (bf->bf_m) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
/* XXX lock? */
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}
/*
* Allocate up to 'n' entries and push them onto the hardware FIFO.
*
* Return how many entries were successfully pushed onto the
* FIFO.
*/
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
struct ath_buf *bf;
int i;
ATH_RX_LOCK_ASSERT(sc);
/*
* Allocate buffers until the FIFO is full or nbufs is reached.
*/
for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
/* Ensure the FIFO slot is already empty; if not, complain loudly! */
if (re->m_fifo[re->m_fifo_tail] != NULL) {
device_printf(sc->sc_dev,
"%s: Q%d: fifo[%d] != NULL (%p)\n",
__func__,
qtype,
re->m_fifo_tail,
re->m_fifo[re->m_fifo_tail]);
/* Free the slot */
ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
re->m_fifo_depth--;
/* XXX check it's not < 0 */
re->m_fifo[re->m_fifo_tail] = NULL;
}
bf = ath_edma_rxbuf_alloc(sc);
/* XXX should ensure the FIFO is not NULL? */
if (bf == NULL) {
device_printf(sc->sc_dev,
"%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
__func__,
qtype,
i,
nbufs);
break;
}
re->m_fifo[re->m_fifo_tail] = bf;
/* Write to the RX FIFO */
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Q%d: putrxbuf=%p (0x%jx)\n",
__func__,
qtype,
bf->bf_desc,
(uintmax_t) bf->bf_daddr);
ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
re->m_fifo_depth++;
INCR(re->m_fifo_tail, re->m_fifolen);
}
/*
* Return how many were allocated.
*/
DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
__func__,
qtype,
nbufs,
i);
return (i);
}
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
int i;
ATH_RX_LOCK_ASSERT(sc);
for (i = 0; i < re->m_fifolen; i++) {
if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
struct ath_buf *bf = re->m_fifo[i];
if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
ath_edma_rxbuf_free(sc, re->m_fifo[i]);
re->m_fifo[i] = NULL;
re->m_fifo_depth--;
}
}
if (re->m_rxpending != NULL) {
m_freem(re->m_rxpending);
re->m_rxpending = NULL;
}
re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;
return (0);
}
/*
* Setup the initial RX FIFO structure.
*/
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
ATH_RX_LOCK_ASSERT(sc);
if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
__func__,
qtype);
return (-EINVAL);
}
if (bootverbose)
device_printf(sc->sc_dev,
"%s: type=%d, FIFO depth = %d entries\n",
__func__,
qtype,
re->m_fifolen);
/* Allocate ath_buf FIFO array, pre-zero'ed */
- re->m_fifo = mallocarray(re->m_fifolen, sizeof(struct ath_buf *),
- M_ATHDEV, M_NOWAIT | M_ZERO);
+ re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
+ M_ATHDEV,
+ M_NOWAIT | M_ZERO);
if (re->m_fifo == NULL) {
device_printf(sc->sc_dev, "%s: malloc failed\n",
__func__);
return (-ENOMEM);
}
/*
* Set initial "empty" state.
*/
re->m_rxpending = NULL;
re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;
return (0);
}
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
__func__,
qtype);
free(re->m_fifo, M_ATHDEV);
return (0);
}
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
int error;
/*
* Create RX DMA tag and buffers.
*/
error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
"rx", ath_rxbuf, sc->sc_rx_statuslen);
if (error != 0)
return error;
ATH_RX_LOCK(sc);
(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
ATH_RX_UNLOCK(sc);
return (0);
}
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{
ATH_RX_LOCK(sc);
ath_edma_flush_deferred_queue(sc);
ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);
ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
ATH_RX_UNLOCK(sc);
/* Free RX ath_buf */
/* Free RX DMA tag */
if (sc->sc_rxdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
return (0);
}
void
ath_recv_setup_edma(struct ath_softc *sc)
{
/* Set buffer size to 4k */
sc->sc_edma_bufsize = 4096;
/* Fetch EDMA field and buffer sizes */
(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);
/* Configure the hardware with the RX buffer size */
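/*
* The hardware writes an RX status block of sc_rx_statuslen bytes at
* the start of each receive buffer (the deferred RX path strips it
* with m_adj()), so the size programmed here excludes that space.
*/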
(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
sc->sc_rx_statuslen);
if (bootverbose) {
device_printf(sc->sc_dev, "RX status length: %d\n",
sc->sc_rx_statuslen);
device_printf(sc->sc_dev, "RX buffer size: %d\n",
sc->sc_edma_bufsize);
}
sc->sc_rx.recv_stop = ath_edma_stoprecv;
sc->sc_rx.recv_start = ath_edma_startrecv;
sc->sc_rx.recv_flush = ath_edma_recv_flush;
sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;
sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;
sc->sc_rx.recv_sched = ath_edma_recv_sched;
sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}
Index: head/sys/dev/beri/virtio/virtio.c
===================================================================
--- head/sys/dev/beri/virtio/virtio.c (revision 328217)
+++ head/sys/dev/beri/virtio/virtio.c (revision 328218)
@@ -1,260 +1,260 @@
/*-
* Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* BERI virtio mmio backend common methods
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/event.h>
#include <sys/selinfo.h>
#include <sys/endian.h>
#include <sys/rwlock.h>
#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/beri/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>
#include <dev/altera/pio/pio.h>
#include "pio_if.h"
int
vq_ring_ready(struct vqueue_info *vq)
{
return (vq->vq_flags & VQ_ALLOC);
}
int
vq_has_descs(struct vqueue_info *vq)
{
return (vq_ring_ready(vq) && vq->vq_last_avail !=
be16toh(vq->vq_avail->idx));
}
void *
paddr_map(uint32_t offset, uint32_t phys, uint32_t size)
{
bus_space_handle_t bsh;
if (bus_space_map(fdtbus_bs_tag, (phys + offset),
size, 0, &bsh) != 0) {
panic("Couldn't map 0x%08x\n", (phys + offset));
}
return (void *)(bsh);
}
void
paddr_unmap(void *phys, uint32_t size)
{
bus_space_unmap(fdtbus_bs_tag, (bus_space_handle_t)phys, size);
}
static inline void
_vq_record(uint32_t offs, int i, volatile struct vring_desc *vd,
struct iovec *iov, int n_iov, uint16_t *flags)
{
if (i >= n_iov)
return;
iov[i].iov_base = paddr_map(offs, be64toh(vd->addr),
be32toh(vd->len));
iov[i].iov_len = be32toh(vd->len);
if (flags != NULL)
flags[i] = be16toh(vd->flags);
}
int
vq_getchain(uint32_t offs, struct vqueue_info *vq,
struct iovec *iov, int n_iov, uint16_t *flags)
{
volatile struct vring_desc *vdir, *vindir, *vp;
int idx, ndesc, n_indir;
int head, next;
int i;
idx = vq->vq_last_avail;
ndesc = (be16toh(vq->vq_avail->idx) - idx);
if (ndesc == 0)
return (0);
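/*
* The available ring index wraps at the queue size; virtio queue
* sizes are powers of two, so masking with (vq_qsize - 1) selects
* the ring slot.
*/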
head = be16toh(vq->vq_avail->ring[idx & (vq->vq_qsize - 1)]);
next = head;
for (i = 0; i < VQ_MAX_DESCRIPTORS; next = be16toh(vdir->next)) {
vdir = &vq->vq_desc[next];
if ((be16toh(vdir->flags) & VRING_DESC_F_INDIRECT) == 0) {
_vq_record(offs, i, vdir, iov, n_iov, flags);
i++;
} else {
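/*
* Indirect descriptor: the buffer holds an array of
* struct vring_desc entries, each 16 bytes (64-bit addr,
* 32-bit len, 16-bit flags, 16-bit next), hence len / 16.
*/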
n_indir = be32toh(vdir->len) / 16;
vindir = paddr_map(offs, be64toh(vdir->addr),
be32toh(vdir->len));
next = 0;
for (;;) {
vp = &vindir[next];
_vq_record(offs, i, vp, iov, n_iov, flags);
i++;
if ((be16toh(vp->flags) & VRING_DESC_F_NEXT) == 0)
break;
next = be16toh(vp->next);
}
paddr_unmap(__DEVOLATILE(void *, vindir), be32toh(vdir->len));
}
if ((be16toh(vdir->flags) & VRING_DESC_F_NEXT) == 0)
return (i);
}
return (i);
}
void
vq_relchain(struct vqueue_info *vq, struct iovec *iov, int n, uint32_t iolen)
{
volatile struct vring_used_elem *vue;
volatile struct vring_used *vu;
uint16_t head, uidx, mask;
int i;
mask = vq->vq_qsize - 1;
vu = vq->vq_used;
head = be16toh(vq->vq_avail->ring[vq->vq_last_avail++ & mask]);
uidx = be16toh(vu->idx);
vue = &vu->ring[uidx++ & mask];
vue->id = htobe32(head);
vue->len = htobe32(iolen);
vu->idx = htobe16(uidx);
/* Clean up */
for (i = 0; i < n; i++) {
paddr_unmap((void *)iov[i].iov_base, iov[i].iov_len);
}
}
int
setup_pio(device_t dev, char *name, device_t *pio_dev)
{
phandle_t pio_node;
struct fdt_ic *ic;
phandle_t xref;
phandle_t node;
if ((node = ofw_bus_get_node(dev)) == -1)
return (ENXIO);
if (OF_searchencprop(node, name, &xref,
sizeof(xref)) == -1) {
return (ENXIO);
}
pio_node = OF_node_from_xref(xref);
SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
if (ic->iph == pio_node) {
*pio_dev = ic->dev;
return (0);
}
}
return (ENXIO);
}
int
setup_offset(device_t dev, uint32_t *offset)
{
pcell_t dts_value[2];
phandle_t mem_node;
phandle_t xref;
phandle_t node;
int len;
if ((node = ofw_bus_get_node(dev)) == -1)
return (ENXIO);
if (OF_searchencprop(node, "beri-mem", &xref,
sizeof(xref)) == -1) {
return (ENXIO);
}
mem_node = OF_node_from_xref(xref);
if ((len = OF_getproplen(mem_node, "reg")) <= 0)
return (ENXIO);
OF_getencprop(mem_node, "reg", dts_value, len);
*offset = dts_value[0];
return (0);
}
struct iovec *
getcopy(struct iovec *iov, int n)
{
struct iovec *tiov;
int i;
- tiov = mallocarray(n, sizeof(struct iovec), M_DEVBUF, M_NOWAIT);
+ tiov = malloc(n * sizeof(struct iovec), M_DEVBUF, M_NOWAIT);
for (i = 0; i < n; i++) {
tiov[i].iov_base = iov[i].iov_base;
tiov[i].iov_len = iov[i].iov_len;
}
return (tiov);
}
Index: head/sys/dev/bnxt/if_bnxt.c
===================================================================
--- head/sys/dev/bnxt/if_bnxt.c (revision 328217)
+++ head/sys/dev/bnxt/if_bnxt.c (revision 328218)
@@ -1,2499 +1,2498 @@
/*-
* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016 Broadcom, All Rights Reserved.
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/priv.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/iflib.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "ifdi_if.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ioctl.h"
#include "bnxt_sysctl.h"
#include "hsi_struct_def.h"
/*
* PCI Device ID Table
*/
static pci_vendor_info_t bnxt_vendor_info_array[] =
{
PVID(BROADCOM_VENDOR_ID, BCM57301,
"Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57302,
"Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57304,
"Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57311,
"Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57312,
"Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57314,
"Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57402,
"Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
"Broadcom BCM57402 NetXtreme-E Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57404,
"Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
"Broadcom BCM57404 NetXtreme-E Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57406,
"Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
"Broadcom BCM57406 NetXtreme-E Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57407,
"Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
"Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
"Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
PVID(BROADCOM_VENDOR_ID, BCM57412,
"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57414,
"Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57416,
"Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
"Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57417,
"Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
"Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57454,
"Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM58700,
"Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
"Broadcom NetXtreme-C Ethernet Virtual Function"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
"Broadcom NetXtreme-C Ethernet Virtual Function"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
"Broadcom NetXtreme-C Ethernet Virtual Function"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
"Broadcom NetXtreme-E Ethernet Virtual Function"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
"Broadcom NetXtreme-E Ethernet Virtual Function"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
"Broadcom NetXtreme-E Ethernet Virtual Function"),
/* required last entry */
PVID_END
};
/*
* Function prototypes
*/
static void *bnxt_register(device_t dev);
/* Soft queue setup and teardown */
static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
uint64_t *paddrs, int ntxqs, int ntxqsets);
static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
uint64_t *paddrs, int nrxqs, int nrxqsets);
static void bnxt_queues_free(if_ctx_t ctx);
/* Device setup and teardown */
static int bnxt_attach_pre(if_ctx_t ctx);
static int bnxt_attach_post(if_ctx_t ctx);
static int bnxt_detach(if_ctx_t ctx);
/* Device configuration */
static void bnxt_init(if_ctx_t ctx);
static void bnxt_stop(if_ctx_t ctx);
static void bnxt_multi_set(if_ctx_t ctx);
static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int bnxt_media_change(if_ctx_t ctx);
static int bnxt_promisc_set(if_ctx_t ctx, int flags);
static uint64_t bnxt_get_counter(if_ctx_t, ift_counter);
static void bnxt_update_admin_status(if_ctx_t ctx);
static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
/* Interrupt enable / disable */
static void bnxt_intr_enable(if_ctx_t ctx);
static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void bnxt_disable_intr(if_ctx_t ctx);
static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
/* vlan support */
static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
/* ioctl */
static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static int bnxt_shutdown(if_ctx_t ctx);
static int bnxt_suspend(if_ctx_t ctx);
static int bnxt_resume(if_ctx_t ctx);
/* Internal support functions */
static int bnxt_probe_phy(struct bnxt_softc *softc);
static void bnxt_add_media_types(struct bnxt_softc *softc);
static int bnxt_pci_mapping(struct bnxt_softc *softc);
static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
static int bnxt_handle_def_cp(void *arg);
static int bnxt_handle_rx_cp(void *arg);
static void bnxt_clear_ids(struct bnxt_softc *softc);
static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
static void bnxt_def_cp_task(void *context);
static void bnxt_handle_async_event(struct bnxt_softc *softc,
struct cmpl_base *cmpl);
static uint8_t get_phy_type(struct bnxt_softc *softc);
static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
static void bnxt_get_wol_settings(struct bnxt_softc *softc);
static int bnxt_wol_config(if_ctx_t ctx);
/*
* Device Interface Declaration
*/
static device_method_t bnxt_methods[] = {
/* Device interface */
DEVMETHOD(device_register, bnxt_register),
DEVMETHOD(device_probe, iflib_device_probe),
DEVMETHOD(device_attach, iflib_device_attach),
DEVMETHOD(device_detach, iflib_device_detach),
DEVMETHOD(device_shutdown, iflib_device_shutdown),
DEVMETHOD(device_suspend, iflib_device_suspend),
DEVMETHOD(device_resume, iflib_device_resume),
DEVMETHOD_END
};
static driver_t bnxt_driver = {
"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};
devclass_t bnxt_devclass;
DRIVER_MODULE(bnxt, pci, bnxt_driver, bnxt_devclass, 0, 0);
MODULE_DEPEND(bnxt, pci, 1, 1, 1);
MODULE_DEPEND(bnxt, ether, 1, 1, 1);
MODULE_DEPEND(bnxt, iflib, 1, 1, 1);
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
static device_method_t bnxt_iflib_methods[] = {
DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
DEVMETHOD(ifdi_queues_free, bnxt_queues_free),
DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
DEVMETHOD(ifdi_detach, bnxt_detach),
DEVMETHOD(ifdi_init, bnxt_init),
DEVMETHOD(ifdi_stop, bnxt_stop),
DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
DEVMETHOD(ifdi_media_status, bnxt_media_status),
DEVMETHOD(ifdi_media_change, bnxt_media_change),
DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
DEVMETHOD(ifdi_timer, bnxt_if_timer),
DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),
DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),
DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),
DEVMETHOD(ifdi_suspend, bnxt_suspend),
DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
DEVMETHOD(ifdi_resume, bnxt_resume),
DEVMETHOD_END
};
static driver_t bnxt_iflib_driver = {
"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};
/*
* iflib shared context
*/
#define BNXT_DRIVER_VERSION "1.0.0.2"
char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
extern struct if_txrx bnxt_txrx;
static struct if_shared_ctx bnxt_sctx_init = {
.isc_magic = IFLIB_MAGIC,
.isc_driver = &bnxt_iflib_driver,
.isc_nfl = 2, // Number of Free Lists
.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
.isc_q_align = PAGE_SIZE,
.isc_tx_maxsize = BNXT_TSO_SIZE,
.isc_tx_maxsegsize = BNXT_TSO_SIZE,
.isc_rx_maxsize = BNXT_TSO_SIZE,
.isc_rx_maxsegsize = BNXT_TSO_SIZE,
// Only use a single segment to avoid page size constraints
.isc_rx_nsegments = 1,
.isc_ntxqs = 2,
.isc_nrxqs = 3,
.isc_nrxd_min = {16, 16, 16},
.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
.isc_nrxd_max = {INT32_MAX, INT32_MAX, INT32_MAX},
.isc_ntxd_min = {16, 16, 16},
.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
PAGE_SIZE / sizeof(struct tx_bd_short)},
.isc_ntxd_max = {INT32_MAX, INT32_MAX, INT32_MAX},
.isc_admin_intrcnt = 1,
.isc_vendor_info = bnxt_vendor_info_array,
.isc_driver_version = bnxt_driver_version,
};
if_shared_ctx_t bnxt_sctx = &bnxt_sctx_init;
/*
* Device Methods
*/
static void *
bnxt_register(device_t dev)
{
return bnxt_sctx;
}
/*
* Device Dependent Configuration Functions
*/
/* Soft queue setup and teardown */
static int
bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
uint64_t *paddrs, int ntxqs, int ntxqsets)
{
struct bnxt_softc *softc;
int i;
int rc;
softc = iflib_get_softc(ctx);
- softc->tx_cp_rings = mallocarray(ntxqsets, sizeof(struct bnxt_cp_ring),
+ softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!softc->tx_cp_rings) {
device_printf(iflib_get_dev(ctx),
"unable to allocate TX completion rings\n");
rc = ENOMEM;
goto cp_alloc_fail;
}
- softc->tx_rings = mallocarray(ntxqsets, sizeof(struct bnxt_ring),
+ softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!softc->tx_rings) {
device_printf(iflib_get_dev(ctx),
"unable to allocate TX rings\n");
rc = ENOMEM;
goto ring_alloc_fail;
}
rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats) * ntxqsets,
&softc->tx_stats, 0);
if (rc)
goto dma_alloc_fail;
bus_dmamap_sync(softc->tx_stats.idi_tag, softc->tx_stats.idi_map,
BUS_DMASYNC_PREREAD);
for (i = 0; i < ntxqsets; i++) {
/* Set up the completion ring */
softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
softc->tx_cp_rings[i].ring.phys_id =
(uint16_t)HWRM_NA_SIGNATURE;
softc->tx_cp_rings[i].ring.softc = softc;
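/*
* Ring IDs: 0 is the default completion ring, 1..nrxqsets are the
* RX rings, nrxqsets+1..2*nrxqsets are the aggregation rings, so
* TX rings start at 2*nrxqsets + 1. Each ring's doorbell offset is
* its ID times 0x80.
*/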
softc->tx_cp_rings[i].ring.id =
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
softc->tx_cp_rings[i].ring.doorbell =
softc->tx_cp_rings[i].ring.id * 0x80;
softc->tx_cp_rings[i].ring.ring_size =
softc->scctx->isc_ntxd[0];
softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
/* Set up the TX ring */
softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->tx_rings[i].softc = softc;
softc->tx_rings[i].id =
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
softc->tx_rings[i].doorbell = softc->tx_rings[i].id * 0x80;
softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
bnxt_create_tx_sysctls(softc, i);
}
softc->ntxqsets = ntxqsets;
return rc;
dma_alloc_fail:
free(softc->tx_rings, M_DEVBUF);
ring_alloc_fail:
free(softc->tx_cp_rings, M_DEVBUF);
cp_alloc_fail:
return rc;
}
static void
bnxt_queues_free(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
// Free TX queues
iflib_dma_free(&softc->tx_stats);
free(softc->tx_rings, M_DEVBUF);
softc->tx_rings = NULL;
free(softc->tx_cp_rings, M_DEVBUF);
softc->tx_cp_rings = NULL;
softc->ntxqsets = 0;
// Free RX queues
iflib_dma_free(&softc->rx_stats);
iflib_dma_free(&softc->hw_tx_port_stats);
iflib_dma_free(&softc->hw_rx_port_stats);
free(softc->grp_info, M_DEVBUF);
free(softc->ag_rings, M_DEVBUF);
free(softc->rx_rings, M_DEVBUF);
free(softc->rx_cp_rings, M_DEVBUF);
}
static int
bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
uint64_t *paddrs, int nrxqs, int nrxqsets)
{
struct bnxt_softc *softc;
int i;
int rc;
softc = iflib_get_softc(ctx);
- softc->rx_cp_rings = mallocarray(nrxqsets, sizeof(struct bnxt_cp_ring),
+ softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!softc->rx_cp_rings) {
device_printf(iflib_get_dev(ctx),
"unable to allocate RX completion rings\n");
rc = ENOMEM;
goto cp_alloc_fail;
}
- softc->rx_rings = mallocarray(nrxqsets, sizeof(struct bnxt_ring),
+ softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!softc->rx_rings) {
device_printf(iflib_get_dev(ctx),
"unable to allocate RX rings\n");
rc = ENOMEM;
goto ring_alloc_fail;
}
- softc->ag_rings = mallocarray(nrxqsets, sizeof(struct bnxt_ring),
+ softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!softc->ag_rings) {
device_printf(iflib_get_dev(ctx),
"unable to allocate aggregation rings\n");
rc = ENOMEM;
goto ag_alloc_fail;
}
- softc->grp_info = mallocarray(nrxqsets, sizeof(struct bnxt_grp_info),
+ softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!softc->grp_info) {
device_printf(iflib_get_dev(ctx),
"unable to allocate ring groups\n");
rc = ENOMEM;
goto grp_alloc_fail;
}
rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats) * nrxqsets,
&softc->rx_stats, 0);
if (rc)
goto hw_stats_alloc_fail;
bus_dmamap_sync(softc->rx_stats.idi_tag, softc->rx_stats.idi_map,
BUS_DMASYNC_PREREAD);
/*
* Allocate an additional 512 bytes for future expansion, to avoid
* corruption when the driver is loaded with newer firmware that has
* added counters. This padding can be removed once no further
* counters will be added.
*/
#define BNXT_PORT_STAT_PADDING 512
rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
&softc->hw_rx_port_stats, 0);
if (rc)
goto hw_port_rx_stats_alloc_fail;
bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
&softc->hw_tx_port_stats, 0);
if (rc)
goto hw_port_tx_stats_alloc_fail;
bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
for (i = 0; i < nrxqsets; i++) {
/* Allocate the completion ring */
softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
softc->rx_cp_rings[i].ring.phys_id =
(uint16_t)HWRM_NA_SIGNATURE;
softc->rx_cp_rings[i].ring.softc = softc;
softc->rx_cp_rings[i].ring.id = i + 1;
softc->rx_cp_rings[i].ring.doorbell =
softc->rx_cp_rings[i].ring.id * 0x80;
/*
* If this ring overflows, RX stops working.
*/
softc->rx_cp_rings[i].ring.ring_size =
softc->scctx->isc_nrxd[0];
softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
/* Allocate the RX ring */
softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->rx_rings[i].softc = softc;
softc->rx_rings[i].id = i + 1;
softc->rx_rings[i].doorbell = softc->rx_rings[i].id * 0x80;
softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
/* Allocate the TPA start buffer */
- softc->rx_rings[i].tpa_start = mallocarray(
- RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT,
- sizeof(struct bnxt_full_tpa_start), M_DEVBUF,
- M_NOWAIT | M_ZERO);
+ softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
+ (RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
if (softc->rx_rings[i].tpa_start == NULL) {
rc = -ENOMEM;
device_printf(softc->dev,
"Unable to allocate space for TPA\n");
goto tpa_alloc_fail;
}
/* Allocate the AG ring */
softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->ag_rings[i].softc = softc;
softc->ag_rings[i].id = nrxqsets + i + 1;
softc->ag_rings[i].doorbell = softc->ag_rings[i].id * 0x80;
softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
/* Allocate the ring group */
softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->grp_info[i].stats_ctx =
softc->rx_cp_rings[i].stats_ctx_id;
softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
softc->grp_info[i].cp_ring_id =
softc->rx_cp_rings[i].ring.phys_id;
bnxt_create_rx_sysctls(softc, i);
}
/*
* When SR-IOV is enabled, avoid having each VF send the PORT_QSTATS
* HWRM request every second, which can lead to firmware timeouts.
*/
if (BNXT_PF(softc))
bnxt_create_port_stats_sysctls(softc);
/* And finally, the VNIC */
softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
softc->vnic_info.flow_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->vnic_info.filter_id = -1;
softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
softc->vnic_info.mc_list_count = 0;
softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
&softc->vnic_info.mc_list, 0);
if (rc)
goto mc_list_alloc_fail;
/* The VNIC RSS Hash Key */
rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
&softc->vnic_info.rss_hash_key_tbl, 0);
if (rc)
goto rss_hash_alloc_fail;
bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
softc->vnic_info.rss_hash_key_tbl.idi_map,
BUS_DMASYNC_PREWRITE);
memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
/* Allocate the RSS tables */
rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
&softc->vnic_info.rss_grp_tbl, 0);
if (rc)
goto rss_grp_alloc_fail;
bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
softc->vnic_info.rss_grp_tbl.idi_map,
BUS_DMASYNC_PREWRITE);
memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
softc->vnic_info.rss_grp_tbl.idi_size);
softc->nrxqsets = nrxqsets;
return rc;
rss_grp_alloc_fail:
iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
rss_hash_alloc_fail:
iflib_dma_free(&softc->vnic_info.mc_list);
tpa_alloc_fail:
mc_list_alloc_fail:
for (i = i - 1; i >= 0; i--)
free(softc->rx_rings[i].tpa_start, M_DEVBUF);
iflib_dma_free(&softc->hw_tx_port_stats);
hw_port_tx_stats_alloc_fail:
iflib_dma_free(&softc->hw_rx_port_stats);
hw_port_rx_stats_alloc_fail:
iflib_dma_free(&softc->rx_stats);
hw_stats_alloc_fail:
free(softc->grp_info, M_DEVBUF);
grp_alloc_fail:
free(softc->ag_rings, M_DEVBUF);
ag_alloc_fail:
free(softc->rx_rings, M_DEVBUF);
ring_alloc_fail:
free(softc->rx_cp_rings, M_DEVBUF);
cp_alloc_fail:
return rc;
}
static void
bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
{
if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
}
static int
bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
{
int rc;
rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
&softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
return rc;
}
/* Device setup and teardown */
static int
bnxt_attach_pre(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
if_softc_ctx_t scctx;
int rc = 0;
softc->ctx = ctx;
softc->dev = iflib_get_dev(ctx);
softc->media = iflib_get_media(ctx);
softc->scctx = iflib_get_softc_ctx(ctx);
softc->sctx = iflib_get_sctx(ctx);
scctx = softc->scctx;
/* TODO: Better way of detecting NPAR/VF is needed */
switch (pci_get_device(softc->dev)) {
case BCM57402_NPAR:
case BCM57404_NPAR:
case BCM57406_NPAR:
case BCM57407_NPAR:
case BCM57412_NPAR1:
case BCM57412_NPAR2:
case BCM57414_NPAR1:
case BCM57414_NPAR2:
case BCM57416_NPAR1:
case BCM57416_NPAR2:
softc->flags |= BNXT_FLAG_NPAR;
break;
case NETXTREME_C_VF1:
case NETXTREME_C_VF2:
case NETXTREME_C_VF3:
case NETXTREME_E_VF1:
case NETXTREME_E_VF2:
case NETXTREME_E_VF3:
softc->flags |= BNXT_FLAG_VF;
break;
}
pci_enable_busmaster(softc->dev);
if (bnxt_pci_mapping(softc))
return (ENXIO);
/* HWRM setup/init */
BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
rc = bnxt_alloc_hwrm_dma_mem(softc);
if (rc)
goto dma_fail;
/* Get firmware version and compare with driver */
softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (softc->ver_info == NULL) {
rc = ENOMEM;
device_printf(softc->dev,
"Unable to allocate space for version info\n");
goto ver_alloc_fail;
}
/* Default minimum required HWRM version */
softc->ver_info->hwrm_min_major = 1;
softc->ver_info->hwrm_min_minor = 2;
softc->ver_info->hwrm_min_update = 2;
rc = bnxt_hwrm_ver_get(softc);
if (rc) {
device_printf(softc->dev, "attach: hwrm ver get failed\n");
goto ver_fail;
}
if (softc->flags & BNXT_FLAG_SHORT_CMD) {
rc = bnxt_alloc_hwrm_short_cmd_req(softc);
if (rc)
goto hwrm_short_cmd_alloc_fail;
}
/* Get NVRAM info */
if (BNXT_PF(softc)) {
softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (softc->nvm_info == NULL) {
rc = ENOMEM;
device_printf(softc->dev,
"Unable to allocate space for NVRAM info\n");
goto nvm_alloc_fail;
}
rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
&softc->nvm_info->device_id, &softc->nvm_info->sector_size,
&softc->nvm_info->size, &softc->nvm_info->reserved_size,
&softc->nvm_info->available_size);
}
/* Register the driver with the FW */
rc = bnxt_hwrm_func_drv_rgtr(softc);
if (rc) {
device_printf(softc->dev, "attach: hwrm drv rgtr failed\n");
goto drv_rgtr_fail;
}
rc = bnxt_hwrm_func_rgtr_async_events(softc, NULL, 0);
if (rc) {
device_printf(softc->dev, "attach: hwrm rgtr async evts failed\n");
goto drv_rgtr_fail;
}
/* Get the HW capabilities */
rc = bnxt_hwrm_func_qcaps(softc);
if (rc)
goto failed;
/* Get the current configuration of this function */
rc = bnxt_hwrm_func_qcfg(softc);
if (rc) {
device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
goto failed;
}
iflib_set_mac(ctx, softc->func.mac_addr);
scctx->isc_txrx = &bnxt_txrx;
scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
scctx->isc_capenable =
/* These are translated to hwassist bits */
IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
/* These are checked by iflib */
IFCAP_LRO | IFCAP_VLAN_HWFILTER |
/* These are part of the iflib mask */
IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
/* These likely get lost... */
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
if (bnxt_wol_supported(softc))
scctx->isc_capenable |= IFCAP_WOL_MAGIC;
/* Get the queue config */
rc = bnxt_hwrm_queue_qportcfg(softc);
if (rc) {
device_printf(softc->dev, "attach: hwrm qportcfg failed\n");
goto failed;
}
bnxt_get_wol_settings(softc);
/* Now perform a function reset */
rc = bnxt_hwrm_func_reset(softc);
bnxt_clear_ids(softc);
if (rc)
goto failed;
/* Now set up iflib sc */
scctx->isc_tx_nsegments = 31;
scctx->isc_tx_tso_segments_max = 31;
scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
scctx->isc_vectors = softc->func.max_cp_rings;
scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
scctx->isc_txrx = &bnxt_txrx;
if (scctx->isc_nrxd[0] <
((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
device_printf(softc->dev,
"WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d). Driver may be unstable\n",
scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
device_printf(softc->dev,
"WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d). Driver may be unstable\n",
scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
scctx->isc_ntxd[1];
scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
scctx->isc_nrxd[1];
scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
scctx->isc_nrxd[2];
scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
softc->fn_qcfg.alloc_completion_rings - 1);
scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
softc->fn_qcfg.alloc_rx_rings);
scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
softc->fn_qcfg.alloc_vnics);
scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);
scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
/* iflib will map and release this bar */
scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);
/*
* Default settings for HW LRO (TPA):
* Disable HW LRO by default
* Can be enabled after taking care of 'packet forwarding'
*/
softc->hw_lro.enable = 0;
softc->hw_lro.is_mode_gro = 0;
softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
softc->hw_lro.min_agg_len = 512;
/* Allocate the default completion ring */
softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->def_cp_ring.ring.softc = softc;
softc->def_cp_ring.ring.id = 0;
softc->def_cp_ring.ring.doorbell = softc->def_cp_ring.ring.id * 0x80;
softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
sizeof(struct cmpl_base);
rc = iflib_dma_alloc(ctx,
sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
&softc->def_cp_ring_mem, 0);
softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task,
"dflt_cp");
rc = bnxt_init_sysctl_ctx(softc);
if (rc)
goto init_sysctl_failed;
if (BNXT_PF(softc)) {
rc = bnxt_create_nvram_sysctls(softc->nvm_info);
if (rc)
goto failed;
}
arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
softc->vnic_info.rss_hash_type =
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
rc = bnxt_create_config_sysctls_pre(softc);
if (rc)
goto failed;
rc = bnxt_create_hw_lro_sysctls(softc);
if (rc)
goto failed;
rc = bnxt_create_pause_fc_sysctls(softc);
if (rc)
goto failed;
/* Initialize the vlan list */
SLIST_INIT(&softc->vnic_info.vlan_tags);
softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
return (rc);
failed:
bnxt_free_sysctl_ctx(softc);
init_sysctl_failed:
bnxt_hwrm_func_drv_unrgtr(softc, false);
drv_rgtr_fail:
if (BNXT_PF(softc))
free(softc->nvm_info, M_DEVBUF);
nvm_alloc_fail:
bnxt_free_hwrm_short_cmd_req(softc);
hwrm_short_cmd_alloc_fail:
ver_fail:
free(softc->ver_info, M_DEVBUF);
ver_alloc_fail:
bnxt_free_hwrm_dma_mem(softc);
dma_fail:
BNXT_HWRM_LOCK_DESTROY(softc);
bnxt_pci_mapping_free(softc);
pci_disable_busmaster(softc->dev);
return (rc);
}
static int
bnxt_attach_post(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
int rc;
bnxt_create_config_sysctls_post(softc);
/* Update link state etc... */
rc = bnxt_probe_phy(softc);
if (rc)
goto failed;
/* Needs to be done after probing the phy */
bnxt_create_ver_sysctls(softc);
bnxt_add_media_types(softc);
ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
softc->scctx->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN;
failed:
return rc;
}
static int
bnxt_detach(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct bnxt_vlan_tag *tag;
struct bnxt_vlan_tag *tmp;
int i;
bnxt_wol_config(ctx);
bnxt_do_disable_intr(&softc->def_cp_ring);
bnxt_free_sysctl_ctx(softc);
bnxt_hwrm_func_reset(softc);
bnxt_clear_ids(softc);
iflib_irq_free(ctx, &softc->def_cp_ring.irq);
iflib_config_gtask_deinit(&softc->def_cp_task);
/* We need to free() these here... */
for (i = softc->nrxqsets - 1; i >= 0; i--) {
iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
}
iflib_dma_free(&softc->vnic_info.mc_list);
iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
if (softc->vnic_info.vlan_tag_list.idi_vaddr)
iflib_dma_free(&softc->vnic_info.vlan_tag_list);
SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
free(tag, M_DEVBUF);
iflib_dma_free(&softc->def_cp_ring_mem);
for (i = 0; i < softc->nrxqsets; i++)
free(softc->rx_rings[i].tpa_start, M_DEVBUF);
free(softc->ver_info, M_DEVBUF);
if (BNXT_PF(softc))
free(softc->nvm_info, M_DEVBUF);
bnxt_hwrm_func_drv_unrgtr(softc, false);
bnxt_free_hwrm_dma_mem(softc);
bnxt_free_hwrm_short_cmd_req(softc);
BNXT_HWRM_LOCK_DESTROY(softc);
pci_disable_busmaster(softc->dev);
bnxt_pci_mapping_free(softc);
return 0;
}
/* Device configuration */
static void
bnxt_init(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct ifmediareq ifmr;
int i, j;
int rc;
rc = bnxt_hwrm_func_reset(softc);
if (rc)
return;
bnxt_clear_ids(softc);
/* Allocate the default completion ring */
softc->def_cp_ring.cons = UINT32_MAX;
softc->def_cp_ring.v_bit = 1;
bnxt_mark_cpr_invalid(&softc->def_cp_ring);
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
&softc->def_cp_ring.ring,
(uint16_t)HWRM_NA_SIGNATURE,
HWRM_NA_SIGNATURE, true);
if (rc)
goto fail;
/* And now set the default CP ring as the async CP ring */
rc = bnxt_cfg_async_cr(softc);
if (rc)
goto fail;
for (i = 0; i < softc->nrxqsets; i++) {
/* Allocate the statistics context */
rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
softc->rx_stats.idi_paddr +
(sizeof(struct ctx_hw_stats) * i));
if (rc)
goto fail;
/* Allocate the completion ring */
softc->rx_cp_rings[i].cons = UINT32_MAX;
softc->rx_cp_rings[i].v_bit = 1;
softc->rx_cp_rings[i].last_idx = UINT32_MAX;
bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
&softc->rx_cp_rings[i].ring, (uint16_t)HWRM_NA_SIGNATURE,
HWRM_NA_SIGNATURE, true);
if (rc)
goto fail;
/* Allocate the RX ring */
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
&softc->rx_rings[i], (uint16_t)HWRM_NA_SIGNATURE,
HWRM_NA_SIGNATURE, false);
if (rc)
goto fail;
BNXT_RX_DB(&softc->rx_rings[i], 0);
/* TODO: Cumulus+ doesn't need the double doorbell */
BNXT_RX_DB(&softc->rx_rings[i], 0);
/* Allocate the AG ring */
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
&softc->ag_rings[i], (uint16_t)HWRM_NA_SIGNATURE,
HWRM_NA_SIGNATURE, false);
if (rc)
goto fail;
BNXT_RX_DB(&softc->rx_rings[i], 0);
/* TODO: Cumulus+ doesn't need the double doorbell */
BNXT_RX_DB(&softc->ag_rings[i], 0);
/* Allocate the ring group */
softc->grp_info[i].stats_ctx =
softc->rx_cp_rings[i].stats_ctx_id;
softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
softc->grp_info[i].cp_ring_id =
softc->rx_cp_rings[i].ring.phys_id;
rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
if (rc)
goto fail;
}
/* Allocate the VNIC RSS context */
rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
if (rc)
goto fail;
/* Allocate the vnic */
softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
if (rc)
goto fail;
rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
if (rc)
goto fail;
rc = bnxt_hwrm_set_filter(softc, &softc->vnic_info);
if (rc)
goto fail;
/* Enable RSS on the VNICs */
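/*
* Fill the RSS indirection table round-robin with the ring group
* IDs so hashed RX traffic is spread across every RX queue set.
*/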
for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
((uint16_t *)
softc->vnic_info.rss_grp_tbl.idi_vaddr)[i] =
htole16(softc->grp_info[j].grp_id);
if (++j == softc->nrxqsets)
j = 0;
}
rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
softc->vnic_info.rss_hash_type);
if (rc)
goto fail;
rc = bnxt_hwrm_vnic_tpa_cfg(softc);
if (rc)
goto fail;
for (i = 0; i < softc->ntxqsets; i++) {
/* Allocate the statistics context */
rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
softc->tx_stats.idi_paddr +
(sizeof(struct ctx_hw_stats) * i));
if (rc)
goto fail;
/* Allocate the completion ring */
softc->tx_cp_rings[i].cons = UINT32_MAX;
softc->tx_cp_rings[i].v_bit = 1;
bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
&softc->tx_cp_rings[i].ring, (uint16_t)HWRM_NA_SIGNATURE,
HWRM_NA_SIGNATURE, false);
if (rc)
goto fail;
/* Allocate the TX ring */
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
&softc->tx_rings[i], softc->tx_cp_rings[i].ring.phys_id,
softc->tx_cp_rings[i].stats_ctx_id, false);
if (rc)
goto fail;
BNXT_TX_DB(&softc->tx_rings[i], 0);
/* TODO: Cumulus+ doesn't need the double doorbell */
BNXT_TX_DB(&softc->tx_rings[i], 0);
}
bnxt_do_enable_intr(&softc->def_cp_ring);
bnxt_media_status(softc->ctx, &ifmr);
bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
return;
fail:
bnxt_hwrm_func_reset(softc);
bnxt_clear_ids(softc);
return;
}
static void
bnxt_stop(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
bnxt_do_disable_intr(&softc->def_cp_ring);
bnxt_hwrm_func_reset(softc);
bnxt_clear_ids(softc);
return;
}
static void
bnxt_multi_set(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
uint8_t *mta;
int cnt, mcnt;
mcnt = if_multiaddr_count(ifp, -1);
if (mcnt > BNXT_MAX_MC_ADDRS) {
softc->vnic_info.rx_mask |=
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
} else {
softc->vnic_info.rx_mask &=
~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
mta = softc->vnic_info.mc_list.idi_vaddr;
bzero(mta, softc->vnic_info.mc_list.idi_size);
if_multiaddr_array(ifp, mta, &cnt, mcnt);
bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
softc->vnic_info.mc_list_count = cnt;
softc->vnic_info.rx_mask |=
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
device_printf(softc->dev,
"set_multi: rx_mask set failed\n");
}
}
static int
bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
if (mtu > BNXT_MAX_MTU)
return EINVAL;
softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
return 0;
}
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct bnxt_link_info *link_info = &softc->link_info;
struct ifmedia_entry *next;
uint64_t target_baudrate = bnxt_get_baudrate(link_info);
int active_media = IFM_UNKNOWN;
bnxt_update_link(softc, true);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
if (link_info->link_up)
ifmr->ifm_status |= IFM_ACTIVE;
else
ifmr->ifm_status &= ~IFM_ACTIVE;
if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
ifmr->ifm_active |= IFM_FDX;
else
ifmr->ifm_active |= IFM_HDX;
/*
* Go through the list of supported media which got prepared
* as part of bnxt_add_media_types() using api ifmedia_add().
*/
LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
active_media = next->ifm_media;
break;
}
}
ifmr->ifm_active |= active_media;
if (link_info->flow_ctrl.rx)
ifmr->ifm_active |= IFM_ETH_RXPAUSE;
if (link_info->flow_ctrl.tx)
ifmr->ifm_active |= IFM_ETH_TXPAUSE;
bnxt_report_link(softc);
return;
}
static int
bnxt_media_change(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct ifmedia *ifm = iflib_get_media(ctx);
struct ifmediareq ifmr;
int rc;
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return EINVAL;
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_100_T:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
break;
case IFM_1000_KX:
case IFM_1000_T:
case IFM_1000_SGMII:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
break;
case IFM_2500_KX:
case IFM_2500_T:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
break;
case IFM_10G_CR1:
case IFM_10G_KR:
case IFM_10G_LR:
case IFM_10G_SR:
case IFM_10G_T:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
break;
case IFM_20G_KR2:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
break;
case IFM_25G_CR:
case IFM_25G_KR:
case IFM_25G_SR:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
break;
case IFM_40G_CR4:
case IFM_40G_KR4:
case IFM_40G_LR4:
case IFM_40G_SR4:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
case IFM_50G_CR2:
case IFM_50G_KR2:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
case IFM_100G_CR4:
case IFM_100G_KR4:
case IFM_100G_LR4:
case IFM_100G_SR4:
softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
softc->link_info.req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
break;
default:
device_printf(softc->dev,
"Unsupported media type! Using auto\n");
/* Fall-through */
case IFM_AUTO:
// Auto
softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
break;
}
rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
bnxt_media_status(softc->ctx, &ifmr);
return rc;
}
static int
bnxt_promisc_set(if_ctx_t ctx, int flags)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
int rc;
if (ifp->if_flags & IFF_ALLMULTI ||
if_multiaddr_count(ifp, -1) > BNXT_MAX_MC_ADDRS)
softc->vnic_info.rx_mask |=
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
else
softc->vnic_info.rx_mask &=
~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
if (ifp->if_flags & IFF_PROMISC)
softc->vnic_info.rx_mask |=
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
else
softc->vnic_info.rx_mask &=
~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
return rc;
}
static uint64_t
bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
{
if_t ifp = iflib_get_ifp(ctx);
if (cnt < IFCOUNTERS)
return if_get_counter_default(ifp, cnt);
return 0;
}
static void
bnxt_update_admin_status(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
/*
* When SR-IOV is enabled, avoid having each VF send this HWRM
* request every second, which can cause firmware timeouts.
*/
if (BNXT_PF(softc)) {
bnxt_hwrm_port_qstats(softc);
}
return;
}
static void
bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
uint64_t ticks_now = ticks;
/* Schedule bnxt_update_admin_status() once per second */
if (ticks_now - softc->admin_ticks >= hz) {
softc->admin_ticks = ticks_now;
iflib_admin_intr_deferred(ctx);
}
return;
}
static inline void
bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
{
if (cpr->ring.phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
/* First time enabling, do not set index */
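/* (cpr->cons == UINT32_MAX is the "never consumed" sentinel set at init) */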
if (cpr->cons == UINT32_MAX)
BNXT_CP_ENABLE_DB(&cpr->ring);
else
BNXT_CP_IDX_ENABLE_DB(&cpr->ring, cpr->cons);
}
}
static inline void
bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
{
if (cpr->ring.phys_id != (uint16_t)HWRM_NA_SIGNATURE)
BNXT_CP_DISABLE_DB(&cpr->ring);
}
/* Enable all interrupts */
static void
bnxt_intr_enable(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
int i;
bnxt_do_enable_intr(&softc->def_cp_ring);
for (i = 0; i < softc->nrxqsets; i++)
bnxt_do_enable_intr(&softc->rx_cp_rings[i]);
return;
}
/* Enable interrupt for a single queue */
static int
bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
bnxt_do_enable_intr(&softc->tx_cp_rings[qid]);
return 0;
}
static int
bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
bnxt_do_enable_intr(&softc->rx_cp_rings[qid]);
return 0;
}
/* Disable all interrupts */
static void
bnxt_disable_intr(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
int i;
/*
* NOTE: These TX interrupts should never get enabled, so don't
* update the index
*/
for (i = 0; i < softc->ntxqsets; i++)
bnxt_do_disable_intr(&softc->tx_cp_rings[i]);
for (i = 0; i < softc->nrxqsets; i++)
bnxt_do_disable_intr(&softc->rx_cp_rings[i]);
return;
}
static int
bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
int rc;
int i;
char irq_name[16];
rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
bnxt_handle_def_cp, softc, 0, "def_cp");
if (rc) {
device_printf(iflib_get_dev(ctx),
"Failed to register default completion ring handler\n");
return rc;
}
for (i = 0; i < softc->scctx->isc_nrxqsets; i++) {
snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
rc = iflib_irq_alloc_generic(ctx, &softc->rx_cp_rings[i].irq,
softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RX,
bnxt_handle_rx_cp, &softc->rx_cp_rings[i], i, irq_name);
if (rc) {
device_printf(iflib_get_dev(ctx),
"Failed to register RX completion ring handler\n");
i--;
goto fail;
}
}
for (i = 0; i < softc->scctx->isc_ntxqsets; i++)
iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
return rc;
fail:
for (; i >= 0; i--)
iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
iflib_irq_free(ctx, &softc->def_cp_ring.irq);
return rc;
}
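/*
 * Illustrative sketch only, not driver code: the unwind pattern used by
 * bnxt_msix_intr_assign() above.  When setup of entry "i" fails, entries
 * 0..i-1 were already allocated, so the index is stepped back before the
 * backwards release loop.  The ex_* resource type and helpers are
 * hypothetical stand-ins for the iflib IRQ calls.
 */
#include <errno.h>
#include <stdlib.h>

struct ex_res {
	void	*buf;
};

static int
ex_alloc_one(struct ex_res *r)
{

	r->buf = malloc(64);
	return (r->buf == NULL ? ENOMEM : 0);
}

static void
ex_free_one(struct ex_res *r)
{

	free(r->buf);
	r->buf = NULL;
}

static int
ex_alloc_all(struct ex_res *res, int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = ex_alloc_one(&res[i]);
		if (rc != 0) {
			i--;		/* res[i] itself was never allocated */
			goto fail;
		}
	}
	return (0);
fail:
	for (; i >= 0; i--)
		ex_free_one(&res[i]);
	return (rc);
}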
/*
* We're explicitly allowing duplicates here. They will need to be
* removed as many times as they are added.
*/
static void
bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct bnxt_vlan_tag *new_tag;
new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
if (new_tag == NULL)
return;
new_tag->tag = vtag;
new_tag->tpid = 8100;
SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
}
static void
bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct bnxt_vlan_tag *vlan_tag;
SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
if (vlan_tag->tag == vtag) {
SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
bnxt_vlan_tag, next);
free(vlan_tag, M_DEVBUF);
break;
}
}
}
static int
bnxt_wol_config(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
if (!softc)
return -EBUSY;
if (!bnxt_wol_supported(softc))
return -ENOTSUP;
if (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) {
if (!softc->wol) {
if (bnxt_hwrm_alloc_wol_fltr(softc))
return -EBUSY;
softc->wol = 1;
}
} else {
if (softc->wol) {
if (bnxt_hwrm_free_wol_fltr(softc))
return -EBUSY;
softc->wol = 0;
}
}
return 0;
}
static int
bnxt_shutdown(if_ctx_t ctx)
{
bnxt_wol_config(ctx);
return 0;
}
static int
bnxt_suspend(if_ctx_t ctx)
{
bnxt_wol_config(ctx);
return 0;
}
static int
bnxt_resume(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
bnxt_get_wol_settings(softc);
return 0;
}
static int
bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct ifreq *ifr = (struct ifreq *)data;
struct ifreq_buffer *ifbuf = &ifr->ifr_ifru.ifru_buffer;
struct bnxt_ioctl_header *ioh =
(struct bnxt_ioctl_header *)(ifbuf->buffer);
int rc = ENOTSUP;
struct bnxt_ioctl_data *iod = NULL;
switch (command) {
case SIOCGPRIVATE_0:
if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
goto exit;
iod = malloc(ifbuf->length, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!iod) {
rc = ENOMEM;
goto exit;
}
copyin(ioh, iod, ifbuf->length);
switch (ioh->type) {
case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
{
struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
&iod->find;
rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
&find->ordinal, find->ext, &find->index,
find->use_index, find->search_opt,
&find->data_length, &find->item_length,
&find->fw_ver);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_READ:
{
struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
struct iflib_dma_info dma_data;
size_t offset;
size_t remain;
size_t csize;
/*
* Some HWRM versions can't read more than 0x8000 bytes
*/
rc = iflib_dma_alloc(softc->ctx,
min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
if (rc)
break;
for (remain = rd->length, offset = 0;
remain && offset < rd->length; offset += 0x8000) {
csize = min(remain, 0x8000);
rc = bnxt_hwrm_nvm_read(softc, rd->index,
rd->offset + offset, csize, &dma_data);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
break;
}
else {
copyout(dma_data.idi_vaddr,
rd->data + offset, csize);
iod->hdr.rc = 0;
}
remain -= csize;
}
if (iod->hdr.rc == 0)
copyout(iod, ioh, ifbuf->length);
iflib_dma_free(&dma_data);
rc = 0;
goto exit;
}
case BNXT_HWRM_FW_RESET:
{
struct bnxt_ioctl_hwrm_fw_reset *rst =
&iod->reset;
rc = bnxt_hwrm_fw_reset(softc, rst->processor,
&rst->selfreset);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_FW_QSTATUS:
{
struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
&iod->status;
rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
&qstat->selfreset);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_WRITE:
{
struct bnxt_ioctl_hwrm_nvm_write *wr =
&iod->write;
rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
wr->type, wr->ordinal, wr->ext, wr->attr,
wr->option, wr->data_length, wr->keep,
&wr->item_length, &wr->index);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
{
struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
&iod->erase;
rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_GET_DIR_INFO:
{
struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
&iod->dir_info;
rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
&info->entry_length);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
{
struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
&iod->dir_entries;
struct iflib_dma_info dma_data;
rc = iflib_dma_alloc(softc->ctx, get->max_size,
&dma_data, BUS_DMA_NOWAIT);
if (rc)
break;
rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
&get->entry_length, &dma_data);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
copyout(dma_data.idi_vaddr, get->data,
get->entry_length * get->entries);
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
iflib_dma_free(&dma_data);
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_VERIFY_UPDATE:
{
struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
&iod->verify;
rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
vrfy->ordinal, vrfy->ext);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_INSTALL_UPDATE:
{
struct bnxt_ioctl_hwrm_nvm_install_update *inst =
&iod->install;
rc = bnxt_hwrm_nvm_install_update(softc,
inst->install_type, &inst->installed_items,
&inst->result, &inst->problem_item,
&inst->reset_required);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_NVM_MODIFY:
{
struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;
rc = bnxt_hwrm_nvm_modify(softc, mod->index,
mod->offset, mod->data, true, mod->length);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_FW_GET_TIME:
{
struct bnxt_ioctl_hwrm_fw_get_time *gtm =
&iod->get_time;
rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
&gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
&gtm->second, &gtm->millisecond, &gtm->zone);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
case BNXT_HWRM_FW_SET_TIME:
{
struct bnxt_ioctl_hwrm_fw_set_time *stm =
&iod->set_time;
rc = bnxt_hwrm_fw_set_time(softc, stm->year,
stm->month, stm->day, stm->hour, stm->minute,
stm->second, stm->millisecond, stm->zone);
if (rc) {
iod->hdr.rc = rc;
copyout(&iod->hdr.rc, &ioh->rc,
sizeof(ioh->rc));
}
else {
iod->hdr.rc = 0;
copyout(iod, ioh, ifbuf->length);
}
rc = 0;
goto exit;
}
}
break;
}
exit:
if (iod)
free(iod, M_DEVBUF);
return rc;
}
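/*
 * Illustrative sketch only, not driver code: the 0x8000-byte chunking used
 * by the BNXT_HWRM_NVM_READ case above, expressed as a standalone loop.
 * The reader callback is a hypothetical stand-in for bnxt_hwrm_nvm_read()
 * plus the bounce-buffer copyout.
 */
#include <stddef.h>

typedef int (*ex_read_chunk_t)(size_t offset, size_t len, char *dst);

static int
ex_chunked_read(ex_read_chunk_t rd, char *dst, size_t total)
{
	size_t offset, csize;
	int rc = 0;

	for (offset = 0; offset < total; offset += csize) {
		/* Never ask the firmware for more than 0x8000 bytes. */
		csize = total - offset < 0x8000 ? total - offset : 0x8000;
		rc = rd(offset, csize, dst + offset);
		if (rc != 0)
			break;
	}
	return (rc);
}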
/*
* Support functions
*/
static int
bnxt_probe_phy(struct bnxt_softc *softc)
{
struct bnxt_link_info *link_info = &softc->link_info;
int rc = 0;
rc = bnxt_update_link(softc, false);
if (rc) {
device_printf(softc->dev,
"Probe phy can't update link (rc: %x)\n", rc);
return (rc);
}
/* Initialize the ethtool settings copy with NVM settings */
if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
link_info->autoneg |= BNXT_AUTONEG_SPEED;
link_info->req_duplex = link_info->duplex_setting;
if (link_info->autoneg & BNXT_AUTONEG_SPEED)
link_info->req_link_speed = link_info->auto_link_speed;
else
link_info->req_link_speed = link_info->force_link_speed;
return (rc);
}
static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
struct bnxt_link_info *link_info = &softc->link_info;
uint16_t supported;
uint8_t phy_type = get_phy_type(softc);
supported = link_info->support_speeds;
/* Auto is always supported */
ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
if (softc->flags & BNXT_FLAG_NPAR)
return;
switch (phy_type) {
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC);
BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T);
BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T);
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T);
BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX);
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII);
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
/* Only Autoneg is supported for TYPE_UNKNOWN */
device_printf(softc->dev, "Unknown phy type\n");
break;
default:
/* Only Autoneg is supported for new phy type values */
device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
break;
}
return;
}
static int
bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
{
uint32_t flag;
if (bar->res != NULL) {
device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
return EDOOFUS;
}
bar->rid = PCIR_BAR(bar_num);
flag = RF_ACTIVE;
if (shareable)
flag |= RF_SHAREABLE;
if ((bar->res =
bus_alloc_resource_any(softc->dev,
SYS_RES_MEMORY,
&bar->rid,
flag)) == NULL) {
device_printf(softc->dev,
"PCI BAR%d mapping failure\n", bar_num);
return (ENXIO);
}
bar->tag = rman_get_bustag(bar->res);
bar->handle = rman_get_bushandle(bar->res);
bar->size = rman_get_size(bar->res);
return 0;
}
static int
bnxt_pci_mapping(struct bnxt_softc *softc)
{
int rc;
rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
if (rc)
return rc;
rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
return rc;
}
static void
bnxt_pci_mapping_free(struct bnxt_softc *softc)
{
if (softc->hwrm_bar.res != NULL)
bus_release_resource(softc->dev, SYS_RES_MEMORY,
softc->hwrm_bar.rid, softc->hwrm_bar.res);
softc->hwrm_bar.res = NULL;
if (softc->doorbell_bar.res != NULL)
bus_release_resource(softc->dev, SYS_RES_MEMORY,
softc->doorbell_bar.rid, softc->doorbell_bar.res);
softc->doorbell_bar.res = NULL;
}
static int
bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
{
struct bnxt_link_info *link_info = &softc->link_info;
uint8_t link_up = link_info->link_up;
int rc = 0;
rc = bnxt_hwrm_port_phy_qcfg(softc);
if (rc)
goto exit;
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status ==
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
link_info->link_up = 1;
else
link_info->link_up = 0;
if (link_up != link_info->link_up)
bnxt_report_link(softc);
} else {
/* Always report link down if not required to update link state */
link_info->link_up = 0;
}
exit:
return rc;
}
void
bnxt_report_link(struct bnxt_softc *softc)
{
struct bnxt_link_info *link_info = &softc->link_info;
const char *duplex = NULL, *flow_ctrl = NULL;
if (link_info->link_up == link_info->last_link_up) {
if (!link_info->link_up)
return;
if ((link_info->duplex == link_info->last_duplex) &&
(!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
return;
}
if (link_info->link_up) {
if (link_info->duplex ==
HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
duplex = "full duplex";
else
duplex = "half duplex";
if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
flow_ctrl = "FC - receive & transmit";
else if (link_info->flow_ctrl.tx)
flow_ctrl = "FC - transmit";
else if (link_info->flow_ctrl.rx)
flow_ctrl = "FC - receive";
else
flow_ctrl = "FC - none";
iflib_link_state_change(softc->ctx, LINK_STATE_UP,
IF_Gbps(100));
device_printf(softc->dev, "Link is UP %s, %s - %d Mbps \n", duplex,
flow_ctrl, (link_info->link_speed * 100));
} else {
iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
bnxt_get_baudrate(&softc->link_info));
device_printf(softc->dev, "Link is Down\n");
}
link_info->last_link_up = link_info->link_up;
link_info->last_duplex = link_info->duplex;
link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
/* update media types */
ifmedia_removeall(softc->media);
bnxt_add_media_types(softc);
ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
}
static int
bnxt_handle_rx_cp(void *arg)
{
struct bnxt_cp_ring *cpr = arg;
/* Disable further interrupts for this queue */
BNXT_CP_DISABLE_DB(&cpr->ring);
return FILTER_SCHEDULE_THREAD;
}
static int
bnxt_handle_def_cp(void *arg)
{
struct bnxt_softc *softc = arg;
BNXT_CP_DISABLE_DB(&softc->def_cp_ring.ring);
GROUPTASK_ENQUEUE(&softc->def_cp_task);
return FILTER_HANDLED;
}
static void
bnxt_clear_ids(struct bnxt_softc *softc)
{
int i;
softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
for (i = 0; i < softc->ntxqsets; i++) {
softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
softc->tx_cp_rings[i].ring.phys_id =
(uint16_t)HWRM_NA_SIGNATURE;
softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
}
for (i = 0; i < softc->nrxqsets; i++) {
softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
softc->rx_cp_rings[i].ring.phys_id =
(uint16_t)HWRM_NA_SIGNATURE;
softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
}
softc->vnic_info.filter_id = -1;
softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
softc->vnic_info.rss_grp_tbl.idi_size);
}
static void
bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
{
struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
int i;
for (i = 0; i < cpr->ring.ring_size; i++)
cmp[i].info3_v = !cpr->v_bit;
}
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
struct hwrm_async_event_cmpl *ae = (void *)cmpl;
uint16_t async_id = le16toh(ae->event_id);
struct ifmediareq ifmr;
switch (async_id) {
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
bnxt_media_status(softc->ctx, &ifmr);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
device_printf(softc->dev,
"Unhandled async completion type %u\n", async_id);
break;
default:
device_printf(softc->dev,
"Unknown async completion type %u\n", async_id);
break;
}
}
static void
bnxt_def_cp_task(void *context)
{
if_ctx_t ctx = context;
struct bnxt_softc *softc = iflib_get_softc(ctx);
struct bnxt_cp_ring *cpr = &softc->def_cp_ring;
/* Handle completions on the default completion ring */
struct cmpl_base *cmpl;
uint32_t cons = cpr->cons;
bool v_bit = cpr->v_bit;
bool last_v_bit;
uint32_t last_cons;
uint16_t type;
for (;;) {
last_cons = cons;
last_v_bit = v_bit;
NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];
if (!CMP_VALID(cmpl, v_bit))
break;
type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
switch (type) {
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
bnxt_handle_async_event(softc, cmpl);
break;
case CMPL_BASE_TYPE_TX_L2:
case CMPL_BASE_TYPE_RX_L2:
case CMPL_BASE_TYPE_RX_AGG:
case CMPL_BASE_TYPE_RX_TPA_START:
case CMPL_BASE_TYPE_RX_TPA_END:
case CMPL_BASE_TYPE_STAT_EJECT:
case CMPL_BASE_TYPE_HWRM_DONE:
case CMPL_BASE_TYPE_HWRM_FWD_REQ:
case CMPL_BASE_TYPE_HWRM_FWD_RESP:
case CMPL_BASE_TYPE_CQ_NOTIFICATION:
case CMPL_BASE_TYPE_SRQ_EVENT:
case CMPL_BASE_TYPE_DBQ_EVENT:
case CMPL_BASE_TYPE_QP_EVENT:
case CMPL_BASE_TYPE_FUNC_EVENT:
device_printf(softc->dev,
"Unhandled completion type %u\n", type);
break;
default:
device_printf(softc->dev,
"Unknown completion type %u\n", type);
break;
}
}
cpr->cons = last_cons;
cpr->v_bit = last_v_bit;
BNXT_CP_IDX_ENABLE_DB(&cpr->ring, cpr->cons);
}
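/*
 * Illustrative sketch only, not driver code: the consumer-index plus
 * valid-bit scheme the loop above depends on.  bnxt_mark_cpr_invalid()
 * seeds every entry with the opposite of the current valid bit, and the
 * expected bit flips each time the consumer index wraps, so an entry is
 * only consumed once hardware has written it with the matching phase.
 * This is a simplified stand-in, not the definition of the driver's
 * NEXT_CP_CONS_V() or CMP_VALID() macros.
 */
#include <stdbool.h>
#include <stdint.h>

static inline void
ex_next_cons(uint32_t ring_size, uint32_t *cons, bool *v_bit)
{

	if (++*cons == ring_size) {
		*cons = 0;
		*v_bit = !*v_bit;	/* phase flips on every wrap */
	}
}

static inline bool
ex_entry_valid(bool hw_v_bit, bool expected_v_bit)
{

	return (hw_v_bit == expected_v_bit);
}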
static uint8_t
get_phy_type(struct bnxt_softc *softc)
{
struct bnxt_link_info *link_info = &softc->link_info;
uint8_t phy_type = link_info->phy_type;
uint16_t supported;
if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
return phy_type;
/* Deduce the phy type from the media type and supported speeds */
supported = link_info->support_speeds;
if (link_info->media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
if (link_info->media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
}
if (link_info->media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
return phy_type;
}
bool
bnxt_check_hwrm_version(struct bnxt_softc *softc)
{
char buf[16];
sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
device_printf(softc->dev,
"WARNING: HWRM version %s is too old (older than %s)\n",
softc->ver_info->hwrm_if_ver, buf);
return false;
}
else if (softc->ver_info->hwrm_min_major ==
softc->ver_info->hwrm_if_major) {
if (softc->ver_info->hwrm_min_minor >
softc->ver_info->hwrm_if_minor) {
device_printf(softc->dev,
"WARNING: HWRM version %s is too old (older than %s)\n",
softc->ver_info->hwrm_if_ver, buf);
return false;
}
else if (softc->ver_info->hwrm_min_minor ==
softc->ver_info->hwrm_if_minor) {
if (softc->ver_info->hwrm_min_update >
softc->ver_info->hwrm_if_update) {
device_printf(softc->dev,
"WARNING: HWRM version %s is too old (older than %s)\n",
softc->ver_info->hwrm_if_ver, buf);
return false;
}
}
}
return true;
}
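/*
 * Illustrative sketch only, not driver code: the nested major/minor/update
 * comparison above is equivalent to comparing the two versions packed into
 * a single integer, which can be easier to audit.  The ex_* helpers are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint32_t
ex_hwrm_ver(uint8_t major, uint8_t minor, uint8_t update)
{

	return ((uint32_t)major << 16 | (uint32_t)minor << 8 | update);
}

static inline bool
ex_hwrm_new_enough(uint8_t if_major, uint8_t if_minor, uint8_t if_update,
    uint8_t min_major, uint8_t min_minor, uint8_t min_update)
{

	/* True when the running interface version meets the minimum. */
	return (ex_hwrm_ver(if_major, if_minor, if_update) >=
	    ex_hwrm_ver(min_major, min_minor, min_update));
}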
static uint64_t
bnxt_get_baudrate(struct bnxt_link_info *link)
{
switch (link->link_speed) {
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
return IF_Mbps(100);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
return IF_Gbps(1);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
return IF_Gbps(2);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
return IF_Mbps(2500);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
return IF_Gbps(10);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
return IF_Gbps(20);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
return IF_Gbps(25);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
return IF_Gbps(40);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
return IF_Gbps(50);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
return IF_Gbps(100);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
return IF_Mbps(10);
}
return IF_Gbps(100);
}
static void
bnxt_get_wol_settings(struct bnxt_softc *softc)
{
uint16_t wol_handle = 0;
if (!bnxt_wol_supported(softc))
return;
do {
wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
}
Index: head/sys/dev/bwn/if_bwn.c
===================================================================
--- head/sys/dev/bwn/if_bwn.c (revision 328217)
+++ head/sys/dev/bwn/if_bwn.c (revision 328218)
@@ -1,7502 +1,7502 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2009-2010 Weongyo Jeong <weongyo@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* The Broadcom Wireless LAN controller driver.
*/
#include "opt_bwn.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/firmware.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/bwn/if_bwn_siba.h>
#include <dev/bwn/if_bwnreg.h>
#include <dev/bwn/if_bwnvar.h>
#include <dev/bwn/if_bwn_debug.h>
#include <dev/bwn/if_bwn_misc.h>
#include <dev/bwn/if_bwn_util.h>
#include <dev/bwn/if_bwn_phy_common.h>
#include <dev/bwn/if_bwn_phy_g.h>
#include <dev/bwn/if_bwn_phy_lp.h>
#include <dev/bwn/if_bwn_phy_n.h>
static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0,
"Broadcom driver parameters");
/*
* Tunable & sysctl variables.
*/
#ifdef BWN_DEBUG
static int bwn_debug = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0,
"Broadcom debugging printfs");
#endif
static int bwn_bfp = 0; /* use "Bad Frames Preemption" */
SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0,
"uses Bad Frames Preemption");
static int bwn_bluetooth = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0,
"turns on Bluetooth Coexistence");
static int bwn_hwpctl = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0,
"uses H/W power control");
static int bwn_msi_disable = 0; /* MSI disabled */
TUNABLE_INT("hw.bwn.msi_disable", &bwn_msi_disable);
static int bwn_usedma = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0,
"uses DMA");
TUNABLE_INT("hw.bwn.usedma", &bwn_usedma);
static int bwn_wme = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0,
"uses WME support");
static void bwn_attach_pre(struct bwn_softc *);
static int bwn_attach_post(struct bwn_softc *);
static void bwn_sprom_bugfixes(device_t);
static int bwn_init(struct bwn_softc *);
static void bwn_parent(struct ieee80211com *);
static void bwn_start(struct bwn_softc *);
static int bwn_transmit(struct ieee80211com *, struct mbuf *);
static int bwn_attach_core(struct bwn_mac *);
static int bwn_phy_getinfo(struct bwn_mac *, int);
static int bwn_chiptest(struct bwn_mac *);
static int bwn_setup_channels(struct bwn_mac *, int, int);
static void bwn_shm_ctlword(struct bwn_mac *, uint16_t,
uint16_t);
static void bwn_addchannels(struct ieee80211_channel [], int, int *,
const struct bwn_channelinfo *, const uint8_t []);
static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void bwn_updateslot(struct ieee80211com *);
static void bwn_update_promisc(struct ieee80211com *);
static void bwn_wme_init(struct bwn_mac *);
static int bwn_wme_update(struct ieee80211com *);
static void bwn_wme_clear(struct bwn_softc *);
static void bwn_wme_load(struct bwn_mac *);
static void bwn_wme_loadparams(struct bwn_mac *,
const struct wmeParams *, uint16_t);
static void bwn_scan_start(struct ieee80211com *);
static void bwn_scan_end(struct ieee80211com *);
static void bwn_set_channel(struct ieee80211com *);
static struct ieee80211vap *bwn_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void bwn_vap_delete(struct ieee80211vap *);
static void bwn_stop(struct bwn_softc *);
static int bwn_core_init(struct bwn_mac *);
static void bwn_core_start(struct bwn_mac *);
static void bwn_core_exit(struct bwn_mac *);
static void bwn_bt_disable(struct bwn_mac *);
static int bwn_chip_init(struct bwn_mac *);
static void bwn_set_txretry(struct bwn_mac *, int, int);
static void bwn_rate_init(struct bwn_mac *);
static void bwn_set_phytxctl(struct bwn_mac *);
static void bwn_spu_setdelay(struct bwn_mac *, int);
static void bwn_bt_enable(struct bwn_mac *);
static void bwn_set_macaddr(struct bwn_mac *);
static void bwn_crypt_init(struct bwn_mac *);
static void bwn_chip_exit(struct bwn_mac *);
static int bwn_fw_fillinfo(struct bwn_mac *);
static int bwn_fw_loaducode(struct bwn_mac *);
static int bwn_gpio_init(struct bwn_mac *);
static int bwn_fw_loadinitvals(struct bwn_mac *);
static int bwn_phy_init(struct bwn_mac *);
static void bwn_set_txantenna(struct bwn_mac *, int);
static void bwn_set_opmode(struct bwn_mac *);
static void bwn_rate_write(struct bwn_mac *, uint16_t, int);
static uint8_t bwn_plcp_getcck(const uint8_t);
static uint8_t bwn_plcp_getofdm(const uint8_t);
static void bwn_pio_init(struct bwn_mac *);
static uint16_t bwn_pio_idx2base(struct bwn_mac *, int);
static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *,
int);
static void bwn_pio_setupqueue_rx(struct bwn_mac *,
struct bwn_pio_rxqueue *, int);
static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *);
static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *,
uint16_t);
static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *);
static int bwn_pio_rx(struct bwn_pio_rxqueue *);
static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *);
static void bwn_pio_handle_txeof(struct bwn_mac *,
const struct bwn_txstatus *);
static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t);
static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t);
static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t,
uint16_t);
static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t,
uint32_t);
static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *,
struct mbuf *);
static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t);
static uint32_t bwn_pio_write_multi_4(struct bwn_mac *,
struct bwn_pio_txqueue *, uint32_t, const void *, int);
static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *,
uint16_t, uint32_t);
static uint16_t bwn_pio_write_multi_2(struct bwn_mac *,
struct bwn_pio_txqueue *, uint16_t, const void *, int);
static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *,
struct bwn_pio_txqueue *, uint16_t, struct mbuf *);
static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *,
uint16_t, struct bwn_pio_txpkt **);
static void bwn_dma_init(struct bwn_mac *);
static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t);
static int bwn_dma_mask2type(uint64_t);
static uint64_t bwn_dma_mask(struct bwn_mac *);
static uint16_t bwn_dma_base(int, int);
static void bwn_dma_ringfree(struct bwn_dma_ring **);
static void bwn_dma_32_getdesc(struct bwn_dma_ring *,
int, struct bwn_dmadesc_generic **,
struct bwn_dmadesc_meta **);
static void bwn_dma_32_setdesc(struct bwn_dma_ring *,
struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int,
int, int);
static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int);
static void bwn_dma_32_suspend(struct bwn_dma_ring *);
static void bwn_dma_32_resume(struct bwn_dma_ring *);
static int bwn_dma_32_get_curslot(struct bwn_dma_ring *);
static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int);
static void bwn_dma_64_getdesc(struct bwn_dma_ring *,
int, struct bwn_dmadesc_generic **,
struct bwn_dmadesc_meta **);
static void bwn_dma_64_setdesc(struct bwn_dma_ring *,
struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int,
int, int);
static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int);
static void bwn_dma_64_suspend(struct bwn_dma_ring *);
static void bwn_dma_64_resume(struct bwn_dma_ring *);
static int bwn_dma_64_get_curslot(struct bwn_dma_ring *);
static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int);
static int bwn_dma_allocringmemory(struct bwn_dma_ring *);
static void bwn_dma_setup(struct bwn_dma_ring *);
static void bwn_dma_free_ringmemory(struct bwn_dma_ring *);
static void bwn_dma_cleanup(struct bwn_dma_ring *);
static void bwn_dma_free_descbufs(struct bwn_dma_ring *);
static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int);
static void bwn_dma_rx(struct bwn_dma_ring *);
static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int);
static void bwn_dma_free_descbuf(struct bwn_dma_ring *,
struct bwn_dmadesc_meta *);
static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *);
static int bwn_dma_gettype(struct bwn_mac *);
static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static int bwn_dma_freeslot(struct bwn_dma_ring *);
static int bwn_dma_nextslot(struct bwn_dma_ring *, int);
static void bwn_dma_rxeof(struct bwn_dma_ring *, int *);
static int bwn_dma_newbuf(struct bwn_dma_ring *,
struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *,
int);
static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int,
bus_size_t, int);
static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *);
static void bwn_ratectl_tx_complete(const struct ieee80211_node *,
const struct bwn_txstatus *);
static void bwn_dma_handle_txeof(struct bwn_mac *,
const struct bwn_txstatus *);
static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *,
struct mbuf *);
static int bwn_dma_getslot(struct bwn_dma_ring *);
static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *,
uint8_t);
static int bwn_dma_attach(struct bwn_mac *);
static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *,
int, int, int);
static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *,
const struct bwn_txstatus *, uint16_t, int *);
static void bwn_dma_free(struct bwn_mac *);
static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype);
static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype,
const char *, struct bwn_fwfile *);
static void bwn_release_firmware(struct bwn_mac *);
static void bwn_do_release_fw(struct bwn_fwfile *);
static uint16_t bwn_fwcaps_read(struct bwn_mac *);
static int bwn_fwinitvals_write(struct bwn_mac *,
const struct bwn_fwinitvals *, size_t, size_t);
static uint16_t bwn_ant2phy(int);
static void bwn_mac_write_bssid(struct bwn_mac *);
static void bwn_mac_setfilter(struct bwn_mac *, uint16_t,
const uint8_t *);
static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t,
const uint8_t *, size_t, const uint8_t *);
static void bwn_key_macwrite(struct bwn_mac *, uint8_t,
const uint8_t *);
static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t,
const uint8_t *);
static void bwn_phy_exit(struct bwn_mac *);
static void bwn_core_stop(struct bwn_mac *);
static int bwn_switch_band(struct bwn_softc *,
struct ieee80211_channel *);
static void bwn_phy_reset(struct bwn_mac *);
static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void bwn_set_pretbtt(struct bwn_mac *);
static int bwn_intr(void *);
static void bwn_intrtask(void *, int);
static void bwn_restart(struct bwn_mac *, const char *);
static void bwn_intr_ucode_debug(struct bwn_mac *);
static void bwn_intr_tbtt_indication(struct bwn_mac *);
static void bwn_intr_atim_end(struct bwn_mac *);
static void bwn_intr_beacon(struct bwn_mac *);
static void bwn_intr_pmq(struct bwn_mac *);
static void bwn_intr_noise(struct bwn_mac *);
static void bwn_intr_txeof(struct bwn_mac *);
static void bwn_hwreset(void *, int);
static void bwn_handle_fwpanic(struct bwn_mac *);
static void bwn_load_beacon0(struct bwn_mac *);
static void bwn_load_beacon1(struct bwn_mac *);
static uint32_t bwn_jssi_read(struct bwn_mac *);
static void bwn_noise_gensample(struct bwn_mac *);
static void bwn_handle_txeof(struct bwn_mac *,
const struct bwn_txstatus *);
static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *);
static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t);
static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *,
struct mbuf *);
static int bwn_tx_isfull(struct bwn_softc *, struct mbuf *);
static int bwn_set_txhdr(struct bwn_mac *,
struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *,
uint16_t);
static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t,
const uint8_t);
static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t);
static uint8_t bwn_get_fbrate(uint8_t);
static void bwn_txpwr(void *, int);
static void bwn_tasks(void *);
static void bwn_task_15s(struct bwn_mac *);
static void bwn_task_30s(struct bwn_mac *);
static void bwn_task_60s(struct bwn_mac *);
static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *,
uint8_t);
static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *);
static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *,
const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int,
int, int);
static void bwn_tsf_read(struct bwn_mac *, uint64_t *);
static void bwn_set_slot_time(struct bwn_mac *, uint16_t);
static void bwn_watchdog(void *);
static void bwn_dma_stop(struct bwn_mac *);
static void bwn_pio_stop(struct bwn_mac *);
static void bwn_dma_ringstop(struct bwn_dma_ring **);
static void bwn_led_attach(struct bwn_mac *);
static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state);
static void bwn_led_event(struct bwn_mac *, int);
static void bwn_led_blink_start(struct bwn_mac *, int, int);
static void bwn_led_blink_next(void *);
static void bwn_led_blink_end(void *);
static void bwn_rfswitch(void *);
static void bwn_rf_turnon(struct bwn_mac *);
static void bwn_rf_turnoff(struct bwn_mac *);
static void bwn_sysctl_node(struct bwn_softc *);
static struct resource_spec bwn_res_spec_legacy[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static struct resource_spec bwn_res_spec_msi[] = {
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0, 0 }
};
static const struct bwn_channelinfo bwn_chantable_bg = {
.channels = {
{ 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 },
{ 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 },
{ 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 },
{ 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 },
{ 2472, 13, 30 }, { 2484, 14, 30 } },
.nchannels = 14
};
static const struct bwn_channelinfo bwn_chantable_a = {
.channels = {
{ 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 },
{ 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 },
{ 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 },
{ 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 },
{ 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 },
{ 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 },
{ 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 },
{ 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 },
{ 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 },
{ 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 },
{ 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 },
{ 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 },
{ 6080, 216, 30 } },
.nchannels = 37
};
#if 0
static const struct bwn_channelinfo bwn_chantable_n = {
.channels = {
{ 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 },
{ 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 },
{ 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 },
{ 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 },
{ 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 },
{ 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 },
{ 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 },
{ 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 },
{ 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 },
{ 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 },
{ 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 },
{ 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 },
{ 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 },
{ 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 },
{ 5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 },
{ 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 },
{ 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 },
{ 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 },
{ 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 },
{ 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 },
{ 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 },
{ 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 },
{ 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 },
{ 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 },
{ 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 },
{ 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 },
{ 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 },
{ 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 },
{ 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 },
{ 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 },
{ 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 },
{ 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 },
{ 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 },
{ 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 },
{ 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 },
{ 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 },
{ 6130, 226, 30 }, { 6140, 228, 30 } },
.nchannels = 110
};
#endif
#define VENDOR_LED_ACT(vendor) \
{ \
.vid = PCI_VENDOR_##vendor, \
.led_act = { BWN_VENDOR_LED_ACT_##vendor } \
}
static const struct {
uint16_t vid;
uint8_t led_act[BWN_LED_MAX];
} bwn_vendor_led_act[] = {
VENDOR_LED_ACT(COMPAQ),
VENDOR_LED_ACT(ASUSTEK)
};
static const uint8_t bwn_default_led_act[BWN_LED_MAX] =
{ BWN_VENDOR_LED_ACT_DEFAULT };
#undef VENDOR_LED_ACT
static const struct {
int on_dur;
int off_dur;
} bwn_led_duration[109] = {
[0] = { 400, 100 },
[2] = { 150, 75 },
[4] = { 90, 45 },
[11] = { 66, 34 },
[12] = { 53, 26 },
[18] = { 42, 21 },
[22] = { 35, 17 },
[24] = { 32, 16 },
[36] = { 21, 10 },
[48] = { 16, 8 },
[72] = { 11, 5 },
[96] = { 9, 4 },
[108] = { 7, 3 }
};
static const uint16_t bwn_wme_shm_offsets[] = {
[0] = BWN_WME_BESTEFFORT,
[1] = BWN_WME_BACKGROUND,
[2] = BWN_WME_VOICE,
[3] = BWN_WME_VIDEO,
};
static const struct siba_devid bwn_devs[] = {
SIBA_DEV(BROADCOM, 80211, 5, "Revision 5"),
SIBA_DEV(BROADCOM, 80211, 6, "Revision 6"),
SIBA_DEV(BROADCOM, 80211, 7, "Revision 7"),
SIBA_DEV(BROADCOM, 80211, 9, "Revision 9"),
SIBA_DEV(BROADCOM, 80211, 10, "Revision 10"),
SIBA_DEV(BROADCOM, 80211, 11, "Revision 11"),
SIBA_DEV(BROADCOM, 80211, 12, "Revision 12"),
SIBA_DEV(BROADCOM, 80211, 13, "Revision 13"),
SIBA_DEV(BROADCOM, 80211, 15, "Revision 15"),
SIBA_DEV(BROADCOM, 80211, 16, "Revision 16")
};
static const struct bwn_bus_ops *
bwn_get_bus_ops(device_t dev)
{
#if BWN_USE_SIBA
return (NULL);
#else
devclass_t bus_cls;
bus_cls = device_get_devclass(device_get_parent(dev));
if (bus_cls == devclass_find("bhnd"))
return (&bwn_bhnd_bus_ops);
else
return (&bwn_siba_bus_ops);
#endif
}
static int
bwn_probe(device_t dev)
{
struct bwn_softc *sc;
int i;
sc = device_get_softc(dev);
sc->sc_bus_ops = bwn_get_bus_ops(dev);
for (i = 0; i < nitems(bwn_devs); i++) {
if (siba_get_vendor(dev) == bwn_devs[i].sd_vendor &&
siba_get_device(dev) == bwn_devs[i].sd_device &&
siba_get_revid(dev) == bwn_devs[i].sd_rev)
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
int
bwn_attach(device_t dev)
{
struct bwn_mac *mac;
struct bwn_softc *sc = device_get_softc(dev);
int error, i, msic, reg;
sc->sc_dev = dev;
#ifdef BWN_DEBUG
sc->sc_debug = bwn_debug;
#endif
sc->sc_bus_ops = bwn_get_bus_ops(dev);
if ((error = BWN_BUS_OPS_ATTACH(dev))) {
device_printf(sc->sc_dev,
"bus-specific initialization failed (%d)\n", error);
return (error);
}
if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) {
bwn_attach_pre(sc);
bwn_sprom_bugfixes(dev);
sc->sc_flags |= BWN_FLAG_ATTACHED;
}
if (!TAILQ_EMPTY(&sc->sc_maclist)) {
if (siba_get_pci_device(dev) != 0x4313 &&
siba_get_pci_device(dev) != 0x431a &&
siba_get_pci_device(dev) != 0x4321) {
device_printf(sc->sc_dev,
"skip 802.11 cores\n");
return (ENODEV);
}
}
mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO);
mac->mac_sc = sc;
mac->mac_status = BWN_MAC_STATUS_UNINIT;
if (bwn_bfp != 0)
mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP;
TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac);
TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac);
error = bwn_attach_core(mac);
if (error)
goto fail0;
bwn_led_attach(mac);
device_printf(sc->sc_dev, "WLAN (chipid %#x rev %u) "
"PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n",
siba_get_chipid(sc->sc_dev), siba_get_revid(sc->sc_dev),
mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev,
mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver,
mac->mac_phy.rf_rev);
if (mac->mac_flags & BWN_MAC_FLAG_DMA)
device_printf(sc->sc_dev, "DMA (%d bits)\n",
mac->mac_method.dma.dmatype);
else
device_printf(sc->sc_dev, "PIO\n");
#ifdef BWN_GPL_PHY
device_printf(sc->sc_dev,
"Note: compiled with BWN_GPL_PHY; includes GPLv2 code\n");
#endif
/*
* Set up PCI resources and interrupts.
*/
if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
msic = pci_msi_count(dev);
if (bootverbose)
device_printf(sc->sc_dev, "MSI count : %d\n", msic);
} else
msic = 0;
mac->mac_intr_spec = bwn_res_spec_legacy;
if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) {
if (pci_alloc_msi(dev, &msic) == 0) {
device_printf(sc->sc_dev,
"Using %d MSI messages\n", msic);
mac->mac_intr_spec = bwn_res_spec_msi;
mac->mac_msi = 1;
}
}
error = bus_alloc_resources(dev, mac->mac_intr_spec,
mac->mac_res_irq);
if (error) {
device_printf(sc->sc_dev,
"couldn't allocate IRQ resources (%d)\n", error);
goto fail1;
}
if (mac->mac_msi == 0)
error = bus_setup_intr(dev, mac->mac_res_irq[0],
INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac,
&mac->mac_intrhand[0]);
else {
for (i = 0; i < BWN_MSI_MESSAGES; i++) {
error = bus_setup_intr(dev, mac->mac_res_irq[i],
INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac,
&mac->mac_intrhand[i]);
if (error != 0) {
device_printf(sc->sc_dev,
"couldn't setup interrupt (%d)\n", error);
break;
}
}
}
TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list);
/*
* Call the attach-post routine.
*/
if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0)
bwn_attach_post(sc);
return (0);
fail1:
if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0)
pci_release_msi(dev);
fail0:
BWN_BUS_OPS_DETACH(dev);
free(mac, M_DEVBUF);
return (error);
}
static int
bwn_is_valid_ether_addr(uint8_t *addr)
{
char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
return (FALSE);
return (TRUE);
}
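/*
 * Illustrative sketch only, not driver code: what the check above rejects.
 * Bit 0 of the first octet is the IEEE 802 group bit, so any multicast or
 * broadcast address fails, as does the all-zero address.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
ex_is_valid_unicast(const uint8_t addr[6])
{
	static const uint8_t zero[6];

	if (addr[0] & 0x01)	/* group (multicast/broadcast) bit set */
		return (false);
	if (memcmp(addr, zero, sizeof(zero)) == 0)
		return (false);
	return (true);
}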
static int
bwn_attach_post(struct bwn_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(sc->sc_dev);
/* XXX not right but it's not used anywhere important */
ic->ic_phytype = IEEE80211_T_OFDM;
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WME /* WME/WMM supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
#if 0
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#endif
| IEEE80211_C_TXPMGT /* capable of txpow mgt */
;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */
IEEE80211_ADDR_COPY(ic->ic_macaddr,
bwn_is_valid_ether_addr(siba_sprom_get_mac_80211a(sc->sc_dev)) ?
siba_sprom_get_mac_80211a(sc->sc_dev) :
siba_sprom_get_mac_80211bg(sc->sc_dev));
/* call MI attach routine. */
ieee80211_ifattach(ic);
ic->ic_headroom = sizeof(struct bwn_txhdr);
/* override default methods */
ic->ic_raw_xmit = bwn_raw_xmit;
ic->ic_updateslot = bwn_updateslot;
ic->ic_update_promisc = bwn_update_promisc;
ic->ic_wme.wme_update = bwn_wme_update;
ic->ic_scan_start = bwn_scan_start;
ic->ic_scan_end = bwn_scan_end;
ic->ic_set_channel = bwn_set_channel;
ic->ic_vap_create = bwn_vap_create;
ic->ic_vap_delete = bwn_vap_delete;
ic->ic_transmit = bwn_transmit;
ic->ic_parent = bwn_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
BWN_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
BWN_RX_RADIOTAP_PRESENT);
bwn_sysctl_node(sc);
if (bootverbose)
ieee80211_announce(ic);
return (0);
}
static void
bwn_phy_detach(struct bwn_mac *mac)
{
if (mac->mac_phy.detach != NULL)
mac->mac_phy.detach(mac);
}
int
bwn_detach(device_t dev)
{
struct bwn_softc *sc = device_get_softc(dev);
struct bwn_mac *mac = sc->sc_curmac;
struct ieee80211com *ic = &sc->sc_ic;
int i;
sc->sc_flags |= BWN_FLAG_INVALID;
if (device_is_attached(sc->sc_dev)) {
BWN_LOCK(sc);
bwn_stop(sc);
BWN_UNLOCK(sc);
bwn_dma_free(mac);
callout_drain(&sc->sc_led_blink_ch);
callout_drain(&sc->sc_rfswitch_ch);
callout_drain(&sc->sc_task_ch);
callout_drain(&sc->sc_watchdog_ch);
bwn_phy_detach(mac);
ieee80211_draintask(ic, &mac->mac_hwreset);
ieee80211_draintask(ic, &mac->mac_txpower);
ieee80211_ifdetach(ic);
}
taskqueue_drain(sc->sc_tq, &mac->mac_intrtask);
taskqueue_free(sc->sc_tq);
for (i = 0; i < BWN_MSI_MESSAGES; i++) {
if (mac->mac_intrhand[i] != NULL) {
bus_teardown_intr(dev, mac->mac_res_irq[i],
mac->mac_intrhand[i]);
mac->mac_intrhand[i] = NULL;
}
}
bus_release_resources(dev, mac->mac_intr_spec, mac->mac_res_irq);
if (mac->mac_msi != 0)
pci_release_msi(dev);
mbufq_drain(&sc->sc_snd);
bwn_release_firmware(mac);
BWN_LOCK_DESTROY(sc);
BWN_BUS_OPS_DETACH(dev);
return (0);
}
static void
bwn_attach_pre(struct bwn_softc *sc)
{
BWN_LOCK_INIT(sc);
TAILQ_INIT(&sc->sc_maclist);
callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0);
mbufq_init(&sc->sc_snd, ifqmaxlen);
sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->sc_tq);
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
"%s taskq", device_get_nameunit(sc->sc_dev));
}
static void
bwn_sprom_bugfixes(device_t dev)
{
#define BWN_ISDEV(_vendor, _device, _subvendor, _subdevice) \
((siba_get_pci_vendor(dev) == PCI_VENDOR_##_vendor) && \
(siba_get_pci_device(dev) == _device) && \
(siba_get_pci_subvendor(dev) == PCI_VENDOR_##_subvendor) && \
(siba_get_pci_subdevice(dev) == _subdevice))
if (siba_get_pci_subvendor(dev) == PCI_VENDOR_APPLE &&
siba_get_pci_subdevice(dev) == 0x4e &&
siba_get_pci_revid(dev) > 0x40)
siba_sprom_set_bf_lo(dev,
siba_sprom_get_bf_lo(dev) | BWN_BFL_PACTRL);
if (siba_get_pci_subvendor(dev) == SIBA_BOARDVENDOR_DELL &&
siba_get_chipid(dev) == 0x4301 && siba_get_pci_revid(dev) == 0x74)
siba_sprom_set_bf_lo(dev,
siba_sprom_get_bf_lo(dev) | BWN_BFL_BTCOEXIST);
if (siba_get_type(dev) == SIBA_TYPE_PCI) {
if (BWN_ISDEV(BROADCOM, 0x4318, ASUSTEK, 0x100f) ||
BWN_ISDEV(BROADCOM, 0x4320, DELL, 0x0003) ||
BWN_ISDEV(BROADCOM, 0x4320, HP, 0x12f8) ||
BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0013) ||
BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0014) ||
BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0015) ||
BWN_ISDEV(BROADCOM, 0x4320, MOTOROLA, 0x7010))
siba_sprom_set_bf_lo(dev,
siba_sprom_get_bf_lo(dev) & ~BWN_BFL_BTCOEXIST);
}
#undef BWN_ISDEV
}
static void
bwn_parent(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
int startall = 0;
BWN_LOCK(sc);
if (ic->ic_nrunning > 0) {
if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) {
bwn_init(sc);
startall = 1;
} else
bwn_update_promisc(ic);
} else if (sc->sc_flags & BWN_FLAG_RUNNING)
bwn_stop(sc);
BWN_UNLOCK(sc);
if (startall)
ieee80211_start_all(ic);
}
static int
bwn_transmit(struct ieee80211com *ic, struct mbuf *m)
{
struct bwn_softc *sc = ic->ic_softc;
int error;
BWN_LOCK(sc);
if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) {
BWN_UNLOCK(sc);
return (ENXIO);
}
error = mbufq_enqueue(&sc->sc_snd, m);
if (error) {
BWN_UNLOCK(sc);
return (error);
}
bwn_start(sc);
BWN_UNLOCK(sc);
return (0);
}
static void
bwn_start(struct bwn_softc *sc)
{
struct bwn_mac *mac = sc->sc_curmac;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct ieee80211_key *k;
struct mbuf *m;
BWN_ASSERT_LOCKED(sc);
if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL ||
mac->mac_status < BWN_MAC_STATUS_STARTED)
return;
while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
if (bwn_tx_isfull(sc, m))
break;
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (ni == NULL) {
device_printf(sc->sc_dev, "unexpected NULL ni\n");
m_freem(m);
counter_u64_add(sc->sc_ic.ic_oerrors, 1);
continue;
}
wh = mtod(m, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
if_inc_counter(ni->ni_vap->iv_ifp,
IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
m_freem(m);
continue;
}
}
wh = NULL; /* Catch any invalid use */
if (bwn_tx_start(sc, ni, m) != 0) {
if (ni != NULL) {
if_inc_counter(ni->ni_vap->iv_ifp,
IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
}
continue;
}
sc->sc_watchdog_timer = 5;
}
}
static int
bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m)
{
struct bwn_dma_ring *dr;
struct bwn_mac *mac = sc->sc_curmac;
struct bwn_pio_txqueue *tq;
int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4);
BWN_ASSERT_LOCKED(sc);
if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
dr = bwn_dma_select(mac, M_WME_GETAC(m));
if (dr->dr_stop == 1 ||
bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) {
dr->dr_stop = 1;
goto full;
}
} else {
tq = bwn_pio_select(mac, M_WME_GETAC(m));
if (tq->tq_free == 0 || pktlen > tq->tq_size ||
pktlen > (tq->tq_size - tq->tq_used))
goto full;
}
return (0);
full:
mbufq_prepend(&sc->sc_snd, m);
return (1);
}
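/*
 * Note: when the selected DMA ring or PIO queue is full, bwn_tx_isfull()
 * prepends the mbuf back onto sc_snd and returns 1, so bwn_start() stops
 * dequeueing until TX completion frees up space.
 */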
static int
bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m)
{
struct bwn_mac *mac = sc->sc_curmac;
int error;
BWN_ASSERT_LOCKED(sc);
if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) {
m_freem(m);
return (ENXIO);
}
error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ?
bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m);
if (error) {
m_freem(m);
return (error);
}
return (0);
}
static int
bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m)
{
struct bwn_pio_txpkt *tp;
struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m));
struct bwn_softc *sc = mac->mac_sc;
struct bwn_txhdr txhdr;
struct mbuf *m_new;
uint32_t ctl32;
int error;
uint16_t ctl16;
BWN_ASSERT_LOCKED(sc);
/* XXX TODO send packets after DTIM */
KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__));
tp = TAILQ_FIRST(&tq->tq_pktlist);
tp->tp_ni = ni;
tp->tp_m = m;
error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp));
if (error) {
device_printf(sc->sc_dev, "tx fail\n");
return (error);
}
TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list);
tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4);
tq->tq_free--;
if (siba_get_revid(sc->sc_dev) >= 8) {
/*
* XXX please remove m_defrag(9)
*/
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
device_printf(sc->sc_dev,
"%s: can't defrag TX buffer\n",
__func__);
return (ENOBUFS);
}
if (m_new->m_next != NULL)
device_printf(sc->sc_dev,
"TODO: fragmented packets for PIO\n");
tp->tp_m = m_new;
/* send HEADER */
ctl32 = bwn_pio_write_multi_4(mac, tq,
(BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) |
BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF,
(const uint8_t *)&txhdr, BWN_HDRSIZE(mac));
/* send BODY */
ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32,
mtod(m_new, const void *), m_new->m_pkthdr.len);
bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL,
ctl32 | BWN_PIO8_TXCTL_EOF);
} else {
ctl16 = bwn_pio_write_multi_2(mac, tq,
(bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) |
BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF,
(const uint8_t *)&txhdr, BWN_HDRSIZE(mac));
ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL,
ctl16 | BWN_PIO_TXCTL_EOF);
}
return (0);
}
static struct bwn_pio_txqueue *
bwn_pio_select(struct bwn_mac *mac, uint8_t prio)
{
if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0)
return (&mac->mac_method.pio.wme[WME_AC_BE]);
switch (prio) {
case 0:
return (&mac->mac_method.pio.wme[WME_AC_BE]);
case 1:
return (&mac->mac_method.pio.wme[WME_AC_BK]);
case 2:
return (&mac->mac_method.pio.wme[WME_AC_VI]);
case 3:
return (&mac->mac_method.pio.wme[WME_AC_VO]);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (NULL);
}
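/*
 * The priority argument is the net80211 access category from
 * M_WME_GETAC(): 0 = best effort, 1 = background, 2 = video, 3 = voice.
 * Without WME support everything is funnelled through the BE queue.
 */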
static int
bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m)
{
#define BWN_GET_TXHDRCACHE(slot) \
&(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)])
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m));
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *mt;
struct bwn_softc *sc = mac->mac_sc;
uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache;
int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot };
BWN_ASSERT_LOCKED(sc);
KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__));
/* XXX send after DTIM */
slot = bwn_dma_getslot(dr);
dr->getdesc(dr, slot, &desc, &mt);
KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER,
("%s:%d: fail", __func__, __LINE__));
error = bwn_set_txhdr(dr->dr_mac, ni, m,
(struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot),
BWN_DMA_COOKIE(dr, slot));
if (error)
goto fail;
error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap,
BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr,
&mt->mt_paddr, BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n",
__func__, error);
goto fail;
}
bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap,
BUS_DMASYNC_PREWRITE);
dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0);
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
slot = bwn_dma_getslot(dr);
dr->getdesc(dr, slot, &desc, &mt);
KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY &&
mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__));
mt->mt_m = m;
mt->mt_ni = ni;
error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m,
bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT);
if (error && error != EFBIG) {
device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n",
__func__, error);
goto fail;
}
if (error) { /* error == EFBIG */
struct mbuf *m_new;
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
device_printf(sc->sc_dev,
"%s: can't defrag TX buffer\n",
__func__);
error = ENOBUFS;
goto fail;
} else {
m = m_new;
}
mt->mt_m = m;
error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap,
m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev,
"%s: can't load TX buffer (2) %d\n",
__func__, error);
goto fail;
}
}
bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE);
dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1);
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
/* XXX send after DTIM */
dr->start_transfer(dr, bwn_dma_nextslot(dr, slot));
return (0);
fail:
dr->dr_curslot = backup[0];
dr->dr_usedslot = backup[1];
return (error);
#undef BWN_GET_TXHDRCACHE
}
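/*
 * bwn_dma_tx_start() consumes BWN_TX_SLOTS_PER_FRAME (two) descriptor
 * slots per frame: the first carries the driver-built TX header from
 * dr_txhdr_cache, the second the mbuf payload.  On failure the saved
 * dr_curslot/dr_usedslot values are restored so no slots are leaked.
 */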
static void
bwn_watchdog(void *arg)
{
struct bwn_softc *sc = arg;
if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
counter_u64_add(sc->sc_ic.ic_oerrors, 1);
}
callout_schedule(&sc->sc_watchdog_ch, hz);
}
static int
bwn_attach_core(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int error, have_bg = 0, have_a = 0;
uint32_t high;
KASSERT(siba_get_revid(sc->sc_dev) >= 5,
("unsupported revision %d", siba_get_revid(sc->sc_dev)));
siba_powerup(sc->sc_dev, 0);
high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH);
have_a = (high & BWN_TGSHIGH_HAVE_5GHZ) ? 1 : 0;
have_bg = (high & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0;
if (high & BWN_TGSHIGH_DUALPHY) {
have_bg = 1;
have_a = 1;
}
#if 0
device_printf(sc->sc_dev, "%s: high=0x%08x, have_a=%d, have_bg=%d,"
" deviceid=0x%04x, siba_deviceid=0x%04x\n",
__func__,
high,
have_a,
have_bg,
siba_get_pci_device(sc->sc_dev),
siba_get_chipid(sc->sc_dev));
#endif
/*
* Guess at whether it has A-PHY or G-PHY.
* This is just used for resetting the core to probe things;
* we will re-guess once it's all up and working.
*/
bwn_reset_core(mac, have_bg);
/*
* Get the PHY version.
*/
error = bwn_phy_getinfo(mac, have_bg);
if (error)
goto fail;
/*
* This is the whitelist of devices whose SPROM PHY config we
* "believe". For the rest it is "guessed".
*/
if (siba_get_pci_device(sc->sc_dev) != 0x4312 &&
siba_get_pci_device(sc->sc_dev) != 0x4315 &&
siba_get_pci_device(sc->sc_dev) != 0x4319 &&
siba_get_pci_device(sc->sc_dev) != 0x4324 &&
siba_get_pci_device(sc->sc_dev) != 0x4328 &&
siba_get_pci_device(sc->sc_dev) != 0x432b) {
have_a = have_bg = 0;
if (mac->mac_phy.type == BWN_PHYTYPE_A)
have_a = 1;
else if (mac->mac_phy.type == BWN_PHYTYPE_G ||
mac->mac_phy.type == BWN_PHYTYPE_N ||
mac->mac_phy.type == BWN_PHYTYPE_LP)
have_bg = 1;
else
KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__,
mac->mac_phy.type));
}
/*
* XXX The PHY-G support doesn't do 5GHz operation.
*/
if (mac->mac_phy.type != BWN_PHYTYPE_LP &&
mac->mac_phy.type != BWN_PHYTYPE_N) {
device_printf(sc->sc_dev,
"%s: forcing 2GHz only; no dual-band support for PHY\n",
__func__);
have_a = 0;
have_bg = 1;
}
mac->mac_phy.phy_n = NULL;
if (mac->mac_phy.type == BWN_PHYTYPE_G) {
mac->mac_phy.attach = bwn_phy_g_attach;
mac->mac_phy.detach = bwn_phy_g_detach;
mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw;
mac->mac_phy.init_pre = bwn_phy_g_init_pre;
mac->mac_phy.init = bwn_phy_g_init;
mac->mac_phy.exit = bwn_phy_g_exit;
mac->mac_phy.phy_read = bwn_phy_g_read;
mac->mac_phy.phy_write = bwn_phy_g_write;
mac->mac_phy.rf_read = bwn_phy_g_rf_read;
mac->mac_phy.rf_write = bwn_phy_g_rf_write;
mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl;
mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff;
mac->mac_phy.switch_analog = bwn_phy_switch_analog;
mac->mac_phy.switch_channel = bwn_phy_g_switch_channel;
mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan;
mac->mac_phy.set_antenna = bwn_phy_g_set_antenna;
mac->mac_phy.set_im = bwn_phy_g_im;
mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr;
mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr;
mac->mac_phy.task_15s = bwn_phy_g_task_15s;
mac->mac_phy.task_60s = bwn_phy_g_task_60s;
} else if (mac->mac_phy.type == BWN_PHYTYPE_LP) {
mac->mac_phy.init_pre = bwn_phy_lp_init_pre;
mac->mac_phy.init = bwn_phy_lp_init;
mac->mac_phy.phy_read = bwn_phy_lp_read;
mac->mac_phy.phy_write = bwn_phy_lp_write;
mac->mac_phy.phy_maskset = bwn_phy_lp_maskset;
mac->mac_phy.rf_read = bwn_phy_lp_rf_read;
mac->mac_phy.rf_write = bwn_phy_lp_rf_write;
mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff;
mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog;
mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel;
mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan;
mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna;
mac->mac_phy.task_60s = bwn_phy_lp_task_60s;
} else if (mac->mac_phy.type == BWN_PHYTYPE_N) {
mac->mac_phy.attach = bwn_phy_n_attach;
mac->mac_phy.detach = bwn_phy_n_detach;
mac->mac_phy.prepare_hw = bwn_phy_n_prepare_hw;
mac->mac_phy.init_pre = bwn_phy_n_init_pre;
mac->mac_phy.init = bwn_phy_n_init;
mac->mac_phy.exit = bwn_phy_n_exit;
mac->mac_phy.phy_read = bwn_phy_n_read;
mac->mac_phy.phy_write = bwn_phy_n_write;
mac->mac_phy.rf_read = bwn_phy_n_rf_read;
mac->mac_phy.rf_write = bwn_phy_n_rf_write;
mac->mac_phy.use_hwpctl = bwn_phy_n_hwpctl;
mac->mac_phy.rf_onoff = bwn_phy_n_rf_onoff;
mac->mac_phy.switch_analog = bwn_phy_n_switch_analog;
mac->mac_phy.switch_channel = bwn_phy_n_switch_channel;
mac->mac_phy.get_default_chan = bwn_phy_n_get_default_chan;
mac->mac_phy.set_antenna = bwn_phy_n_set_antenna;
mac->mac_phy.set_im = bwn_phy_n_im;
mac->mac_phy.recalc_txpwr = bwn_phy_n_recalc_txpwr;
mac->mac_phy.set_txpwr = bwn_phy_n_set_txpwr;
mac->mac_phy.task_15s = bwn_phy_n_task_15s;
mac->mac_phy.task_60s = bwn_phy_n_task_60s;
} else {
device_printf(sc->sc_dev, "unsupported PHY type (%d)\n",
mac->mac_phy.type);
error = ENXIO;
goto fail;
}
mac->mac_phy.gmode = have_bg;
if (mac->mac_phy.attach != NULL) {
error = mac->mac_phy.attach(mac);
if (error) {
device_printf(sc->sc_dev, "failed\n");
goto fail;
}
}
bwn_reset_core(mac, have_bg);
error = bwn_chiptest(mac);
if (error)
goto fail;
error = bwn_setup_channels(mac, have_bg, have_a);
if (error) {
device_printf(sc->sc_dev, "failed to setup channels\n");
goto fail;
}
if (sc->sc_curmac == NULL)
sc->sc_curmac = mac;
error = bwn_dma_attach(mac);
if (error != 0) {
device_printf(sc->sc_dev, "failed to initialize DMA\n");
goto fail;
}
mac->mac_phy.switch_analog(mac, 0);
siba_dev_down(sc->sc_dev, 0);
fail:
siba_powerdown(sc->sc_dev);
bwn_release_firmware(mac);
return (error);
}
/*
* Reset - SIBA.
*/
void
bwn_reset_core(struct bwn_mac *mac, int g_mode)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t low, ctl;
uint32_t flags = 0;
DPRINTF(sc, BWN_DEBUG_RESET, "%s: g_mode=%d\n", __func__, g_mode);
flags |= (BWN_TGSLOW_PHYCLOCK_ENABLE | BWN_TGSLOW_PHYRESET);
if (g_mode)
flags |= BWN_TGSLOW_SUPPORT_G;
/* XXX N-PHY only; and hard-code to 20MHz for now */
if (mac->mac_phy.type == BWN_PHYTYPE_N)
flags |= BWN_TGSLOW_PHY_BANDWIDTH_20MHZ;
siba_dev_up(sc->sc_dev, flags);
DELAY(2000);
/* Take PHY out of reset */
low = (siba_read_4(sc->sc_dev, SIBA_TGSLOW) | SIBA_TGSLOW_FGC) &
~(BWN_TGSLOW_PHYRESET | BWN_TGSLOW_PHYCLOCK_ENABLE);
siba_write_4(sc->sc_dev, SIBA_TGSLOW, low);
siba_read_4(sc->sc_dev, SIBA_TGSLOW);
DELAY(2000);
low &= ~SIBA_TGSLOW_FGC;
low |= BWN_TGSLOW_PHYCLOCK_ENABLE;
siba_write_4(sc->sc_dev, SIBA_TGSLOW, low);
siba_read_4(sc->sc_dev, SIBA_TGSLOW);
DELAY(2000);
if (mac->mac_phy.switch_analog != NULL)
mac->mac_phy.switch_analog(mac, 1);
ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE;
if (g_mode)
ctl |= BWN_MACCTL_GMODE;
BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON);
}
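/*
 * Sequence used above: the core is brought up with the PHY clock enabled
 * and the PHY held in reset, the PHY is then taken out of reset with
 * SIBA_TGSLOW_FGC set and the PHY clock off, and finally FGC is cleared
 * and the PHY clock re-enabled before the analog frontend is switched on
 * and GMODE is programmed into MACCTL.
 */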
static int
bwn_phy_getinfo(struct bwn_mac *mac, int gmode)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
uint32_t tmp;
/* PHY */
tmp = BWN_READ_2(mac, BWN_PHYVER);
phy->gmode = gmode;
phy->rf_on = 1;
phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12;
phy->type = (tmp & BWN_PHYVER_TYPE) >> 8;
phy->rev = (tmp & BWN_PHYVER_VERSION);
if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) ||
(phy->type == BWN_PHYTYPE_B && phy->rev != 2 &&
phy->rev != 4 && phy->rev != 6 && phy->rev != 7) ||
(phy->type == BWN_PHYTYPE_G && phy->rev > 9) ||
(phy->type == BWN_PHYTYPE_N && phy->rev > 6) ||
(phy->type == BWN_PHYTYPE_LP && phy->rev > 2))
goto unsupphy;
/* RADIO */
if (siba_get_chipid(sc->sc_dev) == 0x4317) {
if (siba_get_chiprev(sc->sc_dev) == 0)
tmp = 0x3205017f;
else if (siba_get_chiprev(sc->sc_dev) == 1)
tmp = 0x4205017f;
else
tmp = 0x5205017f;
} else {
BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID);
tmp = BWN_READ_2(mac, BWN_RFDATALO);
BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID);
tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16;
}
phy->rf_rev = (tmp & 0xf0000000) >> 28;
phy->rf_ver = (tmp & 0x0ffff000) >> 12;
phy->rf_manuf = (tmp & 0x00000fff);
/*
* For now, just always do a full init (i.e., what bwn has
* traditionally done).
*/
phy->phy_do_full_init = 1;
if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */
goto unsupradio;
if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 ||
phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) ||
(phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 0x2050) ||
(phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) ||
(phy->type == BWN_PHYTYPE_N &&
phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) ||
(phy->type == BWN_PHYTYPE_LP &&
phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063))
goto unsupradio;
return (0);
unsupphy:
device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, "
"analog %#x)\n",
phy->type, phy->rev, phy->analog);
return (ENXIO);
unsupradio:
device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, "
"rev %#x)\n",
phy->rf_manuf, phy->rf_ver, phy->rf_rev);
return (ENXIO);
}
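/*
 * Radio ID layout as decoded above: bits 28-31 hold the revision,
 * bits 12-27 the version (e.g. 0x2050) and bits 0-11 the manufacturer
 * (0x17f == Broadcom).
 */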
static int
bwn_chiptest(struct bwn_mac *mac)
{
#define TESTVAL0 0x55aaaa55
#define TESTVAL1 0xaa5555aa
struct bwn_softc *sc = mac->mac_sc;
uint32_t v, backup;
BWN_LOCK(sc);
backup = bwn_shm_read_4(mac, BWN_SHARED, 0);
bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0);
if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL0)
goto error;
bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1);
if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1)
goto error;
bwn_shm_write_4(mac, BWN_SHARED, 0, backup);
if ((siba_get_revid(sc->sc_dev) >= 3) &&
(siba_get_revid(sc->sc_dev) <= 10)) {
BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa);
BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb);
if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb)
goto error;
if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc)
goto error;
}
BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0);
v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE;
if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON))
goto error;
BWN_UNLOCK(sc);
return (0);
error:
BWN_UNLOCK(sc);
device_printf(sc->sc_dev, "failed to validate the chipaccess\n");
return (ENODEV);
}
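/*
 * bwn_chiptest() sanity-checks chip access: SHM word 0 must read back
 * two test patterns, the TSF CFP start register must show the expected
 * byte lanes on core revs 3-10, and MACCTL must have IHR_ON set with no
 * bits other than GMODE/IHR_ON, as left by bwn_reset_core().
 */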
static int
bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint8_t bands[IEEE80211_MODE_BYTES];
memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
ic->ic_nchans = 0;
DPRINTF(sc, BWN_DEBUG_EEPROM, "%s: called; bg=%d, a=%d\n",
__func__,
have_bg,
have_a);
if (have_bg) {
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX,
&ic->ic_nchans, &bwn_chantable_bg, bands);
}
if (have_a) {
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11A);
bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX,
&ic->ic_nchans, &bwn_chantable_a, bands);
}
mac->mac_phy.supports_2ghz = have_bg;
mac->mac_phy.supports_5ghz = have_a;
return (ic->ic_nchans == 0 ? ENXIO : 0);
}
uint32_t
bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset)
{
uint32_t ret;
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED);
ret <<= 16;
bwn_shm_ctlword(mac, way, (offset >> 2) + 1);
ret |= BWN_READ_2(mac, BWN_SHM_DATA);
goto out;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
ret = BWN_READ_4(mac, BWN_SHM_DATA);
out:
return (ret);
}
uint16_t
bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset)
{
uint16_t ret;
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED);
goto out;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
ret = BWN_READ_2(mac, BWN_SHM_DATA);
out:
return (ret);
}
static void
bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way,
uint16_t offset)
{
uint32_t control;
control = way;
control <<= 16;
control |= offset;
BWN_WRITE_4(mac, BWN_SHM_CONTROL, control);
}
void
bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset,
uint32_t value)
{
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED,
(value >> 16) & 0xffff);
bwn_shm_ctlword(mac, way, (offset >> 2) + 1);
BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff);
return;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
BWN_WRITE_4(mac, BWN_SHM_DATA, value);
}
void
bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset,
uint16_t value)
{
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value);
return;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
BWN_WRITE_2(mac, BWN_SHM_DATA, value);
}
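/*
 * SHM access scheme used by the helpers above: BWN_SHM_CONTROL takes the
 * target space in its upper 16 bits and a 32-bit word index in the lower
 * 16 bits.  For BWN_SHARED the callers pass byte offsets, which must be
 * 2-byte aligned; a 4-byte access on a 2-byte boundary is split into two
 * 16-bit accesses via BWN_SHM_DATA_UNALIGNED and BWN_SHM_DATA.
 */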
static void
bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
const struct bwn_channelinfo *ci, const uint8_t bands[])
{
int i, error;
for (i = 0, error = 0; i < ci->nchannels && error == 0; i++) {
const struct bwn_channel *hc = &ci->channels[i];
error = ieee80211_add_channel(chans, maxchans, nchans,
hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
}
}
static int
bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
int error;
if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 ||
mac->mac_status < BWN_MAC_STATUS_STARTED) {
m_freem(m);
return (ENETDOWN);
}
BWN_LOCK(sc);
if (bwn_tx_isfull(sc, m)) {
m_freem(m);
BWN_UNLOCK(sc);
return (ENOBUFS);
}
error = bwn_tx_start(sc, ni, m);
if (error == 0)
sc->sc_watchdog_timer = 5;
BWN_UNLOCK(sc);
return (error);
}
/*
* Callback from the 802.11 layer to update the slot time
* based on the current setting. We use it to notify the
* firmware of ERP changes and the f/w takes care of things
* like slot time and preamble.
*/
static void
bwn_updateslot(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac;
BWN_LOCK(sc);
if (sc->sc_flags & BWN_FLAG_RUNNING) {
mac = (struct bwn_mac *)sc->sc_curmac;
bwn_set_slot_time(mac, IEEE80211_GET_SLOTTIME(ic));
}
BWN_UNLOCK(sc);
}
/*
* Callback from the 802.11 layer after a promiscuous mode change.
* Note this interface does not check the operating mode as this
* is an internal callback and we are expected to honor the current
* state (e.g. this is used for setting the interface in promiscuous
* mode when operating in hostap mode to do ACS).
*/
static void
bwn_update_promisc(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
if (ic->ic_promisc > 0)
sc->sc_filters |= BWN_MACCTL_PROMISC;
else
sc->sc_filters &= ~BWN_MACCTL_PROMISC;
bwn_set_opmode(mac);
}
BWN_UNLOCK(sc);
}
/*
* Callback from the 802.11 layer to update WME parameters.
*/
static int
bwn_wme_update(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
struct chanAccParams chp;
struct wmeParams *wmep;
int i;
ieee80211_wme_ic_getparams(ic, &chp);
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
bwn_mac_suspend(mac);
for (i = 0; i < N(sc->sc_wmeParams); i++) {
wmep = &chp.cap_wmeParams[i];
bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]);
}
bwn_mac_enable(mac);
}
BWN_UNLOCK(sc);
return (0);
}
static void
bwn_scan_start(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac;
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC;
bwn_set_opmode(mac);
/* disable CFP update during scan */
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE);
}
BWN_UNLOCK(sc);
}
static void
bwn_scan_end(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac;
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC;
bwn_set_opmode(mac);
bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE);
}
BWN_UNLOCK(sc);
}
static void
bwn_set_channel(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
struct bwn_phy *phy = &mac->mac_phy;
int chan, error;
BWN_LOCK(sc);
error = bwn_switch_band(sc, ic->ic_curchan);
if (error)
goto fail;
bwn_mac_suspend(mac);
bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG);
chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
if (chan != phy->chan)
bwn_switch_channel(mac, chan);
/* TX power level */
if (ic->ic_curchan->ic_maxpower != 0 &&
ic->ic_curchan->ic_maxpower != phy->txpower) {
phy->txpower = ic->ic_curchan->ic_maxpower / 2;
bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME |
BWN_TXPWR_IGNORE_TSSI);
}
bwn_set_txantenna(mac, BWN_ANT_DEFAULT);
if (phy->set_antenna)
phy->set_antenna(mac, BWN_ANT_DEFAULT);
if (sc->sc_rf_enabled != phy->rf_on) {
if (sc->sc_rf_enabled) {
bwn_rf_turnon(mac);
if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON))
device_printf(sc->sc_dev,
"please turn on the RF switch\n");
} else
bwn_rf_turnoff(mac);
}
bwn_mac_enable(mac);
fail:
/*
* Set up the radiotap channel frequency and flags
*/
sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
htole16(ic->ic_curchan->ic_freq);
sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
htole16(ic->ic_curchan->ic_flags & 0xffff);
BWN_UNLOCK(sc);
}
static struct ieee80211vap *
bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ieee80211vap *vap;
struct bwn_vap *bvp;
switch (opmode) {
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
case IEEE80211_M_STA:
case IEEE80211_M_WDS:
case IEEE80211_M_MONITOR:
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
break;
default:
return (NULL);
}
bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &bvp->bv_vap;
ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override with driver methods */
bvp->bv_newstate = vap->iv_newstate;
vap->iv_newstate = bwn_newstate;
/* override max aid so sta's cannot assoc when we're out of sta id's */
vap->iv_max_aid = BWN_STAID_MAX;
ieee80211_ratectl_init(vap);
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
ieee80211_media_status, mac);
return (vap);
}
static void
bwn_vap_delete(struct ieee80211vap *vap)
{
struct bwn_vap *bvp = BWN_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(bvp, M_80211_VAP);
}
static int
bwn_init(struct bwn_softc *sc)
{
struct bwn_mac *mac;
int error;
BWN_ASSERT_LOCKED(sc);
DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__);
bzero(sc->sc_bssid, IEEE80211_ADDR_LEN);
sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP;
sc->sc_filters = 0;
bwn_wme_clear(sc);
sc->sc_beacons[0] = sc->sc_beacons[1] = 0;
sc->sc_rf_enabled = 1;
mac = sc->sc_curmac;
if (mac->mac_status == BWN_MAC_STATUS_UNINIT) {
error = bwn_core_init(mac);
if (error != 0)
return (error);
}
if (mac->mac_status == BWN_MAC_STATUS_INITED)
bwn_core_start(mac);
bwn_set_opmode(mac);
bwn_set_pretbtt(mac);
bwn_spu_setdelay(mac, 0);
bwn_set_macaddr(mac);
sc->sc_flags |= BWN_FLAG_RUNNING;
callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc);
callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc);
return (0);
}
static void
bwn_stop(struct bwn_softc *sc)
{
struct bwn_mac *mac = sc->sc_curmac;
BWN_ASSERT_LOCKED(sc);
DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__);
if (mac->mac_status >= BWN_MAC_STATUS_INITED) {
/* XXX FIXME opmode not based on VAP */
bwn_set_opmode(mac);
bwn_set_macaddr(mac);
}
if (mac->mac_status >= BWN_MAC_STATUS_STARTED)
bwn_core_stop(mac);
callout_stop(&sc->sc_led_blink_ch);
sc->sc_led_blinking = 0;
bwn_core_exit(mac);
sc->sc_rf_enabled = 0;
sc->sc_flags &= ~BWN_FLAG_RUNNING;
}
static void
bwn_wme_clear(struct bwn_softc *sc)
{
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
struct wmeParams *p;
unsigned int i;
KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams),
("%s:%d: fail", __func__, __LINE__));
for (i = 0; i < N(sc->sc_wmeParams); i++) {
p = &(sc->sc_wmeParams[i]);
switch (bwn_wme_shm_offsets[i]) {
case BWN_WME_VOICE:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 2;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX);
break;
case BWN_WME_VIDEO:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 2;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX);
break;
case BWN_WME_BESTEFFORT:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 3;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX);
break;
case BWN_WME_BACKGROUND:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 7;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
}
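/*
 * The defaults above follow the 802.11 EDCA parameter set: AIFSN 2 for
 * voice and video, 3 for best effort and 7 for background.  The CW
 * fields are still raw register values rather than log2 values, as the
 * XXX FIXMEs note.
 */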
static int
bwn_core_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint64_t hf;
int error;
KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT,
("%s:%d: fail", __func__, __LINE__));
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__);
siba_powerup(sc->sc_dev, 0);
if (!siba_dev_isup(sc->sc_dev))
bwn_reset_core(mac, mac->mac_phy.gmode);
mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID;
mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON;
mac->mac_phy.hwpctl = (bwn_hwpctl) ? 1 : 0;
BWN_GETTIME(mac->mac_phy.nexttime);
mac->mac_phy.txerrors = BWN_TXERROR_MAX;
bzero(&mac->mac_stats, sizeof(mac->mac_stats));
mac->mac_stats.link_noise = -95;
mac->mac_reason_intr = 0;
bzero(mac->mac_reason, sizeof(mac->mac_reason));
mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE;
#ifdef BWN_DEBUG
if (sc->sc_debug & BWN_DEBUG_XMIT)
mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR;
#endif
mac->mac_suspended = 1;
mac->mac_task_state = 0;
memset(&mac->mac_noise, 0, sizeof(mac->mac_noise));
mac->mac_phy.init_pre(mac);
siba_pcicore_intr(sc->sc_dev);
siba_fix_imcfglobug(sc->sc_dev);
bwn_bt_disable(mac);
if (mac->mac_phy.prepare_hw) {
error = mac->mac_phy.prepare_hw(mac);
if (error)
goto fail0;
}
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: chip_init\n", __func__);
error = bwn_chip_init(mac);
if (error)
goto fail0;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV,
siba_get_revid(sc->sc_dev));
hf = bwn_hf_read(mac);
if (mac->mac_phy.type == BWN_PHYTYPE_G) {
hf |= BWN_HF_GPHY_SYM_WORKAROUND;
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL)
hf |= BWN_HF_PAGAINBOOST_OFDM_ON;
if (mac->mac_phy.rev == 1)
hf |= BWN_HF_GPHY_DC_CANCELFILTER;
}
if (mac->mac_phy.rf_ver == 0x2050) {
if (mac->mac_phy.rf_rev < 6)
hf |= BWN_HF_FORCE_VCO_RECALC;
if (mac->mac_phy.rf_rev == 6)
hf |= BWN_HF_4318_TSSI;
}
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW)
hf |= BWN_HF_SLOWCLOCK_REQ_OFF;
if ((siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) &&
(siba_get_pcicore_revid(sc->sc_dev) <= 10))
hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND;
hf &= ~BWN_HF_SKIP_CFP_UPDATE;
bwn_hf_write(mac, hf);
/* Tell the firmware about the MAC capabilities */
if (siba_get_revid(sc->sc_dev) >= 13) {
uint32_t cap;
cap = BWN_READ_4(mac, BWN_MAC_HW_CAP);
DPRINTF(sc, BWN_DEBUG_RESET,
"%s: hw capabilities: 0x%08x\n",
__func__, cap);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_L,
cap & 0xffff);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_H,
(cap >> 16) & 0xffff);
}
bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1);
bwn_rate_init(mac);
bwn_set_phytxctl(mac);
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN,
(mac->mac_phy.type == BWN_PHYTYPE_B) ? 0x1f : 0xf);
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff);
if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0)
bwn_pio_init(mac);
else
bwn_dma_init(mac);
bwn_wme_init(mac);
bwn_spu_setdelay(mac, 1);
bwn_bt_enable(mac);
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: powerup\n", __func__);
siba_powerup(sc->sc_dev,
!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW));
bwn_set_macaddr(mac);
bwn_crypt_init(mac);
/* XXX LED initialization */
mac->mac_status = BWN_MAC_STATUS_INITED;
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: done\n", __func__);
return (error);
fail0:
siba_powerdown(sc->sc_dev);
KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT,
("%s:%d: fail", __func__, __LINE__));
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: fail\n", __func__);
return (error);
}
static void
bwn_core_start(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t tmp;
KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED,
("%s:%d: fail", __func__, __LINE__));
if (siba_get_revid(sc->sc_dev) < 5)
return;
while (1) {
tmp = BWN_READ_4(mac, BWN_XMITSTAT_0);
if (!(tmp & 0x00000001))
break;
tmp = BWN_READ_4(mac, BWN_XMITSTAT_1);
}
bwn_mac_enable(mac);
BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask);
callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac);
mac->mac_status = BWN_MAC_STATUS_STARTED;
}
static void
bwn_core_exit(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t macctl;
BWN_ASSERT_LOCKED(mac->mac_sc);
KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED,
("%s:%d: fail", __func__, __LINE__));
if (mac->mac_status != BWN_MAC_STATUS_INITED)
return;
mac->mac_status = BWN_MAC_STATUS_UNINIT;
macctl = BWN_READ_4(mac, BWN_MACCTL);
macctl &= ~BWN_MACCTL_MCODE_RUN;
macctl |= BWN_MACCTL_MCODE_JMP0;
BWN_WRITE_4(mac, BWN_MACCTL, macctl);
bwn_dma_stop(mac);
bwn_pio_stop(mac);
bwn_chip_exit(mac);
mac->mac_phy.switch_analog(mac, 0);
siba_dev_down(sc->sc_dev, 0);
siba_powerdown(sc->sc_dev);
}
static void
bwn_bt_disable(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
(void)sc;
/* XXX do nothing yet */
}
static int
bwn_chip_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
uint32_t macctl;
int error;
macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA;
if (phy->gmode)
macctl |= BWN_MACCTL_GMODE;
BWN_WRITE_4(mac, BWN_MACCTL, macctl);
error = bwn_fw_fillinfo(mac);
if (error)
return (error);
error = bwn_fw_loaducode(mac);
if (error)
return (error);
error = bwn_gpio_init(mac);
if (error)
return (error);
error = bwn_fw_loadinitvals(mac);
if (error) {
siba_gpio_set(sc->sc_dev, 0);
return (error);
}
phy->switch_analog(mac, 1);
error = bwn_phy_init(mac);
if (error) {
siba_gpio_set(sc->sc_dev, 0);
return (error);
}
if (phy->set_im)
phy->set_im(mac, BWN_IMMODE_NONE);
if (phy->set_antenna)
phy->set_antenna(mac, BWN_ANT_DEFAULT);
bwn_set_txantenna(mac, BWN_ANT_DEFAULT);
if (phy->type == BWN_PHYTYPE_B)
BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004);
BWN_WRITE_4(mac, 0x0100, 0x01000000);
if (siba_get_revid(sc->sc_dev) < 5)
BWN_WRITE_4(mac, 0x010c, 0x01000000);
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA);
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA);
bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000);
bwn_set_opmode(mac);
if (siba_get_revid(sc->sc_dev) < 3) {
BWN_WRITE_2(mac, 0x060e, 0x0000);
BWN_WRITE_2(mac, 0x0610, 0x8000);
BWN_WRITE_2(mac, 0x0604, 0x0000);
BWN_WRITE_2(mac, 0x0606, 0x0200);
} else {
BWN_WRITE_4(mac, 0x0188, 0x80000000);
BWN_WRITE_4(mac, 0x018c, 0x02000000);
}
BWN_WRITE_4(mac, BWN_INTR_REASON, 0x00004000);
BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00);
BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00);
BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00);
bwn_mac_phy_clock_set(mac, true);
/* SIBA powerup */
BWN_WRITE_2(mac, BWN_POWERUP_DELAY, siba_get_cc_powerdelay(sc->sc_dev));
return (error);
}
/* read hostflags */
uint64_t
bwn_hf_read(struct bwn_mac *mac)
{
uint64_t ret;
ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI);
ret <<= 16;
ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI);
ret <<= 16;
ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO);
return (ret);
}
void
bwn_hf_write(struct bwn_mac *mac, uint64_t value)
{
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO,
(value & 0x00000000ffffull));
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI,
(value & 0x0000ffff0000ull) >> 16);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI,
(value & 0xffff00000000ULL) >> 32);
}
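/*
 * The MAC host flags form a 48-bit value stored in three 16-bit SHM
 * words (BWN_SHARED_HFLO/HFMI/HFHI); bwn_hf_read() and bwn_hf_write()
 * above merely reassemble and split that value.
 */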
static void
bwn_set_txretry(struct bwn_mac *mac, int s, int l)
{
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf));
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf));
}
static void
bwn_rate_init(struct bwn_mac *mac)
{
switch (mac->mac_phy.type) {
case BWN_PHYTYPE_A:
case BWN_PHYTYPE_G:
case BWN_PHYTYPE_LP:
case BWN_PHYTYPE_N:
bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1);
if (mac->mac_phy.type == BWN_PHYTYPE_A)
break;
/* FALLTHROUGH */
case BWN_PHYTYPE_B:
bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0);
bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0);
bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0);
bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
static void
bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm)
{
uint16_t offset;
if (ofdm) {
offset = 0x480;
offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2;
} else {
offset = 0x4c0;
offset += (bwn_plcp_getcck(rate) & 0x000f) * 2;
}
bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20,
bwn_shm_read_2(mac, BWN_SHARED, offset));
}
static uint8_t
bwn_plcp_getcck(const uint8_t bitrate)
{
switch (bitrate) {
case BWN_CCK_RATE_1MB:
return (0x0a);
case BWN_CCK_RATE_2MB:
return (0x14);
case BWN_CCK_RATE_5MB:
return (0x37);
case BWN_CCK_RATE_11MB:
return (0x6e);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
static uint8_t
bwn_plcp_getofdm(const uint8_t bitrate)
{
switch (bitrate) {
case BWN_OFDM_RATE_6MB:
return (0xb);
case BWN_OFDM_RATE_9MB:
return (0xf);
case BWN_OFDM_RATE_12MB:
return (0xa);
case BWN_OFDM_RATE_18MB:
return (0xe);
case BWN_OFDM_RATE_24MB:
return (0x9);
case BWN_OFDM_RATE_36MB:
return (0xd);
case BWN_OFDM_RATE_48MB:
return (0x8);
case BWN_OFDM_RATE_54MB:
return (0xc);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
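/*
 * The values above are PLCP rate codes: for CCK the SIGNAL field carries
 * the rate in 100 kbit/s units (0x0a = 1 Mb/s ... 0x6e = 11 Mb/s), for
 * OFDM it is the 4-bit rate encoding from the 802.11 OFDM PLCP header.
 */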
static void
bwn_set_phytxctl(struct bwn_mac *mac)
{
uint16_t ctl;
ctl = (BWN_TX_PHY_ENC_CCK | BWN_TX_PHY_ANT01AUTO |
BWN_TX_PHY_TXPWR);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl);
}
static void
bwn_pio_init(struct bwn_mac *mac)
{
struct bwn_pio *pio = &mac->mac_method.pio;
BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL)
& ~BWN_MACCTL_BIGENDIAN);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3);
bwn_pio_set_txqueue(mac, &pio->mcast, 4);
bwn_pio_setupqueue_rx(mac, &pio->rx, 0);
}
static void
bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
int index)
{
struct bwn_pio_txpkt *tp;
struct bwn_softc *sc = mac->mac_sc;
unsigned int i;
tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac);
tq->tq_index = index;
tq->tq_free = BWN_PIO_MAX_TXPACKETS;
if (siba_get_revid(sc->sc_dev) >= 8)
tq->tq_size = 1920;
else {
tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE);
tq->tq_size -= 80;
}
TAILQ_INIT(&tq->tq_pktlist);
for (i = 0; i < N(tq->tq_pkts); i++) {
tp = &(tq->tq_pkts[i]);
tp->tp_index = i;
tp->tp_queue = tq;
TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list);
}
}
static uint16_t
bwn_pio_idx2base(struct bwn_mac *mac, int index)
{
struct bwn_softc *sc = mac->mac_sc;
static const uint16_t bases[] = {
BWN_PIO_BASE0,
BWN_PIO_BASE1,
BWN_PIO_BASE2,
BWN_PIO_BASE3,
BWN_PIO_BASE4,
BWN_PIO_BASE5,
BWN_PIO_BASE6,
BWN_PIO_BASE7,
};
static const uint16_t bases_rev11[] = {
BWN_PIO11_BASE0,
BWN_PIO11_BASE1,
BWN_PIO11_BASE2,
BWN_PIO11_BASE3,
BWN_PIO11_BASE4,
BWN_PIO11_BASE5,
};
if (siba_get_revid(sc->sc_dev) >= 11) {
if (index >= N(bases_rev11))
device_printf(sc->sc_dev, "%s: warning\n", __func__);
return (bases_rev11[index]);
}
if (index >= N(bases))
device_printf(sc->sc_dev, "%s: warning\n", __func__);
return (bases[index]);
}
static void
bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq,
int index)
{
struct bwn_softc *sc = mac->mac_sc;
prq->prq_mac = mac;
prq->prq_rev = siba_get_revid(sc->sc_dev);
prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac);
bwn_dma_rxdirectfifo(mac, index, 1);
}
static void
bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq)
{
if (tq == NULL)
return;
bwn_pio_cancel_tx_packets(tq);
}
static void
bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio)
{
bwn_destroy_pioqueue_tx(pio);
}
static uint16_t
bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t offset)
{
return (BWN_READ_2(mac, tq->tq_base + offset));
}
static void
bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable)
{
uint32_t ctl;
int type;
uint16_t base;
type = bwn_dma_mask2type(bwn_dma_mask(mac));
base = bwn_dma_base(type, idx);
if (type == BWN_DMA_64BIT) {
ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL);
ctl &= ~BWN_DMA64_RXDIRECTFIFO;
if (enable)
ctl |= BWN_DMA64_RXDIRECTFIFO;
BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl);
} else {
ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL);
ctl &= ~BWN_DMA32_RXDIRECTFIFO;
if (enable)
ctl |= BWN_DMA32_RXDIRECTFIFO;
BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl);
}
}
static uint64_t
bwn_dma_mask(struct bwn_mac *mac)
{
uint32_t tmp;
uint16_t base;
tmp = BWN_READ_4(mac, SIBA_TGSHIGH);
if (tmp & SIBA_TGSHIGH_DMA64)
return (BWN_DMA_BIT_MASK(64));
base = bwn_dma_base(0, 0);
BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK);
tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL);
if (tmp & BWN_DMA32_TXADDREXT_MASK)
return (BWN_DMA_BIT_MASK(32));
return (BWN_DMA_BIT_MASK(30));
}
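/*
 * DMA address-width probe: SIBA_TGSHIGH_DMA64 advertises a 64-bit
 * engine; otherwise the address-extension bits are written into the
 * 32-bit TX control register and read back.  If they stick, the engine
 * does 32-bit addressing with extension, else only 30-bit.
 */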
static int
bwn_dma_mask2type(uint64_t dmamask)
{
if (dmamask == BWN_DMA_BIT_MASK(30))
return (BWN_DMA_30BIT);
if (dmamask == BWN_DMA_BIT_MASK(32))
return (BWN_DMA_32BIT);
if (dmamask == BWN_DMA_BIT_MASK(64))
return (BWN_DMA_64BIT);
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (BWN_DMA_30BIT);
}
static void
bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq)
{
struct bwn_pio_txpkt *tp;
unsigned int i;
for (i = 0; i < N(tq->tq_pkts); i++) {
tp = &(tq->tq_pkts[i]);
if (tp->tp_m) {
m_freem(tp->tp_m);
tp->tp_m = NULL;
}
}
}
static uint16_t
bwn_dma_base(int type, int controller_idx)
{
static const uint16_t map64[] = {
BWN_DMA64_BASE0,
BWN_DMA64_BASE1,
BWN_DMA64_BASE2,
BWN_DMA64_BASE3,
BWN_DMA64_BASE4,
BWN_DMA64_BASE5,
};
static const uint16_t map32[] = {
BWN_DMA32_BASE0,
BWN_DMA32_BASE1,
BWN_DMA32_BASE2,
BWN_DMA32_BASE3,
BWN_DMA32_BASE4,
BWN_DMA32_BASE5,
};
if (type == BWN_DMA_64BIT) {
KASSERT(controller_idx >= 0 && controller_idx < N(map64),
("%s:%d: fail", __func__, __LINE__));
return (map64[controller_idx]);
}
KASSERT(controller_idx >= 0 && controller_idx < N(map32),
("%s:%d: fail", __func__, __LINE__));
return (map32[controller_idx]);
}
static void
bwn_dma_init(struct bwn_mac *mac)
{
struct bwn_dma *dma = &mac->mac_method.dma;
/* setup TX DMA channels. */
bwn_dma_setup(dma->wme[WME_AC_BK]);
bwn_dma_setup(dma->wme[WME_AC_BE]);
bwn_dma_setup(dma->wme[WME_AC_VI]);
bwn_dma_setup(dma->wme[WME_AC_VO]);
bwn_dma_setup(dma->mcast);
/* setup RX DMA channel. */
bwn_dma_setup(dma->rx);
}
static struct bwn_dma_ring *
bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index,
int for_tx, int type)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr;
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *mt;
struct bwn_softc *sc = mac->mac_sc;
int error, i;
dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO);
if (dr == NULL)
goto out;
dr->dr_numslots = BWN_RXRING_SLOTS;
if (for_tx)
dr->dr_numslots = BWN_TXRING_SLOTS;
- dr->dr_meta = mallocarray(dr->dr_numslots,
- sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO);
+ dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
if (dr->dr_meta == NULL)
goto fail0;
dr->dr_type = type;
dr->dr_mac = mac;
dr->dr_base = bwn_dma_base(type, controller_index);
dr->dr_index = controller_index;
if (type == BWN_DMA_64BIT) {
dr->getdesc = bwn_dma_64_getdesc;
dr->setdesc = bwn_dma_64_setdesc;
dr->start_transfer = bwn_dma_64_start_transfer;
dr->suspend = bwn_dma_64_suspend;
dr->resume = bwn_dma_64_resume;
dr->get_curslot = bwn_dma_64_get_curslot;
dr->set_curslot = bwn_dma_64_set_curslot;
} else {
dr->getdesc = bwn_dma_32_getdesc;
dr->setdesc = bwn_dma_32_setdesc;
dr->start_transfer = bwn_dma_32_start_transfer;
dr->suspend = bwn_dma_32_suspend;
dr->resume = bwn_dma_32_resume;
dr->get_curslot = bwn_dma_32_get_curslot;
dr->set_curslot = bwn_dma_32_set_curslot;
}
if (for_tx) {
dr->dr_tx = 1;
dr->dr_curslot = -1;
} else {
if (dr->dr_index == 0) {
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
case BWN_FW_HDR_410:
dr->dr_rx_bufsize =
BWN_DMA0_RX_BUFFERSIZE_FW351;
dr->dr_frameoffset =
BWN_DMA0_RX_FRAMEOFFSET_FW351;
break;
case BWN_FW_HDR_598:
dr->dr_rx_bufsize =
BWN_DMA0_RX_BUFFERSIZE_FW598;
dr->dr_frameoffset =
BWN_DMA0_RX_FRAMEOFFSET_FW598;
break;
}
} else
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
error = bwn_dma_allocringmemory(dr);
if (error)
goto fail2;
if (for_tx) {
/*
* Assumption: BWN_TXRING_SLOTS is evenly divisible by
* BWN_TX_SLOTS_PER_FRAME.
*/
KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0,
("%s:%d: fail", __func__, __LINE__));
dr->dr_txhdr_cache = contigmalloc(
(dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) *
BWN_MAXTXHDRSIZE, M_DEVBUF, M_ZERO,
0, BUS_SPACE_MAXADDR, 8, 0);
if (dr->dr_txhdr_cache == NULL) {
device_printf(sc->sc_dev,
"can't allocate TX header DMA memory\n");
goto fail1;
}
/*
* Create the TX ring DMA resources
*/
error = bus_dma_tag_create(dma->parent_dtag,
BWN_ALIGN, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
BWN_HDRSIZE(mac),
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dr->dr_txring_dtag);
if (error) {
device_printf(sc->sc_dev,
"can't create TX ring DMA tag: TODO frees\n");
goto fail2;
}
for (i = 0; i < dr->dr_numslots; i += 2) {
dr->getdesc(dr, i, &desc, &mt);
mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER;
mt->mt_m = NULL;
mt->mt_ni = NULL;
mt->mt_islast = 0;
error = bus_dmamap_create(dr->dr_txring_dtag, 0,
&mt->mt_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create RX buf DMA map\n");
goto fail2;
}
dr->getdesc(dr, i + 1, &desc, &mt);
mt->mt_txtype = BWN_DMADESC_METATYPE_BODY;
mt->mt_m = NULL;
mt->mt_ni = NULL;
mt->mt_islast = 1;
error = bus_dmamap_create(dma->txbuf_dtag, 0,
&mt->mt_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create RX buf DMA map\n");
goto fail2;
}
}
} else {
error = bus_dmamap_create(dma->rxbuf_dtag, 0,
&dr->dr_spare_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create RX buf DMA map\n");
goto out; /* XXX wrong! */
}
for (i = 0; i < dr->dr_numslots; i++) {
dr->getdesc(dr, i, &desc, &mt);
error = bus_dmamap_create(dma->rxbuf_dtag, 0,
&mt->mt_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create RX buf DMA map\n");
goto out; /* XXX wrong! */
}
error = bwn_dma_newbuf(dr, desc, mt, 1);
if (error) {
device_printf(sc->sc_dev,
"failed to allocate RX buf\n");
goto out; /* XXX wrong! */
}
}
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
dr->dr_usedslot = dr->dr_numslots;
}
out:
return (dr);
fail2:
if (dr->dr_txhdr_cache != NULL) {
contigfree(dr->dr_txhdr_cache,
(dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) *
BWN_MAXTXHDRSIZE, M_DEVBUF);
}
fail1:
free(dr->dr_meta, M_DEVBUF);
fail0:
free(dr, M_DEVBUF);
return (NULL);
}
static void
bwn_dma_ringfree(struct bwn_dma_ring **dr)
{
if (dr == NULL)
return;
bwn_dma_free_descbufs(*dr);
bwn_dma_free_ringmemory(*dr);
if ((*dr)->dr_txhdr_cache != NULL) {
contigfree((*dr)->dr_txhdr_cache,
((*dr)->dr_numslots / BWN_TX_SLOTS_PER_FRAME) *
BWN_MAXTXHDRSIZE, M_DEVBUF);
}
free((*dr)->dr_meta, M_DEVBUF);
free(*dr, M_DEVBUF);
*dr = NULL;
}
static void
bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot,
struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta)
{
struct bwn_dmadesc32 *desc;
*meta = &(dr->dr_meta[slot]);
desc = dr->dr_ring_descbase;
desc = &(desc[slot]);
*gdesc = (struct bwn_dmadesc_generic *)desc;
}
static void
bwn_dma_32_setdesc(struct bwn_dma_ring *dr,
struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize,
int start, int end, int irq)
{
struct bwn_dmadesc32 *descbase = dr->dr_ring_descbase;
struct bwn_softc *sc = dr->dr_mac->mac_sc;
uint32_t addr, addrext, ctl;
int slot;
slot = (int)(&(desc->dma.dma32) - descbase);
KASSERT(slot >= 0 && slot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
addr = (uint32_t) (dmaaddr & ~SIBA_DMA_TRANSLATION_MASK);
addrext = (uint32_t) (dmaaddr & SIBA_DMA_TRANSLATION_MASK) >> 30;
addr |= siba_dma_translation(sc->sc_dev);
ctl = bufsize & BWN_DMA32_DCTL_BYTECNT;
if (slot == dr->dr_numslots - 1)
ctl |= BWN_DMA32_DCTL_DTABLEEND;
if (start)
ctl |= BWN_DMA32_DCTL_FRAMESTART;
if (end)
ctl |= BWN_DMA32_DCTL_FRAMEEND;
if (irq)
ctl |= BWN_DMA32_DCTL_IRQ;
ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT)
& BWN_DMA32_DCTL_ADDREXT_MASK;
desc->dma.dma32.control = htole32(ctl);
desc->dma.dma32.address = htole32(addr);
}
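/*
 * Address translation for 32-bit descriptors: the translation bits of
 * the bus address (SIBA_DMA_TRANSLATION_MASK, i.e. bits 30 and up) move
 * into the ADDREXT field of the control word and are replaced by the
 * core's siba_dma_translation() value; the 64-bit variant below applies
 * the same scheme to the high word of the address.
 */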
static void
bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX,
(uint32_t)(slot * sizeof(struct bwn_dmadesc32)));
}
static void
bwn_dma_32_suspend(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL,
BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND);
}
static void
bwn_dma_32_resume(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL,
BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND);
}
static int
bwn_dma_32_get_curslot(struct bwn_dma_ring *dr)
{
uint32_t val;
val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS);
val &= BWN_DMA32_RXDPTR;
return (val / sizeof(struct bwn_dmadesc32));
}
static void
bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX,
(uint32_t) (slot * sizeof(struct bwn_dmadesc32)));
}
static void
bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot,
struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta)
{
struct bwn_dmadesc64 *desc;
*meta = &(dr->dr_meta[slot]);
desc = dr->dr_ring_descbase;
desc = &(desc[slot]);
*gdesc = (struct bwn_dmadesc_generic *)desc;
}
static void
bwn_dma_64_setdesc(struct bwn_dma_ring *dr,
struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize,
int start, int end, int irq)
{
struct bwn_dmadesc64 *descbase = dr->dr_ring_descbase;
struct bwn_softc *sc = dr->dr_mac->mac_sc;
int slot;
uint32_t ctl0 = 0, ctl1 = 0;
uint32_t addrlo, addrhi;
uint32_t addrext;
slot = (int)(&(desc->dma.dma64) - descbase);
KASSERT(slot >= 0 && slot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
addrlo = (uint32_t) (dmaaddr & 0xffffffff);
addrhi = (((uint64_t) dmaaddr >> 32) & ~SIBA_DMA_TRANSLATION_MASK);
addrext = (((uint64_t) dmaaddr >> 32) & SIBA_DMA_TRANSLATION_MASK) >>
30;
addrhi |= (siba_dma_translation(sc->sc_dev) << 1);
if (slot == dr->dr_numslots - 1)
ctl0 |= BWN_DMA64_DCTL0_DTABLEEND;
if (start)
ctl0 |= BWN_DMA64_DCTL0_FRAMESTART;
if (end)
ctl0 |= BWN_DMA64_DCTL0_FRAMEEND;
if (irq)
ctl0 |= BWN_DMA64_DCTL0_IRQ;
ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT;
ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT)
& BWN_DMA64_DCTL1_ADDREXT_MASK;
desc->dma.dma64.control0 = htole32(ctl0);
desc->dma.dma64.control1 = htole32(ctl1);
desc->dma.dma64.address_low = htole32(addrlo);
desc->dma.dma64.address_high = htole32(addrhi);
}
static void
bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX,
(uint32_t)(slot * sizeof(struct bwn_dmadesc64)));
}
static void
bwn_dma_64_suspend(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL,
BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND);
}
static void
bwn_dma_64_resume(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL,
BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND);
}
static int
bwn_dma_64_get_curslot(struct bwn_dma_ring *dr)
{
uint32_t val;
val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS);
val &= BWN_DMA64_RXSTATDPTR;
return (val / sizeof(struct bwn_dmadesc64));
}
static void
bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX,
(uint32_t)(slot * sizeof(struct bwn_dmadesc64)));
}
static int
bwn_dma_allocringmemory(struct bwn_dma_ring *dr)
{
struct bwn_mac *mac = dr->dr_mac;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_softc *sc = mac->mac_sc;
int error;
error = bus_dma_tag_create(dma->parent_dtag,
BWN_ALIGN, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
BWN_DMA_RINGMEMSIZE,
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dr->dr_ring_dtag);
if (error) {
device_printf(sc->sc_dev,
"can't create TX ring DMA tag: TODO frees\n");
return (-1);
}
error = bus_dmamem_alloc(dr->dr_ring_dtag,
&dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO,
&dr->dr_ring_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't allocate DMA mem: TODO frees\n");
return (-1);
}
error = bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap,
dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE,
bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev,
"can't load DMA mem: TODO free\n");
return (-1);
}
return (0);
}
static void
bwn_dma_setup(struct bwn_dma_ring *dr)
{
struct bwn_softc *sc = dr->dr_mac->mac_sc;
uint64_t ring64;
uint32_t addrext, ring32, value;
uint32_t trans = siba_dma_translation(sc->sc_dev);
if (dr->dr_tx) {
dr->dr_curslot = -1;
if (dr->dr_type == BWN_DMA_64BIT) {
ring64 = (uint64_t)(dr->dr_ring_dmabase);
addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK)
>> 30;
value = BWN_DMA64_TXENABLE;
value |= BWN_DMA64_TXPARITY_DISABLE;
value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT)
& BWN_DMA64_TXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO,
(ring64 & 0xffffffff));
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI,
((ring64 >> 32) &
~SIBA_DMA_TRANSLATION_MASK) | (trans << 1));
} else {
ring32 = (uint32_t)(dr->dr_ring_dmabase);
addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30;
value = BWN_DMA32_TXENABLE;
value |= BWN_DMA32_TXPARITY_DISABLE;
value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT)
& BWN_DMA32_TXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA32_TXRING,
(ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans);
}
return;
}
/*
* Set up the RX ring.
*/
dr->dr_usedslot = dr->dr_numslots;
if (dr->dr_type == BWN_DMA_64BIT) {
ring64 = (uint64_t)(dr->dr_ring_dmabase);
addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30;
value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT);
value |= BWN_DMA64_RXENABLE;
value |= BWN_DMA64_RXPARITY_DISABLE;
value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT)
& BWN_DMA64_RXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, (ring64 & 0xffffffff));
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI,
((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK)
| (trans << 1));
BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots *
sizeof(struct bwn_dmadesc64));
} else {
ring32 = (uint32_t)(dr->dr_ring_dmabase);
addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30;
value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT);
value |= BWN_DMA32_RXENABLE;
value |= BWN_DMA32_RXPARITY_DISABLE;
value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT)
& BWN_DMA32_RXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA32_RXRING,
(ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans);
BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots *
sizeof(struct bwn_dmadesc32));
}
}
static void
bwn_dma_free_ringmemory(struct bwn_dma_ring *dr)
{
bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap);
bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase,
dr->dr_ring_dmap);
}
static void
bwn_dma_cleanup(struct bwn_dma_ring *dr)
{
if (dr->dr_tx) {
bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type);
if (dr->dr_type == BWN_DMA_64BIT) {
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0);
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0);
} else
BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0);
} else {
bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type);
if (dr->dr_type == BWN_DMA_64BIT) {
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0);
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0);
} else
BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0);
}
}
static void
bwn_dma_free_descbufs(struct bwn_dma_ring *dr)
{
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *meta;
struct bwn_mac *mac = dr->dr_mac;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_softc *sc = mac->mac_sc;
int i;
if (!dr->dr_usedslot)
return;
for (i = 0; i < dr->dr_numslots; i++) {
dr->getdesc(dr, i, &desc, &meta);
if (meta->mt_m == NULL) {
if (!dr->dr_tx)
device_printf(sc->sc_dev, "%s: not TX?\n",
__func__);
continue;
}
if (dr->dr_tx) {
if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER)
bus_dmamap_unload(dr->dr_txring_dtag,
meta->mt_dmap);
else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY)
bus_dmamap_unload(dma->txbuf_dtag,
meta->mt_dmap);
} else
bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap);
bwn_dma_free_descbuf(dr, meta);
}
}
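/*
 * Reset the TX DMA engine: wait for it to go idle, clear the control
 * register, then wait for the engine to report the disabled state.
 */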
static int
bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base,
int type)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t value;
int i;
uint16_t offset;
for (i = 0; i < 10; i++) {
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS :
BWN_DMA32_TXSTATUS;
value = BWN_READ_4(mac, base + offset);
if (type == BWN_DMA_64BIT) {
value &= BWN_DMA64_TXSTAT;
if (value == BWN_DMA64_TXSTAT_DISABLED ||
value == BWN_DMA64_TXSTAT_IDLEWAIT ||
value == BWN_DMA64_TXSTAT_STOPPED)
break;
} else {
value &= BWN_DMA32_TXSTATE;
if (value == BWN_DMA32_TXSTAT_DISABLED ||
value == BWN_DMA32_TXSTAT_IDLEWAIT ||
value == BWN_DMA32_TXSTAT_STOPPED)
break;
}
DELAY(1000);
}
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL;
BWN_WRITE_4(mac, base + offset, 0);
for (i = 0; i < 10; i++) {
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS :
BWN_DMA32_TXSTATUS;
value = BWN_READ_4(mac, base + offset);
if (type == BWN_DMA_64BIT) {
value &= BWN_DMA64_TXSTAT;
if (value == BWN_DMA64_TXSTAT_DISABLED) {
i = -1;
break;
}
} else {
value &= BWN_DMA32_TXSTATE;
if (value == BWN_DMA32_TXSTAT_DISABLED) {
i = -1;
break;
}
}
DELAY(1000);
}
if (i != -1) {
device_printf(sc->sc_dev, "%s: timed out\n", __func__);
return (ENODEV);
}
DELAY(1000);
return (0);
}
static int
bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base,
int type)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t value;
int i;
uint16_t offset;
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL;
BWN_WRITE_4(mac, base + offset, 0);
for (i = 0; i < 10; i++) {
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXSTATUS :
BWN_DMA32_RXSTATUS;
value = BWN_READ_4(mac, base + offset);
if (type == BWN_DMA_64BIT) {
value &= BWN_DMA64_RXSTAT;
if (value == BWN_DMA64_RXSTAT_DISABLED) {
i = -1;
break;
}
} else {
value &= BWN_DMA32_RXSTATE;
if (value == BWN_DMA32_RXSTAT_DISABLED) {
i = -1;
break;
}
}
DELAY(1000);
}
if (i != -1) {
device_printf(sc->sc_dev, "%s: timed out\n", __func__);
return (ENODEV);
}
return (0);
}
static void
bwn_dma_free_descbuf(struct bwn_dma_ring *dr,
struct bwn_dmadesc_meta *meta)
{
if (meta->mt_m != NULL) {
m_freem(meta->mt_m);
meta->mt_m = NULL;
}
if (meta->mt_ni != NULL) {
ieee80211_free_node(meta->mt_ni);
meta->mt_ni = NULL;
}
}
static void
bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m)
{
struct bwn_rxhdr4 *rxhdr;
unsigned char *frame;
rxhdr = mtod(m, struct bwn_rxhdr4 *);
rxhdr->frame_len = 0;
KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset +
sizeof(struct bwn_plcp6) + 2,
("%s:%d: fail", __func__, __LINE__));
frame = mtod(m, char *) + dr->dr_frameoffset;
memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */);
}
static uint8_t
bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m)
{
unsigned char *f = mtod(m, char *) + dr->dr_frameoffset;
return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7])
== 0xff);
}
static void
bwn_wme_init(struct bwn_mac *mac)
{
bwn_wme_load(mac);
/* enable WME support. */
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF);
BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) |
BWN_IFSCTL_USE_EDCF);
}
static void
bwn_spu_setdelay(struct bwn_mac *mac, int idle)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint16_t delay; /* microsec */
delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 3700 : 1050;
if (ic->ic_opmode == IEEE80211_M_IBSS || idle)
delay = 500;
if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8))
delay = max(delay, (uint16_t)2400);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay);
}
static void
bwn_bt_enable(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint64_t hf;
if (bwn_bluetooth == 0)
return;
if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCOEXIST) == 0)
return;
if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode)
return;
hf = bwn_hf_read(mac);
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCMOD)
hf |= BWN_HF_BT_COEXISTALT;
else
hf |= BWN_HF_BT_COEXIST;
bwn_hf_write(mac, hf);
}
static void
bwn_set_macaddr(struct bwn_mac *mac)
{
bwn_mac_write_bssid(mac);
bwn_mac_setfilter(mac, BWN_MACFILTER_SELF,
mac->mac_sc->sc_ic.ic_macaddr);
}
static void
bwn_clear_keys(struct bwn_mac *mac)
{
int i;
for (i = 0; i < mac->mac_max_nr_keys; i++) {
KASSERT(i >= 0 && i < mac->mac_max_nr_keys,
("%s:%d: fail", __func__, __LINE__));
bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE,
NULL, BWN_SEC_KEYSIZE, NULL);
if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) {
bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE,
NULL, BWN_SEC_KEYSIZE, NULL);
}
mac->mac_key[i].keyconf = NULL;
}
}
static void
bwn_crypt_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
mac->mac_max_nr_keys = (siba_get_revid(sc->sc_dev) >= 5) ? 58 : 20;
KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key),
("%s:%d: fail", __func__, __LINE__));
mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP);
mac->mac_ktp *= 2;
if (siba_get_revid(sc->sc_dev) >= 5)
BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8);
bwn_clear_keys(mac);
}
static void
bwn_chip_exit(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
bwn_phy_exit(mac);
siba_gpio_set(sc->sc_dev, 0);
}
static int
bwn_fw_fillinfo(struct bwn_mac *mac)
{
int error;
error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT);
if (error == 0)
return (0);
error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE);
if (error == 0)
return (0);
return (error);
}
static int
bwn_gpio_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t mask = 0x1f, set = 0xf, value;
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK);
BWN_WRITE_2(mac, BWN_GPIO_MASK,
BWN_READ_2(mac, BWN_GPIO_MASK) | 0x000f);
if (siba_get_chipid(sc->sc_dev) == 0x4301) {
mask |= 0x0060;
set |= 0x0060;
}
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) {
BWN_WRITE_2(mac, BWN_GPIO_MASK,
BWN_READ_2(mac, BWN_GPIO_MASK) | 0x0200);
mask |= 0x0200;
set |= 0x0200;
}
if (siba_get_revid(sc->sc_dev) >= 2)
mask |= 0x0010;
value = siba_gpio_get(sc->sc_dev);
if (value == -1)
return (0);
siba_gpio_set(sc->sc_dev, (value & mask) | set);
return (0);
}
static int
bwn_fw_loadinitvals(struct bwn_mac *mac)
{
#define GETFWOFFSET(fwp, offset) \
((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset))
const size_t hdr_len = sizeof(struct bwn_fwhdr);
const struct bwn_fwhdr *hdr;
struct bwn_fw *fw = &mac->mac_fw;
int error;
hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data);
error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len),
be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len);
if (error)
return (error);
if (fw->initvals_band.fw) {
hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data);
error = bwn_fwinitvals_write(mac,
GETFWOFFSET(fw->initvals_band, hdr_len),
be32toh(hdr->size),
fw->initvals_band.fw->datasize - hdr_len);
}
return (error);
#undef GETFWOFFSET
}
static int
bwn_phy_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int error;
mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac);
mac->mac_phy.rf_onoff(mac, 1);
error = mac->mac_phy.init(mac);
if (error) {
device_printf(sc->sc_dev, "PHY init failed\n");
goto fail0;
}
error = bwn_switch_channel(mac,
mac->mac_phy.get_default_chan(mac));
if (error) {
device_printf(sc->sc_dev,
"failed to switch default channel\n");
goto fail1;
}
return (0);
fail1:
if (mac->mac_phy.exit)
mac->mac_phy.exit(mac);
fail0:
mac->mac_phy.rf_onoff(mac, 0);
return (error);
}
static void
bwn_set_txantenna(struct bwn_mac *mac, int antenna)
{
uint16_t ant;
uint16_t tmp;
ant = bwn_ant2phy(antenna);
/* For ACK/CTS */
tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL);
tmp = (tmp & ~BWN_TX_PHY_ANT) | ant;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp);
/* For Probe Responses */
tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL);
tmp = (tmp & ~BWN_TX_PHY_ANT) | ant;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp);
}
static void
bwn_set_opmode(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint32_t ctl;
uint16_t cfp_pretbtt;
ctl = BWN_READ_4(mac, BWN_MACCTL);
ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL |
BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS |
BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC);
ctl |= BWN_MACCTL_STA;
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS)
ctl |= BWN_MACCTL_HOSTAP;
else if (ic->ic_opmode == IEEE80211_M_IBSS)
ctl &= ~BWN_MACCTL_STA;
ctl |= sc->sc_filters;
if (siba_get_revid(sc->sc_dev) <= 4)
ctl |= BWN_MACCTL_PROMISC;
BWN_WRITE_4(mac, BWN_MACCTL, ctl);
cfp_pretbtt = 2;
if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) {
if (siba_get_chipid(sc->sc_dev) == 0x4306 &&
siba_get_chiprev(sc->sc_dev) == 3)
cfp_pretbtt = 100;
else
cfp_pretbtt = 50;
}
BWN_WRITE_2(mac, 0x612, cfp_pretbtt);
}
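/*
 * Probe the DMA addressing capability of the core: 64-bit if the core
 * advertises it in SIBA_TGSHIGH, otherwise 32-bit if the address
 * extension bits stick in the TX control register, else 30-bit.
 */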
static int
bwn_dma_gettype(struct bwn_mac *mac)
{
uint32_t tmp;
uint16_t base;
tmp = BWN_READ_4(mac, SIBA_TGSHIGH);
if (tmp & SIBA_TGSHIGH_DMA64)
return (BWN_DMA_64BIT);
base = bwn_dma_base(0, 0);
BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK);
tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL);
if (tmp & BWN_DMA32_TXADDREXT_MASK)
return (BWN_DMA_32BIT);
return (BWN_DMA_30BIT);
}
static void
bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
if (!error) {
KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
*((bus_addr_t *)arg) = seg->ds_addr;
}
}
void
bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
unsigned int i, max_loop;
uint16_t value;
uint32_t buffer[5] = {
0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000
};
if (ofdm) {
max_loop = 0x1e;
buffer[0] = 0x000201cc;
} else {
max_loop = 0xfa;
buffer[0] = 0x000b846e;
}
BWN_ASSERT_LOCKED(mac->mac_sc);
for (i = 0; i < 5; i++)
bwn_ram_write(mac, i * 4, buffer[i]);
BWN_WRITE_2(mac, 0x0568, 0x0000);
BWN_WRITE_2(mac, 0x07c0,
(siba_get_revid(sc->sc_dev) < 11) ? 0x0000 : 0x0100);
value = (ofdm ? 0x41 : 0x40);
BWN_WRITE_2(mac, 0x050c, value);
if (phy->type == BWN_PHYTYPE_N || phy->type == BWN_PHYTYPE_LP ||
phy->type == BWN_PHYTYPE_LCN)
BWN_WRITE_2(mac, 0x0514, 0x1a02);
BWN_WRITE_2(mac, 0x0508, 0x0000);
BWN_WRITE_2(mac, 0x050a, 0x0000);
BWN_WRITE_2(mac, 0x054c, 0x0000);
BWN_WRITE_2(mac, 0x056a, 0x0014);
BWN_WRITE_2(mac, 0x0568, 0x0826);
BWN_WRITE_2(mac, 0x0500, 0x0000);
/* XXX TODO: n phy pa override? */
switch (phy->type) {
case BWN_PHYTYPE_N:
case BWN_PHYTYPE_LCN:
BWN_WRITE_2(mac, 0x0502, 0x00d0);
break;
case BWN_PHYTYPE_LP:
BWN_WRITE_2(mac, 0x0502, 0x0050);
break;
default:
BWN_WRITE_2(mac, 0x0502, 0x0030);
break;
}
/* flush */
BWN_READ_2(mac, 0x0502);
if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5)
BWN_RF_WRITE(mac, 0x0051, 0x0017);
for (i = 0x00; i < max_loop; i++) {
value = BWN_READ_2(mac, 0x050e);
if (value & 0x0080)
break;
DELAY(10);
}
for (i = 0x00; i < 0x0a; i++) {
value = BWN_READ_2(mac, 0x050e);
if (value & 0x0400)
break;
DELAY(10);
}
for (i = 0x00; i < 0x19; i++) {
value = BWN_READ_2(mac, 0x0690);
if (!(value & 0x0100))
break;
DELAY(10);
}
if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5)
BWN_RF_WRITE(mac, 0x0051, 0x0037);
}
void
bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val)
{
uint32_t macctl;
KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__));
macctl = BWN_READ_4(mac, BWN_MACCTL);
if (macctl & BWN_MACCTL_BIGENDIAN)
printf("TODO: need swap\n");
BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);
BWN_WRITE_4(mac, BWN_RAM_DATA, val);
}
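/*
 * Suspend the MAC.  Suspensions are reference counted; only the first
 * call actually stops the MAC and polls for the suspended state.
 */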
void
bwn_mac_suspend(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
uint32_t tmp;
KASSERT(mac->mac_suspended >= 0,
("%s:%d: fail", __func__, __LINE__));
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n",
__func__, mac->mac_suspended);
if (mac->mac_suspended == 0) {
bwn_psctl(mac, BWN_PS_AWAKE);
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL)
& ~BWN_MACCTL_ON);
BWN_READ_4(mac, BWN_MACCTL);
for (i = 35; i; i--) {
tmp = BWN_READ_4(mac, BWN_INTR_REASON);
if (tmp & BWN_INTR_MAC_SUSPENDED)
goto out;
DELAY(10);
}
for (i = 40; i; i--) {
tmp = BWN_READ_4(mac, BWN_INTR_REASON);
if (tmp & BWN_INTR_MAC_SUSPENDED)
goto out;
DELAY(1000);
}
device_printf(sc->sc_dev, "MAC suspend failed\n");
}
out:
mac->mac_suspended++;
}
void
bwn_mac_enable(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint16_t state;
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n",
__func__, mac->mac_suspended);
state = bwn_shm_read_2(mac, BWN_SHARED,
BWN_SHARED_UCODESTAT);
if (state != BWN_SHARED_UCODESTAT_SUSPEND &&
state != BWN_SHARED_UCODESTAT_SLEEP) {
DPRINTF(sc, BWN_DEBUG_FW,
"%s: warn: firmware state (%d)\n",
__func__, state);
}
mac->mac_suspended--;
KASSERT(mac->mac_suspended >= 0,
("%s:%d: fail", __func__, __LINE__));
if (mac->mac_suspended == 0) {
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON);
BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED);
BWN_READ_4(mac, BWN_MACCTL);
BWN_READ_4(mac, BWN_INTR_REASON);
bwn_psctl(mac, 0);
}
}
void
bwn_psctl(struct bwn_mac *mac, uint32_t flags)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
uint16_t ucstat = 0;	/* may be printed below even if the poll loop is skipped */
KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)),
("%s:%d: fail", __func__, __LINE__));
KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)),
("%s:%d: fail", __func__, __LINE__));
/* XXX forcibly awake and hwps-off */
BWN_WRITE_4(mac, BWN_MACCTL,
(BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) &
~BWN_MACCTL_HWPS);
BWN_READ_4(mac, BWN_MACCTL);
if (siba_get_revid(sc->sc_dev) >= 5) {
for (i = 0; i < 100; i++) {
ucstat = bwn_shm_read_2(mac, BWN_SHARED,
BWN_SHARED_UCODESTAT);
if (ucstat != BWN_SHARED_UCODESTAT_SLEEP)
break;
DELAY(10);
}
}
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: ucstat=%d\n", __func__,
ucstat);
}
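/*
 * Select the microcode, PCM and initvals firmware images matching this
 * core revision and PHY type and fetch them via firmware(9).
 */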
static int
bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_fw *fw = &mac->mac_fw;
const uint8_t rev = siba_get_revid(sc->sc_dev);
const char *filename;
uint32_t high;
int error;
/* microcode */
filename = NULL;
switch (rev) {
case 42:
if (mac->mac_phy.type == BWN_PHYTYPE_AC)
filename = "ucode42";
break;
case 40:
if (mac->mac_phy.type == BWN_PHYTYPE_AC)
filename = "ucode40";
break;
case 33:
if (mac->mac_phy.type == BWN_PHYTYPE_LCN40)
filename = "ucode33_lcn40";
break;
case 30:
if (mac->mac_phy.type == BWN_PHYTYPE_N)
filename = "ucode30_mimo";
break;
case 29:
if (mac->mac_phy.type == BWN_PHYTYPE_HT)
filename = "ucode29_mimo";
break;
case 26:
if (mac->mac_phy.type == BWN_PHYTYPE_HT)
filename = "ucode26_mimo";
break;
case 28:
case 25:
if (mac->mac_phy.type == BWN_PHYTYPE_N)
filename = "ucode25_mimo";
else if (mac->mac_phy.type == BWN_PHYTYPE_LCN)
filename = "ucode25_lcn";
break;
case 24:
if (mac->mac_phy.type == BWN_PHYTYPE_LCN)
filename = "ucode24_lcn";
break;
case 23:
if (mac->mac_phy.type == BWN_PHYTYPE_N)
filename = "ucode16_mimo";
break;
case 16:
case 17:
case 18:
case 19:
if (mac->mac_phy.type == BWN_PHYTYPE_N)
filename = "ucode16_mimo";
else if (mac->mac_phy.type == BWN_PHYTYPE_LP)
filename = "ucode16_lp";
break;
case 15:
filename = "ucode15";
break;
case 14:
filename = "ucode14";
break;
case 13:
filename = "ucode13";
break;
case 12:
case 11:
filename = "ucode11";
break;
case 10:
case 9:
case 8:
case 7:
case 6:
case 5:
filename = "ucode5";
break;
default:
device_printf(sc->sc_dev, "no ucode for rev %d\n", rev);
bwn_release_firmware(mac);
return (EOPNOTSUPP);
}
device_printf(sc->sc_dev, "ucode fw: %s\n", filename);
error = bwn_fw_get(mac, type, filename, &fw->ucode);
if (error) {
bwn_release_firmware(mac);
return (error);
}
/* PCM */
KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__));
if (rev >= 5 && rev <= 10) {
error = bwn_fw_get(mac, type, "pcm5", &fw->pcm);
if (error == ENOENT)
fw->no_pcmfile = 1;
else if (error) {
bwn_release_firmware(mac);
return (error);
}
} else if (rev < 11) {
device_printf(sc->sc_dev, "no PCM for rev %d\n", rev);
bwn_release_firmware(mac);
return (EOPNOTSUPP);
}
/* initvals */
high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH);
switch (mac->mac_phy.type) {
case BWN_PHYTYPE_A:
if (rev < 5 || rev > 10)
goto fail1;
if (high & BWN_TGSHIGH_HAVE_2GHZ)
filename = "a0g1initvals5";
else
filename = "a0g0initvals5";
break;
case BWN_PHYTYPE_G:
if (rev >= 5 && rev <= 10)
filename = "b0g0initvals5";
else if (rev >= 13)
filename = "b0g0initvals13";
else
goto fail1;
break;
case BWN_PHYTYPE_LP:
if (rev == 13)
filename = "lp0initvals13";
else if (rev == 14)
filename = "lp0initvals14";
else if (rev >= 15)
filename = "lp0initvals15";
else
goto fail1;
break;
case BWN_PHYTYPE_N:
if (rev == 30)
filename = "n16initvals30";
else if (rev == 28 || rev == 25)
filename = "n0initvals25";
else if (rev == 24)
filename = "n0initvals24";
else if (rev == 23)
filename = "n0initvals16";
else if (rev >= 16 && rev <= 18)
filename = "n0initvals16";
else if (rev >= 11 && rev <= 12)
filename = "n0initvals11";
else
goto fail1;
break;
default:
goto fail1;
}
error = bwn_fw_get(mac, type, filename, &fw->initvals);
if (error) {
bwn_release_firmware(mac);
return (error);
}
/* bandswitch initvals */
switch (mac->mac_phy.type) {
case BWN_PHYTYPE_A:
if (rev >= 5 && rev <= 10) {
if (high & BWN_TGSHIGH_HAVE_2GHZ)
filename = "a0g1bsinitvals5";
else
filename = "a0g0bsinitvals5";
} else if (rev >= 11)
filename = NULL;
else
goto fail1;
break;
case BWN_PHYTYPE_G:
if (rev >= 5 && rev <= 10)
filename = "b0g0bsinitvals5";
else if (rev >= 11)
filename = NULL;
else
goto fail1;
break;
case BWN_PHYTYPE_LP:
if (rev == 13)
filename = "lp0bsinitvals13";
else if (rev == 14)
filename = "lp0bsinitvals14";
else if (rev >= 15)
filename = "lp0bsinitvals15";
else
goto fail1;
break;
case BWN_PHYTYPE_N:
if (rev == 30)
filename = "n16bsinitvals30";
else if (rev == 28 || rev == 25)
filename = "n0bsinitvals25";
else if (rev == 24)
filename = "n0bsinitvals24";
else if (rev == 23)
filename = "n0bsinitvals16";
else if (rev >= 16 && rev <= 18)
filename = "n0bsinitvals16";
else if (rev >= 11 && rev <= 12)
filename = "n0bsinitvals11";
else
goto fail1;
break;
default:
device_printf(sc->sc_dev, "unknown phy (%d)\n",
mac->mac_phy.type);
goto fail1;
}
error = bwn_fw_get(mac, type, filename, &fw->initvals_band);
if (error) {
bwn_release_firmware(mac);
return (error);
}
return (0);
fail1:
device_printf(sc->sc_dev, "no INITVALS for rev %d, phy.type %d\n",
rev, mac->mac_phy.type);
bwn_release_firmware(mac);
return (EOPNOTSUPP);
}
static int
bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type,
const char *name, struct bwn_fwfile *bfw)
{
const struct bwn_fwhdr *hdr;
struct bwn_softc *sc = mac->mac_sc;
const struct firmware *fw;
char namebuf[64];
if (name == NULL) {
bwn_do_release_fw(bfw);
return (0);
}
if (bfw->filename != NULL) {
if (bfw->type == type && (strcmp(bfw->filename, name) == 0))
return (0);
bwn_do_release_fw(bfw);
}
snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s",
(type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "",
(mac->mac_phy.type == BWN_PHYTYPE_LP) ? "lp_" : "", name);
/* XXX Sleeping on "fwload" with the non-sleepable locks held */
fw = firmware_get(namebuf);
if (fw == NULL) {
device_printf(sc->sc_dev, "firmware file (%s) not found\n",
namebuf);
return (ENOENT);
}
if (fw->datasize < sizeof(struct bwn_fwhdr))
goto fail;
hdr = (const struct bwn_fwhdr *)(fw->data);
switch (hdr->type) {
case BWN_FWTYPE_UCODE:
case BWN_FWTYPE_PCM:
if (be32toh(hdr->size) !=
(fw->datasize - sizeof(struct bwn_fwhdr)))
goto fail;
/* FALLTHROUGH */
case BWN_FWTYPE_IV:
if (hdr->ver != 1)
goto fail;
break;
default:
goto fail;
}
bfw->filename = name;
bfw->fw = fw;
bfw->type = type;
return (0);
fail:
device_printf(sc->sc_dev, "firmware file (%s) has an invalid format\n", namebuf);
if (fw != NULL)
firmware_put(fw, FIRMWARE_UNLOAD);
return (EPROTO);
}
static void
bwn_release_firmware(struct bwn_mac *mac)
{
bwn_do_release_fw(&mac->mac_fw.ucode);
bwn_do_release_fw(&mac->mac_fw.pcm);
bwn_do_release_fw(&mac->mac_fw.initvals);
bwn_do_release_fw(&mac->mac_fw.initvals_band);
}
static void
bwn_do_release_fw(struct bwn_fwfile *bfw)
{
if (bfw->fw != NULL)
firmware_put(bfw->fw, FIRMWARE_UNLOAD);
bfw->fw = NULL;
bfw->filename = NULL;
}
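/*
 * Upload the microcode (and PCM, if present) into shared memory, start
 * it and sanity-check the firmware revision it reports.
 */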
static int
bwn_fw_loaducode(struct bwn_mac *mac)
{
#define GETFWOFFSET(fwp, offset) \
((const uint32_t *)((const char *)fwp.fw->data + offset))
#define GETFWSIZE(fwp, offset) \
((fwp.fw->datasize - offset) / sizeof(uint32_t))
struct bwn_softc *sc = mac->mac_sc;
const uint32_t *data;
unsigned int i;
uint32_t ctl;
uint16_t date, fwcaps, time;
int error = 0;
ctl = BWN_READ_4(mac, BWN_MACCTL);
ctl |= BWN_MACCTL_MCODE_JMP0;
KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__,
__LINE__));
BWN_WRITE_4(mac, BWN_MACCTL, ctl);
for (i = 0; i < 64; i++)
bwn_shm_write_2(mac, BWN_SCRATCH, i, 0);
for (i = 0; i < 4096; i += 2)
bwn_shm_write_2(mac, BWN_SHARED, i, 0);
data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr));
bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000);
for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr));
i++) {
BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i]));
DELAY(10);
}
if (mac->mac_fw.pcm.fw) {
data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr));
bwn_shm_ctlword(mac, BWN_HW, 0x01ea);
BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000);
bwn_shm_ctlword(mac, BWN_HW, 0x01eb);
for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm,
sizeof(struct bwn_fwhdr)); i++) {
BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i]));
DELAY(10);
}
}
BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL);
BWN_WRITE_4(mac, BWN_MACCTL,
(BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) |
BWN_MACCTL_MCODE_RUN);
for (i = 0; i < 21; i++) {
if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED)
break;
if (i >= 20) {
device_printf(sc->sc_dev, "ucode timeout\n");
error = ENXIO;
goto error;
}
DELAY(50000);
}
BWN_READ_4(mac, BWN_INTR_REASON);
mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV);
if (mac->mac_fw.rev <= 0x128) {
device_printf(sc->sc_dev, "the firmware is too old\n");
error = EOPNOTSUPP;
goto error;
}
/*
* Determine firmware header version; needed for TX/RX packet
* handling.
*/
if (mac->mac_fw.rev >= 598)
mac->mac_fw.fw_hdr_format = BWN_FW_HDR_598;
else if (mac->mac_fw.rev >= 410)
mac->mac_fw.fw_hdr_format = BWN_FW_HDR_410;
else
mac->mac_fw.fw_hdr_format = BWN_FW_HDR_351;
/*
 * Firmware rev 598 and later use a different TX/RX descriptor
 * and status layout.  The check below would refuse to attach in
 * that case, rather than attaching and then failing, but it is
 * currently disabled (#if 0).
 */
#if 0
if (mac->mac_fw.fw_hdr_format == BWN_FW_HDR_598) {
device_printf(sc->sc_dev,
"firmware is too new (>=598); not supported\n");
error = EOPNOTSUPP;
goto error;
}
#endif
mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED,
BWN_SHARED_UCODE_PATCH);
date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE);
mac->mac_fw.opensource = (date == 0xffff);
if (bwn_wme != 0)
mac->mac_flags |= BWN_MAC_FLAG_WME;
mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO;
time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME);
if (mac->mac_fw.opensource == 0) {
device_printf(sc->sc_dev,
"firmware version (rev %u patch %u date %#x time %#x)\n",
mac->mac_fw.rev, mac->mac_fw.patch, date, time);
if (mac->mac_fw.no_pcmfile)
device_printf(sc->sc_dev,
"no HW crypto acceleration due to pcm5\n");
} else {
mac->mac_fw.patch = time;
fwcaps = bwn_fwcaps_read(mac);
if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) {
device_printf(sc->sc_dev,
"disabling HW crypto acceleration\n");
mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO;
}
if (!(fwcaps & BWN_FWCAPS_WME)) {
device_printf(sc->sc_dev, "disabling WME support\n");
mac->mac_flags &= ~BWN_MAC_FLAG_WME;
}
}
if (BWN_ISOLDFMT(mac))
device_printf(sc->sc_dev, "using old firmware image\n");
return (0);
error:
BWN_WRITE_4(mac, BWN_MACCTL,
(BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) |
BWN_MACCTL_MCODE_JMP0);
return (error);
#undef GETFWSIZE
#undef GETFWOFFSET
}
/* Open-source firmware only */
static uint16_t
bwn_fwcaps_read(struct bwn_mac *mac)
{
KASSERT(mac->mac_fw.opensource == 1,
("%s:%d: fail", __func__, __LINE__));
return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS));
}
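/*
 * Write a firmware initvals table to the hardware.  Each record is a
 * big-endian 16-bit offset/size word followed by a 16- or 32-bit value,
 * as indicated by the BWN_FWINITVALS_32BIT flag.
 */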
static int
bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals,
size_t count, size_t array_size)
{
#define GET_NEXTIV16(iv) \
((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \
sizeof(uint16_t) + sizeof(uint16_t)))
#define GET_NEXTIV32(iv) \
((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \
sizeof(uint16_t) + sizeof(uint32_t)))
struct bwn_softc *sc = mac->mac_sc;
const struct bwn_fwinitvals *iv;
uint16_t offset;
size_t i;
uint8_t bit32;
KASSERT(sizeof(struct bwn_fwinitvals) == 6,
("%s:%d: fail", __func__, __LINE__));
iv = ivals;
for (i = 0; i < count; i++) {
if (array_size < sizeof(iv->offset_size))
goto fail;
array_size -= sizeof(iv->offset_size);
offset = be16toh(iv->offset_size);
bit32 = (offset & BWN_FWINITVALS_32BIT) ? 1 : 0;
offset &= BWN_FWINITVALS_OFFSET_MASK;
if (offset >= 0x1000)
goto fail;
if (bit32) {
if (array_size < sizeof(iv->data.d32))
goto fail;
array_size -= sizeof(iv->data.d32);
BWN_WRITE_4(mac, offset, be32toh(iv->data.d32));
iv = GET_NEXTIV32(iv);
} else {
if (array_size < sizeof(iv->data.d16))
goto fail;
array_size -= sizeof(iv->data.d16);
BWN_WRITE_2(mac, offset, be16toh(iv->data.d16));
iv = GET_NEXTIV16(iv);
}
}
if (array_size != 0)
goto fail;
return (0);
fail:
device_printf(sc->sc_dev, "initvals: invalid format\n");
return (EPROTO);
#undef GET_NEXTIV16
#undef GET_NEXTIV32
}
int
bwn_switch_channel(struct bwn_mac *mac, int chan)
{
struct bwn_phy *phy = &(mac->mac_phy);
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint16_t channelcookie, savedcookie;
int error;
if (chan == 0xffff)
chan = phy->get_default_chan(mac);
channelcookie = chan;
if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
channelcookie |= 0x100;
savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie);
error = phy->switch_channel(mac, chan);
if (error)
goto fail;
mac->mac_phy.chan = chan;
DELAY(8000);
return (0);
fail:
device_printf(sc->sc_dev, "failed to switch channel\n");
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie);
return (error);
}
static uint16_t
bwn_ant2phy(int antenna)
{
switch (antenna) {
case BWN_ANT0:
return (BWN_TX_PHY_ANT0);
case BWN_ANT1:
return (BWN_TX_PHY_ANT1);
case BWN_ANT2:
return (BWN_TX_PHY_ANT2);
case BWN_ANT3:
return (BWN_TX_PHY_ANT3);
case BWN_ANTAUTO:
return (BWN_TX_PHY_ANT01AUTO);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
static void
bwn_wme_load(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams),
("%s:%d: fail", __func__, __LINE__));
bwn_mac_suspend(mac);
for (i = 0; i < N(sc->sc_wmeParams); i++)
bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]),
bwn_wme_shm_offsets[i]);
bwn_mac_enable(mac);
}
static void
bwn_wme_loadparams(struct bwn_mac *mac,
const struct wmeParams *p, uint16_t shm_offset)
{
#define SM(_v, _f) (((_v) << _f##_S) & _f)
struct bwn_softc *sc = mac->mac_sc;
uint16_t params[BWN_NR_WMEPARAMS];
int slot, tmp;
unsigned int i;
slot = BWN_READ_2(mac, BWN_RNG) &
SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
memset(&params, 0, sizeof(params));
DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d "
"wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit,
p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn);
params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32;
params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX);
params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn;
params[BWN_WMEPARAM_BSLOTS] = slot;
params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn;
for (i = 0; i < N(params); i++) {
if (i == BWN_WMEPARAM_STATUS) {
tmp = bwn_shm_read_2(mac, BWN_SHARED,
shm_offset + (i * 2));
tmp |= 0x100;
bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
tmp);
} else {
bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
params[i]);
}
}
}
static void
bwn_mac_write_bssid(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t tmp;
int i;
uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2];
bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid);
memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN);
memcpy(mac_bssid + IEEE80211_ADDR_LEN, sc->sc_bssid,
IEEE80211_ADDR_LEN);
for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) {
tmp = (uint32_t) (mac_bssid[i + 0]);
tmp |= (uint32_t) (mac_bssid[i + 1]) << 8;
tmp |= (uint32_t) (mac_bssid[i + 2]) << 16;
tmp |= (uint32_t) (mac_bssid[i + 3]) << 24;
bwn_ram_write(mac, 0x20 + i, tmp);
}
}
static void
bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset,
const uint8_t *macaddr)
{
static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 };
uint16_t data;
if (!macaddr)
macaddr = zero;
offset |= 0x0020;
BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset);
data = macaddr[0];
data |= macaddr[1] << 8;
BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
data = macaddr[2];
data |= macaddr[3] << 8;
BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
data = macaddr[4];
data |= macaddr[5] << 8;
BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
}
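/*
 * Program one hardware key slot; per-station slots also get the peer
 * MAC address written into the RCMTA (or shared memory on old cores).
 */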
static void
bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm,
const uint8_t *key, size_t key_len, const uint8_t *mac_addr)
{
uint8_t buf[BWN_SEC_KEYSIZE] = { 0, };
uint8_t per_sta_keys_start = 8;
if (BWN_SEC_NEWAPI(mac))
per_sta_keys_start = 4;
KASSERT(index < mac->mac_max_nr_keys,
("%s:%d: fail", __func__, __LINE__));
KASSERT(key_len <= BWN_SEC_KEYSIZE,
("%s:%d: fail", __func__, __LINE__));
if (index >= per_sta_keys_start)
bwn_key_macwrite(mac, index, NULL);
if (key)
memcpy(buf, key, key_len);
bwn_key_write(mac, index, algorithm, buf);
if (index >= per_sta_keys_start)
bwn_key_macwrite(mac, index, mac_addr);
mac->mac_key[index].algorithm = algorithm;
}
static void
bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t addrtmp[2] = { 0, 0 };
uint8_t start = 8;
if (BWN_SEC_NEWAPI(mac))
start = 4;
KASSERT(index >= start,
("%s:%d: fail", __func__, __LINE__));
index -= start;
if (addr) {
addrtmp[0] = addr[0];
addrtmp[0] |= ((uint32_t) (addr[1]) << 8);
addrtmp[0] |= ((uint32_t) (addr[2]) << 16);
addrtmp[0] |= ((uint32_t) (addr[3]) << 24);
addrtmp[1] = addr[4];
addrtmp[1] |= ((uint32_t) (addr[5]) << 8);
}
if (siba_get_revid(sc->sc_dev) >= 5) {
bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]);
bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]);
} else {
if (index >= 8) {
bwn_shm_write_4(mac, BWN_SHARED,
BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]);
bwn_shm_write_2(mac, BWN_SHARED,
BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]);
}
}
}
static void
bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm,
const uint8_t *key)
{
unsigned int i;
uint32_t offset;
uint16_t kidx, value;
kidx = BWN_SEC_KEY2FW(mac, index);
bwn_shm_write_2(mac, BWN_SHARED,
BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm);
offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE);
for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) {
value = key[i];
value |= (uint16_t)(key[i + 1]) << 8;
bwn_shm_write_2(mac, BWN_SHARED, offset + i, value);
}
}
static void
bwn_phy_exit(struct bwn_mac *mac)
{
mac->mac_phy.rf_onoff(mac, 0);
if (mac->mac_phy.exit != NULL)
mac->mac_phy.exit(mac);
}
static void
bwn_dma_free(struct bwn_mac *mac)
{
struct bwn_dma *dma;
if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0)
return;
dma = &mac->mac_method.dma;
bwn_dma_ringfree(&dma->rx);
bwn_dma_ringfree(&dma->wme[WME_AC_BK]);
bwn_dma_ringfree(&dma->wme[WME_AC_BE]);
bwn_dma_ringfree(&dma->wme[WME_AC_VI]);
bwn_dma_ringfree(&dma->wme[WME_AC_VO]);
bwn_dma_ringfree(&dma->mcast);
}
static void
bwn_core_stop(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
BWN_ASSERT_LOCKED(sc);
if (mac->mac_status < BWN_MAC_STATUS_STARTED)
return;
callout_stop(&sc->sc_rfswitch_ch);
callout_stop(&sc->sc_task_ch);
callout_stop(&sc->sc_watchdog_ch);
sc->sc_watchdog_timer = 0;
BWN_WRITE_4(mac, BWN_INTR_MASK, 0);
BWN_READ_4(mac, BWN_INTR_MASK);
bwn_mac_suspend(mac);
mac->mac_status = BWN_MAC_STATUS_INITED;
}
static int
bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan)
{
struct bwn_mac *up_dev = NULL;
struct bwn_mac *down_dev;
struct bwn_mac *mac;
int err, status;
uint8_t gmode;
BWN_ASSERT_LOCKED(sc);
TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) {
if (IEEE80211_IS_CHAN_2GHZ(chan) &&
mac->mac_phy.supports_2ghz) {
up_dev = mac;
gmode = 1;
} else if (IEEE80211_IS_CHAN_5GHZ(chan) &&
mac->mac_phy.supports_5ghz) {
up_dev = mac;
gmode = 0;
} else {
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (EINVAL);
}
if (up_dev != NULL)
break;
}
if (up_dev == NULL) {
device_printf(sc->sc_dev, "Could not find a device\n");
return (ENODEV);
}
if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode)
return (0);
DPRINTF(sc, BWN_DEBUG_RF | BWN_DEBUG_PHY | BWN_DEBUG_RESET,
"switching to %s-GHz band\n",
IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5");
down_dev = sc->sc_curmac;
status = down_dev->mac_status;
if (status >= BWN_MAC_STATUS_STARTED)
bwn_core_stop(down_dev);
if (status >= BWN_MAC_STATUS_INITED)
bwn_core_exit(down_dev);
if (down_dev != up_dev)
bwn_phy_reset(down_dev);
up_dev->mac_phy.gmode = gmode;
if (status >= BWN_MAC_STATUS_INITED) {
err = bwn_core_init(up_dev);
if (err) {
device_printf(sc->sc_dev,
"fatal: failed to initialize for %s-GHz\n",
IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5");
goto fail;
}
}
if (status >= BWN_MAC_STATUS_STARTED)
bwn_core_start(up_dev);
KASSERT(up_dev->mac_status == status, ("%s: fail", __func__));
sc->sc_curmac = up_dev;
return (0);
fail:
sc->sc_curmac = NULL;
return (err);
}
static void
bwn_rf_turnon(struct bwn_mac *mac)
{
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__);
bwn_mac_suspend(mac);
mac->mac_phy.rf_onoff(mac, 1);
mac->mac_phy.rf_on = 1;
bwn_mac_enable(mac);
}
static void
bwn_rf_turnoff(struct bwn_mac *mac)
{
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__);
bwn_mac_suspend(mac);
mac->mac_phy.rf_onoff(mac, 0);
mac->mac_phy.rf_on = 0;
bwn_mac_enable(mac);
}
/*
* PHY reset.
*/
static void
bwn_phy_reset(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
siba_write_4(sc->sc_dev, SIBA_TGSLOW,
((siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~BWN_TGSLOW_SUPPORT_G) |
BWN_TGSLOW_PHYRESET) | SIBA_TGSLOW_FGC);
DELAY(1000);
siba_write_4(sc->sc_dev, SIBA_TGSLOW,
(siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~SIBA_TGSLOW_FGC));
DELAY(1000);
}
static int
bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct bwn_vap *bvp = BWN_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
enum ieee80211_state ostate = vap->iv_state;
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
int error;
DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
error = bvp->bv_newstate(vap, nstate, arg);
if (error != 0)
return (error);
BWN_LOCK(sc);
bwn_led_newstate(mac, nstate);
/*
* Clear the BSSID when we stop a STA
*/
if (vap->iv_opmode == IEEE80211_M_STA) {
if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
/*
* Clear out the BSSID. If we reassociate to
* the same AP, this will reinitialize things
* correctly...
*/
if (ic->ic_opmode == IEEE80211_M_STA &&
(sc->sc_flags & BWN_FLAG_INVALID) == 0) {
memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN);
bwn_set_macaddr(mac);
}
}
}
if (vap->iv_opmode == IEEE80211_M_MONITOR ||
vap->iv_opmode == IEEE80211_M_AHDEMO) {
/* XXX nothing to do? */
} else if (nstate == IEEE80211_S_RUN) {
memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN);
bwn_set_opmode(mac);
bwn_set_pretbtt(mac);
bwn_spu_setdelay(mac, 0);
bwn_set_macaddr(mac);
}
BWN_UNLOCK(sc);
return (error);
}
static void
bwn_set_pretbtt(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint16_t pretbtt;
if (ic->ic_opmode == IEEE80211_M_IBSS)
pretbtt = 2;
else
pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 120 : 250;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt);
BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt);
}
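/*
 * Interrupt filter: acknowledge and latch the interrupt reasons, mask
 * further interrupts and hand the real work off to the taskqueue.
 */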
static int
bwn_intr(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
uint32_t reason;
if (mac->mac_status < BWN_MAC_STATUS_STARTED ||
(sc->sc_flags & BWN_FLAG_INVALID))
return (FILTER_STRAY);
DPRINTF(sc, BWN_DEBUG_INTR, "%s: called\n", __func__);
reason = BWN_READ_4(mac, BWN_INTR_REASON);
if (reason == 0xffffffff) /* shared IRQ */
return (FILTER_STRAY);
reason &= mac->mac_intr_mask;
if (reason == 0)
return (FILTER_HANDLED);
DPRINTF(sc, BWN_DEBUG_INTR, "%s: reason=0x%08x\n", __func__, reason);
mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00;
mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00;
mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00;
mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00;
mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00;
BWN_WRITE_4(mac, BWN_INTR_REASON, reason);
BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]);
BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]);
BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]);
BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]);
BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]);
/* Disable interrupts. */
BWN_WRITE_4(mac, BWN_INTR_MASK, 0);
mac->mac_reason_intr = reason;
BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);
taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask);
return (FILTER_HANDLED);
}
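/*
 * Interrupt taskqueue handler: dispatch the latched interrupt reasons
 * (errors, beacons, RX/TX completions) and re-enable interrupts.
 */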
static void
bwn_intrtask(void *arg, int npending)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
uint32_t merged = 0;
int i, tx = 0, rx = 0;
BWN_LOCK(sc);
if (mac->mac_status < BWN_MAC_STATUS_STARTED ||
(sc->sc_flags & BWN_FLAG_INVALID)) {
BWN_UNLOCK(sc);
return;
}
for (i = 0; i < N(mac->mac_reason); i++)
merged |= mac->mac_reason[i];
if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR)
device_printf(sc->sc_dev, "MAC trans error\n");
if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) {
DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__);
mac->mac_phy.txerrors--;
if (mac->mac_phy.txerrors == 0) {
mac->mac_phy.txerrors = BWN_TXERROR_MAX;
bwn_restart(mac, "PHY TX errors");
}
}
if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) {
if (merged & BWN_DMAINTR_FATALMASK) {
device_printf(sc->sc_dev,
"Fatal DMA error: %#x %#x %#x %#x %#x %#x\n",
mac->mac_reason[0], mac->mac_reason[1],
mac->mac_reason[2], mac->mac_reason[3],
mac->mac_reason[4], mac->mac_reason[5]);
bwn_restart(mac, "DMA error");
BWN_UNLOCK(sc);
return;
}
if (merged & BWN_DMAINTR_NONFATALMASK) {
device_printf(sc->sc_dev,
"DMA error: %#x %#x %#x %#x %#x %#x\n",
mac->mac_reason[0], mac->mac_reason[1],
mac->mac_reason[2], mac->mac_reason[3],
mac->mac_reason[4], mac->mac_reason[5]);
}
}
if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG)
bwn_intr_ucode_debug(mac);
if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI)
bwn_intr_tbtt_indication(mac);
if (mac->mac_reason_intr & BWN_INTR_ATIM_END)
bwn_intr_atim_end(mac);
if (mac->mac_reason_intr & BWN_INTR_BEACON)
bwn_intr_beacon(mac);
if (mac->mac_reason_intr & BWN_INTR_PMQ)
bwn_intr_pmq(mac);
if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK)
bwn_intr_noise(mac);
if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) {
bwn_dma_rx(mac->mac_method.dma.rx);
rx = 1;
}
} else
rx = bwn_pio_rx(&mac->mac_method.pio.rx);
KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
if (mac->mac_reason_intr & BWN_INTR_TX_OK) {
bwn_intr_txeof(mac);
tx = 1;
}
BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask);
if (sc->sc_blink_led != NULL && sc->sc_led_blink) {
int evt = BWN_LED_EVENT_NONE;
if (tx && rx) {
if (sc->sc_rx_rate > sc->sc_tx_rate)
evt = BWN_LED_EVENT_RX;
else
evt = BWN_LED_EVENT_TX;
} else if (tx) {
evt = BWN_LED_EVENT_TX;
} else if (rx) {
evt = BWN_LED_EVENT_RX;
} else if (rx == 0) {
evt = BWN_LED_EVENT_POLL;
}
if (evt != BWN_LED_EVENT_NONE)
bwn_led_event(mac, evt);
}
if (mbufq_first(&sc->sc_snd) != NULL)
bwn_start(sc);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);
BWN_UNLOCK(sc);
}
static void
bwn_restart(struct bwn_mac *mac, const char *msg)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
if (mac->mac_status < BWN_MAC_STATUS_INITED)
return;
device_printf(sc->sc_dev, "HW reset: %s\n", msg);
ieee80211_runtask(ic, &mac->mac_hwreset);
}
static void
bwn_intr_ucode_debug(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint16_t reason;
if (mac->mac_fw.opensource == 0)
return;
reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG);
switch (reason) {
case BWN_DEBUGINTR_PANIC:
bwn_handle_fwpanic(mac);
break;
case BWN_DEBUGINTR_DUMP_SHM:
device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n");
break;
case BWN_DEBUGINTR_DUMP_REGS:
device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n");
break;
case BWN_DEBUGINTR_MARKER:
device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n");
break;
default:
device_printf(sc->sc_dev,
"ucode debug unknown reason: %#x\n", reason);
}
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG,
BWN_DEBUGINTR_ACK);
}
static void
bwn_intr_tbtt_indication(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
if (ic->ic_opmode != IEEE80211_M_HOSTAP)
bwn_psctl(mac, 0);
if (ic->ic_opmode == IEEE80211_M_IBSS)
mac->mac_flags |= BWN_MAC_FLAG_DFQVALID;
}
static void
bwn_intr_atim_end(struct bwn_mac *mac)
{
if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) {
BWN_WRITE_4(mac, BWN_MACCMD,
BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID);
mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID;
}
}
static void
bwn_intr_beacon(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint32_t cmd, beacon0, beacon1;
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS)
return;
mac->mac_intr_mask &= ~BWN_INTR_BEACON;
cmd = BWN_READ_4(mac, BWN_MACCMD);
beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID);
beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID);
if (beacon0 && beacon1) {
BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON);
mac->mac_intr_mask |= BWN_INTR_BEACON;
return;
}
if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) {
sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP;
bwn_load_beacon0(mac);
bwn_load_beacon1(mac);
cmd = BWN_READ_4(mac, BWN_MACCMD);
cmd |= BWN_MACCMD_BEACON0_VALID;
BWN_WRITE_4(mac, BWN_MACCMD, cmd);
} else {
if (!beacon0) {
bwn_load_beacon0(mac);
cmd = BWN_READ_4(mac, BWN_MACCMD);
cmd |= BWN_MACCMD_BEACON0_VALID;
BWN_WRITE_4(mac, BWN_MACCMD, cmd);
} else if (!beacon1) {
bwn_load_beacon1(mac);
cmd = BWN_READ_4(mac, BWN_MACCMD);
cmd |= BWN_MACCMD_BEACON1_VALID;
BWN_WRITE_4(mac, BWN_MACCMD, cmd);
}
}
}
static void
bwn_intr_pmq(struct bwn_mac *mac)
{
uint32_t tmp;
while (1) {
tmp = BWN_READ_4(mac, BWN_PS_STATUS);
if (!(tmp & 0x00000008))
break;
}
BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002);
}
static void
bwn_intr_noise(struct bwn_mac *mac)
{
struct bwn_phy_g *pg = &mac->mac_phy.phy_g;
uint16_t tmp;
uint8_t noise[4];
uint8_t i, j;
int32_t average;
if (mac->mac_phy.type != BWN_PHYTYPE_G)
return;
KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__));
*((uint32_t *)noise) = htole32(bwn_jssi_read(mac));
if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f ||
noise[3] == 0x7f)
goto new;
KASSERT(mac->mac_noise.noi_nsamples < 8,
("%s:%d: fail", __func__, __LINE__));
i = mac->mac_noise.noi_nsamples;
noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1);
noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1);
noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1);
noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1);
mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]];
mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]];
mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]];
mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]];
mac->mac_noise.noi_nsamples++;
if (mac->mac_noise.noi_nsamples == 8) {
average = 0;
for (i = 0; i < 8; i++) {
for (j = 0; j < 4; j++)
average += mac->mac_noise.noi_samples[i][j];
}
average = (((average / 32) * 125) + 64) / 128;
tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f;
if (tmp >= 8)
average += 2;
else
average -= 25;
average -= (tmp == 8) ? 72 : 48;
mac->mac_stats.link_noise = average;
mac->mac_noise.noi_running = 0;
return;
}
new:
bwn_noise_gensample(mac);
}
static int
bwn_pio_rx(struct bwn_pio_rxqueue *prq)
{
struct bwn_mac *mac = prq->prq_mac;
struct bwn_softc *sc = mac->mac_sc;
unsigned int i;
BWN_ASSERT_LOCKED(sc);
if (mac->mac_status < BWN_MAC_STATUS_STARTED)
return (0);
for (i = 0; i < 5000; i++) {
if (bwn_pio_rxeof(prq) == 0)
break;
}
if (i >= 5000)
device_printf(sc->sc_dev, "too many RX frames in PIO mode\n");
return ((i > 0) ? 1 : 0);
}
static void
bwn_dma_rx(struct bwn_dma_ring *dr)
{
int slot, curslot;
KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__));
curslot = dr->get_curslot(dr);
KASSERT(curslot >= 0 && curslot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
slot = dr->dr_curslot;
for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot))
bwn_dma_rxeof(dr, &slot);
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
dr->set_curslot(dr, slot);
dr->dr_curslot = slot;
}
static void
bwn_intr_txeof(struct bwn_mac *mac)
{
struct bwn_txstatus stat;
uint32_t stat0, stat1;
uint16_t tmp;
BWN_ASSERT_LOCKED(mac->mac_sc);
while (1) {
stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0);
if (!(stat0 & 0x00000001))
break;
stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1);
DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT,
"%s: stat0=0x%08x, stat1=0x%08x\n",
__func__,
stat0,
stat1);
stat.cookie = (stat0 >> 16);
stat.seq = (stat1 & 0x0000ffff);
stat.phy_stat = ((stat1 & 0x00ff0000) >> 16);
tmp = (stat0 & 0x0000ffff);
stat.framecnt = ((tmp & 0xf000) >> 12);
stat.rtscnt = ((tmp & 0x0f00) >> 8);
stat.sreason = ((tmp & 0x001c) >> 2);
stat.pm = (tmp & 0x0080) ? 1 : 0;
stat.im = (tmp & 0x0040) ? 1 : 0;
stat.ampdu = (tmp & 0x0020) ? 1 : 0;
stat.ack = (tmp & 0x0002) ? 1 : 0;
DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT,
"%s: cookie=%d, seq=%d, phystat=0x%02x, framecnt=%d, "
"rtscnt=%d, sreason=%d, pm=%d, im=%d, ampdu=%d, ack=%d\n",
__func__,
stat.cookie,
stat.seq,
stat.phy_stat,
stat.framecnt,
stat.rtscnt,
stat.sreason,
stat.pm,
stat.im,
stat.ampdu,
stat.ack);
bwn_handle_txeof(mac, &stat);
}
}
static void
bwn_hwreset(void *arg, int npending)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
int error = 0;
int prev_status;
BWN_LOCK(sc);
prev_status = mac->mac_status;
if (prev_status >= BWN_MAC_STATUS_STARTED)
bwn_core_stop(mac);
if (prev_status >= BWN_MAC_STATUS_INITED)
bwn_core_exit(mac);
if (prev_status >= BWN_MAC_STATUS_INITED) {
error = bwn_core_init(mac);
if (error)
goto out;
}
if (prev_status >= BWN_MAC_STATUS_STARTED)
bwn_core_start(mac);
out:
if (error) {
device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error);
sc->sc_curmac = NULL;
}
BWN_UNLOCK(sc);
}
static void
bwn_handle_fwpanic(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint16_t reason;
reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG);
device_printf(sc->sc_dev, "fw panic (%u)\n", reason);
if (reason == BWN_FWPANIC_RESTART)
bwn_restart(mac, "ucode panic");
}
static void
bwn_load_beacon0(struct bwn_mac *mac)
{
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
static void
bwn_load_beacon1(struct bwn_mac *mac)
{
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
static uint32_t
bwn_jssi_read(struct bwn_mac *mac)
{
uint32_t val = 0;
val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a);
val <<= 16;
val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088);
return (val);
}
static void
bwn_noise_gensample(struct bwn_mac *mac)
{
uint32_t jssi = 0x7f7f7f7f;
bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff));
bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16);
BWN_WRITE_4(mac, BWN_MACCMD,
BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE);
}
static int
bwn_dma_freeslot(struct bwn_dma_ring *dr)
{
BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc);
return (dr->dr_numslots - dr->dr_usedslot);
}
static int
bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot)
{
BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc);
KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1,
("%s:%d: fail", __func__, __LINE__));
if (slot == dr->dr_numslots - 1)
return (0);
return (slot + 1);
}
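/*
 * Process one completed RX descriptor: replace its mbuf, validate the
 * length reported by the RX header and pass the frame to bwn_rxeof().
 */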
static void
bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot)
{
struct bwn_mac *mac = dr->dr_mac;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *meta;
struct bwn_rxhdr4 *rxhdr;
struct mbuf *m;
uint32_t macstat = 0;
int32_t tmp;
int cnt = 0;
uint16_t len;
dr->getdesc(dr, *slot, &desc, &meta);
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD);
m = meta->mt_m;
if (bwn_dma_newbuf(dr, desc, meta, 0)) {
counter_u64_add(sc->sc_ic.ic_ierrors, 1);
return;
}
rxhdr = mtod(m, struct bwn_rxhdr4 *);
len = le16toh(rxhdr->frame_len);
if (len <= 0) {
counter_u64_add(sc->sc_ic.ic_ierrors, 1);
return;
}
if (bwn_dma_check_redzone(dr, m)) {
device_printf(sc->sc_dev, "redzone error.\n");
bwn_dma_set_redzone(dr, m);
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap,
BUS_DMASYNC_PREWRITE);
return;
}
if (len > dr->dr_rx_bufsize) {
tmp = len;
while (1) {
dr->getdesc(dr, *slot, &desc, &meta);
bwn_dma_set_redzone(dr, meta->mt_m);
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap,
BUS_DMASYNC_PREWRITE);
*slot = bwn_dma_nextslot(dr, *slot);
cnt++;
tmp -= dr->dr_rx_bufsize;
if (tmp <= 0)
break;
}
device_printf(sc->sc_dev, "too small buffer "
"(len %u buffer %u dropped %d)\n",
len, dr->dr_rx_bufsize, cnt);
return;
}
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
case BWN_FW_HDR_410:
macstat = le32toh(rxhdr->ps4.r351.mac_status);
break;
case BWN_FW_HDR_598:
macstat = le32toh(rxhdr->ps4.r598.mac_status);
break;
}
if (macstat & BWN_RX_MAC_FCSERR) {
if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) {
device_printf(sc->sc_dev, "RX drop\n");
return;
}
}
m->m_len = m->m_pkthdr.len = len + dr->dr_frameoffset;
m_adj(m, dr->dr_frameoffset);
bwn_rxeof(dr->dr_mac, m, rxhdr);
}
static void
bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_stats *stats = &mac->mac_stats;
BWN_ASSERT_LOCKED(mac->mac_sc);
if (status->im)
device_printf(sc->sc_dev, "TODO: STATUS IM\n");
if (status->ampdu)
device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n");
if (status->rtscnt) {
if (status->rtscnt == 0xf)
stats->rtsfail++;
else
stats->rts++;
}
if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
bwn_dma_handle_txeof(mac, status);
} else {
bwn_pio_handle_txeof(mac, status);
}
bwn_phy_txpower_check(mac, 0);
}
static uint8_t
bwn_pio_rxeof(struct bwn_pio_rxqueue *prq)
{
struct bwn_mac *mac = prq->prq_mac;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_rxhdr4 rxhdr;
struct mbuf *m;
uint32_t ctl32, macstat = 0, v32;
unsigned int i, padding;
uint16_t ctl16, len, totlen, v16;
unsigned char *mp;
char *data;
memset(&rxhdr, 0, sizeof(rxhdr));
if (prq->prq_rev >= 8) {
ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL);
if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY))
return (0);
bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL,
BWN_PIO8_RXCTL_FRAMEREADY);
for (i = 0; i < 10; i++) {
ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL);
if (ctl32 & BWN_PIO8_RXCTL_DATAREADY)
goto ready;
DELAY(10);
}
} else {
ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL);
if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY))
return (0);
bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL,
BWN_PIO_RXCTL_FRAMEREADY);
for (i = 0; i < 10; i++) {
ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL);
if (ctl16 & BWN_PIO_RXCTL_DATAREADY)
goto ready;
DELAY(10);
}
}
device_printf(sc->sc_dev, "%s: timed out\n", __func__);
return (1);
ready:
if (prq->prq_rev >= 8)
siba_read_multi_4(sc->sc_dev, &rxhdr, sizeof(rxhdr),
prq->prq_base + BWN_PIO8_RXDATA);
else
siba_read_multi_2(sc->sc_dev, &rxhdr, sizeof(rxhdr),
prq->prq_base + BWN_PIO_RXDATA);
len = le16toh(rxhdr.frame_len);
if (len > 0x700) {
device_printf(sc->sc_dev, "%s: len is too big\n", __func__);
goto error;
}
if (len == 0) {
device_printf(sc->sc_dev, "%s: len is 0\n", __func__);
goto error;
}
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
case BWN_FW_HDR_410:
macstat = le32toh(rxhdr.ps4.r351.mac_status);
break;
case BWN_FW_HDR_598:
macstat = le32toh(rxhdr.ps4.r598.mac_status);
break;
}
if (macstat & BWN_RX_MAC_FCSERR) {
if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) {
device_printf(sc->sc_dev, "%s: FCS error\n", __func__);
goto error;
}
}
padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0;
totlen = len + padding;
KASSERT(totlen <= MCLBYTES, ("too big..\n"));
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->sc_dev, "%s: out of memory\n", __func__);
goto error;
}
mp = mtod(m, unsigned char *);
if (prq->prq_rev >= 8) {
siba_read_multi_4(sc->sc_dev, mp, (totlen & ~3),
prq->prq_base + BWN_PIO8_RXDATA);
if (totlen & 3) {
v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA);
data = &(mp[totlen - 1]);
switch (totlen & 3) {
case 3:
*data = (v32 >> 16);
data--;
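/* FALLTHROUGH */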
case 2:
*data = (v32 >> 8);
data--;
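/* FALLTHROUGH */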
case 1:
*data = v32;
}
}
} else {
siba_read_multi_2(sc->sc_dev, mp, (totlen & ~1),
prq->prq_base + BWN_PIO_RXDATA);
if (totlen & 1) {
v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA);
mp[totlen - 1] = v16;
}
}
m->m_len = m->m_pkthdr.len = totlen;
bwn_rxeof(prq->prq_mac, m, &rxhdr);
return (1);
error:
if (prq->prq_rev >= 8)
bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL,
BWN_PIO8_RXCTL_DATAREADY);
else
bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY);
return (1);
}
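/*
 * Attach a fresh RX mbuf to a descriptor, loading it through a spare
 * DMA map so the old buffer stays mapped if allocation or load fails.
 */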
static int
bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc,
struct bwn_dmadesc_meta *meta, int init)
{
struct bwn_mac *mac = dr->dr_mac;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_rxhdr4 *hdr;
bus_dmamap_t map;
bus_addr_t paddr;
struct mbuf *m;
int error;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
error = ENOBUFS;
/*
* If the NIC is up and running, we need to:
* - Clear RX buffer's header.
* - Restore RX descriptor settings.
*/
if (init)
return (error);
else
goto back;
}
m->m_len = m->m_pkthdr.len = MCLBYTES;
bwn_dma_set_redzone(dr, m);
/*
* Try to load RX buf into temporary DMA map
*/
error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m,
bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
if (error) {
m_freem(m);
/*
* See the comment above
*/
if (init)
return (error);
else
goto back;
}
if (!init)
bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap);
meta->mt_m = m;
meta->mt_paddr = paddr;
/*
* Swap RX buf's DMA map with the loaded temporary one
*/
map = meta->mt_dmap;
meta->mt_dmap = dr->dr_spare_dmap;
dr->dr_spare_dmap = map;
back:
/*
* Clear RX buf header
*/
hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *);
bzero(hdr, sizeof(*hdr));
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap,
BUS_DMASYNC_PREWRITE);
/*
* Setup RX buf descriptor
*/
dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len -
sizeof(*hdr), 0, 0, 0);
return (error);
}
static void
bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg,
bus_size_t mapsz __unused, int error)
{
if (!error) {
KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
*((bus_addr_t *)arg) = seg->ds_addr;
}
}
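/*
 * Convert a hardware rate code (BWN_*_RATE) into the net80211 rate
 * in 500 kb/s units (e.g. 1 Mb/s CCK -> 2, 54 Mb/s OFDM -> 108).
 */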
static int
bwn_hwrate2ieeerate(int rate)
{
switch (rate) {
case BWN_CCK_RATE_1MB:
return (2);
case BWN_CCK_RATE_2MB:
return (4);
case BWN_CCK_RATE_5MB:
return (11);
case BWN_CCK_RATE_11MB:
return (22);
case BWN_OFDM_RATE_6MB:
return (12);
case BWN_OFDM_RATE_9MB:
return (18);
case BWN_OFDM_RATE_12MB:
return (24);
case BWN_OFDM_RATE_18MB:
return (36);
case BWN_OFDM_RATE_24MB:
return (48);
case BWN_OFDM_RATE_36MB:
return (72);
case BWN_OFDM_RATE_48MB:
return (96);
case BWN_OFDM_RATE_54MB:
return (108);
default:
printf("Ooops\n");
return (0);
}
}
/*
* Post process the RX provided RSSI.
*
* Valid for A, B, G, LP PHYs.
*/
static int8_t
bwn_rx_rssi_calc(struct bwn_mac *mac, uint8_t in_rssi,
int ofdm, int adjust_2053, int adjust_2050)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *gphy = &phy->phy_g;
int tmp;
switch (phy->rf_ver) {
case 0x2050:
if (ofdm) {
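/*
 * Sign-extend the 8-bit RSSI sample, then apply the
 * 2050-specific scaling and offset.
 */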
tmp = in_rssi;
if (tmp > 127)
tmp -= 256;
tmp = tmp * 73 / 64;
if (adjust_2050)
tmp += 25;
else
tmp -= 3;
} else {
if (siba_sprom_get_bf_lo(mac->mac_sc->sc_dev)
& BWN_BFL_RSSI) {
if (in_rssi > 63)
in_rssi = 63;
tmp = gphy->pg_nrssi_lt[in_rssi];
tmp = (31 - tmp) * -131 / 128 - 57;
} else {
tmp = in_rssi;
tmp = (31 - tmp) * -149 / 128 - 68;
}
if (phy->type == BWN_PHYTYPE_G && adjust_2050)
tmp += 25;
}
break;
case 0x2060:
if (in_rssi > 127)
tmp = in_rssi - 256;
else
tmp = in_rssi;
break;
default:
tmp = in_rssi;
tmp = (tmp - 11) * 103 / 64;
if (adjust_2053)
tmp -= 109;
else
tmp -= 83;
}
return (tmp);
}
static void
bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr)
{
const struct bwn_rxhdr4 *rxhdr = _rxhdr;
struct bwn_plcp6 *plcp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211_frame_min *wh;
struct ieee80211_node *ni;
struct ieee80211com *ic = &sc->sc_ic;
uint32_t macstat;
int padding, rate, rssi = 0, noise = 0, type;
uint16_t phytype, phystat0, phystat3, chanstat;
unsigned char *mp = mtod(m, unsigned char *);
static int rx_mac_dec_rpt = 0;
BWN_ASSERT_LOCKED(sc);
phystat0 = le16toh(rxhdr->phy_status0);
/*
* XXX Note: phy_status3 doesn't exist for HT-PHY; it's only
* used for LP-PHY.
*/
phystat3 = le16toh(rxhdr->ps3.lp.phy_status3);
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
case BWN_FW_HDR_410:
macstat = le32toh(rxhdr->ps4.r351.mac_status);
chanstat = le16toh(rxhdr->ps4.r351.channel);
break;
case BWN_FW_HDR_598:
macstat = le32toh(rxhdr->ps4.r598.mac_status);
chanstat = le16toh(rxhdr->ps4.r598.channel);
break;
}
phytype = chanstat & BWN_RX_CHAN_PHYTYPE;
if (macstat & BWN_RX_MAC_FCSERR)
device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n");
if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV))
device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n");
if (macstat & BWN_RX_MAC_DECERR)
goto drop;
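/*
 * When the PADDING flag is set, the firmware inserts two pad bytes
 * ahead of the PLCP header; account for them before locating it.
 */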
padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0;
if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) {
device_printf(sc->sc_dev, "frame too short (length=%d)\n",
m->m_pkthdr.len);
goto drop;
}
plcp = (struct bwn_plcp6 *)(mp + padding);
m_adj(m, sizeof(struct bwn_plcp6) + padding);
if (m->m_pkthdr.len < IEEE80211_MIN_LEN) {
device_printf(sc->sc_dev, "frame too short (length=%d)\n",
m->m_pkthdr.len);
goto drop;
}
wh = mtod(m, struct ieee80211_frame_min *);
if (macstat & BWN_RX_MAC_DEC && rx_mac_dec_rpt++ < 50)
device_printf(sc->sc_dev,
"RX decryption attempted (old %d keyidx %#x)\n",
BWN_ISOLDFMT(mac),
(macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT);
if (phystat0 & BWN_RX_PHYST0_OFDM)
rate = bwn_plcp_get_ofdmrate(mac, plcp,
phytype == BWN_PHYTYPE_A);
else
rate = bwn_plcp_get_cckrate(mac, plcp);
if (rate == -1) {
if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP))
goto drop;
}
sc->sc_rx_rate = bwn_hwrate2ieeerate(rate);
/* rssi/noise */
switch (phytype) {
case BWN_PHYTYPE_A:
case BWN_PHYTYPE_B:
case BWN_PHYTYPE_G:
case BWN_PHYTYPE_LP:
rssi = bwn_rx_rssi_calc(mac, rxhdr->phy.abg.rssi,
!! (phystat0 & BWN_RX_PHYST0_OFDM),
!! (phystat0 & BWN_RX_PHYST0_GAINCTL),
!! (phystat3 & BWN_RX_PHYST3_TRSTATE));
break;
case BWN_PHYTYPE_N:
/* Broadcom has code for min/avg, but always used max */
if (rxhdr->phy.n.power0 == 16 || rxhdr->phy.n.power0 == 32)
rssi = max(rxhdr->phy.n.power1, rxhdr->ps2.n.power2);
else
rssi = max(rxhdr->phy.n.power0, rxhdr->phy.n.power1);
#if 0
DPRINTF(mac->mac_sc, BWN_DEBUG_RECV,
"%s: power0=%d, power1=%d, power2=%d\n",
__func__,
rxhdr->phy.n.power0,
rxhdr->phy.n.power1,
rxhdr->ps2.n.power2);
#endif
break;
default:
/* XXX TODO: implement rssi for other PHYs */
break;
}
/*
* RSSI here is absolute, not relative to the noise floor.
*/
noise = mac->mac_stats.link_noise;
rssi = rssi - noise;
/* RX radio tap */
if (ieee80211_radiotap_active(ic))
bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise);
m_adj(m, -IEEE80211_CRC_LEN);
BWN_UNLOCK(sc);
ni = ieee80211_find_rxnode(ic, wh);
if (ni != NULL) {
type = ieee80211_input(ni, m, rssi, noise);
ieee80211_free_node(ni);
} else
type = ieee80211_input_all(ic, m, rssi, noise);
BWN_LOCK(sc);
return;
drop:
device_printf(sc->sc_dev, "%s: dropped\n", __func__);
}
static void
bwn_ratectl_tx_complete(const struct ieee80211_node *ni,
const struct bwn_txstatus *status)
{
struct ieee80211_ratectl_tx_status txs;
int retrycnt = 0;
/*
* If we don't get an ACK, then we should log the
* full framecnt. That may be 0 if it's a PHY
* failure, so ensure that gets logged as some
* retry attempt.
*/
txs.flags = IEEE80211_RATECTL_STATUS_LONG_RETRY;
if (status->ack) {
txs.status = IEEE80211_RATECTL_TX_SUCCESS;
retrycnt = status->framecnt - 1;
} else {
txs.status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
retrycnt = status->framecnt;
if (retrycnt == 0)
retrycnt = 1;
}
txs.long_retries = retrycnt;
ieee80211_ratectl_tx_complete(ni, &txs);
}
static void
bwn_dma_handle_txeof(struct bwn_mac *mac,
const struct bwn_txstatus *status)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr;
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *meta;
struct bwn_softc *sc = mac->mac_sc;
int slot;
BWN_ASSERT_LOCKED(sc);
dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot);
if (dr == NULL) {
device_printf(sc->sc_dev, "failed to parse cookie\n");
return;
}
KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__));
while (1) {
KASSERT(slot >= 0 && slot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
dr->getdesc(dr, slot, &desc, &meta);
if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER)
bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap);
else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY)
bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap);
if (meta->mt_islast) {
KASSERT(meta->mt_m != NULL,
("%s:%d: fail", __func__, __LINE__));
bwn_ratectl_tx_complete(meta->mt_ni, status);
ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0);
meta->mt_ni = NULL;
meta->mt_m = NULL;
} else
KASSERT(meta->mt_m == NULL,
("%s:%d: fail", __func__, __LINE__));
dr->dr_usedslot--;
if (meta->mt_islast)
break;
slot = bwn_dma_nextslot(dr, slot);
}
sc->sc_watchdog_timer = 0;
if (dr->dr_stop) {
KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME,
("%s:%d: fail", __func__, __LINE__));
dr->dr_stop = 0;
}
}
static void
bwn_pio_handle_txeof(struct bwn_mac *mac,
const struct bwn_txstatus *status)
{
struct bwn_pio_txqueue *tq;
struct bwn_pio_txpkt *tp = NULL;
struct bwn_softc *sc = mac->mac_sc;
BWN_ASSERT_LOCKED(sc);
tq = bwn_pio_parse_cookie(mac, status->cookie, &tp);
if (tq == NULL)
return;
tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4);
tq->tq_free++;
/* XXX ieee80211_tx_complete()? */
if (tp->tp_ni != NULL) {
/*
* Do any tx complete callback. Note this must
* be done before releasing the node reference.
*/
bwn_ratectl_tx_complete(tp->tp_ni, status);
if (tp->tp_m->m_flags & M_TXCB)
ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0);
ieee80211_free_node(tp->tp_ni);
tp->tp_ni = NULL;
}
m_freem(tp->tp_m);
tp->tp_m = NULL;
TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list);
sc->sc_watchdog_timer = 0;
}
static void
bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
struct ieee80211com *ic = &sc->sc_ic;
unsigned long now;
bwn_txpwr_result_t result;
BWN_GETTIME(now);
if (!(flags & BWN_TXPWR_IGNORE_TIME) && ieee80211_time_before(now, phy->nexttime))
return;
phy->nexttime = now + 2 * 1000;
if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306)
return;
if (phy->recalc_txpwr != NULL) {
result = phy->recalc_txpwr(mac,
(flags & BWN_TXPWR_IGNORE_TSSI) ? 1 : 0);
if (result == BWN_TXPWR_RES_DONE)
return;
KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST,
("%s: fail", __func__));
KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__));
ieee80211_runtask(ic, &mac->mac_txpower);
}
}
static uint16_t
bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset)
{
return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset));
}
static uint32_t
bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset)
{
return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset));
}
static void
bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value)
{
BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value);
}
static void
bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value)
{
BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value);
}
static int
bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12:
return (BWN_OFDM_RATE_6MB);
case 18:
return (BWN_OFDM_RATE_9MB);
case 24:
return (BWN_OFDM_RATE_12MB);
case 36:
return (BWN_OFDM_RATE_18MB);
case 48:
return (BWN_OFDM_RATE_24MB);
case 72:
return (BWN_OFDM_RATE_36MB);
case 96:
return (BWN_OFDM_RATE_48MB);
case 108:
return (BWN_OFDM_RATE_54MB);
/* CCK rates (NB: not IEEE std, device-specific) */
case 2:
return (BWN_CCK_RATE_1MB);
case 4:
return (BWN_CCK_RATE_2MB);
case 11:
return (BWN_CCK_RATE_5MB);
case 22:
return (BWN_CCK_RATE_11MB);
}
device_printf(sc->sc_dev, "unsupported rate %d\n", rate);
return (BWN_CCK_RATE_1MB);
}
static uint16_t
bwn_set_txhdr_phyctl1(struct bwn_mac *mac, uint8_t bitrate)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t control = 0;
uint16_t bw;
/* XXX TODO: this is for LP phy, what about N-PHY, etc? */
bw = BWN_TXH_PHY1_BW_20;
if (BWN_ISCCKRATE(bitrate) && phy->type != BWN_PHYTYPE_LP) {
control = bw;
} else {
control = bw;
/* Figure out coding rate and modulation */
/* XXX TODO: table-ize, for MCS transmit */
/* Note: this is BWN_*_RATE values */
switch (bitrate) {
case BWN_CCK_RATE_1MB:
control |= 0;
break;
case BWN_CCK_RATE_2MB:
control |= 1;
break;
case BWN_CCK_RATE_5MB:
control |= 2;
break;
case BWN_CCK_RATE_11MB:
control |= 3;
break;
case BWN_OFDM_RATE_6MB:
control |= BWN_TXH_PHY1_CRATE_1_2;
control |= BWN_TXH_PHY1_MODUL_BPSK;
break;
case BWN_OFDM_RATE_9MB:
control |= BWN_TXH_PHY1_CRATE_3_4;
control |= BWN_TXH_PHY1_MODUL_BPSK;
break;
case BWN_OFDM_RATE_12MB:
control |= BWN_TXH_PHY1_CRATE_1_2;
control |= BWN_TXH_PHY1_MODUL_QPSK;
break;
case BWN_OFDM_RATE_18MB:
control |= BWN_TXH_PHY1_CRATE_3_4;
control |= BWN_TXH_PHY1_MODUL_QPSK;
break;
case BWN_OFDM_RATE_24MB:
control |= BWN_TXH_PHY1_CRATE_1_2;
control |= BWN_TXH_PHY1_MODUL_QAM16;
break;
case BWN_OFDM_RATE_36MB:
control |= BWN_TXH_PHY1_CRATE_3_4;
control |= BWN_TXH_PHY1_MODUL_QAM16;
break;
case BWN_OFDM_RATE_48MB:
control |= BWN_TXH_PHY1_CRATE_1_2;
control |= BWN_TXH_PHY1_MODUL_QAM64;
break;
case BWN_OFDM_RATE_54MB:
control |= BWN_TXH_PHY1_CRATE_3_4;
control |= BWN_TXH_PHY1_MODUL_QAM64;
break;
default:
break;
}
control |= BWN_TXH_PHY1_MODE_SISO;
}
return control;
}
static int
bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni,
struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie)
{
const struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211_frame *wh;
struct ieee80211_frame *protwh;
struct ieee80211_frame_cts *cts;
struct ieee80211_frame_rts *rts;
const struct ieee80211_txparam *tp = ni->ni_txparms;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = &sc->sc_ic;
struct mbuf *mprot;
unsigned int len;
uint32_t macctl = 0;
int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type;
uint16_t phyctl = 0;
uint8_t rate, rate_fb;
int fill_phy_ctl1 = 0;
wh = mtod(m, struct ieee80211_frame *);
memset(txhdr, 0, sizeof(*txhdr));
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
if ((phy->type == BWN_PHYTYPE_N) || (phy->type == BWN_PHYTYPE_LP)
|| (phy->type == BWN_PHYTYPE_HT))
fill_phy_ctl1 = 1;
/*
* Find TX rate
*/
if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL))
rate = rate_fb = tp->mgmtrate;
else if (ismcast)
rate = rate_fb = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = rate_fb = tp->ucastrate;
else {
rix = ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
if (rix > 0)
rate_fb = ni->ni_rates.rs_rates[rix - 1] &
IEEE80211_RATE_VAL;
else
rate_fb = rate;
}
sc->sc_tx_rate = rate;
/* Note: this maps the selected ieee80211 rate to a hardware rate */
rate = bwn_ieeerate2hwrate(sc, rate);
rate_fb = bwn_ieeerate2hwrate(sc, rate_fb);
txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? bwn_plcp_getofdm(rate) :
bwn_plcp_getcck(rate);
bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc));
bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN);
/* XXX rate/rate_fb is the hardware rate */
if ((rate_fb == rate) ||
(*(u_int16_t *)wh->i_dur & htole16(0x8000)) ||
(*(u_int16_t *)wh->i_dur == htole16(0)))
txhdr->dur_fb = *(u_int16_t *)wh->i_dur;
else
txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt,
m->m_pkthdr.len, rate, isshort);
/* XXX TX encryption */
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r351.plcp),
m->m_pkthdr.len + IEEE80211_CRC_LEN, rate);
break;
case BWN_FW_HDR_410:
bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r410.plcp),
m->m_pkthdr.len + IEEE80211_CRC_LEN, rate);
break;
case BWN_FW_HDR_598:
bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r598.plcp),
m->m_pkthdr.len + IEEE80211_CRC_LEN, rate);
break;
}
bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb),
m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb);
txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM :
BWN_TX_EFT_FB_CCK;
txhdr->chan = phy->chan;
phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM :
BWN_TX_PHY_ENC_CCK;
/* XXX preamble? obey net80211 */
if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB ||
rate == BWN_CCK_RATE_11MB))
phyctl |= BWN_TX_PHY_SHORTPRMBL;
if (! phy->gmode)
macctl |= BWN_TX_MAC_5GHZ;
/* XXX TX antenna selection */
switch (bwn_antenna_sanitize(mac, 0)) {
case 0:
phyctl |= BWN_TX_PHY_ANT01AUTO;
break;
case 1:
phyctl |= BWN_TX_PHY_ANT0;
break;
case 2:
phyctl |= BWN_TX_PHY_ANT1;
break;
case 3:
phyctl |= BWN_TX_PHY_ANT2;
break;
case 4:
phyctl |= BWN_TX_PHY_ANT3;
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
if (!ismcast)
macctl |= BWN_TX_MAC_ACK;
macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU);
if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
macctl |= BWN_TX_MAC_LONGFRAME;
if (ic->ic_flags & IEEE80211_F_USEPROT) {
/* Note: don't fall back to CCK rates for 5G */
if (phy->gmode)
rts_rate = BWN_CCK_RATE_1MB;
else
rts_rate = BWN_OFDM_RATE_6MB;
rts_rate_fb = bwn_get_fbrate(rts_rate);
/* XXX 'rate' here is hardware rate now, not the net80211 rate */
protdur = ieee80211_compute_duration(ic->ic_rt,
m->m_pkthdr.len, rate, isshort) +
ieee80211_ack_duration(ic->ic_rt, rate, isshort);
if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
cts = (struct ieee80211_frame_cts *)
txhdr->body.r351.rts_frame;
break;
case BWN_FW_HDR_410:
cts = (struct ieee80211_frame_cts *)
txhdr->body.r410.rts_frame;
break;
case BWN_FW_HDR_598:
cts = (struct ieee80211_frame_cts *)
txhdr->body.r598.rts_frame;
break;
}
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr,
protdur);
KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts,
mprot->m_pkthdr.len);
m_freem(mprot);
macctl |= BWN_TX_MAC_SEND_CTSTOSELF;
len = sizeof(struct ieee80211_frame_cts);
} else {
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
rts = (struct ieee80211_frame_rts *)
txhdr->body.r351.rts_frame;
break;
case BWN_FW_HDR_410:
rts = (struct ieee80211_frame_rts *)
txhdr->body.r410.rts_frame;
break;
case BWN_FW_HDR_598:
rts = (struct ieee80211_frame_rts *)
txhdr->body.r598.rts_frame;
break;
}
/* XXX rate/rate_fb is the hardware rate */
protdur += ieee80211_ack_duration(ic->ic_rt, rate,
isshort);
mprot = ieee80211_alloc_rts(ic, wh->i_addr1,
wh->i_addr2, protdur);
KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts,
mprot->m_pkthdr.len);
m_freem(mprot);
macctl |= BWN_TX_MAC_SEND_RTSCTS;
len = sizeof(struct ieee80211_frame_rts);
}
len += IEEE80211_CRC_LEN;
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
bwn_plcp_genhdr((struct bwn_plcp4 *)
&txhdr->body.r351.rts_plcp, len, rts_rate);
break;
case BWN_FW_HDR_410:
bwn_plcp_genhdr((struct bwn_plcp4 *)
&txhdr->body.r410.rts_plcp, len, rts_rate);
break;
case BWN_FW_HDR_598:
bwn_plcp_genhdr((struct bwn_plcp4 *)
&txhdr->body.r598.rts_plcp, len, rts_rate);
break;
}
bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len,
rts_rate_fb);
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
protwh = (struct ieee80211_frame *)
&txhdr->body.r351.rts_frame;
break;
case BWN_FW_HDR_410:
protwh = (struct ieee80211_frame *)
&txhdr->body.r410.rts_frame;
break;
case BWN_FW_HDR_598:
protwh = (struct ieee80211_frame *)
&txhdr->body.r598.rts_frame;
break;
}
txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur;
if (BWN_ISOFDMRATE(rts_rate)) {
txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM;
txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate);
} else {
txhdr->eftypes |= BWN_TX_EFT_RTS_CCK;
txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate);
}
txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ?
BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK;
if (fill_phy_ctl1) {
txhdr->phyctl_1rts = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate));
txhdr->phyctl_1rtsfb = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate_fb));
}
}
if (fill_phy_ctl1) {
txhdr->phyctl_1 = htole16(bwn_set_txhdr_phyctl1(mac, rate));
txhdr->phyctl_1fb = htole16(bwn_set_txhdr_phyctl1(mac, rate_fb));
}
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
txhdr->body.r351.cookie = htole16(cookie);
break;
case BWN_FW_HDR_410:
txhdr->body.r410.cookie = htole16(cookie);
break;
case BWN_FW_HDR_598:
txhdr->body.r598.cookie = htole16(cookie);
break;
}
txhdr->macctl = htole32(macctl);
txhdr->phyctl = htole16(phyctl);
/*
* TX radio tap
*/
if (ieee80211_radiotap_active_vap(vap)) {
sc->sc_tx_th.wt_flags = 0;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
if (isshort &&
(rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB ||
rate == BWN_CCK_RATE_11MB))
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
sc->sc_tx_th.wt_rate = rate;
ieee80211_radiotap_tx(vap, m);
}
return (0);
}
static void
bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets,
const uint8_t rate)
{
uint32_t d, plen;
uint8_t *raw = plcp->o.raw;
if (BWN_ISOFDMRATE(rate)) {
d = bwn_plcp_getofdm(rate);
KASSERT(!(octets & 0xf000),
("%s:%d: fail", __func__, __LINE__));
d |= (octets << 5);
plcp->o.data = htole32(d);
} else {
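/*
 * CCK/DSSS: the PLCP LENGTH field carries the frame airtime in
 * microseconds, i.e. octets * 8 bits at (rate / 2) Mb/s, which is
 * octets * 16 / rate rounded up.  At 11 Mb/s the length-extension
 * bit (0x80) in the SERVICE field resolves the rounding ambiguity.
 */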
plen = octets * 16 / rate;
if ((octets * 16 % rate) > 0) {
plen++;
if ((rate == BWN_CCK_RATE_11MB)
&& ((octets * 8 % 11) < 4)) {
raw[1] = 0x84;
} else
raw[1] = 0x04;
} else
raw[1] = 0x04;
plcp->o.data |= htole32(plen << 16);
raw[0] = bwn_plcp_getcck(rate);
}
}
static uint8_t
bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n)
{
struct bwn_softc *sc = mac->mac_sc;
uint8_t mask;
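/*
 * Antenna 0 means automatic selection; otherwise make sure the
 * requested antenna is present in the SPROM availability mask,
 * falling back to automatic if it is not.
 */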
if (n == 0)
return (0);
if (mac->mac_phy.gmode)
mask = siba_sprom_get_ant_bg(sc->sc_dev);
else
mask = siba_sprom_get_ant_a(sc->sc_dev);
if (!(mask & (1 << (n - 1))))
return (0);
return (n);
}
/*
* Return a fallback rate for the given rate.
*
* Note: Don't fall back from OFDM to CCK.
*/
static uint8_t
bwn_get_fbrate(uint8_t bitrate)
{
switch (bitrate) {
/* CCK */
case BWN_CCK_RATE_1MB:
return (BWN_CCK_RATE_1MB);
case BWN_CCK_RATE_2MB:
return (BWN_CCK_RATE_1MB);
case BWN_CCK_RATE_5MB:
return (BWN_CCK_RATE_2MB);
case BWN_CCK_RATE_11MB:
return (BWN_CCK_RATE_5MB);
/* OFDM */
case BWN_OFDM_RATE_6MB:
return (BWN_OFDM_RATE_6MB);
case BWN_OFDM_RATE_9MB:
return (BWN_OFDM_RATE_6MB);
case BWN_OFDM_RATE_12MB:
return (BWN_OFDM_RATE_9MB);
case BWN_OFDM_RATE_18MB:
return (BWN_OFDM_RATE_12MB);
case BWN_OFDM_RATE_24MB:
return (BWN_OFDM_RATE_18MB);
case BWN_OFDM_RATE_36MB:
return (BWN_OFDM_RATE_24MB);
case BWN_OFDM_RATE_48MB:
return (BWN_OFDM_RATE_36MB);
case BWN_OFDM_RATE_54MB:
return (BWN_OFDM_RATE_48MB);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
static uint32_t
bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint32_t ctl, const void *_data, int len)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t value = 0;
const uint8_t *data = _data;
ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 |
BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31;
bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl);
siba_write_multi_4(sc->sc_dev, data, (len & ~3),
tq->tq_base + BWN_PIO8_TXDATA);
if (len & 3) {
ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 |
BWN_PIO8_TXCTL_24_31);
data = &(data[len - 1]);
switch (len & 3) {
case 3:
ctl |= BWN_PIO8_TXCTL_16_23;
value |= (uint32_t)(*data) << 16;
data--;
case 2:
ctl |= BWN_PIO8_TXCTL_8_15;
value |= (uint32_t)(*data) << 8;
data--;
case 1:
value |= (uint32_t)(*data);
}
bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl);
bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value);
}
return (ctl);
}
static void
bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t offset, uint32_t value)
{
BWN_WRITE_4(mac, tq->tq_base + offset, value);
}
static uint16_t
bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t ctl, const void *_data, int len)
{
struct bwn_softc *sc = mac->mac_sc;
const uint8_t *data = _data;
ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
siba_write_multi_2(sc->sc_dev, data, (len & ~1),
tq->tq_base + BWN_PIO_TXDATA);
if (len & 1) {
ctl &= ~BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]);
}
return (ctl);
}
static uint16_t
bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t ctl, struct mbuf *m0)
{
int i, j = 0;
uint16_t data = 0;
const uint8_t *buf;
struct mbuf *m = m0;
ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
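/*
 * Pack the mbuf chain into 16-bit words (low byte first) and push
 * them through the PIO data register; an odd trailing byte is
 * flushed below with only the low byte lane enabled.
 */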
for (; m != NULL; m = m->m_next) {
buf = mtod(m, const uint8_t *);
for (i = 0; i < m->m_len; i++) {
if (!((j++) % 2))
data |= buf[i];
else {
data |= (buf[i] << 8);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data);
data = 0;
}
}
}
if (m0->m_pkthdr.len % 2) {
ctl &= ~BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data);
}
return (ctl);
}
static void
bwn_set_slot_time(struct bwn_mac *mac, uint16_t time)
{
/* XXX should exit if 5GHz band .. */
if (mac->mac_phy.type != BWN_PHYTYPE_G)
return;
BWN_WRITE_2(mac, 0x684, 510 + time);
/* Disabled in Linux b43, can adversely affect performance */
#if 0
bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time);
#endif
}
static struct bwn_dma_ring *
bwn_dma_select(struct bwn_mac *mac, uint8_t prio)
{
if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0)
return (mac->mac_method.dma.wme[WME_AC_BE]);
switch (prio) {
case 3:
return (mac->mac_method.dma.wme[WME_AC_VO]);
case 2:
return (mac->mac_method.dma.wme[WME_AC_VI]);
case 0:
return (mac->mac_method.dma.wme[WME_AC_BE]);
case 1:
return (mac->mac_method.dma.wme[WME_AC_BK]);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (NULL);
}
static int
bwn_dma_getslot(struct bwn_dma_ring *dr)
{
int slot;
BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc);
KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__));
KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__));
KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__));
slot = bwn_dma_nextslot(dr, dr->dr_curslot);
KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__));
dr->dr_curslot = slot;
dr->dr_usedslot++;
return (slot);
}
static struct bwn_pio_txqueue *
bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie,
struct bwn_pio_txpkt **pack)
{
struct bwn_pio *pio = &mac->mac_method.pio;
struct bwn_pio_txqueue *tq = NULL;
unsigned int index;
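/*
 * The cookie's top nibble selects the PIO TX queue; the low 12 bits
 * index the packet within that queue.
 */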
switch (cookie & 0xf000) {
case 0x1000:
tq = &pio->wme[WME_AC_BK];
break;
case 0x2000:
tq = &pio->wme[WME_AC_BE];
break;
case 0x3000:
tq = &pio->wme[WME_AC_VI];
break;
case 0x4000:
tq = &pio->wme[WME_AC_VO];
break;
case 0x5000:
tq = &pio->mcast;
break;
}
KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__));
if (tq == NULL)
return (NULL);
index = (cookie & 0x0fff);
KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__));
if (index >= N(tq->tq_pkts))
return (NULL);
*pack = &tq->tq_pkts[index];
KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__));
return (tq);
}
static void
bwn_txpwr(void *arg, int npending)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc;
if (mac == NULL)
return;
sc = mac->mac_sc;
BWN_LOCK(sc);
if (mac->mac_status >= BWN_MAC_STATUS_STARTED &&
mac->mac_phy.set_txpwr != NULL)
mac->mac_phy.set_txpwr(mac);
BWN_UNLOCK(sc);
}
static void
bwn_task_15s(struct bwn_mac *mac)
{
uint16_t reg;
if (mac->mac_fw.opensource) {
reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG);
if (reg) {
bwn_restart(mac, "fw watchdog");
return;
}
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1);
}
if (mac->mac_phy.task_15s)
mac->mac_phy.task_15s(mac);
mac->mac_phy.txerrors = BWN_TXERROR_MAX;
}
static void
bwn_task_30s(struct bwn_mac *mac)
{
if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running)
return;
mac->mac_noise.noi_running = 1;
mac->mac_noise.noi_nsamples = 0;
bwn_noise_gensample(mac);
}
static void
bwn_task_60s(struct bwn_mac *mac)
{
if (mac->mac_phy.task_60s)
mac->mac_phy.task_60s(mac);
bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME);
}
static void
bwn_tasks(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
BWN_ASSERT_LOCKED(sc);
if (mac->mac_status != BWN_MAC_STATUS_STARTED)
return;
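/*
 * This runs every 15 seconds; every second invocation also runs
 * the 30 second task and every fourth the 60 second task.
 */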
if (mac->mac_task_state % 4 == 0)
bwn_task_60s(mac);
if (mac->mac_task_state % 2 == 0)
bwn_task_30s(mac);
bwn_task_15s(mac);
mac->mac_task_state++;
callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac);
}
static int
bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a)
{
struct bwn_softc *sc = mac->mac_sc;
KASSERT(a == 0, ("not support APHY\n"));
switch (plcp->o.raw[0] & 0xf) {
case 0xb:
return (BWN_OFDM_RATE_6MB);
case 0xf:
return (BWN_OFDM_RATE_9MB);
case 0xa:
return (BWN_OFDM_RATE_12MB);
case 0xe:
return (BWN_OFDM_RATE_18MB);
case 0x9:
return (BWN_OFDM_RATE_24MB);
case 0xd:
return (BWN_OFDM_RATE_36MB);
case 0x8:
return (BWN_OFDM_RATE_48MB);
case 0xc:
return (BWN_OFDM_RATE_54MB);
}
device_printf(sc->sc_dev, "incorrect OFDM rate %d\n",
plcp->o.raw[0] & 0xf);
return (-1);
}
static int
bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp)
{
struct bwn_softc *sc = mac->mac_sc;
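/*
 * The CCK SIGNAL field carries the rate in 100 kb/s units
 * (0x0a = 1 Mb/s, 0x14 = 2 Mb/s, 0x37 = 5.5 Mb/s, 0x6e = 11 Mb/s).
 */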
switch (plcp->o.raw[0]) {
case 0x0a:
return (BWN_CCK_RATE_1MB);
case 0x14:
return (BWN_CCK_RATE_2MB);
case 0x37:
return (BWN_CCK_RATE_5MB);
case 0x6e:
return (BWN_CCK_RATE_11MB);
}
device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]);
return (-1);
}
static void
bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m,
const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate,
int rssi, int noise)
{
struct bwn_softc *sc = mac->mac_sc;
const struct ieee80211_frame_min *wh;
uint64_t tsf;
uint16_t low_mactime_now;
uint16_t mt;
if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
wh = mtod(m, const struct ieee80211_frame_min *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP;
bwn_tsf_read(mac, &tsf);
low_mactime_now = tsf;
tsf = tsf & ~0xffffULL;
switch (mac->mac_fw.fw_hdr_format) {
case BWN_FW_HDR_351:
case BWN_FW_HDR_410:
mt = le16toh(rxhdr->ps4.r351.mac_time);
break;
case BWN_FW_HDR_598:
mt = le16toh(rxhdr->ps4.r598.mac_time);
break;
}
tsf += mt;
if (low_mactime_now < mt)
tsf -= 0x10000;
sc->sc_rx_th.wr_tsf = tsf;
sc->sc_rx_th.wr_rate = rate;
sc->sc_rx_th.wr_antsignal = rssi;
sc->sc_rx_th.wr_antnoise = noise;
}
static void
bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf)
{
uint32_t low, high;
KASSERT(siba_get_revid(mac->mac_sc->sc_dev) >= 3,
("%s:%d: fail", __func__, __LINE__));
low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW);
high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH);
*tsf = high;
*tsf <<= 32;
*tsf |= low;
}
static int
bwn_dma_attach(struct bwn_mac *mac)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_softc *sc = mac->mac_sc;
bus_addr_t lowaddr = 0;
int error;
if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0)
return (0);
KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("%s: fail", __func__));
mac->mac_flags |= BWN_MAC_FLAG_DMA;
dma->dmatype = bwn_dma_gettype(mac);
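/*
 * Restrict DMA addresses to what the engine can address: a 1 GB
 * window for 30-bit engines, 4 GB for 32-bit engines, otherwise
 * the full bus address space.
 */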
if (dma->dmatype == BWN_DMA_30BIT)
lowaddr = BWN_BUS_SPACE_MAXADDR_30BIT;
else if (dma->dmatype == BWN_DMA_32BIT)
lowaddr = BUS_SPACE_MAXADDR_32BIT;
else
lowaddr = BUS_SPACE_MAXADDR;
/*
* Create top level DMA tag
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
BWN_ALIGN, 0, /* alignment, bounds */
lowaddr, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE, /* maxsize */
BUS_SPACE_UNRESTRICTED, /* nsegments */
BUS_SPACE_MAXSIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&dma->parent_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create parent DMA tag\n");
return (error);
}
/*
* Create TX/RX mbuf DMA tag
*/
error = bus_dma_tag_create(dma->parent_dtag,
1,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
MCLBYTES,
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dma->rxbuf_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
goto fail0;
}
error = bus_dma_tag_create(dma->parent_dtag,
1,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
MCLBYTES,
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dma->txbuf_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
goto fail1;
}
dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1, dma->dmatype);
if (!dma->wme[WME_AC_BK])
goto fail2;
dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1, dma->dmatype);
if (!dma->wme[WME_AC_BE])
goto fail3;
dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1, dma->dmatype);
if (!dma->wme[WME_AC_VI])
goto fail4;
dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1, dma->dmatype);
if (!dma->wme[WME_AC_VO])
goto fail5;
dma->mcast = bwn_dma_ringsetup(mac, 4, 1, dma->dmatype);
if (!dma->mcast)
goto fail6;
dma->rx = bwn_dma_ringsetup(mac, 0, 0, dma->dmatype);
if (!dma->rx)
goto fail7;
return (error);
fail7: bwn_dma_ringfree(&dma->mcast);
fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]);
fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]);
fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]);
fail3: bwn_dma_ringfree(&dma->wme[WME_AC_BK]);
fail2: bus_dma_tag_destroy(dma->txbuf_dtag);
fail1: bus_dma_tag_destroy(dma->rxbuf_dtag);
fail0: bus_dma_tag_destroy(dma->parent_dtag);
return (error);
}
static struct bwn_dma_ring *
bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status,
uint16_t cookie, int *slot)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr;
struct bwn_softc *sc = mac->mac_sc;
BWN_ASSERT_LOCKED(mac->mac_sc);
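/*
 * The TX status cookie encodes the queue in its top nibble and the
 * descriptor slot in its low 12 bits.
 */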
switch (cookie & 0xf000) {
case 0x1000:
dr = dma->wme[WME_AC_BK];
break;
case 0x2000:
dr = dma->wme[WME_AC_BE];
break;
case 0x3000:
dr = dma->wme[WME_AC_VI];
break;
case 0x4000:
dr = dma->wme[WME_AC_VO];
break;
case 0x5000:
dr = dma->mcast;
break;
default:
dr = NULL;
KASSERT(0 == 1,
("invalid cookie value %d", cookie & 0xf000));
}
*slot = (cookie & 0x0fff);
if (*slot < 0 || *slot >= dr->dr_numslots) {
/*
* XXX FIXME: the hardware sometimes reports duplicate TX DONE
* events carrying the same H/W sequence number.  When that
* happens, just print a warning and ignore the event.
*/
KASSERT(status->seq == dma->lastseq,
("%s:%d: fail", __func__, __LINE__));
device_printf(sc->sc_dev,
"out of slot ranges (0 < %d < %d)\n", *slot,
dr->dr_numslots);
return (NULL);
}
dma->lastseq = status->seq;
return (dr);
}
static void
bwn_dma_stop(struct bwn_mac *mac)
{
struct bwn_dma *dma;
if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0)
return;
dma = &mac->mac_method.dma;
bwn_dma_ringstop(&dma->rx);
bwn_dma_ringstop(&dma->wme[WME_AC_BK]);
bwn_dma_ringstop(&dma->wme[WME_AC_BE]);
bwn_dma_ringstop(&dma->wme[WME_AC_VI]);
bwn_dma_ringstop(&dma->wme[WME_AC_VO]);
bwn_dma_ringstop(&dma->mcast);
}
static void
bwn_dma_ringstop(struct bwn_dma_ring **dr)
{
if (dr == NULL)
return;
bwn_dma_cleanup(*dr);
}
static void
bwn_pio_stop(struct bwn_mac *mac)
{
struct bwn_pio *pio;
if (mac->mac_flags & BWN_MAC_FLAG_DMA)
return;
pio = &mac->mac_method.pio;
bwn_destroy_queue_tx(&pio->mcast);
bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]);
bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]);
bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]);
bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]);
}
static void
bwn_led_attach(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
const uint8_t *led_act = NULL;
uint16_t val[BWN_LED_MAX];
int i;
sc->sc_led_idle = (2350 * hz) / 1000;
sc->sc_led_blink = 1;
for (i = 0; i < N(bwn_vendor_led_act); ++i) {
if (siba_get_pci_subvendor(sc->sc_dev) ==
bwn_vendor_led_act[i].vid) {
led_act = bwn_vendor_led_act[i].led_act;
break;
}
}
if (led_act == NULL)
led_act = bwn_default_led_act;
val[0] = siba_sprom_get_gpio0(sc->sc_dev);
val[1] = siba_sprom_get_gpio1(sc->sc_dev);
val[2] = siba_sprom_get_gpio2(sc->sc_dev);
val[3] = siba_sprom_get_gpio3(sc->sc_dev);
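/*
 * An SPROM GPIO value of 0xff means the LED behaviour is not
 * programmed; fall back to the per-vendor (or default) action table.
 */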
for (i = 0; i < BWN_LED_MAX; ++i) {
struct bwn_led *led = &sc->sc_leds[i];
if (val[i] == 0xff) {
led->led_act = led_act[i];
} else {
if (val[i] & BWN_LED_ACT_LOW)
led->led_flags |= BWN_LED_F_ACTLOW;
led->led_act = val[i] & BWN_LED_ACT_MASK;
}
led->led_mask = (1 << i);
if (led->led_act == BWN_LED_ACT_BLINK_SLOW ||
led->led_act == BWN_LED_ACT_BLINK_POLL ||
led->led_act == BWN_LED_ACT_BLINK) {
led->led_flags |= BWN_LED_F_BLINK;
if (led->led_act == BWN_LED_ACT_BLINK_POLL)
led->led_flags |= BWN_LED_F_POLLABLE;
else if (led->led_act == BWN_LED_ACT_BLINK_SLOW)
led->led_flags |= BWN_LED_F_SLOW;
if (sc->sc_blink_led == NULL) {
sc->sc_blink_led = led;
if (led->led_flags & BWN_LED_F_SLOW)
BWN_LED_SLOWDOWN(sc->sc_led_idle);
}
}
DPRINTF(sc, BWN_DEBUG_LED,
"%dth led, act %d, lowact %d\n", i,
led->led_act, led->led_flags & BWN_LED_F_ACTLOW);
}
callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0);
}
static __inline uint16_t
bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on)
{
if (led->led_flags & BWN_LED_F_ACTLOW)
on = !on;
if (on)
val |= led->led_mask;
else
val &= ~led->led_mask;
return val;
}
static void
bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint16_t val;
int i;
if (nstate == IEEE80211_S_INIT) {
callout_stop(&sc->sc_led_blink_ch);
sc->sc_led_blinking = 0;
}
if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0)
return;
val = BWN_READ_2(mac, BWN_GPIO_CONTROL);
for (i = 0; i < BWN_LED_MAX; ++i) {
struct bwn_led *led = &sc->sc_leds[i];
int on;
if (led->led_act == BWN_LED_ACT_UNKN ||
led->led_act == BWN_LED_ACT_NULL)
continue;
if ((led->led_flags & BWN_LED_F_BLINK) &&
nstate != IEEE80211_S_INIT)
continue;
switch (led->led_act) {
case BWN_LED_ACT_ON: /* Always on */
on = 1;
break;
case BWN_LED_ACT_OFF: /* Always off */
case BWN_LED_ACT_5GHZ: /* TODO: 11A */
on = 0;
break;
default:
on = 1;
switch (nstate) {
case IEEE80211_S_INIT:
on = 0;
break;
case IEEE80211_S_RUN:
if (led->led_act == BWN_LED_ACT_11G &&
ic->ic_curmode != IEEE80211_MODE_11G)
on = 0;
break;
default:
if (led->led_act == BWN_LED_ACT_ASSOC)
on = 0;
break;
}
break;
}
val = bwn_led_onoff(led, val, on);
}
BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val);
}
static void
bwn_led_event(struct bwn_mac *mac, int event)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_led *led = sc->sc_blink_led;
int rate;
if (event == BWN_LED_EVENT_POLL) {
if ((led->led_flags & BWN_LED_F_POLLABLE) == 0)
return;
if (ticks - sc->sc_led_ticks < sc->sc_led_idle)
return;
}
sc->sc_led_ticks = ticks;
if (sc->sc_led_blinking)
return;
switch (event) {
case BWN_LED_EVENT_RX:
rate = sc->sc_rx_rate;
break;
case BWN_LED_EVENT_TX:
rate = sc->sc_tx_rate;
break;
case BWN_LED_EVENT_POLL:
rate = 0;
break;
default:
panic("unknown LED event %d\n", event);
break;
}
bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur,
bwn_led_duration[rate].off_dur);
}
static void
bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_led *led = sc->sc_blink_led;
uint16_t val;
val = BWN_READ_2(mac, BWN_GPIO_CONTROL);
val = bwn_led_onoff(led, val, 1);
BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val);
if (led->led_flags & BWN_LED_F_SLOW) {
BWN_LED_SLOWDOWN(on_dur);
BWN_LED_SLOWDOWN(off_dur);
}
sc->sc_led_blinking = 1;
sc->sc_led_blink_offdur = off_dur;
callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac);
}
static void
bwn_led_blink_next(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
uint16_t val;
val = BWN_READ_2(mac, BWN_GPIO_CONTROL);
val = bwn_led_onoff(sc->sc_blink_led, val, 0);
BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val);
callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur,
bwn_led_blink_end, mac);
}
static void
bwn_led_blink_end(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
sc->sc_led_blinking = 0;
}
static int
bwn_suspend(device_t dev)
{
struct bwn_softc *sc = device_get_softc(dev);
BWN_LOCK(sc);
bwn_stop(sc);
BWN_UNLOCK(sc);
return (0);
}
static int
bwn_resume(device_t dev)
{
struct bwn_softc *sc = device_get_softc(dev);
int error = EDOOFUS;
BWN_LOCK(sc);
if (sc->sc_ic.ic_nrunning > 0)
error = bwn_init(sc);
BWN_UNLOCK(sc);
if (error == 0)
ieee80211_start_all(&sc->sc_ic);
return (0);
}
static void
bwn_rfswitch(void *arg)
{
struct bwn_softc *sc = arg;
struct bwn_mac *mac = sc->sc_curmac;
int cur = 0, prev = 0;
KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED,
("%s: invalid MAC status %d", __func__, mac->mac_status));
if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP
|| mac->mac_phy.type == BWN_PHYTYPE_N) {
if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI)
& BWN_RF_HWENABLED_HI_MASK))
cur = 1;
} else {
if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO)
& BWN_RF_HWENABLED_LO_MASK)
cur = 1;
}
if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)
prev = 1;
DPRINTF(sc, BWN_DEBUG_RESET, "%s: called; cur=%d, prev=%d\n",
__func__, cur, prev);
if (cur != prev) {
if (cur)
mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON;
else
mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON;
device_printf(sc->sc_dev,
"status of RF switch is changed to %s\n",
cur ? "ON" : "OFF");
if (cur != mac->mac_phy.rf_on) {
if (cur)
bwn_rf_turnon(mac);
else
bwn_rf_turnoff(mac);
}
}
callout_schedule(&sc->sc_rfswitch_ch, hz);
}
static void
bwn_sysctl_node(struct bwn_softc *sc)
{
device_t dev = sc->sc_dev;
struct bwn_mac *mac;
struct bwn_stats *stats;
/* XXX assumes there is only one MAC. */
if ((mac = sc->sc_curmac) == NULL)
return;
stats = &mac->mac_stats;
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rts", CTLFLAG_RW, &stats->rts, 0, "RTS");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send");
#ifdef BWN_DEBUG
SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags");
#endif
}
static device_method_t bwn_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, bwn_probe),
DEVMETHOD(device_attach, bwn_attach),
DEVMETHOD(device_detach, bwn_detach),
DEVMETHOD(device_suspend, bwn_suspend),
DEVMETHOD(device_resume, bwn_resume),
DEVMETHOD_END
};
driver_t bwn_driver = {
"bwn",
bwn_methods,
sizeof(struct bwn_softc)
};
static devclass_t bwn_devclass;
DRIVER_MODULE(bwn, siba_bwn, bwn_driver, bwn_devclass, 0, 0);
MODULE_DEPEND(bwn, bwn_pci, 1, 1, 1);
MODULE_DEPEND(bwn, siba_bwn, 1, 1, 1);
MODULE_DEPEND(bwn, gpiobus, 1, 1, 1);
MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */
MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */
MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1);
MODULE_VERSION(bwn, 1);
Index: head/sys/dev/bwn/if_bwn_phy_lp.c
===================================================================
--- head/sys/dev/bwn/if_bwn_phy_lp.c (revision 328217)
+++ head/sys/dev/bwn/if_bwn_phy_lp.c (revision 328218)
@@ -1,3660 +1,3660 @@
/*-
* Copyright (c) 2009-2010 Weongyo Jeong <weongyo@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_bwn.h"
#include "opt_wlan.h"
/*
* The Broadcom Wireless LAN controller driver.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/firmware.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/bwn/if_bwn_siba.h>
#include <dev/bwn/if_bwnreg.h>
#include <dev/bwn/if_bwnvar.h>
#include <dev/bwn/if_bwn_debug.h>
#include <dev/bwn/if_bwn_misc.h>
#include <dev/bwn/if_bwn_util.h>
#include <dev/bwn/if_bwn_phy_common.h>
#include <dev/bwn/if_bwn_phy_lp.h>
static void bwn_phy_lp_readsprom(struct bwn_mac *);
static void bwn_phy_lp_bbinit(struct bwn_mac *);
static void bwn_phy_lp_txpctl_init(struct bwn_mac *);
static void bwn_phy_lp_calib(struct bwn_mac *);
static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *, uint8_t);
static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_set_anafilter(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_set_gaintbl(struct bwn_mac *, uint32_t);
static void bwn_phy_lp_digflt_save(struct bwn_mac *);
static void bwn_phy_lp_get_txpctlmode(struct bwn_mac *);
static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_bugfix(struct bwn_mac *);
static void bwn_phy_lp_digflt_restore(struct bwn_mac *);
static void bwn_phy_lp_tblinit(struct bwn_mac *);
static void bwn_phy_lp_bbinit_r2(struct bwn_mac *);
static void bwn_phy_lp_bbinit_r01(struct bwn_mac *);
static void bwn_phy_lp_b2062_init(struct bwn_mac *);
static void bwn_phy_lp_b2063_init(struct bwn_mac *);
static void bwn_phy_lp_rxcal_r2(struct bwn_mac *);
static void bwn_phy_lp_rccal_r12(struct bwn_mac *);
static void bwn_phy_lp_set_rccap(struct bwn_mac *);
static uint32_t bwn_phy_lp_roundup(uint32_t, uint32_t, uint8_t);
static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *);
static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *);
static void bwn_tab_write_multi(struct bwn_mac *, uint32_t, int,
const void *);
static void bwn_tab_read_multi(struct bwn_mac *, uint32_t, int, void *);
static struct bwn_txgain
bwn_phy_lp_get_txgain(struct bwn_mac *);
static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *);
static void bwn_phy_lp_set_txgain(struct bwn_mac *, struct bwn_txgain *);
static void bwn_phy_lp_set_bbmult(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_set_trsw_over(struct bwn_mac *, uint8_t, uint8_t);
static void bwn_phy_lp_set_rxgain(struct bwn_mac *, uint32_t);
static void bwn_phy_lp_set_deaf(struct bwn_mac *, uint8_t);
static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_clear_deaf(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_tblinit_r01(struct bwn_mac *);
static void bwn_phy_lp_tblinit_r2(struct bwn_mac *);
static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *);
static void bwn_tab_write(struct bwn_mac *, uint32_t, uint32_t);
static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *);
static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *);
static int bwn_phy_lp_loopback(struct bwn_mac *);
static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *, int, int, int, int,
int);
static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *, uint16_t, uint8_t,
struct bwn_phy_lp_iq_est *);
static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *);
static uint32_t bwn_tab_read(struct bwn_mac *, uint32_t);
static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_set_txgain_override(struct bwn_mac *);
static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *);
static uint8_t bwn_nbits(int32_t);
static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *, int, int,
struct bwn_txgain_entry *);
static void bwn_phy_lp_gaintbl_write(struct bwn_mac *, int,
struct bwn_txgain_entry);
static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *, int,
struct bwn_txgain_entry);
static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *, int,
struct bwn_txgain_entry);
static const uint8_t bwn_b2063_chantable_data[33][12] = {
{ 0x6f, 0x3c, 0x3c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6f, 0x2c, 0x2c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6f, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6e, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6e, 0xc, 0xc, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6a, 0xc, 0xc, 0, 0x2, 0x5, 0xd, 0xd, 0x77, 0x80, 0x20, 0 },
{ 0x6a, 0xc, 0xc, 0, 0x1, 0x5, 0xd, 0xc, 0x77, 0x80, 0x20, 0 },
{ 0x6a, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x80, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x70, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xb, 0xc, 0x77, 0x70, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0, 0x4, 0xb, 0xb, 0x77, 0x60, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xb, 0x77, 0x60, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xa, 0x77, 0x60, 0x20, 0 },
{ 0x68, 0xc, 0xc, 0, 0, 0x2, 0x9, 0x9, 0x77, 0x60, 0x20, 0 },
{ 0x68, 0xc, 0xc, 0, 0, 0x1, 0x8, 0x8, 0x77, 0x50, 0x10, 0 },
{ 0x67, 0xc, 0xc, 0, 0, 0, 0x8, 0x8, 0x77, 0x50, 0x10, 0 },
{ 0x64, 0xc, 0xc, 0, 0, 0, 0x2, 0x1, 0x77, 0x20, 0, 0 },
{ 0x64, 0xc, 0xc, 0, 0, 0, 0x1, 0x1, 0x77, 0x20, 0, 0 },
{ 0x63, 0xc, 0xc, 0, 0, 0, 0x1, 0, 0x77, 0x10, 0, 0 },
{ 0x63, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 },
{ 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 },
{ 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 },
{ 0x61, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 },
{ 0x60, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 },
{ 0x6e, 0xc, 0xc, 0, 0x9, 0xe, 0xf, 0xf, 0x77, 0xc0, 0x50, 0 },
{ 0x6e, 0xc, 0xc, 0, 0x9, 0xd, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 },
{ 0x6e, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 },
{ 0x6d, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 },
{ 0x6d, 0xc, 0xc, 0, 0x8, 0xb, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 },
{ 0x6d, 0xc, 0xc, 0, 0x8, 0xa, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 },
{ 0x6c, 0xc, 0xc, 0, 0x7, 0x9, 0xf, 0xf, 0x77, 0x90, 0x40, 0 },
{ 0x6c, 0xc, 0xc, 0, 0x6, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 },
{ 0x6c, 0xc, 0xc, 0, 0x5, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }
};
static const struct bwn_b206x_chan bwn_b2063_chantable[] = {
{ 1, 2412, bwn_b2063_chantable_data[0] },
{ 2, 2417, bwn_b2063_chantable_data[0] },
{ 3, 2422, bwn_b2063_chantable_data[0] },
{ 4, 2427, bwn_b2063_chantable_data[1] },
{ 5, 2432, bwn_b2063_chantable_data[1] },
{ 6, 2437, bwn_b2063_chantable_data[1] },
{ 7, 2442, bwn_b2063_chantable_data[1] },
{ 8, 2447, bwn_b2063_chantable_data[1] },
{ 9, 2452, bwn_b2063_chantable_data[2] },
{ 10, 2457, bwn_b2063_chantable_data[2] },
{ 11, 2462, bwn_b2063_chantable_data[3] },
{ 12, 2467, bwn_b2063_chantable_data[3] },
{ 13, 2472, bwn_b2063_chantable_data[3] },
{ 14, 2484, bwn_b2063_chantable_data[4] },
{ 34, 5170, bwn_b2063_chantable_data[5] },
{ 36, 5180, bwn_b2063_chantable_data[6] },
{ 38, 5190, bwn_b2063_chantable_data[7] },
{ 40, 5200, bwn_b2063_chantable_data[8] },
{ 42, 5210, bwn_b2063_chantable_data[9] },
{ 44, 5220, bwn_b2063_chantable_data[10] },
{ 46, 5230, bwn_b2063_chantable_data[11] },
{ 48, 5240, bwn_b2063_chantable_data[12] },
{ 52, 5260, bwn_b2063_chantable_data[13] },
{ 56, 5280, bwn_b2063_chantable_data[14] },
{ 60, 5300, bwn_b2063_chantable_data[14] },
{ 64, 5320, bwn_b2063_chantable_data[15] },
{ 100, 5500, bwn_b2063_chantable_data[16] },
{ 104, 5520, bwn_b2063_chantable_data[17] },
{ 108, 5540, bwn_b2063_chantable_data[18] },
{ 112, 5560, bwn_b2063_chantable_data[19] },
{ 116, 5580, bwn_b2063_chantable_data[20] },
{ 120, 5600, bwn_b2063_chantable_data[21] },
{ 124, 5620, bwn_b2063_chantable_data[21] },
{ 128, 5640, bwn_b2063_chantable_data[22] },
{ 132, 5660, bwn_b2063_chantable_data[22] },
{ 136, 5680, bwn_b2063_chantable_data[22] },
{ 140, 5700, bwn_b2063_chantable_data[23] },
{ 149, 5745, bwn_b2063_chantable_data[23] },
{ 153, 5765, bwn_b2063_chantable_data[23] },
{ 157, 5785, bwn_b2063_chantable_data[23] },
{ 161, 5805, bwn_b2063_chantable_data[23] },
{ 165, 5825, bwn_b2063_chantable_data[23] },
{ 184, 4920, bwn_b2063_chantable_data[24] },
{ 188, 4940, bwn_b2063_chantable_data[25] },
{ 192, 4960, bwn_b2063_chantable_data[26] },
{ 196, 4980, bwn_b2063_chantable_data[27] },
{ 200, 5000, bwn_b2063_chantable_data[28] },
{ 204, 5020, bwn_b2063_chantable_data[29] },
{ 208, 5040, bwn_b2063_chantable_data[30] },
{ 212, 5060, bwn_b2063_chantable_data[31] },
{ 216, 5080, bwn_b2063_chantable_data[32] }
};
static const uint8_t bwn_b2062_chantable_data[22][12] = {
{ 0xff, 0xff, 0xb5, 0x1b, 0x24, 0x32, 0x32, 0x88, 0x88, 0, 0, 0 },
{ 0, 0x22, 0x20, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0x10, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0x20, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0x10, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x63, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x62, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x30, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x20, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x10, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0x55, 0x77, 0x90, 0xf7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x44, 0x77, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x44, 0x66, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x33, 0x66, 0x70, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x22, 0x55, 0x60, 0xd7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x22, 0x55, 0x60, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x22, 0x44, 0x50, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x11, 0x44, 0x50, 0xa5, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x44, 0x40, 0xb6, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }
};
static const struct bwn_b206x_chan bwn_b2062_chantable[] = {
{ 1, 2412, bwn_b2062_chantable_data[0] },
{ 2, 2417, bwn_b2062_chantable_data[0] },
{ 3, 2422, bwn_b2062_chantable_data[0] },
{ 4, 2427, bwn_b2062_chantable_data[0] },
{ 5, 2432, bwn_b2062_chantable_data[0] },
{ 6, 2437, bwn_b2062_chantable_data[0] },
{ 7, 2442, bwn_b2062_chantable_data[0] },
{ 8, 2447, bwn_b2062_chantable_data[0] },
{ 9, 2452, bwn_b2062_chantable_data[0] },
{ 10, 2457, bwn_b2062_chantable_data[0] },
{ 11, 2462, bwn_b2062_chantable_data[0] },
{ 12, 2467, bwn_b2062_chantable_data[0] },
{ 13, 2472, bwn_b2062_chantable_data[0] },
{ 14, 2484, bwn_b2062_chantable_data[0] },
{ 34, 5170, bwn_b2062_chantable_data[1] },
{ 38, 5190, bwn_b2062_chantable_data[2] },
{ 42, 5210, bwn_b2062_chantable_data[2] },
{ 46, 5230, bwn_b2062_chantable_data[3] },
{ 36, 5180, bwn_b2062_chantable_data[4] },
{ 40, 5200, bwn_b2062_chantable_data[5] },
{ 44, 5220, bwn_b2062_chantable_data[6] },
{ 48, 5240, bwn_b2062_chantable_data[3] },
{ 52, 5260, bwn_b2062_chantable_data[3] },
{ 56, 5280, bwn_b2062_chantable_data[3] },
{ 60, 5300, bwn_b2062_chantable_data[7] },
{ 64, 5320, bwn_b2062_chantable_data[8] },
{ 100, 5500, bwn_b2062_chantable_data[9] },
{ 104, 5520, bwn_b2062_chantable_data[10] },
{ 108, 5540, bwn_b2062_chantable_data[10] },
{ 112, 5560, bwn_b2062_chantable_data[10] },
{ 116, 5580, bwn_b2062_chantable_data[11] },
{ 120, 5600, bwn_b2062_chantable_data[12] },
{ 124, 5620, bwn_b2062_chantable_data[12] },
{ 128, 5640, bwn_b2062_chantable_data[12] },
{ 132, 5660, bwn_b2062_chantable_data[12] },
{ 136, 5680, bwn_b2062_chantable_data[12] },
{ 140, 5700, bwn_b2062_chantable_data[12] },
{ 149, 5745, bwn_b2062_chantable_data[12] },
{ 153, 5765, bwn_b2062_chantable_data[12] },
{ 157, 5785, bwn_b2062_chantable_data[12] },
{ 161, 5805, bwn_b2062_chantable_data[12] },
{ 165, 5825, bwn_b2062_chantable_data[12] },
{ 184, 4920, bwn_b2062_chantable_data[13] },
{ 188, 4940, bwn_b2062_chantable_data[14] },
{ 192, 4960, bwn_b2062_chantable_data[15] },
{ 196, 4980, bwn_b2062_chantable_data[16] },
{ 200, 5000, bwn_b2062_chantable_data[17] },
{ 204, 5020, bwn_b2062_chantable_data[18] },
{ 208, 5040, bwn_b2062_chantable_data[19] },
{ 212, 5060, bwn_b2062_chantable_data[20] },
{ 216, 5080, bwn_b2062_chantable_data[21] }
};
/* for LP PHY */
static const struct bwn_rxcompco bwn_rxcompco_5354[] = {
{ 1, -66, 15 }, { 2, -66, 15 }, { 3, -66, 15 }, { 4, -66, 15 },
{ 5, -66, 15 }, { 6, -66, 15 }, { 7, -66, 14 }, { 8, -66, 14 },
{ 9, -66, 14 }, { 10, -66, 14 }, { 11, -66, 14 }, { 12, -66, 13 },
{ 13, -66, 13 }, { 14, -66, 13 },
};
/* for LP PHY */
static const struct bwn_rxcompco bwn_rxcompco_r12[] = {
{ 1, -64, 13 }, { 2, -64, 13 }, { 3, -64, 13 }, { 4, -64, 13 },
{ 5, -64, 12 }, { 6, -64, 12 }, { 7, -64, 12 }, { 8, -64, 12 },
{ 9, -64, 12 }, { 10, -64, 11 }, { 11, -64, 11 }, { 12, -64, 11 },
{ 13, -64, 11 }, { 14, -64, 10 }, { 34, -62, 24 }, { 38, -62, 24 },
{ 42, -62, 24 }, { 46, -62, 23 }, { 36, -62, 24 }, { 40, -62, 24 },
{ 44, -62, 23 }, { 48, -62, 23 }, { 52, -62, 23 }, { 56, -62, 22 },
{ 60, -62, 22 }, { 64, -62, 22 }, { 100, -62, 16 }, { 104, -62, 16 },
{ 108, -62, 15 }, { 112, -62, 14 }, { 116, -62, 14 }, { 120, -62, 13 },
{ 124, -62, 12 }, { 128, -62, 12 }, { 132, -62, 12 }, { 136, -62, 11 },
{ 140, -62, 10 }, { 149, -61, 9 }, { 153, -61, 9 }, { 157, -61, 9 },
{ 161, -61, 8 }, { 165, -61, 8 }, { 184, -62, 25 }, { 188, -62, 25 },
{ 192, -62, 25 }, { 196, -62, 25 }, { 200, -62, 25 }, { 204, -62, 25 },
{ 208, -62, 25 }, { 212, -62, 25 }, { 216, -62, 26 },
};
static const struct bwn_rxcompco bwn_rxcompco_r2 = { 0, -64, 0 };
static const uint8_t bwn_tab_sigsq_tbl[] = {
0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd,
0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00,
0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
};
static const uint8_t bwn_tab_pllfrac_tbl[] = {
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
};
static const uint16_t bwn_tabl_iqlocal_tbl[] = {
0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002,
0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600,
0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006,
0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
void
bwn_phy_lp_init_pre(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_lp *plp = &phy->phy_lp;
plp->plp_antenna = BWN_ANT_DEFAULT;
}
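/*
 * Main LP PHY initialization: read the SPROM calibration data, run the
 * baseband init, bring up the radio (B2062 or B2063), synchronize the
 * STX table for the B2063, run RC calibration, tune to channel 7 and
 * set up TX power control before the final calibration pass.
 */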
int
bwn_phy_lp_init(struct bwn_mac *mac)
{
static const struct bwn_stxtable tables[] = {
{ 2, 6, 0x3d, 3, 0x01 }, { 1, 12, 0x4c, 1, 0x01 },
{ 1, 8, 0x50, 0, 0x7f }, { 0, 8, 0x44, 0, 0xff },
{ 1, 0, 0x4a, 0, 0xff }, { 0, 4, 0x4d, 0, 0xff },
{ 1, 4, 0x4e, 0, 0xff }, { 0, 12, 0x4f, 0, 0x0f },
{ 1, 0, 0x4f, 4, 0x0f }, { 3, 0, 0x49, 0, 0x0f },
{ 4, 3, 0x46, 4, 0x07 }, { 3, 15, 0x46, 0, 0x01 },
{ 4, 0, 0x46, 1, 0x07 }, { 3, 8, 0x48, 4, 0x07 },
{ 3, 11, 0x48, 0, 0x0f }, { 3, 4, 0x49, 4, 0x0f },
{ 2, 15, 0x45, 0, 0x01 }, { 5, 13, 0x52, 4, 0x07 },
{ 6, 0, 0x52, 7, 0x01 }, { 5, 3, 0x41, 5, 0x07 },
{ 5, 6, 0x41, 0, 0x0f }, { 5, 10, 0x42, 5, 0x07 },
{ 4, 15, 0x42, 0, 0x01 }, { 5, 0, 0x42, 1, 0x07 },
{ 4, 11, 0x43, 4, 0x0f }, { 4, 7, 0x43, 0, 0x0f },
{ 4, 6, 0x45, 1, 0x01 }, { 2, 7, 0x40, 4, 0x0f },
{ 2, 11, 0x40, 0, 0x0f }
};
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
const struct bwn_stxtable *st;
struct ieee80211com *ic = &sc->sc_ic;
int i, error;
uint16_t tmp;
bwn_phy_lp_readsprom(mac); /* XXX bad place */
bwn_phy_lp_bbinit(mac);
/* initialize RF */
BWN_PHY_SET(mac, BWN_PHY_4WIRECTL, 0x2);
DELAY(1);
BWN_PHY_MASK(mac, BWN_PHY_4WIRECTL, 0xfffd);
DELAY(1);
if (mac->mac_phy.rf_ver == 0x2062)
bwn_phy_lp_b2062_init(mac);
else {
bwn_phy_lp_b2063_init(mac);
/* synchronize stx table. */
for (i = 0; i < N(tables); i++) {
st = &tables[i];
tmp = BWN_RF_READ(mac, st->st_rfaddr);
tmp >>= st->st_rfshift;
tmp <<= st->st_physhift;
BWN_PHY_SETMASK(mac,
BWN_PHY_OFDM(0xf2 + st->st_phyoffset),
~(st->st_mask << st->st_physhift), tmp);
}
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf0), 0x5f80);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf1), 0);
}
/* calibrate RC */
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_rxcal_r2(mac);
else if (!plp->plp_rccap) {
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_rccal_r12(mac);
} else
bwn_phy_lp_set_rccap(mac);
error = bwn_phy_lp_switch_channel(mac, 7);
if (error)
device_printf(sc->sc_dev,
"failed to change channel 7 (%d)\n", error);
bwn_phy_lp_txpctl_init(mac);
bwn_phy_lp_calib(mac);
return (0);
}
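/*
 * Indirect PHY register accessors: the register offset is written to
 * BWN_PHYCTL and the data is transferred through BWN_PHYDATA.
 */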
uint16_t
bwn_phy_lp_read(struct bwn_mac *mac, uint16_t reg)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
return (BWN_READ_2(mac, BWN_PHYDATA));
}
void
bwn_phy_lp_write(struct bwn_mac *mac, uint16_t reg, uint16_t value)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
BWN_WRITE_2(mac, BWN_PHYDATA, value);
}
void
bwn_phy_lp_maskset(struct bwn_mac *mac, uint16_t reg, uint16_t mask,
uint16_t set)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
BWN_WRITE_2(mac, BWN_PHYDATA,
(BWN_READ_2(mac, BWN_PHYDATA) & mask) | set);
}
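/*
 * Indirect RF (radio) register accessors via BWN_RFCTL/BWN_RFDATALO.
 * Register 1 cannot be accessed through this interface; reads also
 * need a revision-dependent address bit (0x100 or 0x200).
 */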
uint16_t
bwn_phy_lp_rf_read(struct bwn_mac *mac, uint16_t reg)
{
KASSERT(reg != 1, ("inaccessible register %d", reg));
if (mac->mac_phy.rev < 2 && reg != 0x4001)
reg |= 0x100;
if (mac->mac_phy.rev >= 2)
reg |= 0x200;
BWN_WRITE_2(mac, BWN_RFCTL, reg);
return (BWN_READ_2(mac, BWN_RFDATALO));
}
void
bwn_phy_lp_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value)
{
KASSERT(reg != 1, ("inaccessible register %d", reg));
BWN_WRITE_2(mac, BWN_RFCTL, reg);
BWN_WRITE_2(mac, BWN_RFDATALO, value);
}
void
bwn_phy_lp_rf_onoff(struct bwn_mac *mac, int on)
{
if (on) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xe0ff);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2,
(mac->mac_phy.rev >= 2) ? 0xf7f7 : 0xffe7);
return;
}
if (mac->mac_phy.rev >= 2) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x83ff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0x80ff);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xdfff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0808);
return;
}
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xe0ff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfcff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0018);
}
int
bwn_phy_lp_switch_channel(struct bwn_mac *mac, uint32_t chan)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_lp *plp = &phy->phy_lp;
int error;
if (phy->rf_ver == 0x2063) {
error = bwn_phy_lp_b2063_switch_channel(mac, chan);
if (error)
return (error);
} else {
error = bwn_phy_lp_b2062_switch_channel(mac, chan);
if (error)
return (error);
bwn_phy_lp_set_anafilter(mac, chan);
bwn_phy_lp_set_gaintbl(mac, ieee80211_ieee2mhz(chan, 0));
}
plp->plp_chan = chan;
BWN_WRITE_2(mac, BWN_CHANNEL, chan);
return (0);
}
uint32_t
bwn_phy_lp_get_default_chan(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
return (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 1 : 36);
}
void
bwn_phy_lp_set_antenna(struct bwn_mac *mac, int antenna)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_lp *plp = &phy->phy_lp;
if (phy->rev >= 2 || antenna > BWN_ANTAUTO1)
return;
bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffd, antenna & 0x2);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffe, antenna & 0x1);
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_UCODE_ANTDIV_HELPER);
plp->plp_antenna = antenna;
}
void
bwn_phy_lp_task_60s(struct bwn_mac *mac)
{
bwn_phy_lp_calib(mac);
}
static void
bwn_phy_lp_readsprom(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
plp->plp_txisoband_m = siba_sprom_get_tri2g(sc->sc_dev);
plp->plp_bxarch = siba_sprom_get_bxa2g(sc->sc_dev);
plp->plp_rxpwroffset = siba_sprom_get_rxpo2g(sc->sc_dev);
plp->plp_rssivf = siba_sprom_get_rssismf2g(sc->sc_dev);
plp->plp_rssivc = siba_sprom_get_rssismc2g(sc->sc_dev);
plp->plp_rssigs = siba_sprom_get_rssisav2g(sc->sc_dev);
return;
}
plp->plp_txisoband_l = siba_sprom_get_tri5gl(sc->sc_dev);
plp->plp_txisoband_m = siba_sprom_get_tri5g(sc->sc_dev);
plp->plp_txisoband_h = siba_sprom_get_tri5gh(sc->sc_dev);
plp->plp_bxarch = siba_sprom_get_bxa5g(sc->sc_dev);
plp->plp_rxpwroffset = siba_sprom_get_rxpo5g(sc->sc_dev);
plp->plp_rssivf = siba_sprom_get_rssismf5g(sc->sc_dev);
plp->plp_rssivc = siba_sprom_get_rssismc5g(sc->sc_dev);
plp->plp_rssigs = siba_sprom_get_rssisav5g(sc->sc_dev);
}
static void
bwn_phy_lp_bbinit(struct bwn_mac *mac)
{
bwn_phy_lp_tblinit(mac);
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_bbinit_r2(mac);
else
bwn_phy_lp_bbinit_r01(mac);
}
static void
bwn_phy_lp_txpctl_init(struct bwn_mac *mac)
{
struct bwn_txgain gain_2ghz = { 4, 12, 12, 0 };
struct bwn_txgain gain_5ghz = { 7, 15, 14, 0 };
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
bwn_phy_lp_set_txgain(mac,
IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? &gain_2ghz : &gain_5ghz);
bwn_phy_lp_set_bbmult(mac, 150);
}
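/*
 * Periodic LP PHY calibration: with the MAC suspended and Bluetooth
 * coexistence overridden, refresh the TX power control state (a full
 * pass when the channel has changed) and then apply the per-channel
 * RX I/Q compensation coefficients from the bwn_rxcompco tables.
 */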
static void
bwn_phy_lp_calib(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
const struct bwn_rxcompco *rc = NULL;
struct bwn_txgain ogain;
int i, omode, oafeovr, orf, obbmult;
uint8_t mode, fc = 0;
if (plp->plp_chanfullcal != plp->plp_chan) {
plp->plp_chanfullcal = plp->plp_chan;
fc = 1;
}
bwn_mac_suspend(mac);
/* Bluetooth coexistence override */
BWN_WRITE_2(mac, BWN_BTCOEX_CTL, 0x3);
BWN_WRITE_2(mac, BWN_BTCOEX_TXCTL, 0xff);
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_digflt_save(mac);
bwn_phy_lp_get_txpctlmode(mac);
mode = plp->plp_txpctlmode;
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
if (mac->mac_phy.rev == 0 && mode != BWN_PHYLP_TXPCTL_OFF)
bwn_phy_lp_bugfix(mac);
if (mac->mac_phy.rev >= 2 && fc == 1) {
bwn_phy_lp_get_txpctlmode(mac);
omode = plp->plp_txpctlmode;
oafeovr = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40;
if (oafeovr)
ogain = bwn_phy_lp_get_txgain(mac);
orf = BWN_PHY_READ(mac, BWN_PHY_RF_PWR_OVERRIDE) & 0xff;
obbmult = bwn_phy_lp_get_bbmult(mac);
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
if (oafeovr)
bwn_phy_lp_set_txgain(mac, &ogain);
bwn_phy_lp_set_bbmult(mac, obbmult);
bwn_phy_lp_set_txpctlmode(mac, omode);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, orf);
}
bwn_phy_lp_set_txpctlmode(mac, mode);
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_digflt_restore(mac);
/* do RX IQ Calculation; assumes that noise is true. */
if (siba_get_chipid(sc->sc_dev) == 0x5354) {
for (i = 0; i < N(bwn_rxcompco_5354); i++) {
if (bwn_rxcompco_5354[i].rc_chan == plp->plp_chan)
rc = &bwn_rxcompco_5354[i];
}
} else if (mac->mac_phy.rev >= 2)
rc = &bwn_rxcompco_r2;
else {
for (i = 0; i < N(bwn_rxcompco_r12); i++) {
if (bwn_rxcompco_r12[i].rc_chan == plp->plp_chan)
rc = &bwn_rxcompco_r12[i];
}
}
if (rc == NULL)
goto fail;
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, rc->rc_c1);
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, rc->rc_c0 << 8);
bwn_phy_lp_set_trsw_over(mac, 1 /* TX */, 0 /* RX */);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7, 0);
} else {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf, 0);
}
bwn_phy_lp_set_rxgain(mac, 0x2d5d);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800);
bwn_phy_lp_set_deaf(mac, 0);
/* XXX return value not checked? */
(void)bwn_phy_lp_calc_rx_iq_comp(mac, 0xfff0);
bwn_phy_lp_clear_deaf(mac, 0);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffc);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfff7);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffdf);
/* disable RX GAIN override. */
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffe);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffef);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffbf);
if (mac->mac_phy.rev >= 2) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfbff);
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xe5), 0xfff7);
}
} else {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfdff);
}
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xf7ff);
fail:
bwn_mac_enable(mac);
}
void
bwn_phy_lp_switch_analog(struct bwn_mac *mac, int on)
{
if (on) {
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfff8);
return;
}
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVRVAL, 0x0007);
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x0007);
}
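/*
 * Tune the B2063 radio to the given channel: load the per-channel
 * register values from bwn_b2063_chantable, then program the RF PLL
 * dividers from the PMU crystal frequency and run a VCO calibration.
 */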
static int
bwn_phy_lp_b2063_switch_channel(struct bwn_mac *mac, uint8_t chan)
{
const struct bwn_b206x_chan *bc = NULL;
struct bwn_softc *sc = mac->mac_sc;
uint32_t count, freqref, freqvco, freqxtal, val[3], timeout, timeoutref,
tmp[6];
uint16_t old, scale, tmp16;
int i, div;
for (i = 0; i < N(bwn_b2063_chantable); i++) {
if (bwn_b2063_chantable[i].bc_chan == chan) {
bc = &bwn_b2063_chantable[i];
break;
}
}
if (bc == NULL)
return (EINVAL);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_VCOBUF1, bc->bc_data[0]);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_MIXER2, bc->bc_data[1]);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_BUF2, bc->bc_data[2]);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_RCCR1, bc->bc_data[3]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_1ST3, bc->bc_data[4]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND1, bc->bc_data[5]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND4, bc->bc_data[6]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND7, bc->bc_data[7]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_PS6, bc->bc_data[8]);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL2, bc->bc_data[9]);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL5, bc->bc_data[10]);
BWN_RF_WRITE(mac, BWN_B2063_PA_CTL11, bc->bc_data[11]);
old = BWN_RF_READ(mac, BWN_B2063_COM15);
BWN_RF_SET(mac, BWN_B2063_COM15, 0x1e);
freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
freqvco = bc->bc_freq << ((bc->bc_freq > 4000) ? 1 : 2);
freqref = freqxtal * 3;
div = (freqxtal <= 26000000 ? 1 : 2);
timeout = ((((8 * freqxtal) / (div * 5000000)) + 1) >> 1) - 1;
timeoutref = ((((8 * freqxtal) / (div * (timeout + 1))) +
999999) / 1000000) + 1;
BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB3, 0x2);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB6,
0xfff8, timeout >> 2);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7,
0xff9f, timeout << 5);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB5, timeoutref);
val[0] = bwn_phy_lp_roundup(freqxtal, 1000000, 16);
val[1] = bwn_phy_lp_roundup(freqxtal, 1000000 * div, 16);
val[2] = bwn_phy_lp_roundup(freqvco, 3, 16);
count = (bwn_phy_lp_roundup(val[2], val[1] + 16, 16) * (timeout + 1) *
(timeoutref + 1)) - 1;
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7,
0xf0, count >> 8);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB8, count & 0xff);
tmp[0] = ((val[2] * 62500) / freqref) << 4;
tmp[1] = ((val[2] * 62500) % freqref) << 4;
while (tmp[1] >= freqref) {
tmp[0]++;
tmp[1] -= freqref;
}
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG1, 0xffe0, tmp[0] >> 4);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfe0f, tmp[0] << 4);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfff0, tmp[0] >> 16);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG3, (tmp[1] >> 8) & 0xff);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG4, tmp[1] & 0xff);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF1, 0xb9);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF2, 0x88);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF3, 0x28);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF4, 0x63);
tmp[2] = ((41 * (val[2] - 3000)) / 1200) + 27;
tmp[3] = bwn_phy_lp_roundup(132000 * tmp[0], 8451, 16);
if (howmany(tmp[3], tmp[2]) > 60) {
scale = 1;
tmp[4] = ((tmp[3] + tmp[2]) / (tmp[2] << 1)) - 8;
} else {
scale = 0;
tmp[4] = ((tmp[3] + (tmp[2] >> 1)) / tmp[2]) - 8;
}
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffc0, tmp[4]);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffbf, scale << 6);
tmp[5] = bwn_phy_lp_roundup(100 * val[0], val[2], 16) * (tmp[4] * 8) *
(scale + 1);
if (tmp[5] > 150)
tmp[5] = 0;
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffe0, tmp[5]);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffdf, scale << 5);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfffb, 0x4);
if (freqxtal > 26000000)
BWN_RF_SET(mac, BWN_B2063_JTAG_XTAL_12, 0x2);
else
BWN_RF_MASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfd);
if (val[0] == 45)
BWN_RF_SET(mac, BWN_B2063_JTAG_VCO1, 0x2);
else
BWN_RF_MASK(mac, BWN_B2063_JTAG_VCO1, 0xfd);
BWN_RF_SET(mac, BWN_B2063_PLL_SP2, 0x3);
DELAY(1);
BWN_RF_MASK(mac, BWN_B2063_PLL_SP2, 0xfffc);
/* VCO Calibration */
BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, ~0x40);
tmp16 = BWN_RF_READ(mac, BWN_B2063_JTAG_CALNRST) & 0xf8;
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16);
DELAY(1);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x4);
DELAY(1);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x6);
DELAY(1);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x7);
DELAY(300);
BWN_RF_SET(mac, BWN_B2063_PLL_SP1, 0x40);
BWN_RF_WRITE(mac, BWN_B2063_COM15, old);
return (0);
}
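/*
 * Tune the B2062 radio to the given channel: load the per-channel
 * register values from bwn_b2062_chantable, compute the fractional
 * PLL divider from the crystal frequency and run a VCO calibration,
 * retrying once after a PLL bias reset before giving up with EIO.
 */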
static int
bwn_phy_lp_b2062_switch_channel(struct bwn_mac *mac, uint8_t chan)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
const struct bwn_b206x_chan *bc = NULL;
uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
uint32_t tmp[9];
int i;
for (i = 0; i < N(bwn_b2062_chantable); i++) {
if (bwn_b2062_chantable[i].bc_chan == chan) {
bc = &bwn_b2062_chantable[i];
break;
}
}
if (bc == NULL)
return (EINVAL);
BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL14, 0x04);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE0, bc->bc_data[0]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE2, bc->bc_data[1]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE3, bc->bc_data[2]);
BWN_RF_WRITE(mac, BWN_B2062_N_TX_TUNE, bc->bc_data[3]);
BWN_RF_WRITE(mac, BWN_B2062_S_LGENG_CTL1, bc->bc_data[4]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL5, bc->bc_data[5]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL6, bc->bc_data[6]);
BWN_RF_WRITE(mac, BWN_B2062_N_TX_PGA, bc->bc_data[7]);
BWN_RF_WRITE(mac, BWN_B2062_N_TX_PAD, bc->bc_data[8]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xcc);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0x07);
bwn_phy_lp_b2062_reset_pllbias(mac);
tmp[0] = freqxtal / 1000;
tmp[1] = plp->plp_div * 1000;
tmp[2] = tmp[1] * ieee80211_ieee2mhz(chan, 0);
if (ieee80211_ieee2mhz(chan, 0) < 4000)
tmp[2] *= 2;
tmp[3] = 48 * tmp[0];
tmp[5] = tmp[2] / tmp[3];
tmp[6] = tmp[2] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL26, tmp[5]);
tmp[4] = tmp[6] * 0x100;
tmp[5] = tmp[4] / tmp[3];
tmp[6] = tmp[4] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL27, tmp[5]);
tmp[4] = tmp[6] * 0x100;
tmp[5] = tmp[4] / tmp[3];
tmp[6] = tmp[4] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL28, tmp[5]);
tmp[4] = tmp[6] * 0x100;
tmp[5] = tmp[4] / tmp[3];
tmp[6] = tmp[4] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL29,
tmp[5] + ((2 * tmp[6]) / tmp[3]));
tmp[7] = BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL19);
tmp[8] = ((2 * tmp[2] * (tmp[7] + 1)) + (3 * tmp[0])) / (6 * tmp[0]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL23, (tmp[8] >> 8) + 16);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL24, tmp[8] & 0xff);
bwn_phy_lp_b2062_vco_calib(mac);
if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) {
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xfc);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0);
bwn_phy_lp_b2062_reset_pllbias(mac);
bwn_phy_lp_b2062_vco_calib(mac);
if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) {
BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04);
return (EIO);
}
}
BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04);
return (0);
}
static void
bwn_phy_lp_set_anafilter(struct bwn_mac *mac, uint8_t channel)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
uint16_t tmp = (channel == 14);
if (mac->mac_phy.rev < 2) {
BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xfcff, tmp << 9);
if ((mac->mac_phy.rev == 1) && (plp->plp_rccap))
bwn_phy_lp_set_rccap(mac);
return;
}
BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, 0x3f);
}
static void
bwn_phy_lp_set_gaintbl(struct bwn_mac *mac, uint32_t freq)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint16_t iso, tmp[3];
KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__));
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
iso = plp->plp_txisoband_m;
else if (freq <= 5320)
iso = plp->plp_txisoband_l;
else if (freq <= 5700)
iso = plp->plp_txisoband_m;
else
iso = plp->plp_txisoband_h;
tmp[0] = ((iso - 26) / 12) << 12;
tmp[1] = tmp[0] + 0x1000;
tmp[2] = tmp[0] + 0x2000;
bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), 3, tmp);
bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), 3, tmp);
}
static void
bwn_phy_lp_digflt_save(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
int i;
static const uint16_t addr[] = {
BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2),
BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4),
BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6),
BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8),
BWN_PHY_OFDM(0xcf),
};
static const uint16_t val[] = {
0xde5e, 0xe832, 0xe331, 0x4d26,
0x0026, 0x1420, 0x0020, 0xfe08,
0x0008,
};
for (i = 0; i < N(addr); i++) {
plp->plp_digfilt[i] = BWN_PHY_READ(mac, addr[i]);
BWN_PHY_WRITE(mac, addr[i], val[i]);
}
}
static void
bwn_phy_lp_get_txpctlmode(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
uint16_t ctl;
ctl = BWN_PHY_READ(mac, BWN_PHY_TX_PWR_CTL_CMD);
switch (ctl & BWN_PHY_TX_PWR_CTL_CMD_MODE) {
case BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_OFF;
break;
case BWN_PHY_TX_PWR_CTL_CMD_MODE_SW:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_SW;
break;
case BWN_PHY_TX_PWR_CTL_CMD_MODE_HW:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_HW;
break;
default:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_UNKNOWN;
device_printf(sc->sc_dev, "unknown command mode\n");
break;
}
}
static void
bwn_phy_lp_set_txpctlmode(struct bwn_mac *mac, uint8_t mode)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
uint16_t ctl;
uint8_t old;
bwn_phy_lp_get_txpctlmode(mac);
old = plp->plp_txpctlmode;
if (old == mode)
return;
plp->plp_txpctlmode = mode;
if (old != BWN_PHYLP_TXPCTL_ON_HW && mode == BWN_PHYLP_TXPCTL_ON_HW) {
BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, 0xff80,
plp->plp_tssiidx);
BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_NNUM,
0x8fff, ((uint16_t)plp->plp_tssinpt << 16));
/* disable TX GAIN override */
if (mac->mac_phy.rev < 2)
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff);
else {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xff7f);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xbfff);
}
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xffbf);
plp->plp_txpwridx = -1;
}
if (mac->mac_phy.rev >= 2) {
if (mode == BWN_PHYLP_TXPCTL_ON_HW)
BWN_PHY_SET(mac, BWN_PHY_OFDM(0xd0), 0x2);
else
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xd0), 0xfffd);
}
/* write the TX power control mode */
switch (plp->plp_txpctlmode) {
case BWN_PHYLP_TXPCTL_OFF:
ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF;
break;
case BWN_PHYLP_TXPCTL_ON_HW:
ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_HW;
break;
case BWN_PHYLP_TXPCTL_ON_SW:
ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_SW;
break;
default:
ctl = 0;
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD,
(uint16_t)~BWN_PHY_TX_PWR_CTL_CMD_MODE, ctl);
}
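/*
 * Workaround used on rev 0 LP PHYs when TX power control is enabled:
 * save the power-control state and a 256-entry PHY table, redo the
 * table/baseband/power-control initialization, then restore the saved
 * state, including the per-index TX gain settings.
 */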
static void
bwn_phy_lp_bugfix(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
const unsigned int size = 256;
struct bwn_txgain tg;
uint32_t rxcomp, txgain, coeff, rfpwr, *tabs;
uint16_t tssinpt, tssiidx, value[2];
uint8_t mode;
int8_t txpwridx;
- tabs = (uint32_t *)mallocarray(size, sizeof(uint32_t), M_DEVBUF,
+ tabs = (uint32_t *)malloc(sizeof(uint32_t) * size, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (tabs == NULL) {
device_printf(sc->sc_dev, "failed to allocate buffer.\n");
return;
}
bwn_phy_lp_get_txpctlmode(mac);
mode = plp->plp_txpctlmode;
txpwridx = plp->plp_txpwridx;
tssinpt = plp->plp_tssinpt;
tssiidx = plp->plp_tssiidx;
bwn_tab_read_multi(mac,
(mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) :
BWN_TAB_4(7, 0x140), size, tabs);
bwn_phy_lp_tblinit(mac);
bwn_phy_lp_bbinit(mac);
bwn_phy_lp_txpctl_init(mac);
bwn_phy_lp_rf_onoff(mac, 1);
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
bwn_tab_write_multi(mac,
(mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) :
BWN_TAB_4(7, 0x140), size, tabs);
BWN_WRITE_2(mac, BWN_CHANNEL, plp->plp_chan);
plp->plp_tssinpt = tssinpt;
plp->plp_tssiidx = tssiidx;
bwn_phy_lp_set_anafilter(mac, plp->plp_chan);
if (txpwridx != -1) {
/* set TX power by index */
plp->plp_txpwridx = txpwridx;
bwn_phy_lp_get_txpctlmode(mac);
if (plp->plp_txpctlmode != BWN_PHYLP_TXPCTL_OFF)
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_ON_SW);
if (mac->mac_phy.rev >= 2) {
rxcomp = bwn_tab_read(mac,
BWN_TAB_4(7, txpwridx + 320));
txgain = bwn_tab_read(mac,
BWN_TAB_4(7, txpwridx + 192));
tg.tg_pad = (txgain >> 16) & 0xff;
tg.tg_gm = txgain & 0xff;
tg.tg_pga = (txgain >> 8) & 0xff;
tg.tg_dac = (rxcomp >> 28) & 0xff;
bwn_phy_lp_set_txgain(mac, &tg);
} else {
rxcomp = bwn_tab_read(mac,
BWN_TAB_4(10, txpwridx + 320));
txgain = bwn_tab_read(mac,
BWN_TAB_4(10, txpwridx + 192));
BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL,
0xf800, (txgain >> 4) & 0x7fff);
bwn_phy_lp_set_txgain_dac(mac, txgain & 0x7);
bwn_phy_lp_set_txgain_pa(mac, (txgain >> 24) & 0x7f);
}
bwn_phy_lp_set_bbmult(mac, (rxcomp >> 20) & 0xff);
/* set TX IQCC */
value[0] = (rxcomp >> 10) & 0x3ff;
value[1] = rxcomp & 0x3ff;
bwn_tab_write_multi(mac, BWN_TAB_2(0, 80), 2, value);
coeff = bwn_tab_read(mac,
(mac->mac_phy.rev >= 2) ? BWN_TAB_4(7, txpwridx + 448) :
BWN_TAB_4(10, txpwridx + 448));
bwn_tab_write(mac, BWN_TAB_2(0, 85), coeff & 0xffff);
if (mac->mac_phy.rev >= 2) {
rfpwr = bwn_tab_read(mac,
BWN_TAB_4(7, txpwridx + 576));
BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00,
rfpwr & 0xffff);
}
bwn_phy_lp_set_txgain_override(mac);
}
if (plp->plp_rccap)
bwn_phy_lp_set_rccap(mac);
bwn_phy_lp_set_antenna(mac, plp->plp_antenna);
bwn_phy_lp_set_txpctlmode(mac, mode);
free(tabs, M_DEVBUF);
}
static void
bwn_phy_lp_digflt_restore(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
int i;
static const uint16_t addr[] = {
BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2),
BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4),
BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6),
BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8),
BWN_PHY_OFDM(0xcf),
};
for (i = 0; i < N(addr); i++)
BWN_PHY_WRITE(mac, addr[i], plp->plp_digfilt[i]);
}
static void
bwn_phy_lp_tblinit(struct bwn_mac *mac)
{
uint32_t freq = ieee80211_ieee2mhz(bwn_phy_lp_get_default_chan(mac), 0);
if (mac->mac_phy.rev < 2) {
bwn_phy_lp_tblinit_r01(mac);
bwn_phy_lp_tblinit_txgain(mac);
bwn_phy_lp_set_gaintbl(mac, freq);
return;
}
bwn_phy_lp_tblinit_r2(mac);
bwn_phy_lp_tblinit_txgain(mac);
}
struct bwn_wpair {
uint16_t reg;
uint16_t value;
};
struct bwn_smpair {
uint16_t offset;
uint16_t mask;
uint16_t set;
};
static void
bwn_phy_lp_bbinit_r2(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_wpair v1[] = {
{ BWN_PHY_AFE_DAC_CTL, 0x50 },
{ BWN_PHY_AFE_CTL, 0x8800 },
{ BWN_PHY_AFE_CTL_OVR, 0 },
{ BWN_PHY_AFE_CTL_OVRVAL, 0 },
{ BWN_PHY_RF_OVERRIDE_0, 0 },
{ BWN_PHY_RF_OVERRIDE_2, 0 },
{ BWN_PHY_OFDM(0xf9), 0 },
{ BWN_PHY_TR_LOOKUP_1, 0 }
};
static const struct bwn_smpair v2[] = {
{ BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0xb4 },
{ BWN_PHY_DCOFFSETTRANSIENT, 0xf8ff, 0x200 },
{ BWN_PHY_DCOFFSETTRANSIENT, 0xff00, 0x7f },
{ BWN_PHY_GAINDIRECTMISMATCH, 0xff0f, 0x40 },
{ BWN_PHY_PREAMBLECONFIRMTO, 0xff00, 0x2 }
};
static const struct bwn_smpair v3[] = {
{ BWN_PHY_OFDM(0xfe), 0xffe0, 0x1f },
{ BWN_PHY_OFDM(0xff), 0xffe0, 0xc },
{ BWN_PHY_OFDM(0x100), 0xff00, 0x19 },
{ BWN_PHY_OFDM(0xff), 0x03ff, 0x3c00 },
{ BWN_PHY_OFDM(0xfe), 0xfc1f, 0x3e0 },
{ BWN_PHY_OFDM(0xff), 0xffe0, 0xc },
{ BWN_PHY_OFDM(0x100), 0x00ff, 0x1900 },
{ BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800 },
{ BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x12 },
{ BWN_PHY_GAINMISMATCH, 0x0fff, 0x9000 },
};
int i;
for (i = 0; i < N(v1); i++)
BWN_PHY_WRITE(mac, v1[i].reg, v1[i].value);
BWN_PHY_SET(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x10);
for (i = 0; i < N(v2); i++)
BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set);
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x4000);
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x2000);
BWN_PHY_SET(mac, BWN_PHY_OFDM(0x10a), 0x1);
if (siba_get_pci_revid(sc->sc_dev) >= 0x18) {
bwn_tab_write(mac, BWN_TAB_4(17, 65), 0xec);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x14);
} else {
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x10);
}
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0xff00, 0xf4);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0x00ff, 0xf100);
BWN_PHY_WRITE(mac, BWN_PHY_CLIPTHRESH, 0x48);
BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0xff00, 0x46);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe4), 0xff00, 0x10);
BWN_PHY_SETMASK(mac, BWN_PHY_PWR_THRESH1, 0xfff0, 0x9);
BWN_PHY_MASK(mac, BWN_PHY_GAINDIRECTMISMATCH, ~0xf);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5500);
BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0xa0);
BWN_PHY_SETMASK(mac, BWN_PHY_GAINDIRECTMISMATCH, 0xe0ff, 0x300);
BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2a00);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xa);
} else {
BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x1e00);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xd);
}
for (i = 0; i < N(v3); i++)
BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
bwn_tab_write(mac, BWN_TAB_2(0x08, 0x14), 0);
bwn_tab_write(mac, BWN_TAB_2(0x08, 0x12), 0x40);
}
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x40);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0xb00);
BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x6);
BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0x9d00);
BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0xff00, 0xa1);
BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff);
} else
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x40);
BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0xff00, 0xb3);
BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00);
BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset);
BWN_PHY_SET(mac, BWN_PHY_RESET_CTL, 0x44);
BWN_PHY_WRITE(mac, BWN_PHY_RESET_CTL, 0x80);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, 0xa954);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_1,
0x2000 | ((uint16_t)plp->plp_rssigs << 10) |
((uint16_t)plp->plp_rssivc << 4) | plp->plp_rssivf);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
BWN_PHY_SET(mac, BWN_PHY_AFE_ADC_CTL_0, 0x1c);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_CTL, 0x00ff, 0x8800);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_1, 0xfc3c, 0x0400);
}
bwn_phy_lp_digflt_save(mac);
}
static void
bwn_phy_lp_bbinit_r01(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_smpair v1[] = {
{ BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x0005 },
{ BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0x0180 },
{ BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x3c00 },
{ BWN_PHY_GAINDIRECTMISMATCH, 0xfff0, 0x0005 },
{ BWN_PHY_GAIN_MISMATCH_LIMIT, 0xffc0, 0x001a },
{ BWN_PHY_CRS_ED_THRESH, 0xff00, 0x00b3 },
{ BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00 }
};
static const struct bwn_smpair v2[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_1, 0x3f00, 0x0900 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0400 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_5, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_5, 0xc0ff, 0x0900 },
{ BWN_PHY_TR_LOOKUP_6, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_6, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_7, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_7, 0xc0ff, 0x0900 },
{ BWN_PHY_TR_LOOKUP_8, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_8, 0xc0ff, 0x0b00 }
};
static const struct bwn_smpair v3[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0001 },
{ BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0400 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0001 },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0500 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0800 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0a00 }
};
static const struct bwn_smpair v4[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0004 },
{ BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0800 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0004 },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0c00 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0100 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0300 }
};
static const struct bwn_smpair v5[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0900 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0006 },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0500 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0006 },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0700 }
};
int i;
uint16_t tmp, tmp2;
BWN_PHY_MASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf7ff);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL, 0);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, 0);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, 0);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0);
BWN_PHY_SET(mac, BWN_PHY_AFE_DAC_CTL, 0x0004);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0x0078);
BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800);
BWN_PHY_WRITE(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x0016);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_0, 0xfff8, 0x0004);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5400);
BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2400);
BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0x0006);
BWN_PHY_MASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfffe);
for (i = 0; i < N(v1); i++)
BWN_PHY_SETMASK(mac, v1[i].offset, v1[i].mask, v1[i].set);
BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB,
0xff00, plp->plp_rxpwroffset);
if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) &&
((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ||
(siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF))) {
siba_cc_pmu_set_ldovolt(sc->sc_dev, SIBA_LDO_PAREF, 0x28);
siba_cc_pmu_set_ldoparef(sc->sc_dev, 1);
if (mac->mac_phy.rev == 0)
BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT,
0xffcf, 0x0010);
bwn_tab_write(mac, BWN_TAB_2(11, 7), 60);
} else {
siba_cc_pmu_set_ldoparef(sc->sc_dev, 0);
BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0020);
bwn_tab_write(mac, BWN_TAB_2(11, 7), 100);
}
tmp = plp->plp_rssivf | plp->plp_rssivc << 4 | 0xa000;
BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, tmp);
if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_RSSIINV)
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x0aaa);
else
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x02aa);
bwn_tab_write(mac, BWN_TAB_2(11, 1), 24);
BWN_PHY_SETMASK(mac, BWN_PHY_RX_RADIO_CTL,
0xfff9, (plp->plp_bxarch << 1));
if (mac->mac_phy.rev == 1 &&
(siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT)) {
for (i = 0; i < N(v2); i++)
BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask,
v2[i].set);
} else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) ||
(siba_get_pci_subdevice(sc->sc_dev) == 0x048a) ||
((mac->mac_phy.rev == 0) &&
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM))) {
for (i = 0; i < N(v3); i++)
BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask,
v3[i].set);
} else if (mac->mac_phy.rev == 1 ||
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM)) {
for (i = 0; i < N(v4); i++)
BWN_PHY_SETMASK(mac, v4[i].offset, v4[i].mask,
v4[i].set);
} else {
for (i = 0; i < N(v5); i++)
BWN_PHY_SETMASK(mac, v5[i].offset, v5[i].mask,
v5[i].set);
}
if (mac->mac_phy.rev == 1 &&
(siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF)) {
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_5, BWN_PHY_TR_LOOKUP_1);
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_6, BWN_PHY_TR_LOOKUP_2);
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_7, BWN_PHY_TR_LOOKUP_3);
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_8, BWN_PHY_TR_LOOKUP_4);
}
if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT) &&
(siba_get_chipid(sc->sc_dev) == 0x5354) &&
(siba_get_chippkg(sc->sc_dev) == SIBA_CHIPPACK_BCM4712S)) {
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0006);
BWN_PHY_WRITE(mac, BWN_PHY_GPIO_SELECT, 0x0005);
BWN_PHY_WRITE(mac, BWN_PHY_GPIO_OUTEN, 0xffff);
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_PR45960W);
}
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x8000);
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0040);
BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0xa400);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0x0b00);
BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x0007);
BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xfff8, 0x0003);
BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xffc7, 0x0020);
BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff);
} else {
BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0x7fff);
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xffbf);
}
if (mac->mac_phy.rev == 1) {
tmp = BWN_PHY_READ(mac, BWN_PHY_CLIPCTRTHRESH);
tmp2 = (tmp & 0x03e0) >> 5;
tmp2 |= tmp2 << 5;
BWN_PHY_WRITE(mac, BWN_PHY_4C3, tmp2);
tmp = BWN_PHY_READ(mac, BWN_PHY_GAINDIRECTMISMATCH);
tmp2 = (tmp & 0x1f00) >> 8;
tmp2 |= tmp2 << 5;
BWN_PHY_WRITE(mac, BWN_PHY_4C4, tmp2);
tmp = BWN_PHY_READ(mac, BWN_PHY_VERYLOWGAINDB);
tmp2 = tmp & 0x00ff;
tmp2 |= tmp << 8;
BWN_PHY_WRITE(mac, BWN_PHY_4C5, tmp2);
}
}
struct bwn_b2062_freq {
uint16_t freq;
uint8_t value[6];
};
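/*
 * B2062 radio init: the CALC_CTL* macros derive RF PLL control values
 * from the PMU crystal frequency and the reference divider, and the
 * freqdata_tab entry is selected by the resulting reference frequency
 * (in kHz).
 */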
static void
bwn_phy_lp_b2062_init(struct bwn_mac *mac)
{
#define CALC_CTL7(freq, div) \
(((800000000 * (div) + (freq)) / (2 * (freq)) - 8) & 0xff)
#define CALC_CTL18(freq, div) \
((((100 * (freq) + 16000000 * (div)) / (32000000 * (div))) - 1) & 0xff)
#define CALC_CTL19(freq, div) \
((((2 * (freq) + 1000000 * (div)) / (2000000 * (div))) - 1) & 0xff)
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_b2062_freq freqdata_tab[] = {
{ 12000, { 6, 6, 6, 6, 10, 6 } },
{ 13000, { 4, 4, 4, 4, 11, 7 } },
{ 14400, { 3, 3, 3, 3, 12, 7 } },
{ 16200, { 3, 3, 3, 3, 13, 8 } },
{ 18000, { 2, 2, 2, 2, 14, 8 } },
{ 19200, { 1, 1, 1, 1, 14, 9 } }
};
static const struct bwn_wpair v1[] = {
{ BWN_B2062_N_TXCTL3, 0 },
{ BWN_B2062_N_TXCTL4, 0 },
{ BWN_B2062_N_TXCTL5, 0 },
{ BWN_B2062_N_TXCTL6, 0 },
{ BWN_B2062_N_PDNCTL0, 0x40 },
{ BWN_B2062_N_PDNCTL0, 0 },
{ BWN_B2062_N_CALIB_TS, 0x10 },
{ BWN_B2062_N_CALIB_TS, 0 }
};
const struct bwn_b2062_freq *f = NULL;
uint32_t xtalfreq, ref;
unsigned int i;
bwn_phy_lp_b2062_tblinit(mac);
for (i = 0; i < N(v1); i++)
BWN_RF_WRITE(mac, v1[i].reg, v1[i].value);
if (mac->mac_phy.rev > 0)
BWN_RF_WRITE(mac, BWN_B2062_S_BG_CTL1,
(BWN_RF_READ(mac, BWN_B2062_N_COM2) >> 1) | 0x80);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
BWN_RF_SET(mac, BWN_B2062_N_TSSI_CTL0, 0x1);
else
BWN_RF_MASK(mac, BWN_B2062_N_TSSI_CTL0, ~0x1);
KASSERT(siba_get_cc_caps(sc->sc_dev) & SIBA_CC_CAPS_PMU,
("%s:%d: fail", __func__, __LINE__));
xtalfreq = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
KASSERT(xtalfreq != 0, ("%s:%d: fail", __func__, __LINE__));
if (xtalfreq <= 30000000) {
plp->plp_div = 1;
BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL1, 0xfffb);
} else {
plp->plp_div = 2;
BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL1, 0x4);
}
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL7,
CALC_CTL7(xtalfreq, plp->plp_div));
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL18,
CALC_CTL18(xtalfreq, plp->plp_div));
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL19,
CALC_CTL19(xtalfreq, plp->plp_div));
ref = (1000 * plp->plp_div + 2 * xtalfreq) / (2000 * plp->plp_div);
ref &= 0xffff;
for (i = 0; i < N(freqdata_tab); i++) {
if (ref < freqdata_tab[i].freq) {
f = &freqdata_tab[i];
break;
}
}
if (f == NULL)
f = &freqdata_tab[N(freqdata_tab) - 1];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL8,
((uint16_t)(f->value[1]) << 4) | f->value[0]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL9,
((uint16_t)(f->value[3]) << 4) | f->value[2]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL10, f->value[4]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL11, f->value[5]);
#undef CALC_CTL7
#undef CALC_CTL18
#undef CALC_CTL19
}
static void
bwn_phy_lp_b2063_init(struct bwn_mac *mac)
{
bwn_phy_lp_b2063_tblinit(mac);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_SP5, 0);
BWN_RF_SET(mac, BWN_B2063_COM8, 0x38);
BWN_RF_WRITE(mac, BWN_B2063_REG_SP1, 0x56);
BWN_RF_MASK(mac, BWN_B2063_RX_BB_CTL2, ~0x2);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP6, 0x20);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP9, 0x40);
if (mac->mac_phy.rev == 2) {
BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0xa0);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP4, 0xa0);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x18);
} else {
BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0x20);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x20);
}
}
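/*
 * RC calibration for rev >= 2 (B2063): issue the RX and TX RC
 * calibration command sequences and poll BWN_B2063_RC_CALIB_CTL6 for
 * completion, restoring the saved register value on timeout.
 */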
static void
bwn_phy_lp_rxcal_r2(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
static const struct bwn_wpair v1[] = {
{ BWN_B2063_RX_BB_SP8, 0x0 },
{ BWN_B2063_RC_CALIB_CTL1, 0x7e },
{ BWN_B2063_RC_CALIB_CTL1, 0x7c },
{ BWN_B2063_RC_CALIB_CTL2, 0x15 },
{ BWN_B2063_RC_CALIB_CTL3, 0x70 },
{ BWN_B2063_RC_CALIB_CTL4, 0x52 },
{ BWN_B2063_RC_CALIB_CTL5, 0x1 },
{ BWN_B2063_RC_CALIB_CTL1, 0x7d }
};
static const struct bwn_wpair v2[] = {
{ BWN_B2063_TX_BB_SP3, 0x0 },
{ BWN_B2063_RC_CALIB_CTL1, 0x7e },
{ BWN_B2063_RC_CALIB_CTL1, 0x7c },
{ BWN_B2063_RC_CALIB_CTL2, 0x55 },
{ BWN_B2063_RC_CALIB_CTL3, 0x76 }
};
uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
int i;
uint8_t tmp;
tmp = BWN_RF_READ(mac, BWN_B2063_RX_BB_SP8) & 0xff;
for (i = 0; i < 2; i++)
BWN_RF_WRITE(mac, v1[i].reg, v1[i].value);
BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, 0xf7);
for (i = 2; i < N(v1); i++)
BWN_RF_WRITE(mac, v1[i].reg, v1[i].value);
for (i = 0; i < 10000; i++) {
if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)
break;
DELAY(1000);
}
if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2))
BWN_RF_WRITE(mac, BWN_B2063_RX_BB_SP8, tmp);
tmp = BWN_RF_READ(mac, BWN_B2063_TX_BB_SP3) & 0xff;
for (i = 0; i < N(v2); i++)
BWN_RF_WRITE(mac, v2[i].reg, v2[i].value);
if (freqxtal == 24000000) {
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0xfc);
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x0);
} else {
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0x13);
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x1);
}
BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0x7d);
for (i = 0; i < 10000; i++) {
if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)
break;
DELAY(1000);
}
if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2))
BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, tmp);
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL1, 0x7e);
}
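/*
 * RC calibration for rev 0/1 (B2062): with carrier sense disabled and
 * the RF path set up for loopback, sweep BWN_B2062_N_RXBB_CALIB2 over
 * 128..159, measure the received I/Q power for a series of DDFS test
 * tones and keep the setting with the smallest squared error against
 * the reference power table.
 */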
static void
bwn_phy_lp_rccal_r12(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy_lp_iq_est ie;
struct bwn_txgain tx_gains;
static const uint32_t pwrtbl[21] = {
0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64,
0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35,
0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088,
0x0004c, 0x0002c, 0x0001a,
};
uint32_t npwr, ipwr, sqpwr, tmp;
int loopback, i, j, sum, error;
uint16_t save[7];
uint8_t txo, bbmult, txpctlmode;
error = bwn_phy_lp_switch_channel(mac, 7);
if (error)
device_printf(sc->sc_dev,
"failed to change channel to 7 (%d)\n", error);
txo = (BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40) ? 1 : 0;
bbmult = bwn_phy_lp_get_bbmult(mac);
if (txo)
tx_gains = bwn_phy_lp_get_txgain(mac);
save[0] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_0);
save[1] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_VAL_0);
save[2] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR);
save[3] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVRVAL);
save[4] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2);
save[5] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2_VAL);
save[6] = BWN_PHY_READ(mac, BWN_PHY_LP_PHY_CTL);
bwn_phy_lp_get_txpctlmode(mac);
txpctlmode = plp->plp_txpctlmode;
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
/* disable CRS */
bwn_phy_lp_set_deaf(mac, 1);
bwn_phy_lp_set_trsw_over(mac, 0, 1);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffb);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x4);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x10);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffbf);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x7);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x38);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x100);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff);
BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL0, 0);
BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL1, 1);
BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL2, 0x20);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff);
BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0);
BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45af);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0x3ff);
loopback = bwn_phy_lp_loopback(mac);
if (loopback == -1)
goto done;
bwn_phy_lp_set_rxgain_idx(mac, loopback);
BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xffbf, 0x40);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfff8, 0x1);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xffc7, 0x8);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f, 0xc0);
tmp = 0;
memset(&ie, 0, sizeof(ie));
for (i = 128; i <= 159; i++) {
BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, i);
sum = 0;
for (j = 5; j <= 25; j++) {
bwn_phy_lp_ddfs_turnon(mac, 1, 1, j, j, 0);
if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie)))
goto done;
sqpwr = ie.ie_ipwr + ie.ie_qpwr;
ipwr = ((pwrtbl[j - 5] >> 3) + 1) >> 1;
npwr = bwn_phy_lp_roundup(sqpwr, (j == 5) ? sqpwr : 0,
12);
sum += ((ipwr - npwr) * (ipwr - npwr));
if ((i == 128) || (sum < tmp)) {
plp->plp_rccap = i;
tmp = sum;
}
}
}
bwn_phy_lp_ddfs_turnoff(mac);
done:
/* restore CRS */
bwn_phy_lp_clear_deaf(mac, 1);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xff80);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfc00);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_VAL_0, save[1]);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, save[0]);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVRVAL, save[3]);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, save[2]);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2_VAL, save[5]);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, save[4]);
BWN_PHY_WRITE(mac, BWN_PHY_LP_PHY_CTL, save[6]);
bwn_phy_lp_set_bbmult(mac, bbmult);
if (txo)
bwn_phy_lp_set_txgain(mac, &tx_gains);
bwn_phy_lp_set_txpctlmode(mac, txpctlmode);
if (plp->plp_rccap)
bwn_phy_lp_set_rccap(mac);
}
static void
bwn_phy_lp_set_rccap(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
uint8_t rc_cap = (plp->plp_rccap & 0x1f) >> 1;
if (mac->mac_phy.rev == 1)
rc_cap = MIN(rc_cap + 5, 15);
BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2,
MAX(plp->plp_rccap - 4, 0x80));
BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, rc_cap | 0x80);
BWN_RF_WRITE(mac, BWN_B2062_S_RXG_CNT16,
((plp->plp_rccap & 0x1f) >> 2) | 0x80);
}
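/*
 * Fixed-point division helper: returns value / div scaled by 2^pre and
 * rounded to the nearest integer, or 0 when div is 0.
 */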
static uint32_t
bwn_phy_lp_roundup(uint32_t value, uint32_t div, uint8_t pre)
{
uint32_t i, q, r;
if (div == 0)
return (0);
for (i = 0, q = value / div, r = value % div; i < pre; i++) {
q <<= 1;
if (r << 1 >= div) {
q++;
r = (r << 1) - div;
}
}
if (r << 1 >= div)
q++;
return (q);
}
static void
bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0xff);
DELAY(20);
if (siba_get_chipid(sc->sc_dev) == 0x5354) {
BWN_RF_WRITE(mac, BWN_B2062_N_COM1, 4);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 4);
} else {
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0);
}
DELAY(5);
}
static void
bwn_phy_lp_b2062_vco_calib(struct bwn_mac *mac)
{
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x42);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x62);
DELAY(200);
}
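/*
 * Write the initial B2062 register values; each table entry carries a
 * 5 GHz (FLAG_A) and a 2 GHz (FLAG_G) value and only the one matching
 * the current band is programmed.  The B2063 variant below works the
 * same way.
 */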
static void
bwn_phy_lp_b2062_tblinit(struct bwn_mac *mac)
{
#define FLAG_A 0x01
#define FLAG_G 0x02
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_b206x_rfinit_entry bwn_b2062_init_tab[] = {
{ BWN_B2062_N_COM4, 0x1, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_PDNCTL1, 0x0, 0xca, FLAG_G, },
{ BWN_B2062_N_PDNCTL3, 0x0, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_PDNCTL4, 0x15, 0x2a, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENC, 0xDB, 0xff, FLAG_A, },
{ BWN_B2062_N_LGENATUNE0, 0xdd, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENATUNE2, 0xdd, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENATUNE3, 0x77, 0xB5, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENACTL3, 0x0, 0xff, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENACTL7, 0x33, 0x33, FLAG_A | FLAG_G, },
{ BWN_B2062_N_RXA_CTL1, 0x0, 0x0, FLAG_G, },
{ BWN_B2062_N_RXBB_CTL0, 0x82, 0x80, FLAG_A | FLAG_G, },
{ BWN_B2062_N_RXBB_GAIN1, 0x4, 0x4, FLAG_A | FLAG_G, },
{ BWN_B2062_N_RXBB_GAIN2, 0x0, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_TXCTL4, 0x3, 0x3, FLAG_A | FLAG_G, },
{ BWN_B2062_N_TXCTL5, 0x2, 0x2, FLAG_A | FLAG_G, },
{ BWN_B2062_N_TX_TUNE, 0x88, 0x1b, FLAG_A | FLAG_G, },
{ BWN_B2062_S_COM4, 0x1, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_S_PDS_CTL0, 0xff, 0xff, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL0, 0xf8, 0xd8, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL1, 0x3c, 0x24, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL8, 0x88, 0x80, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL10, 0x88, 0x80, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL0, 0x98, 0x98, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL1, 0x10, 0x10, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL5, 0x43, 0x43, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL6, 0x47, 0x47, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL7, 0xc, 0xc, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL8, 0x11, 0x11, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL9, 0x11, 0x11, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL10, 0xe, 0xe, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL11, 0x8, 0x8, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL12, 0x33, 0x33, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL13, 0xa, 0xa, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL14, 0x6, 0x6, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL18, 0x3e, 0x3e, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL19, 0x13, 0x13, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL21, 0x62, 0x62, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL22, 0x7, 0x7, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL23, 0x16, 0x16, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL24, 0x5c, 0x5c, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL25, 0x95, 0x95, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL30, 0xa0, 0xa0, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL31, 0x4, 0x4, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL33, 0xcc, 0xcc, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL34, 0x7, 0x7, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RXG_CNT8, 0xf, 0xf, FLAG_A, },
};
const struct bwn_b206x_rfinit_entry *br;
unsigned int i;
for (i = 0; i < N(bwn_b2062_init_tab); i++) {
br = &bwn_b2062_init_tab[i];
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
if (br->br_flags & FLAG_G)
BWN_RF_WRITE(mac, br->br_offset, br->br_valueg);
} else {
if (br->br_flags & FLAG_A)
BWN_RF_WRITE(mac, br->br_offset, br->br_valuea);
}
}
#undef FLAG_A
#undef FLAG_G
}
static void
bwn_phy_lp_b2063_tblinit(struct bwn_mac *mac)
{
#define FLAG_A 0x01
#define FLAG_G 0x02
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_b206x_rfinit_entry bwn_b2063_init_tab[] = {
{ BWN_B2063_COM1, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM10, 0x1, 0x0, FLAG_A, },
{ BWN_B2063_COM16, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM17, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM18, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM19, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM20, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM21, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM22, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM23, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM24, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_LOGEN_SP1, 0xe8, 0xd4, FLAG_A | FLAG_G, },
{ BWN_B2063_LOGEN_SP2, 0xa7, 0x53, FLAG_A | FLAG_G, },
{ BWN_B2063_LOGEN_SP4, 0xf0, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_SP1, 0x1f, 0x5e, FLAG_G, },
{ BWN_B2063_G_RX_SP2, 0x7f, 0x7e, FLAG_G, },
{ BWN_B2063_G_RX_SP3, 0x30, 0xf0, FLAG_G, },
{ BWN_B2063_G_RX_SP7, 0x7f, 0x7f, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_SP10, 0xc, 0xc, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_SP1, 0x3c, 0x3f, FLAG_A, },
{ BWN_B2063_A_RX_SP2, 0xfc, 0xfe, FLAG_A, },
{ BWN_B2063_A_RX_SP7, 0x8, 0x8, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_BB_SP4, 0x60, 0x60, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_BB_SP8, 0x30, 0x30, FLAG_A | FLAG_G, },
{ BWN_B2063_TX_RF_SP3, 0xc, 0xb, FLAG_A | FLAG_G, },
{ BWN_B2063_TX_RF_SP4, 0x10, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_PA_SP1, 0x3d, 0xfd, FLAG_A | FLAG_G, },
{ BWN_B2063_TX_BB_SP1, 0x2, 0x2, FLAG_A | FLAG_G, },
{ BWN_B2063_BANDGAP_CTL1, 0x56, 0x56, FLAG_A | FLAG_G, },
{ BWN_B2063_JTAG_VCO2, 0xF7, 0xF7, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_MIX3, 0x71, 0x71, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_MIX4, 0x71, 0x71, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_1ST2, 0xf0, 0x30, FLAG_A, },
{ BWN_B2063_A_RX_PS6, 0x77, 0x77, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_MIX4, 0x3, 0x3, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_MIX5, 0xf, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_MIX6, 0xf, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_TIA_CTL1, 0x77, 0x77, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_TIA_CTL3, 0x77, 0x77, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_BB_CTL2, 0x4, 0x4, FLAG_A | FLAG_G, },
{ BWN_B2063_PA_CTL1, 0x0, 0x4, FLAG_A, },
{ BWN_B2063_VREG_CTL1, 0x3, 0x3, FLAG_A | FLAG_G, },
};
const struct bwn_b206x_rfinit_entry *br;
unsigned int i;
for (i = 0; i < N(bwn_b2063_init_tab); i++) {
br = &bwn_b2063_init_tab[i];
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
if (br->br_flags & FLAG_G)
BWN_RF_WRITE(mac, br->br_offset, br->br_valueg);
} else {
if (br->br_flags & FLAG_A)
BWN_RF_WRITE(mac, br->br_offset, br->br_valuea);
}
}
#undef FLAG_A
#undef FLAG_G
}
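/*
 * Bulk PHY table access: typenoffset encodes the element width (8, 16
 * or 32 bit) and the starting offset.  After the offset is latched in
 * BWN_PHY_TABLE_ADDR, consecutive entries are streamed through
 * BWN_PHY_TABLEDATALO (and BWN_PHY_TABLEDATAHI for the upper half of
 * 32-bit entries).  bwn_tab_write_multi() below is the mirror image.
 */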
static void
bwn_tab_read_multi(struct bwn_mac *mac, uint32_t typenoffset,
int count, void *_data)
{
unsigned int i;
uint32_t offset, type;
uint8_t *data = _data;
type = BWN_TAB_GETTYPE(typenoffset);
offset = BWN_TAB_GETOFFSET(typenoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
for (i = 0; i < count; i++) {
switch (type) {
case BWN_TAB_8BIT:
*data = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff;
data++;
break;
case BWN_TAB_16BIT:
*((uint16_t *)data) = BWN_PHY_READ(mac,
BWN_PHY_TABLEDATALO);
data += 2;
break;
case BWN_TAB_32BIT:
*((uint32_t *)data) = BWN_PHY_READ(mac,
BWN_PHY_TABLEDATAHI);
*((uint32_t *)data) <<= 16;
*((uint32_t *)data) |= BWN_PHY_READ(mac,
BWN_PHY_TABLEDATALO);
data += 4;
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
}
static void
bwn_tab_write_multi(struct bwn_mac *mac, uint32_t typenoffset,
int count, const void *_data)
{
uint32_t offset, type, value;
const uint8_t *data = _data;
unsigned int i;
type = BWN_TAB_GETTYPE(typenoffset);
offset = BWN_TAB_GETOFFSET(typenoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
for (i = 0; i < count; i++) {
switch (type) {
case BWN_TAB_8BIT:
value = *data;
data++;
KASSERT(!(value & ~0xff),
("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_16BIT:
value = *((const uint16_t *)data);
data += 2;
KASSERT(!(value & ~0xffff),
("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_32BIT:
value = *((const uint32_t *)data);
data += 4;
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
}
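/*
 * Read back the current TX gain override: the DAC bits come from
 * BWN_PHY_AFE_DAC_CTL, while GM/PGA/PAD are packed into
 * BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL (and BWN_PHY_OFDM(0xfb) for the PAD
 * value on rev >= 2).
 */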
static struct bwn_txgain
bwn_phy_lp_get_txgain(struct bwn_mac *mac)
{
struct bwn_txgain tg;
uint16_t tmp;
tg.tg_dac = (BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0x380) >> 7;
if (mac->mac_phy.rev < 2) {
tmp = BWN_PHY_READ(mac,
BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7ff;
tg.tg_gm = tmp & 0x0007;
tg.tg_pga = (tmp & 0x0078) >> 3;
tg.tg_pad = (tmp & 0x780) >> 7;
return (tg);
}
tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL);
tg.tg_pad = BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0xff;
tg.tg_gm = tmp & 0xff;
tg.tg_pga = (tmp >> 8) & 0xff;
return (tg);
}
static uint8_t
bwn_phy_lp_get_bbmult(struct bwn_mac *mac)
{
return ((bwn_tab_read(mac, BWN_TAB_2(0, 87)) & 0xff00) >> 8);
}
static void
bwn_phy_lp_set_txgain(struct bwn_mac *mac, struct bwn_txgain *tg)
{
uint16_t pa;
if (mac->mac_phy.rev < 2) {
BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800,
(tg->tg_pad << 7) | (tg->tg_pga << 3) | tg->tg_gm);
bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
bwn_phy_lp_set_txgain_override(mac);
return;
}
pa = bwn_phy_lp_get_pa_gain(mac);
BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL,
(tg->tg_pga << 8) | tg->tg_gm);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0x8000,
tg->tg_pad | (pa << 6));
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xfc), (tg->tg_pga << 8) | tg->tg_gm);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x8000,
tg->tg_pad | (pa << 8));
bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
bwn_phy_lp_set_txgain_override(mac);
}
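/*
 * Store the baseband multiplier in the high byte of table 0, entry 87.
 */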
static void
bwn_phy_lp_set_bbmult(struct bwn_mac *mac, uint8_t bbmult)
{
bwn_tab_write(mac, BWN_TAB_2(0, 87), (uint16_t)bbmult << 8);
}
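/*
 * Force the TX/RX switch state through the RF override registers.
 */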
static void
bwn_phy_lp_set_trsw_over(struct bwn_mac *mac, uint8_t tx, uint8_t rx)
{
uint16_t trsw = (tx << 1) | rx;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffc, trsw);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x3);
}
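/*
 * Force an RX gain through the RF override registers.  The packed
 * gain word layout differs between PHY revisions < 2 and >= 2, and
 * the 2 GHz path carries extra external-LNA control bits.
 */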
static void
bwn_phy_lp_set_rxgain(struct bwn_mac *mac, uint32_t gain)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint16_t ext_lna, high_gain, lna, low_gain, trsw, tmp;
if (mac->mac_phy.rev < 2) {
trsw = gain & 0x1;
lna = (gain & 0xfffc) | ((gain & 0xc) >> 2);
ext_lna = (gain & 2) >> 1;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xfbff, ext_lna << 10);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xf7ff, ext_lna << 11);
BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, lna);
} else {
low_gain = gain & 0xffff;
high_gain = (gain >> 16) & 0xf;
ext_lna = (gain >> 21) & 0x1;
trsw = ~(gain >> 20) & 0x1;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xfdff, ext_lna << 9);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xfbff, ext_lna << 10);
BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff0, high_gain);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
tmp = (gain >> 2) & 0x3;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xe7ff, tmp << 11);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe6), 0xffe7,
tmp << 3);
}
}
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40);
if (mac->mac_phy.rev >= 2) {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x400);
BWN_PHY_SET(mac, BWN_PHY_OFDM(0xe5), 0x8);
}
return;
}
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x200);
}
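/*
 * Make the PHY deaf by disabling carrier sense; separate flags record
 * whether the user or the driver requested it.
 */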
static void
bwn_phy_lp_set_deaf(struct bwn_mac *mac, uint8_t user)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
if (user)
plp->plp_crsusr_off = 1;
else
plp->plp_crssys_off = 1;
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x80);
}
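/*
 * Re-enable carrier sense once neither the user nor the driver wants
 * the PHY deaf; the CRS mode written depends on the current band.
 */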
static void
bwn_phy_lp_clear_deaf(struct bwn_mac *mac, uint8_t user)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
if (user)
plp->plp_crsusr_off = 0;
else
plp->plp_crssys_off = 0;
if (plp->plp_crsusr_off || plp->plp_crssys_off)
return;
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x60);
else
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x20);
}
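/*
 * Derive RX I/Q compensation coefficients from a hardware I/Q estimate
 * and program them.  If the estimate fails, the previously saved
 * coefficients are written back.  Returns non-zero on success.
 */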
static int
bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *mac, uint16_t sample)
{
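/*
 * Fixed-point division helpers: with _x the bit width of _y, these
 * compute roughly (_y << 10) / _z (CALC_COEFF, rounded) and
 * (_y << 20) / _z (CALC_COEFF2, truncated) while keeping the
 * intermediate values within 32-bit range.
 */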
#define CALC_COEFF(_v, _x, _y, _z) do { \
int _t; \
_t = _x - 20; \
if (_t >= 0) { \
_v = ((_y << (30 - _x)) + (_z >> (1 + _t))) / (_z >> _t); \
} else { \
_v = ((_y << (30 - _x)) + (_z << (-1 - _t))) / (_z << -_t); \
} \
} while (0)
#define CALC_COEFF2(_v, _x, _y, _z) do { \
int _t; \
_t = _x - 11; \
if (_t >= 0) \
_v = (_y << (31 - _x)) / (_z >> _t); \
else \
_v = (_y << (31 - _x)) / (_z << -_t); \
} while (0)
struct bwn_phy_lp_iq_est ie;
uint16_t v0, v1;
int tmp[2], ret;
v1 = BWN_PHY_READ(mac, BWN_PHY_RX_COMP_COEFF_S);
v0 = v1 >> 8;
v1 |= 0xff;
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, 0x00c0);
BWN_PHY_MASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff);
ret = bwn_phy_lp_rx_iq_est(mac, sample, 32, &ie);
if (ret == 0)
goto done;
if (ie.ie_ipwr + ie.ie_qpwr < 2) {
ret = 0;
goto done;
}
CALC_COEFF(tmp[0], bwn_nbits(ie.ie_iqprod), ie.ie_iqprod, ie.ie_ipwr);
CALC_COEFF2(tmp[1], bwn_nbits(ie.ie_qpwr), ie.ie_qpwr, ie.ie_ipwr);
tmp[1] = -bwn_sqrt(mac, tmp[1] - (tmp[0] * tmp[0]));
v0 = tmp[0] >> 3;
v1 = tmp[1] >> 4;
done:
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, v1);
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, v0 << 8);
return (ret);
#undef CALC_COEFF
#undef CALC_COEFF2
}
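/*
 * Load the static PHY tables for LP-PHY revisions 0 and 1; the two
 * revisions differ only in the OFDM/CCK gain tables.
 */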
static void
bwn_phy_lp_tblinit_r01(struct bwn_mac *mac)
{
static const uint16_t noisescale[] = {
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0x00a4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x4c00, 0x2d36, 0x0000, 0x0000, 0x4c00, 0x2d36,
};
static const uint16_t crsgainnft[] = {
0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f,
0x036f, 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381,
0x0374, 0x0381, 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f,
0x0040, 0x005e, 0x007f, 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d,
0x013d,
};
static const uint16_t filterctl[] = {
0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077,
0xff53, 0x0127,
};
static const uint32_t psctl[] = {
0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101,
0x00000080, 0x08080101, 0x00000040, 0x08080101, 0x000000c0,
0x08a81501, 0x000000c0, 0x0fe8fd01, 0x000000c0, 0x08300105,
0x000000c0, 0x08080201, 0x000000c0, 0x08280205, 0x000000c0,
0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, 0x08080202,
0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0,
0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106,
0x000000c0, 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0,
};
static const uint16_t ofdmcckgain_r0[] = {
0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001,
0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055,
0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d,
0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d,
0x755d,
};
static const uint16_t ofdmcckgain_r1[] = {
0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001,
0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055,
0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d,
0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d,
0x755d,
};
static const uint16_t gaindelta[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000,
};
static const uint32_t txpwrctl[] = {
0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c,
0x0000004b, 0x0000004a, 0x00000049, 0x00000048, 0x00000047,
0x00000046, 0x00000045, 0x00000044, 0x00000043, 0x00000042,
0x00000041, 0x00000040, 0x0000003f, 0x0000003e, 0x0000003d,
0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, 0x00000038,
0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033,
0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e,
0x0000002d, 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029,
0x00000028, 0x00000027, 0x00000026, 0x00000025, 0x00000024,
0x00000023, 0x00000022, 0x00000021, 0x00000020, 0x0000001f,
0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, 0x0000001a,
0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015,
0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x000075a0, 0x000075a0, 0x000075a1,
0x000075a1, 0x000075a2, 0x000075a2, 0x000075a3, 0x000075a3,
0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, 0x000074b2,
0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20,
0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23,
0x00006d23, 0x00004660, 0x00004660, 0x00004661, 0x00004661,
0x00004662, 0x00004662, 0x00004663, 0x00004663, 0x00003e60,
0x00003e60, 0x00003e61, 0x00003e61, 0x00003e62, 0x00003e62,
0x00003e63, 0x00003e63, 0x00003660, 0x00003660, 0x00003661,
0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663,
0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62,
0x00002e62, 0x00002e63, 0x00002e63, 0x00002660, 0x00002660,
0x00002661, 0x00002661, 0x00002662, 0x00002662, 0x00002663,
0x00002663, 0x000025e0, 0x000025e0, 0x000025e1, 0x000025e1,
0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, 0x00001de0,
0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2,
0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61,
0x00001d61, 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63,
0x00001560, 0x00001560, 0x00001561, 0x00001561, 0x00001562,
0x00001562, 0x00001563, 0x00001563, 0x00000d60, 0x00000d60,
0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, 0x00000d63,
0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1,
0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10,
0x00000e10, 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12,
0x00000e13, 0x00000e13, 0x00000bf0, 0x00000bf0, 0x00000bf1,
0x00000bf1, 0x00000bf2, 0x00000bf2, 0x00000bf3, 0x00000bf3,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc,
0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04,
0x0000fcff, 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006,
0x0000ff03, 0x000007fc, 0x0000fc08, 0x00000203, 0x0000fffb,
0x00000600, 0x0000fa01, 0x0000fc03, 0x0000fe06, 0x0000fe00,
0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, 0x000004fd,
0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500,
0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa,
0x00000700, 0x00000305, 0x000004ff, 0x00000801, 0x00000503,
0x000005f9, 0x00000404, 0x0000fb08, 0x000005fd, 0x00000501,
0x00000405, 0x0000fb03, 0x000007fc, 0x00000403, 0x00000303,
0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, 0x0000fe01,
0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe,
0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa,
0x000003fe, 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06,
0x000008fc, 0x00000701, 0x00000504, 0x0000fdfe, 0x0000fdfc,
0x000003fe, 0x00000704, 0x000002fc, 0x000004f9, 0x0000fdfd,
0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, 0x000004f9,
0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05,
0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa,
0x00000305, 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc,
0x0000fe03, 0x00000701, 0x000001fb, 0x000001f9, 0x00000206,
0x000006fd, 0x00000508, 0x00000700, 0x00000304, 0x000005fe,
0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, 0x000007f9,
0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08,
0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb,
0x00000702,
};
KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__));
bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl),
bwn_tab_sigsq_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale);
bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(crsgainnft), crsgainnft);
bwn_tab_write_multi(mac, BWN_TAB_2(8, 0), N(filterctl), filterctl);
bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(psctl), psctl);
bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl),
bwn_tab_pllfrac_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl),
bwn_tabl_iqlocal_tbl);
if (mac->mac_phy.rev == 0) {
bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r0),
ofdmcckgain_r0);
bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r0),
ofdmcckgain_r0);
} else {
bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r1),
ofdmcckgain_r1);
bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r1),
ofdmcckgain_r1);
}
bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(gaindelta), gaindelta);
bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(txpwrctl), txpwrctl);
}
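/*
 * Load the static PHY tables for LP-PHY revision 2 and later.  On
 * chip 0x4325 revision 0 the gain index/value tables are replaced
 * with alternate versions.
 */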
static void
bwn_phy_lp_tblinit_r2(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
static const uint16_t noisescale[] = {
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x0000, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4
};
static const uint32_t filterctl[] = {
0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27,
0x0000217f, 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f
};
static const uint32_t psctl[] = {
0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 0x00000000,
0x00002080, 0x00006180, 0x00003002, 0x00000040, 0x00002042,
0x00180047, 0x00080043, 0x00000041, 0x000020c1, 0x00046006,
0x00042002, 0x00040000, 0x00002003, 0x00180006, 0x00080002
};
static const uint32_t gainidx[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000,
0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207,
0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001,
0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288,
0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000,
0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794,
0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011,
0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21,
0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019,
0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329,
0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a,
0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082,
0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001,
0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683,
0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000,
0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711,
0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010,
0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c,
0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019,
0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6,
0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a,
0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c,
0x0000001a, 0x64ca55ad, 0x0000001a
};
static const uint16_t auxgainidx[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002,
0x0004, 0x0016
};
static const uint16_t swctl[] = {
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009,
0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018,
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009,
0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018
};
static const uint8_t hf[] = {
0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48,
0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17
};
static const uint32_t gainval[] = {
0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb,
0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004,
0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012,
0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000,
0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000,
0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003,
0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012,
0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009,
0x000000f1, 0x00000000, 0x00000000
};
static const uint16_t gain[] = {
0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808,
0x080a, 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813,
0x0814, 0x0816, 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824,
0x0830, 0x0834, 0x0837, 0x083b, 0x083f, 0x0840, 0x0844, 0x0857,
0x085b, 0x085f, 0x08d7, 0x08db, 0x08df, 0x0957, 0x095b, 0x095f,
0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, 0x175f, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000
};
static const uint32_t papdeps[] = {
0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9,
0x00021fdf, 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7,
0x0004efc2, 0x00055fb5, 0x0005cfb0, 0x00063fa8, 0x00068fa3,
0x00071f98, 0x0007ef92, 0x00084f8b, 0x0008df82, 0x00097f77,
0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, 0x000bff41,
0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16,
0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15,
0x00143f1c, 0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f,
0x001e6f7e, 0x0021cfa4, 0x0025bfd2, 0x002a2008, 0x002fb047,
0x00360090, 0x003d40e0, 0x0045c135, 0x004fb189, 0x005ae1d7,
0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, 0x007ff2e3,
0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356,
0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506
};
static const uint32_t papdmult[] = {
0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060,
0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080,
0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa,
0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3,
0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f,
0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193,
0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a,
0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd,
0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd,
0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc,
0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5,
0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd,
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28
};
static const uint32_t gainidx_a0[] = {
0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060,
0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080,
0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa,
0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3,
0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f,
0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193,
0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a,
0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd,
0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd,
0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc,
0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5,
0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd,
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28
};
static const uint16_t auxgainidx_a0[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0002, 0x0014, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0002, 0x0014
};
static const uint32_t gainval_a0[] = {
0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb,
0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004,
0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012,
0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000,
0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000,
0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003,
0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012,
0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f,
0x000000f7, 0x00000000, 0x00000000
};
static const uint16_t gain_a0[] = {
0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b,
0x000c, 0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016,
0x0017, 0x001a, 0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034,
0x0037, 0x003b, 0x003f, 0x0040, 0x0044, 0x0057, 0x005b, 0x005f,
0x00d7, 0x00db, 0x00df, 0x0157, 0x015b, 0x015f, 0x0357, 0x035b,
0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000
};
KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__));
for (i = 0; i < 704; i++)
bwn_tab_write(mac, BWN_TAB_4(7, i), 0);
bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl),
bwn_tab_sigsq_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale);
bwn_tab_write_multi(mac, BWN_TAB_4(11, 0), N(filterctl), filterctl);
bwn_tab_write_multi(mac, BWN_TAB_4(12, 0), N(psctl), psctl);
bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx), gainidx);
bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx), auxgainidx);
bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(swctl), swctl);
bwn_tab_write_multi(mac, BWN_TAB_1(16, 0), N(hf), hf);
bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval), gainval);
bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain), gain);
bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl),
bwn_tab_pllfrac_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl),
bwn_tabl_iqlocal_tbl);
bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(papdeps), papdeps);
bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(papdmult), papdmult);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx_a0),
gainidx_a0);
bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx_a0),
auxgainidx_a0);
bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval_a0),
gainval_a0);
bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain_a0), gain_a0);
}
}
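/*
 * Select and load the 128-entry TX gain table matching the PHY
 * revision, the current band and the PA configuration advertised in
 * the SPROM board flags.
 */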
static void
bwn_phy_lp_tblinit_txgain(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
static struct bwn_txgain_entry txgain_r2[] = {
{ 255, 255, 203, 0, 152 }, { 255, 255, 203, 0, 147 },
{ 255, 255, 203, 0, 143 }, { 255, 255, 203, 0, 139 },
{ 255, 255, 203, 0, 135 }, { 255, 255, 203, 0, 131 },
{ 255, 255, 203, 0, 128 }, { 255, 255, 203, 0, 124 },
{ 255, 255, 203, 0, 121 }, { 255, 255, 203, 0, 117 },
{ 255, 255, 203, 0, 114 }, { 255, 255, 203, 0, 111 },
{ 255, 255, 203, 0, 107 }, { 255, 255, 203, 0, 104 },
{ 255, 255, 203, 0, 101 }, { 255, 255, 203, 0, 99 },
{ 255, 255, 203, 0, 96 }, { 255, 255, 203, 0, 93 },
{ 255, 255, 203, 0, 90 }, { 255, 255, 203, 0, 88 },
{ 255, 255, 203, 0, 85 }, { 255, 255, 203, 0, 83 },
{ 255, 255, 203, 0, 81 }, { 255, 255, 203, 0, 78 },
{ 255, 255, 203, 0, 76 }, { 255, 255, 203, 0, 74 },
{ 255, 255, 203, 0, 72 }, { 255, 255, 203, 0, 70 },
{ 255, 255, 203, 0, 68 }, { 255, 255, 203, 0, 66 },
{ 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 },
{ 255, 255, 192, 0, 64 }, { 255, 255, 186, 0, 64 },
{ 255, 255, 181, 0, 64 }, { 255, 255, 176, 0, 64 },
{ 255, 255, 171, 0, 64 }, { 255, 255, 166, 0, 64 },
{ 255, 255, 161, 0, 64 }, { 255, 255, 157, 0, 64 },
{ 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 },
{ 255, 255, 144, 0, 64 }, { 255, 255, 140, 0, 64 },
{ 255, 255, 136, 0, 64 }, { 255, 255, 132, 0, 64 },
{ 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 },
{ 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 },
{ 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 },
{ 255, 255, 108, 0, 64 }, { 255, 255, 105, 0, 64 },
{ 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 },
{ 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 },
{ 255, 255, 91, 0, 64 }, { 255, 255, 88, 0, 64 },
{ 255, 255, 86, 0, 64 }, { 255, 255, 83, 0, 64 },
{ 255, 255, 81, 0, 64 }, { 255, 255, 79, 0, 64 },
{ 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 },
{ 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 },
{ 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 },
{ 255, 255, 64, 0, 64 }, { 255, 248, 64, 0, 64 },
{ 255, 248, 62, 0, 64 }, { 255, 241, 62, 0, 64 },
{ 255, 241, 60, 0, 64 }, { 255, 234, 60, 0, 64 },
{ 255, 234, 59, 0, 64 }, { 255, 227, 59, 0, 64 },
{ 255, 227, 57, 0, 64 }, { 255, 221, 57, 0, 64 },
{ 255, 221, 55, 0, 64 }, { 255, 215, 55, 0, 64 },
{ 255, 215, 54, 0, 64 }, { 255, 208, 54, 0, 64 },
{ 255, 208, 52, 0, 64 }, { 255, 203, 52, 0, 64 },
{ 255, 203, 51, 0, 64 }, { 255, 197, 51, 0, 64 },
{ 255, 197, 49, 0, 64 }, { 255, 191, 49, 0, 64 },
{ 255, 191, 48, 0, 64 }, { 255, 186, 48, 0, 64 },
{ 255, 186, 47, 0, 64 }, { 255, 181, 47, 0, 64 },
{ 255, 181, 45, 0, 64 }, { 255, 175, 45, 0, 64 },
{ 255, 175, 44, 0, 64 }, { 255, 170, 44, 0, 64 },
{ 255, 170, 43, 0, 64 }, { 255, 166, 43, 0, 64 },
{ 255, 166, 42, 0, 64 }, { 255, 161, 42, 0, 64 },
{ 255, 161, 40, 0, 64 }, { 255, 156, 40, 0, 64 },
{ 255, 156, 39, 0, 64 }, { 255, 152, 39, 0, 64 },
{ 255, 152, 38, 0, 64 }, { 255, 148, 38, 0, 64 },
{ 255, 148, 37, 0, 64 }, { 255, 143, 37, 0, 64 },
{ 255, 143, 36, 0, 64 }, { 255, 139, 36, 0, 64 },
{ 255, 139, 35, 0, 64 }, { 255, 135, 35, 0, 64 },
{ 255, 135, 34, 0, 64 }, { 255, 132, 34, 0, 64 },
{ 255, 132, 33, 0, 64 }, { 255, 128, 33, 0, 64 },
{ 255, 128, 32, 0, 64 }, { 255, 124, 32, 0, 64 },
{ 255, 124, 31, 0, 64 }, { 255, 121, 31, 0, 64 },
{ 255, 121, 30, 0, 64 }, { 255, 117, 30, 0, 64 },
{ 255, 117, 29, 0, 64 }, { 255, 114, 29, 0, 64 },
{ 255, 114, 29, 0, 64 }, { 255, 111, 29, 0, 64 },
};
static struct bwn_txgain_entry txgain_2ghz_r2[] = {
{ 7, 99, 255, 0, 64 }, { 7, 96, 255, 0, 64 },
{ 7, 93, 255, 0, 64 }, { 7, 90, 255, 0, 64 },
{ 7, 88, 255, 0, 64 }, { 7, 85, 255, 0, 64 },
{ 7, 83, 255, 0, 64 }, { 7, 81, 255, 0, 64 },
{ 7, 78, 255, 0, 64 }, { 7, 76, 255, 0, 64 },
{ 7, 74, 255, 0, 64 }, { 7, 72, 255, 0, 64 },
{ 7, 70, 255, 0, 64 }, { 7, 68, 255, 0, 64 },
{ 7, 66, 255, 0, 64 }, { 7, 64, 255, 0, 64 },
{ 7, 64, 255, 0, 64 }, { 7, 62, 255, 0, 64 },
{ 7, 62, 248, 0, 64 }, { 7, 60, 248, 0, 64 },
{ 7, 60, 241, 0, 64 }, { 7, 59, 241, 0, 64 },
{ 7, 59, 234, 0, 64 }, { 7, 57, 234, 0, 64 },
{ 7, 57, 227, 0, 64 }, { 7, 55, 227, 0, 64 },
{ 7, 55, 221, 0, 64 }, { 7, 54, 221, 0, 64 },
{ 7, 54, 215, 0, 64 }, { 7, 52, 215, 0, 64 },
{ 7, 52, 208, 0, 64 }, { 7, 51, 208, 0, 64 },
{ 7, 51, 203, 0, 64 }, { 7, 49, 203, 0, 64 },
{ 7, 49, 197, 0, 64 }, { 7, 48, 197, 0, 64 },
{ 7, 48, 191, 0, 64 }, { 7, 47, 191, 0, 64 },
{ 7, 47, 186, 0, 64 }, { 7, 45, 186, 0, 64 },
{ 7, 45, 181, 0, 64 }, { 7, 44, 181, 0, 64 },
{ 7, 44, 175, 0, 64 }, { 7, 43, 175, 0, 64 },
{ 7, 43, 170, 0, 64 }, { 7, 42, 170, 0, 64 },
{ 7, 42, 166, 0, 64 }, { 7, 40, 166, 0, 64 },
{ 7, 40, 161, 0, 64 }, { 7, 39, 161, 0, 64 },
{ 7, 39, 156, 0, 64 }, { 7, 38, 156, 0, 64 },
{ 7, 38, 152, 0, 64 }, { 7, 37, 152, 0, 64 },
{ 7, 37, 148, 0, 64 }, { 7, 36, 148, 0, 64 },
{ 7, 36, 143, 0, 64 }, { 7, 35, 143, 0, 64 },
{ 7, 35, 139, 0, 64 }, { 7, 34, 139, 0, 64 },
{ 7, 34, 135, 0, 64 }, { 7, 33, 135, 0, 64 },
{ 7, 33, 132, 0, 64 }, { 7, 32, 132, 0, 64 },
{ 7, 32, 128, 0, 64 }, { 7, 31, 128, 0, 64 },
{ 7, 31, 124, 0, 64 }, { 7, 30, 124, 0, 64 },
{ 7, 30, 121, 0, 64 }, { 7, 29, 121, 0, 64 },
{ 7, 29, 117, 0, 64 }, { 7, 29, 117, 0, 64 },
{ 7, 29, 114, 0, 64 }, { 7, 28, 114, 0, 64 },
{ 7, 28, 111, 0, 64 }, { 7, 27, 111, 0, 64 },
{ 7, 27, 108, 0, 64 }, { 7, 26, 108, 0, 64 },
{ 7, 26, 104, 0, 64 }, { 7, 25, 104, 0, 64 },
{ 7, 25, 102, 0, 64 }, { 7, 25, 102, 0, 64 },
{ 7, 25, 99, 0, 64 }, { 7, 24, 99, 0, 64 },
{ 7, 24, 96, 0, 64 }, { 7, 23, 96, 0, 64 },
{ 7, 23, 93, 0, 64 }, { 7, 23, 93, 0, 64 },
{ 7, 23, 90, 0, 64 }, { 7, 22, 90, 0, 64 },
{ 7, 22, 88, 0, 64 }, { 7, 21, 88, 0, 64 },
{ 7, 21, 85, 0, 64 }, { 7, 21, 85, 0, 64 },
{ 7, 21, 83, 0, 64 }, { 7, 20, 83, 0, 64 },
{ 7, 20, 81, 0, 64 }, { 7, 20, 81, 0, 64 },
{ 7, 20, 78, 0, 64 }, { 7, 19, 78, 0, 64 },
{ 7, 19, 76, 0, 64 }, { 7, 19, 76, 0, 64 },
{ 7, 19, 74, 0, 64 }, { 7, 18, 74, 0, 64 },
{ 7, 18, 72, 0, 64 }, { 7, 18, 72, 0, 64 },
{ 7, 18, 70, 0, 64 }, { 7, 17, 70, 0, 64 },
{ 7, 17, 68, 0, 64 }, { 7, 17, 68, 0, 64 },
{ 7, 17, 66, 0, 64 }, { 7, 16, 66, 0, 64 },
{ 7, 16, 64, 0, 64 }, { 7, 16, 64, 0, 64 },
{ 7, 16, 62, 0, 64 }, { 7, 15, 62, 0, 64 },
{ 7, 15, 60, 0, 64 }, { 7, 15, 60, 0, 64 },
{ 7, 15, 59, 0, 64 }, { 7, 14, 59, 0, 64 },
{ 7, 14, 57, 0, 64 }, { 7, 14, 57, 0, 64 },
{ 7, 14, 55, 0, 64 }, { 7, 14, 55, 0, 64 },
{ 7, 14, 54, 0, 64 }, { 7, 13, 54, 0, 64 },
{ 7, 13, 52, 0, 64 }, { 7, 13, 52, 0, 64 },
};
static struct bwn_txgain_entry txgain_5ghz_r2[] = {
{ 255, 255, 255, 0, 152 }, { 255, 255, 255, 0, 147 },
{ 255, 255, 255, 0, 143 }, { 255, 255, 255, 0, 139 },
{ 255, 255, 255, 0, 135 }, { 255, 255, 255, 0, 131 },
{ 255, 255, 255, 0, 128 }, { 255, 255, 255, 0, 124 },
{ 255, 255, 255, 0, 121 }, { 255, 255, 255, 0, 117 },
{ 255, 255, 255, 0, 114 }, { 255, 255, 255, 0, 111 },
{ 255, 255, 255, 0, 107 }, { 255, 255, 255, 0, 104 },
{ 255, 255, 255, 0, 101 }, { 255, 255, 255, 0, 99 },
{ 255, 255, 255, 0, 96 }, { 255, 255, 255, 0, 93 },
{ 255, 255, 255, 0, 90 }, { 255, 255, 255, 0, 88 },
{ 255, 255, 255, 0, 85 }, { 255, 255, 255, 0, 83 },
{ 255, 255, 255, 0, 81 }, { 255, 255, 255, 0, 78 },
{ 255, 255, 255, 0, 76 }, { 255, 255, 255, 0, 74 },
{ 255, 255, 255, 0, 72 }, { 255, 255, 255, 0, 70 },
{ 255, 255, 255, 0, 68 }, { 255, 255, 255, 0, 66 },
{ 255, 255, 255, 0, 64 }, { 255, 255, 248, 0, 64 },
{ 255, 255, 241, 0, 64 }, { 255, 255, 234, 0, 64 },
{ 255, 255, 227, 0, 64 }, { 255, 255, 221, 0, 64 },
{ 255, 255, 215, 0, 64 }, { 255, 255, 208, 0, 64 },
{ 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 },
{ 255, 255, 191, 0, 64 }, { 255, 255, 186, 0, 64 },
{ 255, 255, 181, 0, 64 }, { 255, 255, 175, 0, 64 },
{ 255, 255, 170, 0, 64 }, { 255, 255, 166, 0, 64 },
{ 255, 255, 161, 0, 64 }, { 255, 255, 156, 0, 64 },
{ 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 },
{ 255, 255, 143, 0, 64 }, { 255, 255, 139, 0, 64 },
{ 255, 255, 135, 0, 64 }, { 255, 255, 132, 0, 64 },
{ 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 },
{ 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 },
{ 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 },
{ 255, 255, 108, 0, 64 }, { 255, 255, 104, 0, 64 },
{ 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 },
{ 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 },
{ 255, 255, 90, 0, 64 }, { 255, 255, 88, 0, 64 },
{ 255, 255, 85, 0, 64 }, { 255, 255, 83, 0, 64 },
{ 255, 255, 81, 0, 64 }, { 255, 255, 78, 0, 64 },
{ 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 },
{ 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 },
{ 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 },
{ 255, 255, 64, 0, 64 }, { 255, 255, 64, 0, 64 },
{ 255, 255, 62, 0, 64 }, { 255, 248, 62, 0, 64 },
{ 255, 248, 60, 0, 64 }, { 255, 241, 60, 0, 64 },
{ 255, 241, 59, 0, 64 }, { 255, 234, 59, 0, 64 },
{ 255, 234, 57, 0, 64 }, { 255, 227, 57, 0, 64 },
{ 255, 227, 55, 0, 64 }, { 255, 221, 55, 0, 64 },
{ 255, 221, 54, 0, 64 }, { 255, 215, 54, 0, 64 },
{ 255, 215, 52, 0, 64 }, { 255, 208, 52, 0, 64 },
{ 255, 208, 51, 0, 64 }, { 255, 203, 51, 0, 64 },
{ 255, 203, 49, 0, 64 }, { 255, 197, 49, 0, 64 },
{ 255, 197, 48, 0, 64 }, { 255, 191, 48, 0, 64 },
{ 255, 191, 47, 0, 64 }, { 255, 186, 47, 0, 64 },
{ 255, 186, 45, 0, 64 }, { 255, 181, 45, 0, 64 },
{ 255, 181, 44, 0, 64 }, { 255, 175, 44, 0, 64 },
{ 255, 175, 43, 0, 64 }, { 255, 170, 43, 0, 64 },
{ 255, 170, 42, 0, 64 }, { 255, 166, 42, 0, 64 },
{ 255, 166, 40, 0, 64 }, { 255, 161, 40, 0, 64 },
{ 255, 161, 39, 0, 64 }, { 255, 156, 39, 0, 64 },
{ 255, 156, 38, 0, 64 }, { 255, 152, 38, 0, 64 },
{ 255, 152, 37, 0, 64 }, { 255, 148, 37, 0, 64 },
{ 255, 148, 36, 0, 64 }, { 255, 143, 36, 0, 64 },
{ 255, 143, 35, 0, 64 }, { 255, 139, 35, 0, 64 },
{ 255, 139, 34, 0, 64 }, { 255, 135, 34, 0, 64 },
{ 255, 135, 33, 0, 64 }, { 255, 132, 33, 0, 64 },
{ 255, 132, 32, 0, 64 }, { 255, 128, 32, 0, 64 }
};
static struct bwn_txgain_entry txgain_r0[] = {
{ 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 },
{ 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 },
{ 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 },
{ 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 },
{ 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 },
{ 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 },
{ 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 },
{ 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 },
{ 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 },
{ 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 },
{ 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 },
{ 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 },
{ 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 },
{ 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 },
{ 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 },
{ 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 },
{ 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 },
{ 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 },
{ 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 },
{ 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 },
{ 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 },
{ 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 },
{ 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 },
{ 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 },
{ 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 },
{ 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 },
{ 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 },
{ 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 },
{ 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 },
{ 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 },
{ 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 },
{ 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 },
{ 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 },
{ 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 },
{ 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 },
{ 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 },
{ 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 },
{ 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 },
{ 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 },
{ 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 },
{ 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 },
{ 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 },
{ 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 },
{ 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 },
{ 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 },
{ 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 },
{ 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 },
{ 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 },
{ 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 },
{ 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 },
{ 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 },
{ 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 },
{ 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 },
{ 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 },
{ 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 },
{ 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 },
{ 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 },
{ 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 },
{ 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 },
{ 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 },
{ 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 },
{ 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 },
{ 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 },
{ 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 }
};
static struct bwn_txgain_entry txgain_2ghz_r0[] = {
{ 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 },
{ 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 },
{ 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 },
{ 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 },
{ 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 },
{ 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 },
{ 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 },
{ 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 },
{ 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 },
{ 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 },
{ 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 },
{ 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 },
{ 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 },
{ 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 },
{ 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 },
{ 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 },
{ 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 },
{ 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 },
{ 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 },
{ 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 },
{ 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 },
{ 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 },
{ 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 },
{ 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 },
{ 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 },
{ 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 },
{ 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 },
{ 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 },
{ 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 },
{ 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 },
{ 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 },
{ 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 },
{ 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 },
{ 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 },
{ 4, 10, 6, 0, 59 }, { 4, 10, 5, 0, 72 },
{ 4, 10, 5, 0, 70 }, { 4, 10, 5, 0, 68 },
{ 4, 10, 5, 0, 66 }, { 4, 10, 5, 0, 64 },
{ 4, 10, 5, 0, 62 }, { 4, 10, 5, 0, 60 },
{ 4, 10, 5, 0, 59 }, { 4, 9, 5, 0, 70 },
{ 4, 9, 5, 0, 68 }, { 4, 9, 5, 0, 66 },
{ 4, 9, 5, 0, 64 }, { 4, 9, 5, 0, 63 },
{ 4, 9, 5, 0, 61 }, { 4, 9, 5, 0, 59 },
{ 4, 9, 4, 0, 71 }, { 4, 9, 4, 0, 69 },
{ 4, 9, 4, 0, 67 }, { 4, 9, 4, 0, 65 },
{ 4, 9, 4, 0, 63 }, { 4, 9, 4, 0, 62 },
{ 4, 9, 4, 0, 60 }, { 4, 9, 4, 0, 58 },
{ 4, 8, 4, 0, 70 }, { 4, 8, 4, 0, 68 },
{ 4, 8, 4, 0, 66 }, { 4, 8, 4, 0, 65 },
{ 4, 8, 4, 0, 63 }, { 4, 8, 4, 0, 61 },
{ 4, 8, 4, 0, 59 }, { 4, 7, 4, 0, 68 },
{ 4, 7, 4, 0, 66 }, { 4, 7, 4, 0, 64 },
{ 4, 7, 4, 0, 62 }, { 4, 7, 4, 0, 61 },
{ 4, 7, 4, 0, 59 }, { 4, 7, 3, 0, 67 },
{ 4, 7, 3, 0, 65 }, { 4, 7, 3, 0, 63 },
{ 4, 7, 3, 0, 62 }, { 4, 7, 3, 0, 60 },
{ 4, 6, 3, 0, 65 }, { 4, 6, 3, 0, 63 },
{ 4, 6, 3, 0, 61 }, { 4, 6, 3, 0, 60 },
{ 4, 6, 3, 0, 58 }, { 4, 5, 3, 0, 68 },
{ 4, 5, 3, 0, 66 }, { 4, 5, 3, 0, 64 },
{ 4, 5, 3, 0, 62 }, { 4, 5, 3, 0, 60 },
{ 4, 5, 3, 0, 59 }, { 4, 5, 3, 0, 57 },
{ 4, 4, 2, 0, 83 }, { 4, 4, 2, 0, 81 },
{ 4, 4, 2, 0, 78 }, { 4, 4, 2, 0, 76 },
{ 4, 4, 2, 0, 74 }, { 4, 4, 2, 0, 72 }
};
static struct bwn_txgain_entry txgain_5ghz_r0[] = {
{ 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 },
{ 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 },
{ 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 },
{ 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 },
{ 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 },
{ 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 },
{ 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 },
{ 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 },
{ 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 },
{ 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 },
{ 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 },
{ 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 },
{ 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 },
{ 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 },
{ 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 },
{ 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 },
{ 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 },
{ 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 },
{ 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 },
{ 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 },
{ 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 },
{ 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 },
{ 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 },
{ 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 },
{ 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 },
{ 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 },
{ 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 },
{ 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 },
{ 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 },
{ 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 },
{ 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 },
{ 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 },
{ 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 },
{ 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 },
{ 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 },
{ 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 },
{ 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 },
{ 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 },
{ 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 },
{ 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 },
{ 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 },
{ 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 },
{ 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 },
{ 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 },
{ 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 },
{ 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 },
{ 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 },
{ 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 },
{ 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 },
{ 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 },
{ 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 },
{ 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 },
{ 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 },
{ 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 },
{ 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 },
{ 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 },
{ 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 },
{ 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 },
{ 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 },
{ 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 },
{ 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 },
{ 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 },
{ 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 },
{ 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 }
};
static struct bwn_txgain_entry txgain_r1[] = {
{ 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 },
{ 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 },
{ 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 },
{ 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 },
{ 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 },
{ 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 },
{ 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 },
{ 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 },
{ 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 },
{ 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 },
{ 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 },
{ 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 },
{ 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 },
{ 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 },
{ 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 },
{ 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 },
{ 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 },
{ 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 },
{ 7, 15, 13, 0, 70 }, { 7, 15, 14, 0, 68 },
{ 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 },
{ 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 },
{ 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 },
{ 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 },
{ 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 },
{ 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 },
{ 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 },
{ 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 },
{ 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 },
{ 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 },
{ 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 },
{ 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 },
{ 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 },
{ 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 },
{ 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 },
{ 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 },
{ 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 },
{ 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 },
{ 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 },
{ 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 },
{ 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 },
{ 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 },
{ 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 },
{ 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 },
{ 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 },
{ 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 },
{ 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 },
{ 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 },
{ 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 },
{ 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 },
{ 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 },
{ 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 },
{ 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 },
{ 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 },
{ 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 },
{ 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 },
{ 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 },
{ 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 },
{ 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 },
{ 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 },
{ 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 },
{ 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 },
{ 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 },
{ 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 },
{ 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 },
{ 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 },
{ 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 },
{ 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 },
{ 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 },
{ 7, 11, 6, 0, 71 }
};
static struct bwn_txgain_entry txgain_2ghz_r1[] = {
{ 4, 15, 15, 0, 90 }, { 4, 15, 15, 0, 88 },
{ 4, 15, 15, 0, 85 }, { 4, 15, 15, 0, 83 },
{ 4, 15, 15, 0, 81 }, { 4, 15, 15, 0, 78 },
{ 4, 15, 15, 0, 76 }, { 4, 15, 15, 0, 74 },
{ 4, 15, 15, 0, 72 }, { 4, 15, 15, 0, 70 },
{ 4, 15, 15, 0, 68 }, { 4, 15, 15, 0, 66 },
{ 4, 15, 15, 0, 64 }, { 4, 15, 15, 0, 62 },
{ 4, 15, 15, 0, 60 }, { 4, 15, 15, 0, 59 },
{ 4, 15, 14, 0, 72 }, { 4, 15, 14, 0, 70 },
{ 4, 15, 14, 0, 68 }, { 4, 15, 14, 0, 66 },
{ 4, 15, 14, 0, 64 }, { 4, 15, 14, 0, 62 },
{ 4, 15, 14, 0, 60 }, { 4, 15, 14, 0, 59 },
{ 4, 15, 13, 0, 72 }, { 4, 15, 13, 0, 70 },
{ 4, 15, 13, 0, 68 }, { 4, 15, 13, 0, 66 },
{ 4, 15, 13, 0, 64 }, { 4, 15, 13, 0, 62 },
{ 4, 15, 13, 0, 60 }, { 4, 15, 13, 0, 59 },
{ 4, 15, 12, 0, 72 }, { 4, 15, 12, 0, 70 },
{ 4, 15, 12, 0, 68 }, { 4, 15, 12, 0, 66 },
{ 4, 15, 12, 0, 64 }, { 4, 15, 12, 0, 62 },
{ 4, 15, 12, 0, 60 }, { 4, 15, 12, 0, 59 },
{ 4, 15, 11, 0, 72 }, { 4, 15, 11, 0, 70 },
{ 4, 15, 11, 0, 68 }, { 4, 15, 11, 0, 66 },
{ 4, 15, 11, 0, 64 }, { 4, 15, 11, 0, 62 },
{ 4, 15, 11, 0, 60 }, { 4, 15, 11, 0, 59 },
{ 4, 15, 10, 0, 72 }, { 4, 15, 10, 0, 70 },
{ 4, 15, 10, 0, 68 }, { 4, 15, 10, 0, 66 },
{ 4, 15, 10, 0, 64 }, { 4, 15, 10, 0, 62 },
{ 4, 15, 10, 0, 60 }, { 4, 15, 10, 0, 59 },
{ 4, 15, 9, 0, 72 }, { 4, 15, 9, 0, 70 },
{ 4, 15, 9, 0, 68 }, { 4, 15, 9, 0, 66 },
{ 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 },
{ 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 },
{ 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 },
{ 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 },
{ 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 },
{ 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 },
{ 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 },
{ 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 },
{ 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 },
{ 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 },
{ 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 },
{ 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 },
{ 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 },
{ 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 },
{ 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 },
{ 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 },
{ 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 },
{ 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 },
{ 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 },
{ 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 },
{ 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 },
{ 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 },
{ 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 },
{ 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 },
{ 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 },
{ 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 },
{ 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 },
{ 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 },
{ 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 },
{ 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 },
{ 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 },
{ 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 },
{ 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 },
{ 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 }
};
static struct bwn_txgain_entry txgain_5ghz_r1[] = {
{ 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 },
{ 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 },
{ 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 },
{ 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 },
{ 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 },
{ 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 },
{ 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 },
{ 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 },
{ 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 },
{ 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 },
{ 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 },
{ 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 },
{ 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 },
{ 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 },
{ 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 },
{ 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 },
{ 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 },
{ 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 },
{ 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 },
{ 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 },
{ 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 },
{ 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 },
{ 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 },
{ 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 },
{ 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 },
{ 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 },
{ 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 },
{ 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 },
{ 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 },
{ 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 },
{ 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 },
{ 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 },
{ 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 },
{ 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 },
{ 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 },
{ 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 },
{ 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 },
{ 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 },
{ 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 },
{ 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 },
{ 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 },
{ 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 },
{ 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 },
{ 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 },
{ 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 },
{ 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 },
{ 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 },
{ 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 },
{ 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 },
{ 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 },
{ 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 },
{ 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 },
{ 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 },
{ 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 },
{ 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 },
{ 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 },
{ 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 },
{ 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 },
{ 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 },
{ 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 },
{ 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 },
{ 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 },
{ 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 },
{ 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 }
};
if (mac->mac_phy.rev != 0 && mac->mac_phy.rev != 1) {
if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA)
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r2);
else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_2ghz_r2);
else
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_5ghz_r2);
return;
}
if (mac->mac_phy.rev == 0) {
if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) ||
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r0);
else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_2ghz_r0);
else
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_5ghz_r0);
return;
}
if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) ||
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r1);
else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r1);
else
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r1);
}
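/*
 * Write a single 8/16/32-bit PHY table entry; the type and offset are
 * packed in 'typeoffset', e.g. BWN_TAB_2(0, 87) addresses 16-bit
 * entry 87 of table 0.
 */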
static void
bwn_tab_write(struct bwn_mac *mac, uint32_t typeoffset, uint32_t value)
{
uint32_t offset, type;
type = BWN_TAB_GETTYPE(typeoffset);
offset = BWN_TAB_GETOFFSET(typeoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
switch (type) {
case BWN_TAB_8BIT:
KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_16BIT:
KASSERT(!(value & ~0xffff),
("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_32BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
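/*
 * Loopback helper: with the TX/RX switch forced and the DDFS test
 * tone running, sweep the RX gain indices until the measured I+Q
 * power falls into an acceptable window.  Returns the matching gain
 * index, or -1 if none qualifies.
 */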
static int
bwn_phy_lp_loopback(struct bwn_mac *mac)
{
struct bwn_phy_lp_iq_est ie;
int i, index = -1;
uint32_t tmp;
memset(&ie, 0, sizeof(ie));
bwn_phy_lp_set_trsw_over(mac, 1, 1);
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 1);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x8);
BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, 0x80);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x80);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x80);
for (i = 0; i < 32; i++) {
bwn_phy_lp_set_rxgain_idx(mac, i);
bwn_phy_lp_ddfs_turnon(mac, 1, 1, 5, 5, 0);
if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie)))
continue;
tmp = (ie.ie_ipwr + ie.ie_qpwr) / 1000;
if ((tmp > 4000) && (tmp < 10000)) {
index = i;
break;
}
}
bwn_phy_lp_ddfs_turnoff(mac);
return (index);
}
static void
bwn_phy_lp_set_rxgain_idx(struct bwn_mac *mac, uint16_t idx)
{
bwn_phy_lp_set_rxgain(mac, bwn_tab_read(mac, BWN_TAB_2(12, idx)));
}
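/*
 * Start the AFE DDFS tone generator with the given I/Q enables,
 * frequency increments and scale index; the loopback sweep uses it
 * as its signal source.
 */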
static void
bwn_phy_lp_ddfs_turnon(struct bwn_mac *mac, int i_on, int q_on,
int incr1, int incr2, int scale_idx)
{
bwn_phy_lp_ddfs_turnoff(mac);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0xff80);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0x80ff);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0xff80, incr1);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0x80ff, incr2 << 8);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff7, i_on << 3);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xffef, q_on << 4);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xff9f, scale_idx << 5);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffb);
BWN_PHY_SET(mac, BWN_PHY_AFE_DDFS, 0x2);
BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x20);
}
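/*
 * Run the hardware I/Q estimator over 'sample' samples and poll up to
 * ~500 ms for completion.  Fills 'ie' and returns 1 on success, 0 on
 * timeout.
 */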
static uint8_t
bwn_phy_lp_rx_iq_est(struct bwn_mac *mac, uint16_t sample, uint8_t time,
struct bwn_phy_lp_iq_est *ie)
{
int i;
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfff7);
BWN_PHY_WRITE(mac, BWN_PHY_IQ_NUM_SMPLS_ADDR, sample);
BWN_PHY_SETMASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xff00, time);
BWN_PHY_MASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xfeff);
BWN_PHY_SET(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200);
for (i = 0; i < 500; i++) {
if (!(BWN_PHY_READ(mac,
BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200))
break;
DELAY(1000);
}
if ((BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) {
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8);
return (0);
}
ie->ie_iqprod = BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_HI_ADDR);
ie->ie_iqprod <<= 16;
ie->ie_iqprod |= BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_LO_ADDR);
ie->ie_ipwr = BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_HI_ADDR);
ie->ie_ipwr <<= 16;
ie->ie_ipwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_LO_ADDR);
ie->ie_qpwr = BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_HI_ADDR);
ie->ie_qpwr <<= 16;
ie->ie_qpwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_LO_ADDR);
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8);
return 1;
}
static uint32_t
bwn_tab_read(struct bwn_mac *mac, uint32_t typeoffset)
{
uint32_t offset, type, value;
type = BWN_TAB_GETTYPE(typeoffset);
offset = BWN_TAB_GETOFFSET(typeoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
switch (type) {
case BWN_TAB_8BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff;
break;
case BWN_TAB_16BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO);
break;
case BWN_TAB_32BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI);
value <<= 16;
value |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
value = 0;
}
return (value);
}
static void
bwn_phy_lp_ddfs_turnoff(struct bwn_mac *mac)
{
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffd);
BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0xffdf);
}
static void
bwn_phy_lp_set_txgain_dac(struct bwn_mac *mac, uint16_t dac)
{
uint16_t ctl;
ctl = BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0xc7f;
ctl |= dac << 7;
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf000, ctl);
}
static void
bwn_phy_lp_set_txgain_pa(struct bwn_mac *mac, uint16_t gain)
{
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0xe03f, gain << 6);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x80ff, gain << 8);
}
static void
bwn_phy_lp_set_txgain_override(struct bwn_mac *mac)
{
if (mac->mac_phy.rev < 2)
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100);
else {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x80);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x4000);
}
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x40);
}
static uint16_t
bwn_phy_lp_get_pa_gain(struct bwn_mac *mac)
{
return BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0x7f;
}
static uint8_t
bwn_nbits(int32_t val)
{
uint32_t tmp;
uint8_t nbits = 0;
for (tmp = abs(val); tmp != 0; tmp >>= 1)
nbits++;
return (nbits);
}
static void
bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *mac, int offset, int count,
struct bwn_txgain_entry *table)
{
int i;
for (i = offset; i < count; i++)
bwn_phy_lp_gaintbl_write(mac, i, table[i]);
}
static void
bwn_phy_lp_gaintbl_write(struct bwn_mac *mac, int offset,
struct bwn_txgain_entry data)
{
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_gaintbl_write_r2(mac, offset, data);
else
bwn_phy_lp_gaintbl_write_r01(mac, offset, data);
}
static void
bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *mac, int offset,
struct bwn_txgain_entry te)
{
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__));
tmp = (te.te_pad << 16) | (te.te_pga << 8) | te.te_gm;
if (mac->mac_phy.rev >= 3) {
tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ?
(0x10 << 24) : (0x70 << 24));
} else {
tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ?
(0x14 << 24) : (0x7f << 24));
}
bwn_tab_write(mac, BWN_TAB_4(7, 0xc0 + offset), tmp);
bwn_tab_write(mac, BWN_TAB_4(7, 0x140 + offset),
te.te_bbmult << 20 | te.te_dac << 28);
}
static void
bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *mac, int offset,
struct bwn_txgain_entry te)
{
KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__));
bwn_tab_write(mac, BWN_TAB_4(10, 0xc0 + offset),
(te.te_pad << 11) | (te.te_pga << 7) | (te.te_gm << 4) |
te.te_dac);
bwn_tab_write(mac, BWN_TAB_4(10, 0x140 + offset), te.te_bbmult << 20);
}
Index: head/sys/dev/ciss/ciss.c
===================================================================
--- head/sys/dev/ciss/ciss.c (revision 328217)
+++ head/sys/dev/ciss/ciss.c (revision 328218)
@@ -1,4733 +1,4733 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2001 Michael Smith
* Copyright (c) 2004 Paul Saab
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Common Interface for SCSI-3 Support driver.
*
* CISS claims to provide a common interface between a generic SCSI
* transport and an intelligent host adapter.
*
* This driver supports CISS as defined in the document "CISS Command
* Interface for SCSI-3 Support Open Specification", Version 1.04,
* Valence Number 1, dated 20001127, produced by Compaq Computer
* Corporation. This document appears to be a hastily and somewhat
* arbitrarily cut-down version of a larger (and probably even more
* chaotic and inconsistent) Compaq internal document. Various
* details were also gleaned from Compaq's "cciss" driver for Linux.
*
* We provide a shim layer between the CISS interface and CAM,
* offloading most of the queueing and being-a-disk chores onto CAM.
* Entry to the driver is via the PCI bus attachment (ciss_probe,
* ciss_attach, etc) and via the CAM interface (ciss_cam_action,
* ciss_cam_poll). The Compaq CISS adapters are, however, poor SCSI
* citizens and we have to fake up some responses to get reasonable
* behaviour out of them. In addition, the CISS command set is by no
* means adequate to support the functionality of a RAID controller,
* and thus the supported Compaq adapters utilise portions of the
* control protocol from earlier Compaq adapter families.
*
* Note that we only support the "simple" transport layer over PCI.
* This interface (ab)uses the I2O register set (specifically the post
* queues) to exchange commands with the adapter. Other interfaces
* are available, but we aren't supposed to know about them, and it is
* dubious whether they would provide major performance improvements
* except under extreme load.
*
* Currently the only supported CISS adapters are the Compaq Smart
* Array 5* series (5300, 5i, 532). Even with only three adapters,
* Compaq still manage to have interface variations.
*
*
* Thanks must go to Fred Harris and Darryl DeVinney at Compaq, as
* well as Paul Saab at Yahoo! for their assistance in making this
* driver happen.
*
* More thanks must go to John Cagle at HP for the countless hours
* spent making this driver "work" with the MSA* series storage
* enclosures. Without his help (and nagging), this driver could not
* be used with these enclosures.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/kthread.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/ciss/cissreg.h>
#include <dev/ciss/cissio.h>
#include <dev/ciss/cissvar.h>
static MALLOC_DEFINE(CISS_MALLOC_CLASS, "ciss_data",
"ciss internal data buffers");
/* pci interface */
static int ciss_lookup(device_t dev);
static int ciss_probe(device_t dev);
static int ciss_attach(device_t dev);
static int ciss_detach(device_t dev);
static int ciss_shutdown(device_t dev);
/* (de)initialisation functions, control wrappers */
static int ciss_init_pci(struct ciss_softc *sc);
static int ciss_setup_msix(struct ciss_softc *sc);
static int ciss_init_perf(struct ciss_softc *sc);
static int ciss_wait_adapter(struct ciss_softc *sc);
static int ciss_flush_adapter(struct ciss_softc *sc);
static int ciss_init_requests(struct ciss_softc *sc);
static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs,
int nseg, int error);
static int ciss_identify_adapter(struct ciss_softc *sc);
static int ciss_init_logical(struct ciss_softc *sc);
static int ciss_init_physical(struct ciss_softc *sc);
static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll);
static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld);
static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld);
static int ciss_update_config(struct ciss_softc *sc);
static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld);
static void ciss_init_sysctl(struct ciss_softc *sc);
static void ciss_soft_reset(struct ciss_softc *sc);
static void ciss_free(struct ciss_softc *sc);
static void ciss_spawn_notify_thread(struct ciss_softc *sc);
static void ciss_kill_notify_thread(struct ciss_softc *sc);
/* request submission/completion */
static int ciss_start(struct ciss_request *cr);
static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh);
static void ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh);
static void ciss_intr(void *arg);
static void ciss_perf_intr(void *arg);
static void ciss_perf_msi_intr(void *arg);
static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh);
static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func);
static int ciss_synch_request(struct ciss_request *cr, int timeout);
static int ciss_poll_request(struct ciss_request *cr, int timeout);
static int ciss_wait_request(struct ciss_request *cr, int timeout);
#if 0
static int ciss_abort_request(struct ciss_request *cr);
#endif
/* request queueing */
static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp);
static void ciss_preen_command(struct ciss_request *cr);
static void ciss_release_request(struct ciss_request *cr);
/* request helpers */
static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp,
int opcode, void **bufp, size_t bufsize);
static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc);
/* DMA map/unmap */
static int ciss_map_request(struct ciss_request *cr);
static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs,
int nseg, int error);
static void ciss_unmap_request(struct ciss_request *cr);
/* CAM interface */
static int ciss_cam_init(struct ciss_softc *sc);
static void ciss_cam_rescan_target(struct ciss_softc *sc,
int bus, int target);
static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb);
static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio);
static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio);
static void ciss_cam_poll(struct cam_sim *sim);
static void ciss_cam_complete(struct ciss_request *cr);
static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio);
static int ciss_name_device(struct ciss_softc *sc, int bus, int target);
/* periodic status monitoring */
static void ciss_periodic(void *arg);
static void ciss_nop_complete(struct ciss_request *cr);
static void ciss_disable_adapter(struct ciss_softc *sc);
static void ciss_notify_event(struct ciss_softc *sc);
static void ciss_notify_complete(struct ciss_request *cr);
static int ciss_notify_abort(struct ciss_softc *sc);
static int ciss_notify_abort_bmic(struct ciss_softc *sc);
static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn);
static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn);
static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn);
/* debugging output */
static void ciss_print_request(struct ciss_request *cr);
static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld);
static const char *ciss_name_ldrive_status(int status);
static int ciss_decode_ldrive_status(int status);
static const char *ciss_name_ldrive_org(int org);
static const char *ciss_name_command_status(int status);
/*
* PCI bus interface.
*/
static device_method_t ciss_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ciss_probe),
DEVMETHOD(device_attach, ciss_attach),
DEVMETHOD(device_detach, ciss_detach),
DEVMETHOD(device_shutdown, ciss_shutdown),
{ 0, 0 }
};
static driver_t ciss_pci_driver = {
"ciss",
ciss_methods,
sizeof(struct ciss_softc)
};
static devclass_t ciss_devclass;
DRIVER_MODULE(ciss, pci, ciss_pci_driver, ciss_devclass, 0, 0);
MODULE_DEPEND(ciss, cam, 1, 1, 1);
MODULE_DEPEND(ciss, pci, 1, 1, 1);
/*
* Control device interface.
*/
static d_open_t ciss_open;
static d_close_t ciss_close;
static d_ioctl_t ciss_ioctl;
static struct cdevsw ciss_cdevsw = {
.d_version = D_VERSION,
.d_flags = 0,
.d_open = ciss_open,
.d_close = ciss_close,
.d_ioctl = ciss_ioctl,
.d_name = "ciss",
};
/*
* This tunable can be set at boot time and controls whether physical devices
* that are marked hidden by the firmware should be exposed anyway.
*/
static unsigned int ciss_expose_hidden_physical = 0;
TUNABLE_INT("hw.ciss.expose_hidden_physical", &ciss_expose_hidden_physical);
static unsigned int ciss_nop_message_heartbeat = 0;
TUNABLE_INT("hw.ciss.nop_message_heartbeat", &ciss_nop_message_heartbeat);
/*
* This tunable can force a particular transport to be used:
* <= 0 : use default
* 1 : force simple
* 2 : force performant
*/
static int ciss_force_transport = 0;
TUNABLE_INT("hw.ciss.force_transport", &ciss_force_transport);
/*
* This tunable can force a particular interrupt delivery method to be used:
* <= 0 : use default
* 1 : force INTx
* 2 : force MSIX
*/
static int ciss_force_interrupt = 0;
TUNABLE_INT("hw.ciss.force_interrupt", &ciss_force_interrupt);
/************************************************************************
* CISS adapters amazingly don't have a defined programming interface
* value. (One could say some very despairing things about PCI and
* people just not getting the general idea.) So we are forced to
* stick with matching against subvendor/subdevice, and thus have to
* be updated for every new CISS adapter that appears.
*/
#define CISS_BOARD_UNKNWON 0
#define CISS_BOARD_SA5 1
#define CISS_BOARD_SA5B 2
#define CISS_BOARD_NOMSI (1<<4)
#define CISS_BOARD_SIMPLE (1<<5)
static struct
{
u_int16_t subvendor;
u_int16_t subdevice;
int flags;
char *desc;
} ciss_vendor_data[] = {
{ 0x0e11, 0x4070, CISS_BOARD_SA5|CISS_BOARD_NOMSI|CISS_BOARD_SIMPLE,
"Compaq Smart Array 5300" },
{ 0x0e11, 0x4080, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 5i" },
{ 0x0e11, 0x4082, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 532" },
{ 0x0e11, 0x4083, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "HP Smart Array 5312" },
{ 0x0e11, 0x4091, CISS_BOARD_SA5, "HP Smart Array 6i" },
{ 0x0e11, 0x409A, CISS_BOARD_SA5, "HP Smart Array 641" },
{ 0x0e11, 0x409B, CISS_BOARD_SA5, "HP Smart Array 642" },
{ 0x0e11, 0x409C, CISS_BOARD_SA5, "HP Smart Array 6400" },
{ 0x0e11, 0x409D, CISS_BOARD_SA5, "HP Smart Array 6400 EM" },
{ 0x103C, 0x3211, CISS_BOARD_SA5, "HP Smart Array E200i" },
{ 0x103C, 0x3212, CISS_BOARD_SA5, "HP Smart Array E200" },
{ 0x103C, 0x3213, CISS_BOARD_SA5, "HP Smart Array E200i" },
{ 0x103C, 0x3214, CISS_BOARD_SA5, "HP Smart Array E200i" },
{ 0x103C, 0x3215, CISS_BOARD_SA5, "HP Smart Array E200i" },
{ 0x103C, 0x3220, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3222, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3223, CISS_BOARD_SA5, "HP Smart Array P800" },
{ 0x103C, 0x3225, CISS_BOARD_SA5, "HP Smart Array P600" },
{ 0x103C, 0x3230, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3231, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3232, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3233, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3234, CISS_BOARD_SA5, "HP Smart Array P400" },
{ 0x103C, 0x3235, CISS_BOARD_SA5, "HP Smart Array P400i" },
{ 0x103C, 0x3236, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3237, CISS_BOARD_SA5, "HP Smart Array E500" },
{ 0x103C, 0x3238, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3239, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x323A, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x323B, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x323C, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x323D, CISS_BOARD_SA5, "HP Smart Array P700m" },
{ 0x103C, 0x3241, CISS_BOARD_SA5, "HP Smart Array P212" },
{ 0x103C, 0x3243, CISS_BOARD_SA5, "HP Smart Array P410" },
{ 0x103C, 0x3245, CISS_BOARD_SA5, "HP Smart Array P410i" },
{ 0x103C, 0x3247, CISS_BOARD_SA5, "HP Smart Array P411" },
{ 0x103C, 0x3249, CISS_BOARD_SA5, "HP Smart Array P812" },
{ 0x103C, 0x324A, CISS_BOARD_SA5, "HP Smart Array P712m" },
{ 0x103C, 0x324B, CISS_BOARD_SA5, "HP Smart Array" },
{ 0x103C, 0x3350, CISS_BOARD_SA5, "HP Smart Array P222" },
{ 0x103C, 0x3351, CISS_BOARD_SA5, "HP Smart Array P420" },
{ 0x103C, 0x3352, CISS_BOARD_SA5, "HP Smart Array P421" },
{ 0x103C, 0x3353, CISS_BOARD_SA5, "HP Smart Array P822" },
{ 0x103C, 0x3354, CISS_BOARD_SA5, "HP Smart Array P420i" },
{ 0x103C, 0x3355, CISS_BOARD_SA5, "HP Smart Array P220i" },
{ 0x103C, 0x3356, CISS_BOARD_SA5, "HP Smart Array P721m" },
{ 0x103C, 0x1920, CISS_BOARD_SA5, "HP Smart Array P430i" },
{ 0x103C, 0x1921, CISS_BOARD_SA5, "HP Smart Array P830i" },
{ 0x103C, 0x1922, CISS_BOARD_SA5, "HP Smart Array P430" },
{ 0x103C, 0x1923, CISS_BOARD_SA5, "HP Smart Array P431" },
{ 0x103C, 0x1924, CISS_BOARD_SA5, "HP Smart Array P830" },
{ 0x103C, 0x1926, CISS_BOARD_SA5, "HP Smart Array P731m" },
{ 0x103C, 0x1928, CISS_BOARD_SA5, "HP Smart Array P230i" },
{ 0x103C, 0x1929, CISS_BOARD_SA5, "HP Smart Array P530" },
{ 0x103C, 0x192A, CISS_BOARD_SA5, "HP Smart Array P531" },
{ 0x103C, 0x21BD, CISS_BOARD_SA5, "HP Smart Array P244br" },
{ 0x103C, 0x21BE, CISS_BOARD_SA5, "HP Smart Array P741m" },
{ 0x103C, 0x21BF, CISS_BOARD_SA5, "HP Smart Array H240ar" },
{ 0x103C, 0x21C0, CISS_BOARD_SA5, "HP Smart Array P440ar" },
{ 0x103C, 0x21C1, CISS_BOARD_SA5, "HP Smart Array P840ar" },
{ 0x103C, 0x21C2, CISS_BOARD_SA5, "HP Smart Array P440" },
{ 0x103C, 0x21C3, CISS_BOARD_SA5, "HP Smart Array P441" },
{ 0x103C, 0x21C5, CISS_BOARD_SA5, "HP Smart Array P841" },
{ 0x103C, 0x21C6, CISS_BOARD_SA5, "HP Smart Array H244br" },
{ 0x103C, 0x21C7, CISS_BOARD_SA5, "HP Smart Array H240" },
{ 0x103C, 0x21C8, CISS_BOARD_SA5, "HP Smart Array H241" },
{ 0x103C, 0x21CA, CISS_BOARD_SA5, "HP Smart Array P246br" },
{ 0x103C, 0x21CB, CISS_BOARD_SA5, "HP Smart Array P840" },
{ 0x103C, 0x21CC, CISS_BOARD_SA5, "HP Smart Array TBD" },
{ 0x103C, 0x21CD, CISS_BOARD_SA5, "HP Smart Array P240nr" },
{ 0x103C, 0x21CE, CISS_BOARD_SA5, "HP Smart Array H240nr" },
{ 0, 0, 0, NULL }
};
/************************************************************************
* Find a match for the device in our list of known adapters.
*/
static int
ciss_lookup(device_t dev)
{
int i;
for (i = 0; ciss_vendor_data[i].desc != NULL; i++)
if ((pci_get_subvendor(dev) == ciss_vendor_data[i].subvendor) &&
(pci_get_subdevice(dev) == ciss_vendor_data[i].subdevice)) {
return(i);
}
return(-1);
}
/************************************************************************
* Match a known CISS adapter.
*/
static int
ciss_probe(device_t dev)
{
int i;
i = ciss_lookup(dev);
if (i != -1) {
device_set_desc(dev, ciss_vendor_data[i].desc);
return(BUS_PROBE_DEFAULT);
}
return(ENOENT);
}
/************************************************************************
* Attach the driver to this adapter.
*/
static int
ciss_attach(device_t dev)
{
struct ciss_softc *sc;
int error;
debug_called(1);
#ifdef CISS_DEBUG
/* print structure/union sizes */
debug_struct(ciss_command);
debug_struct(ciss_header);
debug_union(ciss_device_address);
debug_struct(ciss_cdb);
debug_struct(ciss_report_cdb);
debug_struct(ciss_notify_cdb);
debug_struct(ciss_notify);
debug_struct(ciss_message_cdb);
debug_struct(ciss_error_info_pointer);
debug_struct(ciss_error_info);
debug_struct(ciss_sg_entry);
debug_struct(ciss_config_table);
debug_struct(ciss_bmic_cdb);
debug_struct(ciss_bmic_id_ldrive);
debug_struct(ciss_bmic_id_lstatus);
debug_struct(ciss_bmic_id_table);
debug_struct(ciss_bmic_id_pdrive);
debug_struct(ciss_bmic_blink_pdrive);
debug_struct(ciss_bmic_flush_cache);
debug_const(CISS_MAX_REQUESTS);
debug_const(CISS_MAX_LOGICAL);
debug_const(CISS_INTERRUPT_COALESCE_DELAY);
debug_const(CISS_INTERRUPT_COALESCE_COUNT);
debug_const(CISS_COMMAND_ALLOC_SIZE);
debug_const(CISS_COMMAND_SG_LENGTH);
debug_type(cciss_pci_info_struct);
debug_type(cciss_coalint_struct);
debug_type(cciss_coalint_struct);
debug_type(NodeName_type);
debug_type(NodeName_type);
debug_type(Heartbeat_type);
debug_type(BusTypes_type);
debug_type(FirmwareVer_type);
debug_type(DriverVer_type);
debug_type(IOCTL_Command_struct);
#endif
sc = device_get_softc(dev);
sc->ciss_dev = dev;
mtx_init(&sc->ciss_mtx, "cissmtx", NULL, MTX_DEF);
callout_init_mtx(&sc->ciss_periodic, &sc->ciss_mtx, 0);
/*
* Do PCI-specific init.
*/
if ((error = ciss_init_pci(sc)) != 0)
goto out;
/*
* Initialise driver queues.
*/
ciss_initq_free(sc);
ciss_initq_notify(sc);
/*
* Initialize device sysctls.
*/
ciss_init_sysctl(sc);
/*
* Initialise command/request pool.
*/
if ((error = ciss_init_requests(sc)) != 0)
goto out;
/*
* Get adapter information.
*/
if ((error = ciss_identify_adapter(sc)) != 0)
goto out;
/*
* Find all the physical devices.
*/
if ((error = ciss_init_physical(sc)) != 0)
goto out;
/*
* Build our private table of logical devices.
*/
if ((error = ciss_init_logical(sc)) != 0)
goto out;
/*
* Enable interrupts so that the CAM scan can complete.
*/
CISS_TL_SIMPLE_ENABLE_INTERRUPTS(sc);
/*
* Initialise the CAM interface.
*/
if ((error = ciss_cam_init(sc)) != 0)
goto out;
/*
* Start the heartbeat routine and event chain.
*/
ciss_periodic(sc);
/*
* Create the control device.
*/
sc->ciss_dev_t = make_dev(&ciss_cdevsw, device_get_unit(sc->ciss_dev),
UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
"ciss%d", device_get_unit(sc->ciss_dev));
sc->ciss_dev_t->si_drv1 = sc;
/*
* The adapter is running; synchronous commands can now sleep
* waiting for an interrupt to signal completion.
*/
sc->ciss_flags |= CISS_FLAG_RUNNING;
ciss_spawn_notify_thread(sc);
error = 0;
out:
if (error != 0) {
/* ciss_free() expects the mutex to be held */
mtx_lock(&sc->ciss_mtx);
ciss_free(sc);
}
return(error);
}
/************************************************************************
* Detach the driver from this adapter.
*/
static int
ciss_detach(device_t dev)
{
struct ciss_softc *sc = device_get_softc(dev);
debug_called(1);
mtx_lock(&sc->ciss_mtx);
if (sc->ciss_flags & CISS_FLAG_CONTROL_OPEN) {
mtx_unlock(&sc->ciss_mtx);
return (EBUSY);
}
/* flush adapter cache */
ciss_flush_adapter(sc);
/* release all resources. The mutex is released and freed here too. */
ciss_free(sc);
return(0);
}
/************************************************************************
* Prepare adapter for system shutdown.
*/
static int
ciss_shutdown(device_t dev)
{
struct ciss_softc *sc = device_get_softc(dev);
debug_called(1);
mtx_lock(&sc->ciss_mtx);
/* flush adapter cache */
ciss_flush_adapter(sc);
if (sc->ciss_soft_reset)
ciss_soft_reset(sc);
mtx_unlock(&sc->ciss_mtx);
return(0);
}
static void
ciss_init_sysctl(struct ciss_softc *sc)
{
SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->ciss_dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ciss_dev)),
OID_AUTO, "soft_reset", CTLFLAG_RW, &sc->ciss_soft_reset, 0, "");
}
/************************************************************************
* Perform PCI-specific attachment actions.
*/
static int
ciss_init_pci(struct ciss_softc *sc)
{
uintptr_t cbase, csize, cofs;
uint32_t method, supported_methods;
int error, sqmask, i;
void *intr;
debug_called(1);
/*
* Work out adapter type.
*/
i = ciss_lookup(sc->ciss_dev);
if (i < 0) {
ciss_printf(sc, "unknown adapter type\n");
return (ENXIO);
}
if (ciss_vendor_data[i].flags & CISS_BOARD_SA5) {
sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5;
} else if (ciss_vendor_data[i].flags & CISS_BOARD_SA5B) {
sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5B;
} else {
/*
* XXX Big hammer, masks/unmasks all possible interrupts. This should
* work on all hardware variants. Need to add code to handle the
* "controller crashed" interrupt bit that this unmasks.
*/
sqmask = ~0;
}
/*
* Allocate register window first (we need this to find the config
* struct).
*/
error = ENXIO;
sc->ciss_regs_rid = CISS_TL_SIMPLE_BAR_REGS;
if ((sc->ciss_regs_resource =
bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY,
&sc->ciss_regs_rid, RF_ACTIVE)) == NULL) {
ciss_printf(sc, "can't allocate register window\n");
return(ENXIO);
}
sc->ciss_regs_bhandle = rman_get_bushandle(sc->ciss_regs_resource);
sc->ciss_regs_btag = rman_get_bustag(sc->ciss_regs_resource);
/*
* Find the BAR holding the config structure. If it's not the one
* we already mapped for registers, map it too.
*/
sc->ciss_cfg_rid = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_BAR) & 0xffff;
if (sc->ciss_cfg_rid != sc->ciss_regs_rid) {
if ((sc->ciss_cfg_resource =
bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY,
&sc->ciss_cfg_rid, RF_ACTIVE)) == NULL) {
ciss_printf(sc, "can't allocate config window\n");
return(ENXIO);
}
cbase = (uintptr_t)rman_get_virtual(sc->ciss_cfg_resource);
csize = rman_get_end(sc->ciss_cfg_resource) -
rman_get_start(sc->ciss_cfg_resource) + 1;
} else {
cbase = (uintptr_t)rman_get_virtual(sc->ciss_regs_resource);
csize = rman_get_end(sc->ciss_regs_resource) -
rman_get_start(sc->ciss_regs_resource) + 1;
}
cofs = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_OFF);
/*
* Use the base/size/offset values we just calculated to
* sanity-check the config structure. If it's OK, point to it.
*/
if ((cofs + sizeof(struct ciss_config_table)) > csize) {
ciss_printf(sc, "config table outside window\n");
return(ENXIO);
}
sc->ciss_cfg = (struct ciss_config_table *)(cbase + cofs);
debug(1, "config struct at %p", sc->ciss_cfg);
/*
* Calculate the number of request structures/commands we are
* going to provide for this adapter.
*/
sc->ciss_max_requests = min(CISS_MAX_REQUESTS, sc->ciss_cfg->max_outstanding_commands);
/*
* Validate the config structure. If we supported other transport
* methods, we could select amongst them at this point in time.
*/
if (strncmp(sc->ciss_cfg->signature, "CISS", 4)) {
ciss_printf(sc, "config signature mismatch (got '%c%c%c%c')\n",
sc->ciss_cfg->signature[0], sc->ciss_cfg->signature[1],
sc->ciss_cfg->signature[2], sc->ciss_cfg->signature[3]);
return(ENXIO);
}
/*
* Select the mode of operation, prefer Performant.
*/
if (!(sc->ciss_cfg->supported_methods &
(CISS_TRANSPORT_METHOD_SIMPLE | CISS_TRANSPORT_METHOD_PERF))) {
ciss_printf(sc, "No supported transport layers: 0x%x\n",
sc->ciss_cfg->supported_methods);
}
switch (ciss_force_transport) {
case 1:
supported_methods = CISS_TRANSPORT_METHOD_SIMPLE;
break;
case 2:
supported_methods = CISS_TRANSPORT_METHOD_PERF;
break;
default:
/*
* Override the capabilities of the BOARD and specify SIMPLE
* MODE
*/
if (ciss_vendor_data[i].flags & CISS_BOARD_SIMPLE)
supported_methods = CISS_TRANSPORT_METHOD_SIMPLE;
else
supported_methods = sc->ciss_cfg->supported_methods;
break;
}
setup:
if ((supported_methods & CISS_TRANSPORT_METHOD_PERF) != 0) {
method = CISS_TRANSPORT_METHOD_PERF;
sc->ciss_perf = (struct ciss_perf_config *)(cbase + cofs +
sc->ciss_cfg->transport_offset);
if (ciss_init_perf(sc)) {
supported_methods &= ~method;
goto setup;
}
} else if (supported_methods & CISS_TRANSPORT_METHOD_SIMPLE) {
method = CISS_TRANSPORT_METHOD_SIMPLE;
} else {
ciss_printf(sc, "No supported transport methods: 0x%x\n",
sc->ciss_cfg->supported_methods);
return(ENXIO);
}
/*
* Tell it we're using the low 4GB of RAM. Set the default interrupt
* coalescing options.
*/
sc->ciss_cfg->requested_method = method;
sc->ciss_cfg->command_physlimit = 0;
sc->ciss_cfg->interrupt_coalesce_delay = CISS_INTERRUPT_COALESCE_DELAY;
sc->ciss_cfg->interrupt_coalesce_count = CISS_INTERRUPT_COALESCE_COUNT;
#ifdef __i386__
sc->ciss_cfg->host_driver |= CISS_DRIVER_SCSI_PREFETCH;
#endif
if (ciss_update_config(sc)) {
ciss_printf(sc, "adapter refuses to accept config update (IDBR 0x%x)\n",
CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR));
return(ENXIO);
}
if ((sc->ciss_cfg->active_method & method) == 0) {
supported_methods &= ~method;
if (supported_methods == 0) {
ciss_printf(sc, "adapter refuses to go into available transports "
"mode (0x%x, 0x%x)\n", supported_methods,
sc->ciss_cfg->active_method);
return(ENXIO);
} else
goto setup;
}
/*
* Wait for the adapter to come ready.
*/
if ((error = ciss_wait_adapter(sc)) != 0)
return(error);
/* Prepare to possibly use MSIX and/or PERFORMANT interrupts. Normal
* interrupts have a rid of 0; this will be overridden if MSIX is used.
*/
sc->ciss_irq_rid[0] = 0;
if (method == CISS_TRANSPORT_METHOD_PERF) {
ciss_printf(sc, "PERFORMANT Transport\n");
if ((ciss_force_interrupt != 1) && (ciss_setup_msix(sc) == 0)) {
intr = ciss_perf_msi_intr;
} else {
intr = ciss_perf_intr;
}
/* XXX The docs say that the 0x01 bit is only for SAS controllers.
* Unfortunately, there is no good way to know if this is a SAS
* controller. Hopefully enabling this bit universally will work OK.
* It seems to work fine for SA6i controllers.
*/
sc->ciss_interrupt_mask = CISS_TL_PERF_INTR_OPQ | CISS_TL_PERF_INTR_MSI;
} else {
ciss_printf(sc, "SIMPLE Transport\n");
/* MSIX doesn't seem to work in SIMPLE mode; only enable it if forced */
if (ciss_force_interrupt == 2)
/* If this fails, we automatically revert to INTx */
ciss_setup_msix(sc);
sc->ciss_perf = NULL;
intr = ciss_intr;
sc->ciss_interrupt_mask = sqmask;
}
/*
* Turn off interrupts before we go routing anything.
*/
CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc);
/*
* Allocate and set up our interrupt.
*/
if ((sc->ciss_irq_resource =
bus_alloc_resource_any(sc->ciss_dev, SYS_RES_IRQ, &sc->ciss_irq_rid[0],
RF_ACTIVE | RF_SHAREABLE)) == NULL) {
ciss_printf(sc, "can't allocate interrupt\n");
return(ENXIO);
}
if (bus_setup_intr(sc->ciss_dev, sc->ciss_irq_resource,
INTR_TYPE_CAM|INTR_MPSAFE, NULL, intr, sc,
&sc->ciss_intr)) {
ciss_printf(sc, "can't set up interrupt\n");
return(ENXIO);
}
/*
* Allocate the parent bus DMA tag appropriate for our PCI
* interface.
*
* Note that "simple" adapters can only address within a 32-bit
* span.
*/
if (bus_dma_tag_create(bus_get_dma_tag(sc->ciss_dev),/* PCI parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
BUS_SPACE_UNRESTRICTED, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->ciss_parent_dmat)) {
ciss_printf(sc, "can't allocate parent DMA tag\n");
return(ENOMEM);
}
/*
* Create DMA tag for mapping buffers into adapter-addressable
* space.
*/
if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
(CISS_MAX_SG_ELEMENTS - 1) * PAGE_SIZE, /* maxsize */
CISS_MAX_SG_ELEMENTS, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, &sc->ciss_mtx, /* lockfunc, lockarg */
&sc->ciss_buffer_dmat)) {
ciss_printf(sc, "can't allocate buffer DMA tag\n");
return(ENOMEM);
}
return(0);
}
/************************************************************************
* Setup MSI/MSIX operation (Performant only)
* Four interrupts are available, but we only use 1 right now. If MSI-X
* isn't available, try using MSI instead.
*/
static int
ciss_setup_msix(struct ciss_softc *sc)
{
int val, i;
/* Weed out devices that don't actually support MSI */
i = ciss_lookup(sc->ciss_dev);
if (ciss_vendor_data[i].flags & CISS_BOARD_NOMSI)
return (EINVAL);
/*
* Only need to use the minimum number of MSI vectors, as the driver
* doesn't support directed MSIX interrupts.
*/
val = pci_msix_count(sc->ciss_dev);
if (val < CISS_MSI_COUNT) {
val = pci_msi_count(sc->ciss_dev);
device_printf(sc->ciss_dev, "got %d MSI messages\n", val);
if (val < CISS_MSI_COUNT)
return (EINVAL);
}
val = MIN(val, CISS_MSI_COUNT);
if (pci_alloc_msix(sc->ciss_dev, &val) != 0) {
if (pci_alloc_msi(sc->ciss_dev, &val) != 0)
return (EINVAL);
}
sc->ciss_msi = val;
if (bootverbose)
ciss_printf(sc, "Using %d MSIX interrupt%s\n", val,
(val != 1) ? "s" : "");
for (i = 0; i < val; i++)
sc->ciss_irq_rid[i] = i + 1;
return (0);
}
/************************************************************************
* Setup the Performant structures.
*/
static int
ciss_init_perf(struct ciss_softc *sc)
{
struct ciss_perf_config *pc = sc->ciss_perf;
int reply_size;
/*
* Create the DMA tag for the reply queue.
*/
reply_size = sizeof(uint64_t) * sc->ciss_max_requests;
if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
reply_size, 1, /* maxsize, nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->ciss_reply_dmat)) {
ciss_printf(sc, "can't allocate reply DMA tag\n");
return(ENOMEM);
}
/*
* Allocate memory and make it available for DMA.
*/
if (bus_dmamem_alloc(sc->ciss_reply_dmat, (void **)&sc->ciss_reply,
BUS_DMA_NOWAIT, &sc->ciss_reply_map)) {
ciss_printf(sc, "can't allocate reply memory\n");
return(ENOMEM);
}
bus_dmamap_load(sc->ciss_reply_dmat, sc->ciss_reply_map, sc->ciss_reply,
reply_size, ciss_command_map_helper, &sc->ciss_reply_phys, 0);
bzero(sc->ciss_reply, reply_size);
sc->ciss_cycle = 0x1;
sc->ciss_rqidx = 0;
/*
* Preload the fetch table with common command sizes. This allows the
* hardware to not waste bus cycles for typical i/o commands, but also not
* tax the driver to be too exact in choosing sizes. The table is optimized
* for page-aligned i/o's, but since most i/o comes from the various pagers,
* it's a reasonable assumption to make.
*/
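/*
 * Worked example with assumed sizes (not the real structure sizes): if
 * sizeof(struct ciss_command) were 52 bytes and a ciss_sg_entry were 16
 * bytes, fetch_count[CISS_SG_FETCH_4] below would come out to
 * (52 + 4 * 16 + 15) / 16 = 8, i.e. eight 16-byte chunks fetched for a
 * command carrying four S/G entries.
 */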
pc->fetch_count[CISS_SG_FETCH_NONE] = (sizeof(struct ciss_command) + 15) / 16;
pc->fetch_count[CISS_SG_FETCH_1] =
(sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 1 + 15) / 16;
pc->fetch_count[CISS_SG_FETCH_2] =
(sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 2 + 15) / 16;
pc->fetch_count[CISS_SG_FETCH_4] =
(sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 4 + 15) / 16;
pc->fetch_count[CISS_SG_FETCH_8] =
(sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 8 + 15) / 16;
pc->fetch_count[CISS_SG_FETCH_16] =
(sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 16 + 15) / 16;
pc->fetch_count[CISS_SG_FETCH_32] =
(sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 32 + 15) / 16;
pc->fetch_count[CISS_SG_FETCH_MAX] = (CISS_COMMAND_ALLOC_SIZE + 15) / 16;
pc->rq_size = sc->ciss_max_requests; /* XXX less than the card supports? */
pc->rq_count = 1; /* XXX Hardcode for a single queue */
pc->rq_bank_hi = 0;
pc->rq_bank_lo = 0;
pc->rq[0].rq_addr_hi = 0x0;
pc->rq[0].rq_addr_lo = sc->ciss_reply_phys;
return(0);
}
/************************************************************************
* Wait for the adapter to come ready.
*/
static int
ciss_wait_adapter(struct ciss_softc *sc)
{
int i;
debug_called(1);
/*
* Wait for the adapter to come ready.
*/
if (!(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY)) {
ciss_printf(sc, "waiting for adapter to come ready...\n");
for (i = 0; !(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY); i++) {
DELAY(1000000); /* one second */
if (i > 30) {
ciss_printf(sc, "timed out waiting for adapter to come ready\n");
return(EIO);
}
}
}
return(0);
}
/************************************************************************
* Flush the adapter cache.
*/
static int
ciss_flush_adapter(struct ciss_softc *sc)
{
struct ciss_request *cr;
struct ciss_bmic_flush_cache *cbfc;
int error, command_status;
debug_called(1);
cr = NULL;
cbfc = NULL;
/*
* Build a BMIC request to flush the cache. We don't disable
* it, as we may be going to do more I/O (eg. we are emulating
* the Synchronise Cache command).
*/
if ((cbfc = malloc(sizeof(*cbfc), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) {
error = ENOMEM;
goto out;
}
if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_FLUSH_CACHE,
(void **)&cbfc, sizeof(*cbfc))) != 0)
goto out;
/*
* Submit the request and wait for it to complete.
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error sending BMIC FLUSH_CACHE command (%d)\n", error);
goto out;
}
/*
* Check response.
*/
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS:
break;
default:
ciss_printf(sc, "error flushing cache (%s)\n",
ciss_name_command_status(command_status));
error = EIO;
goto out;
}
out:
if (cbfc != NULL)
free(cbfc, CISS_MALLOC_CLASS);
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
static void
ciss_soft_reset(struct ciss_softc *sc)
{
struct ciss_request *cr = NULL;
struct ciss_command *cc;
int i, error = 0;
for (i = 0; i < sc->ciss_max_logical_bus; i++) {
/* only reset proxy controllers */
if (sc->ciss_controllers[i].physical.bus == 0)
continue;
if ((error = ciss_get_request(sc, &cr)) != 0)
break;
if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_SOFT_RESET,
NULL, 0)) != 0)
break;
cc = cr->cr_cc;
cc->header.address = sc->ciss_controllers[i];
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0)
break;
ciss_release_request(cr);
}
if (error)
ciss_printf(sc, "error resetting controller (%d)\n", error);
if (cr != NULL)
ciss_release_request(cr);
}
/************************************************************************
* Allocate memory for the adapter command structures, initialise
* the request structures.
*
* Note that the entire set of commands is allocated in a single
* contiguous slab.
*/
static int
ciss_init_requests(struct ciss_softc *sc)
{
struct ciss_request *cr;
int i;
debug_called(1);
if (bootverbose)
ciss_printf(sc, "using %d of %d available commands\n",
sc->ciss_max_requests, sc->ciss_cfg->max_outstanding_commands);
/*
* Create the DMA tag for commands.
*/
if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */
32, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
CISS_COMMAND_ALLOC_SIZE *
sc->ciss_max_requests, 1, /* maxsize, nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->ciss_command_dmat)) {
ciss_printf(sc, "can't allocate command DMA tag\n");
return(ENOMEM);
}
/*
* Allocate memory and make it available for DMA.
*/
if (bus_dmamem_alloc(sc->ciss_command_dmat, (void **)&sc->ciss_command,
BUS_DMA_NOWAIT, &sc->ciss_command_map)) {
ciss_printf(sc, "can't allocate command memory\n");
return(ENOMEM);
}
bus_dmamap_load(sc->ciss_command_dmat, sc->ciss_command_map,sc->ciss_command,
CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests,
ciss_command_map_helper, &sc->ciss_command_phys, 0);
bzero(sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests);
/*
* Set up the request and command structures, push requests onto
* the free queue.
*/
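/*
 * Worked example with an assumed (not actual) CISS_COMMAND_ALLOC_SIZE of
 * 512 bytes: request i = 3 in the loop below would use the command at
 * byte offset 3 * 512 = 1536 within the slab, with the matching bus
 * address sc->ciss_command_phys + 1536.
 */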
for (i = 1; i < sc->ciss_max_requests; i++) {
cr = &sc->ciss_request[i];
cr->cr_sc = sc;
cr->cr_tag = i;
cr->cr_cc = (struct ciss_command *)((uintptr_t)sc->ciss_command +
CISS_COMMAND_ALLOC_SIZE * i);
cr->cr_ccphys = sc->ciss_command_phys + CISS_COMMAND_ALLOC_SIZE * i;
bus_dmamap_create(sc->ciss_buffer_dmat, 0, &cr->cr_datamap);
ciss_enqueue_free(cr);
}
return(0);
}
static void
ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
uint32_t *addr;
addr = arg;
*addr = segs[0].ds_addr;
}
/************************************************************************
* Identify the adapter, print some information about it.
*/
static int
ciss_identify_adapter(struct ciss_softc *sc)
{
struct ciss_request *cr;
int error, command_status;
debug_called(1);
cr = NULL;
/*
* Get a request, allocate storage for the adapter data.
*/
if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_CTLR,
(void **)&sc->ciss_id,
sizeof(*sc->ciss_id))) != 0)
goto out;
/*
* Submit the request and wait for it to complete.
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error sending BMIC ID_CTLR command (%d)\n", error);
goto out;
}
/*
* Check response.
*/
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS: /* buffer right size */
break;
case CISS_CMD_STATUS_DATA_UNDERRUN:
case CISS_CMD_STATUS_DATA_OVERRUN:
ciss_printf(sc, "data over/underrun reading adapter information\n");
default:
ciss_printf(sc, "error reading adapter information (%s)\n",
ciss_name_command_status(command_status));
error = EIO;
goto out;
}
/* sanity-check reply */
if (!(sc->ciss_id->controller_flags & CONTROLLER_FLAGS_BIG_MAP_SUPPORT)) {
ciss_printf(sc, "adapter does not support BIG_MAP\n");
error = ENXIO;
goto out;
}
#if 0
/* XXX later revisions may not need this */
sc->ciss_flags |= CISS_FLAG_FAKE_SYNCH;
#endif
/* XXX only really required for old 5300 adapters? */
sc->ciss_flags |= CISS_FLAG_BMIC_ABORT;
/*
* Earlier controller specs do not contain these config
* entries, so assume that a 0 means it's an old controller and
* assign these values to the defaults that were established
* when this driver was developed for them.
*/
if (sc->ciss_cfg->max_logical_supported == 0)
sc->ciss_cfg->max_logical_supported = CISS_MAX_LOGICAL;
if (sc->ciss_cfg->max_physical_supported == 0)
sc->ciss_cfg->max_physical_supported = CISS_MAX_PHYSICAL;
/* print information */
if (bootverbose) {
ciss_printf(sc, " %d logical drive%s configured\n",
sc->ciss_id->configured_logical_drives,
(sc->ciss_id->configured_logical_drives == 1) ? "" : "s");
ciss_printf(sc, " firmware %4.4s\n", sc->ciss_id->running_firmware_revision);
ciss_printf(sc, " %d SCSI channels\n", sc->ciss_id->scsi_chip_count);
ciss_printf(sc, " signature '%.4s'\n", sc->ciss_cfg->signature);
ciss_printf(sc, " valence %d\n", sc->ciss_cfg->valence);
ciss_printf(sc, " supported I/O methods 0x%b\n",
sc->ciss_cfg->supported_methods,
"\20\1READY\2simple\3performant\4MEMQ\n");
ciss_printf(sc, " active I/O method 0x%b\n",
sc->ciss_cfg->active_method, "\20\2simple\3performant\4MEMQ\n");
ciss_printf(sc, " 4G page base 0x%08x\n",
sc->ciss_cfg->command_physlimit);
ciss_printf(sc, " interrupt coalesce delay %dus\n",
sc->ciss_cfg->interrupt_coalesce_delay);
ciss_printf(sc, " interrupt coalesce count %d\n",
sc->ciss_cfg->interrupt_coalesce_count);
ciss_printf(sc, " max outstanding commands %d\n",
sc->ciss_cfg->max_outstanding_commands);
ciss_printf(sc, " bus types 0x%b\n", sc->ciss_cfg->bus_types,
"\20\1ultra2\2ultra3\10fibre1\11fibre2\n");
ciss_printf(sc, " server name '%.16s'\n", sc->ciss_cfg->server_name);
ciss_printf(sc, " heartbeat 0x%x\n", sc->ciss_cfg->heartbeat);
ciss_printf(sc, " max logical logical volumes: %d\n", sc->ciss_cfg->max_logical_supported);
ciss_printf(sc, " max physical disks supported: %d\n", sc->ciss_cfg->max_physical_supported);
ciss_printf(sc, " max physical disks per logical volume: %d\n", sc->ciss_cfg->max_physical_per_logical);
ciss_printf(sc, " JBOD Support is %s\n", (sc->ciss_id->uiYetMoreControllerFlags & YMORE_CONTROLLER_FLAGS_JBOD_SUPPORTED) ?
"Available" : "Unavailable");
ciss_printf(sc, " JBOD Mode is %s\n", (sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED) ?
"Enabled" : "Disabled");
}
out:
if (error) {
if (sc->ciss_id != NULL) {
free(sc->ciss_id, CISS_MALLOC_CLASS);
sc->ciss_id = NULL;
}
}
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
/************************************************************************
* Helper routine for generating a list of logical and physical luns.
*/
static struct ciss_lun_report *
ciss_report_luns(struct ciss_softc *sc, int opcode, int nunits)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_report_cdb *crc;
struct ciss_lun_report *cll;
int command_status;
int report_size;
int error = 0;
debug_called(1);
cr = NULL;
cll = NULL;
/*
* Get a request, allocate storage for the address list.
*/
if ((error = ciss_get_request(sc, &cr)) != 0)
goto out;
report_size = sizeof(*cll) + nunits * sizeof(union ciss_device_address);
if ((cll = malloc(report_size, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) {
ciss_printf(sc, "can't allocate memory for lun report\n");
error = ENOMEM;
goto out;
}
/*
* Build the Report Logical/Physical LUNs command.
*/
cc = cr->cr_cc;
cr->cr_data = cll;
cr->cr_length = report_size;
cr->cr_flags = CISS_REQ_DATAIN;
cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL;
cc->header.address.physical.bus = 0;
cc->header.address.physical.target = 0;
cc->cdb.cdb_length = sizeof(*crc);
cc->cdb.type = CISS_CDB_TYPE_COMMAND;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
cc->cdb.direction = CISS_CDB_DIRECTION_READ;
cc->cdb.timeout = 30; /* XXX better suggestions? */
crc = (struct ciss_report_cdb *)&(cc->cdb.cdb[0]);
bzero(crc, sizeof(*crc));
crc->opcode = opcode;
crc->length = htonl(report_size); /* big-endian field */
cll->list_size = htonl(report_size - sizeof(*cll)); /* big-endian field */
/*
* Submit the request and wait for it to complete. (timeout
* here should be much greater than above)
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error sending %d LUN command (%d)\n", opcode, error);
goto out;
}
/*
* Check response. Note that data over/underrun is OK.
*/
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS: /* buffer right size */
case CISS_CMD_STATUS_DATA_UNDERRUN: /* buffer too large, not bad */
break;
case CISS_CMD_STATUS_DATA_OVERRUN:
ciss_printf(sc, "WARNING: more units than driver limit (%d)\n",
sc->ciss_cfg->max_logical_supported);
break;
default:
ciss_printf(sc, "error detecting logical drive configuration (%s)\n",
ciss_name_command_status(command_status));
error = EIO;
goto out;
}
ciss_release_request(cr);
cr = NULL;
out:
if (cr != NULL)
ciss_release_request(cr);
if (error && cll != NULL) {
free(cll, CISS_MALLOC_CLASS);
cll = NULL;
}
return(cll);
}
/************************************************************************
* Find logical drives on the adapter.
*/
static int
ciss_init_logical(struct ciss_softc *sc)
{
struct ciss_lun_report *cll;
int error = 0, i, j;
int ndrives;
debug_called(1);
cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS,
sc->ciss_cfg->max_logical_supported);
if (cll == NULL) {
error = ENXIO;
goto out;
}
/* sanity-check reply */
ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address));
if ((ndrives < 0) || (ndrives > sc->ciss_cfg->max_logical_supported)) {
ciss_printf(sc, "adapter claims to report absurd number of logical drives (%d > %d)\n",
ndrives, sc->ciss_cfg->max_logical_supported);
error = ENXIO;
goto out;
}
/*
* Save logical drive information.
*/
if (bootverbose) {
ciss_printf(sc, "%d logical drive%s\n",
ndrives, (ndrives > 1 || ndrives == 0) ? "s" : "");
}
sc->ciss_logical =
- mallocarray(sc->ciss_max_logical_bus, sizeof(struct ciss_ldrive *),
+ malloc(sc->ciss_max_logical_bus * sizeof(struct ciss_ldrive *),
CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO);
if (sc->ciss_logical == NULL) {
error = ENXIO;
goto out;
}
for (i = 0; i < sc->ciss_max_logical_bus; i++) {
sc->ciss_logical[i] =
- mallocarray(sc->ciss_cfg->max_logical_supported,
+ malloc(sc->ciss_cfg->max_logical_supported *
sizeof(struct ciss_ldrive),
CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO);
if (sc->ciss_logical[i] == NULL) {
error = ENXIO;
goto out;
}
for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++)
sc->ciss_logical[i][j].cl_status = CISS_LD_NONEXISTENT;
}
for (i = 0; i < sc->ciss_cfg->max_logical_supported; i++) {
if (i < ndrives) {
struct ciss_ldrive *ld;
int bus, target;
bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun);
target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun);
ld = &sc->ciss_logical[bus][target];
ld->cl_address = cll->lun[i];
ld->cl_controller = &sc->ciss_controllers[bus];
if (ciss_identify_logical(sc, ld) != 0)
continue;
/*
* If the drive has had media exchanged, we should bring it online.
*/
if (ld->cl_lstatus->media_exchanged)
ciss_accept_media(sc, ld);
}
}
out:
if (cll != NULL)
free(cll, CISS_MALLOC_CLASS);
return(error);
}
static int
ciss_init_physical(struct ciss_softc *sc)
{
struct ciss_lun_report *cll;
int error = 0, i;
int nphys;
int bus, target;
debug_called(1);
bus = 0;
target = 0;
cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS,
sc->ciss_cfg->max_physical_supported);
if (cll == NULL) {
error = ENXIO;
goto out;
}
nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address));
if (bootverbose) {
ciss_printf(sc, "%d physical device%s\n",
nphys, (nphys > 1 || nphys == 0) ? "s" : "");
}
/*
* Figure out the bus mapping.
* Logical buses include both the local logical bus for local arrays and
* proxy buses for remote arrays. Physical buses are numbered by the
* controller and represent physical buses that hold physical devices.
* We shift these bus numbers so that everything fits into a single flat
* numbering space for CAM. Logical buses occupy the first 32 CAM bus
* numbers, and the physical bus numbers are shifted to be above that.
* This results in the various driver arrays being indexed as follows:
*
* ciss_controllers[] - indexed by logical bus
* ciss_cam_sim[] - indexed by both logical and physical, with physical
* being shifted by 32.
* ciss_logical[][] - indexed by logical bus
* ciss_physical[][] - indexed by physical bus
*
* XXX This is getting more and more hackish. CISS really doesn't play
* well with a standard SCSI model; devices are addressed via magic
* cookies, not via b/t/l addresses. Since there is no way to store
* the cookie in the CAM device object, we have to keep these lookup
* tables handy so that the devices can be found quickly at the cost
* of wasting memory and having a convoluted lookup scheme. This
* driver should probably be converted to a block interface.
*/
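/*
 * Illustration (hypothetical numbers, not from a real configuration):
 * with two logical buses and one physical bus, the flat numbering
 * described above works out to
 *
 *	logical bus 0    -> CAM bus 0
 *	logical bus 1    -> CAM bus 1
 *	physical bus(es) -> CAM bus 32 and above
 *
 * so the CAM bus number alone determines whether a device is looked up
 * in ciss_logical[][] or ciss_physical[][].
 */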
/*
* If the L2 and L3 SCSI addresses are 0, this signifies a proxy
* controller. A proxy controller is another physical controller
* behind the primary PCI controller. We need to know about this
* so that BMIC commands can be properly targeted. There can be
* proxy controllers attached to a single PCI controller, so
* find the highest numbered one so the array can be properly
* sized.
*/
sc->ciss_max_logical_bus = 1;
for (i = 0; i < nphys; i++) {
if (cll->lun[i].physical.extra_address == 0) {
bus = cll->lun[i].physical.bus;
sc->ciss_max_logical_bus = max(sc->ciss_max_logical_bus, bus) + 1;
} else {
bus = CISS_EXTRA_BUS2(cll->lun[i].physical.extra_address);
sc->ciss_max_physical_bus = max(sc->ciss_max_physical_bus, bus);
}
}
sc->ciss_controllers =
- mallocarray(sc->ciss_max_logical_bus, sizeof(union ciss_device_address),
+ malloc(sc->ciss_max_logical_bus * sizeof (union ciss_device_address),
CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO);
if (sc->ciss_controllers == NULL) {
ciss_printf(sc, "Could not allocate memory for controller map\n");
error = ENOMEM;
goto out;
}
/* setup a map of controller addresses */
for (i = 0; i < nphys; i++) {
if (cll->lun[i].physical.extra_address == 0) {
sc->ciss_controllers[cll->lun[i].physical.bus] = cll->lun[i];
}
}
sc->ciss_physical =
- mallocarray(sc->ciss_max_physical_bus, sizeof(struct ciss_pdrive *),
+ malloc(sc->ciss_max_physical_bus * sizeof(struct ciss_pdrive *),
CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO);
if (sc->ciss_physical == NULL) {
ciss_printf(sc, "Could not allocate memory for physical device map\n");
error = ENOMEM;
goto out;
}
for (i = 0; i < sc->ciss_max_physical_bus; i++) {
sc->ciss_physical[i] =
malloc(sizeof(struct ciss_pdrive) * CISS_MAX_PHYSTGT,
CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO);
if (sc->ciss_physical[i] == NULL) {
ciss_printf(sc, "Could not allocate memory for target map\n");
error = ENOMEM;
goto out;
}
}
ciss_filter_physical(sc, cll);
out:
if (cll != NULL)
free(cll, CISS_MALLOC_CLASS);
return(error);
}
static int
ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll)
{
u_int32_t ea;
int i, nphys;
int bus, target;
nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address));
for (i = 0; i < nphys; i++) {
if (cll->lun[i].physical.extra_address == 0)
continue;
/*
* Filter out devices that we don't want. Level 3 LUNs could
* probably be supported, but the docs don't give enough of a
* hint to know how.
*
* The mode field of the physical address is likely set to have
* hard disks masked out. Honor it unless the user has overridden
* us with the tunable. We also munge the inquiry data for these
* disks so that they only show up as passthrough devices. Keeping
* them visible in this fashion is useful for doing things like
* flashing firmware.
*/
ea = cll->lun[i].physical.extra_address;
if ((CISS_EXTRA_BUS3(ea) != 0) || (CISS_EXTRA_TARGET3(ea) != 0) ||
(CISS_EXTRA_MODE2(ea) == 0x3))
continue;
if ((ciss_expose_hidden_physical == 0) &&
(cll->lun[i].physical.mode == CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL))
continue;
/*
* Note: CISS firmware numbers physical busses starting at '1', not
* '0'. This numbering is internal to the firmware and is only
* used as a hint here.
*/
bus = CISS_EXTRA_BUS2(ea) - 1;
target = CISS_EXTRA_TARGET2(ea);
sc->ciss_physical[bus][target].cp_address = cll->lun[i];
sc->ciss_physical[bus][target].cp_online = 1;
}
return (0);
}
static int
ciss_inquiry_logical(struct ciss_softc *sc, struct ciss_ldrive *ld)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct scsi_inquiry *inq;
int error;
int command_status;
cr = NULL;
bzero(&ld->cl_geometry, sizeof(ld->cl_geometry));
if ((error = ciss_get_request(sc, &cr)) != 0)
goto out;
cc = cr->cr_cc;
cr->cr_data = &ld->cl_geometry;
cr->cr_length = sizeof(ld->cl_geometry);
cr->cr_flags = CISS_REQ_DATAIN;
cc->header.address = ld->cl_address;
cc->cdb.cdb_length = 6;
cc->cdb.type = CISS_CDB_TYPE_COMMAND;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
cc->cdb.direction = CISS_CDB_DIRECTION_READ;
cc->cdb.timeout = 30;
inq = (struct scsi_inquiry *)&(cc->cdb.cdb[0]);
inq->opcode = INQUIRY;
inq->byte2 = SI_EVPD;
inq->page_code = CISS_VPD_LOGICAL_DRIVE_GEOMETRY;
scsi_ulto2b(sizeof(ld->cl_geometry), inq->length);
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error getting geometry (%d)\n", error);
goto out;
}
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS:
case CISS_CMD_STATUS_DATA_UNDERRUN:
break;
case CISS_CMD_STATUS_DATA_OVERRUN:
ciss_printf(sc, "WARNING: Data overrun\n");
break;
default:
ciss_printf(sc, "Error detecting logical drive geometry (%s)\n",
ciss_name_command_status(command_status));
break;
}
out:
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
/************************************************************************
* Identify a logical drive, initialise state related to it.
*/
static int
ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_bmic_cdb *cbc;
int error, command_status;
debug_called(1);
cr = NULL;
/*
* Build a BMIC request to fetch the drive ID.
*/
if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LDRIVE,
(void **)&ld->cl_ldrive,
sizeof(*ld->cl_ldrive))) != 0)
goto out;
cc = cr->cr_cc;
cc->header.address = *ld->cl_controller; /* target controller */
cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]);
cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun);
/*
* Submit the request and wait for it to complete.
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error sending BMIC LDRIVE command (%d)\n", error);
goto out;
}
/*
* Check response.
*/
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS: /* buffer right size */
break;
case CISS_CMD_STATUS_DATA_UNDERRUN:
case CISS_CMD_STATUS_DATA_OVERRUN:
ciss_printf(sc, "data over/underrun reading logical drive ID\n");
default:
ciss_printf(sc, "error reading logical drive ID (%s)\n",
ciss_name_command_status(command_status));
error = EIO;
goto out;
}
ciss_release_request(cr);
cr = NULL;
/*
* Build a CISS BMIC command to get the logical drive status.
*/
if ((error = ciss_get_ldrive_status(sc, ld)) != 0)
goto out;
/*
* Get the logical drive geometry.
*/
if ((error = ciss_inquiry_logical(sc, ld)) != 0)
goto out;
/*
* Print the drive's basic characteristics.
*/
if (bootverbose) {
ciss_printf(sc, "logical drive (b%dt%d): %s, %dMB ",
CISS_LUN_TO_BUS(ld->cl_address.logical.lun),
CISS_LUN_TO_TARGET(ld->cl_address.logical.lun),
ciss_name_ldrive_org(ld->cl_ldrive->fault_tolerance),
((ld->cl_ldrive->blocks_available / (1024 * 1024)) *
ld->cl_ldrive->block_size));
ciss_print_ldrive(sc, ld);
}
out:
if (error != 0) {
/* make the drive not-exist */
ld->cl_status = CISS_LD_NONEXISTENT;
if (ld->cl_ldrive != NULL) {
free(ld->cl_ldrive, CISS_MALLOC_CLASS);
ld->cl_ldrive = NULL;
}
if (ld->cl_lstatus != NULL) {
free(ld->cl_lstatus, CISS_MALLOC_CLASS);
ld->cl_lstatus = NULL;
}
}
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
/************************************************************************
* Get status for a logical drive.
*
* XXX should we also do this in response to Test Unit Ready?
*/
static int
ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_bmic_cdb *cbc;
int error, command_status;
/*
* Build a CISS BMIC command to get the logical drive status.
*/
if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LSTATUS,
(void **)&ld->cl_lstatus,
sizeof(*ld->cl_lstatus))) != 0)
goto out;
cc = cr->cr_cc;
cc->header.address = *ld->cl_controller; /* target controller */
cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]);
cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun);
/*
* Submit the request and wait for it to complete.
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error sending BMIC LSTATUS command (%d)\n", error);
goto out;
}
/*
* Check response.
*/
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS: /* buffer right size */
break;
case CISS_CMD_STATUS_DATA_UNDERRUN:
case CISS_CMD_STATUS_DATA_OVERRUN:
ciss_printf(sc, "data over/underrun reading logical drive status\n");
default:
ciss_printf(sc, "error reading logical drive status (%s)\n",
ciss_name_command_status(command_status));
error = EIO;
goto out;
}
/*
* Set the drive's summary status based on the returned status.
*
* XXX testing shows that a failed JBOD drive comes back at next
* boot in "queued for expansion" mode. WTF?
*/
ld->cl_status = ciss_decode_ldrive_status(ld->cl_lstatus->status);
out:
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
/************************************************************************
* Notify the adapter of a config update.
*/
static int
ciss_update_config(struct ciss_softc *sc)
{
int i;
debug_called(1);
CISS_TL_SIMPLE_WRITE(sc, CISS_TL_SIMPLE_IDBR, CISS_TL_SIMPLE_IDBR_CFG_TABLE);
for (i = 0; i < 1000; i++) {
if (!(CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR) &
CISS_TL_SIMPLE_IDBR_CFG_TABLE)) {
return(0);
}
DELAY(1000);
}
return(1);
}
/************************************************************************
* Accept new media into a logical drive.
*
* XXX The drive has previously been offline; it would be good if we
* could make sure it's not open right now.
*/
static int
ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_bmic_cdb *cbc;
int command_status;
int error = 0, ldrive;
ldrive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun);
debug(0, "bringing logical drive %d back online", ldrive);
/*
* Build a CISS BMIC command to bring the drive back online.
*/
if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ACCEPT_MEDIA,
NULL, 0)) != 0)
goto out;
cc = cr->cr_cc;
cc->header.address = *ld->cl_controller; /* target controller */
cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]);
cbc->log_drive = ldrive;
/*
* Submit the request and wait for it to complete.
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error sending BMIC ACCEPT MEDIA command (%d)\n", error);
goto out;
}
/*
* Check response.
*/
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS: /* all OK */
/* we should get a logical drive status changed event here */
break;
default:
ciss_printf(cr->cr_sc, "error accepting media into failed logical drive (%s)\n",
ciss_name_command_status(command_status));
break;
}
out:
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
/************************************************************************
* Release adapter resources.
*/
static void
ciss_free(struct ciss_softc *sc)
{
struct ciss_request *cr;
int i, j;
debug_called(1);
/* we're going away */
sc->ciss_flags |= CISS_FLAG_ABORTING;
/* terminate the periodic heartbeat routine */
callout_stop(&sc->ciss_periodic);
/* cancel the Event Notify chain */
ciss_notify_abort(sc);
ciss_kill_notify_thread(sc);
/* disconnect from CAM */
if (sc->ciss_cam_sim) {
for (i = 0; i < sc->ciss_max_logical_bus; i++) {
if (sc->ciss_cam_sim[i]) {
xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i]));
cam_sim_free(sc->ciss_cam_sim[i], 0);
}
}
for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus +
CISS_PHYSICAL_BASE; i++) {
if (sc->ciss_cam_sim[i]) {
xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i]));
cam_sim_free(sc->ciss_cam_sim[i], 0);
}
}
free(sc->ciss_cam_sim, CISS_MALLOC_CLASS);
}
if (sc->ciss_cam_devq)
cam_simq_free(sc->ciss_cam_devq);
/* remove the control device */
mtx_unlock(&sc->ciss_mtx);
if (sc->ciss_dev_t != NULL)
destroy_dev(sc->ciss_dev_t);
/* Final cleanup of the callout. */
callout_drain(&sc->ciss_periodic);
mtx_destroy(&sc->ciss_mtx);
/* free the controller data */
if (sc->ciss_id != NULL)
free(sc->ciss_id, CISS_MALLOC_CLASS);
/* release I/O resources */
if (sc->ciss_regs_resource != NULL)
bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY,
sc->ciss_regs_rid, sc->ciss_regs_resource);
if (sc->ciss_cfg_resource != NULL)
bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY,
sc->ciss_cfg_rid, sc->ciss_cfg_resource);
if (sc->ciss_intr != NULL)
bus_teardown_intr(sc->ciss_dev, sc->ciss_irq_resource, sc->ciss_intr);
if (sc->ciss_irq_resource != NULL)
bus_release_resource(sc->ciss_dev, SYS_RES_IRQ,
sc->ciss_irq_rid[0], sc->ciss_irq_resource);
if (sc->ciss_msi)
pci_release_msi(sc->ciss_dev);
while ((cr = ciss_dequeue_free(sc)) != NULL)
bus_dmamap_destroy(sc->ciss_buffer_dmat, cr->cr_datamap);
if (sc->ciss_buffer_dmat)
bus_dma_tag_destroy(sc->ciss_buffer_dmat);
/* destroy command memory and DMA tag */
if (sc->ciss_command != NULL) {
bus_dmamap_unload(sc->ciss_command_dmat, sc->ciss_command_map);
bus_dmamem_free(sc->ciss_command_dmat, sc->ciss_command, sc->ciss_command_map);
}
if (sc->ciss_command_dmat)
bus_dma_tag_destroy(sc->ciss_command_dmat);
if (sc->ciss_reply) {
bus_dmamap_unload(sc->ciss_reply_dmat, sc->ciss_reply_map);
bus_dmamem_free(sc->ciss_reply_dmat, sc->ciss_reply, sc->ciss_reply_map);
}
if (sc->ciss_reply_dmat)
bus_dma_tag_destroy(sc->ciss_reply_dmat);
/* destroy DMA tags */
if (sc->ciss_parent_dmat)
bus_dma_tag_destroy(sc->ciss_parent_dmat);
if (sc->ciss_logical) {
for (i = 0; i < sc->ciss_max_logical_bus; i++) {
for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) {
if (sc->ciss_logical[i][j].cl_ldrive)
free(sc->ciss_logical[i][j].cl_ldrive, CISS_MALLOC_CLASS);
if (sc->ciss_logical[i][j].cl_lstatus)
free(sc->ciss_logical[i][j].cl_lstatus, CISS_MALLOC_CLASS);
}
free(sc->ciss_logical[i], CISS_MALLOC_CLASS);
}
free(sc->ciss_logical, CISS_MALLOC_CLASS);
}
if (sc->ciss_physical) {
for (i = 0; i < sc->ciss_max_physical_bus; i++)
free(sc->ciss_physical[i], CISS_MALLOC_CLASS);
free(sc->ciss_physical, CISS_MALLOC_CLASS);
}
if (sc->ciss_controllers)
free(sc->ciss_controllers, CISS_MALLOC_CLASS);
}
/************************************************************************
* Give a command to the adapter.
*
* Note that this uses the simple transport layer directly. If we
* want to add support for other layers, we'll need a switch of some
* sort.
*
* Note that the simple transport layer has no way of refusing a
* command; we only have as many request structures as the adapter
* supports commands, so we don't have to check (this presumes that
* the adapter can handle commands as fast as we throw them at it).
*/
static int
ciss_start(struct ciss_request *cr)
{
struct ciss_command *cc; /* XXX debugging only */
int error;
cc = cr->cr_cc;
debug(2, "post command %d tag %d ", cr->cr_tag, cc->header.host_tag);
/*
* Map the request's data.
*/
if ((error = ciss_map_request(cr)))
return(error);
#if 0
ciss_print_request(cr);
#endif
return(0);
}
/************************************************************************
* Fetch completed request(s) from the adapter, queue them for
* completion handling.
*
* Note that this uses the simple transport layer directly. If we
* want to add support for other layers, we'll need a switch of some
* sort.
*
* Note that the simple transport mechanism does not require any
* reentrancy protection; the OPQ read is atomic. If there is a
* chance of a race with something else that might move the request
* off the busy list, then we will have to lock against that
* (eg. timeouts, etc.)
*/
static void
ciss_done(struct ciss_softc *sc, cr_qhead_t *qh)
{
struct ciss_request *cr;
struct ciss_command *cc;
u_int32_t tag, index;
debug_called(3);
/*
* Loop quickly taking requests from the adapter and moving them
* to the completed queue.
*/
for (;;) {
tag = CISS_TL_SIMPLE_FETCH_CMD(sc);
if (tag == CISS_TL_SIMPLE_OPQ_EMPTY)
break;
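/*
 * The low two bits of the tag are left clear at submit time (host_tag =
 * cr_tag << 2 in ciss_preen_command()) so the adapter can hand back
 * per-command status such as CISS_HDR_HOST_TAG_ERROR there; the request
 * index lives in the upper bits.
 */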
index = tag >> 2;
debug(2, "completed command %d%s", index,
(tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : "");
if (index >= sc->ciss_max_requests) {
ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag);
continue;
}
cr = &(sc->ciss_request[index]);
cc = cr->cr_cc;
cc->header.host_tag = tag; /* not updated by adapter */
ciss_enqueue_complete(cr, qh);
}
}
static void
ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh)
{
struct ciss_request *cr;
struct ciss_command *cc;
u_int32_t tag, index;
debug_called(3);
/*
* Loop quickly taking requests from the adapter and moving them
* to the completed queue.
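*
* In performant mode the adapter deposits completion tags in the
* ciss_reply ring; an entry is fresh only while its low cycle bit
* (CISS_CYCLE_MASK) matches our ciss_cycle, which we toggle each time
* ciss_rqidx wraps around the ring below.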
*/
for (;;) {
tag = sc->ciss_reply[sc->ciss_rqidx];
if ((tag & CISS_CYCLE_MASK) != sc->ciss_cycle)
break;
index = tag >> 2;
debug(2, "completed command %d%s\n", index,
(tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : "");
if (index < sc->ciss_max_requests) {
cr = &(sc->ciss_request[index]);
cc = cr->cr_cc;
cc->header.host_tag = tag; /* not updated by adapter */
ciss_enqueue_complete(cr, qh);
} else {
ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag);
}
if (++sc->ciss_rqidx == sc->ciss_max_requests) {
sc->ciss_rqidx = 0;
sc->ciss_cycle ^= 1;
}
}
}
/************************************************************************
* Take an interrupt from the adapter.
*/
static void
ciss_intr(void *arg)
{
cr_qhead_t qh;
struct ciss_softc *sc = (struct ciss_softc *)arg;
/*
* The only interrupt we recognise indicates that there are
* entries in the outbound post queue.
*/
STAILQ_INIT(&qh);
ciss_done(sc, &qh);
mtx_lock(&sc->ciss_mtx);
ciss_complete(sc, &qh);
mtx_unlock(&sc->ciss_mtx);
}
static void
ciss_perf_intr(void *arg)
{
struct ciss_softc *sc = (struct ciss_softc *)arg;
/* Clear the interrupt and flush the bridges. Docs say that the flush
* needs to be done twice, which doesn't seem right.
*/
CISS_TL_PERF_CLEAR_INT(sc);
CISS_TL_PERF_FLUSH_INT(sc);
ciss_perf_msi_intr(sc);
}
static void
ciss_perf_msi_intr(void *arg)
{
cr_qhead_t qh;
struct ciss_softc *sc = (struct ciss_softc *)arg;
STAILQ_INIT(&qh);
ciss_perf_done(sc, &qh);
mtx_lock(&sc->ciss_mtx);
ciss_complete(sc, &qh);
mtx_unlock(&sc->ciss_mtx);
}
/************************************************************************
* Process completed requests.
*
* Requests can be completed in three fashions:
*
* - by invoking a callback function (cr_complete is non-null)
* - by waking up a sleeper (cr_flags has CISS_REQ_SLEEP set)
* - by clearing the CISS_REQ_POLL flag in interrupt/timeout context
*/
static void
ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh)
{
struct ciss_request *cr;
debug_called(2);
/*
* Loop taking requests off the completed queue and performing
* completion processing on them.
*/
for (;;) {
if ((cr = ciss_dequeue_complete(sc, qh)) == NULL)
break;
ciss_unmap_request(cr);
if ((cr->cr_flags & CISS_REQ_BUSY) == 0)
ciss_printf(sc, "WARNING: completing non-busy request\n");
cr->cr_flags &= ~CISS_REQ_BUSY;
/*
* If the request has a callback, invoke it.
*/
if (cr->cr_complete != NULL) {
cr->cr_complete(cr);
continue;
}
/*
* If someone is sleeping on this request, wake them up.
*/
if (cr->cr_flags & CISS_REQ_SLEEP) {
cr->cr_flags &= ~CISS_REQ_SLEEP;
wakeup(cr);
continue;
}
/*
* If someone is polling this request for completion, signal.
*/
if (cr->cr_flags & CISS_REQ_POLL) {
cr->cr_flags &= ~CISS_REQ_POLL;
continue;
}
/*
* Give up and throw the request back on the free queue. This
* should never happen; resources will probably be lost.
*/
ciss_printf(sc, "WARNING: completed command with no submitter\n");
ciss_enqueue_free(cr);
}
}
/************************************************************************
* Report on the completion status of a request, and pass back SCSI
* and command status values.
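*
* (func) identifies the caller and is used only when logging an invalid
* command.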
*/
static int
_ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func)
{
struct ciss_command *cc;
struct ciss_error_info *ce;
debug_called(2);
cc = cr->cr_cc;
ce = (struct ciss_error_info *)&(cc->sg[0]);
/*
* We don't consider data under/overrun an error for the Report
* Logical/Physical LUNs and Inquiry commands.
*/
if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) &&
((ce->command_status == CISS_CMD_STATUS_DATA_OVERRUN) ||
(ce->command_status == CISS_CMD_STATUS_DATA_UNDERRUN)) &&
((cc->cdb.cdb[0] == CISS_OPCODE_REPORT_LOGICAL_LUNS) ||
(cc->cdb.cdb[0] == CISS_OPCODE_REPORT_PHYSICAL_LUNS) ||
(cc->cdb.cdb[0] == INQUIRY))) {
cc->header.host_tag &= ~CISS_HDR_HOST_TAG_ERROR;
debug(2, "ignoring irrelevant under/overrun error");
}
/*
* Check the command's error bit; if it is clear, there is no status to
* report and everything is OK.
*/
if (!(cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR)) {
if (scsi_status != NULL)
*scsi_status = SCSI_STATUS_OK;
if (command_status != NULL)
*command_status = CISS_CMD_STATUS_SUCCESS;
return(0);
} else {
if (command_status != NULL)
*command_status = ce->command_status;
if (scsi_status != NULL) {
if (ce->command_status == CISS_CMD_STATUS_TARGET_STATUS) {
*scsi_status = ce->scsi_status;
} else {
*scsi_status = -1;
}
}
if (bootverbose)
ciss_printf(cr->cr_sc, "command status 0x%x (%s) scsi status 0x%x\n",
ce->command_status, ciss_name_command_status(ce->command_status),
ce->scsi_status);
if (ce->command_status == CISS_CMD_STATUS_INVALID_COMMAND) {
ciss_printf(cr->cr_sc, "invalid command, offense size %d at %d, value 0x%x, function %s\n",
ce->additional_error_info.invalid_command.offense_size,
ce->additional_error_info.invalid_command.offense_offset,
ce->additional_error_info.invalid_command.offense_value,
func);
}
}
#if 0
ciss_print_request(cr);
#endif
return(1);
}
/************************************************************************
* Issue a request and don't return until it's completed.
*
* Depending on adapter status, we may poll or sleep waiting for
* completion.
*/
static int
ciss_synch_request(struct ciss_request *cr, int timeout)
{
if (cr->cr_sc->ciss_flags & CISS_FLAG_RUNNING) {
return(ciss_wait_request(cr, timeout));
} else {
return(ciss_poll_request(cr, timeout));
}
}
/************************************************************************
* Issue a request and poll for completion.
*
* Timeout in milliseconds.
*/
static int
ciss_poll_request(struct ciss_request *cr, int timeout)
{
cr_qhead_t qh;
struct ciss_softc *sc;
int error;
debug_called(2);
STAILQ_INIT(&qh);
sc = cr->cr_sc;
cr->cr_flags |= CISS_REQ_POLL;
if ((error = ciss_start(cr)) != 0)
return(error);
do {
if (sc->ciss_perf)
ciss_perf_done(sc, &qh);
else
ciss_done(sc, &qh);
ciss_complete(sc, &qh);
if (!(cr->cr_flags & CISS_REQ_POLL))
return(0);
DELAY(1000);
} while (timeout-- >= 0);
return(EWOULDBLOCK);
}
/************************************************************************
* Issue a request and sleep waiting for completion.
*
* Timeout in milliseconds. Note that a spurious wakeup will reset
* the timeout.
*/
static int
ciss_wait_request(struct ciss_request *cr, int timeout)
{
int error;
debug_called(2);
cr->cr_flags |= CISS_REQ_SLEEP;
if ((error = ciss_start(cr)) != 0)
return(error);
while ((cr->cr_flags & CISS_REQ_SLEEP) && (error != EWOULDBLOCK)) {
error = msleep_sbt(cr, &cr->cr_sc->ciss_mtx, PRIBIO, "cissREQ",
SBT_1MS * timeout, 0, 0);
}
return(error);
}
#if 0
/************************************************************************
* Abort a request.  Note that there is a potential race here with the
* request completing; the caller must deal with this.
*/
static int
ciss_abort_request(struct ciss_request *ar)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_message_cdb *cmc;
int error;
debug_called(1);
/* get a request */
if ((error = ciss_get_request(ar->cr_sc, &cr)) != 0)
return(error);
/* build the abort command */
cc = cr->cr_cc;
cc->header.address.mode.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; /* addressing? */
cc->header.address.physical.target = 0;
cc->header.address.physical.bus = 0;
cc->cdb.cdb_length = sizeof(*cmc);
cc->cdb.type = CISS_CDB_TYPE_MESSAGE;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
cc->cdb.direction = CISS_CDB_DIRECTION_NONE;
cc->cdb.timeout = 30;
cmc = (struct ciss_message_cdb *)&(cc->cdb.cdb[0]);
cmc->opcode = CISS_OPCODE_MESSAGE_ABORT;
cmc->type = CISS_MESSAGE_ABORT_TASK;
cmc->abort_tag = ar->cr_tag; /* endianness?? */
/*
* Send the request and wait for a response. If we believe we
* aborted the request OK, clear the flag that indicates it's
* running.
*/
error = ciss_synch_request(cr, 35 * 1000);
if (!error)
error = ciss_report_request(cr, NULL, NULL);
ciss_release_request(cr);
return(error);
}
#endif
/************************************************************************
* Fetch and initialise a request
*/
static int
ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp)
{
struct ciss_request *cr;
debug_called(2);
/*
* Get a request and clean it up.
*/
if ((cr = ciss_dequeue_free(sc)) == NULL)
return(ENOMEM);
cr->cr_data = NULL;
cr->cr_flags = 0;
cr->cr_complete = NULL;
cr->cr_private = NULL;
cr->cr_sg_tag = CISS_SG_MAX; /* Backstop to prevent accidents */
ciss_preen_command(cr);
*crp = cr;
return(0);
}
static void
ciss_preen_command(struct ciss_request *cr)
{
struct ciss_command *cc;
u_int32_t cmdphys;
/*
* Clean up the command structure.
*
* Note that we set up the error_info structure here, since the
* length can be overwritten by any command.
*/
cc = cr->cr_cc;
cc->header.sg_in_list = 0; /* kinda inefficient this way */
cc->header.sg_total = 0;
cc->header.host_tag = cr->cr_tag << 2;
cc->header.host_tag_zeroes = 0;
bzero(&(cc->sg[0]), CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command));
cmdphys = cr->cr_ccphys;
cc->error_info.error_info_address = cmdphys + sizeof(struct ciss_command);
cc->error_info.error_info_length = CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command);
}
/************************************************************************
* Release a request to the free list.
*/
static void
ciss_release_request(struct ciss_request *cr)
{
struct ciss_softc *sc;
debug_called(2);
sc = cr->cr_sc;
/* release the request to the free queue */
ciss_requeue_free(cr);
}
/************************************************************************
* Allocate a request that will be used to send a BMIC command. Do some
* of the common setup here to avoid duplicating it everywhere else.
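*
* If *bufp is NULL, a zeroed buffer of (bufsize) bytes is allocated here
* and the request is set up as a read from the adapter; if the caller
* supplies a buffer, its contents are instead written to the adapter.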
*/
static int
ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp,
int opcode, void **bufp, size_t bufsize)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_bmic_cdb *cbc;
void *buf;
int error;
int dataout;
debug_called(2);
cr = NULL;
buf = NULL;
/*
* Get a request.
*/
if ((error = ciss_get_request(sc, &cr)) != 0)
goto out;
/*
* Allocate data storage if requested, determine the data direction.
*/
dataout = 0;
if ((bufsize > 0) && (bufp != NULL)) {
if (*bufp == NULL) {
if ((buf = malloc(bufsize, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) {
error = ENOMEM;
goto out;
}
} else {
buf = *bufp;
dataout = 1; /* we are given a buffer, so we are writing */
}
}
/*
* Build a CISS BMIC command to get the logical drive ID.
*/
cr->cr_data = buf;
cr->cr_length = bufsize;
if (!dataout)
cr->cr_flags = CISS_REQ_DATAIN;
cc = cr->cr_cc;
cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL;
cc->header.address.physical.bus = 0;
cc->header.address.physical.target = 0;
cc->cdb.cdb_length = sizeof(*cbc);
cc->cdb.type = CISS_CDB_TYPE_COMMAND;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
cc->cdb.direction = dataout ? CISS_CDB_DIRECTION_WRITE : CISS_CDB_DIRECTION_READ;
cc->cdb.timeout = 0;
cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]);
bzero(cbc, sizeof(*cbc));
cbc->opcode = dataout ? CISS_ARRAY_CONTROLLER_WRITE : CISS_ARRAY_CONTROLLER_READ;
cbc->bmic_opcode = opcode;
cbc->size = htons((u_int16_t)bufsize);
out:
if (error) {
if (cr != NULL)
ciss_release_request(cr);
} else {
*crp = cr;
if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
*bufp = buf;
}
return(error);
}
/************************************************************************
* Handle a command passed in from userspace.
*/
static int
ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_error_info *ce;
int error = 0;
debug_called(1);
cr = NULL;
/*
* Get a request.
*/
while (ciss_get_request(sc, &cr) != 0)
msleep(sc, &sc->ciss_mtx, PPAUSE, "cissREQ", hz);
cc = cr->cr_cc;
/*
* Allocate an in-kernel databuffer if required, copy in user data.
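*
* The softc lock is dropped around the copyin/copyout below so that we
* never hold it across a fault on user memory.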
*/
mtx_unlock(&sc->ciss_mtx);
cr->cr_length = ioc->buf_size;
if (ioc->buf_size > 0) {
if ((cr->cr_data = malloc(ioc->buf_size, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) {
error = ENOMEM;
goto out_unlocked;
}
if ((error = copyin(ioc->buf, cr->cr_data, ioc->buf_size))) {
debug(0, "copyin: bad data buffer %p/%d", ioc->buf, ioc->buf_size);
goto out_unlocked;
}
}
/*
* Build the request based on the user command.
*/
bcopy(&ioc->LUN_info, &cc->header.address, sizeof(cc->header.address));
bcopy(&ioc->Request, &cc->cdb, sizeof(cc->cdb));
/* XXX anything else to populate here? */
mtx_lock(&sc->ciss_mtx);
/*
* Run the command.
*/
if ((error = ciss_synch_request(cr, 60 * 1000))) {
debug(0, "request failed - %d", error);
goto out;
}
/*
* Check to see if the command succeeded.
*/
ce = (struct ciss_error_info *)&(cc->sg[0]);
if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) == 0)
bzero(ce, sizeof(*ce));
/*
* Copy the results back to the user.
*/
bcopy(ce, &ioc->error_info, sizeof(*ce));
mtx_unlock(&sc->ciss_mtx);
if ((ioc->buf_size > 0) &&
(error = copyout(cr->cr_data, ioc->buf, ioc->buf_size))) {
debug(0, "copyout: bad data buffer %p/%d", ioc->buf, ioc->buf_size);
goto out_unlocked;
}
/* done OK */
error = 0;
out_unlocked:
mtx_lock(&sc->ciss_mtx);
out:
if ((cr != NULL) && (cr->cr_data != NULL))
free(cr->cr_data, CISS_MALLOC_CLASS);
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
/************************************************************************
* Map a request into bus-visible space, initialise the scatter/gather
* list.
*/
static int
ciss_map_request(struct ciss_request *cr)
{
struct ciss_softc *sc;
int error = 0;
debug_called(2);
sc = cr->cr_sc;
/* check that mapping is necessary */
if (cr->cr_flags & CISS_REQ_MAPPED)
return(0);
cr->cr_flags |= CISS_REQ_MAPPED;
bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map,
BUS_DMASYNC_PREWRITE);
if (cr->cr_data != NULL) {
if (cr->cr_flags & CISS_REQ_CCB)
error = bus_dmamap_load_ccb(sc->ciss_buffer_dmat,
cr->cr_datamap, cr->cr_data,
ciss_request_map_helper, cr, 0);
else
error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap,
cr->cr_data, cr->cr_length,
ciss_request_map_helper, cr, 0);
if (error != 0)
return (error);
} else {
/*
* Post the command to the adapter.
*/
cr->cr_sg_tag = CISS_SG_NONE;
cr->cr_flags |= CISS_REQ_BUSY;
if (sc->ciss_perf)
CISS_TL_PERF_POST_CMD(sc, cr);
else
CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys);
}
return(0);
}
static void
ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
struct ciss_command *cc;
struct ciss_request *cr;
struct ciss_softc *sc;
int i;
debug_called(2);
cr = (struct ciss_request *)arg;
sc = cr->cr_sc;
cc = cr->cr_cc;
for (i = 0; i < nseg; i++) {
cc->sg[i].address = segs[i].ds_addr;
cc->sg[i].length = segs[i].ds_len;
cc->sg[i].extension = 0;
}
/* we leave the s/g table entirely within the command */
cc->header.sg_in_list = nseg;
cc->header.sg_total = nseg;
if (cr->cr_flags & CISS_REQ_DATAIN)
bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREREAD);
if (cr->cr_flags & CISS_REQ_DATAOUT)
bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREWRITE);
if (nseg == 0)
cr->cr_sg_tag = CISS_SG_NONE;
else if (nseg == 1)
cr->cr_sg_tag = CISS_SG_1;
else if (nseg == 2)
cr->cr_sg_tag = CISS_SG_2;
else if (nseg <= 4)
cr->cr_sg_tag = CISS_SG_4;
else if (nseg <= 8)
cr->cr_sg_tag = CISS_SG_8;
else if (nseg <= 16)
cr->cr_sg_tag = CISS_SG_16;
else if (nseg <= 32)
cr->cr_sg_tag = CISS_SG_32;
else
cr->cr_sg_tag = CISS_SG_MAX;
/*
* Post the command to the adapter.
*/
cr->cr_flags |= CISS_REQ_BUSY;
if (sc->ciss_perf)
CISS_TL_PERF_POST_CMD(sc, cr);
else
CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys);
}
/************************************************************************
* Unmap a request from bus-visible space.
*/
static void
ciss_unmap_request(struct ciss_request *cr)
{
struct ciss_softc *sc;
debug_called(2);
sc = cr->cr_sc;
/* check that unmapping is necessary */
if ((cr->cr_flags & CISS_REQ_MAPPED) == 0)
return;
bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map,
BUS_DMASYNC_POSTWRITE);
if (cr->cr_data == NULL)
goto out;
if (cr->cr_flags & CISS_REQ_DATAIN)
bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTREAD);
if (cr->cr_flags & CISS_REQ_DATAOUT)
bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->ciss_buffer_dmat, cr->cr_datamap);
out:
cr->cr_flags &= ~CISS_REQ_MAPPED;
}
/************************************************************************
* Attach the driver to CAM.
*
* We put all the logical drives on a single SCSI bus.
*/
static int
ciss_cam_init(struct ciss_softc *sc)
{
int i, maxbus;
debug_called(1);
/*
* Allocate a devq. We can reuse this for the masked physical
* devices if we decide to export these as well.
*/
if ((sc->ciss_cam_devq = cam_simq_alloc(sc->ciss_max_requests - 2)) == NULL) {
ciss_printf(sc, "can't allocate CAM SIM queue\n");
return(ENOMEM);
}
/*
* Create a SIM.
*
* This naturally wastes a bit of memory. The alternative is to allocate
* and register each bus as it is found, and then track them on a linked
* list. Unfortunately, the driver has a few places where it needs to
* look up the SIM based solely on bus number, and it's unclear whether
* a list traversal would work for these situations.
*/
maxbus = max(sc->ciss_max_logical_bus, sc->ciss_max_physical_bus +
CISS_PHYSICAL_BASE);
- sc->ciss_cam_sim = mallocarray(maxbus, sizeof(struct cam_sim*),
+ sc->ciss_cam_sim = malloc(maxbus * sizeof(struct cam_sim*),
CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO);
if (sc->ciss_cam_sim == NULL) {
ciss_printf(sc, "can't allocate memory for controller SIM\n");
return(ENOMEM);
}
for (i = 0; i < sc->ciss_max_logical_bus; i++) {
if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll,
"ciss", sc,
device_get_unit(sc->ciss_dev),
&sc->ciss_mtx,
2,
sc->ciss_max_requests - 2,
sc->ciss_cam_devq)) == NULL) {
ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i);
return(ENOMEM);
}
/*
* Register bus with this SIM.
*/
mtx_lock(&sc->ciss_mtx);
if (i == 0 || sc->ciss_controllers[i].physical.bus != 0) {
if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) {
ciss_printf(sc, "can't register SCSI bus %d\n", i);
mtx_unlock(&sc->ciss_mtx);
return (ENXIO);
}
}
mtx_unlock(&sc->ciss_mtx);
}
for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus +
CISS_PHYSICAL_BASE; i++) {
if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll,
"ciss", sc,
device_get_unit(sc->ciss_dev),
&sc->ciss_mtx, 1,
sc->ciss_max_requests - 2,
sc->ciss_cam_devq)) == NULL) {
ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i);
return (ENOMEM);
}
mtx_lock(&sc->ciss_mtx);
if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) {
ciss_printf(sc, "can't register SCSI bus %d\n", i);
mtx_unlock(&sc->ciss_mtx);
return (ENXIO);
}
mtx_unlock(&sc->ciss_mtx);
}
return(0);
}
/************************************************************************
* Initiate a rescan of the 'logical devices' SIM
*/
static void
ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target)
{
union ccb *ccb;
debug_called(1);
if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
ciss_printf(sc, "rescan failed (can't allocate CCB)\n");
return;
}
if (xpt_create_path(&ccb->ccb_h.path, NULL,
cam_sim_path(sc->ciss_cam_sim[bus]),
target, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
ciss_printf(sc, "rescan failed (can't create path)\n");
xpt_free_ccb(ccb);
return;
}
xpt_rescan(ccb);
/* scan is now in progress */
}
/************************************************************************
* Handle requests coming from CAM
*/
static void
ciss_cam_action(struct cam_sim *sim, union ccb *ccb)
{
struct ciss_softc *sc;
struct ccb_scsiio *csio;
int bus, target;
int physical;
sc = cam_sim_softc(sim);
bus = cam_sim_bus(sim);
csio = (struct ccb_scsiio *)&ccb->csio;
target = csio->ccb_h.target_id;
physical = CISS_IS_PHYSICAL(bus);
switch (ccb->ccb_h.func_code) {
/* perform SCSI I/O */
case XPT_SCSI_IO:
if (!ciss_cam_action_io(sim, csio))
return;
break;
/* perform geometry calculations */
case XPT_CALC_GEOMETRY:
{
struct ccb_calc_geometry *ccg = &ccb->ccg;
struct ciss_ldrive *ld;
debug(1, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
ld = NULL;
if (!physical)
ld = &sc->ciss_logical[bus][target];
/*
* Use the cached geometry settings unless the fault tolerance
* is invalid.
*/
if (physical || ld->cl_geometry.fault_tolerance == 0xFF) {
u_int32_t secs_per_cylinder;
ccg->heads = 255;
ccg->secs_per_track = 32;
secs_per_cylinder = ccg->heads * ccg->secs_per_track;
ccg->cylinders = ccg->volume_size / secs_per_cylinder;
} else {
ccg->heads = ld->cl_geometry.heads;
ccg->secs_per_track = ld->cl_geometry.sectors;
ccg->cylinders = ntohs(ld->cl_geometry.cylinders);
}
ccb->ccb_h.status = CAM_REQ_CMP;
break;
}
/* handle path attribute inquiry */
case XPT_PATH_INQ:
{
struct ccb_pathinq *cpi = &ccb->cpi;
int sg_length;
debug(1, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
cpi->version_num = 1;
cpi->hba_inquiry = PI_TAG_ABLE; /* XXX is this correct? */
cpi->target_sprt = 0;
cpi->hba_misc = 0;
cpi->max_target = sc->ciss_cfg->max_logical_supported;
cpi->max_lun = 0; /* 'logical drive' channel only */
cpi->initiator_id = sc->ciss_cfg->max_logical_supported;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "CISS", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_2;
if (sc->ciss_cfg->max_sg_length == 0) {
sg_length = 17;
} else {
/* XXX Fix for ZMR cards that advertise max_sg_length == 32
* Confusing bit here. max_sg_length is usually a power of 2. We always
* need to subtract 1 to account for partial pages. Then we need to
* align on a valid PAGE_SIZE so we round down to the nearest power of 2.
* Add 1 so we can then subtract it out in the assignment to maxio.
* The reason for all these shenanigans is to produce a maxio value that
* results in I/O operations to the volumes that perform consistently
* and well.
*/
sg_length = sc->ciss_cfg->max_sg_length - 1;
sg_length = (1 << (fls(sg_length) - 1)) + 1;
}
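/*
* Worked example (assuming PAGE_SIZE == 4096 and CISS_MAX_SG_ELEMENTS is
* at least 17): max_sg_length == 32 gives sg_length = 31, fls(31) == 5,
* so sg_length becomes (1 << 4) + 1 = 17 and maxio = (17 - 1) * 4096,
* i.e. 64KB.
*/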
cpi->maxio = (min(CISS_MAX_SG_ELEMENTS, sg_length) - 1) * PAGE_SIZE;
ccb->ccb_h.status = CAM_REQ_CMP;
break;
}
case XPT_GET_TRAN_SETTINGS:
{
struct ccb_trans_settings *cts = &ccb->cts;
int bus, target;
struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
bus = cam_sim_bus(sim);
target = cts->ccb_h.target_id;
debug(1, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target);
/* disconnect always OK */
cts->protocol = PROTO_SCSI;
cts->protocol_version = SCSI_REV_2;
cts->transport = XPORT_SPI;
cts->transport_version = 2;
spi->valid = CTS_SPI_VALID_DISC;
spi->flags = CTS_SPI_FLAGS_DISC_ENB;
scsi->valid = CTS_SCSI_VALID_TQ;
scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
cts->ccb_h.status = CAM_REQ_CMP;
break;
}
default: /* we can't do this */
debug(1, "unspported func_code = 0x%x", ccb->ccb_h.func_code);
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
xpt_done(ccb);
}
/************************************************************************
* Handle a CAM SCSI I/O request.
*/
static int
ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
{
struct ciss_softc *sc;
int bus, target;
struct ciss_request *cr;
struct ciss_command *cc;
int error;
sc = cam_sim_softc(sim);
bus = cam_sim_bus(sim);
target = csio->ccb_h.target_id;
debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun);
/* check that the CDB pointer is not to a physical address */
if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) {
debug(3, " CDB pointer is to physical address");
csio->ccb_h.status = CAM_REQ_CMP_ERR;
}
/* abandon aborted ccbs or those that have failed validation */
if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
debug(3, "abandoning CCB due to abort/validation failure");
return(EINVAL);
}
/* handle emulation of some SCSI commands ourself */
if (ciss_cam_emulate(sc, csio))
return(0);
/*
* Get a request to manage this command. If we can't, return the
* ccb, freeze the queue and flag so that we unfreeze it when a
* request completes.
*/
if ((error = ciss_get_request(sc, &cr)) != 0) {
xpt_freeze_simq(sim, 1);
sc->ciss_flags |= CISS_FLAG_BUSY;
csio->ccb_h.status |= CAM_REQUEUE_REQ;
return(error);
}
/*
* Build the command.
*/
cc = cr->cr_cc;
cr->cr_data = csio;
cr->cr_length = csio->dxfer_len;
cr->cr_complete = ciss_cam_complete;
cr->cr_private = csio;
/*
* Target the right logical volume.
*/
if (CISS_IS_PHYSICAL(bus))
cc->header.address =
sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_address;
else
cc->header.address =
sc->ciss_logical[bus][target].cl_address;
cc->cdb.cdb_length = csio->cdb_len;
cc->cdb.type = CISS_CDB_TYPE_COMMAND;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; /* XXX ordered tags? */
if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
cr->cr_flags = CISS_REQ_DATAOUT | CISS_REQ_CCB;
cc->cdb.direction = CISS_CDB_DIRECTION_WRITE;
} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
cr->cr_flags = CISS_REQ_DATAIN | CISS_REQ_CCB;
cc->cdb.direction = CISS_CDB_DIRECTION_READ;
} else {
cr->cr_data = NULL;
cr->cr_flags = 0;
cc->cdb.direction = CISS_CDB_DIRECTION_NONE;
}
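/*
* CAM expresses ccb_h.timeout in milliseconds; the CISS CDB timeout field
* is in seconds here, so convert and round up to at least one second.
*/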
cc->cdb.timeout = (csio->ccb_h.timeout / 1000) + 1;
if (csio->ccb_h.flags & CAM_CDB_POINTER) {
bcopy(csio->cdb_io.cdb_ptr, &cc->cdb.cdb[0], csio->cdb_len);
} else {
bcopy(csio->cdb_io.cdb_bytes, &cc->cdb.cdb[0], csio->cdb_len);
}
/*
* Submit the request to the adapter.
*
* Note that this may fail if we're unable to map the request (and
* if we ever learn a transport layer other than simple, may fail
* if the adapter rejects the command).
*/
if ((error = ciss_start(cr)) != 0) {
xpt_freeze_simq(sim, 1);
csio->ccb_h.status |= CAM_RELEASE_SIMQ;
if (error == EINPROGRESS) {
error = 0;
} else {
csio->ccb_h.status |= CAM_REQUEUE_REQ;
ciss_release_request(cr);
}
return(error);
}
return(0);
}
/************************************************************************
* Emulate SCSI commands the adapter doesn't handle as we might like.
*/
static int
ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio)
{
int bus, target;
u_int8_t opcode;
target = csio->ccb_h.target_id;
bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path));
opcode = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
*(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0];
if (CISS_IS_PHYSICAL(bus)) {
if (sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_online != 1) {
csio->ccb_h.status |= CAM_SEL_TIMEOUT;
xpt_done((union ccb *)csio);
return(1);
} else
return(0);
}
/*
* Handle requests for volumes that don't exist or are not online.
* A selection timeout is slightly better than an illegal request.
* Other errors might be better.
*/
if (sc->ciss_logical[bus][target].cl_status != CISS_LD_ONLINE) {
csio->ccb_h.status |= CAM_SEL_TIMEOUT;
xpt_done((union ccb *)csio);
return(1);
}
/* if we have to fake Synchronise Cache */
if (sc->ciss_flags & CISS_FLAG_FAKE_SYNCH) {
/*
* If this is a Synchronise Cache command, typically issued when
* a device is closed, flush the adapter and complete now.
*/
if (((csio->ccb_h.flags & CAM_CDB_POINTER) ?
*(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
ciss_flush_adapter(sc);
csio->ccb_h.status |= CAM_REQ_CMP;
xpt_done((union ccb *)csio);
return(1);
}
}
/*
* A CISS target can only ever have a single LUN.  REPORT_LUNS callers are
* required to pre-allocate space for at least one LUN entry, so set the
* least significant byte of the (big-endian) LUN list length to 8,
* indicating exactly one 8-byte LUN entry.  Emulating the command here
* also keeps a CDB-error warning off the console. swb
*/
if (opcode == REPORT_LUNS && csio->dxfer_len > 0) {
csio->data_ptr[3] = 8;
csio->ccb_h.status |= CAM_REQ_CMP;
xpt_done((union ccb *)csio);
return(1);
}
return(0);
}
/************************************************************************
* Check for possibly-completed commands.
*/
static void
ciss_cam_poll(struct cam_sim *sim)
{
cr_qhead_t qh;
struct ciss_softc *sc = cam_sim_softc(sim);
debug_called(2);
STAILQ_INIT(&qh);
if (sc->ciss_perf)
ciss_perf_done(sc, &qh);
else
ciss_done(sc, &qh);
ciss_complete(sc, &qh);
}
/************************************************************************
* Handle completion of a command - pass results back through the CCB
*/
static void
ciss_cam_complete(struct ciss_request *cr)
{
struct ciss_softc *sc;
struct ciss_command *cc;
struct ciss_error_info *ce;
struct ccb_scsiio *csio;
int scsi_status;
int command_status;
debug_called(2);
sc = cr->cr_sc;
cc = cr->cr_cc;
ce = (struct ciss_error_info *)&(cc->sg[0]);
csio = (struct ccb_scsiio *)cr->cr_private;
/*
* Extract status values from request.
*/
ciss_report_request(cr, &command_status, &scsi_status);
csio->scsi_status = scsi_status;
/*
* Handle specific SCSI status values.
*/
switch(scsi_status) {
/* no status due to adapter error */
case -1:
debug(0, "adapter error");
csio->ccb_h.status |= CAM_REQ_CMP_ERR;
break;
/* no status due to command completed OK */
case SCSI_STATUS_OK: /* CISS_SCSI_STATUS_GOOD */
debug(2, "SCSI_STATUS_OK");
csio->ccb_h.status |= CAM_REQ_CMP;
break;
/* check condition, sense data included */
case SCSI_STATUS_CHECK_COND: /* CISS_SCSI_STATUS_CHECK_CONDITION */
debug(0, "SCSI_STATUS_CHECK_COND sense size %d resid %d\n",
ce->sense_length, ce->residual_count);
bzero(&csio->sense_data, SSD_FULL_SIZE);
bcopy(&ce->sense_info[0], &csio->sense_data, ce->sense_length);
if (csio->sense_len > ce->sense_length)
csio->sense_resid = csio->sense_len - ce->sense_length;
else
csio->sense_resid = 0;
csio->resid = ce->residual_count;
csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
#ifdef CISS_DEBUG
{
struct scsi_sense_data *sns = (struct scsi_sense_data *)&ce->sense_info[0];
debug(0, "sense key %x", scsi_get_sense_key(sns, csio->sense_len -
csio->sense_resid, /*show_errors*/ 1));
}
#endif
break;
case SCSI_STATUS_BUSY: /* CISS_SCSI_STATUS_BUSY */
debug(0, "SCSI_STATUS_BUSY");
csio->ccb_h.status |= CAM_SCSI_BUSY;
break;
default:
debug(0, "unknown status 0x%x", csio->scsi_status);
csio->ccb_h.status |= CAM_REQ_CMP_ERR;
break;
}
/* handle post-command fixup */
ciss_cam_complete_fixup(sc, csio);
ciss_release_request(cr);
if (sc->ciss_flags & CISS_FLAG_BUSY) {
sc->ciss_flags &= ~CISS_FLAG_BUSY;
if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
else
csio->ccb_h.status |= CAM_RELEASE_SIMQ;
}
xpt_done((union ccb *)csio);
}
/********************************************************************************
* Fix up the result of some commands here.
*/
static void
ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio)
{
struct scsi_inquiry_data *inq;
struct ciss_ldrive *cl;
uint8_t *cdb;
int bus, target;
cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
if (cdb[0] == INQUIRY &&
(cdb[1] & SI_EVPD) == 0 &&
(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
inq = (struct scsi_inquiry_data *)csio->data_ptr;
target = csio->ccb_h.target_id;
bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path));
/*
* If the controller is in JBOD mode, there are no logical volumes.
* Let the disks be probed and dealt with via CAM. Else, mask off
* the physical disks and setup the parts of the inq structure for
* the logical volume. swb
*/
if( !(sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED)){
if (CISS_IS_PHYSICAL(bus)) {
if (SID_TYPE(inq) == T_DIRECT)
inq->device = (inq->device & 0xe0) | T_NODEVICE;
return;
}
cl = &sc->ciss_logical[bus][target];
padstr(inq->vendor, "HP",
SID_VENDOR_SIZE);
padstr(inq->product,
ciss_name_ldrive_org(cl->cl_ldrive->fault_tolerance),
SID_PRODUCT_SIZE);
padstr(inq->revision,
ciss_name_ldrive_status(cl->cl_lstatus->status),
SID_REVISION_SIZE);
}
}
}
/********************************************************************************
* Name the device at (target)
*
* XXX is this strictly correct?
*/
static int
ciss_name_device(struct ciss_softc *sc, int bus, int target)
{
struct cam_periph *periph;
struct cam_path *path;
int status;
if (CISS_IS_PHYSICAL(bus))
return (0);
status = xpt_create_path(&path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]),
target, 0);
if (status == CAM_REQ_CMP) {
xpt_path_lock(path);
periph = cam_periph_find(path, NULL);
xpt_path_unlock(path);
xpt_free_path(path);
if (periph != NULL) {
sprintf(sc->ciss_logical[bus][target].cl_name, "%s%d",
periph->periph_name, periph->unit_number);
return(0);
}
}
sc->ciss_logical[bus][target].cl_name[0] = 0;
return(ENOENT);
}
/************************************************************************
* Periodic status monitoring.
*/
static void
ciss_periodic(void *arg)
{
struct ciss_softc *sc;
struct ciss_request *cr = NULL;
struct ciss_command *cc = NULL;
int error = 0;
debug_called(1);
sc = (struct ciss_softc *)arg;
/*
* Check the adapter heartbeat.
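*
* The adapter is expected to advance the heartbeat counter in the config
* table while it is healthy; if the value has not moved after three
* consecutive passes (CISS_HEARTBEAT_RATE seconds apart) we give up and
* disable the adapter.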
*/
if (sc->ciss_cfg->heartbeat == sc->ciss_heartbeat) {
sc->ciss_heart_attack++;
debug(0, "adapter heart attack in progress 0x%x/%d",
sc->ciss_heartbeat, sc->ciss_heart_attack);
if (sc->ciss_heart_attack == 3) {
ciss_printf(sc, "ADAPTER HEARTBEAT FAILED\n");
ciss_disable_adapter(sc);
return;
}
} else {
sc->ciss_heartbeat = sc->ciss_cfg->heartbeat;
sc->ciss_heart_attack = 0;
debug(3, "new heartbeat 0x%x", sc->ciss_heartbeat);
}
/*
* Send the NOP message and wait for a response.
*/
if (ciss_nop_message_heartbeat != 0 && (error = ciss_get_request(sc, &cr)) == 0) {
cc = cr->cr_cc;
cr->cr_complete = ciss_nop_complete;
cc->cdb.cdb_length = 1;
cc->cdb.type = CISS_CDB_TYPE_MESSAGE;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
cc->cdb.direction = CISS_CDB_DIRECTION_WRITE;
cc->cdb.timeout = 0;
cc->cdb.cdb[0] = CISS_OPCODE_MESSAGE_NOP;
if ((error = ciss_start(cr)) != 0) {
ciss_printf(sc, "SENDING NOP MESSAGE FAILED\n");
}
}
/*
* If the notify event request has died for some reason, or has
* not started yet, restart it.
*/
if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) {
debug(0, "(re)starting Event Notify chain");
ciss_notify_event(sc);
}
/*
* Reschedule.
*/
callout_reset(&sc->ciss_periodic, CISS_HEARTBEAT_RATE * hz, ciss_periodic, sc);
}
static void
ciss_nop_complete(struct ciss_request *cr)
{
struct ciss_softc *sc;
static int first_time = 1;
sc = cr->cr_sc;
if (ciss_report_request(cr, NULL, NULL) != 0) {
if (first_time == 1) {
first_time = 0;
ciss_printf(sc, "SENDING NOP MESSAGE FAILED (not logging anymore)\n");
}
}
ciss_release_request(cr);
}
/************************************************************************
* Disable the adapter.
*
* All requests in the completed queue are failed with a hardware error.
* This will cause failover in a multipath configuration.
*/
static void
ciss_disable_adapter(struct ciss_softc *sc)
{
cr_qhead_t qh;
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_error_info *ce;
int i;
CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc);
pci_disable_busmaster(sc->ciss_dev);
sc->ciss_flags &= ~CISS_FLAG_RUNNING;
STAILQ_INIT(&qh); /* the local completion queue must be initialised before we enqueue to it */
for (i = 1; i < sc->ciss_max_requests; i++) {
cr = &sc->ciss_request[i];
if ((cr->cr_flags & CISS_REQ_BUSY) == 0)
continue;
cc = cr->cr_cc;
ce = (struct ciss_error_info *)&(cc->sg[0]);
ce->command_status = CISS_CMD_STATUS_HARDWARE_ERROR;
ciss_enqueue_complete(cr, &qh);
}
for (;;) {
if ((cr = ciss_dequeue_complete(sc, &qh)) == NULL)
break;
/*
* If the request has a callback, invoke it.
*/
if (cr->cr_complete != NULL) {
cr->cr_complete(cr);
continue;
}
/*
* If someone is sleeping on this request, wake them up.
*/
if (cr->cr_flags & CISS_REQ_SLEEP) {
cr->cr_flags &= ~CISS_REQ_SLEEP;
wakeup(cr);
continue;
}
}
}
/************************************************************************
* Request a notification response from the adapter.
*
* If (cr) is NULL, this is the first request of the adapter, so
* reset the adapter's message pointer and start with the oldest
* message available.
*/
static void
ciss_notify_event(struct ciss_softc *sc)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_notify_cdb *cnc;
int error;
debug_called(1);
cr = sc->ciss_periodic_notify;
/* get a request if we don't already have one */
if (cr == NULL) {
if ((error = ciss_get_request(sc, &cr)) != 0) {
debug(0, "can't get notify event request");
goto out;
}
sc->ciss_periodic_notify = cr;
cr->cr_complete = ciss_notify_complete;
debug(1, "acquired request %d", cr->cr_tag);
}
/*
* Get a databuffer if we don't already have one, note that the
* adapter command wants a larger buffer than the actual
* structure.
*/
if (cr->cr_data == NULL) {
if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) {
debug(0, "can't get notify event request buffer");
error = ENOMEM;
goto out;
}
cr->cr_length = CISS_NOTIFY_DATA_SIZE;
}
/* re-setup the request's command (since we never release it) XXX overkill*/
ciss_preen_command(cr);
/* (re)build the notify event command */
cc = cr->cr_cc;
cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL;
cc->header.address.physical.bus = 0;
cc->header.address.physical.target = 0;
cc->cdb.cdb_length = sizeof(*cnc);
cc->cdb.type = CISS_CDB_TYPE_COMMAND;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
cc->cdb.direction = CISS_CDB_DIRECTION_READ;
cc->cdb.timeout = 0; /* no timeout, we hope */
cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]);
bzero(cr->cr_data, CISS_NOTIFY_DATA_SIZE);
cnc->opcode = CISS_OPCODE_READ;
cnc->command = CISS_COMMAND_NOTIFY_ON_EVENT;
cnc->timeout = 0; /* no timeout, we hope */
cnc->synchronous = 0;
cnc->ordered = 0;
cnc->seek_to_oldest = 0;
if ((sc->ciss_flags & CISS_FLAG_RUNNING) == 0)
cnc->new_only = 1;
else
cnc->new_only = 0;
cnc->length = htonl(CISS_NOTIFY_DATA_SIZE);
/* submit the request */
error = ciss_start(cr);
out:
if (error) {
if (cr != NULL) {
if (cr->cr_data != NULL)
free(cr->cr_data, CISS_MALLOC_CLASS);
ciss_release_request(cr);
}
sc->ciss_periodic_notify = NULL;
debug(0, "can't submit notify event request");
sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
} else {
debug(1, "notify event submitted");
sc->ciss_flags |= CISS_FLAG_NOTIFY_OK;
}
}
static void
ciss_notify_complete(struct ciss_request *cr)
{
struct ciss_command *cc;
struct ciss_notify *cn;
struct ciss_softc *sc;
int scsi_status;
int command_status;
debug_called(1);
cc = cr->cr_cc;
cn = (struct ciss_notify *)cr->cr_data;
sc = cr->cr_sc;
/*
* Report request results, decode status.
*/
ciss_report_request(cr, &command_status, &scsi_status);
/*
* Abort the chain on a fatal error.
*
* XXX which of these are actually errors?
*/
if ((command_status != CISS_CMD_STATUS_SUCCESS) &&
(command_status != CISS_CMD_STATUS_TARGET_STATUS) &&
(command_status != CISS_CMD_STATUS_TIMEOUT)) { /* XXX timeout? */
ciss_printf(sc, "fatal error in Notify Event request (%s)\n",
ciss_name_command_status(command_status));
ciss_release_request(cr);
sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
return;
}
/*
* If the adapter gave us a text message, print it.
*/
if (cn->message[0] != 0)
ciss_printf(sc, "*** %.80s\n", cn->message);
debug(0, "notify event class %d subclass %d detail %d",
cn->class, cn->subclass, cn->detail);
/*
* If the response indicates that the notifier has been aborted,
* release the notifier command.
*/
if ((cn->class == CISS_NOTIFY_NOTIFIER) &&
(cn->subclass == CISS_NOTIFY_NOTIFIER_STATUS) &&
(cn->detail == 1)) {
debug(0, "notifier exiting");
sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
ciss_release_request(cr);
sc->ciss_periodic_notify = NULL;
wakeup(&sc->ciss_periodic_notify);
} else {
/* Handle notify events in a kernel thread */
ciss_enqueue_notify(cr);
sc->ciss_periodic_notify = NULL;
wakeup(&sc->ciss_periodic_notify);
wakeup(&sc->ciss_notify);
}
/*
* Send a new notify event command, if we're not aborting.
*/
if (!(sc->ciss_flags & CISS_FLAG_ABORTING)) {
ciss_notify_event(sc);
}
}
/************************************************************************
* Abort the Notify Event chain.
*
* Note that we can't just abort the command in progress; we have to
* explicitly issue an Abort Notify Event command in order for the
* adapter to clean up correctly.
*
* If we are called with CISS_FLAG_ABORTING set in the adapter softc,
* the chain will not restart itself.
*/
static int
ciss_notify_abort(struct ciss_softc *sc)
{
struct ciss_request *cr;
struct ciss_command *cc;
struct ciss_notify_cdb *cnc;
int error, command_status, scsi_status;
debug_called(1);
cr = NULL;
error = 0;
/* verify that there's an outstanding command */
if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK))
goto out;
/* get a command to issue the abort with */
if ((error = ciss_get_request(sc, &cr)))
goto out;
/* get a buffer for the result */
if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) {
debug(0, "can't get notify event request buffer");
error = ENOMEM;
goto out;
}
cr->cr_length = CISS_NOTIFY_DATA_SIZE;
/* build the CDB */
cc = cr->cr_cc;
cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL;
cc->header.address.physical.bus = 0;
cc->header.address.physical.target = 0;
cc->cdb.cdb_length = sizeof(*cnc);
cc->cdb.type = CISS_CDB_TYPE_COMMAND;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
cc->cdb.direction = CISS_CDB_DIRECTION_READ;
cc->cdb.timeout = 0; /* no timeout, we hope */
cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]);
bzero(cnc, sizeof(*cnc));
cnc->opcode = CISS_OPCODE_WRITE;
cnc->command = CISS_COMMAND_ABORT_NOTIFY;
cnc->length = htonl(CISS_NOTIFY_DATA_SIZE);
ciss_print_request(cr);
/*
* Submit the request and wait for it to complete.
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "Abort Notify Event command failed (%d)\n", error);
goto out;
}
/*
* Check response.
*/
ciss_report_request(cr, &command_status, &scsi_status);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS:
break;
case CISS_CMD_STATUS_INVALID_COMMAND:
/*
* Some older adapters don't support the CISS version of this
* command. Fall back to using the BMIC version.
*/
error = ciss_notify_abort_bmic(sc);
if (error != 0)
goto out;
break;
case CISS_CMD_STATUS_TARGET_STATUS:
/*
* This can happen if the adapter thinks there wasn't an outstanding
* Notify Event command but we think there was.  We clean up here.
*/
if (scsi_status == CISS_SCSI_STATUS_CHECK_CONDITION) {
if (sc->ciss_periodic_notify != NULL)
ciss_release_request(sc->ciss_periodic_notify);
error = 0;
goto out;
}
/* FALLTHROUGH */
default:
ciss_printf(sc, "Abort Notify Event command failed (%s)\n",
ciss_name_command_status(command_status));
error = EIO;
goto out;
}
/*
* Sleep waiting for the notifier command to complete. Note
* that if it doesn't, we may end up in a bad situation, since
* the adapter may deliver it later. Also note that the adapter
* requires the Notify Event command to be cancelled in order to
* maintain internal bookkeeping.
*/
while (sc->ciss_periodic_notify != NULL) {
error = msleep(&sc->ciss_periodic_notify, &sc->ciss_mtx, PRIBIO, "cissNEA", hz * 5);
if (error == EWOULDBLOCK) {
ciss_printf(sc, "Notify Event command failed to abort, adapter may wedge.\n");
break;
}
}
out:
/* release the cancel request */
if (cr != NULL) {
if (cr->cr_data != NULL)
free(cr->cr_data, CISS_MALLOC_CLASS);
ciss_release_request(cr);
}
if (error == 0)
sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
return(error);
}
/************************************************************************
* Abort the Notify Event chain using a BMIC command.
*/
static int
ciss_notify_abort_bmic(struct ciss_softc *sc)
{
struct ciss_request *cr;
int error, command_status;
debug_called(1);
cr = NULL;
error = 0;
/* verify that there's an outstanding command */
if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK))
goto out;
/*
* Build a BMIC command to cancel the Notify on Event command.
*
* Note that we are sending a CISS opcode here. Odd.
*/
if ((error = ciss_get_bmic_request(sc, &cr, CISS_COMMAND_ABORT_NOTIFY,
NULL, 0)) != 0)
goto out;
/*
* Submit the request and wait for it to complete.
*/
if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
ciss_printf(sc, "error sending BMIC Cancel Notify on Event command (%d)\n", error);
goto out;
}
/*
* Check response.
*/
ciss_report_request(cr, &command_status, NULL);
switch(command_status) {
case CISS_CMD_STATUS_SUCCESS:
break;
default:
ciss_printf(sc, "error cancelling Notify on Event (%s)\n",
ciss_name_command_status(command_status));
error = EIO;
goto out;
}
out:
if (cr != NULL)
ciss_release_request(cr);
return(error);
}
/************************************************************************
* Handle rescanning all the logical volumes when a notify event
* causes the drives to come online or offline.
*/
static void
ciss_notify_rescan_logical(struct ciss_softc *sc)
{
struct ciss_lun_report *cll;
struct ciss_ldrive *ld;
int i, j, ndrives;
/*
* We must rescan all logical volumes to get the right logical
* drive address.
*/
cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS,
sc->ciss_cfg->max_logical_supported);
if (cll == NULL)
return;
ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address));
/*
* Delete any of the drives which were destroyed by the
* firmware.
*/
for (i = 0; i < sc->ciss_max_logical_bus; i++) {
for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) {
ld = &sc->ciss_logical[i][j];
if (ld->cl_update == 0)
continue;
if (ld->cl_status != CISS_LD_ONLINE) {
ciss_cam_rescan_target(sc, i, j);
ld->cl_update = 0;
if (ld->cl_ldrive)
free(ld->cl_ldrive, CISS_MALLOC_CLASS);
if (ld->cl_lstatus)
free(ld->cl_lstatus, CISS_MALLOC_CLASS);
ld->cl_ldrive = NULL;
ld->cl_lstatus = NULL;
}
}
}
/*
* Scan for new drives.
*/
for (i = 0; i < ndrives; i++) {
int bus, target;
bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun);
target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun);
ld = &sc->ciss_logical[bus][target];
if (ld->cl_update == 0)
continue;
ld->cl_update = 0;
ld->cl_address = cll->lun[i];
ld->cl_controller = &sc->ciss_controllers[bus];
if (ciss_identify_logical(sc, ld) == 0) {
ciss_cam_rescan_target(sc, bus, target);
}
}
free(cll, CISS_MALLOC_CLASS);
}
/************************************************************************
* Handle a notify event relating to the status of a logical drive.
*
* XXX need to be able to defer some of these to properly handle
* calling the "ID Physical drive" command, unless the 'extended'
* drive IDs are always in BIG_MAP format.
*/
static void
ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn)
{
struct ciss_ldrive *ld;
int ostatus, bus, target;
debug_called(2);
bus = cn->device.physical.bus;
target = cn->data.logical_status.logical_drive;
ld = &sc->ciss_logical[bus][target];
switch (cn->subclass) {
case CISS_NOTIFY_LOGICAL_STATUS:
switch (cn->detail) {
case 0:
ciss_name_device(sc, bus, target);
ciss_printf(sc, "logical drive %d (%s) changed status %s->%s, spare status 0x%b\n",
cn->data.logical_status.logical_drive, ld->cl_name,
ciss_name_ldrive_status(cn->data.logical_status.previous_state),
ciss_name_ldrive_status(cn->data.logical_status.new_state),
cn->data.logical_status.spare_state,
"\20\1configured\2rebuilding\3failed\4in use\5available\n");
/*
* Update our idea of the drive's status.
*/
ostatus = ciss_decode_ldrive_status(cn->data.logical_status.previous_state);
ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state);
if (ld->cl_lstatus != NULL)
ld->cl_lstatus->status = cn->data.logical_status.new_state;
/*
* Have CAM rescan the drive if its status has changed.
*/
if (ostatus != ld->cl_status) {
ld->cl_update = 1;
ciss_notify_rescan_logical(sc);
}
break;
case 1: /* logical drive has recognised new media, needs Accept Media Exchange */
ciss_name_device(sc, bus, target);
ciss_printf(sc, "logical drive %d (%s) media exchanged, ready to go online\n",
cn->data.logical_status.logical_drive, ld->cl_name);
ciss_accept_media(sc, ld);
ld->cl_update = 1;
ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state);
ciss_notify_rescan_logical(sc);
break;
case 2:
case 3:
ciss_printf(sc, "rebuild of logical drive %d (%s) failed due to %s error\n",
cn->data.rebuild_aborted.logical_drive,
ld->cl_name,
(cn->detail == 2) ? "read" : "write");
break;
}
break;
case CISS_NOTIFY_LOGICAL_ERROR:
if (cn->detail == 0) {
ciss_printf(sc, "FATAL I/O ERROR on logical drive %d (%s), SCSI port %d ID %d\n",
cn->data.io_error.logical_drive,
ld->cl_name,
cn->data.io_error.failure_bus,
cn->data.io_error.failure_drive);
/* XXX should we take the drive down at this point, or will we be told? */
}
break;
case CISS_NOTIFY_LOGICAL_SURFACE:
if (cn->detail == 0)
ciss_printf(sc, "logical drive %d (%s) completed consistency initialisation\n",
cn->data.consistency_completed.logical_drive,
ld->cl_name);
break;
}
}
/************************************************************************
* Handle a notify event relating to the status of a physical drive.
*/
static void
ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn)
{
}
/************************************************************************
 * Handle a notify event relating to the hotplug of a physical drive.
*/
static void
ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn)
{
struct ciss_lun_report *cll = NULL;
int bus, target;
switch (cn->subclass) {
case CISS_NOTIFY_HOTPLUG_PHYSICAL:
case CISS_NOTIFY_HOTPLUG_NONDISK:
bus = CISS_BIG_MAP_BUS(sc, cn->data.drive.big_physical_drive_number);
target =
CISS_BIG_MAP_TARGET(sc, cn->data.drive.big_physical_drive_number);
if (cn->detail == 0) {
/*
* Mark the device offline so that it'll start producing selection
* timeouts to the upper layer.
*/
if ((bus >= 0) && (target >= 0))
sc->ciss_physical[bus][target].cp_online = 0;
} else {
/*
* Rescan the physical lun list for new items
*/
cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS,
sc->ciss_cfg->max_physical_supported);
if (cll == NULL) {
ciss_printf(sc, "Warning, cannot get physical lun list\n");
break;
}
ciss_filter_physical(sc, cll);
}
break;
default:
ciss_printf(sc, "Unknown hotplug event %d\n", cn->subclass);
return;
}
if (cll != NULL)
free(cll, CISS_MALLOC_CLASS);
}
/************************************************************************
 * Handle deferred processing of notify events. Notify events may need
 * to sleep, which is not safe in interrupt context.
*/
static void
ciss_notify_thread(void *arg)
{
struct ciss_softc *sc;
struct ciss_request *cr;
struct ciss_notify *cn;
sc = (struct ciss_softc *)arg;
mtx_lock(&sc->ciss_mtx);
for (;;) {
if (STAILQ_EMPTY(&sc->ciss_notify) != 0 &&
(sc->ciss_flags & CISS_FLAG_THREAD_SHUT) == 0) {
msleep(&sc->ciss_notify, &sc->ciss_mtx, PUSER, "idle", 0);
}
if (sc->ciss_flags & CISS_FLAG_THREAD_SHUT)
break;
cr = ciss_dequeue_notify(sc);
if (cr == NULL)
panic("cr null");
cn = (struct ciss_notify *)cr->cr_data;
switch (cn->class) {
case CISS_NOTIFY_HOTPLUG:
ciss_notify_hotplug(sc, cn);
break;
case CISS_NOTIFY_LOGICAL:
ciss_notify_logical(sc, cn);
break;
case CISS_NOTIFY_PHYSICAL:
ciss_notify_physical(sc, cn);
break;
}
ciss_release_request(cr);
}
sc->ciss_notify_thread = NULL;
wakeup(&sc->ciss_notify_thread);
mtx_unlock(&sc->ciss_mtx);
kproc_exit(0);
}
/************************************************************************
* Start the notification kernel thread.
*/
static void
ciss_spawn_notify_thread(struct ciss_softc *sc)
{
if (kproc_create((void(*)(void *))ciss_notify_thread, sc,
&sc->ciss_notify_thread, 0, 0, "ciss_notify%d",
device_get_unit(sc->ciss_dev)))
panic("Could not create notify thread\n");
}
/************************************************************************
* Kill the notification kernel thread.
*/
static void
ciss_kill_notify_thread(struct ciss_softc *sc)
{
if (sc->ciss_notify_thread == NULL)
return;
sc->ciss_flags |= CISS_FLAG_THREAD_SHUT;
wakeup(&sc->ciss_notify);
msleep(&sc->ciss_notify_thread, &sc->ciss_mtx, PUSER, "thtrm", 0);
}
/************************************************************************
* Print a request.
*/
static void
ciss_print_request(struct ciss_request *cr)
{
struct ciss_softc *sc;
struct ciss_command *cc;
int i;
sc = cr->cr_sc;
cc = cr->cr_cc;
ciss_printf(sc, "REQUEST @ %p\n", cr);
ciss_printf(sc, " data %p/%d tag %d flags %b\n",
cr->cr_data, cr->cr_length, cr->cr_tag, cr->cr_flags,
"\20\1mapped\2sleep\3poll\4dataout\5datain\n");
ciss_printf(sc, " sg list/total %d/%d host tag 0x%x\n",
cc->header.sg_in_list, cc->header.sg_total, cc->header.host_tag);
switch(cc->header.address.mode.mode) {
case CISS_HDR_ADDRESS_MODE_PERIPHERAL:
case CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL:
ciss_printf(sc, " physical bus %d target %d\n",
cc->header.address.physical.bus, cc->header.address.physical.target);
break;
case CISS_HDR_ADDRESS_MODE_LOGICAL:
ciss_printf(sc, " logical unit %d\n", cc->header.address.logical.lun);
break;
}
ciss_printf(sc, " %s cdb length %d type %s attribute %s\n",
(cc->cdb.direction == CISS_CDB_DIRECTION_NONE) ? "no-I/O" :
(cc->cdb.direction == CISS_CDB_DIRECTION_READ) ? "READ" :
(cc->cdb.direction == CISS_CDB_DIRECTION_WRITE) ? "WRITE" : "??",
cc->cdb.cdb_length,
(cc->cdb.type == CISS_CDB_TYPE_COMMAND) ? "command" :
(cc->cdb.type == CISS_CDB_TYPE_MESSAGE) ? "message" : "??",
(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_UNTAGGED) ? "untagged" :
(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_SIMPLE) ? "simple" :
(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_HEAD_OF_QUEUE) ? "head-of-queue" :
(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_ORDERED) ? "ordered" :
(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_AUTO_CONTINGENT) ? "auto-contingent" : "??");
ciss_printf(sc, " %*D\n", cc->cdb.cdb_length, &cc->cdb.cdb[0], " ");
if (cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) {
/* XXX print error info */
} else {
/* since we don't use chained s/g, don't support it here */
for (i = 0; i < cc->header.sg_in_list; i++) {
if ((i % 4) == 0)
ciss_printf(sc, " ");
printf("0x%08x/%d ", (u_int32_t)cc->sg[i].address, cc->sg[i].length);
if ((((i + 1) % 4) == 0) || (i == (cc->header.sg_in_list - 1)))
printf("\n");
}
}
}
/************************************************************************
* Print information about the status of a logical drive.
*/
static void
ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld)
{
int bus, target, i;
if (ld->cl_lstatus == NULL) {
printf("does not exist\n");
return;
}
/* print drive status */
switch(ld->cl_lstatus->status) {
case CISS_LSTATUS_OK:
printf("online\n");
break;
case CISS_LSTATUS_INTERIM_RECOVERY:
printf("in interim recovery mode\n");
break;
case CISS_LSTATUS_READY_RECOVERY:
printf("ready to begin recovery\n");
break;
case CISS_LSTATUS_RECOVERING:
bus = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding);
	target = CISS_BIG_MAP_TARGET(sc, ld->cl_lstatus->drive_rebuilding);
printf("being recovered, working on physical drive %d.%d, %u blocks remaining\n",
bus, target, ld->cl_lstatus->blocks_to_recover);
break;
case CISS_LSTATUS_EXPANDING:
printf("being expanded, %u blocks remaining\n",
ld->cl_lstatus->blocks_to_recover);
break;
case CISS_LSTATUS_QUEUED_FOR_EXPANSION:
printf("queued for expansion\n");
break;
case CISS_LSTATUS_FAILED:
printf("queued for expansion\n");
break;
case CISS_LSTATUS_WRONG_PDRIVE:
printf("wrong physical drive inserted\n");
break;
case CISS_LSTATUS_MISSING_PDRIVE:
printf("missing a needed physical drive\n");
break;
case CISS_LSTATUS_BECOMING_READY:
printf("becoming ready\n");
break;
}
/* print failed physical drives */
for (i = 0; i < CISS_BIG_MAP_ENTRIES / 8; i++) {
bus = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_failure_map[i]);
target = CISS_BIG_MAP_TARGET(sc, ld->cl_lstatus->drive_failure_map[i]);
if (bus == -1)
continue;
ciss_printf(sc, "physical drive %d:%d (%x) failed\n", bus, target,
ld->cl_lstatus->drive_failure_map[i]);
}
}
#ifdef CISS_DEBUG
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
/************************************************************************
* Print information about the controller/driver.
*/
static void
ciss_print_adapter(struct ciss_softc *sc)
{
int i, j;
ciss_printf(sc, "ADAPTER:\n");
for (i = 0; i < CISSQ_COUNT; i++) {
ciss_printf(sc, "%s %d/%d\n",
i == 0 ? "free" :
i == 1 ? "busy" : "complete",
sc->ciss_qstat[i].q_length,
sc->ciss_qstat[i].q_max);
}
ciss_printf(sc, "max_requests %d\n", sc->ciss_max_requests);
ciss_printf(sc, "flags %b\n", sc->ciss_flags,
"\20\1notify_ok\2control_open\3aborting\4running\21fake_synch\22bmic_abort\n");
for (i = 0; i < sc->ciss_max_logical_bus; i++) {
for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) {
ciss_printf(sc, "LOGICAL DRIVE %d: ", i);
ciss_print_ldrive(sc, &sc->ciss_logical[i][j]);
}
}
/* XXX Should physical drives be printed out here? */
for (i = 1; i < sc->ciss_max_requests; i++)
ciss_print_request(sc->ciss_request + i);
}
/* DDB hook */
DB_COMMAND(ciss_prt, db_ciss_prt)
{
struct ciss_softc *sc;
devclass_t dc;
int maxciss, i;
dc = devclass_find("ciss");
if ( dc == NULL ) {
printf("%s: can't find devclass!\n", __func__);
return;
}
maxciss = devclass_get_maxunit(dc);
for (i = 0; i < maxciss; i++) {
sc = devclass_get_softc(dc, i);
ciss_print_adapter(sc);
}
}
#endif
#endif
/************************************************************************
* Return a name for a logical drive status value.
*/
static const char *
ciss_name_ldrive_status(int status)
{
switch (status) {
case CISS_LSTATUS_OK:
return("OK");
case CISS_LSTATUS_FAILED:
return("failed");
case CISS_LSTATUS_NOT_CONFIGURED:
return("not configured");
case CISS_LSTATUS_INTERIM_RECOVERY:
return("interim recovery");
case CISS_LSTATUS_READY_RECOVERY:
return("ready for recovery");
case CISS_LSTATUS_RECOVERING:
return("recovering");
case CISS_LSTATUS_WRONG_PDRIVE:
return("wrong physical drive inserted");
case CISS_LSTATUS_MISSING_PDRIVE:
return("missing physical drive");
case CISS_LSTATUS_EXPANDING:
return("expanding");
case CISS_LSTATUS_BECOMING_READY:
return("becoming ready");
case CISS_LSTATUS_QUEUED_FOR_EXPANSION:
return("queued for expansion");
}
return("unknown status");
}
/************************************************************************
* Return an online/offline/nonexistent value for a logical drive
* status value.
*/
static int
ciss_decode_ldrive_status(int status)
{
switch(status) {
case CISS_LSTATUS_NOT_CONFIGURED:
return(CISS_LD_NONEXISTENT);
case CISS_LSTATUS_OK:
case CISS_LSTATUS_INTERIM_RECOVERY:
case CISS_LSTATUS_READY_RECOVERY:
case CISS_LSTATUS_RECOVERING:
case CISS_LSTATUS_EXPANDING:
case CISS_LSTATUS_QUEUED_FOR_EXPANSION:
return(CISS_LD_ONLINE);
case CISS_LSTATUS_FAILED:
case CISS_LSTATUS_WRONG_PDRIVE:
case CISS_LSTATUS_MISSING_PDRIVE:
case CISS_LSTATUS_BECOMING_READY:
default:
return(CISS_LD_OFFLINE);
}
}
/************************************************************************
* Return a name for a logical drive's organisation.
*/
static const char *
ciss_name_ldrive_org(int org)
{
switch(org) {
case CISS_LDRIVE_RAID0:
return("RAID 0");
case CISS_LDRIVE_RAID1:
return("RAID 1(1+0)");
case CISS_LDRIVE_RAID4:
return("RAID 4");
case CISS_LDRIVE_RAID5:
return("RAID 5");
case CISS_LDRIVE_RAID51:
return("RAID 5+1");
case CISS_LDRIVE_RAIDADG:
return("RAID ADG");
}
return("unknown");
}
/************************************************************************
* Return a name for a command status value.
*/
static const char *
ciss_name_command_status(int status)
{
switch(status) {
case CISS_CMD_STATUS_SUCCESS:
return("success");
case CISS_CMD_STATUS_TARGET_STATUS:
return("target status");
case CISS_CMD_STATUS_DATA_UNDERRUN:
return("data underrun");
case CISS_CMD_STATUS_DATA_OVERRUN:
return("data overrun");
case CISS_CMD_STATUS_INVALID_COMMAND:
return("invalid command");
case CISS_CMD_STATUS_PROTOCOL_ERROR:
return("protocol error");
case CISS_CMD_STATUS_HARDWARE_ERROR:
return("hardware error");
case CISS_CMD_STATUS_CONNECTION_LOST:
return("connection lost");
case CISS_CMD_STATUS_ABORTED:
return("aborted");
case CISS_CMD_STATUS_ABORT_FAILED:
return("abort failed");
case CISS_CMD_STATUS_UNSOLICITED_ABORT:
return("unsolicited abort");
case CISS_CMD_STATUS_TIMEOUT:
return("timeout");
case CISS_CMD_STATUS_UNABORTABLE:
return("unabortable");
}
return("unknown status");
}
/************************************************************************
* Handle an open on the control device.
*/
static int
ciss_open(struct cdev *dev, int flags, int fmt, struct thread *p)
{
struct ciss_softc *sc;
debug_called(1);
sc = (struct ciss_softc *)dev->si_drv1;
/* we might want to veto if someone already has us open */
mtx_lock(&sc->ciss_mtx);
sc->ciss_flags |= CISS_FLAG_CONTROL_OPEN;
mtx_unlock(&sc->ciss_mtx);
return(0);
}
/************************************************************************
* Handle the last close on the control device.
*/
static int
ciss_close(struct cdev *dev, int flags, int fmt, struct thread *p)
{
struct ciss_softc *sc;
debug_called(1);
sc = (struct ciss_softc *)dev->si_drv1;
mtx_lock(&sc->ciss_mtx);
sc->ciss_flags &= ~CISS_FLAG_CONTROL_OPEN;
mtx_unlock(&sc->ciss_mtx);
return (0);
}
/********************************************************************************
* Handle adapter-specific control operations.
*
* Note that the API here is compatible with the Linux driver, in order to
* simplify the porting of Compaq's userland tools.
*/
static int
ciss_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *p)
{
struct ciss_softc *sc;
IOCTL_Command_struct *ioc = (IOCTL_Command_struct *)addr;
#ifdef __amd64__
IOCTL_Command_struct32 *ioc32 = (IOCTL_Command_struct32 *)addr;
IOCTL_Command_struct ioc_swab;
#endif
int error;
debug_called(1);
sc = (struct ciss_softc *)dev->si_drv1;
error = 0;
mtx_lock(&sc->ciss_mtx);
switch(cmd) {
case CCISS_GETQSTATS:
{
union ciss_statrequest *cr = (union ciss_statrequest *)addr;
switch (cr->cs_item) {
case CISSQ_FREE:
case CISSQ_NOTIFY:
bcopy(&sc->ciss_qstat[cr->cs_item], &cr->cs_qstat,
sizeof(struct ciss_qstat));
break;
default:
error = ENOIOCTL;
break;
}
break;
}
case CCISS_GETPCIINFO:
{
cciss_pci_info_struct *pis = (cciss_pci_info_struct *)addr;
pis->bus = pci_get_bus(sc->ciss_dev);
pis->dev_fn = pci_get_slot(sc->ciss_dev);
pis->board_id = (pci_get_subvendor(sc->ciss_dev) << 16) |
pci_get_subdevice(sc->ciss_dev);
break;
}
case CCISS_GETINTINFO:
{
cciss_coalint_struct *cis = (cciss_coalint_struct *)addr;
cis->delay = sc->ciss_cfg->interrupt_coalesce_delay;
cis->count = sc->ciss_cfg->interrupt_coalesce_count;
break;
}
case CCISS_SETINTINFO:
{
cciss_coalint_struct *cis = (cciss_coalint_struct *)addr;
if ((cis->delay == 0) && (cis->count == 0)) {
error = EINVAL;
break;
}
/*
* XXX apparently this is only safe if the controller is idle,
* we should suspend it before doing this.
*/
sc->ciss_cfg->interrupt_coalesce_delay = cis->delay;
sc->ciss_cfg->interrupt_coalesce_count = cis->count;
if (ciss_update_config(sc))
error = EIO;
/* XXX resume the controller here */
break;
}
case CCISS_GETNODENAME:
bcopy(sc->ciss_cfg->server_name, (NodeName_type *)addr,
sizeof(NodeName_type));
break;
case CCISS_SETNODENAME:
bcopy((NodeName_type *)addr, sc->ciss_cfg->server_name,
sizeof(NodeName_type));
if (ciss_update_config(sc))
error = EIO;
break;
case CCISS_GETHEARTBEAT:
*(Heartbeat_type *)addr = sc->ciss_cfg->heartbeat;
break;
case CCISS_GETBUSTYPES:
*(BusTypes_type *)addr = sc->ciss_cfg->bus_types;
break;
case CCISS_GETFIRMVER:
bcopy(sc->ciss_id->running_firmware_revision, (FirmwareVer_type *)addr,
sizeof(FirmwareVer_type));
break;
case CCISS_GETDRIVERVER:
*(DriverVer_type *)addr = CISS_DRIVER_VERSION;
break;
case CCISS_REVALIDVOLS:
/*
* This is a bit ugly; to do it "right" we really need
* to find any disks that have changed, kick CAM off them,
* then rescan only these disks. It'd be nice if they
* a) told us which disk(s) they were going to play with,
* and b) which ones had arrived. 8(
*/
break;
#ifdef __amd64__
case CCISS_PASSTHRU32:
ioc_swab.LUN_info = ioc32->LUN_info;
ioc_swab.Request = ioc32->Request;
ioc_swab.error_info = ioc32->error_info;
ioc_swab.buf_size = ioc32->buf_size;
ioc_swab.buf = (u_int8_t *)(uintptr_t)ioc32->buf;
ioc = &ioc_swab;
/* FALLTHROUGH */
#endif
case CCISS_PASSTHRU:
error = ciss_user_command(sc, ioc);
break;
default:
debug(0, "unknown ioctl 0x%lx", cmd);
debug(1, "CCISS_GETPCIINFO: 0x%lx", CCISS_GETPCIINFO);
debug(1, "CCISS_GETINTINFO: 0x%lx", CCISS_GETINTINFO);
debug(1, "CCISS_SETINTINFO: 0x%lx", CCISS_SETINTINFO);
debug(1, "CCISS_GETNODENAME: 0x%lx", CCISS_GETNODENAME);
debug(1, "CCISS_SETNODENAME: 0x%lx", CCISS_SETNODENAME);
debug(1, "CCISS_GETHEARTBEAT: 0x%lx", CCISS_GETHEARTBEAT);
debug(1, "CCISS_GETBUSTYPES: 0x%lx", CCISS_GETBUSTYPES);
debug(1, "CCISS_GETFIRMVER: 0x%lx", CCISS_GETFIRMVER);
debug(1, "CCISS_GETDRIVERVER: 0x%lx", CCISS_GETDRIVERVER);
debug(1, "CCISS_REVALIDVOLS: 0x%lx", CCISS_REVALIDVOLS);
debug(1, "CCISS_PASSTHRU: 0x%lx", CCISS_PASSTHRU);
error = ENOIOCTL;
break;
}
mtx_unlock(&sc->ciss_mtx);
return(error);
}
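/*
 * A minimal userland sketch (illustrative only, not part of the driver)
 * of the Linux-compatible ioctl interface handled above.  The device
 * path and the header providing cciss_pci_info_struct and the CCISS_*
 * ioctl numbers are assumptions; adjust to the local installation.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	cciss_pci_info_struct pis;
 *	int fd = open("/dev/ciss0", O_RDWR);	// assumed device node
 *	if (fd >= 0 && ioctl(fd, CCISS_GETPCIINFO, &pis) == 0)
 *		printf("board id 0x%08x\n", pis.board_id);
 */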
Index: head/sys/dev/cxgbe/crypto/t4_crypto.c
===================================================================
--- head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 328217)
+++ head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 328218)
@@ -1,2234 +1,2234 @@
/*-
* Copyright (c) 2017 Chelsio Communications, Inc.
* All rights reserved.
* Written by: John Baldwin <jhb@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#include "cryptodev_if.h"
#include "common/common.h"
#include "crypto/t4_crypto.h"
/*
* Requests consist of:
*
* +-------------------------------+
* | struct fw_crypto_lookaside_wr |
* +-------------------------------+
* | struct ulp_txpkt |
* +-------------------------------+
* | struct ulptx_idata |
* +-------------------------------+
* | struct cpl_tx_sec_pdu |
* +-------------------------------+
* | struct cpl_tls_tx_scmd_fmt |
* +-------------------------------+
* | key context header |
* +-------------------------------+
* | AES key | ----- For requests with AES
* +-------------------------------+ -
* | IPAD (16-byte aligned) | \
* +-------------------------------+ +---- For requests with HMAC
* | OPAD (16-byte aligned) | /
* +-------------------------------+ -
* | GMAC H | ----- For AES-GCM
* +-------------------------------+ -
* | struct cpl_rx_phys_dsgl | \
* +-------------------------------+ +---- Destination buffer for
* | PHYS_DSGL entries | / non-hash-only requests
* +-------------------------------+ -
* | 16 dummy bytes | ----- Only for hash-only requests
* +-------------------------------+
* | IV | ----- If immediate IV
* +-------------------------------+
* | Payload | ----- If immediate Payload
* +-------------------------------+ -
* | struct ulptx_sgl | \
* +-------------------------------+ +---- If payload via SGL
* | SGL entries | /
* +-------------------------------+ -
*
* Note that the key context must be padded to ensure 16-byte alignment.
* For HMAC requests, the key consists of the partial hash of the IPAD
* followed by the partial hash of the OPAD.
*
* Replies consist of:
*
* +-------------------------------+
* | struct cpl_fw6_pld |
* +-------------------------------+
* | hash digest | ----- For HMAC request with
* +-------------------------------+ 'hash_size' set in work request
*
* A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message. Bit 0 indicates a
* "MAC" error and bit 1 indicates a "PAD" error.
*
* The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
* in the request is returned in data[1] of the CPL_FW6_PLD message.
*
* For block cipher replies, the updated IV is supplied in data[2] and
* data[3] of the CPL_FW6_PLD message.
*
* For hash replies where the work request set 'hash_size' to request
* a copy of the hash in the reply, the hash digest is supplied
* immediately following the CPL_FW6_PLD message.
*/
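/*
 * Illustrative sketch (not part of the driver logic): for a request
 * whose payload is carried via an SGL, the handlers below size the
 * work request roughly as
 *
 *	wr_len = roundup2(transhdr_len, 16) +	// WR header through key
 *						// context and PHYS_DSGL
 *	    roundup2(imm_len, 16) +		// immediate payload, if any
 *	    sgl_len;				// ULPTX_SGL, if not immediate
 *	if (iv_loc == IV_IMMEDIATE)
 *		wr_len += iv_len;		// IV copied into the WR
 *
 * which mirrors the layout above: 'transhdr_len' covers everything from
 * the work request header through the key context and the PHYS_DSGL (or
 * the dummy bytes for hash-only requests), followed by the optional IV,
 * the immediate payload, and the trailing ULPTX_SGL.
 */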
/*
* The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32
* SG entries.
*/
#define MAX_RX_PHYS_DSGL_SGE 32
#define DSGL_SGE_MAXLEN 65535
/*
* The adapter only supports requests with a total input or output
* length of 64k-1 or smaller. Longer requests either result in hung
* requests or incorrect results.
*/
#define MAX_REQUEST_SIZE 65535
static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
struct ccr_session_hmac {
struct auth_hash *auth_hash;
int hash_len;
unsigned int partial_digest_len;
unsigned int auth_mode;
unsigned int mk_size;
char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
struct ccr_session_gmac {
int hash_len;
char ghash_h[GMAC_BLOCK_LEN];
};
struct ccr_session_blkcipher {
unsigned int cipher_mode;
unsigned int key_len;
unsigned int iv_len;
__be32 key_ctx_hdr;
char enckey[CHCR_AES_MAX_KEY_LEN];
char deckey[CHCR_AES_MAX_KEY_LEN];
};
struct ccr_session {
bool active;
int pending;
enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
union {
struct ccr_session_hmac hmac;
struct ccr_session_gmac gmac;
};
struct ccr_session_blkcipher blkcipher;
};
struct ccr_softc {
struct adapter *adapter;
device_t dev;
uint32_t cid;
int tx_channel_id;
struct ccr_session *sessions;
int nsessions;
struct mtx lock;
bool detaching;
struct sge_wrq *txq;
struct sge_rxq *rxq;
/*
* Pre-allocate S/G lists used when preparing a work request.
* 'sg_crp' contains an sglist describing the entire buffer
* for a 'struct cryptop'. 'sg_ulptx' is used to describe
* the data the engine should DMA as input via ULPTX_SGL.
* 'sg_dsgl' is used to describe the destination that cipher
* text and a tag should be written to.
*/
struct sglist *sg_crp;
struct sglist *sg_ulptx;
struct sglist *sg_dsgl;
/* Statistics. */
uint64_t stats_blkcipher_encrypt;
uint64_t stats_blkcipher_decrypt;
uint64_t stats_hmac;
uint64_t stats_authenc_encrypt;
uint64_t stats_authenc_decrypt;
uint64_t stats_gcm_encrypt;
uint64_t stats_gcm_decrypt;
uint64_t stats_wr_nomem;
uint64_t stats_inflight;
uint64_t stats_mac_error;
uint64_t stats_pad_error;
uint64_t stats_bad_session;
uint64_t stats_sglist_error;
uint64_t stats_process_error;
};
/*
* Crypto requests involve two kind of scatter/gather lists.
*
* Non-hash-only requests require a PHYS_DSGL that describes the
* location to store the results of the encryption or decryption
* operation. This SGL uses a different format (PHYS_DSGL) and should
* exclude the crd_skip bytes at the start of the data as well as
* any AAD or IV. For authenticated encryption requests it should
 * also include the destination of the hash or tag.
*
* The input payload may either be supplied inline as immediate data,
* or via a standard ULP_TX SGL. This SGL should include AAD,
* ciphertext, and the hash or tag for authenticated decryption
* requests.
*
* These scatter/gather lists can describe different subsets of the
* buffer described by the crypto operation. ccr_populate_sglist()
* generates a scatter/gather list that covers the entire crypto
* operation buffer that is then used to construct the other
* scatter/gather lists.
*/
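/*
 * A minimal sketch of the per-request flow (see ccr_blkcipher() below
 * for the real thing): sg_crp is loaded once with the whole cryptop
 * buffer via ccr_populate_sglist(), and the handlers then carve out the
 * byte ranges they need, e.g.
 *
 *	ccr_populate_sglist(sc->sg_crp, crp);
 *	sglist_reset(sc->sg_dsgl);
 *	sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
 *	    crd->crd_len);
 *	sglist_reset(sc->sg_ulptx);
 *	sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crd->crd_skip,
 *	    crd->crd_len);
 */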
static int
ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
int error;
sglist_reset(sg);
if (crp->crp_flags & CRYPTO_F_IMBUF)
error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
else if (crp->crp_flags & CRYPTO_F_IOV)
error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
else
error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
return (error);
}
/*
* Segments in 'sg' larger than 'maxsegsize' are counted as multiple
* segments.
*/
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
int i, nsegs;
nsegs = 0;
for (i = 0; i < sg->sg_nseg; i++)
nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
return (nsegs);
}
/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
int len;
len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
if ((nsegs % 8) != 0) {
len += sizeof(uint16_t) * 8;
len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
}
return (len);
}
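/*
 * Descriptive note on the sizing above: each struct phys_sge_pairs packs
 * eight addresses and eight 16-bit lengths, so full groups of eight
 * segments are charged a whole structure.  A trailing partial group
 * still pays for all eight lengths, but its 64-bit addresses are only
 * rounded up to an even count.
 */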
static void
ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
{
struct sglist *sg;
struct cpl_rx_phys_dsgl *cpl;
struct phys_sge_pairs *sgl;
vm_paddr_t paddr;
size_t seglen;
u_int i, j;
sg = sc->sg_dsgl;
cpl = dst;
cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
V_CPL_RX_PHYS_DSGL_ISRDMA(0));
cpl->pcirlxorder_to_noofsgentr = htobe32(
V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
cpl->rss_hdr_int.hash_val = 0;
sgl = (struct phys_sge_pairs *)(cpl + 1);
j = 0;
for (i = 0; i < sg->sg_nseg; i++) {
seglen = sg->sg_segs[i].ss_len;
paddr = sg->sg_segs[i].ss_paddr;
do {
sgl->addr[j] = htobe64(paddr);
if (seglen > DSGL_SGE_MAXLEN) {
sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
paddr += DSGL_SGE_MAXLEN;
seglen -= DSGL_SGE_MAXLEN;
} else {
sgl->len[j] = htobe16(seglen);
seglen = 0;
}
j++;
if (j == 8) {
sgl++;
j = 0;
}
} while (seglen != 0);
}
MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}
/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
u_int n;
nsegs--; /* first segment is part of ulptx_sgl */
n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
return (roundup2(n, 16));
}
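/*
 * Descriptive note on the sizing above: the first segment lives in
 * struct ulptx_sgl itself (len0/addr0); each remaining pair of segments
 * costs a 24-byte sge pair, an odd trailing segment still consumes a
 * 16-byte partial pair, and the whole SGL is padded to a 16-byte
 * boundary.
 */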
static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
struct ulptx_sgl *usgl;
struct sglist *sg;
struct sglist_seg *ss;
int i;
sg = sc->sg_ulptx;
MPASS(nsegs == sg->sg_nseg);
ss = &sg->sg_segs[0];
usgl = dst;
usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
V_ULPTX_NSGE(nsegs));
usgl->len0 = htobe32(ss->ss_len);
usgl->addr0 = htobe64(ss->ss_paddr);
ss++;
for (i = 0; i < sg->sg_nseg - 1; i++) {
usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
ss++;
}
}
static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{
if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
return (false);
if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
SGE_MAX_WR_LEN)
return (false);
return (true);
}
static void
ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
u_int wr_len, uint32_t sid, u_int imm_len, u_int sgl_len, u_int hash_size,
u_int iv_loc, struct cryptop *crp)
{
u_int cctx_size;
cctx_size = sizeof(struct _key_ctx) + kctx_len;
crwr->wreq.op_to_cctx_size = htobe32(
V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
crwr->wreq.len16_pkd = htobe32(
V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
crwr->wreq.session_id = htobe32(sid);
crwr->wreq.rx_chid_to_rx_q_id = htobe32(
V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
V_FW_CRYPTO_LOOKASIDE_WR_IV(iv_loc) |
V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
crwr->wreq.key_addr = 0;
crwr->wreq.pld_size_hash_size = htobe32(
V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
crwr->wreq.cookie = htobe64((uintptr_t)crp);
crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DATAMODIFY(0) |
V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1));
crwr->ulptx.len = htobe32(
((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1));
crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) -
sgl_len);
}
static int
ccr_hmac(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
struct cryptop *crp)
{
struct chcr_wr *crwr;
struct wrqe *wr;
struct auth_hash *axf;
struct cryptodesc *crd;
char *dst;
u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
u_int imm_len, iopad_size;
int error, sgl_nsegs, sgl_len;
crd = crp->crp_desc;
/* Reject requests with too large of an input buffer. */
if (crd->crd_len > MAX_REQUEST_SIZE)
return (EFBIG);
axf = s->hmac.auth_hash;
/* PADs must be 128-bit aligned. */
iopad_size = roundup2(s->hmac.partial_digest_len, 16);
/*
* The 'key' part of the context includes the aligned IPAD and
* OPAD.
*/
kctx_len = iopad_size * 2;
hash_size_in_response = axf->hashsize;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
if (crd->crd_len == 0) {
imm_len = axf->blocksize;
sgl_nsegs = 0;
sgl_len = 0;
} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
imm_len = crd->crd_len;
sgl_nsegs = 0;
sgl_len = 0;
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crd->crd_skip, crd->crd_len);
if (error)
return (error);
sgl_nsegs = sc->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
wr = alloc_wrqe(wr_len, sc->txq);
if (wr == NULL) {
sc->stats_wr_nomem++;
return (ENOMEM);
}
crwr = wrtod(wr);
memset(crwr, 0, wr_len);
ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
hash_size_in_response, IV_NOP, crp);
/* XXX: Hardcodes SGE loopback channel of 0. */
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
crd->crd_len);
crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));
/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
crwr->sec_cpl.seqno_numivs = htobe32(
V_SCMD_SEQ_NO_CTRL(0) |
V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) |
V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC));
crwr->sec_cpl.ivgen_hdrlen = htobe32(
V_SCMD_LAST_FRAG(0) |
V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));
memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
s->hmac.partial_digest_len);
/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) |
V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
if (crd->crd_len == 0) {
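		/*
		 * Zero-length payload: feed the engine a pre-padded
		 * final block (the 0x80 pad byte plus a 64-bit bit
		 * count of one block, accounting for the IPAD already
		 * hashed as the partial digest), which appears to
		 * yield the HMAC of an empty message.
		 */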
dst[0] = 0x80;
*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
htobe64(axf->blocksize << 3);
} else if (imm_len != 0)
crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
crd->crd_len, dst);
else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
return (0);
}
static int
ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
const struct cpl_fw6_pld *cpl, int error)
{
struct cryptodesc *crd;
crd = crp->crp_desc;
if (error == 0) {
crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
s->hmac.hash_len, (c_caddr_t)(cpl + 1));
}
return (error);
}
static int
ccr_blkcipher(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
struct cryptop *crp)
{
char iv[CHCR_MAX_CRYPTO_IV_LEN];
struct chcr_wr *crwr;
struct wrqe *wr;
struct cryptodesc *crd;
char *dst;
u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len;
u_int imm_len;
int dsgl_nsegs, dsgl_len;
int sgl_nsegs, sgl_len;
int error;
crd = crp->crp_desc;
if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
return (EINVAL);
if (crd->crd_alg == CRYPTO_AES_CBC &&
(crd->crd_len % AES_BLOCK_LEN) != 0)
return (EINVAL);
/* Reject requests with too large of an input buffer. */
if (crd->crd_len > MAX_REQUEST_SIZE)
return (EFBIG);
iv_loc = IV_NOP;
if (crd->crd_flags & CRD_F_ENCRYPT) {
op_type = CHCR_ENCRYPT_OP;
if (crd->crd_flags & CRD_F_IV_EXPLICIT)
memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
else
arc4rand(iv, s->blkcipher.iv_len, 0);
iv_loc = IV_IMMEDIATE;
if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
crypto_copyback(crp->crp_flags, crp->crp_buf,
crd->crd_inject, s->blkcipher.iv_len, iv);
} else {
op_type = CHCR_DECRYPT_OP;
if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
iv_loc = IV_IMMEDIATE;
} else
iv_loc = IV_DSGL;
}
sglist_reset(sc->sg_dsgl);
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
crd->crd_len);
if (error)
return (error);
dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
return (EFBIG);
dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
/* The 'key' must be 128-bit aligned. */
kctx_len = roundup2(s->blkcipher.key_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
if (ccr_use_imm_data(transhdr_len, crd->crd_len +
s->blkcipher.iv_len)) {
imm_len = crd->crd_len;
if (iv_loc == IV_DSGL) {
crypto_copydata(crp->crp_flags, crp->crp_buf,
crd->crd_inject, s->blkcipher.iv_len, iv);
iv_loc = IV_IMMEDIATE;
}
sgl_nsegs = 0;
sgl_len = 0;
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
if (iv_loc == IV_DSGL) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crd->crd_inject, s->blkcipher.iv_len);
if (error)
return (error);
}
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crd->crd_skip, crd->crd_len);
if (error)
return (error);
sgl_nsegs = sc->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
if (iv_loc == IV_IMMEDIATE)
wr_len += s->blkcipher.iv_len;
wr = alloc_wrqe(wr_len, sc->txq);
if (wr == NULL) {
sc->stats_wr_nomem++;
return (ENOMEM);
}
crwr = wrtod(wr);
memset(crwr, 0, wr_len);
ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0,
iv_loc, crp);
/* XXX: Hardcodes SGE loopback channel of 0. */
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);
crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));
/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
crwr->sec_cpl.seqno_numivs = htobe32(
V_SCMD_SEQ_NO_CTRL(0) |
V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
V_SCMD_ENC_DEC_CTRL(op_type) |
V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) |
V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) |
V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
V_SCMD_NUM_IVS(0));
crwr->sec_cpl.ivgen_hdrlen = htobe32(
V_SCMD_IV_GEN_CTRL(0) |
V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
switch (crd->crd_alg) {
case CRYPTO_AES_CBC:
if (crd->crd_flags & CRD_F_ENCRYPT)
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
else
memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
s->blkcipher.key_len);
break;
case CRYPTO_AES_ICM:
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
break;
case CRYPTO_AES_XTS:
key_half = s->blkcipher.key_len / 2;
memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
key_half);
if (crd->crd_flags & CRD_F_ENCRYPT)
memcpy(crwr->key_ctx.key + key_half,
s->blkcipher.enckey, key_half);
else
memcpy(crwr->key_ctx.key + key_half,
s->blkcipher.deckey, key_half);
break;
}
dst = (char *)(crwr + 1) + kctx_len;
ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
if (iv_loc == IV_IMMEDIATE) {
memcpy(dst, iv, s->blkcipher.iv_len);
dst += s->blkcipher.iv_len;
}
if (imm_len != 0)
crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
crd->crd_len, dst);
else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
return (0);
}
static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
/*
* The updated IV to permit chained requests is at
* cpl->data[2], but OCF doesn't permit chained requests.
*/
return (error);
}
/*
* 'hashsize' is the length of a full digest. 'authsize' is the
* requested digest length for this operation which may be less
* than 'hashsize'.
*/
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{
if (authsize == 10)
return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366);
if (authsize == 12)
return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT);
if (authsize == hashsize / 2)
return (CHCR_SCMD_HMAC_CTRL_DIV2);
return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC);
}
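/*
 * For example (illustrative only): a SHA-256 HMAC with a 16-byte MAC
 * (authsize == hashsize / 2) selects CHCR_SCMD_HMAC_CTRL_DIV2, a
 * 12-byte MAC selects CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT, and a full
 * 32-byte MAC selects CHCR_SCMD_HMAC_CTRL_NO_TRUNC.
 */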
static int
ccr_authenc(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
{
char iv[CHCR_MAX_CRYPTO_IV_LEN];
struct chcr_wr *crwr;
struct wrqe *wr;
struct auth_hash *axf;
char *dst;
u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len;
u_int hash_size_in_response, imm_len, iopad_size;
u_int aad_start, aad_len, aad_stop;
u_int auth_start, auth_stop, auth_insert;
u_int cipher_start, cipher_stop;
u_int hmac_ctrl, input_len;
int dsgl_nsegs, dsgl_len;
int sgl_nsegs, sgl_len;
int error;
/*
* If there is a need in the future, requests with an empty
* payload could be supported as HMAC-only requests.
*/
if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
return (EINVAL);
if (crde->crd_alg == CRYPTO_AES_CBC &&
(crde->crd_len % AES_BLOCK_LEN) != 0)
return (EINVAL);
/*
* AAD is only permitted before the cipher/plain text, not
* after.
*/
if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
return (EINVAL);
axf = s->hmac.auth_hash;
hash_size_in_response = s->hmac.hash_len;
/*
* The IV is always stored at the start of the buffer even
* though it may be duplicated in the payload. The crypto
* engine doesn't work properly if the IV offset points inside
* of the AAD region, so a second copy is always required.
*/
iv_loc = IV_IMMEDIATE;
if (crde->crd_flags & CRD_F_ENCRYPT) {
op_type = CHCR_ENCRYPT_OP;
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
else
arc4rand(iv, s->blkcipher.iv_len, 0);
if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
crypto_copyback(crp->crp_flags, crp->crp_buf,
crde->crd_inject, s->blkcipher.iv_len, iv);
} else {
op_type = CHCR_DECRYPT_OP;
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
else
crypto_copydata(crp->crp_flags, crp->crp_buf,
crde->crd_inject, s->blkcipher.iv_len, iv);
}
/*
* The output buffer consists of the cipher text followed by
* the hash when encrypting. For decryption it only contains
* the plain text.
*/
if (op_type == CHCR_ENCRYPT_OP) {
if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE)
return (EFBIG);
} else {
if (crde->crd_len > MAX_REQUEST_SIZE)
return (EFBIG);
}
sglist_reset(sc->sg_dsgl);
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
crde->crd_len);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
crda->crd_inject, hash_size_in_response);
if (error)
return (error);
}
dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
return (EFBIG);
dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
/* PADs must be 128-bit aligned. */
iopad_size = roundup2(s->hmac.partial_digest_len, 16);
/*
* The 'key' part of the key context consists of the key followed
* by the IPAD and OPAD.
*/
kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
/*
* The input buffer consists of the IV, any AAD, and then the
* cipher/plain text. For decryption requests the hash is
* appended after the cipher text.
*/
if (crda->crd_skip < crde->crd_skip) {
if (crda->crd_skip + crda->crd_len > crde->crd_skip)
aad_len = (crde->crd_skip - crda->crd_skip);
else
aad_len = crda->crd_len;
} else
aad_len = 0;
input_len = aad_len + crde->crd_len;
/*
 * The firmware hangs if sent a request which is only slightly
 * smaller than MAX_REQUEST_SIZE. In particular, the
* firmware appears to require 512 - 16 bytes of spare room
* along with the size of the hash even if the hash isn't
* included in the input buffer.
*/
if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
MAX_REQUEST_SIZE)
return (EFBIG);
if (op_type == CHCR_DECRYPT_OP)
input_len += hash_size_in_response;
if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
imm_len = input_len;
sgl_nsegs = 0;
sgl_len = 0;
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
if (aad_len != 0) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crda->crd_skip, aad_len);
if (error)
return (error);
}
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crde->crd_skip, crde->crd_len);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crda->crd_inject, hash_size_in_response);
if (error)
return (error);
}
sgl_nsegs = sc->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
/*
* Any auth-only data before the cipher region is marked as AAD.
* Auth-data that overlaps with the cipher region is placed in
* the auth section.
*/
if (aad_len != 0) {
aad_start = s->blkcipher.iv_len + 1;
aad_stop = aad_start + aad_len - 1;
} else {
aad_start = 0;
aad_stop = 0;
}
cipher_start = s->blkcipher.iv_len + aad_len + 1;
if (op_type == CHCR_DECRYPT_OP)
cipher_stop = hash_size_in_response;
else
cipher_stop = 0;
if (aad_len == crda->crd_len) {
auth_start = 0;
auth_stop = 0;
} else {
if (aad_len != 0)
auth_start = cipher_start;
else
auth_start = s->blkcipher.iv_len + crda->crd_skip -
crde->crd_skip + 1;
auth_stop = (crde->crd_skip + crde->crd_len) -
(crda->crd_skip + crda->crd_len) + cipher_stop;
}
if (op_type == CHCR_DECRYPT_OP)
auth_insert = hash_size_in_response;
else
auth_insert = 0;
wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
if (iv_loc == IV_IMMEDIATE)
wr_len += s->blkcipher.iv_len;
wr = alloc_wrqe(wr_len, sc->txq);
if (wr == NULL) {
sc->stats_wr_nomem++;
return (ENOMEM);
}
crwr = wrtod(wr);
memset(crwr, 0, wr_len);
ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, iv_loc,
crp);
/* XXX: Hardcodes SGE loopback channel of 0. */
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);
crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
crwr->sec_cpl.seqno_numivs = htobe32(
V_SCMD_SEQ_NO_CTRL(0) |
V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
V_SCMD_ENC_DEC_CTRL(op_type) |
V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
V_SCMD_HMAC_CTRL(hmac_ctrl) |
V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
V_SCMD_NUM_IVS(0));
crwr->sec_cpl.ivgen_hdrlen = htobe32(
V_SCMD_IV_GEN_CTRL(0) |
V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
switch (crde->crd_alg) {
case CRYPTO_AES_CBC:
if (crde->crd_flags & CRD_F_ENCRYPT)
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
else
memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
s->blkcipher.key_len);
break;
case CRYPTO_AES_ICM:
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
break;
case CRYPTO_AES_XTS:
key_half = s->blkcipher.key_len / 2;
memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
key_half);
if (crde->crd_flags & CRD_F_ENCRYPT)
memcpy(crwr->key_ctx.key + key_half,
s->blkcipher.enckey, key_half);
else
memcpy(crwr->key_ctx.key + key_half,
s->blkcipher.deckey, key_half);
break;
}
dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);
dst = (char *)(crwr + 1) + kctx_len;
ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
if (iv_loc == IV_IMMEDIATE) {
memcpy(dst, iv, s->blkcipher.iv_len);
dst += s->blkcipher.iv_len;
}
if (imm_len != 0) {
if (aad_len != 0) {
crypto_copydata(crp->crp_flags, crp->crp_buf,
crda->crd_skip, aad_len, dst);
dst += aad_len;
}
crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
crde->crd_len, dst);
dst += crde->crd_len;
if (op_type == CHCR_DECRYPT_OP)
crypto_copydata(crp->crp_flags, crp->crp_buf,
crda->crd_inject, hash_size_in_response, dst);
} else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
return (0);
}
static int
ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
struct cryptodesc *crd;
/*
* The updated IV to permit chained requests is at
* cpl->data[2], but OCF doesn't permit chained requests.
*
* For a decryption request, the hardware may do a verification
* of the HMAC which will fail if the existing HMAC isn't in the
* buffer. If that happens, clear the error and copy the HMAC
* from the CPL reply into the buffer.
*
* For encryption requests, crd should be the cipher request
* which will have CRD_F_ENCRYPT set. For decryption
* requests, crp_desc will be the HMAC request which should
* not have this flag set.
*/
crd = crp->crp_desc;
if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
!(crd->crd_flags & CRD_F_ENCRYPT)) {
crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
s->hmac.hash_len, (c_caddr_t)(cpl + 1));
error = 0;
}
return (error);
}
static int
ccr_gcm(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
{
char iv[CHCR_MAX_CRYPTO_IV_LEN];
struct chcr_wr *crwr;
struct wrqe *wr;
char *dst;
u_int iv_len, iv_loc, kctx_len, op_type, transhdr_len, wr_len;
u_int hash_size_in_response, imm_len;
u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
u_int hmac_ctrl, input_len;
int dsgl_nsegs, dsgl_len;
int sgl_nsegs, sgl_len;
int error;
if (s->blkcipher.key_len == 0)
return (EINVAL);
/*
* AAD is only permitted before the cipher/plain text, not
* after.
*/
if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
return (EINVAL);
hash_size_in_response = s->gmac.hash_len;
/*
* The IV is always stored at the start of the buffer even
* though it may be duplicated in the payload. The crypto
* engine doesn't work properly if the IV offset points inside
* of the AAD region, so a second copy is always required.
*
* The IV for GCM is further complicated in that IPSec
* provides a full 16-byte IV (including the counter), whereas
* the /dev/crypto interface sometimes provides a full 16-byte
* IV (if no IV is provided in the ioctl) and sometimes a
* 12-byte IV (if the IV was explicit). For now the driver
 * always assumes a 12-byte IV and initializes the low 4-byte
* counter to 1.
*/
iv_loc = IV_IMMEDIATE;
if (crde->crd_flags & CRD_F_ENCRYPT) {
op_type = CHCR_ENCRYPT_OP;
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
else
arc4rand(iv, s->blkcipher.iv_len, 0);
if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
crypto_copyback(crp->crp_flags, crp->crp_buf,
crde->crd_inject, s->blkcipher.iv_len, iv);
} else {
op_type = CHCR_DECRYPT_OP;
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
else
crypto_copydata(crp->crp_flags, crp->crp_buf,
crde->crd_inject, s->blkcipher.iv_len, iv);
}
/*
* If the input IV is 12 bytes, append an explicit counter of
* 1.
*/
if (s->blkcipher.iv_len == 12) {
*(uint32_t *)&iv[12] = htobe32(1);
iv_len = AES_BLOCK_LEN;
} else
iv_len = s->blkcipher.iv_len;
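	/*
	 * Illustrative layout of the resulting 16-byte IV for the
	 * common 12-byte nonce case:
	 *
	 *	iv[0..11]  = nonce supplied with the request
	 *	iv[12..15] = 0x00000001 (big-endian block counter)
	 */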
/*
* The output buffer consists of the cipher text followed by
* the tag when encrypting. For decryption it only contains
* the plain text.
*/
if (op_type == CHCR_ENCRYPT_OP) {
if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE)
return (EFBIG);
} else {
if (crde->crd_len > MAX_REQUEST_SIZE)
return (EFBIG);
}
sglist_reset(sc->sg_dsgl);
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
crde->crd_len);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
crda->crd_inject, hash_size_in_response);
if (error)
return (error);
}
dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
return (EFBIG);
dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
/*
* The 'key' part of the key context consists of the key followed
* by the Galois hash key.
*/
kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
/*
* The input buffer consists of the IV, any AAD, and then the
* cipher/plain text. For decryption requests the hash is
* appended after the cipher text.
*/
input_len = crda->crd_len + crde->crd_len;
if (op_type == CHCR_DECRYPT_OP)
input_len += hash_size_in_response;
if (input_len > MAX_REQUEST_SIZE)
return (EFBIG);
if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
imm_len = input_len;
sgl_nsegs = 0;
sgl_len = 0;
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
if (crda->crd_len != 0) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crda->crd_skip, crda->crd_len);
if (error)
return (error);
}
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crde->crd_skip, crde->crd_len);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
crda->crd_inject, hash_size_in_response);
if (error)
return (error);
}
sgl_nsegs = sc->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
if (crda->crd_len != 0) {
aad_start = iv_len + 1;
aad_stop = aad_start + crda->crd_len - 1;
} else {
aad_start = 0;
aad_stop = 0;
}
cipher_start = iv_len + crda->crd_len + 1;
if (op_type == CHCR_DECRYPT_OP)
cipher_stop = hash_size_in_response;
else
cipher_stop = 0;
if (op_type == CHCR_DECRYPT_OP)
auth_insert = hash_size_in_response;
else
auth_insert = 0;
wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
if (iv_loc == IV_IMMEDIATE)
wr_len += iv_len;
wr = alloc_wrqe(wr_len, sc->txq);
if (wr == NULL) {
sc->stats_wr_nomem++;
return (ENOMEM);
}
crwr = wrtod(wr);
memset(crwr, 0, wr_len);
ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
0, iv_loc, crp);
/* XXX: Hardcodes SGE loopback channel of 0. */
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
/*
* NB: cipherstop is explicitly set to 0. On encrypt it
* should normally be set to 0 anyway (as the encrypt crd ends
* at the end of the input). However, for decrypt the cipher
* ends before the tag in the AUTHENC case (and authstop is
* set to stop before the tag), but for GCM the cipher still
* runs to the end of the buffer. Not sure if this is
* intentional or a firmware quirk, but it is required for
* working tag validation with GCM decryption.
*/
crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
crwr->sec_cpl.seqno_numivs = htobe32(
V_SCMD_SEQ_NO_CTRL(0) |
V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
V_SCMD_ENC_DEC_CTRL(op_type) |
V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) |
V_SCMD_HMAC_CTRL(hmac_ctrl) |
V_SCMD_IV_SIZE(iv_len / 2) |
V_SCMD_NUM_IVS(0));
crwr->sec_cpl.ivgen_hdrlen = htobe32(
V_SCMD_IV_GEN_CTRL(0) |
V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
dst = (char *)(crwr + 1) + kctx_len;
ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
if (iv_loc == IV_IMMEDIATE) {
memcpy(dst, iv, iv_len);
dst += iv_len;
}
if (imm_len != 0) {
if (crda->crd_len != 0) {
crypto_copydata(crp->crp_flags, crp->crp_buf,
crda->crd_skip, crda->crd_len, dst);
dst += crda->crd_len;
}
crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
crde->crd_len, dst);
dst += crde->crd_len;
if (op_type == CHCR_DECRYPT_OP)
crypto_copydata(crp->crp_flags, crp->crp_buf,
crda->crd_inject, hash_size_in_response, dst);
} else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
return (0);
}
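/*
 * NB: The payload described to the engine above is laid out as
 * IV | AAD | cipher text (| tag on decrypt), and the AAD/cipher/auth
 * start and stop values in the SEC_PDU are 1-based byte positions
 * within that payload.  Small requests are copied inline into the
 * work request (imm_len != 0); larger ones are described by the ULPTX
 * SGL built from sg_ulptx, while the output buffer is described by
 * the physical DSGL.
 */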
static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
/*
* The updated IV to permit chained requests is at
* cpl->data[2], but OCF doesn't permit chained requests.
*
* Note that the hardware should always verify the GMAC hash.
*/
return (error);
}
/*
* Handle a GCM request with an empty payload by performing the
* operation in software. Derived from swcr_authenc().
*/
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
struct cryptodesc *crda, struct cryptodesc *crde)
{
struct aes_gmac_ctx gmac_ctx;
char block[GMAC_BLOCK_LEN];
char digest[GMAC_DIGEST_LEN];
char iv[AES_BLOCK_LEN];
int i, len;
/*
* This assumes a 12-byte IV from the crp. See longer comment
* above in ccr_gcm() for more details.
*/
if (crde->crd_flags & CRD_F_ENCRYPT) {
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
memcpy(iv, crde->crd_iv, 12);
else
arc4rand(iv, 12, 0);
} else {
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
memcpy(iv, crde->crd_iv, 12);
else
crypto_copydata(crp->crp_flags, crp->crp_buf,
crde->crd_inject, 12, iv);
}
*(uint32_t *)&iv[12] = htobe32(1);
/* Initialize the MAC. */
AES_GMAC_Init(&gmac_ctx);
AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));
/* MAC the AAD. */
for (i = 0; i < crda->crd_len; i += sizeof(block)) {
len = imin(crda->crd_len - i, sizeof(block));
crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
i, len, block);
bzero(block + len, sizeof(block) - len);
AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
}
/* Length block. */
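/*
 * The GCM length block is len(AAD) || len(C), each a 64-bit
 * big-endian bit count; the payload is empty on this path, so only
 * the AAD bit count in the first quadword is non-zero.
 */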
bzero(block, sizeof(block));
((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
AES_GMAC_Final(digest, &gmac_ctx);
if (crde->crd_flags & CRD_F_ENCRYPT) {
crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
sizeof(digest), digest);
crp->crp_etype = 0;
} else {
char digest2[GMAC_DIGEST_LEN];
crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
sizeof(digest2), digest2);
if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
crp->crp_etype = 0;
else
crp->crp_etype = EBADMSG;
}
crypto_done(crp);
}
static void
ccr_identify(driver_t *driver, device_t parent)
{
struct adapter *sc;
sc = device_get_softc(parent);
if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
device_find_child(parent, "ccr", -1) == NULL)
device_add_child(parent, "ccr", -1);
}
static int
ccr_probe(device_t dev)
{
device_set_desc(dev, "Chelsio Crypto Accelerator");
return (BUS_PROBE_DEFAULT);
}
static void
ccr_sysctls(struct ccr_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid *oid;
struct sysctl_oid_list *children;
ctx = device_get_sysctl_ctx(sc->dev);
/*
* dev.ccr.X.
*/
oid = device_get_sysctl_tree(sc->dev);
children = SYSCTL_CHILDREN(oid);
/*
* dev.ccr.X.stats.
*/
oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
NULL, "statistics");
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
&sc->stats_hmac, 0, "HMAC requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
&sc->stats_blkcipher_encrypt, 0,
"Cipher encryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
&sc->stats_blkcipher_decrypt, 0,
"Cipher decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
&sc->stats_authenc_encrypt, 0,
"Combined AES+HMAC encryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
&sc->stats_authenc_decrypt, 0,
"Combined AES+HMAC decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
&sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
&sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
&sc->stats_wr_nomem, 0, "Work request memory allocation failures");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
&sc->stats_inflight, 0, "Requests currently pending");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
&sc->stats_mac_error, 0, "MAC errors");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
&sc->stats_pad_error, 0, "Padding errors");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
&sc->stats_bad_session, 0, "Requests with invalid session ID");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
&sc->stats_sglist_error, 0,
"Requests for which DMA mapping failed");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
&sc->stats_process_error, 0, "Requests failed during queueing");
}
static int
ccr_attach(device_t dev)
{
struct ccr_softc *sc;
int32_t cid;
/*
* TODO: Crypto requests will panic if the parent device isn't
* initialized so that the queues are up and running. Need to
* figure out how to handle that correctly, maybe just reject
* requests if the adapter isn't fully initialized?
*/
sc = device_get_softc(dev);
sc->dev = dev;
sc->adapter = device_get_softc(device_get_parent(dev));
sc->txq = &sc->adapter->sge.ctrlq[0];
sc->rxq = &sc->adapter->sge.rxq[0];
cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
if (cid < 0) {
device_printf(dev, "could not get crypto driver id\n");
return (ENXIO);
}
sc->cid = cid;
sc->adapter->ccr_softc = sc;
/* XXX: TODO? */
sc->tx_channel_id = 0;
mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
ccr_sysctls(sc);
crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
return (0);
}
static int
ccr_detach(device_t dev)
{
struct ccr_softc *sc;
int i;
sc = device_get_softc(dev);
mtx_lock(&sc->lock);
for (i = 0; i < sc->nsessions; i++) {
if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
mtx_unlock(&sc->lock);
return (EBUSY);
}
}
sc->detaching = true;
mtx_unlock(&sc->lock);
crypto_unregister_all(sc->cid);
free(sc->sessions, M_CCR);
mtx_destroy(&sc->lock);
sglist_free(sc->sg_dsgl);
sglist_free(sc->sg_ulptx);
sglist_free(sc->sg_crp);
sc->adapter->ccr_softc = NULL;
return (0);
}
static void
ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
{
uint32_t *u32;
uint64_t *u64;
u_int i;
u32 = (uint32_t *)dst;
u64 = (uint64_t *)dst;
switch (cri_alg) {
case CRYPTO_SHA1_HMAC:
for (i = 0; i < SHA1_HASH_LEN / 4; i++)
u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
break;
case CRYPTO_SHA2_256_HMAC:
for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
break;
case CRYPTO_SHA2_384_HMAC:
for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
break;
case CRYPTO_SHA2_512_HMAC:
for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
break;
}
}
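/*
 * Precompute the HMAC inner and outer partial digests.  Since
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)) and K ^ ipad and
 * K ^ opad each fill exactly one hash block, the hardware only needs
 * the compression-function state after that first block; those
 * partial states are stored in s->hmac.ipad and s->hmac.opad.
 */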
static void
ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
int klen)
{
union authctx auth_ctx;
struct auth_hash *axf;
u_int i;
/*
* If the key is larger than the block size, use the digest of
* the key as the key instead.
*/
axf = s->hmac.auth_hash;
klen /= 8;
if (klen > axf->blocksize) {
axf->Init(&auth_ctx);
axf->Update(&auth_ctx, key, klen);
axf->Final(s->hmac.ipad, &auth_ctx);
klen = axf->hashsize;
} else
memcpy(s->hmac.ipad, key, klen);
memset(s->hmac.ipad + klen, 0, axf->blocksize);
memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);
for (i = 0; i < axf->blocksize; i++) {
s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
s->hmac.opad[i] ^= HMAC_OPAD_VAL;
}
/*
* Hash the raw ipad and opad and store the partial result in
* the same buffer.
*/
axf->Init(&auth_ctx);
axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);
axf->Init(&auth_ctx);
axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
}
/*
* Borrowed from AES_GMAC_Setkey().
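*
* The hash subkey is H = E_K(0^128): encrypt an all-zero block with
* the cipher key and program the result into the key context.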
*/
static void
ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
{
static char zeroes[GMAC_BLOCK_LEN];
uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
int rounds;
rounds = rijndaelKeySetupEnc(keysched, key, klen);
rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
}
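/*
 * Key lengths are given in bits.  An XTS key is two equal-size AES
 * keys back to back, so 256 selects AES-128-XTS and 512 selects
 * AES-256-XTS; 128- and 192-bit keys are valid only for the other
 * AES modes.
 */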
static int
ccr_aes_check_keylen(int alg, int klen)
{
switch (klen) {
case 128:
case 192:
if (alg == CRYPTO_AES_XTS)
return (EINVAL);
break;
case 256:
break;
case 512:
if (alg != CRYPTO_AES_XTS)
return (EINVAL);
break;
default:
return (EINVAL);
}
return (0);
}
/*
* Borrowed from cesa_prep_aes_key(). We should perhaps have a public
* function to generate this instead.
*
* NB: The crypto engine wants the words in the decryption key in reverse
* order.
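*
* For a 128-bit key, for example, the schedule ends with the round-10
* words ek[40..43], and the context receives htobe32(ek[43]) down to
* htobe32(ek[40]).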
*/
static void
ccr_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
{
uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
uint32_t *dkey;
int i;
rijndaelKeySetupEnc(ek, enc_key, kbits);
dkey = dec_key;
dkey += (kbits / 8) / 4;
switch (kbits) {
case 128:
for (i = 0; i < 4; i++)
*--dkey = htobe32(ek[4 * 10 + i]);
break;
case 192:
for (i = 0; i < 2; i++)
*--dkey = htobe32(ek[4 * 11 + 2 + i]);
for (i = 0; i < 4; i++)
*--dkey = htobe32(ek[4 * 12 + i]);
break;
case 256:
for (i = 0; i < 4; i++)
*--dkey = htobe32(ek[4 * 13 + i]);
for (i = 0; i < 4; i++)
*--dkey = htobe32(ek[4 * 14 + i]);
break;
}
MPASS(dkey == dec_key);
}
static void
ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
{
unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
unsigned int opad_present;
if (alg == CRYPTO_AES_XTS)
kbits = klen / 2;
else
kbits = klen;
switch (kbits) {
case 128:
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
break;
case 192:
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
break;
case 256:
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
break;
default:
panic("should not get here");
}
s->blkcipher.key_len = klen / 8;
memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
switch (alg) {
case CRYPTO_AES_CBC:
case CRYPTO_AES_XTS:
ccr_aes_getdeckey(s->blkcipher.deckey, key, kbits);
break;
}
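/*
 * Compute the key context length: the cipher key (padded to 16
 * bytes), followed by the ipad/opad partial digests for AUTHENC or
 * the GHASH subkey for GCM.
 */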
kctx_len = roundup2(s->blkcipher.key_len, 16);
switch (s->mode) {
case AUTHENC:
mk_size = s->hmac.mk_size;
opad_present = 1;
iopad_size = roundup2(s->hmac.partial_digest_len, 16);
kctx_len += iopad_size * 2;
break;
case GCM:
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
opad_present = 0;
kctx_len += GMAC_BLOCK_LEN;
break;
default:
mk_size = CHCR_KEYCTX_NO_KEY;
opad_present = 0;
break;
}
kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}
static int
ccr_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
struct ccr_softc *sc;
struct ccr_session *s;
struct auth_hash *auth_hash;
struct cryptoini *c, *hash, *cipher;
unsigned int auth_mode, cipher_mode, iv_len, mk_size;
unsigned int partial_digest_len;
int error, i, sess;
bool gcm_hash;
if (sidp == NULL || cri == NULL)
return (EINVAL);
gcm_hash = false;
cipher = NULL;
hash = NULL;
auth_hash = NULL;
auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP;
iv_len = 0;
mk_size = 0;
partial_digest_len = 0;
for (c = cri; c != NULL; c = c->cri_next) {
switch (c->cri_alg) {
case CRYPTO_SHA1_HMAC:
case CRYPTO_SHA2_256_HMAC:
case CRYPTO_SHA2_384_HMAC:
case CRYPTO_SHA2_512_HMAC:
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
if (hash)
return (EINVAL);
hash = c;
switch (c->cri_alg) {
case CRYPTO_SHA1_HMAC:
auth_hash = &auth_hash_hmac_sha1;
auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
partial_digest_len = SHA1_HASH_LEN;
break;
case CRYPTO_SHA2_256_HMAC:
auth_hash = &auth_hash_hmac_sha2_256;
auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
partial_digest_len = SHA2_256_HASH_LEN;
break;
case CRYPTO_SHA2_384_HMAC:
auth_hash = &auth_hash_hmac_sha2_384;
auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
partial_digest_len = SHA2_512_HASH_LEN;
break;
case CRYPTO_SHA2_512_HMAC:
auth_hash = &auth_hash_hmac_sha2_512;
auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
partial_digest_len = SHA2_512_HASH_LEN;
break;
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
gcm_hash = true;
auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
break;
}
break;
case CRYPTO_AES_CBC:
case CRYPTO_AES_ICM:
case CRYPTO_AES_NIST_GCM_16:
case CRYPTO_AES_XTS:
if (cipher)
return (EINVAL);
cipher = c;
switch (c->cri_alg) {
case CRYPTO_AES_CBC:
cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
iv_len = AES_BLOCK_LEN;
break;
case CRYPTO_AES_ICM:
cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
iv_len = AES_BLOCK_LEN;
break;
case CRYPTO_AES_NIST_GCM_16:
cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM;
iv_len = AES_GCM_IV_LEN;
break;
case CRYPTO_AES_XTS:
cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
iv_len = AES_BLOCK_LEN;
break;
}
if (c->cri_key != NULL) {
error = ccr_aes_check_keylen(c->cri_alg,
c->cri_klen);
if (error)
return (error);
}
break;
default:
return (EINVAL);
}
}
if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM))
return (EINVAL);
if (hash == NULL && cipher == NULL)
return (EINVAL);
if (hash != NULL && hash->cri_key == NULL)
return (EINVAL);
sc = device_get_softc(dev);
mtx_lock(&sc->lock);
if (sc->detaching) {
mtx_unlock(&sc->lock);
return (ENXIO);
}
sess = -1;
for (i = 0; i < sc->nsessions; i++) {
if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
sess = i;
break;
}
}
if (sess == -1) {
- s = mallocarray(sc->nsessions + 1, sizeof(*s), M_CCR,
+ s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCR,
M_NOWAIT | M_ZERO);
if (s == NULL) {
mtx_unlock(&sc->lock);
return (ENOMEM);
}
if (sc->sessions != NULL)
memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
sess = sc->nsessions;
free(sc->sessions, M_CCR);
sc->sessions = s;
sc->nsessions++;
}
s = &sc->sessions[sess];
if (gcm_hash)
s->mode = GCM;
else if (hash != NULL && cipher != NULL)
s->mode = AUTHENC;
else if (hash != NULL)
s->mode = HMAC;
else {
MPASS(cipher != NULL);
s->mode = BLKCIPHER;
}
if (gcm_hash) {
if (hash->cri_mlen == 0)
s->gmac.hash_len = AES_GMAC_HASH_LEN;
else
s->gmac.hash_len = hash->cri_mlen;
ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
} else if (hash != NULL) {
s->hmac.auth_hash = auth_hash;
s->hmac.auth_mode = auth_mode;
s->hmac.mk_size = mk_size;
s->hmac.partial_digest_len = partial_digest_len;
if (hash->cri_mlen == 0)
s->hmac.hash_len = auth_hash->hashsize;
else
s->hmac.hash_len = hash->cri_mlen;
ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
hash->cri_klen);
}
if (cipher != NULL) {
s->blkcipher.cipher_mode = cipher_mode;
s->blkcipher.iv_len = iv_len;
if (cipher->cri_key != NULL)
ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
cipher->cri_klen);
}
s->active = true;
mtx_unlock(&sc->lock);
*sidp = sess;
return (0);
}
static int
ccr_freesession(device_t dev, uint64_t tid)
{
struct ccr_softc *sc;
uint32_t sid;
int error;
sc = device_get_softc(dev);
sid = CRYPTO_SESID2LID(tid);
mtx_lock(&sc->lock);
if (sid >= sc->nsessions || !sc->sessions[sid].active)
error = EINVAL;
else {
if (sc->sessions[sid].pending != 0)
device_printf(dev,
"session %d freed with %d pending requests\n", sid,
sc->sessions[sid].pending);
sc->sessions[sid].active = false;
error = 0;
}
mtx_unlock(&sc->lock);
return (error);
}
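/*
 * Dispatch entry point from OCF.  The crp buffer is mapped into
 * sc->sg_crp once here and then handed to the per-mode handler; on
 * any submission error the request is completed immediately with the
 * error in crp_etype.
 */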
static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
struct ccr_softc *sc;
struct ccr_session *s;
struct cryptodesc *crd, *crda, *crde;
uint32_t sid;
int error;
if (crp == NULL)
return (EINVAL);
crd = crp->crp_desc;
sid = CRYPTO_SESID2LID(crp->crp_sid);
sc = device_get_softc(dev);
mtx_lock(&sc->lock);
if (sid >= sc->nsessions || !sc->sessions[sid].active) {
sc->stats_bad_session++;
error = EINVAL;
goto out;
}
error = ccr_populate_sglist(sc->sg_crp, crp);
if (error) {
sc->stats_sglist_error++;
goto out;
}
s = &sc->sessions[sid];
switch (s->mode) {
case HMAC:
if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
crd->crd_klen);
error = ccr_hmac(sc, sid, s, crp);
if (error == 0)
sc->stats_hmac++;
break;
case BLKCIPHER:
if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
error = ccr_aes_check_keylen(crd->crd_alg,
crd->crd_klen);
if (error)
break;
ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
crd->crd_klen);
}
error = ccr_blkcipher(sc, sid, s, crp);
if (error == 0) {
if (crd->crd_flags & CRD_F_ENCRYPT)
sc->stats_blkcipher_encrypt++;
else
sc->stats_blkcipher_decrypt++;
}
break;
case AUTHENC:
error = 0;
switch (crd->crd_alg) {
case CRYPTO_AES_CBC:
case CRYPTO_AES_ICM:
case CRYPTO_AES_XTS:
/* Only encrypt-then-authenticate supported. */
crde = crd;
crda = crd->crd_next;
if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
error = EINVAL;
break;
}
break;
default:
crda = crd;
crde = crd->crd_next;
if (crde->crd_flags & CRD_F_ENCRYPT) {
error = EINVAL;
break;
}
break;
}
if (error)
break;
if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
crda->crd_klen);
if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
error = ccr_aes_check_keylen(crde->crd_alg,
crde->crd_klen);
if (error)
break;
ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
crde->crd_klen);
}
error = ccr_authenc(sc, sid, s, crp, crda, crde);
if (error == 0) {
if (crde->crd_flags & CRD_F_ENCRYPT)
sc->stats_authenc_encrypt++;
else
sc->stats_authenc_decrypt++;
}
break;
case GCM:
error = 0;
if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
crde = crd;
crda = crd->crd_next;
} else {
crda = crd;
crde = crd->crd_next;
}
if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
error = ccr_aes_check_keylen(crde->crd_alg,
crde->crd_klen);
if (error)
break;
ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
crde->crd_klen);
}
if (crde->crd_len == 0) {
mtx_unlock(&sc->lock);
ccr_gcm_soft(s, crp, crda, crde);
return (0);
}
error = ccr_gcm(sc, sid, s, crp, crda, crde);
if (error == 0) {
if (crde->crd_flags & CRD_F_ENCRYPT)
sc->stats_gcm_encrypt++;
else
sc->stats_gcm_decrypt++;
}
break;
}
if (error == 0) {
s->pending++;
sc->stats_inflight++;
} else
sc->stats_process_error++;
out:
mtx_unlock(&sc->lock);
if (error) {
crp->crp_etype = error;
crypto_done(crp);
}
return (0);
}
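/*
 * Completion handler for CPL_FW6_PLD messages.  The firmware echoes
 * the request cookie (the struct cryptop pointer) in data[1] and
 * returns status bits in data[0], which are checked for MAC and
 * padding errors before the per-mode *_done callback runs.
 */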
static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct ccr_softc *sc = iq->adapter->ccr_softc;
struct ccr_session *s;
const struct cpl_fw6_pld *cpl;
struct cryptop *crp;
uint32_t sid, status;
int error;
if (m != NULL)
cpl = mtod(m, const void *);
else
cpl = (const void *)(rss + 1);
crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
sid = CRYPTO_SESID2LID(crp->crp_sid);
status = be64toh(cpl->data[0]);
if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
error = EBADMSG;
else
error = 0;
mtx_lock(&sc->lock);
MPASS(sid < sc->nsessions);
s = &sc->sessions[sid];
s->pending--;
sc->stats_inflight--;
switch (s->mode) {
case HMAC:
error = ccr_hmac_done(sc, s, crp, cpl, error);
break;
case BLKCIPHER:
error = ccr_blkcipher_done(sc, s, crp, cpl, error);
break;
case AUTHENC:
error = ccr_authenc_done(sc, s, crp, cpl, error);
break;
case GCM:
error = ccr_gcm_done(sc, s, crp, cpl, error);
break;
}
if (error == EBADMSG) {
if (CHK_MAC_ERR_BIT(status))
sc->stats_mac_error++;
if (CHK_PAD_ERR_BIT(status))
sc->stats_pad_error++;
}
mtx_unlock(&sc->lock);
crp->crp_etype = error;
crypto_done(crp);
m_freem(m);
return (0);
}
static int
ccr_modevent(module_t mod, int cmd, void *arg)
{
switch (cmd) {
case MOD_LOAD:
t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
return (0);
case MOD_UNLOAD:
t4_register_cpl_handler(CPL_FW6_PLD, NULL);
return (0);
default:
return (EOPNOTSUPP);
}
}
static device_method_t ccr_methods[] = {
DEVMETHOD(device_identify, ccr_identify),
DEVMETHOD(device_probe, ccr_probe),
DEVMETHOD(device_attach, ccr_attach),
DEVMETHOD(device_detach, ccr_detach),
DEVMETHOD(cryptodev_newsession, ccr_newsession),
DEVMETHOD(cryptodev_freesession, ccr_freesession),
DEVMETHOD(cryptodev_process, ccr_process),
DEVMETHOD_END
};
static driver_t ccr_driver = {
"ccr",
ccr_methods,
sizeof(struct ccr_softc)
};
static devclass_t ccr_devclass;
DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
Index: head/sys/dev/e1000/if_em.c
===================================================================
--- head/sys/dev/e1000/if_em.c (revision 328217)
+++ head/sys/dev/e1000/if_em.c (revision 328218)
@@ -1,4560 +1,4559 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "if_em.h"
#include <sys/sbuf.h>
#include <machine/_inttypes.h>
#define em_mac_min e1000_82547
#define igb_mac_min e1000_82575
/*********************************************************************
* Driver version:
*********************************************************************/
char em_driver_version[] = "7.6.1-k";
/*********************************************************************
* PCI Device ID Table
*
* Used by probe to select devices to load on
* Last field stores an index into e1000_strings
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
*********************************************************************/
static pci_vendor_info_t em_vendor_info_array[] =
{
/* Intel(R) PRO/1000 Network Connection - Legacy em*/
PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) PRO/1000 Network Connection"),
/* Intel(R) PRO/1000 Network Connection - em */
PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82572EI, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IFE, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_IFE_G, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH9_BM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82574L, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82574LA, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LF, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LF, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LC, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DC, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH2_LV_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_I218_LM3, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V4, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V5, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM6, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V6, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM7, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V7, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM8, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V8, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM9, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V9, "Intel(R) PRO/1000 Network Connection"),
/* required last entry */
PVID_END
};
static pci_vendor_info_t igb_vendor_info_array[] =
{
/* Intel(R) PRO/1000 Network Connection - igb */
PVID(0x8086, E1000_DEV_ID_82575EB_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_NS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_NS_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_SERDES_QUAD, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82576_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82580_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82580_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82580_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82580_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82580_COPPER_DUAL, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_82580_QUAD_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_DH89XXCC_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_DH89XXCC_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_DH89XXCC_SFP, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I350_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I350_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I350_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I350_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I350_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_COPPER_IT, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_COPPER_OEM1, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I210_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I211_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
PVID(0x8086, E1000_DEV_ID_I354_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
/* required last entry */
PVID_END
};
/*********************************************************************
* Function prototypes
*********************************************************************/
static void *em_register(device_t dev);
static void *igb_register(device_t dev);
static int em_if_attach_pre(if_ctx_t ctx);
static int em_if_attach_post(if_ctx_t ctx);
static int em_if_detach(if_ctx_t ctx);
static int em_if_shutdown(if_ctx_t ctx);
static int em_if_suspend(if_ctx_t ctx);
static int em_if_resume(if_ctx_t ctx);
static int em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
static void em_if_queues_free(if_ctx_t ctx);
static uint64_t em_if_get_counter(if_ctx_t, ift_counter);
static void em_if_init(if_ctx_t ctx);
static void em_if_stop(if_ctx_t ctx);
static void em_if_media_status(if_ctx_t, struct ifmediareq *);
static int em_if_media_change(if_ctx_t ctx);
static int em_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void em_if_timer(if_ctx_t ctx, uint16_t qid);
static void em_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void em_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static void em_identify_hardware(if_ctx_t ctx);
static int em_allocate_pci_resources(if_ctx_t ctx);
static void em_free_pci_resources(if_ctx_t ctx);
static void em_reset(if_ctx_t ctx);
static int em_setup_interface(if_ctx_t ctx);
static int em_setup_msix(if_ctx_t ctx);
static void em_initialize_transmit_unit(if_ctx_t ctx);
static void em_initialize_receive_unit(if_ctx_t ctx);
static void em_if_enable_intr(if_ctx_t ctx);
static void em_if_disable_intr(if_ctx_t ctx);
static int em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static void em_if_multi_set(if_ctx_t ctx);
static void em_if_update_admin_status(if_ctx_t ctx);
static void em_if_debug(if_ctx_t ctx);
static void em_update_stats_counters(struct adapter *);
static void em_add_hw_stats(struct adapter *adapter);
static int em_if_set_promisc(if_ctx_t ctx, int flags);
static void em_setup_vlan_hw_support(struct adapter *);
static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void em_print_nvm_info(struct adapter *);
static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int em_get_rs(SYSCTL_HANDLER_ARGS);
static void em_print_debug_info(struct adapter *);
static int em_is_valid_ether_addr(u8 *);
static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
const char *, struct em_int_delay_info *, int, int);
/* Management and WOL Support */
static void em_init_manageability(struct adapter *);
static void em_release_manageability(struct adapter *);
static void em_get_hw_control(struct adapter *);
static void em_release_hw_control(struct adapter *);
static void em_get_wakeup(if_ctx_t ctx);
static void em_enable_wakeup(if_ctx_t ctx);
static int em_enable_phy_wakeup(struct adapter *);
static void em_disable_aspm(struct adapter *);
int em_intr(void *arg);
static void em_disable_promisc(if_ctx_t ctx);
/* MSIX handlers */
static int em_if_msix_intr_assign(if_ctx_t, int);
static int em_msix_link(void *);
static void em_handle_link(void *context);
static void em_enable_vectors_82574(if_ctx_t);
static int em_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int em_sysctl_eee(SYSCTL_HANDLER_ARGS);
static void em_if_led_func(if_ctx_t ctx, int onoff);
static int em_get_regs(SYSCTL_HANDLER_ARGS);
static void lem_smartspeed(struct adapter *adapter);
static void igb_configure_queues(struct adapter *adapter);
/*********************************************************************
* FreeBSD Device Interface Entry Points
*********************************************************************/
static device_method_t em_methods[] = {
/* Device interface */
DEVMETHOD(device_register, em_register),
DEVMETHOD(device_probe, iflib_device_probe),
DEVMETHOD(device_attach, iflib_device_attach),
DEVMETHOD(device_detach, iflib_device_detach),
DEVMETHOD(device_shutdown, iflib_device_shutdown),
DEVMETHOD(device_suspend, iflib_device_suspend),
DEVMETHOD(device_resume, iflib_device_resume),
DEVMETHOD_END
};
static device_method_t igb_methods[] = {
/* Device interface */
DEVMETHOD(device_register, igb_register),
DEVMETHOD(device_probe, iflib_device_probe),
DEVMETHOD(device_attach, iflib_device_attach),
DEVMETHOD(device_detach, iflib_device_detach),
DEVMETHOD(device_shutdown, iflib_device_shutdown),
DEVMETHOD(device_suspend, iflib_device_suspend),
DEVMETHOD(device_resume, iflib_device_resume),
DEVMETHOD_END
};
static driver_t em_driver = {
"em", em_methods, sizeof(struct adapter),
};
static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
MODULE_DEPEND(em, iflib, 1, 1, 1);
IFLIB_PNP_INFO(pci, em, em_vendor_info_array);
static driver_t igb_driver = {
"igb", igb_methods, sizeof(struct adapter),
};
static devclass_t igb_devclass;
DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0);
MODULE_DEPEND(igb, pci, 1, 1, 1);
MODULE_DEPEND(igb, ether, 1, 1, 1);
MODULE_DEPEND(igb, iflib, 1, 1, 1);
IFLIB_PNP_INFO(pci, igb, igb_vendor_info_array);
static device_method_t em_if_methods[] = {
DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
DEVMETHOD(ifdi_attach_post, em_if_attach_post),
DEVMETHOD(ifdi_detach, em_if_detach),
DEVMETHOD(ifdi_shutdown, em_if_shutdown),
DEVMETHOD(ifdi_suspend, em_if_suspend),
DEVMETHOD(ifdi_resume, em_if_resume),
DEVMETHOD(ifdi_init, em_if_init),
DEVMETHOD(ifdi_stop, em_if_stop),
DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
DEVMETHOD(ifdi_intr_enable, em_if_enable_intr),
DEVMETHOD(ifdi_intr_disable, em_if_disable_intr),
DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
DEVMETHOD(ifdi_queues_free, em_if_queues_free),
DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
DEVMETHOD(ifdi_multi_set, em_if_multi_set),
DEVMETHOD(ifdi_media_status, em_if_media_status),
DEVMETHOD(ifdi_media_change, em_if_media_change),
DEVMETHOD(ifdi_mtu_set, em_if_mtu_set),
DEVMETHOD(ifdi_promisc_set, em_if_set_promisc),
DEVMETHOD(ifdi_timer, em_if_timer),
DEVMETHOD(ifdi_vlan_register, em_if_vlan_register),
DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
DEVMETHOD(ifdi_get_counter, em_if_get_counter),
DEVMETHOD(ifdi_led_func, em_if_led_func),
DEVMETHOD(ifdi_rx_queue_intr_enable, em_if_rx_queue_intr_enable),
DEVMETHOD(ifdi_tx_queue_intr_enable, em_if_tx_queue_intr_enable),
DEVMETHOD(ifdi_debug, em_if_debug),
DEVMETHOD_END
};
/*
* note that if (adapter->msix_mem) is replaced by:
* if (adapter->intr_type == IFLIB_INTR_MSIX)
*/
static driver_t em_if_driver = {
"em_if", em_if_methods, sizeof(struct adapter)
};
/*********************************************************************
* Tunable default values.
*********************************************************************/
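/*
 * The interrupt-delay registers on these MACs count in 1.024 us
 * ticks; the macros below convert between ticks and microseconds,
 * rounding to the nearest unit.
 */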
#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN 66
#define MAX_INTS_PER_SEC 8000
#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
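/*
 * The ITR register counts in 256 ns units, so DEFAULT_ITR converts a
 * target rate of MAX_INTS_PER_SEC interrupts per second into that
 * granularity: 1e9 ns / (8000 * 256 ns) is roughly 488.
 */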
/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO 0
#endif
#define TSO_WORKAROUND 4
static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
static int em_disable_crc_stripping = 0;
SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
&em_disable_crc_stripping, 0, "Disable CRC Stripping");
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt,
0, "Default transmit interrupt delay in usecs");
SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
0, "Default receive interrupt delay in usecs");
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
&em_tx_abs_int_delay_dflt, 0,
"Default transmit interrupt delay limit in usecs");
SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
&em_rx_abs_int_delay_dflt, 0,
"Default receive interrupt delay limit in usecs");
static int em_smart_pwr_down = FALSE;
SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
0, "Set to true to leave smart power down enabled on newer adapters");
/* Controls whether promiscuous also shows bad packets */
static int em_debug_sbp = TRUE;
SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
"Show bad packets in promiscuous mode");
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&em_rx_process_limit, 0,
"Maximum number of received packets to process "
"at a time, -1 means unlimited");
/* Energy efficient ethernet - default to OFF */
static int eee_setting = 1;
SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
"Enable Energy Efficient Ethernet");
/*
** Tuneable Interrupt rate
*/
static int em_max_interrupt_rate = 8000;
SYSCTL_INT(_hw_em, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&em_max_interrupt_rate, 0, "Maximum interrupts per second");
/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
extern struct if_txrx igb_txrx;
extern struct if_txrx em_txrx;
extern struct if_txrx lem_txrx;
static struct if_shared_ctx em_sctx_init = {
.isc_magic = IFLIB_MAGIC,
.isc_q_align = PAGE_SIZE,
.isc_tx_maxsize = EM_TSO_SIZE,
.isc_tx_maxsegsize = PAGE_SIZE,
.isc_rx_maxsize = MJUM9BYTES,
.isc_rx_nsegments = 1,
.isc_rx_maxsegsize = MJUM9BYTES,
.isc_nfl = 1,
.isc_nrxqs = 1,
.isc_ntxqs = 1,
.isc_admin_intrcnt = 1,
.isc_vendor_info = em_vendor_info_array,
.isc_driver_version = em_driver_version,
.isc_driver = &em_if_driver,
.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
.isc_nrxd_min = {EM_MIN_RXD},
.isc_ntxd_min = {EM_MIN_TXD},
.isc_nrxd_max = {EM_MAX_RXD},
.isc_ntxd_max = {EM_MAX_TXD},
.isc_nrxd_default = {EM_DEFAULT_RXD},
.isc_ntxd_default = {EM_DEFAULT_TXD},
};
if_shared_ctx_t em_sctx = &em_sctx_init;
static struct if_shared_ctx igb_sctx_init = {
.isc_magic = IFLIB_MAGIC,
.isc_q_align = PAGE_SIZE,
.isc_tx_maxsize = EM_TSO_SIZE,
.isc_tx_maxsegsize = PAGE_SIZE,
.isc_rx_maxsize = MJUM9BYTES,
.isc_rx_nsegments = 1,
.isc_rx_maxsegsize = MJUM9BYTES,
.isc_nfl = 1,
.isc_nrxqs = 1,
.isc_ntxqs = 1,
.isc_admin_intrcnt = 1,
.isc_vendor_info = igb_vendor_info_array,
.isc_driver_version = em_driver_version,
.isc_driver = &em_if_driver,
.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
.isc_nrxd_min = {EM_MIN_RXD},
.isc_ntxd_min = {EM_MIN_TXD},
.isc_nrxd_max = {IGB_MAX_RXD},
.isc_ntxd_max = {IGB_MAX_TXD},
.isc_nrxd_default = {EM_DEFAULT_RXD},
.isc_ntxd_default = {EM_DEFAULT_TXD},
};
if_shared_ctx_t igb_sctx = &igb_sctx_init;
/*****************************************************************
*
* Dump Registers
*
****************************************************************/
#define IGB_REGS_LEN 739
static int em_get_regs(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter = (struct adapter *)arg1;
struct e1000_hw *hw = &adapter->hw;
struct sbuf *sb;
u32 *regs_buff;
int rc;
regs_buff = malloc(sizeof(u32) * IGB_REGS_LEN, M_DEVBUF, M_WAITOK);
memset(regs_buff, 0, IGB_REGS_LEN * sizeof(u32));
rc = sysctl_wire_old_buffer(req, 0);
MPASS(rc == 0);
if (rc != 0) {
free(regs_buff, M_DEVBUF);
return (rc);
}
sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
MPASS(sb != NULL);
if (sb == NULL) {
free(regs_buff, M_DEVBUF);
return (ENOMEM);
}
/* General Registers */
regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
regs_buff[3] = E1000_READ_REG(hw, E1000_ICR);
regs_buff[4] = E1000_READ_REG(hw, E1000_RCTL);
regs_buff[5] = E1000_READ_REG(hw, E1000_RDLEN(0));
regs_buff[6] = E1000_READ_REG(hw, E1000_RDH(0));
regs_buff[7] = E1000_READ_REG(hw, E1000_RDT(0));
regs_buff[8] = E1000_READ_REG(hw, E1000_RXDCTL(0));
regs_buff[9] = E1000_READ_REG(hw, E1000_RDBAL(0));
regs_buff[10] = E1000_READ_REG(hw, E1000_RDBAH(0));
regs_buff[11] = E1000_READ_REG(hw, E1000_TCTL);
regs_buff[12] = E1000_READ_REG(hw, E1000_TDBAL(0));
regs_buff[13] = E1000_READ_REG(hw, E1000_TDBAH(0));
regs_buff[14] = E1000_READ_REG(hw, E1000_TDLEN(0));
regs_buff[15] = E1000_READ_REG(hw, E1000_TDH(0));
regs_buff[16] = E1000_READ_REG(hw, E1000_TDT(0));
regs_buff[17] = E1000_READ_REG(hw, E1000_TXDCTL(0));
regs_buff[18] = E1000_READ_REG(hw, E1000_TDFH);
regs_buff[19] = E1000_READ_REG(hw, E1000_TDFT);
regs_buff[20] = E1000_READ_REG(hw, E1000_TDFHS);
regs_buff[21] = E1000_READ_REG(hw, E1000_TDFPC);
sbuf_printf(sb, "General Registers\n");
sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
sbuf_printf(sb, "\tCTRL_EXIT\t %08x\n\n", regs_buff[2]);
sbuf_printf(sb, "Interrupt Registers\n");
sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);
sbuf_printf(sb, "RX Registers\n");
sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);
sbuf_printf(sb, "TX Registers\n");
sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);
free(regs_buff, M_DEVBUF);
#ifdef DUMP_DESCS
{
if_softc_ctx_t scctx = adapter->shared;
struct rx_ring *rxr = &rx_que->rxr;
struct tx_ring *txr = &tx_que->txr;
int ntxd = scctx->isc_ntxd[0];
int nrxd = scctx->isc_nrxd[0];
int j;
for (j = 0; j < nrxd; j++) {
u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length);
}
for (j = 0; j < min(ntxd, 256); j++) {
unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & E1000_TXD_STAT_DD : 0);
}
}
#endif
rc = sbuf_finish(sb);
sbuf_delete(sb);
return(rc);
}
static void *
em_register(device_t dev)
{
return (em_sctx);
}
static void *
igb_register(device_t dev)
{
return (igb_sctx);
}
static int
em_set_num_queues(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
int maxqueues;
/* Sanity check based on HW */
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
case e1000_i350:
case e1000_i354:
maxqueues = 8;
break;
case e1000_i210:
case e1000_82575:
maxqueues = 4;
break;
case e1000_i211:
case e1000_82574:
maxqueues = 2;
break;
default:
maxqueues = 1;
break;
}
return (maxqueues);
}
#define EM_CAPS \
IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \
IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | \
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
#define IGB_CAPS \
IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \
IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_TXCSUM_IPV6 | IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU;
/*********************************************************************
* Device initialization routine
*
* The attach entry point is called when the driver is being loaded.
* This routine identifies the type of hardware, allocates all resources
* and initializes the hardware.
*
* return 0 on success, positive on failure
*********************************************************************/
static int
em_if_attach_pre(if_ctx_t ctx)
{
struct adapter *adapter;
if_softc_ctx_t scctx;
device_t dev;
struct e1000_hw *hw;
int error = 0;
INIT_DEBUGOUT("em_if_attach_pre begin");
dev = iflib_get_dev(ctx);
adapter = iflib_get_softc(ctx);
if (resource_disabled("em", device_get_unit(dev))) {
device_printf(dev, "Disabled by device hint\n");
return (ENXIO);
}
adapter->ctx = ctx;
adapter->dev = adapter->osdep.dev = dev;
scctx = adapter->shared = iflib_get_softc_ctx(ctx);
adapter->media = iflib_get_media(ctx);
hw = &adapter->hw;
adapter->tx_process_limit = scctx->isc_ntxd[0];
/* SYSCTL stuff */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
em_sysctl_nvm_info, "I", "NVM Information");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
em_sysctl_debug_info, "I", "Debug Information");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
em_set_flowcntl, "I", "Flow Control");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "reg_dump", CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
em_get_regs, "A", "Dump Registers");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "rs_dump", CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
em_get_rs, "I", "Dump RS indexes");
/* Determine hardware and mac info */
em_identify_hardware(ctx);
/* Set isc_msix_bar */
scctx->isc_msix_bar = PCIR_BAR(EM_MSIX_BAR);
scctx->isc_tx_nsegments = EM_MAX_SCATTER;
scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
scctx->isc_tx_tso_size_max = EM_TSO_SIZE;
scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE;
scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = em_set_num_queues(ctx);
device_printf(dev, "attach_pre capping queues at %d\n", scctx->isc_ntxqsets_max);
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
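/*
 * Select descriptor formats and iflib txrx methods by MAC class:
 * igb-class MACs (>= igb_mac_min) use the advanced descriptors,
 * em-class MACs (>= em_mac_min) use legacy TX plus extended RX
 * descriptors, and older lem-class parts use the legacy formats
 * throughout.
 */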
if (adapter->hw.mac.type >= igb_mac_min) {
int try_second_bar;
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN);
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN);
scctx->isc_txd_size[0] = sizeof(union e1000_adv_tx_desc);
scctx->isc_rxd_size[0] = sizeof(union e1000_adv_rx_desc);
scctx->isc_txrx = &igb_txrx;
scctx->isc_capenable = IGB_CAPS;
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
CSUM_IP6_TCP | CSUM_IP6_UDP;
if (adapter->hw.mac.type != e1000_82575)
scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
/*
** Some new devices, as with ixgbe, now may
** use a different BAR, so we need to keep
** track of which is used.
*/
try_second_bar = pci_read_config(dev, scctx->isc_msix_bar, 4);
if (try_second_bar == 0)
scctx->isc_msix_bar += 4;
} else if (adapter->hw.mac.type >= em_mac_min) {
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]* sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
scctx->isc_rxd_size[0] = sizeof(union e1000_rx_desc_extended);
scctx->isc_txrx = &em_txrx;
scctx->isc_capenable = EM_CAPS;
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
} else {
scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
scctx->isc_rxd_size[0] = sizeof(struct e1000_rx_desc);
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
scctx->isc_txrx = &lem_txrx;
scctx->isc_capenable = EM_CAPS;
if (adapter->hw.mac.type < e1000_82543)
scctx->isc_capenable &= ~(IFCAP_HWCSUM|IFCAP_VLAN_HWCSUM);
scctx->isc_msix_bar = 0;
}
/* Setup PCI resources */
if (em_allocate_pci_resources(ctx)) {
device_printf(dev, "Allocation of PCI resources failed\n");
error = ENXIO;
goto err_pci;
}
/*
** For ICH8 and family we need to
** map the flash memory, and this
** must happen after the MAC is
** identified
*/
if ((hw->mac.type == e1000_ich8lan) ||
(hw->mac.type == e1000_ich9lan) ||
(hw->mac.type == e1000_ich10lan) ||
(hw->mac.type == e1000_pchlan) ||
(hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt)) {
int rid = EM_BAR_TYPE_FLASH;
adapter->flash = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (adapter->flash == NULL) {
device_printf(dev, "Mapping of Flash failed\n");
error = ENXIO;
goto err_pci;
}
/* This is used in the shared code */
hw->flash_address = (u8 *)adapter->flash;
adapter->osdep.flash_bus_space_tag =
rman_get_bustag(adapter->flash);
adapter->osdep.flash_bus_space_handle =
rman_get_bushandle(adapter->flash);
}
/*
** In the new SPT device flash is not a
** separate BAR, rather it is also in BAR0,
** so use the same tag and an offset handle for the
** FLASH read/write macros in the shared code.
*/
else if (hw->mac.type >= e1000_pch_spt) {
adapter->osdep.flash_bus_space_tag =
adapter->osdep.mem_bus_space_tag;
adapter->osdep.flash_bus_space_handle =
adapter->osdep.mem_bus_space_handle
+ E1000_FLASH_BASE_ADDR;
}
/* Do Shared Code initialization */
error = e1000_setup_init_funcs(hw, TRUE);
if (error) {
device_printf(dev, "Setup of Shared code failed, error %d\n",
error);
error = ENXIO;
goto err_pci;
}
em_setup_msix(ctx);
e1000_get_bus_info(hw);
/* Set up some sysctls for the tunable interrupt delays */
em_add_int_delay_sysctl(adapter, "rx_int_delay",
"receive interrupt delay in usecs", &adapter->rx_int_delay,
E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "tx_int_delay",
"transmit interrupt delay in usecs", &adapter->tx_int_delay,
E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
"receive interrupt delay limit in usecs",
&adapter->rx_abs_int_delay,
E1000_REGISTER(hw, E1000_RADV),
em_rx_abs_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
"transmit interrupt delay limit in usecs",
&adapter->tx_abs_int_delay,
E1000_REGISTER(hw, E1000_TADV),
em_tx_abs_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "itr",
"interrupt delay limit in usecs/4",
&adapter->tx_itr,
E1000_REGISTER(hw, E1000_ITR),
DEFAULT_ITR);
hw->mac.autoneg = DO_AUTO_NEG;
hw->phy.autoneg_wait_to_complete = FALSE;
hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
if (adapter->hw.mac.type < em_mac_min) {
e1000_init_script_state_82541(&adapter->hw, TRUE);
e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
}
/* Copper options */
if (hw->phy.media_type == e1000_media_type_copper) {
hw->phy.mdix = AUTO_ALL_MODES;
hw->phy.disable_polarity_correction = FALSE;
hw->phy.ms_type = EM_MASTER_SLAVE;
}
/*
* Set the frame limits assuming
* standard ethernet sized frames.
*/
scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
/*
* This controls when hardware reports transmit completion
* status.
*/
hw->mac.report_tx_early = 1;
/* Allocate multicast array memory. */
adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
if (adapter->mta == NULL) {
device_printf(dev, "Can not allocate multicast setup array\n");
error = ENOMEM;
goto err_late;
}
/* Check SOL/IDER usage */
if (e1000_check_reset_block(hw))
device_printf(dev, "PHY reset is blocked"
" due to SOL/IDER session.\n");
/* Sysctl for setting Energy Efficient Ethernet */
hw->dev_spec.ich8lan.eee_disable = eee_setting;
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "eee_control", CTLTYPE_INT|CTLFLAG_RW,
adapter, 0, em_sysctl_eee, "I",
"Disable Energy Efficient Ethernet");
/*
** Start from a known state; this is
** important when reading the NVM and
** the MAC address from it.
*/
e1000_reset_hw(hw);
/* Make sure we have a good EEPROM before we read from it */
if (e1000_validate_nvm_checksum(hw) < 0) {
/*
** Some PCI-E parts fail the first check due to
** the link being in a sleep state; call it again.
** If it fails a second time, it's a real issue.
*/
if (e1000_validate_nvm_checksum(hw) < 0) {
device_printf(dev,
"The EEPROM Checksum Is Not Valid\n");
error = EIO;
goto err_late;
}
}
/* Copy the permanent MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw) < 0) {
device_printf(dev, "EEPROM read error while reading MAC"
" address\n");
error = EIO;
goto err_late;
}
if (!em_is_valid_ether_addr(hw->mac.addr)) {
device_printf(dev, "Invalid MAC address\n");
error = EIO;
goto err_late;
}
/* Disable ULP support */
e1000_disable_ulp_lpt_lp(hw, TRUE);
/*
* Get Wake-on-Lan and Management info for later use
*/
em_get_wakeup(ctx);
iflib_set_mac(ctx, hw->mac.addr);
return (0);
err_late:
em_release_hw_control(adapter);
err_pci:
em_free_pci_resources(ctx);
free(adapter->mta, M_DEVBUF);
return (error);
}
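/*
* Illustrative note (not part of the driver): the descriptor ring sizes
* computed in em_if_attach_pre() are just the descriptor count times the
* descriptor size, rounded up to EM_DBA_ALIGN.  For example, assuming
* 1024 descriptors of 16 bytes each (both the legacy and advanced
* descriptor formats are 16 bytes) and an alignment of 128:
*
*   roundup2(1024 * 16, 128)       = 16384   (already aligned)
*   roundup2((1024 + 1) * 16, 128) = 16512   (lem path, which adds one)
*
* The actual defaults for the descriptor count and EM_DBA_ALIGN come from
* the headers and tunables and may differ; the numbers are only a sketch.
*/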
static int
em_if_attach_post(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
int error = 0;
/* Setup OS specific network interface */
error = em_setup_interface(ctx);
if (error != 0) {
goto err_late;
}
em_reset(ctx);
/* Initialize statistics */
em_update_stats_counters(adapter);
hw->mac.get_link_status = 1;
em_if_update_admin_status(ctx);
em_add_hw_stats(adapter);
/* Non-AMT based hardware can now take control from firmware */
if (adapter->has_manage && !adapter->has_amt)
em_get_hw_control(adapter);
INIT_DEBUGOUT("em_if_attach_post: end");
return (error);
err_late:
em_release_hw_control(adapter);
em_free_pci_resources(ctx);
em_if_queues_free(ctx);
free(adapter->mta, M_DEVBUF);
return (error);
}
/*********************************************************************
* Device removal routine
*
* The detach entry point is called when the driver is being removed.
* This routine stops the adapter and deallocates all the resources
* that were allocated for driver operation.
*
* return 0 on success, positive on failure
*********************************************************************/
static int
em_if_detach(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
INIT_DEBUGOUT("em_detach: begin");
e1000_phy_hw_reset(&adapter->hw);
em_release_manageability(adapter);
em_release_hw_control(adapter);
em_free_pci_resources(ctx);
return (0);
}
/*********************************************************************
*
* Shutdown entry point
*
**********************************************************************/
static int
em_if_shutdown(if_ctx_t ctx)
{
return em_if_suspend(ctx);
}
/*
* Suspend/resume device methods.
*/
static int
em_if_suspend(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
em_release_manageability(adapter);
em_release_hw_control(adapter);
em_enable_wakeup(ctx);
return (0);
}
static int
em_if_resume(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
if (adapter->hw.mac.type == e1000_pch2lan)
e1000_resume_workarounds_pchlan(&adapter->hw);
em_if_init(ctx);
em_init_manageability(adapter);
return(0);
}
static int
em_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
int max_frame_size;
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
switch (adapter->hw.mac.type) {
case e1000_82571:
case e1000_82572:
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_82574:
case e1000_82583:
case e1000_80003es2lan:
/* 9K Jumbo Frame size */
max_frame_size = 9234;
break;
case e1000_pchlan:
max_frame_size = 4096;
break;
case e1000_82542:
case e1000_ich8lan:
/* Adapters that do not support jumbo frames */
max_frame_size = ETHER_MAX_LEN;
break;
default:
if (adapter->hw.mac.type >= igb_mac_min)
max_frame_size = 9234;
else /* lem */
max_frame_size = MAX_JUMBO_FRAME_SIZE;
}
if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
return (EINVAL);
}
scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
return (0);
}
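/*
* Illustrative note (not part of the driver): the MTU check above is
* plain frame-size arithmetic, with ETHER_HDR_LEN = 14 and
* ETHER_CRC_LEN = 4.  For example:
*
*   max_frame_size 9234 (9K jumbo parts) -> largest MTU 9234 - 18 = 9216
*   max_frame_size 4096 (pchlan)         -> largest MTU 4078
*   max_frame_size ETHER_MAX_LEN (1518)  -> largest MTU 1500
*
* and an accepted MTU is stored back as mtu + 18 in both
* isc_max_frame_size and mac.max_frame_size.
*/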
/*********************************************************************
* Init entry point
*
* This routine is used in two ways. It is used by the stack as the
* init entry point in the network interface structure. It is also
* used by the driver as a hw/sw initialization routine to get to a
* consistent state.
*
**********************************************************************/
static void
em_if_init(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
struct em_tx_queue *tx_que;
int i;
INIT_DEBUGOUT("em_if_init: begin");
/* Get the latest mac address, User can use a LAA */
bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
ETHER_ADDR_LEN);
/* Put the address into the Receive Address Array */
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
/*
* With the 82571 adapter, RAR[0] may be overwritten
* when the other port is reset. We make a duplicate
* in RAR[14] for that eventuality; this assures
* the interface continues to function.
*/
if (adapter->hw.mac.type == e1000_82571) {
e1000_set_laa_state_82571(&adapter->hw, TRUE);
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
E1000_RAR_ENTRIES - 1);
}
/* Initialize the hardware */
em_reset(ctx);
em_if_update_admin_status(ctx);
for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
}
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
/* Clear bad data from Rx FIFOs */
if (adapter->hw.mac.type >= igb_mac_min)
e1000_rx_fifo_flush_82575(&adapter->hw);
/* Configure for OS presence */
em_init_manageability(adapter);
/* Prepare transmit descriptors and buffers */
em_initialize_transmit_unit(ctx);
/* Setup Multicast table */
em_if_multi_set(ctx);
/*
* Figure out the desired mbuf
* pool for doing jumbos
*/
if (adapter->hw.mac.max_frame_size <= 2048)
adapter->rx_mbuf_sz = MCLBYTES;
#ifndef CONTIGMALLOC_WORKS
else
adapter->rx_mbuf_sz = MJUMPAGESIZE;
#else
else if (adapter->hw.mac.max_frame_size <= 4096)
adapter->rx_mbuf_sz = MJUMPAGESIZE;
else
adapter->rx_mbuf_sz = MJUM9BYTES;
#endif
em_initialize_receive_unit(ctx);
/* Use real VLAN Filter support? */
if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
/* Use real VLAN Filter support */
em_setup_vlan_hw_support(adapter);
else {
u32 ctrl;
ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
}
/* Don't lose promiscuous settings */
em_if_set_promisc(ctx, IFF_PROMISC);
e1000_clear_hw_cntrs_base_generic(&adapter->hw);
/* MSI/X configuration for 82574 */
if (adapter->hw.mac.type == e1000_82574) {
int tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
tmp |= E1000_CTRL_EXT_PBA_CLR;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
/* Set the IVAR - interrupt vector routing. */
E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars);
} else if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
igb_configure_queues(adapter);
/* this clears any pending interrupts */
E1000_READ_REG(&adapter->hw, E1000_ICR);
E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
/* AMT based hardware can now take control from firmware */
if (adapter->has_manage && adapter->has_amt)
em_get_hw_control(adapter);
/* Set Energy Efficient Ethernet */
if (adapter->hw.mac.type >= igb_mac_min &&
adapter->hw.phy.media_type == e1000_media_type_copper) {
if (adapter->hw.mac.type == e1000_i354)
e1000_set_eee_i354(&adapter->hw, TRUE, TRUE);
else
e1000_set_eee_i350(&adapter->hw, TRUE, TRUE);
}
}
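/*
* Illustrative note (not part of the driver): the rx_mbuf_sz selection in
* em_if_init() maps the maximum frame size onto the standard mbuf cluster
* sizes, which on a typical amd64 configuration are MCLBYTES = 2048,
* MJUMPAGESIZE = PAGE_SIZE = 4096 and MJUM9BYTES = 9216.  A standard
* 1518-byte frame therefore fits in a 2K cluster, while a 9018-byte jumbo
* frame needs a 9K cluster -- unless CONTIGMALLOC_WORKS is undefined, in
* which case anything larger than 2K falls back to page-sized clusters.
*/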
/*********************************************************************
*
* Fast Legacy/MSI Combined Interrupt Service routine
*
*********************************************************************/
int
em_intr(void *arg)
{
struct adapter *adapter = arg;
if_ctx_t ctx = adapter->ctx;
u32 reg_icr;
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
if (adapter->intr_type != IFLIB_INTR_LEGACY)
goto skip_stray;
/* Hot eject? */
if (reg_icr == 0xffffffff)
return FILTER_STRAY;
/* Definitely not our interrupt. */
if (reg_icr == 0x0)
return FILTER_STRAY;
/*
* Starting with the 82571 chip, bit 31 should be used to
* determine whether the interrupt belongs to us.
*/
if (adapter->hw.mac.type >= e1000_82571 &&
(reg_icr & E1000_ICR_INT_ASSERTED) == 0)
return FILTER_STRAY;
skip_stray:
/* Link status change */
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
adapter->hw.mac.get_link_status = 1;
iflib_admin_intr_deferred(ctx);
}
if (reg_icr & E1000_ICR_RXO)
adapter->rx_overruns++;
return (FILTER_SCHEDULE_THREAD);
}
static void
igb_rx_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
{
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxq->eims);
}
static void
em_rx_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
{
E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxq->eims);
}
static void
igb_tx_enable_queue(struct adapter *adapter, struct em_tx_queue *txq)
{
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txq->eims);
}
static void
em_tx_enable_queue(struct adapter *adapter, struct em_tx_queue *txq)
{
E1000_WRITE_REG(&adapter->hw, E1000_IMS, txq->eims);
}
static int
em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *rxq = &adapter->rx_queues[rxqid];
if (adapter->hw.mac.type >= igb_mac_min)
igb_rx_enable_queue(adapter, rxq);
else
em_rx_enable_queue(adapter, rxq);
return (0);
}
static int
em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_tx_queue *txq = &adapter->tx_queues[txqid];
if (adapter->hw.mac.type >= igb_mac_min)
igb_tx_enable_queue(adapter, txq);
else
em_tx_enable_queue(adapter, txq);
return (0);
}
/*********************************************************************
*
* MSIX RX Interrupt Service routine
*
**********************************************************************/
static int
em_msix_que(void *arg)
{
struct em_rx_queue *que = arg;
++que->irqs;
return (FILTER_SCHEDULE_THREAD);
}
/*********************************************************************
*
* MSIX Link Fast Interrupt Service routine
*
**********************************************************************/
static int
em_msix_link(void *arg)
{
struct adapter *adapter = arg;
u32 reg_icr;
++adapter->link_irq;
MPASS(adapter->hw.back != NULL);
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
if (reg_icr & E1000_ICR_RXO)
adapter->rx_overruns++;
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
em_handle_link(adapter->ctx);
} else {
E1000_WRITE_REG(&adapter->hw, E1000_IMS,
EM_MSIX_LINK | E1000_IMS_LSC);
if (adapter->hw.mac.type >= igb_mac_min)
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
}
/*
* Because we must read the ICR for this interrupt,
* it may clear other causes using autoclear; for
* this reason we simply create a soft interrupt
* for all these vectors.
*/
if (reg_icr && adapter->hw.mac.type < igb_mac_min) {
E1000_WRITE_REG(&adapter->hw,
E1000_ICS, adapter->ims);
}
return (FILTER_HANDLED);
}
static void
em_handle_link(void *context)
{
if_ctx_t ctx = context;
struct adapter *adapter = iflib_get_softc(ctx);
adapter->hw.mac.get_link_status = 1;
iflib_admin_intr_deferred(ctx);
}
/*********************************************************************
*
* Media Ioctl callback
*
* This routine is called whenever the user queries the status of
* the interface using ifconfig.
*
**********************************************************************/
static void
em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
struct adapter *adapter = iflib_get_softc(ctx);
u_char fiber_type = IFM_1000_SX;
INIT_DEBUGOUT("em_if_media_status: begin");
iflib_admin_intr_deferred(ctx);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
if (!adapter->link_active) {
return;
}
ifmr->ifm_status |= IFM_ACTIVE;
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
(adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
if (adapter->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (adapter->link_speed) {
case 10:
ifmr->ifm_active |= IFM_10_T;
break;
case 100:
ifmr->ifm_active |= IFM_100_TX;
break;
case 1000:
ifmr->ifm_active |= IFM_1000_T;
break;
}
if (adapter->link_duplex == FULL_DUPLEX)
ifmr->ifm_active |= IFM_FDX;
else
ifmr->ifm_active |= IFM_HDX;
}
}
/*********************************************************************
*
* Media Ioctl callback
*
* This routine is called when the user changes speed/duplex using
* the media/mediaopt option with ifconfig.
*
**********************************************************************/
static int
em_if_media_change(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct ifmedia *ifm = iflib_get_media(ctx);
INIT_DEBUGOUT("em_if_media_change: begin");
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
adapter->hw.mac.autoneg = DO_AUTO_NEG;
adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
break;
case IFM_1000_LX:
case IFM_1000_SX:
case IFM_1000_T:
adapter->hw.mac.autoneg = DO_AUTO_NEG;
adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case IFM_100_TX:
adapter->hw.mac.autoneg = FALSE;
adapter->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
else
adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
break;
case IFM_10_T:
adapter->hw.mac.autoneg = FALSE;
adapter->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
else
adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
break;
default:
device_printf(adapter->dev, "Unsupported media type\n");
}
em_if_init(ctx);
return (0);
}
static int
em_if_set_promisc(if_ctx_t ctx, int flags)
{
struct adapter *adapter = iflib_get_softc(ctx);
u32 reg_rctl;
em_disable_promisc(ctx);
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
if (flags & IFF_PROMISC) {
reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
/* Turn this on if you want to see bad packets */
if (em_debug_sbp)
reg_rctl |= E1000_RCTL_SBP;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
} else if (flags & IFF_ALLMULTI) {
reg_rctl |= E1000_RCTL_MPE;
reg_rctl &= ~E1000_RCTL_UPE;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}
return (0);
}
static void
em_disable_promisc(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
u32 reg_rctl;
int mcnt = 0;
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl &= (~E1000_RCTL_UPE);
if (if_getflags(ifp) & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
else
mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
/* Don't disable if in MAX groups */
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
reg_rctl &= (~E1000_RCTL_MPE);
reg_rctl &= (~E1000_RCTL_SBP);
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}
/*********************************************************************
* Multicast Update
*
* This routine is called whenever multicast address list is updated.
*
**********************************************************************/
static void
em_if_multi_set(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
u32 reg_rctl = 0;
u8 *mta; /* Multicast array memory */
int mcnt = 0;
IOCTL_DEBUGOUT("em_set_multi: begin");
mta = adapter->mta;
bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
if (adapter->hw.mac.type == e1000_82542 &&
adapter->hw.revision_id == E1000_REVISION_2) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
e1000_pci_clear_mwi(&adapter->hw);
reg_rctl |= E1000_RCTL_RST;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
msec_delay(5);
}
if_multiaddr_array(ifp, mta, &mcnt, MAX_NUM_MULTICAST_ADDRESSES);
if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
} else
e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
if (adapter->hw.mac.type == e1000_82542 &&
adapter->hw.revision_id == E1000_REVISION_2) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl &= ~E1000_RCTL_RST;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
msec_delay(5);
if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
e1000_pci_set_mwi(&adapter->hw);
}
}
/*********************************************************************
* Timer routine
*
* This routine checks for link status and updates statistics.
*
**********************************************************************/
static void
em_if_timer(if_ctx_t ctx, uint16_t qid)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *que;
int i;
int trigger = 0;
if (qid != 0)
return;
iflib_admin_intr_deferred(ctx);
/* Reset LAA into RAR[0] on 82571 */
if ((adapter->hw.mac.type == e1000_82571) &&
e1000_get_laa_state_82571(&adapter->hw))
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
if (adapter->hw.mac.type < em_mac_min)
lem_smartspeed(adapter);
/* Mask to use in the irq trigger */
if (adapter->intr_type == IFLIB_INTR_MSIX) {
for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++)
trigger |= que->eims;
} else {
trigger = E1000_ICS_RXDMT0;
}
}
static void
em_if_update_admin_status(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
struct ifnet *ifp = iflib_get_ifp(ctx);
device_t dev = iflib_get_dev(ctx);
u32 link_check, thstat, ctrl;
link_check = thstat = ctrl = 0;
/* Get the cached link value or read phy for real */
switch (hw->phy.media_type) {
case e1000_media_type_copper:
if (hw->mac.get_link_status) {
if (hw->mac.type == e1000_pch_spt)
msec_delay(50);
/* Do the work to read phy */
e1000_check_for_link(hw);
link_check = !hw->mac.get_link_status;
if (link_check) /* ESB2 fix */
e1000_cfg_on_link_up(hw);
} else {
link_check = TRUE;
}
break;
case e1000_media_type_fiber:
e1000_check_for_link(hw);
link_check = (E1000_READ_REG(hw, E1000_STATUS) &
E1000_STATUS_LU);
break;
case e1000_media_type_internal_serdes:
e1000_check_for_link(hw);
link_check = adapter->hw.mac.serdes_has_link;
break;
/* VF device is type_unknown */
case e1000_media_type_unknown:
e1000_check_for_link(hw);
link_check = !hw->mac.get_link_status;
/* FALLTHROUGH */
default:
break;
}
/* Check for thermal downshift or shutdown */
if (hw->mac.type == e1000_i350) {
thstat = E1000_READ_REG(hw, E1000_THSTAT);
ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
}
/* Now check for a transition */
if (link_check && (adapter->link_active == 0)) {
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
&adapter->link_duplex);
/* Check if we must disable SPEED_MODE bit on PCI-E */
if ((adapter->link_speed != SPEED_1000) &&
((hw->mac.type == e1000_82571) ||
(hw->mac.type == e1000_82572))) {
int tarc0;
tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
tarc0 &= ~TARC_SPEED_MODE_BIT;
E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
}
if (bootverbose)
device_printf(dev, "Link is up %d Mbps %s\n",
adapter->link_speed,
((adapter->link_duplex == FULL_DUPLEX) ?
"Full Duplex" : "Half Duplex"));
adapter->link_active = 1;
adapter->smartspeed = 0;
if_setbaudrate(ifp, adapter->link_speed * 1000000);
if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
(thstat & E1000_THSTAT_LINK_THROTTLE))
device_printf(dev, "Link: thermal downshift\n");
/* Delay Link Up for Phy update */
if (((hw->mac.type == e1000_i210) ||
(hw->mac.type == e1000_i211)) &&
(hw->phy.id == I210_I_PHY_ID))
msec_delay(I210_LINK_DELAY);
/* Reset if the media type changed. */
if ((hw->dev_spec._82575.media_changed) &&
(adapter->hw.mac.type >= igb_mac_min)) {
hw->dev_spec._82575.media_changed = false;
adapter->flags |= IGB_MEDIA_RESET;
em_reset(ctx);
}
iflib_link_state_change(ctx, LINK_STATE_UP, ifp->if_baudrate);
printf("Link state changed to up\n");
} else if (!link_check && (adapter->link_active == 1)) {
if_setbaudrate(ifp, 0);
adapter->link_speed = 0;
adapter->link_duplex = 0;
if (bootverbose)
device_printf(dev, "Link is Down\n");
adapter->link_active = 0;
iflib_link_state_change(ctx, LINK_STATE_DOWN, ifp->if_baudrate);
printf("link state changed to down\n");
}
em_update_stats_counters(adapter);
E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC);
}
/*********************************************************************
*
* This routine disables all traffic on the adapter by issuing a
* global reset on the MAC and deallocates TX/RX buffers.
*
* This routine should always be called with BOTH the CORE
* and TX locks.
**********************************************************************/
static void
em_if_stop(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
INIT_DEBUGOUT("em_stop: begin");
e1000_reset_hw(&adapter->hw);
if (adapter->hw.mac.type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, E1000_WUFC, 0);
e1000_led_off(&adapter->hw);
e1000_cleanup_led(&adapter->hw);
}
/*********************************************************************
*
* Determine hardware revision.
*
**********************************************************************/
static void
em_identify_hardware(if_ctx_t ctx)
{
device_t dev = iflib_get_dev(ctx);
struct adapter *adapter = iflib_get_softc(ctx);
/* Make sure our PCI config space has the necessary stuff set */
adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
/* Save off the information about this board */
adapter->hw.vendor_id = pci_get_vendor(dev);
adapter->hw.device_id = pci_get_device(dev);
adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
adapter->hw.subsystem_vendor_id =
pci_read_config(dev, PCIR_SUBVEND_0, 2);
adapter->hw.subsystem_device_id =
pci_read_config(dev, PCIR_SUBDEV_0, 2);
/* Do Shared Code Init and Setup */
if (e1000_set_mac_type(&adapter->hw)) {
device_printf(dev, "Setup init failure\n");
return;
}
}
static int
em_allocate_pci_resources(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
int rid, val;
rid = PCIR_BAR(0);
adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (adapter->memory == NULL) {
device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory);
adapter->osdep.mem_bus_space_handle =
rman_get_bushandle(adapter->memory);
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
/* Only older adapters use IO mapping */
if (adapter->hw.mac.type < em_mac_min &&
adapter->hw.mac.type > e1000_82543) {
/* Figure out where our IO BAR is */
for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
val = pci_read_config(dev, rid, 4);
if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
adapter->io_rid = rid;
break;
}
rid += 4;
/* check for 64bit BAR */
if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
rid += 4;
}
if (rid >= PCIR_CIS) {
device_printf(dev, "Unable to locate IO BAR\n");
return (ENXIO);
}
adapter->ioport = bus_alloc_resource_any(dev,
SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
if (adapter->ioport == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"ioport\n");
return (ENXIO);
}
adapter->hw.io_base = 0;
adapter->osdep.io_bus_space_tag =
rman_get_bustag(adapter->ioport);
adapter->osdep.io_bus_space_handle =
rman_get_bushandle(adapter->ioport);
}
adapter->hw.back = &adapter->osdep;
return (0);
}
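/*
* Illustrative note (not part of the driver): the BAR scan above walks the
* standard PCI BAR registers one dword at a time.  EM_BAR_TYPE()
* presumably tests the PCI space-indicator bit (bit 0 set means an I/O
* BAR), and EM_BAR_MEM_TYPE() the memory-type field (a 64-bit memory BAR
* occupies two dwords, hence the extra "rid += 4").  The scan stops at
* PCIR_CIS, the first config register past the BAR block.
*/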
/*********************************************************************
*
* Setup the MSIX Interrupt handlers
*
**********************************************************************/
static int
em_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *rx_que = adapter->rx_queues;
struct em_tx_queue *tx_que = adapter->tx_queues;
int error, rid, i, vector = 0, rx_vectors;
char buf[16];
/* First set up ring resources */
for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, em_msix_que, rx_que, rx_que->me, buf);
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
adapter->rx_num_queues = i + 1;
goto fail;
}
rx_que->msix = vector;
/*
* Set the bit to enable interrupt
* in E1000_IMS -- bits 20 and 21
* are for RX0 and RX1, note this has
* NOTHING to do with the MSIX vector
*/
if (adapter->hw.mac.type == e1000_82574) {
rx_que->eims = 1 << (20 + i);
adapter->ims |= rx_que->eims;
adapter->ivars |= (8 | rx_que->msix) << (i * 4);
} else if (adapter->hw.mac.type == e1000_82575)
rx_que->eims = E1000_EICR_TX_QUEUE0 << vector;
else
rx_que->eims = 1 << vector;
}
rx_vectors = vector;
vector = 0;
for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "txq%d", i);
tx_que = &adapter->tx_queues[i];
iflib_softirq_alloc_generic(ctx,
&adapter->rx_queues[i % adapter->rx_num_queues].que_irq,
IFLIB_INTR_TX, tx_que, tx_que->me, buf);
tx_que->msix = (vector % adapter->tx_num_queues);
/*
* Set the bit to enable interrupt
* in E1000_IMS -- bits 22 and 23
* are for TX0 and TX1, note this has
* NOTHING to do with the MSIX vector
*/
if (adapter->hw.mac.type == e1000_82574) {
tx_que->eims = 1 << (22 + i);
adapter->ims |= tx_que->eims;
adapter->ivars |= (8 | tx_que->msix) << (8 + (i * 4));
} else if (adapter->hw.mac.type == e1000_82575) {
tx_que->eims = E1000_EICR_TX_QUEUE0 << (i % adapter->tx_num_queues);
} else {
tx_que->eims = 1 << (i % adapter->tx_num_queues);
}
}
/* Link interrupt */
rid = rx_vectors + 1;
error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq");
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
goto fail;
}
adapter->linkvec = rx_vectors;
if (adapter->hw.mac.type < igb_mac_min) {
adapter->ivars |= (8 | rx_vectors) << 16;
adapter->ivars |= 0x80000000;
}
return (0);
fail:
iflib_irq_free(ctx, &adapter->irq);
rx_que = adapter->rx_queues;
for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
iflib_irq_free(ctx, &rx_que->que_irq);
return (error);
}
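/*
* Illustrative note (not part of the driver): on the 82574 the assignment
* loop above packs one 4-bit entry per vector into adapter->ivars and one
* mask bit per queue into the eims fields.  Assuming a single RX queue
* (MSI-X vector 0), a single TX queue (vector 0) and the link vector at 1:
*
*   rxq0: eims = 1 << 20 = 0x00100000, ivars |= (8 | 0) << 0
*   txq0: eims = 1 << 22 = 0x00400000, ivars |= (8 | 0) << 8
*   link: ivars |= (8 | 1) << 16, plus the 0x80000000 enable bit
*
* giving ivars = 0x80090808, which em_if_init() later writes to
* E1000_IVAR.  The queue counts here are only an example.
*/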
static void
igb_configure_queues(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct em_rx_queue *rx_que;
struct em_tx_queue *tx_que;
u32 tmp, ivar = 0, newitr = 0;
/* First turn on RSS capability */
if (adapter->hw.mac.type != e1000_82575)
E1000_WRITE_REG(hw, E1000_GPIE,
E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
E1000_GPIE_PBA | E1000_GPIE_NSICR);
/* Turn on MSIX */
switch (adapter->hw.mac.type) {
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
case e1000_vfadapt:
case e1000_vfadapt_i350:
/* RX entries */
for (int i = 0; i < adapter->rx_num_queues; i++) {
u32 index = i >> 1;
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
rx_que = &adapter->rx_queues[i];
if (i & 1) {
ivar &= 0xFF00FFFF;
ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
} else {
ivar &= 0xFFFFFF00;
ivar |= rx_que->msix | E1000_IVAR_VALID;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
}
/* TX entries */
for (int i = 0; i < adapter->tx_num_queues; i++) {
u32 index = i >> 1;
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
tx_que = &adapter->tx_queues[i];
if (i & 1) {
ivar &= 0x00FFFFFF;
ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
} else {
ivar &= 0xFFFF00FF;
ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
adapter->que_mask |= tx_que->eims;
}
/* And for the link interrupt */
ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
adapter->link_mask = 1 << adapter->linkvec;
E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
break;
case e1000_82576:
/* RX entries */
for (int i = 0; i < adapter->rx_num_queues; i++) {
u32 index = i & 0x7; /* Each IVAR has two entries */
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
rx_que = &adapter->rx_queues[i];
if (i < 8) {
ivar &= 0xFFFFFF00;
ivar |= rx_que->msix | E1000_IVAR_VALID;
} else {
ivar &= 0xFF00FFFF;
ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
adapter->que_mask |= rx_que->eims;
}
/* TX entries */
for (int i = 0; i < adapter->tx_num_queues; i++) {
u32 index = i & 0x7; /* Each IVAR has two entries */
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
tx_que = &adapter->tx_queues[i];
if (i < 8) {
ivar &= 0xFFFF00FF;
ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
} else {
ivar &= 0x00FFFFFF;
ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
adapter->que_mask |= tx_que->eims;
}
/* And for the link interrupt */
ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
adapter->link_mask = 1 << adapter->linkvec;
E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
break;
case e1000_82575:
/* enable MSI-X support*/
tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
tmp |= E1000_CTRL_EXT_PBA_CLR;
/* Auto-Mask interrupts upon ICR read. */
tmp |= E1000_CTRL_EXT_EIAME;
tmp |= E1000_CTRL_EXT_IRCA;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
/* Queues */
for (int i = 0; i < adapter->rx_num_queues; i++) {
rx_que = &adapter->rx_queues[i];
tmp = E1000_EICR_RX_QUEUE0 << i;
tmp |= E1000_EICR_TX_QUEUE0 << i;
rx_que->eims = tmp;
E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
i, rx_que->eims);
adapter->que_mask |= rx_que->eims;
}
/* Link */
E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
E1000_EIMS_OTHER);
adapter->link_mask |= E1000_EIMS_OTHER;
default:
break;
}
/* Set the starting interrupt rate */
if (em_max_interrupt_rate > 0)
newitr = (4000000 / em_max_interrupt_rate) & 0x7FFC;
if (hw->mac.type == e1000_82575)
newitr |= newitr << 16;
else
newitr |= E1000_EITR_CNT_IGNR;
for (int i = 0; i < adapter->rx_num_queues; i++) {
rx_que = &adapter->rx_queues[i];
E1000_WRITE_REG(hw, E1000_EITR(rx_que->msix), newitr);
}
return;
}
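/*
* Illustrative note (not part of the driver): the EITR value programmed
* above follows from the interrupt-rate cap.  Assuming
* em_max_interrupt_rate is 8000 interrupts/s:
*
*   newitr = (4000000 / 8000) & 0x7FFC = 500 = 0x1F4
*
* On the 82575 the value is mirrored into the upper 16 bits (0x01F401F4);
* on later MACs E1000_EITR_CNT_IGNR is OR'd in instead.  The rate cap is
* a tunable and may differ from 8000.
*/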
static void
em_free_pci_resources(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *que = adapter->rx_queues;
device_t dev = iflib_get_dev(ctx);
/* Release all msix queue resources */
if (adapter->intr_type == IFLIB_INTR_MSIX)
iflib_irq_free(ctx, &adapter->irq);
for (int i = 0; i < adapter->rx_num_queues; i++, que++) {
iflib_irq_free(ctx, &que->que_irq);
}
/* First release all the interrupt resources */
if (adapter->memory != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), adapter->memory);
adapter->memory = NULL;
}
if (adapter->flash != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
EM_FLASH, adapter->flash);
adapter->flash = NULL;
}
if (adapter->ioport != NULL)
bus_release_resource(dev, SYS_RES_IOPORT,
adapter->io_rid, adapter->ioport);
}
/* Setup MSI or MSI/X */
static int
em_setup_msix(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
if (adapter->hw.mac.type == e1000_82574) {
em_enable_vectors_82574(ctx);
}
return (0);
}
/*********************************************************************
*
* Workaround for SmartSpeed on 82541/82547 (IGP PHY) controllers.
*
**********************************************************************/
static void
lem_smartspeed(struct adapter *adapter)
{
u16 phy_tmp;
if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
adapter->hw.mac.autoneg == 0 ||
(adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
return;
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
* we assume back-to-back */
e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
return;
e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
e1000_read_phy_reg(&adapter->hw,
PHY_1000T_CTRL, &phy_tmp);
if(phy_tmp & CR_1000T_MS_ENABLE) {
phy_tmp &= ~CR_1000T_MS_ENABLE;
e1000_write_phy_reg(&adapter->hw,
PHY_1000T_CTRL, phy_tmp);
adapter->smartspeed++;
if(adapter->hw.mac.autoneg &&
!e1000_copper_link_autoneg(&adapter->hw) &&
!e1000_read_phy_reg(&adapter->hw,
PHY_CONTROL, &phy_tmp)) {
phy_tmp |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(&adapter->hw,
PHY_CONTROL, phy_tmp);
}
}
}
return;
} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
/* If still no link, perhaps using 2/3 pair cable */
e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
phy_tmp |= CR_1000T_MS_ENABLE;
e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
if(adapter->hw.mac.autoneg &&
!e1000_copper_link_autoneg(&adapter->hw) &&
!e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
phy_tmp |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
}
}
/* Restart process after EM_SMARTSPEED_MAX iterations */
if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
adapter->smartspeed = 0;
}
/*********************************************************************
*
* Initialize the DMA Coalescing feature
*
**********************************************************************/
static void
igb_init_dmac(struct adapter *adapter, u32 pba)
{
device_t dev = adapter->dev;
struct e1000_hw *hw = &adapter->hw;
u32 dmac, reg = ~E1000_DMACR_DMAC_EN;
u16 hwm;
u16 max_frame_size;
if (hw->mac.type == e1000_i211)
return;
max_frame_size = adapter->shared->isc_max_frame_size;
if (hw->mac.type > e1000_82580) {
if (adapter->dmac == 0) { /* Disabling it */
E1000_WRITE_REG(hw, E1000_DMACR, reg);
return;
} else
device_printf(dev, "DMA Coalescing enabled\n");
/* Set starting threshold */
E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
hwm = 64 * pba - max_frame_size / 16;
if (hwm < 64 * (pba - 6))
hwm = 64 * (pba - 6);
reg = E1000_READ_REG(hw, E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
& E1000_FCRTC_RTH_COAL_MASK);
E1000_WRITE_REG(hw, E1000_FCRTC, reg);
dmac = pba - max_frame_size / 512;
if (dmac < pba - 10)
dmac = pba - 10;
reg = E1000_READ_REG(hw, E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
reg = ((dmac << E1000_DMACR_DMACTHR_SHIFT)
& E1000_DMACR_DMACTHR_MASK);
/* transition to L0s or L1 if available */
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
/* Check whether this is a 2.5Gb backplane connection
* before configuring the watchdog timer: on a 2.5Gb
* link the watchdog is programmed in 12.8 usec units,
* otherwise in 32 usec units.
*/
if (hw->mac.type == e1000_i354) {
int status = E1000_READ_REG(hw, E1000_STATUS);
if ((status & E1000_STATUS_2P5_SKU) &&
(!(status & E1000_STATUS_2P5_SKU_OVER)))
reg |= ((adapter->dmac * 5) >> 6);
else
reg |= (adapter->dmac >> 5);
} else {
reg |= (adapter->dmac >> 5);
}
E1000_WRITE_REG(hw, E1000_DMACR, reg);
E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
/* Set the interval before transition */
reg = E1000_READ_REG(hw, E1000_DMCTLX);
if (hw->mac.type == e1000_i350)
reg |= IGB_DMCTLX_DCFLUSH_DIS;
/*
** In a 2.5Gb connection the TTLX unit is 0.4 usec,
** so the 4 usec delay requires 0xA units instead of 0x4.
*/
if (hw->mac.type == e1000_i354) {
int status = E1000_READ_REG(hw, E1000_STATUS);
if ((status & E1000_STATUS_2P5_SKU) &&
(!(status & E1000_STATUS_2P5_SKU_OVER)))
reg |= 0xA;
else
reg |= 0x4;
} else {
reg |= 0x4;
}
E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
/* free space in tx packet buffer to wake from DMA coal */
E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_TXPBSIZE -
(2 * max_frame_size)) >> 6);
/* make low power state decision controlled by DMA coal */
reg = E1000_READ_REG(hw, E1000_PCIEMISC);
reg &= ~E1000_PCIEMISC_LX_DECISION;
E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
} else if (hw->mac.type == e1000_82580) {
u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
E1000_WRITE_REG(hw, E1000_PCIEMISC,
reg & ~E1000_PCIEMISC_LX_DECISION);
E1000_WRITE_REG(hw, E1000_DMACR, 0);
}
}
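/*
* Illustrative note (not part of the driver): the thresholds above are
* simple arithmetic on the packet buffer size.  Taking E1000_PBA_34K at
* face value as 34 (KB, the i210/i211 case in em_reset()) and a
* 1518-byte maximum frame:
*
*   hwm = 64 * 34 - 1518 / 16 = 2176 - 94 = 2082
*         (floored at 64 * (34 - 6) = 1792, so 2082 is used)
*   DMA coalescing threshold = 34 - 1518 / 512 = 34 - 2 = 32
*         (floored at 34 - 10 = 24, so 32 is used)
*
* The pba actually passed in depends on the MAC type; 34 is an example.
*/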
static void
em_reset(if_ctx_t ctx)
{
device_t dev = iflib_get_dev(ctx);
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
struct e1000_hw *hw = &adapter->hw;
u16 rx_buffer_size;
u32 pba;
INIT_DEBUGOUT("em_reset: begin");
/* Let the firmware know the OS is in control */
em_get_hw_control(adapter);
/* Set up smart power down as default off on newer adapters. */
if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 ||
hw->mac.type == e1000_82572)) {
u16 phy_tmp = 0;
/* Speed up time to link by disabling smart power down. */
e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
phy_tmp &= ~IGP02E1000_PM_SPD;
e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
}
/*
* Packet Buffer Allocation (PBA)
* Writing PBA sets the receive portion of the buffer;
* the remainder is used for the transmit buffer.
*/
switch (hw->mac.type) {
/* Total Packet Buffer on these is 48K */
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
break;
case e1000_82573: /* 82573: Total Packet Buffer is 32K */
pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
break;
case e1000_82574:
case e1000_82583:
pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
break;
case e1000_ich8lan:
pba = E1000_PBA_8K;
break;
case e1000_ich9lan:
case e1000_ich10lan:
/* Boost Receive side for jumbo frames */
if (adapter->hw.mac.max_frame_size > 4096)
pba = E1000_PBA_14K;
else
pba = E1000_PBA_10K;
break;
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
pba = E1000_PBA_26K;
break;
case e1000_82575:
pba = E1000_PBA_32K;
break;
case e1000_82576:
case e1000_vfadapt:
pba = E1000_READ_REG(hw, E1000_RXPBS);
pba &= E1000_RXPBS_SIZE_MASK_82576;
break;
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_vfadapt_i350:
pba = E1000_READ_REG(hw, E1000_RXPBS);
pba = e1000_rxpbs_adjust_82580(pba);
break;
case e1000_i210:
case e1000_i211:
pba = E1000_PBA_34K;
break;
default:
if (adapter->hw.mac.max_frame_size > 8192)
pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
else
pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
}
/* Special needs in case of Jumbo frames */
if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
u32 tx_space, min_tx, min_rx;
pba = E1000_READ_REG(hw, E1000_PBA);
tx_space = pba >> 16;
pba &= 0xffff;
min_tx = (adapter->hw.mac.max_frame_size +
sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
min_tx = roundup2(min_tx, 1024);
min_tx >>= 10;
min_rx = adapter->hw.mac.max_frame_size;
min_rx = roundup2(min_rx, 1024);
min_rx >>= 10;
if (tx_space < min_tx &&
((min_tx - tx_space) < pba)) {
pba = pba - (min_tx - tx_space);
/*
* if short on rx space, rx wins
* and must trump tx adjustment
*/
if (pba < min_rx)
pba = min_rx;
}
E1000_WRITE_REG(hw, E1000_PBA, pba);
}
if (hw->mac.type < igb_mac_min)
E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
INIT_DEBUGOUT1("em_reset: pba=%dK",pba);
/*
* These parameters control the automatic generation (Tx) and
* response (Rx) to Ethernet PAUSE frames.
* - High water mark should allow for at least two frames to be
* received after sending an XOFF.
* - Low water mark works best when it is very near the high water mark.
* This allows the receiver to restart by sending XON when it has
* drained a bit. Here we use an arbitrary value of 1500 which will
* restart after one full frame is pulled from the buffer. There
* could be several smaller frames in the buffer and if so they will
* not trigger the XON until their total number reduces the buffer
* by 1500.
* - The pause time is fairly large at 1000 x 512ns = 512 usec.
*/
rx_buffer_size = (pba & 0xffff) << 10;
hw->fc.high_water = rx_buffer_size -
roundup2(adapter->hw.mac.max_frame_size, 1024);
hw->fc.low_water = hw->fc.high_water - 1500;
if (adapter->fc) /* locally set flow control value? */
hw->fc.requested_mode = adapter->fc;
else
hw->fc.requested_mode = e1000_fc_full;
if (hw->mac.type == e1000_80003es2lan)
hw->fc.pause_time = 0xFFFF;
else
hw->fc.pause_time = EM_FC_PAUSE_TIME;
hw->fc.send_xon = TRUE;
/* Device specific overrides/settings */
switch (hw->mac.type) {
case e1000_pchlan:
/* Workaround: no TX flow ctrl for PCH */
hw->fc.requested_mode = e1000_fc_rx_pause;
hw->fc.pause_time = 0xFFFF; /* override */
if (if_getmtu(ifp) > ETHERMTU) {
hw->fc.high_water = 0x3500;
hw->fc.low_water = 0x1500;
} else {
hw->fc.high_water = 0x5000;
hw->fc.low_water = 0x3000;
}
hw->fc.refresh_time = 0x1000;
break;
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
hw->fc.high_water = 0x5C20;
hw->fc.low_water = 0x5048;
hw->fc.pause_time = 0x0650;
hw->fc.refresh_time = 0x0400;
/* Jumbos need adjusted PBA */
if (if_getmtu(ifp) > ETHERMTU)
E1000_WRITE_REG(hw, E1000_PBA, 12);
else
E1000_WRITE_REG(hw, E1000_PBA, 26);
break;
case e1000_82575:
case e1000_82576:
/* 8-byte granularity */
hw->fc.low_water = hw->fc.high_water - 8;
break;
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
case e1000_vfadapt:
case e1000_vfadapt_i350:
/* 16-byte granularity */
hw->fc.low_water = hw->fc.high_water - 16;
break;
case e1000_ich9lan:
case e1000_ich10lan:
if (if_getmtu(ifp) > ETHERMTU) {
hw->fc.high_water = 0x2800;
hw->fc.low_water = hw->fc.high_water - 8;
break;
}
/* FALLTHROUGH */
default:
if (hw->mac.type == e1000_80003es2lan)
hw->fc.pause_time = 0xFFFF;
break;
}
/* Issue a global reset */
e1000_reset_hw(hw);
if (adapter->hw.mac.type >= igb_mac_min) {
E1000_WRITE_REG(hw, E1000_WUC, 0);
} else {
E1000_WRITE_REG(hw, E1000_WUFC, 0);
em_disable_aspm(adapter);
}
if (adapter->flags & IGB_MEDIA_RESET) {
e1000_setup_init_funcs(hw, TRUE);
e1000_get_bus_info(hw);
adapter->flags &= ~IGB_MEDIA_RESET;
}
/* and a re-init */
if (e1000_init_hw(hw) < 0) {
device_printf(dev, "Hardware Initialization Failed\n");
return;
}
if (adapter->hw.mac.type >= igb_mac_min)
igb_init_dmac(adapter, pba);
E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
e1000_get_phy_info(hw);
e1000_check_for_link(hw);
}
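/*
* Illustrative note (not part of the driver): the flow-control watermarks
* above follow directly from the PBA value.  Taking E1000_PBA_20K at face
* value as 20 (KB, the 82574 case) and a 1518-byte maximum frame:
*
*   rx_buffer_size = 20 << 10 = 20480 bytes
*   fc.high_water  = 20480 - roundup2(1518, 1024) = 20480 - 2048 = 18432
*   fc.low_water   = 18432 - 1500 = 16932
*
* The device-specific overrides in the switch statement then replace
* these values on PCH-class parts.
*/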
#define RSSKEYLEN 10
static void
em_initialize_rss_mapping(struct adapter *adapter)
{
uint8_t rss_key[4 * RSSKEYLEN];
uint32_t reta = 0;
struct e1000_hw *hw = &adapter->hw;
int i;
/*
* Configure RSS key
*/
arc4rand(rss_key, sizeof(rss_key), 0);
for (i = 0; i < RSSKEYLEN; ++i) {
uint32_t rssrk = 0;
rssrk = EM_RSSRK_VAL(rss_key, i);
E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk);
}
/*
* Configure RSS redirect table in following fashion:
* (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
*/
for (i = 0; i < sizeof(reta); ++i) {
uint32_t q;
q = (i % adapter->rx_num_queues) << 7;
reta |= q << (8 * i);
}
for (i = 0; i < 32; ++i)
E1000_WRITE_REG(hw, E1000_RETA(i), reta);
E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX |
E1000_MRQC_RSS_FIELD_IPV6_EX |
E1000_MRQC_RSS_FIELD_IPV6);
}
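/*
* Illustrative note (not part of the driver): with two RX queues the loop
* above builds a single 32-bit RETA word whose four byte entries alternate
* between queue 0 and queue 1.  The queue index is shifted left by 7, so
* queue 1 becomes 0x80 and the word is
*
*   reta = 0x80008000
*
* which is then replicated into all 32 RETA registers.  The shift
* presumably matches where the 82574 keeps its one-bit queue selector
* within each byte.
*/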
static void
igb_initialize_rss_mapping(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
int i;
int queue_id;
u32 reta;
u32 rss_key[10], mrqc, shift = 0;
/* XXX? */
if (adapter->hw.mac.type == e1000_82575)
shift = 6;
/*
* The redirection table controls which destination
* queue each bucket redirects traffic to.
* Each DWORD represents four queues, with the LSB
* being the first queue in the DWORD.
*
* This just allocates buckets to queues using round-robin
* allocation.
*
* NOTE: It Just Happens to line up with the default
* RSS allocation method.
*/
/* Warning FM follows */
reta = 0;
for (i = 0; i < 128; i++) {
#ifdef RSS
queue_id = rss_get_indirection_to_bucket(i);
/*
* If we have more queues than buckets, we'll
* end up mapping buckets to a subset of the
* queues.
*
* If we have more buckets than queues, we'll
* end up instead assigning multiple buckets
* to queues.
*
* Both are suboptimal, but we need to handle
* the case so we don't go out of bounds
* indexing arrays and such.
*/
queue_id = queue_id % adapter->rx_num_queues;
#else
queue_id = (i % adapter->rx_num_queues);
#endif
/* Adjust if required */
queue_id = queue_id << shift;
/*
* The low 8 bits are for hash value (n+0);
* The next 8 bits are for hash value (n+1), etc.
*/
reta = reta >> 8;
reta = reta | ( ((uint32_t) queue_id) << 24);
if ((i & 3) == 3) {
E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
reta = 0;
}
}
/* Now fill in hash table */
/*
* MRQC: Multiple Receive Queues Command
* Set queuing to RSS control, number depends on the device.
*/
mrqc = E1000_MRQC_ENABLE_RSS_8Q;
#ifdef RSS
/* XXX ew typecasting */
rss_getkey((uint8_t *) &rss_key);
#else
arc4rand(&rss_key, sizeof(rss_key), 0);
#endif
for (i = 0; i < 10; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key[i]);
/*
* Configure the RSS fields to hash upon.
*/
mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP);
mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP);
mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
E1000_MRQC_RSS_FIELD_IPV6_UDP);
mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
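/*
* Illustrative note (not part of the driver): the redirection loop above
* fills each 32-bit RETA register one byte at a time, shifting the
* accumulated value right by 8 and inserting the new queue id at the top
* byte.  With four RX queues and no shift (any MAC other than the 82575)
* the first register, written when i = 3, is therefore
*
*   RETA(0) = 0x03020100
*
* i.e. buckets 0..3 map to queues 0..3; on the 82575 each queue id is
* additionally shifted left by 6 before being packed.
*/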
/*********************************************************************
*
* Setup networking device structure and register an interface.
*
**********************************************************************/
static int
em_setup_interface(if_ctx_t ctx)
{
struct ifnet *ifp = iflib_get_ifp(ctx);
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
uint64_t cap = 0;
INIT_DEBUGOUT("em_setup_interface: begin");
/* TSO parameters */
if_sethwtsomax(ifp, IP_MAXPACKET);
/* Take m_pullup(9)'s in em_xmit() w/ TSO into account. */
if_sethwtsomaxsegcount(ifp, EM_MAX_SCATTER - 5);
if_sethwtsomaxsegsize(ifp, EM_TSO_SEG_SIZE);
/* Single Queue */
if (adapter->tx_num_queues == 1) {
if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
if_setsendqready(ifp);
}
cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4;
cap |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
/*
* Tell the upper layer(s) we
* support full VLAN capability
*/
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
if_setcapabilitiesbit(ifp, cap, 0);
/*
* Don't turn this on by default: if vlans are
* created on another pseudo device (e.g. lagg),
* then vlan events are not passed thru, breaking
* operation, but with HW FILTER off it works. If
* using vlans directly on the em driver you can
* enable this and get full hardware tag filtering.
*/
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER,0);
/* Enable only WOL MAGIC by default */
if (adapter->wol) {
if_setcapenablebit(ifp, IFCAP_WOL_MAGIC,
IFCAP_WOL_MCAST| IFCAP_WOL_UCAST);
} else {
if_setcapenablebit(ifp, 0, IFCAP_WOL_MAGIC |
IFCAP_WOL_MCAST| IFCAP_WOL_UCAST);
}
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
*/
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
(adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
u_char fiber_type = IFM_1000_SX; /* default type */
if (adapter->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
ifmedia_add(adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL);
ifmedia_add(adapter->media, IFM_ETHER | fiber_type, 0, NULL);
} else {
ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
if (adapter->hw.phy.type != e1000_phy_ife) {
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
}
}
ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
return (0);
}
static int
em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
int error = E1000_SUCCESS;
struct em_tx_queue *que;
int i, j;
MPASS(adapter->tx_num_queues > 0);
MPASS(adapter->tx_num_queues == ntxqsets);
/* First allocate the top level queue structs */
if (!(adapter->tx_queues =
- (struct em_tx_queue *) mallocarray(adapter->tx_num_queues,
- sizeof(struct em_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
+ (struct em_tx_queue *) malloc(sizeof(struct em_tx_queue) *
+ adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
return(ENOMEM);
}
for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) {
/* Set up some basics */
struct tx_ring *txr = &que->txr;
txr->adapter = que->adapter = adapter;
que->me = txr->me = i;
/* Allocate report status array */
- if (!(txr->tx_rsq = (qidx_t *) mallocarray(scctx->isc_ntxd[0],
- sizeof(qidx_t), M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
/* get the virtual and physical address of the hardware queues */
txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs];
txr->tx_paddr = paddrs[i*ntxqs];
}
device_printf(iflib_get_dev(ctx), "allocated for %d tx_queues\n", adapter->tx_num_queues);
return (0);
fail:
em_if_queues_free(ctx);
return (error);
}
static int
em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
struct adapter *adapter = iflib_get_softc(ctx);
int error = E1000_SUCCESS;
struct em_rx_queue *que;
int i;
MPASS(adapter->rx_num_queues > 0);
MPASS(adapter->rx_num_queues == nrxqsets);
/* First allocate the top level queue structs */
if (!(adapter->rx_queues =
- (struct em_rx_queue *) mallocarray(adapter->rx_num_queues,
- sizeof(struct em_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ (struct em_rx_queue *) malloc(sizeof(struct em_rx_queue) *
+ adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
error = ENOMEM;
goto fail;
}
for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
/* Set up some basics */
struct rx_ring *rxr = &que->rxr;
rxr->adapter = que->adapter = adapter;
rxr->que = que;
que->me = rxr->me = i;
/* get the virtual and physical address of the hardware queues */
rxr->rx_base = (union e1000_rx_desc_extended *)vaddrs[i*nrxqs];
rxr->rx_paddr = paddrs[i*nrxqs];
}
device_printf(iflib_get_dev(ctx), "allocated for %d rx_queues\n", adapter->rx_num_queues);
return (0);
fail:
em_if_queues_free(ctx);
return (error);
}
static void
em_if_queues_free(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_tx_queue *tx_que = adapter->tx_queues;
struct em_rx_queue *rx_que = adapter->rx_queues;
if (tx_que != NULL) {
for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
if (txr->tx_rsq == NULL)
break;
free(txr->tx_rsq, M_DEVBUF);
txr->tx_rsq = NULL;
}
free(adapter->tx_queues, M_DEVBUF);
adapter->tx_queues = NULL;
}
if (rx_que != NULL) {
free(adapter->rx_queues, M_DEVBUF);
adapter->rx_queues = NULL;
}
em_release_hw_control(adapter);
if (adapter->mta != NULL) {
free(adapter->mta, M_DEVBUF);
}
}
/*********************************************************************
*
* Enable transmit unit.
*
**********************************************************************/
static void
em_initialize_transmit_unit(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que;
struct tx_ring *txr;
struct e1000_hw *hw = &adapter->hw;
u32 tctl, txdctl = 0, tarc, tipg = 0;
INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
u64 bus_addr;
caddr_t offp, endp;
que = &adapter->tx_queues[i];
txr = &que->txr;
bus_addr = txr->tx_paddr;
/* Clear checksum offload context. */
offp = (caddr_t)&txr->csum_flags;
endp = (caddr_t)(txr + 1);
bzero(offp, endp - offp);
/* Base and Len of TX Ring */
E1000_WRITE_REG(hw, E1000_TDLEN(i),
scctx->isc_ntxd[0] * sizeof(struct e1000_tx_desc));
E1000_WRITE_REG(hw, E1000_TDBAH(i),
(u32)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_TDBAL(i),
(u32)bus_addr);
/* Init the HEAD/TAIL indices */
E1000_WRITE_REG(hw, E1000_TDT(i), 0);
E1000_WRITE_REG(hw, E1000_TDH(i), 0);
HW_DEBUGOUT2("Base = %x, Length = %x\n",
E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
txdctl = 0; /* clear txdctl */
txdctl |= 0x1f; /* PTHRESH */
txdctl |= 1 << 8; /* HTHRESH */
txdctl |= 1 << 16;/* WTHRESH */
txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
txdctl |= E1000_TXDCTL_GRAN;
txdctl |= 1 << 25; /* LWTHRESH */
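/*
* Summarizing the shifts above: PTHRESH sits in the low bits,
* HTHRESH starts at bit 8, WTHRESH at bit 16 and LWTHRESH at
* bit 25, with E1000_TXDCTL_GRAN selecting descriptor
* granularity for the thresholds (as its name suggests).
*/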
E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
}
/* Set the default values for the Tx Inter Packet Gap timer */
switch (adapter->hw.mac.type) {
case e1000_80003es2lan:
tipg = DEFAULT_82543_TIPG_IPGR1;
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
E1000_TIPG_IPGR2_SHIFT;
break;
case e1000_82542:
tipg = DEFAULT_82542_TIPG_IPGT;
tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
break;
default:
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
(adapter->hw.phy.media_type ==
e1000_media_type_internal_serdes))
tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
else
tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
}
E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
if(adapter->hw.mac.type >= e1000_82540)
E1000_WRITE_REG(&adapter->hw, E1000_TADV,
adapter->tx_abs_int_delay.value);
if ((adapter->hw.mac.type == e1000_82571) ||
(adapter->hw.mac.type == e1000_82572)) {
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
tarc |= TARC_SPEED_MODE_BIT;
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
} else if (adapter->hw.mac.type == e1000_80003es2lan) {
/* errata: program both queues to unweighted RR */
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
tarc |= 1;
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
tarc |= 1;
E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
} else if (adapter->hw.mac.type == e1000_82574) {
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
tarc |= TARC_ERRATA_BIT;
if ( adapter->tx_num_queues > 1) {
tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX);
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
} else
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
}
if (adapter->tx_int_delay.value > 0)
adapter->txd_cmd |= E1000_TXD_CMD_IDE;
/* Program the Transmit Control Register */
tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
if (adapter->hw.mac.type >= e1000_82571)
tctl |= E1000_TCTL_MULR;
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
/* SPT and KBL errata workarounds */
if (hw->mac.type == e1000_pch_spt) {
u32 reg;
reg = E1000_READ_REG(hw, E1000_IOSFPC);
reg |= E1000_RCTL_RDMTS_HEX;
E1000_WRITE_REG(hw, E1000_IOSFPC, reg);
/* i218-i219 Specification Update 1.5.4.5 */
reg = E1000_READ_REG(hw, E1000_TARC(0));
reg &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
reg |= E1000_TARC0_CB_MULTIQ_2_REQ;
E1000_WRITE_REG(hw, E1000_TARC(0), reg);
}
}
/*********************************************************************
*
* Enable receive unit.
*
**********************************************************************/
static void
em_initialize_receive_unit(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
struct ifnet *ifp = iflib_get_ifp(ctx);
struct e1000_hw *hw = &adapter->hw;
struct em_rx_queue *que;
int i;
u32 rctl, rxcsum, rfctl;
INIT_DEBUGOUT("em_initialize_receive_units: begin");
/*
* Make sure receives are disabled while setting
* up the descriptor ring
*/
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* Do not disable if ever enabled on this hardware */
if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* Setup the Receive Control Register */
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Do not store bad packets */
rctl &= ~E1000_RCTL_SBP;
/* Enable Long Packet receive */
if (if_getmtu(ifp) > ETHERMTU)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
/* Strip the CRC */
if (!em_disable_crc_stripping)
rctl |= E1000_RCTL_SECRC;
if (adapter->hw.mac.type >= e1000_82540) {
E1000_WRITE_REG(&adapter->hw, E1000_RADV,
adapter->rx_abs_int_delay.value);
/*
* Set the interrupt throttling rate. Value is calculated
* as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
*/
E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
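/*
* Worked example of the formula above (a sketch; the 8000
* interrupts/sec figure is the driver's customary MAX_INTS_PER_SEC
* default and is assumed here, not taken from this diff):
* 1e9 ns / (8000 * 256 ns) ~= 488 units of 256 ns, i.e. roughly
* one interrupt every 125 us.
*/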
}
E1000_WRITE_REG(&adapter->hw, E1000_RDTR,
adapter->rx_int_delay.value);
/* Use extended rx descriptor formats */
rfctl = E1000_READ_REG(hw, E1000_RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
/*
* When using MSIX interrupts we need to throttle
* using the EITR register (82574 only)
*/
if (hw->mac.type == e1000_82574) {
for (int i = 0; i < 4; i++)
E1000_WRITE_REG(hw, E1000_EITR_82574(i),
DEFAULT_ITR);
/* Disable accelerated acknowledge */
rfctl |= E1000_RFCTL_ACK_DIS;
}
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
if (if_getcapenable(ifp) & IFCAP_RXCSUM &&
adapter->hw.mac.type >= e1000_82543) {
if (adapter->tx_num_queues > 1) {
if (adapter->hw.mac.type >= igb_mac_min) {
rxcsum |= E1000_RXCSUM_PCSD;
if (hw->mac.type != e1000_82575)
rxcsum |= E1000_RXCSUM_CRCOFL;
} else
rxcsum |= E1000_RXCSUM_TUOFL |
E1000_RXCSUM_IPOFL |
E1000_RXCSUM_PCSD;
} else {
if (adapter->hw.mac.type >= igb_mac_min)
rxcsum |= E1000_RXCSUM_IPPCSE;
else
rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL;
if (adapter->hw.mac.type > e1000_82575)
rxcsum |= E1000_RXCSUM_CRCOFL;
}
} else
rxcsum &= ~E1000_RXCSUM_TUOFL;
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
if (adapter->rx_num_queues > 1) {
if (adapter->hw.mac.type >= igb_mac_min)
igb_initialize_rss_mapping(adapter);
else
em_initialize_rss_mapping(adapter);
}
/*
* XXX TEMPORARY WORKAROUND: on some systems with 82573
* long latencies are observed, like Lenovo X60. This
* change eliminates the problem, but since having positive
* values in RDTR is a known source of problems on other
* platforms, another solution is being sought.
*/
if (hw->mac.type == e1000_82573)
E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
/* Setup the Base and Length of the Rx Descriptor Ring */
u64 bus_addr = rxr->rx_paddr;
#if 0
u32 rdt = adapter->rx_num_queues -1; /* default */
#endif
E1000_WRITE_REG(hw, E1000_RDLEN(i),
scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended));
E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
/* Setup the Head and Tail Descriptor Pointers */
E1000_WRITE_REG(hw, E1000_RDH(i), 0);
E1000_WRITE_REG(hw, E1000_RDT(i), 0);
}
/*
* Set PTHRESH for improved jumbo performance
* According to 10.2.5.11 of Intel 82574 Datasheet,
* RXDCTL(1) is written whenever RXDCTL(0) is written.
* Only write to RXDCTL(1) if there is a need for different
* settings.
*/
if (((adapter->hw.mac.type == e1000_ich9lan) ||
(adapter->hw.mac.type == e1000_pch2lan) ||
(adapter->hw.mac.type == e1000_ich10lan)) &&
(if_getmtu(ifp) > ETHERMTU)) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
} else if (adapter->hw.mac.type == e1000_82574) {
for (int i = 0; i < adapter->rx_num_queues; i++) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
rxdctl |= 0x20; /* PTHRESH */
rxdctl |= 4 << 8; /* HTHRESH */
rxdctl |= 4 << 16;/* WTHRESH */
rxdctl |= 1 << 24; /* Switch to granularity */
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
} else if (adapter->hw.mac.type >= igb_mac_min) {
u32 psize, srrctl = 0;
if (if_getmtu(ifp) > ETHERMTU) {
/* Set maximum packet len */
if (adapter->rx_mbuf_sz <= 4096) {
srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
} else if (adapter->rx_mbuf_sz > 4096) {
srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
}
psize = scctx->isc_max_frame_size;
/* are we on a vlan? */
if (ifp->if_vlantrunk != NULL)
psize += VLAN_TAG_SIZE;
E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
} else {
srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_2048;
}
/*
* If TX flow control is disabled and there's >1 queue defined,
* enable DROP.
*
* This drops frames rather than hanging the RX MAC for all queues.
*/
if ((adapter->rx_num_queues > 1) &&
(adapter->fc == e1000_fc_none ||
adapter->fc == e1000_fc_rx_pause)) {
srrctl |= E1000_SRRCTL_DROP_EN;
}
/* Setup the Base and Length of the Rx Descriptor Rings */
for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
u64 bus_addr = rxr->rx_paddr;
u32 rxdctl;
#ifdef notyet
/* Configure for header split? -- ignore for now */
rxr->hdr_split = igb_header_split;
#else
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
#endif
E1000_WRITE_REG(hw, E1000_RDLEN(i),
scctx->isc_nrxd[0] * sizeof(struct e1000_rx_desc));
E1000_WRITE_REG(hw, E1000_RDBAH(i),
(uint32_t)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_RDBAL(i),
(uint32_t)bus_addr);
E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
/* Enable this Queue */
rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
rxdctl &= 0xFFF00000;
rxdctl |= IGB_RX_PTHRESH;
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
} else if (adapter->hw.mac.type >= e1000_pch2lan) {
if (if_getmtu(ifp) > ETHERMTU)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
/* Make sure VLAN Filters are off */
rctl &= ~E1000_RCTL_VFE;
if (adapter->hw.mac.type < igb_mac_min) {
if (adapter->rx_mbuf_sz == MCLBYTES)
rctl |= E1000_RCTL_SZ_2048;
else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
else if (adapter->rx_mbuf_sz > MJUMPAGESIZE)
rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
/* Clear the descriptor type bits so a DTYPE of 00 is used here. */
rctl &= ~0x00000C00;
}
/* Write out the settings */
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
return;
}
static void
em_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
struct adapter *adapter = iflib_get_softc(ctx);
u32 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
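/*
* Example of the split above: the 128-word VFTA is a 4096-bit
* bitmap, so the upper seven bits of the 12-bit VLAN tag pick the
* 32-bit word and the lower five bits pick the bit within it,
* e.g. vtag 100 -> index 3, bit 4.
*/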
adapter->shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
}
static void
em_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
struct adapter *adapter = iflib_get_softc(ctx);
u32 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
adapter->shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
}
static void
em_setup_vlan_hw_support(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 reg;
/*
* We get here through init_locked, meaning
* a soft reset; this has already cleared
* the VFTA and other state, so if no
* VLANs have been registered, do nothing.
*/
if (adapter->num_vlans == 0)
return;
/*
* A soft reset zeroes out the VFTA, so
* we need to repopulate it now.
*/
for (int i = 0; i < EM_VFTA_SIZE; i++)
if (adapter->shadow_vfta[i] != 0)
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
i, adapter->shadow_vfta[i]);
reg = E1000_READ_REG(hw, E1000_CTRL);
reg |= E1000_CTRL_VME;
E1000_WRITE_REG(hw, E1000_CTRL, reg);
/* Enable the Filter Table */
reg = E1000_READ_REG(hw, E1000_RCTL);
reg &= ~E1000_RCTL_CFIEN;
reg |= E1000_RCTL_VFE;
E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
static void
em_if_enable_intr(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
u32 ims_mask = IMS_ENABLE_MASK;
if (hw->mac.type == e1000_82574) {
E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
ims_mask |= adapter->ims;
} else if (adapter->intr_type == IFLIB_INTR_MSIX && hw->mac.type >= igb_mac_min) {
u32 mask = (adapter->que_mask | adapter->link_mask);
E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask);
E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask);
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask);
ims_mask = E1000_IMS_LSC;
}
E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
}
static void
em_if_disable_intr(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
if (adapter->intr_type == IFLIB_INTR_MSIX) {
if (hw->mac.type >= igb_mac_min)
E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
}
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
}
/*
* A bit of a misnomer: what this really means is
* to enable OS management of the system, i.e.
* to disable the special hardware management features.
*/
static void
em_init_manageability(struct adapter *adapter)
{
/* A shared code workaround */
#define E1000_82542_MANC2H E1000_MANC2H
if (adapter->has_manage) {
int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
/* disable hardware interception of ARP */
manc &= ~(E1000_MANC_ARP_EN);
/* enable receiving management packets to the host */
manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
manc2h |= E1000_MNG2HOST_PORT_623;
manc2h |= E1000_MNG2HOST_PORT_664;
E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
}
}
/*
* Give control back to hardware management
* controller if there is one.
*/
static void
em_release_manageability(struct adapter *adapter)
{
if (adapter->has_manage) {
int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
/* re-enable hardware interception of ARP */
manc |= E1000_MANC_ARP_EN;
manc &= ~E1000_MANC_EN_MNG2HOST;
E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
}
}
/*
* em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means
* that the driver is loaded. For AMT versions of the f/w
* this means that the network i/f is open.
*/
static void
em_get_hw_control(struct adapter *adapter)
{
u32 ctrl_ext, swsm;
if (adapter->vf_ifp)
return;
if (adapter->hw.mac.type == e1000_82573) {
swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
swsm | E1000_SWSM_DRV_LOAD);
return;
}
/* else */
ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/*
* em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is no longer loaded. For AMT versions of the
* f/w this means that the network i/f is closed.
*/
static void
em_release_hw_control(struct adapter *adapter)
{
u32 ctrl_ext, swsm;
if (!adapter->has_manage)
return;
if (adapter->hw.mac.type == e1000_82573) {
swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
swsm & ~E1000_SWSM_DRV_LOAD);
return;
}
/* else */
ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
return;
}
static int
em_is_valid_ether_addr(u8 *addr)
{
char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
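/*
* Reject multicast/broadcast addresses (I/G bit set in the first
* octet) and the all-zeros address; anything else is considered
* valid.
*/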
if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
return (FALSE);
}
return (TRUE);
}
/*
** Parse the interface capabilities with regard
** to both system management and wake-on-lan for
** later use.
*/
static void
em_get_wakeup(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
u16 eeprom_data = 0, device_id, apme_mask;
adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
apme_mask = EM_EEPROM_APME;
switch (adapter->hw.mac.type) {
case e1000_82542:
case e1000_82543:
break;
case e1000_82544:
e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
apme_mask = EM_82544_APME;
break;
case e1000_82546:
case e1000_82546_rev_3:
if (adapter->hw.bus.func == 1) {
e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
break;
} else
e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
break;
case e1000_82573:
case e1000_82583:
adapter->has_amt = TRUE;
/* FALLTHROUGH */
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
if (adapter->hw.bus.func == 1) {
e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
break;
} else
e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
break;
case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_82575: /* listing all igb devices */
case e1000_82576:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
case e1000_vfadapt:
case e1000_vfadapt_i350:
apme_mask = E1000_WUC_APME;
adapter->has_amt = TRUE;
eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
break;
default:
e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
break;
}
if (eeprom_data & apme_mask)
adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
/*
* We have the eeprom settings, now apply the special cases
* where the eeprom may be wrong or the board won't support
* wake on lan on a particular port
*/
device_id = pci_get_device(dev);
switch (device_id) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->wol = 0;
break;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
E1000_STATUS_FUNC_1)
adapter->wol = 0;
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->wol = 0;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
E1000_STATUS_FUNC_1)
adapter->wol = 0;
break;
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->wol = 0;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
}
return;
}
/*
* Enable PCI Wake On Lan capability
*/
static void
em_enable_wakeup(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
if_t ifp = iflib_get_ifp(ctx);
int error = 0;
u32 pmc, ctrl, ctrl_ext, rctl;
u16 status;
if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
return;
/*
* Determine type of Wakeup: note that wol
* is set with all bits on by default.
*/
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
adapter->wol &= ~E1000_WUFC_MAG;
if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
adapter->wol &= ~E1000_WUFC_EX;
if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
adapter->wol &= ~E1000_WUFC_MC;
else {
rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
}
if (!(adapter->wol & (E1000_WUFC_EX | E1000_WUFC_MAG | E1000_WUFC_MC)))
goto pme;
/* Advertise the wakeup capability */
ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
/* Keep the laser running on Fiber adapters */
if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
}
if ((adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_pchlan) ||
(adapter->hw.mac.type == e1000_ich9lan) ||
(adapter->hw.mac.type == e1000_ich10lan))
e1000_suspend_workarounds_ich8lan(&adapter->hw);
if ( adapter->hw.mac.type >= e1000_pchlan) {
error = em_enable_phy_wakeup(adapter);
if (error)
goto pme;
} else {
/* Enable wakeup by the MAC */
E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
}
if (adapter->hw.phy.type == e1000_phy_igp_3)
e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
pme:
status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
return;
}
/*
* WOL in the newer chipset interfaces (pchlan)
* requires things to be copied into the PHY
*/
static int
em_enable_phy_wakeup(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mreg, ret = 0;
u16 preg;
/* copy MAC RARs to PHY RARs */
e1000_copy_rx_addrs_to_phy_ich8lan(hw);
/* copy MAC MTA to PHY MTA */
for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
e1000_write_phy_reg(hw, BM_MTA(i) + 1,
(u16)((mreg >> 16) & 0xFFFF));
}
/* configure PHY Rx Control register */
e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
mreg = E1000_READ_REG(hw, E1000_RCTL);
if (mreg & E1000_RCTL_UPE)
preg |= BM_RCTL_UPE;
if (mreg & E1000_RCTL_MPE)
preg |= BM_RCTL_MPE;
preg &= ~(BM_RCTL_MO_MASK);
if (mreg & E1000_RCTL_MO_3)
preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
<< BM_RCTL_MO_SHIFT);
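/*
* The shift pair above merely relocates the multicast-offset
* field from its position in the MAC RCTL register to its
* position in the PHY BM_RCTL register; the field value itself
* is carried over unchanged.
*/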
if (mreg & E1000_RCTL_BAM)
preg |= BM_RCTL_BAM;
if (mreg & E1000_RCTL_PMCF)
preg |= BM_RCTL_PMCF;
mreg = E1000_READ_REG(hw, E1000_CTRL);
if (mreg & E1000_CTRL_RFCE)
preg |= BM_RCTL_RFCE;
e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
/* enable PHY wakeup in MAC register */
E1000_WRITE_REG(hw, E1000_WUC,
E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN | E1000_WUC_APME);
E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
/* configure and enable PHY wakeup in PHY registers */
e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
/* activate PHY wakeup */
ret = hw->phy.ops.acquire(hw);
if (ret) {
printf("Could not acquire PHY\n");
return ret;
}
e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
(BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
if (ret) {
printf("Could not read PHY page 769\n");
goto out;
}
preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
if (ret)
printf("Could not set PHY Host Wakeup bit\n");
out:
hw->phy.ops.release(hw);
return ret;
}
static void
em_if_led_func(if_ctx_t ctx, int onoff)
{
struct adapter *adapter = iflib_get_softc(ctx);
if (onoff) {
e1000_setup_led(&adapter->hw);
e1000_led_on(&adapter->hw);
} else {
e1000_led_off(&adapter->hw);
e1000_cleanup_led(&adapter->hw);
}
}
/*
* Disable the L0S and L1 LINK states
*/
static void
em_disable_aspm(struct adapter *adapter)
{
int base, reg;
u16 link_cap,link_ctrl;
device_t dev = adapter->dev;
switch (adapter->hw.mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
break;
default:
return;
}
if (pci_find_cap(dev, PCIY_EXPRESS, &base) != 0)
return;
reg = base + PCIER_LINK_CAP;
link_cap = pci_read_config(dev, reg, 2);
if ((link_cap & PCIEM_LINK_CAP_ASPM) == 0)
return;
reg = base + PCIER_LINK_CTL;
link_ctrl = pci_read_config(dev, reg, 2);
link_ctrl &= ~PCIEM_LINK_CTL_ASPMC;
pci_write_config(dev, reg, link_ctrl, 2);
return;
}
/**********************************************************************
*
* Update the board statistics counters.
*
**********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
if(adapter->hw.phy.media_type == e1000_media_type_copper ||
(E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
}
adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
/*
** For watchdog management we need to know if we have been
** paused during the last interval, so capture that here.
*/
adapter->shared->isc_pause_frames = adapter->stats.xoffrxc;
adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
/* For the 64-bit byte counters the low dword must be read first. */
/* Both registers clear on the read of the high dword */
adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* Interrupt Counts */
adapter->stats.iac += E1000_READ_REG(&adapter->hw, E1000_IAC);
adapter->stats.icrxptc += E1000_READ_REG(&adapter->hw, E1000_ICRXPTC);
adapter->stats.icrxatc += E1000_READ_REG(&adapter->hw, E1000_ICRXATC);
adapter->stats.ictxptc += E1000_READ_REG(&adapter->hw, E1000_ICTXPTC);
adapter->stats.ictxatc += E1000_READ_REG(&adapter->hw, E1000_ICTXATC);
adapter->stats.ictxqec += E1000_READ_REG(&adapter->hw, E1000_ICTXQEC);
adapter->stats.ictxqmtc += E1000_READ_REG(&adapter->hw, E1000_ICTXQMTC);
adapter->stats.icrxdmtc += E1000_READ_REG(&adapter->hw, E1000_ICRXDMTC);
adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC);
if (adapter->hw.mac.type >= e1000_82543) {
adapter->stats.algnerrc +=
E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
adapter->stats.rxerrc +=
E1000_READ_REG(&adapter->hw, E1000_RXERRC);
adapter->stats.tncrs +=
E1000_READ_REG(&adapter->hw, E1000_TNCRS);
adapter->stats.cexterr +=
E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
adapter->stats.tsctc +=
E1000_READ_REG(&adapter->hw, E1000_TSCTC);
adapter->stats.tsctfc +=
E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
}
}
static uint64_t
em_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_COLLISIONS:
return (adapter->stats.colc);
case IFCOUNTER_IERRORS:
return (adapter->dropped_pkts + adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
adapter->stats.mpc + adapter->stats.cexterr);
case IFCOUNTER_OERRORS:
return (adapter->stats.ecol + adapter->stats.latecol +
adapter->watchdog_events);
default:
return (if_get_counter_default(ifp, cnt));
}
}
/* Export a single 32-bit register via a read-only sysctl. */
static int
em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter;
u_int val;
adapter = oidp->oid_arg1;
val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
return (sysctl_handle_int(oidp, &val, 0, req));
}
/*
* Add sysctl variables, one per statistic, to the system.
*/
static void
em_add_hw_stats(struct adapter *adapter)
{
device_t dev = iflib_get_dev(adapter->ctx);
struct em_tx_queue *tx_que = adapter->tx_queues;
struct em_rx_queue *rx_que = adapter->rx_queues;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct e1000_hw_stats *stats = &adapter->stats;
struct sysctl_oid *stat_node, *queue_node, *int_node;
struct sysctl_oid_list *stat_list, *queue_list, *int_list;
#define QUEUE_NAME_LEN 32
char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
CTLFLAG_RD, &adapter->dropped_pkts,
"Driver dropped packets");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
CTLFLAG_RD, &adapter->link_irq,
"Link MSIX IRQ Handled");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
CTLFLAG_RD, &adapter->mbuf_defrag_failed,
"Defragmenting mbuf chain failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
CTLFLAG_RD, &adapter->no_tx_dma_setup,
"Driver tx dma failure in xmit");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
CTLFLAG_RD, &adapter->rx_overruns,
"RX overruns");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
CTLFLAG_RD, &adapter->watchdog_events,
"Watchdog timeouts");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
em_sysctl_reg_handler, "IU",
"Device Control Register");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
em_sysctl_reg_handler, "IU",
"Receiver Control Register");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
"Flow Control High Watermark");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
"Flow Control Low Watermark");
for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "TX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_TDH(txr->me),
em_sysctl_reg_handler, "IU",
"Transmit Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_TDT(txr->me),
em_sysctl_reg_handler, "IU",
"Transmit Descriptor Tail");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
CTLFLAG_RD, &txr->tx_irq,
"Queue MSI-X Transmit Interrupts");
}
for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) {
struct rx_ring *rxr = &rx_que->rxr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "RX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_RDH(rxr->me),
em_sysctl_reg_handler, "IU",
"Receive Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
CTLTYPE_UINT | CTLFLAG_RD, adapter,
E1000_RDT(rxr->me),
em_sysctl_reg_handler, "IU",
"Receive Descriptor Tail");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
CTLFLAG_RD, &rxr->rx_irq,
"Queue MSI-X Receive Interrupts");
}
/* MAC stats get their own sub node */
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
CTLFLAG_RD, NULL, "Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
CTLFLAG_RD, &stats->ecol,
"Excessive collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
CTLFLAG_RD, &stats->scc,
"Single collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
CTLFLAG_RD, &stats->mcc,
"Multiple collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
CTLFLAG_RD, &stats->latecol,
"Late collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
CTLFLAG_RD, &stats->colc,
"Collision Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
CTLFLAG_RD, &adapter->stats.symerrs,
"Symbol Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
CTLFLAG_RD, &adapter->stats.sec,
"Sequence Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
CTLFLAG_RD, &adapter->stats.dc,
"Defer Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
CTLFLAG_RD, &adapter->stats.mpc,
"Missed Packets");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
CTLFLAG_RD, &adapter->stats.rnbc,
"Receive No Buffers");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
CTLFLAG_RD, &adapter->stats.ruc,
"Receive Undersize");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
CTLFLAG_RD, &adapter->stats.rfc,
"Fragmented Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
CTLFLAG_RD, &adapter->stats.roc,
"Oversized Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
CTLFLAG_RD, &adapter->stats.rjc,
"Recevied Jabber");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
CTLFLAG_RD, &adapter->stats.rxerrc,
"Receive Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
CTLFLAG_RD, &adapter->stats.crcerrs,
"CRC errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
CTLFLAG_RD, &adapter->stats.algnerrc,
"Alignment Errors");
/* On 82575 these are collision counts */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
CTLFLAG_RD, &adapter->stats.cexterr,
"Collision/Carrier extension errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
CTLFLAG_RD, &adapter->stats.xonrxc,
"XON Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
CTLFLAG_RD, &adapter->stats.xontxc,
"XON Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
CTLFLAG_RD, &adapter->stats.xoffrxc,
"XOFF Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
CTLFLAG_RD, &adapter->stats.xofftxc,
"XOFF Transmitted");
/* Packet Reception Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
CTLFLAG_RD, &adapter->stats.tpr,
"Total Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
CTLFLAG_RD, &adapter->stats.gprc,
"Good Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
CTLFLAG_RD, &adapter->stats.bprc,
"Broadcast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
CTLFLAG_RD, &adapter->stats.mprc,
"Multicast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
CTLFLAG_RD, &adapter->stats.prc64,
"64 byte frames received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
CTLFLAG_RD, &adapter->stats.prc127,
"65-127 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
CTLFLAG_RD, &adapter->stats.prc255,
"128-255 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
CTLFLAG_RD, &adapter->stats.prc511,
"256-511 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
CTLFLAG_RD, &adapter->stats.prc1023,
"512-1023 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
CTLFLAG_RD, &adapter->stats.prc1522,
"1023-1522 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
CTLFLAG_RD, &adapter->stats.gorc,
"Good Octets Received");
/* Packet Transmission Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
CTLFLAG_RD, &adapter->stats.gotc,
"Good Octets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
CTLFLAG_RD, &adapter->stats.tpt,
"Total Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
CTLFLAG_RD, &adapter->stats.gptc,
"Good Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
CTLFLAG_RD, &adapter->stats.bptc,
"Broadcast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
CTLFLAG_RD, &adapter->stats.mptc,
"Multicast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
CTLFLAG_RD, &adapter->stats.ptc64,
"64 byte frames transmitted ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
CTLFLAG_RD, &adapter->stats.ptc127,
"65-127 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
CTLFLAG_RD, &adapter->stats.ptc255,
"128-255 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
CTLFLAG_RD, &adapter->stats.ptc511,
"256-511 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
CTLFLAG_RD, &adapter->stats.ptc1023,
"512-1023 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
CTLFLAG_RD, &adapter->stats.ptc1522,
"1024-1522 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
CTLFLAG_RD, &adapter->stats.tsctc,
"TSO Contexts Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
CTLFLAG_RD, &adapter->stats.tsctfc,
"TSO Contexts Failed");
/* Interrupt Stats */
int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
CTLFLAG_RD, NULL, "Interrupt Statistics");
int_list = SYSCTL_CHILDREN(int_node);
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
CTLFLAG_RD, &adapter->stats.iac,
"Interrupt Assertion Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
CTLFLAG_RD, &adapter->stats.icrxptc,
"Interrupt Cause Rx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
CTLFLAG_RD, &adapter->stats.icrxatc,
"Interrupt Cause Rx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
CTLFLAG_RD, &adapter->stats.ictxptc,
"Interrupt Cause Tx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
CTLFLAG_RD, &adapter->stats.ictxatc,
"Interrupt Cause Tx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
CTLFLAG_RD, &adapter->stats.ictxqec,
"Interrupt Cause Tx Queue Empty Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
CTLFLAG_RD, &adapter->stats.ictxqmtc,
"Interrupt Cause Tx Queue Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
CTLFLAG_RD, &adapter->stats.icrxdmtc,
"Interrupt Cause Rx Desc Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
CTLFLAG_RD, &adapter->stats.icrxoc,
"Interrupt Cause Receiver Overrun Count");
}
/**********************************************************************
*
* This routine provides a way to dump out the adapter eeprom,
* often a useful debug/service tool. It only dumps the first
* 32 words; the stuff that matters lies within that extent.
*
**********************************************************************/
static int
em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter = (struct adapter *)arg1;
int error;
int result;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
/*
* This value will cause a hex dump of the
* first 32 16-bit words of the EEPROM to
* the screen.
*/
if (result == 1)
em_print_nvm_info(adapter);
return (error);
}
static void
em_print_nvm_info(struct adapter *adapter)
{
u16 eeprom_data;
int i, j, row = 0;
/* It's a bit crude, but it gets the job done */
printf("\nInterface EEPROM Dump:\n");
printf("Offset\n0x0000 ");
for (i = 0, j = 0; i < 32; i++, j++) {
if (j == 8) { /* Make the offset block */
j = 0; ++row;
printf("\n0x00%x0 ",row);
}
e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
printf("%04x ", eeprom_data);
}
printf("\n");
}
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
struct em_int_delay_info *info;
struct adapter *adapter;
u32 regval;
int error, usecs, ticks;
info = (struct em_int_delay_info *) arg1;
usecs = info->value;
error = sysctl_handle_int(oidp, &usecs, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
return (EINVAL);
info->value = usecs;
ticks = EM_USECS_TO_TICKS(usecs);
if (info->offset == E1000_ITR) /* units are 256ns here */
ticks *= 4;
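/*
* Sketch of the unit conversion, assuming EM_USECS_TO_TICKS()
* yields 1.024 us ticks (an assumption based on its name and the
* *4 adjustment for the 256 ns ITR units): a request of 100 us
* becomes roughly 98 ticks for the delay registers, or
* 98 * 4 = 392 units of 256 ns for E1000_ITR -- about 100 us
* either way.
*/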
adapter = info->adapter;
regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
regval = (regval & ~0xffff) | (ticks & 0xffff);
/* Handle a few special cases. */
switch (info->offset) {
case E1000_RDTR:
break;
case E1000_TIDV:
if (ticks == 0) {
adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
/* Don't write 0 into the TIDV register. */
regval++;
} else
adapter->txd_cmd |= E1000_TXD_CMD_IDE;
break;
}
E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
return (0);
}
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
const char *description, struct em_int_delay_info *info,
int offset, int value)
{
info->adapter = adapter;
info->offset = offset;
info->value = value;
SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
info, 0, em_sysctl_int_delay, "I", description);
}
/*
* Set flow control using sysctl:
* Flow control values:
* 0 - off
* 1 - rx pause
* 2 - tx pause
* 3 - full
*/
static int
em_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
int error;
static int input = 3; /* default is full */
struct adapter *adapter = (struct adapter *) arg1;
error = sysctl_handle_int(oidp, &input, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if (input == adapter->fc) /* no change? */
return (error);
switch (input) {
case e1000_fc_rx_pause:
case e1000_fc_tx_pause:
case e1000_fc_full:
case e1000_fc_none:
adapter->hw.fc.requested_mode = input;
adapter->fc = input;
break;
default:
/* Do nothing */
return (error);
}
adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
e1000_force_mac_fc(&adapter->hw);
return (error);
}
/*
* Manage Energy Efficient Ethernet:
* Control values:
* 0/1 - enabled/disabled
*/
static int
em_sysctl_eee(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter = (struct adapter *) arg1;
int error, value;
value = adapter->hw.dev_spec.ich8lan.eee_disable;
error = sysctl_handle_int(oidp, &value, 0, req);
if (error || req->newptr == NULL)
return (error);
adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0);
em_if_init(adapter->ctx);
return (0);
}
static int
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter;
int error;
int result;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
adapter = (struct adapter *) arg1;
em_print_debug_info(adapter);
}
return (error);
}
static int
em_get_rs(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter = (struct adapter *) arg1;
int error;
int result;
result = 0;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr || result != 1)
return (error);
em_dump_rs(adapter);
return (error);
}
static void
em_if_debug(if_ctx_t ctx)
{
em_dump_rs(iflib_get_softc(ctx));
}
/*
* This routine is meant to be fluid; add whatever is
* needed for debugging a problem. -jfv
*/
static void
em_print_debug_info(struct adapter *adapter)
{
device_t dev = iflib_get_dev(adapter->ctx);
struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
struct tx_ring *txr = &adapter->tx_queues->txr;
struct rx_ring *rxr = &adapter->rx_queues->rxr;
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
printf("Interface is RUNNING ");
else
printf("Interface is NOT RUNNING\n");
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
printf("and INACTIVE\n");
else
printf("and ACTIVE\n");
for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
device_printf(dev, "TX Queue %d ------\n", i);
device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
}
for (int j=0; j < adapter->rx_num_queues; j++, rxr++) {
device_printf(dev, "RX Queue %d ------\n", j);
device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
E1000_READ_REG(&adapter->hw, E1000_RDH(j)),
E1000_READ_REG(&adapter->hw, E1000_RDT(j)));
}
}
/*
* 82574 only:
* Write a new value to the EEPROM increasing the number of MSIX
* vectors from 3 to 5, for proper multiqueue support.
*/
static void
em_enable_vectors_82574(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct e1000_hw *hw = &adapter->hw;
device_t dev = iflib_get_dev(ctx);
u16 edata;
e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
printf("Current cap: %#06x\n", edata);
if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) {
device_printf(dev, "Writing to eeprom: increasing "
"reported MSIX vectors from 3 to 5...\n");
edata &= ~(EM_NVM_MSIX_N_MASK);
edata |= 4 << EM_NVM_MSIX_N_SHIFT;
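/*
* The field apparently encodes (vectors - 1): the message above
* reports going from 3 to 5 vectors while the value written is 4.
* That reading is inferred from this code, not from the datasheet.
*/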
e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
e1000_update_nvm_checksum(hw);
device_printf(dev, "Writing to eeprom: done\n");
}
}
Index: head/sys/dev/esp/ncr53c9x.c
===================================================================
--- head/sys/dev/esp/ncr53c9x.c (revision 328217)
+++ head/sys/dev/esp/ncr53c9x.c (revision 328218)
@@ -1,3259 +1,3259 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause NetBSD
*
* Copyright (c) 2004 Scott Long
* Copyright (c) 2005, 2008 Marius Strobl <marius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/* $NetBSD: ncr53c9x.c,v 1.145 2012/06/18 21:23:56 martin Exp $ */
/*-
* Copyright (c) 1998, 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 1994 Peter Galbavy
* Copyright (c) 1995 Paul Kranenburg
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Peter Galbavy
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Based on aic6360 by Jarle Greipsland
*
* Acknowledgements: Many of the algorithms used in this driver are
* inspired by the work of Julian Elischer (julian@FreeBSD.org) and
* Charles Hannum (mycroft@duality.gnu.ai.mit.edu). Thanks a million!
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/callout.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <dev/esp/ncr53c9xreg.h>
#include <dev/esp/ncr53c9xvar.h>
devclass_t esp_devclass;
MODULE_DEPEND(esp, cam, 1, 1, 1);
#ifdef NCR53C9X_DEBUG
int ncr53c9x_debug =
NCR_SHOWMISC /* | NCR_SHOWPHASE | NCR_SHOWTRAC | NCR_SHOWCMDS */;
#endif
static void ncr53c9x_abort(struct ncr53c9x_softc *sc,
struct ncr53c9x_ecb *ecb);
static void ncr53c9x_action(struct cam_sim *sim, union ccb *ccb);
static void ncr53c9x_async(void *cbarg, uint32_t code,
struct cam_path *path, void *arg);
static void ncr53c9x_callout(void *arg);
static void ncr53c9x_clear(struct ncr53c9x_softc *sc, cam_status result);
static void ncr53c9x_clear_target(struct ncr53c9x_softc *sc, int target,
cam_status result);
static void ncr53c9x_dequeue(struct ncr53c9x_softc *sc,
struct ncr53c9x_ecb *ecb);
static void ncr53c9x_done(struct ncr53c9x_softc *sc,
struct ncr53c9x_ecb *ecb);
static void ncr53c9x_free_ecb(struct ncr53c9x_softc *sc,
struct ncr53c9x_ecb *ecb);
static void ncr53c9x_msgin(struct ncr53c9x_softc *sc);
static void ncr53c9x_msgout(struct ncr53c9x_softc *sc);
static void ncr53c9x_init(struct ncr53c9x_softc *sc, int doreset);
static void ncr53c9x_intr1(struct ncr53c9x_softc *sc);
static void ncr53c9x_poll(struct cam_sim *sim);
static int ncr53c9x_rdfifo(struct ncr53c9x_softc *sc, int how);
static int ncr53c9x_reselect(struct ncr53c9x_softc *sc, int message,
int tagtype, int tagid);
static void ncr53c9x_reset(struct ncr53c9x_softc *sc);
static void ncr53c9x_sense(struct ncr53c9x_softc *sc,
struct ncr53c9x_ecb *ecb);
static void ncr53c9x_sched(struct ncr53c9x_softc *sc);
static void ncr53c9x_select(struct ncr53c9x_softc *sc,
struct ncr53c9x_ecb *ecb);
static void ncr53c9x_watch(void *arg);
static void ncr53c9x_wrfifo(struct ncr53c9x_softc *sc, uint8_t *p,
int len);
static struct ncr53c9x_ecb *ncr53c9x_get_ecb(struct ncr53c9x_softc *sc);
static struct ncr53c9x_linfo *ncr53c9x_lunsearch(struct ncr53c9x_tinfo *sc,
int64_t lun);
static inline void ncr53c9x_readregs(struct ncr53c9x_softc *sc);
static inline void ncr53c9x_setsync(struct ncr53c9x_softc *sc,
struct ncr53c9x_tinfo *ti);
static inline int ncr53c9x_stp2cpb(struct ncr53c9x_softc *sc,
int period);
#define NCR_RDFIFO_START 0
#define NCR_RDFIFO_CONTINUE 1
#define NCR_SET_COUNT(sc, size) do { \
NCR_WRITE_REG((sc), NCR_TCL, (size)); \
NCR_WRITE_REG((sc), NCR_TCM, (size) >> 8); \
if ((sc->sc_features & NCR_F_LARGEXFER) != 0) \
NCR_WRITE_REG((sc), NCR_TCH, (size) >> 16); \
if (sc->sc_rev == NCR_VARIANT_FAS366) \
NCR_WRITE_REG(sc, NCR_RCH, 0); \
} while (/* CONSTCOND */0)
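/*
* The macro above spreads the transfer count across the chip's
* byte-wide counter registers: low byte to NCR_TCL, middle byte
* to NCR_TCM, and -- only on variants with NCR_F_LARGEXFER -- the
* high byte to NCR_TCH; FAS366 additionally wants NCR_RCH cleared.
*/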
#ifndef mstohz
#define mstohz(ms) \
(((ms) < 0x20000) ? \
((ms +0u) / 1000u) * hz : \
((ms +0u) * hz) /1000u)
#endif
/*
* Names for the NCR53c9x variants, corresponding to the variant tags
* in ncr53c9xvar.h.
*/
static const char *ncr53c9x_variant_names[] = {
"ESP100",
"ESP100A",
"ESP200",
"NCR53C94",
"NCR53C96",
"ESP406",
"FAS408",
"FAS216",
"AM53C974",
"FAS366/HME",
"NCR53C90 (86C01)",
"FAS100A",
"FAS236",
};
/*
* Search linked list for LUN info by LUN id.
*/
static struct ncr53c9x_linfo *
ncr53c9x_lunsearch(struct ncr53c9x_tinfo *ti, int64_t lun)
{
struct ncr53c9x_linfo *li;
LIST_FOREACH(li, &ti->luns, link)
if (li->lun == lun)
return (li);
return (NULL);
}
/*
* Attach this instance, and then all the sub-devices.
*/
int
ncr53c9x_attach(struct ncr53c9x_softc *sc)
{
struct cam_devq *devq;
struct cam_sim *sim;
struct cam_path *path;
struct ncr53c9x_ecb *ecb;
int error, i;
if (NCR_LOCK_INITIALIZED(sc) == 0) {
device_printf(sc->sc_dev, "mutex not initialized\n");
return (ENXIO);
}
callout_init_mtx(&sc->sc_watchdog, &sc->sc_lock, 0);
/*
* Note, the front-end has set us up to print the chip variation.
*/
if (sc->sc_rev >= NCR_VARIANT_MAX) {
device_printf(sc->sc_dev, "unknown variant %d, devices not "
"attached\n", sc->sc_rev);
return (EINVAL);
}
device_printf(sc->sc_dev, "%s, %d MHz, SCSI ID %d\n",
ncr53c9x_variant_names[sc->sc_rev], sc->sc_freq, sc->sc_id);
sc->sc_ntarg = (sc->sc_rev == NCR_VARIANT_FAS366) ? 16 : 8;
/*
* Allocate SCSI message buffers.
* Front-ends can override allocation to avoid alignment
* handling in the DMA engines. Note that ncr53c9x_msgout()
* can request a 1 byte DMA transfer.
*/
if (sc->sc_omess == NULL) {
sc->sc_omess_self = 1;
sc->sc_omess = malloc(NCR_MAX_MSG_LEN, M_DEVBUF, M_NOWAIT);
if (sc->sc_omess == NULL) {
device_printf(sc->sc_dev,
"cannot allocate MSGOUT buffer\n");
return (ENOMEM);
}
} else
sc->sc_omess_self = 0;
if (sc->sc_imess == NULL) {
sc->sc_imess_self = 1;
sc->sc_imess = malloc(NCR_MAX_MSG_LEN + 1, M_DEVBUF, M_NOWAIT);
if (sc->sc_imess == NULL) {
device_printf(sc->sc_dev,
"cannot allocate MSGIN buffer\n");
error = ENOMEM;
goto fail_omess;
}
} else
sc->sc_imess_self = 0;
- sc->sc_tinfo = mallocarray(sc->sc_ntarg, sizeof(sc->sc_tinfo[0]),
+ sc->sc_tinfo = malloc(sc->sc_ntarg * sizeof(sc->sc_tinfo[0]),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->sc_tinfo == NULL) {
device_printf(sc->sc_dev,
"cannot allocate target info buffer\n");
error = ENOMEM;
goto fail_imess;
}
/*
* Treat NCR53C90 with the 86C01 DMA chip exactly as ESP100
* from now on.
*/
if (sc->sc_rev == NCR_VARIANT_NCR53C90_86C01)
sc->sc_rev = NCR_VARIANT_ESP100;
sc->sc_ccf = FREQTOCCF(sc->sc_freq);
/* The value *must not* be == 1. Make it 2. */
if (sc->sc_ccf == 1)
sc->sc_ccf = 2;
/*
* The recommended timeout is 250ms. This register is loaded
* with a value calculated as follows, from the docs:
*
* (timeout period) x (CLK frequency)
* reg = -------------------------------------
* 8192 x (Clock Conversion Factor)
*
* Since CCF has a linear relation to CLK, this generally computes
* to the constant of 153.
*/
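/*
 * For example, with a 25 MHz clock and a CCF of 5 this works out to
 * (250 * 1000 * 25) / (8192 * 5) = 152, and because CCF tracks CLK the
 * quotient stays in that neighborhood for other clock rates, too.
 */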
sc->sc_timeout = ((250 * 1000) * sc->sc_freq) / (8192 * sc->sc_ccf);
/* The CCF register only has 3 bits; 0 is actually 8. */
sc->sc_ccf &= 7;
/*
* Register with CAM.
*/
devq = cam_simq_alloc(sc->sc_ntarg);
if (devq == NULL) {
device_printf(sc->sc_dev, "cannot allocate device queue\n");
error = ENOMEM;
goto fail_tinfo;
}
sim = cam_sim_alloc(ncr53c9x_action, ncr53c9x_poll, "esp", sc,
device_get_unit(sc->sc_dev), &sc->sc_lock, 1, NCR_TAG_DEPTH, devq);
if (sim == NULL) {
device_printf(sc->sc_dev, "cannot allocate SIM entry\n");
error = ENOMEM;
goto fail_devq;
}
NCR_LOCK(sc);
if (xpt_bus_register(sim, sc->sc_dev, 0) != CAM_SUCCESS) {
device_printf(sc->sc_dev, "cannot register bus\n");
error = EIO;
goto fail_lock;
}
if (xpt_create_path(&path, NULL, cam_sim_path(sim),
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
device_printf(sc->sc_dev, "cannot create path\n");
error = EIO;
goto fail_bus;
}
if (xpt_register_async(AC_LOST_DEVICE, ncr53c9x_async, sim, path) !=
CAM_REQ_CMP) {
device_printf(sc->sc_dev, "cannot register async handler\n");
error = EIO;
goto fail_path;
}
sc->sc_sim = sim;
sc->sc_path = path;
/* Reset state and bus. */
#if 0
sc->sc_cfflags = sc->sc_dev.dv_cfdata->cf_flags;
#else
sc->sc_cfflags = 0;
#endif
sc->sc_state = 0;
ncr53c9x_init(sc, 1);
TAILQ_INIT(&sc->free_list);
if ((sc->ecb_array =
malloc(sizeof(struct ncr53c9x_ecb) * NCR_TAG_DEPTH, M_DEVBUF,
M_NOWAIT | M_ZERO)) == NULL) {
device_printf(sc->sc_dev, "cannot allocate ECB array\n");
error = ENOMEM;
goto fail_async;
}
for (i = 0; i < NCR_TAG_DEPTH; i++) {
ecb = &sc->ecb_array[i];
ecb->sc = sc;
ecb->tag_id = i;
callout_init_mtx(&ecb->ch, &sc->sc_lock, 0);
TAILQ_INSERT_HEAD(&sc->free_list, ecb, free_links);
}
callout_reset(&sc->sc_watchdog, 60 * hz, ncr53c9x_watch, sc);
NCR_UNLOCK(sc);
return (0);
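/* Unwind the partially completed attach in reverse order. */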
fail_async:
xpt_register_async(0, ncr53c9x_async, sim, path);
fail_path:
xpt_free_path(path);
fail_bus:
xpt_bus_deregister(cam_sim_path(sim));
fail_lock:
NCR_UNLOCK(sc);
cam_sim_free(sim, TRUE);
fail_devq:
cam_simq_free(devq);
fail_tinfo:
free(sc->sc_tinfo, M_DEVBUF);
fail_imess:
if (sc->sc_imess_self)
free(sc->sc_imess, M_DEVBUF);
fail_omess:
if (sc->sc_omess_self)
free(sc->sc_omess, M_DEVBUF);
return (error);
}
int
ncr53c9x_detach(struct ncr53c9x_softc *sc)
{
struct ncr53c9x_linfo *li, *nextli;
int t;
callout_drain(&sc->sc_watchdog);
NCR_LOCK(sc);
if (sc->sc_tinfo) {
/* Cancel all commands. */
ncr53c9x_clear(sc, CAM_REQ_ABORTED);
/* Free logical units. */
for (t = 0; t < sc->sc_ntarg; t++) {
for (li = LIST_FIRST(&sc->sc_tinfo[t].luns); li;
li = nextli) {
nextli = LIST_NEXT(li, link);
free(li, M_DEVBUF);
}
}
}
xpt_register_async(0, ncr53c9x_async, sc->sc_sim, sc->sc_path);
xpt_free_path(sc->sc_path);
xpt_bus_deregister(cam_sim_path(sc->sc_sim));
cam_sim_free(sc->sc_sim, TRUE);
NCR_UNLOCK(sc);
free(sc->ecb_array, M_DEVBUF);
free(sc->sc_tinfo, M_DEVBUF);
if (sc->sc_imess_self)
free(sc->sc_imess, M_DEVBUF);
if (sc->sc_omess_self)
free(sc->sc_omess, M_DEVBUF);
return (0);
}
/*
* This is the generic ncr53c9x reset function. It does not reset the SCSI
* bus, only this controller, but kills any on-going commands, and also stops
* and resets the DMA.
*
* After reset, registers are loaded with the defaults from the attach
* routine above.
*/
static void
ncr53c9x_reset(struct ncr53c9x_softc *sc)
{
NCR_LOCK_ASSERT(sc, MA_OWNED);
/* Reset DMA first. */
NCRDMA_RESET(sc);
/* Reset SCSI chip. */
NCRCMD(sc, NCRCMD_RSTCHIP);
NCRCMD(sc, NCRCMD_NOP);
DELAY(500);
/* Do these backwards, and fall through. */
switch (sc->sc_rev) {
case NCR_VARIANT_ESP406:
case NCR_VARIANT_FAS408:
NCR_WRITE_REG(sc, NCR_CFG5, sc->sc_cfg5 | NCRCFG5_SINT);
NCR_WRITE_REG(sc, NCR_CFG4, sc->sc_cfg4);
/* FALLTHROUGH */
case NCR_VARIANT_AM53C974:
case NCR_VARIANT_FAS100A:
case NCR_VARIANT_FAS216:
case NCR_VARIANT_FAS236:
case NCR_VARIANT_NCR53C94:
case NCR_VARIANT_NCR53C96:
case NCR_VARIANT_ESP200:
sc->sc_features |= NCR_F_HASCFG3;
NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
/* FALLTHROUGH */
case NCR_VARIANT_ESP100A:
sc->sc_features |= NCR_F_SELATN3;
if ((sc->sc_cfg2 & NCRCFG2_FE) != 0)
sc->sc_features |= NCR_F_LARGEXFER;
NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
/* FALLTHROUGH */
case NCR_VARIANT_ESP100:
NCR_WRITE_REG(sc, NCR_CFG1, sc->sc_cfg1);
NCR_WRITE_REG(sc, NCR_CCF, sc->sc_ccf);
NCR_WRITE_REG(sc, NCR_SYNCOFF, 0);
NCR_WRITE_REG(sc, NCR_TIMEOUT, sc->sc_timeout);
break;
case NCR_VARIANT_FAS366:
sc->sc_features |= NCR_F_HASCFG3 | NCR_F_FASTSCSI |
NCR_F_SELATN3 | NCR_F_LARGEXFER;
sc->sc_cfg3 = NCRFASCFG3_FASTCLK | NCRFASCFG3_OBAUTO;
if (sc->sc_id > 7)
sc->sc_cfg3 |= NCRFASCFG3_IDBIT3;
sc->sc_cfg3_fscsi = NCRFASCFG3_FASTSCSI;
NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
sc->sc_cfg2 = NCRCFG2_HMEFE | NCRCFG2_HME32;
NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
NCR_WRITE_REG(sc, NCR_CFG1, sc->sc_cfg1);
NCR_WRITE_REG(sc, NCR_CCF, sc->sc_ccf);
NCR_WRITE_REG(sc, NCR_SYNCOFF, 0);
NCR_WRITE_REG(sc, NCR_TIMEOUT, sc->sc_timeout);
break;
default:
device_printf(sc->sc_dev,
"unknown revision code, assuming ESP100\n");
NCR_WRITE_REG(sc, NCR_CFG1, sc->sc_cfg1);
NCR_WRITE_REG(sc, NCR_CCF, sc->sc_ccf);
NCR_WRITE_REG(sc, NCR_SYNCOFF, 0);
NCR_WRITE_REG(sc, NCR_TIMEOUT, sc->sc_timeout);
}
if (sc->sc_rev == NCR_VARIANT_AM53C974)
NCR_WRITE_REG(sc, NCR_AMDCFG4, sc->sc_cfg4);
#if 0
device_printf(sc->sc_dev, "%s: revision %d\n", __func__, sc->sc_rev);
device_printf(sc->sc_dev, "%s: cfg1 0x%x, cfg2 0x%x, cfg3 0x%x, ccf "
"0x%x, timeout 0x%x\n", __func__, sc->sc_cfg1, sc->sc_cfg2,
sc->sc_cfg3, sc->sc_ccf, sc->sc_timeout);
#endif
}
/*
* Clear all commands.
*/
static void
ncr53c9x_clear(struct ncr53c9x_softc *sc, cam_status result)
{
struct ncr53c9x_ecb *ecb;
int r;
NCR_LOCK_ASSERT(sc, MA_OWNED);
/* Cancel any active commands. */
sc->sc_state = NCR_CLEANING;
sc->sc_msgify = 0;
ecb = sc->sc_nexus;
if (ecb != NULL) {
ecb->ccb->ccb_h.status = result;
ncr53c9x_done(sc, ecb);
}
/* Cancel outstanding disconnected commands. */
for (r = 0; r < sc->sc_ntarg; r++)
ncr53c9x_clear_target(sc, r, result);
}
/*
* Clear all commands for a specific target.
*/
static void
ncr53c9x_clear_target(struct ncr53c9x_softc *sc, int target,
cam_status result)
{
struct ncr53c9x_ecb *ecb;
struct ncr53c9x_linfo *li;
int i;
NCR_LOCK_ASSERT(sc, MA_OWNED);
/* Cancel outstanding disconnected commands on each LUN. */
LIST_FOREACH(li, &sc->sc_tinfo[target].luns, link) {
ecb = li->untagged;
if (ecb != NULL) {
li->untagged = NULL;
/*
* XXX should we terminate a command
* that never reached the disk?
*/
li->busy = 0;
ecb->ccb->ccb_h.status = result;
ncr53c9x_done(sc, ecb);
}
for (i = 0; i < NCR_TAG_DEPTH; i++) {
ecb = li->queued[i];
if (ecb != NULL) {
li->queued[i] = NULL;
ecb->ccb->ccb_h.status = result;
ncr53c9x_done(sc, ecb);
}
}
li->used = 0;
}
}
/*
* Initialize ncr53c9x state machine.
*/
static void
ncr53c9x_init(struct ncr53c9x_softc *sc, int doreset)
{
struct ncr53c9x_tinfo *ti;
int r;
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_MISC(("[NCR_INIT(%d) %d] ", doreset, sc->sc_state));
if (sc->sc_state == 0) {
/* First time through; initialize. */
TAILQ_INIT(&sc->ready_list);
sc->sc_nexus = NULL;
memset(sc->sc_tinfo, 0, sizeof(*sc->sc_tinfo));
for (r = 0; r < sc->sc_ntarg; r++) {
LIST_INIT(&sc->sc_tinfo[r].luns);
}
} else
ncr53c9x_clear(sc, CAM_CMD_TIMEOUT);
/*
* Reset the chip to a known state.
*/
ncr53c9x_reset(sc);
sc->sc_flags = 0;
sc->sc_msgpriq = sc->sc_msgout = sc->sc_msgoutq = 0;
sc->sc_phase = sc->sc_prevphase = INVALID_PHASE;
/*
* If we're the first time through, set the default parameters
* for all targets. Otherwise we only clear their current transfer
* settings so we'll renegotiate their goal settings with the next
* command.
*/
if (sc->sc_state == 0) {
for (r = 0; r < sc->sc_ntarg; r++) {
ti = &sc->sc_tinfo[r];
/* XXX - config flags per target: low bits: no reselect; high bits: no synch */
ti->flags = ((sc->sc_minsync != 0 &&
(sc->sc_cfflags & (1 << ((r & 7) + 8))) == 0) ?
0 : T_SYNCHOFF) |
((sc->sc_cfflags & (1 << (r & 7))) == 0 ?
0 : T_RSELECTOFF);
ti->curr.period = ti->goal.period = 0;
ti->curr.offset = ti->goal.offset = 0;
ti->curr.width = ti->goal.width =
MSG_EXT_WDTR_BUS_8_BIT;
}
} else {
for (r = 0; r < sc->sc_ntarg; r++) {
ti = &sc->sc_tinfo[r];
ti->flags &= ~(T_SDTRSENT | T_WDTRSENT);
ti->curr.period = 0;
ti->curr.offset = 0;
ti->curr.width = MSG_EXT_WDTR_BUS_8_BIT;
}
}
if (doreset) {
sc->sc_state = NCR_SBR;
NCRCMD(sc, NCRCMD_RSTSCSI);
/* Give the bus a fighting chance to settle. */
DELAY(250000);
} else {
sc->sc_state = NCR_IDLE;
ncr53c9x_sched(sc);
}
}
/*
* Read the NCR registers, and save their contents for later use.
* NCR_STAT, NCR_STEP & NCR_INTR are mostly zeroed out when reading
* NCR_INTR - so make sure it is the last read.
*
* I think that (from reading the docs) most bits in these registers
* only make sense when the DMA CSR has an interrupt showing. Call only
* if an interrupt is pending.
*/
static inline void
ncr53c9x_readregs(struct ncr53c9x_softc *sc)
{
NCR_LOCK_ASSERT(sc, MA_OWNED);
sc->sc_espstat = NCR_READ_REG(sc, NCR_STAT);
/* Only the step bits are of interest. */
sc->sc_espstep = NCR_READ_REG(sc, NCR_STEP) & NCRSTEP_MASK;
if (sc->sc_rev == NCR_VARIANT_FAS366)
sc->sc_espstat2 = NCR_READ_REG(sc, NCR_STAT2);
sc->sc_espintr = NCR_READ_REG(sc, NCR_INTR);
/*
* Determine the SCSI bus phase, return either a real SCSI bus phase
* or some pseudo phase we use to detect certain exceptions.
*/
sc->sc_phase = (sc->sc_espintr & NCRINTR_DIS) ?
BUSFREE_PHASE : sc->sc_espstat & NCRSTAT_PHASE;
NCR_INTS(("regs[intr=%02x,stat=%02x,step=%02x,stat2=%02x] ",
sc->sc_espintr, sc->sc_espstat, sc->sc_espstep, sc->sc_espstat2));
}
/*
* Convert Synchronous Transfer Period to chip register Clock Per Byte value.
*/
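/*
 * The period argument is the SDTR transfer period factor (4 ns units);
 * e.g. a 25 MHz clock and a period factor of 50 (200 ns) give
 * (25 * 50) / 250 = 5 clocks per byte.
 */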
static inline int
ncr53c9x_stp2cpb(struct ncr53c9x_softc *sc, int period)
{
int v;
NCR_LOCK_ASSERT(sc, MA_OWNED);
v = (sc->sc_freq * period) / 250;
if (ncr53c9x_cpb2stp(sc, v) < period)
/* Correct round-down error. */
v++;
return (v);
}
static inline void
ncr53c9x_setsync(struct ncr53c9x_softc *sc, struct ncr53c9x_tinfo *ti)
{
uint8_t cfg3, syncoff, synctp;
NCR_LOCK_ASSERT(sc, MA_OWNED);
cfg3 = sc->sc_cfg3;
if (ti->curr.offset != 0) {
syncoff = ti->curr.offset;
synctp = ncr53c9x_stp2cpb(sc, ti->curr.period);
if (sc->sc_features & NCR_F_FASTSCSI) {
/*
* If the period is 200ns or less (ti->period <= 50),
* put the chip in Fast SCSI mode.
*/
if (ti->curr.period <= 50)
/*
* There are (at least) 4 variations of the
* configuration 3 register. The drive attach
* routine sets the appropriate bit to put the
* chip into Fast SCSI mode so that it doesn't
* have to be figured out here each time.
*/
cfg3 |= sc->sc_cfg3_fscsi;
}
/*
* Am53c974 requires different SYNCTP values when the
* FSCSI bit is off.
*/
if (sc->sc_rev == NCR_VARIANT_AM53C974 &&
(cfg3 & NCRAMDCFG3_FSCSI) == 0)
synctp--;
} else {
syncoff = 0;
synctp = 0;
}
if (ti->curr.width != MSG_EXT_WDTR_BUS_8_BIT) {
if (sc->sc_rev == NCR_VARIANT_FAS366)
cfg3 |= NCRFASCFG3_EWIDE;
}
if (sc->sc_features & NCR_F_HASCFG3)
NCR_WRITE_REG(sc, NCR_CFG3, cfg3);
NCR_WRITE_REG(sc, NCR_SYNCOFF, syncoff);
NCR_WRITE_REG(sc, NCR_SYNCTP, synctp);
}
/*
* Send a command to a target, set the driver state to NCR_SELECTING
* and let the caller take care of the rest.
*
* Keeping this as a function allows me to say that this may be done
* by DMA instead of programmed I/O soon.
*/
static void
ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
{
struct ncr53c9x_tinfo *ti;
uint8_t *cmd;
size_t dmasize;
int clen, error, selatn3, selatns;
int lun = ecb->ccb->ccb_h.target_lun;
int target = ecb->ccb->ccb_h.target_id;
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s(t%d,l%d,cmd:%x,tag:%x,%x)] ", __func__, target, lun,
ecb->cmd.cmd.opcode, ecb->tag[0], ecb->tag[1]));
ti = &sc->sc_tinfo[target];
sc->sc_state = NCR_SELECTING;
/*
* Schedule the callout now, since from here on we will go away
* expecting to come back due to an interrupt, and it is
* always possible that the interrupt may never happen.
*/
callout_reset(&ecb->ch, mstohz(ecb->timeout), ncr53c9x_callout, ecb);
/*
* The docs say the target register is never reset, and I
* can't think of a better place to set it.
*/
if (sc->sc_rev == NCR_VARIANT_FAS366) {
NCRCMD(sc, NCRCMD_FLUSH);
NCR_WRITE_REG(sc, NCR_SELID, target | NCR_BUSID_HMEXC32 |
NCR_BUSID_HMEENCID);
} else
NCR_WRITE_REG(sc, NCR_SELID, target);
/*
* If we are requesting sense, force a renegotiation if we are
* currently using anything different from asynchronous at 8 bit
* as the target might have lost our transfer negotiations.
*/
if ((ecb->flags & ECB_SENSE) != 0 && (ti->curr.offset != 0 ||
ti->curr.width != MSG_EXT_WDTR_BUS_8_BIT)) {
ti->curr.period = 0;
ti->curr.offset = 0;
ti->curr.width = MSG_EXT_WDTR_BUS_8_BIT;
}
ncr53c9x_setsync(sc, ti);
selatn3 = selatns = 0;
if (ecb->tag[0] != 0) {
if (sc->sc_features & NCR_F_SELATN3)
/* Use SELATN3 to send tag messages. */
selatn3 = 1;
else
/* We don't have SELATN3; use SELATNS to send tags. */
selatns = 1;
}
if (ti->curr.period != ti->goal.period ||
ti->curr.offset != ti->goal.offset ||
ti->curr.width != ti->goal.width) {
/* We have to use SELATNS to send sync/wide messages. */
selatn3 = 0;
selatns = 1;
}
cmd = (uint8_t *)&ecb->cmd.cmd;
if (selatn3) {
/* We'll use tags with SELATN3. */
clen = ecb->clen + 3;
cmd -= 3;
cmd[0] = MSG_IDENTIFY(lun, 1); /* msg[0] */
cmd[1] = ecb->tag[0]; /* msg[1] */
cmd[2] = ecb->tag[1]; /* msg[2] */
} else {
/* We don't have tags, or will send messages with SELATNS. */
clen = ecb->clen + 1;
cmd -= 1;
cmd[0] = MSG_IDENTIFY(lun, (ti->flags & T_RSELECTOFF) == 0);
}
if ((sc->sc_features & NCR_F_DMASELECT) && !selatns) {
/* Setup DMA transfer for command. */
dmasize = clen;
sc->sc_cmdlen = clen;
sc->sc_cmdp = cmd;
error = NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen, 0,
&dmasize);
if (error != 0)
goto cmd;
/* Program the SCSI counter. */
NCR_SET_COUNT(sc, dmasize);
/* Load the count in. */
NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA);
/* And get the target's attention. */
if (selatn3) {
sc->sc_msgout = SEND_TAG;
sc->sc_flags |= NCR_ATN;
NCRCMD(sc, NCRCMD_SELATN3 | NCRCMD_DMA);
} else
NCRCMD(sc, NCRCMD_SELATN | NCRCMD_DMA);
NCRDMA_GO(sc);
return;
}
cmd:
/*
* Who am I? This is where we tell the target that we are
* happy for it to disconnect etc.
*/
/* Now get the command into the FIFO. */
sc->sc_cmdlen = 0;
ncr53c9x_wrfifo(sc, cmd, clen);
/* And get the target's attention. */
if (selatns) {
NCR_MSGS(("SELATNS \n"));
/* Arbitrate, select and stop after IDENTIFY message. */
NCRCMD(sc, NCRCMD_SELATNS);
} else if (selatn3) {
sc->sc_msgout = SEND_TAG;
sc->sc_flags |= NCR_ATN;
NCRCMD(sc, NCRCMD_SELATN3);
} else
NCRCMD(sc, NCRCMD_SELATN);
}
static void
ncr53c9x_free_ecb(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
{
NCR_LOCK_ASSERT(sc, MA_OWNED);
ecb->flags = 0;
TAILQ_INSERT_TAIL(&sc->free_list, ecb, free_links);
}
static struct ncr53c9x_ecb *
ncr53c9x_get_ecb(struct ncr53c9x_softc *sc)
{
struct ncr53c9x_ecb *ecb;
NCR_LOCK_ASSERT(sc, MA_OWNED);
ecb = TAILQ_FIRST(&sc->free_list);
if (ecb) {
if (ecb->flags != 0)
panic("%s: ecb flags not cleared", __func__);
TAILQ_REMOVE(&sc->free_list, ecb, free_links);
ecb->flags = ECB_ALLOC;
bzero(&ecb->ccb, sizeof(struct ncr53c9x_ecb) -
offsetof(struct ncr53c9x_ecb, ccb));
}
return (ecb);
}
/*
* DRIVER FUNCTIONS CALLABLE FROM HIGHER LEVEL DRIVERS:
*/
/*
* Start a SCSI-command.
* This function is called by the higher level SCSI-driver to queue/run
* SCSI-commands.
*/
static void
ncr53c9x_action(struct cam_sim *sim, union ccb *ccb)
{
struct ccb_pathinq *cpi;
struct ccb_scsiio *csio;
struct ccb_trans_settings *cts;
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_spi *spi;
struct ncr53c9x_ecb *ecb;
struct ncr53c9x_softc *sc;
struct ncr53c9x_tinfo *ti;
int target;
sc = cam_sim_softc(sim);
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s %d]", __func__, ccb->ccb_h.func_code));
switch (ccb->ccb_h.func_code) {
case XPT_RESET_BUS:
ncr53c9x_init(sc, 1);
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_CALC_GEOMETRY:
cam_calc_geometry(&ccb->ccg, sc->sc_extended_geom);
break;
case XPT_PATH_INQ:
cpi = &ccb->cpi;
cpi->version_num = 1;
cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
cpi->hba_inquiry |=
(sc->sc_rev == NCR_VARIANT_FAS366) ? PI_WIDE_16 : 0;
cpi->target_sprt = 0;
cpi->hba_misc = 0;
cpi->hba_eng_cnt = 0;
cpi->max_target = sc->sc_ntarg - 1;
cpi->max_lun = 7;
cpi->initiator_id = sc->sc_id;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "NCR", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = 0;
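/* base_transfer_speed is in KB/s, i.e. 3.3 MB/s asynchronous. */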
cpi->base_transfer_speed = 3300;
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_2;
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->maxio = sc->sc_maxxfer;
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_GET_TRAN_SETTINGS:
cts = &ccb->cts;
ti = &sc->sc_tinfo[ccb->ccb_h.target_id];
scsi = &cts->proto_specific.scsi;
spi = &cts->xport_specific.spi;
cts->protocol = PROTO_SCSI;
cts->protocol_version = SCSI_REV_2;
cts->transport = XPORT_SPI;
cts->transport_version = 2;
if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
spi->sync_period = ti->curr.period;
spi->sync_offset = ti->curr.offset;
spi->bus_width = ti->curr.width;
if ((ti->flags & T_TAG) != 0) {
spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
} else {
spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
}
} else {
if ((ti->flags & T_SYNCHOFF) != 0) {
spi->sync_period = 0;
spi->sync_offset = 0;
} else {
spi->sync_period = sc->sc_minsync;
spi->sync_offset = sc->sc_maxoffset;
}
spi->bus_width = sc->sc_maxwidth;
spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
}
spi->valid =
CTS_SPI_VALID_BUS_WIDTH |
CTS_SPI_VALID_SYNC_RATE |
CTS_SPI_VALID_SYNC_OFFSET |
CTS_SPI_VALID_DISC;
scsi->valid = CTS_SCSI_VALID_TQ;
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_ABORT:
device_printf(sc->sc_dev, "XPT_ABORT called\n");
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_TERM_IO:
device_printf(sc->sc_dev, "XPT_TERM_IO called\n");
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_RESET_DEV:
case XPT_SCSI_IO:
if (ccb->ccb_h.target_id >= sc->sc_ntarg) {
ccb->ccb_h.status = CAM_PATH_INVALID;
goto done;
}
/* Get an ECB to use. */
ecb = ncr53c9x_get_ecb(sc);
/*
* This should never happen as we track resources
* in the mid-layer.
*/
if (ecb == NULL) {
xpt_freeze_simq(sim, 1);
ccb->ccb_h.status = CAM_REQUEUE_REQ;
device_printf(sc->sc_dev, "unable to allocate ecb\n");
goto done;
}
/* Initialize ecb. */
ecb->ccb = ccb;
ecb->timeout = ccb->ccb_h.timeout;
if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
ecb->flags |= ECB_RESET;
ecb->clen = 0;
ecb->dleft = 0;
} else {
csio = &ccb->csio;
if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
bcopy(csio->cdb_io.cdb_ptr, &ecb->cmd.cmd,
csio->cdb_len);
else
bcopy(csio->cdb_io.cdb_bytes, &ecb->cmd.cmd,
csio->cdb_len);
ecb->clen = csio->cdb_len;
ecb->daddr = csio->data_ptr;
ecb->dleft = csio->dxfer_len;
}
ecb->stat = 0;
TAILQ_INSERT_TAIL(&sc->ready_list, ecb, chain);
ecb->flags |= ECB_READY;
if (sc->sc_state == NCR_IDLE)
ncr53c9x_sched(sc);
return;
case XPT_SET_TRAN_SETTINGS:
cts = &ccb->cts;
target = ccb->ccb_h.target_id;
ti = &sc->sc_tinfo[target];
scsi = &cts->proto_specific.scsi;
spi = &cts->xport_specific.spi;
if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
if ((sc->sc_cfflags & (1<<((target & 7) + 16))) == 0 &&
(scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)) {
NCR_MISC(("%s: target %d: tagged queuing\n",
device_get_nameunit(sc->sc_dev), target));
ti->flags |= T_TAG;
} else
ti->flags &= ~T_TAG;
}
if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
NCR_MISC(("%s: target %d: wide negotiation\n",
device_get_nameunit(sc->sc_dev), target));
ti->goal.width = spi->bus_width;
}
if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) {
NCR_MISC(("%s: target %d: sync period negotiation\n",
device_get_nameunit(sc->sc_dev), target));
ti->goal.period = spi->sync_period;
}
if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
NCR_MISC(("%s: target %d: sync offset negotiation\n",
device_get_nameunit(sc->sc_dev), target));
ti->goal.offset = spi->sync_offset;
}
ccb->ccb_h.status = CAM_REQ_CMP;
break;
default:
device_printf(sc->sc_dev, "Unhandled function code %d\n",
ccb->ccb_h.func_code);
ccb->ccb_h.status = CAM_PROVIDE_FAIL;
}
done:
xpt_done(ccb);
}
/*
* Used when interrupt driven I/O is not allowed, e.g. during boot.
*/
static void
ncr53c9x_poll(struct cam_sim *sim)
{
struct ncr53c9x_softc *sc;
sc = cam_sim_softc(sim);
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s] ", __func__));
if (NCRDMA_ISINTR(sc))
ncr53c9x_intr1(sc);
}
/*
* Asynchronous notification handler
*/
static void
ncr53c9x_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg)
{
struct ncr53c9x_softc *sc;
struct ncr53c9x_tinfo *ti;
int target;
sc = cam_sim_softc(cbarg);
NCR_LOCK_ASSERT(sc, MA_OWNED);
switch (code) {
case AC_LOST_DEVICE:
target = xpt_path_target_id(path);
if (target < 0 || target >= sc->sc_ntarg)
break;
/* Cancel outstanding disconnected commands. */
ncr53c9x_clear_target(sc, target, CAM_REQ_ABORTED);
/* Set the default parameters for the target. */
ti = &sc->sc_tinfo[target];
/* XXX - config flags per target: low bits: no reselect; high bits: no synch */
ti->flags = ((sc->sc_minsync != 0 &&
(sc->sc_cfflags & (1 << ((target & 7) + 8))) == 0) ?
0 : T_SYNCHOFF) |
((sc->sc_cfflags & (1 << (target & 7))) == 0 ?
0 : T_RSELECTOFF);
ti->curr.period = ti->goal.period = 0;
ti->curr.offset = ti->goal.offset = 0;
ti->curr.width = ti->goal.width = MSG_EXT_WDTR_BUS_8_BIT;
break;
}
}
/*
* LOW LEVEL SCSI UTILITIES
*/
/*
* Schedule a SCSI operation. This has now been pulled out of the interrupt
* handler so that we may call it from ncr53c9x_action and ncr53c9x_done.
* This may save us an unnecessary interrupt just to get things going.
* Should only be called when state == NCR_IDLE and with sc_lock held.
*/
static void
ncr53c9x_sched(struct ncr53c9x_softc *sc)
{
struct ncr53c9x_ecb *ecb;
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
int lun, tag;
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s] ", __func__));
if (sc->sc_state != NCR_IDLE)
panic("%s: not IDLE (state=%d)", __func__, sc->sc_state);
/*
* Find the first ecb in the ready queue that is for a target/lunit
* combination that is not busy.
*/
TAILQ_FOREACH(ecb, &sc->ready_list, chain) {
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
lun = ecb->ccb->ccb_h.target_lun;
/* Select type of tag for this command */
if ((ti->flags & (T_RSELECTOFF | T_TAG)) != T_TAG)
tag = 0;
else if ((ecb->flags & ECB_SENSE) != 0)
tag = 0;
else if ((ecb->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0)
tag = 0;
else if (ecb->ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
tag = 0;
else
tag = ecb->ccb->csio.tag_action;
li = TINFO_LUN(ti, lun);
if (li == NULL) {
/* Initialize LUN info and add to list. */
li = malloc(sizeof(*li), M_DEVBUF, M_NOWAIT | M_ZERO);
if (li == NULL)
continue;
li->lun = lun;
LIST_INSERT_HEAD(&ti->luns, li, link);
if (lun < NCR_NLUN)
ti->lun[lun] = li;
}
li->last_used = time_second;
if (tag == 0) {
/* Try to issue this as an untagged command. */
if (li->untagged == NULL)
li->untagged = ecb;
}
if (li->untagged != NULL) {
tag = 0;
if ((li->busy != 1) && li->used == 0) {
/*
* We need to issue this untagged command
* now.
*/
ecb = li->untagged;
} else {
/* not ready, yet */
continue;
}
}
ecb->tag[0] = tag;
if (tag != 0) {
li->queued[ecb->tag_id] = ecb;
ecb->tag[1] = ecb->tag_id;
li->used++;
}
if (li->untagged != NULL && (li->busy != 1)) {
li->busy = 1;
TAILQ_REMOVE(&sc->ready_list, ecb, chain);
ecb->flags &= ~ECB_READY;
sc->sc_nexus = ecb;
ncr53c9x_select(sc, ecb);
break;
}
if (li->untagged == NULL && tag != 0) {
TAILQ_REMOVE(&sc->ready_list, ecb, chain);
ecb->flags &= ~ECB_READY;
sc->sc_nexus = ecb;
ncr53c9x_select(sc, ecb);
break;
} else
NCR_TRACE(("[%s %d:%d busy] \n", __func__,
ecb->ccb->ccb_h.target_id,
ecb->ccb->ccb_h.target_lun));
}
}
static void
ncr53c9x_sense(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
{
union ccb *ccb = ecb->ccb;
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
struct scsi_request_sense *ss = (void *)&ecb->cmd.cmd;
int lun;
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s] ", __func__));
lun = ccb->ccb_h.target_lun;
ti = &sc->sc_tinfo[ccb->ccb_h.target_id];
/* Next, setup a REQUEST SENSE command block. */
memset(ss, 0, sizeof(*ss));
ss->opcode = REQUEST_SENSE;
ss->byte2 = ccb->ccb_h.target_lun << SCSI_CMD_LUN_SHIFT;
ss->length = sizeof(struct scsi_sense_data);
ecb->clen = sizeof(*ss);
memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
ecb->daddr = (uint8_t *)&ccb->csio.sense_data;
ecb->dleft = sizeof(struct scsi_sense_data);
ecb->flags |= ECB_SENSE;
ecb->timeout = NCR_SENSE_TIMEOUT;
ti->senses++;
li = TINFO_LUN(ti, lun);
if (li->busy)
li->busy = 0;
ncr53c9x_dequeue(sc, ecb);
li->untagged = ecb; /* Must be executed first to fix C/A. */
li->busy = 2;
if (ecb == sc->sc_nexus)
ncr53c9x_select(sc, ecb);
else {
TAILQ_INSERT_HEAD(&sc->ready_list, ecb, chain);
ecb->flags |= ECB_READY;
if (sc->sc_state == NCR_IDLE)
ncr53c9x_sched(sc);
}
}
/*
* POST PROCESSING OF SCSI_CMD (usually current)
*/
static void
ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
{
union ccb *ccb = ecb->ccb;
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
int lun, sense_returned;
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s(status:%x)] ", __func__, ccb->ccb_h.status));
ti = &sc->sc_tinfo[ccb->ccb_h.target_id];
lun = ccb->ccb_h.target_lun;
li = TINFO_LUN(ti, lun);
callout_stop(&ecb->ch);
/*
* Now, if we've come here with no error code, i.e. we've kept the
* initial CAM_REQ_CMP, and the status code signals that we should
* check sense, we'll need to set up a request sense cmd block and
* push the command back into the ready queue *before* any other
* commands for this target/lunit, else we lose the sense info.
* We don't support chk sense conditions for the request sense cmd.
*/
if (ccb->ccb_h.status == CAM_REQ_CMP) {
ccb->csio.scsi_status = ecb->stat;
if ((ecb->flags & ECB_ABORT) != 0)
ccb->ccb_h.status = CAM_CMD_TIMEOUT;
else if ((ecb->flags & ECB_SENSE) != 0 &&
(ecb->stat != SCSI_STATUS_CHECK_COND)) {
ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR |
CAM_AUTOSNS_VALID;
sense_returned = sizeof(ccb->csio.sense_data) -
ecb->dleft;
if (sense_returned < ccb->csio.sense_len)
ccb->csio.sense_resid = ccb->csio.sense_len -
sense_returned;
else
ccb->csio.sense_resid = 0;
} else if (ecb->stat == SCSI_STATUS_CHECK_COND) {
if ((ecb->flags & ECB_SENSE) != 0)
ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
else {
/* First, save the return values. */
ccb->csio.resid = ecb->dleft;
if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) ==
0) {
ncr53c9x_sense(sc, ecb);
return;
}
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
}
} else
ccb->csio.resid = ecb->dleft;
if (ecb->stat == SCSI_STATUS_QUEUE_FULL)
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
else if (ecb->stat == SCSI_STATUS_BUSY)
ccb->ccb_h.status = CAM_SCSI_BUSY;
} else if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
ccb->ccb_h.status |= CAM_DEV_QFRZN;
xpt_freeze_devq(ccb->ccb_h.path, 1);
}
#ifdef NCR53C9X_DEBUG
if ((ncr53c9x_debug & NCR_SHOWTRAC) != 0) {
if (ccb->csio.resid != 0)
printf("resid=%d ", ccb->csio.resid);
if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
printf("sense=0x%02x\n",
ccb->csio.sense_data.error_code);
else
printf("status SCSI=0x%x CAM=0x%x\n",
ccb->csio.scsi_status, ccb->ccb_h.status);
}
#endif
/*
* Remove the ECB from whatever queue it's on.
*/
ncr53c9x_dequeue(sc, ecb);
if (ecb == sc->sc_nexus) {
sc->sc_nexus = NULL;
if (sc->sc_state != NCR_CLEANING) {
sc->sc_state = NCR_IDLE;
ncr53c9x_sched(sc);
}
}
if ((ccb->ccb_h.status & CAM_SEL_TIMEOUT) != 0) {
/* Selection timeout -- discard this LUN if empty. */
if (li->untagged == NULL && li->used == 0) {
if (lun < NCR_NLUN)
ti->lun[lun] = NULL;
LIST_REMOVE(li, link);
free(li, M_DEVBUF);
}
}
ncr53c9x_free_ecb(sc, ecb);
ti->cmds++;
xpt_done(ccb);
}
static void
ncr53c9x_dequeue(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
{
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
int64_t lun;
NCR_LOCK_ASSERT(sc, MA_OWNED);
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
lun = ecb->ccb->ccb_h.target_lun;
li = TINFO_LUN(ti, lun);
#ifdef DIAGNOSTIC
if (li == NULL || li->lun != lun)
panic("%s: lun %llx for ecb %p does not exist", __func__,
(long long)lun, ecb);
#endif
if (li->untagged == ecb) {
li->busy = 0;
li->untagged = NULL;
}
if (ecb->tag[0] && li->queued[ecb->tag[1]] != NULL) {
#ifdef DIAGNOSTIC
if (li->queued[ecb->tag[1]] != NULL &&
(li->queued[ecb->tag[1]] != ecb))
panic("%s: slot %d for lun %llx has %p instead of ecb "
"%p", __func__, ecb->tag[1], (long long)lun,
li->queued[ecb->tag[1]], ecb);
#endif
li->queued[ecb->tag[1]] = NULL;
li->used--;
}
ecb->tag[0] = ecb->tag[1] = 0;
if ((ecb->flags & ECB_READY) != 0) {
ecb->flags &= ~ECB_READY;
TAILQ_REMOVE(&sc->ready_list, ecb, chain);
}
}
/*
* INTERRUPT/PROTOCOL ENGINE
*/
/*
* Schedule an outgoing message by prioritizing it, and asserting
* attention on the bus. We can only do this when we are the initiator
* else there will be an illegal command interrupt.
*/
#define ncr53c9x_sched_msgout(m) do { \
NCR_MSGS(("ncr53c9x_sched_msgout %x %d", m, __LINE__)); \
NCRCMD(sc, NCRCMD_SETATN); \
sc->sc_flags |= NCR_ATN; \
sc->sc_msgpriq |= (m); \
} while (/* CONSTCOND */0)
static void
ncr53c9x_flushfifo(struct ncr53c9x_softc *sc)
{
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s] ", __func__));
NCRCMD(sc, NCRCMD_FLUSH);
if (sc->sc_phase == COMMAND_PHASE ||
sc->sc_phase == MESSAGE_OUT_PHASE)
DELAY(2);
}
static int
ncr53c9x_rdfifo(struct ncr53c9x_softc *sc, int how)
{
int i, n;
uint8_t *ibuf;
NCR_LOCK_ASSERT(sc, MA_OWNED);
switch (how) {
case NCR_RDFIFO_START:
ibuf = sc->sc_imess;
sc->sc_imlen = 0;
break;
case NCR_RDFIFO_CONTINUE:
ibuf = sc->sc_imess + sc->sc_imlen;
break;
default:
panic("%s: bad flag", __func__);
/* NOTREACHED */
}
/*
* XXX buffer (sc_imess) size for message
*/
n = NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF;
if (sc->sc_rev == NCR_VARIANT_FAS366) {
n *= 2;
for (i = 0; i < n; i++)
ibuf[i] = NCR_READ_REG(sc, NCR_FIFO);
if (sc->sc_espstat2 & NCRFAS_STAT2_ISHUTTLE) {
NCR_WRITE_REG(sc, NCR_FIFO, 0);
ibuf[i++] = NCR_READ_REG(sc, NCR_FIFO);
NCR_READ_REG(sc, NCR_FIFO);
ncr53c9x_flushfifo(sc);
}
} else
for (i = 0; i < n; i++)
ibuf[i] = NCR_READ_REG(sc, NCR_FIFO);
sc->sc_imlen += i;
#if 0
#ifdef NCR53C9X_DEBUG
NCR_TRACE(("\n[rdfifo %s (%d):",
(how == NCR_RDFIFO_START) ? "start" : "cont", (int)sc->sc_imlen));
if ((ncr53c9x_debug & NCR_SHOWTRAC) != 0) {
for (i = 0; i < sc->sc_imlen; i++)
printf(" %02x", sc->sc_imess[i]);
printf("]\n");
}
#endif
#endif
return (sc->sc_imlen);
}
static void
ncr53c9x_wrfifo(struct ncr53c9x_softc *sc, uint8_t *p, int len)
{
int i;
NCR_LOCK_ASSERT(sc, MA_OWNED);
#ifdef NCR53C9X_DEBUG
NCR_MSGS(("[wrfifo(%d):", len));
if ((ncr53c9x_debug & NCR_SHOWMSGS) != 0) {
for (i = 0; i < len; i++)
printf(" %02x", p[i]);
printf("]\n");
}
#endif
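/* The FAS366 pairs each data byte with a zero pad byte in the FIFO. */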
for (i = 0; i < len; i++) {
NCR_WRITE_REG(sc, NCR_FIFO, p[i]);
if (sc->sc_rev == NCR_VARIANT_FAS366)
NCR_WRITE_REG(sc, NCR_FIFO, 0);
}
}
static int
ncr53c9x_reselect(struct ncr53c9x_softc *sc, int message, int tagtype,
int tagid)
{
struct ncr53c9x_ecb *ecb = NULL;
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
uint8_t lun, selid, target;
NCR_LOCK_ASSERT(sc, MA_OWNED);
if (sc->sc_rev == NCR_VARIANT_FAS366)
target = sc->sc_selid;
else {
/*
* The SCSI chip made a snapshot of the data bus
* while the reselection was being negotiated.
* This enables us to determine which target did
* the reselect.
*/
selid = sc->sc_selid & ~(1 << sc->sc_id);
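/* More than one bit left set means a garbled reselection ID. */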
if (selid & (selid - 1)) {
device_printf(sc->sc_dev, "reselect with invalid "
"selid %02x; sending DEVICE RESET\n", selid);
goto reset;
}
target = ffs(selid) - 1;
}
lun = message & 0x07;
/*
* Search wait queue for disconnected command.
* The list should be short, so I haven't bothered with
* any more sophisticated structures than a simple
* singly linked list.
*/
ti = &sc->sc_tinfo[target];
li = TINFO_LUN(ti, lun);
/*
* We can get as far as the LUN with the IDENTIFY
* message. Check to see if we're running an
* untagged command. Otherwise ack the IDENTIFY
* and wait for a tag message.
*/
if (li != NULL) {
if (li->untagged != NULL && li->busy)
ecb = li->untagged;
else if (tagtype != MSG_SIMPLE_Q_TAG) {
/* Wait for tag to come by. */
sc->sc_state = NCR_IDENTIFIED;
return (0);
} else if (tagtype)
ecb = li->queued[tagid];
}
if (ecb == NULL) {
device_printf(sc->sc_dev, "reselect from target %d lun %d "
"tag %x:%x with no nexus; sending ABORT\n",
target, lun, tagtype, tagid);
goto abort;
}
/* Make this nexus active again. */
sc->sc_state = NCR_CONNECTED;
sc->sc_nexus = ecb;
ncr53c9x_setsync(sc, ti);
if (ecb->flags & ECB_RESET)
ncr53c9x_sched_msgout(SEND_DEV_RESET);
else if (ecb->flags & ECB_ABORT)
ncr53c9x_sched_msgout(SEND_ABORT);
/* Do an implicit RESTORE POINTERS. */
sc->sc_dp = ecb->daddr;
sc->sc_dleft = ecb->dleft;
return (0);
reset:
ncr53c9x_sched_msgout(SEND_DEV_RESET);
return (1);
abort:
ncr53c9x_sched_msgout(SEND_ABORT);
return (1);
}
/* From NetBSD; these should go into CAM at some point. */
#define MSG_ISEXTENDED(m) ((m) == MSG_EXTENDED)
#define MSG_IS1BYTE(m) \
((!MSG_ISEXTENDED(m) && (m) < 0x20) || MSG_ISIDENTIFY(m))
#define MSG_IS2BYTE(m) (((m) & 0xf0) == 0x20)
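/*
 * Quick sanity check on an incoming message; e.g. an extended SDTR
 * message is the 5 bytes { MSG_EXTENDED, 3, MSG_EXT_SDTR, period,
 * offset }, which passes the third test below since len == p[1] + 2.
 */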
static inline int
__verify_msg_format(uint8_t *p, int len)
{
if (len == 1 && MSG_IS1BYTE(p[0]))
return (1);
if (len == 2 && MSG_IS2BYTE(p[0]))
return (1);
if (len >= 3 && MSG_ISEXTENDED(p[0]) &&
len == p[1] + 2)
return (1);
return (0);
}
/*
* Get an incoming message as initiator.
*
* The SCSI bus must already be in MESSAGE_IN_PHASE and there is a
* byte in the FIFO.
*/
static void
ncr53c9x_msgin(struct ncr53c9x_softc *sc)
{
struct ncr53c9x_ecb *ecb;
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
uint8_t *pb;
int len, lun;
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s(curmsglen:%ld)] ", __func__, (long)sc->sc_imlen));
if (sc->sc_imlen == 0) {
device_printf(sc->sc_dev, "msgin: no msg byte available\n");
return;
}
/*
* Prepare for a new message. A message should (according
* to the SCSI standard) be transmitted in one single
* MESSAGE_IN_PHASE. If we have been in some other phase,
* then this is a new message.
*/
if (sc->sc_prevphase != MESSAGE_IN_PHASE &&
sc->sc_state != NCR_RESELECTED) {
device_printf(sc->sc_dev, "phase change, dropping message, "
"prev %d, state %d\n", sc->sc_prevphase, sc->sc_state);
sc->sc_flags &= ~NCR_DROP_MSGI;
sc->sc_imlen = 0;
}
/*
* If we're going to reject the message, don't bother storing
* the incoming bytes. But still, we need to ACK them.
*/
if ((sc->sc_flags & NCR_DROP_MSGI) != 0) {
NCRCMD(sc, NCRCMD_MSGOK);
device_printf(sc->sc_dev, "<dropping msg byte %x>",
sc->sc_imess[sc->sc_imlen]);
return;
}
if (sc->sc_imlen >= NCR_MAX_MSG_LEN) {
ncr53c9x_sched_msgout(SEND_REJECT);
sc->sc_flags |= NCR_DROP_MSGI;
} else {
switch (sc->sc_state) {
/*
* If the received message is the first one of a reselection,
* the first byte is the selid and the message follows.
*/
case NCR_RESELECTED:
pb = sc->sc_imess + 1;
len = sc->sc_imlen - 1;
break;
default:
pb = sc->sc_imess;
len = sc->sc_imlen;
}
if (__verify_msg_format(pb, len))
goto gotit;
}
/* Acknowledge what we have so far. */
NCRCMD(sc, NCRCMD_MSGOK);
return;
gotit:
NCR_MSGS(("gotmsg(%x) state %d", sc->sc_imess[0], sc->sc_state));
/*
* We got a complete message, flush the imess.
* XXX nobody uses imlen below.
*/
sc->sc_imlen = 0;
/*
* Now we should have a complete message (1 byte, 2 byte
* and moderately long extended messages). We only handle
* extended messages which total length is shorter than
* NCR_MAX_MSG_LEN. Longer messages will be amputated.
*/
switch (sc->sc_state) {
case NCR_CONNECTED:
ecb = sc->sc_nexus;
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
switch (sc->sc_imess[0]) {
case MSG_CMDCOMPLETE:
NCR_MSGS(("cmdcomplete "));
if (sc->sc_dleft < 0) {
xpt_print_path(ecb->ccb->ccb_h.path);
printf("got %ld extra bytes\n",
-(long)sc->sc_dleft);
sc->sc_dleft = 0;
}
ecb->dleft = (ecb->flags & ECB_TENTATIVE_DONE) ?
0 : sc->sc_dleft;
if ((ecb->flags & ECB_SENSE) == 0)
ecb->ccb->csio.resid = ecb->dleft;
sc->sc_state = NCR_CMDCOMPLETE;
break;
case MSG_MESSAGE_REJECT:
NCR_MSGS(("msg reject (msgout=%x) ", sc->sc_msgout));
switch (sc->sc_msgout) {
case SEND_TAG:
/*
* Target does not like tagged queuing.
* - Flush the command queue
* - Disable tagged queuing for the target
* - Dequeue ecb from the queued array.
*/
device_printf(sc->sc_dev, "tagged queuing "
"rejected: target %d\n",
ecb->ccb->ccb_h.target_id);
NCR_MSGS(("(rejected sent tag)"));
NCRCMD(sc, NCRCMD_FLUSH);
DELAY(1);
ti->flags &= ~T_TAG;
lun = ecb->ccb->ccb_h.target_lun;
li = TINFO_LUN(ti, lun);
if (ecb->tag[0] &&
li->queued[ecb->tag[1]] != NULL) {
li->queued[ecb->tag[1]] = NULL;
li->used--;
}
ecb->tag[0] = ecb->tag[1] = 0;
li->untagged = ecb;
li->busy = 1;
break;
case SEND_SDTR:
device_printf(sc->sc_dev, "sync transfer "
"rejected: target %d\n",
ecb->ccb->ccb_h.target_id);
ti->flags &= ~T_SDTRSENT;
ti->curr.period = ti->goal.period = 0;
ti->curr.offset = ti->goal.offset = 0;
ncr53c9x_setsync(sc, ti);
break;
case SEND_WDTR:
device_printf(sc->sc_dev, "wide transfer "
"rejected: target %d\n",
ecb->ccb->ccb_h.target_id);
ti->flags &= ~T_WDTRSENT;
ti->curr.width = ti->goal.width =
MSG_EXT_WDTR_BUS_8_BIT;
ncr53c9x_setsync(sc, ti);
break;
case SEND_INIT_DET_ERR:
goto abort;
}
break;
case MSG_NOOP:
NCR_MSGS(("noop "));
break;
case MSG_HEAD_OF_Q_TAG:
case MSG_SIMPLE_Q_TAG:
case MSG_ORDERED_Q_TAG:
NCR_MSGS(("TAG %x:%x",
sc->sc_imess[0], sc->sc_imess[1]));
break;
case MSG_DISCONNECT:
NCR_MSGS(("disconnect "));
ti->dconns++;
sc->sc_state = NCR_DISCONNECT;
/*
* Mark the fact that all bytes have moved. The
* target may not bother to do a SAVE POINTERS
* at this stage. This flag will set the residual
* count to zero on MSG COMPLETE.
*/
if (sc->sc_dleft == 0)
ecb->flags |= ECB_TENTATIVE_DONE;
break;
case MSG_SAVEDATAPOINTER:
NCR_MSGS(("save datapointer "));
ecb->daddr = sc->sc_dp;
ecb->dleft = sc->sc_dleft;
break;
case MSG_RESTOREPOINTERS:
NCR_MSGS(("restore datapointer "));
sc->sc_dp = ecb->daddr;
sc->sc_dleft = ecb->dleft;
break;
case MSG_IGN_WIDE_RESIDUE:
NCR_MSGS(("ignore wide residue (%d bytes)",
sc->sc_imess[1]));
if (sc->sc_imess[1] != 1) {
xpt_print_path(ecb->ccb->ccb_h.path);
printf("unexpected MESSAGE IGNORE WIDE "
"RESIDUE (%d bytes); sending REJECT\n",
sc->sc_imess[1]);
goto reject;
}
/*
* If there was a last transfer of an even number of
* bytes, wipe the "done" memory and adjust by one
* byte (sc->sc_imess[1]).
*/
len = sc->sc_dleft - ecb->dleft;
if (len != 0 && (len & 1) == 0) {
ecb->flags &= ~ECB_TENTATIVE_DONE;
sc->sc_dp = (char *)sc->sc_dp - 1;
sc->sc_dleft--;
}
break;
case MSG_EXTENDED:
NCR_MSGS(("extended(%x) ", sc->sc_imess[2]));
switch (sc->sc_imess[2]) {
case MSG_EXT_SDTR:
NCR_MSGS(("SDTR period %d, offset %d ",
sc->sc_imess[3], sc->sc_imess[4]));
if (sc->sc_imess[1] != 3)
goto reject;
ti->curr.period = sc->sc_imess[3];
ti->curr.offset = sc->sc_imess[4];
if (sc->sc_minsync == 0 ||
ti->curr.offset == 0 ||
ti->curr.period > 124) {
#if 0
#ifdef NCR53C9X_DEBUG
xpt_print_path(ecb->ccb->ccb_h.path);
printf("async mode\n");
#endif
#endif
if ((ti->flags & T_SDTRSENT) == 0) {
/*
* target initiated negotiation
*/
ti->curr.offset = 0;
ncr53c9x_sched_msgout(
SEND_SDTR);
}
} else {
ti->curr.period =
ncr53c9x_cpb2stp(sc,
ncr53c9x_stp2cpb(sc,
ti->curr.period));
if ((ti->flags & T_SDTRSENT) == 0) {
/*
* target initiated negotiation
*/
if (ti->curr.period <
sc->sc_minsync)
ti->curr.period =
sc->sc_minsync;
if (ti->curr.offset >
sc->sc_maxoffset)
ti->curr.offset =
sc->sc_maxoffset;
ncr53c9x_sched_msgout(
SEND_SDTR);
}
}
ti->flags &= ~T_SDTRSENT;
ti->goal.period = ti->curr.period;
ti->goal.offset = ti->curr.offset;
ncr53c9x_setsync(sc, ti);
break;
case MSG_EXT_WDTR:
NCR_MSGS(("wide mode %d ", sc->sc_imess[3]));
ti->curr.width = sc->sc_imess[3];
if (!(ti->flags & T_WDTRSENT))
/*
* target initiated negotiation
*/
ncr53c9x_sched_msgout(SEND_WDTR);
ti->flags &= ~T_WDTRSENT;
ti->goal.width = ti->curr.width;
ncr53c9x_setsync(sc, ti);
break;
default:
xpt_print_path(ecb->ccb->ccb_h.path);
printf("unrecognized MESSAGE EXTENDED 0x%x;"
" sending REJECT\n", sc->sc_imess[2]);
goto reject;
}
break;
default:
NCR_MSGS(("ident "));
xpt_print_path(ecb->ccb->ccb_h.path);
printf("unrecognized MESSAGE 0x%x; sending REJECT\n",
sc->sc_imess[0]);
/* FALLTHROUGH */
reject:
ncr53c9x_sched_msgout(SEND_REJECT);
break;
}
break;
case NCR_IDENTIFIED:
/*
* IDENTIFY message was received and queue tag is expected
* now.
*/
if ((sc->sc_imess[0] != MSG_SIMPLE_Q_TAG) ||
(sc->sc_msgify == 0)) {
device_printf(sc->sc_dev, "TAG reselect without "
"IDENTIFY; MSG %x; sending DEVICE RESET\n",
sc->sc_imess[0]);
goto reset;
}
(void)ncr53c9x_reselect(sc, sc->sc_msgify,
sc->sc_imess[0], sc->sc_imess[1]);
break;
case NCR_RESELECTED:
if (MSG_ISIDENTIFY(sc->sc_imess[1]))
sc->sc_msgify = sc->sc_imess[1];
else {
device_printf(sc->sc_dev, "reselect without IDENTIFY;"
" MSG %x; sending DEVICE RESET\n", sc->sc_imess[1]);
goto reset;
}
(void)ncr53c9x_reselect(sc, sc->sc_msgify, 0, 0);
break;
default:
device_printf(sc->sc_dev, "unexpected MESSAGE IN; "
"sending DEVICE RESET\n");
/* FALLTHROUGH */
reset:
ncr53c9x_sched_msgout(SEND_DEV_RESET);
break;
abort:
ncr53c9x_sched_msgout(SEND_ABORT);
}
/* If we have more messages to send set ATN. */
if (sc->sc_msgpriq) {
NCRCMD(sc, NCRCMD_SETATN);
sc->sc_flags |= NCR_ATN;
}
/* Acknowledge last message byte. */
NCRCMD(sc, NCRCMD_MSGOK);
/* Done, reset message pointer. */
sc->sc_flags &= ~NCR_DROP_MSGI;
sc->sc_imlen = 0;
}
/*
* Send the highest priority, scheduled message.
*/
static void
ncr53c9x_msgout(struct ncr53c9x_softc *sc)
{
struct ncr53c9x_tinfo *ti;
struct ncr53c9x_ecb *ecb;
size_t size;
int error;
#ifdef NCR53C9X_DEBUG
int i;
#endif
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_TRACE(("[%s(priq:%x, prevphase:%x)]", __func__, sc->sc_msgpriq,
sc->sc_prevphase));
/*
* XXX - the NCR_ATN flag is not in sync with the actual ATN
* condition on the SCSI bus. The 53c9x chip
* automatically turns off ATN before sending the
* message byte. (See also the comment below in the
* default case when picking out a message to send.)
*/
if (sc->sc_flags & NCR_ATN) {
if (sc->sc_prevphase != MESSAGE_OUT_PHASE) {
new:
NCRCMD(sc, NCRCMD_FLUSH);
#if 0
DELAY(1);
#endif
sc->sc_msgoutq = 0;
sc->sc_omlen = 0;
}
} else {
if (sc->sc_prevphase == MESSAGE_OUT_PHASE) {
ncr53c9x_sched_msgout(sc->sc_msgoutq);
goto new;
} else
device_printf(sc->sc_dev, "at line %d: unexpected "
"MESSAGE OUT phase\n", __LINE__);
}
if (sc->sc_omlen == 0) {
/* Pick up highest priority message. */
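/* (The x & -x idiom isolates the lowest set bit == highest priority.) */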
sc->sc_msgout = sc->sc_msgpriq & -sc->sc_msgpriq;
sc->sc_msgoutq |= sc->sc_msgout;
sc->sc_msgpriq &= ~sc->sc_msgout;
sc->sc_omlen = 1; /* "Default" message len */
switch (sc->sc_msgout) {
case SEND_SDTR:
ecb = sc->sc_nexus;
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
sc->sc_omess[0] = MSG_EXTENDED;
sc->sc_omess[1] = MSG_EXT_SDTR_LEN;
sc->sc_omess[2] = MSG_EXT_SDTR;
sc->sc_omess[3] = ti->goal.period;
sc->sc_omess[4] = ti->goal.offset;
sc->sc_omlen = 5;
break;
case SEND_WDTR:
ecb = sc->sc_nexus;
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
sc->sc_omess[0] = MSG_EXTENDED;
sc->sc_omess[1] = MSG_EXT_WDTR_LEN;
sc->sc_omess[2] = MSG_EXT_WDTR;
sc->sc_omess[3] = ti->goal.width;
sc->sc_omlen = 4;
break;
case SEND_IDENTIFY:
if (sc->sc_state != NCR_CONNECTED)
device_printf(sc->sc_dev, "at line %d: no "
"nexus\n", __LINE__);
ecb = sc->sc_nexus;
sc->sc_omess[0] =
MSG_IDENTIFY(ecb->ccb->ccb_h.target_lun, 0);
break;
case SEND_TAG:
if (sc->sc_state != NCR_CONNECTED)
device_printf(sc->sc_dev, "at line %d: no "
"nexus\n", __LINE__);
ecb = sc->sc_nexus;
sc->sc_omess[0] = ecb->tag[0];
sc->sc_omess[1] = ecb->tag[1];
sc->sc_omlen = 2;
break;
case SEND_DEV_RESET:
sc->sc_flags |= NCR_ABORTING;
sc->sc_omess[0] = MSG_BUS_DEV_RESET;
ecb = sc->sc_nexus;
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
ti->curr.period = 0;
ti->curr.offset = 0;
ti->curr.width = MSG_EXT_WDTR_BUS_8_BIT;
break;
case SEND_PARITY_ERROR:
sc->sc_omess[0] = MSG_PARITY_ERROR;
break;
case SEND_ABORT:
sc->sc_flags |= NCR_ABORTING;
sc->sc_omess[0] = MSG_ABORT;
break;
case SEND_INIT_DET_ERR:
sc->sc_omess[0] = MSG_INITIATOR_DET_ERR;
break;
case SEND_REJECT:
sc->sc_omess[0] = MSG_MESSAGE_REJECT;
break;
default:
/*
* We normally do not get here, since the chip
* automatically turns off ATN before the last
* byte of a message is sent to the target.
* However, if the target rejects our (multi-byte)
* message early by switching to MSG IN phase
* ATN remains on, so the target may return to
* MSG OUT phase. If there are no scheduled messages
* left we send a NO-OP.
*
* XXX - Note that this leaves no useful purpose for
* the NCR_ATN flag.
*/
sc->sc_flags &= ~NCR_ATN;
sc->sc_omess[0] = MSG_NOOP;
}
sc->sc_omp = sc->sc_omess;
}
#ifdef NCR53C9X_DEBUG
if ((ncr53c9x_debug & NCR_SHOWMSGS) != 0) {
NCR_MSGS(("<msgout:"));
for (i = 0; i < sc->sc_omlen; i++)
NCR_MSGS((" %02x", sc->sc_omess[i]));
NCR_MSGS(("> "));
}
#endif
if (sc->sc_rev != NCR_VARIANT_FAS366) {
/* (Re)send the message. */
size = ulmin(sc->sc_omlen, sc->sc_maxxfer);
error = NCRDMA_SETUP(sc, &sc->sc_omp, &sc->sc_omlen, 0, &size);
if (error != 0)
goto cmd;
/* Program the SCSI counter. */
NCR_SET_COUNT(sc, size);
/* Load the count in and start the message-out transfer. */
NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA);
NCRCMD(sc, NCRCMD_TRANS | NCRCMD_DMA);
NCRDMA_GO(sc);
return;
}
cmd:
/*
* XXX FIFO size
*/
sc->sc_cmdlen = 0;
ncr53c9x_flushfifo(sc);
ncr53c9x_wrfifo(sc, sc->sc_omp, sc->sc_omlen);
NCRCMD(sc, NCRCMD_TRANS);
}
void
ncr53c9x_intr(void *arg)
{
struct ncr53c9x_softc *sc = arg;
if (!NCRDMA_ISINTR(sc))
return;
NCR_LOCK(sc);
ncr53c9x_intr1(sc);
NCR_UNLOCK(sc);
}
/*
* This is the most critical part of the driver, and has to know
* how to deal with *all* error conditions and phases from the SCSI
* bus. If there are no errors and the DMA was active, then call the
* DMA pseudo-interrupt handler. If this returns 1, then that was it
* and we can return from here without further processing.
*
* Most of this needs verifying.
*/
static void
ncr53c9x_intr1(struct ncr53c9x_softc *sc)
{
struct ncr53c9x_ecb *ecb;
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
struct timeval cur, wait;
size_t size;
int error, i, nfifo;
uint8_t msg;
NCR_LOCK_ASSERT(sc, MA_OWNED);
NCR_INTS(("[ncr53c9x_intr: state %d]", sc->sc_state));
again:
/* and what do the registers say... */
ncr53c9x_readregs(sc);
/*
* At the moment, only a SCSI Bus Reset or Illegal
* Command is classed as an error. A disconnect is a
* valid condition, and we let the code check whether the
* "NCR_BUSFREE_OK" flag was set before declaring it
* an error.
*
* Also, the status register tells us about "Gross
* Errors" and "Parity errors". Only the Gross Error
* is really bad, and the parity errors are dealt
* with later.
*
* TODO
* If there are too many parity errors, go to slow
* cable mode?
*/
if ((sc->sc_espintr & NCRINTR_SBR) != 0) {
if ((NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) != 0) {
NCRCMD(sc, NCRCMD_FLUSH);
DELAY(1);
}
if (sc->sc_state != NCR_SBR) {
device_printf(sc->sc_dev, "SCSI bus reset\n");
ncr53c9x_init(sc, 0); /* Restart everything. */
return;
}
#if 0
/*XXX*/ device_printf(sc->sc_dev, "<expected bus reset: "
"[intr %x, stat %x, step %d]>\n",
sc->sc_espintr, sc->sc_espstat, sc->sc_espstep);
#endif
if (sc->sc_nexus != NULL)
panic("%s: nexus in reset state",
device_get_nameunit(sc->sc_dev));
goto sched;
}
ecb = sc->sc_nexus;
#define NCRINTR_ERR (NCRINTR_SBR | NCRINTR_ILL)
if (sc->sc_espintr & NCRINTR_ERR ||
sc->sc_espstat & NCRSTAT_GE) {
if ((sc->sc_espstat & NCRSTAT_GE) != 0) {
/* Gross Error; no target? */
if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) {
NCRCMD(sc, NCRCMD_FLUSH);
DELAY(1);
}
if (sc->sc_state == NCR_CONNECTED ||
sc->sc_state == NCR_SELECTING) {
ecb->ccb->ccb_h.status = CAM_SEL_TIMEOUT;
ncr53c9x_done(sc, ecb);
}
return;
}
if ((sc->sc_espintr & NCRINTR_ILL) != 0) {
if ((sc->sc_flags & NCR_EXPECT_ILLCMD) != 0) {
/*
* Eat away "Illegal command" interrupt
* on a ESP100 caused by a re-selection
* while we were trying to select
* another target.
*/
#ifdef NCR53C9X_DEBUG
device_printf(sc->sc_dev, "ESP100 work-around "
"activated\n");
#endif
sc->sc_flags &= ~NCR_EXPECT_ILLCMD;
return;
}
/* Illegal command, out of sync? */
device_printf(sc->sc_dev, "illegal command: 0x%x "
"(state %d, phase %x, prevphase %x)\n",
sc->sc_lastcmd,
sc->sc_state, sc->sc_phase, sc->sc_prevphase);
if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) {
NCRCMD(sc, NCRCMD_FLUSH);
DELAY(1);
}
goto reset;
}
}
sc->sc_flags &= ~NCR_EXPECT_ILLCMD;
/*
* Call if DMA is active.
*
* If DMA_INTR returns true, then maybe go 'round the loop
* again in case there is no more DMA queued, but a phase
* change is expected.
*/
if (NCRDMA_ISACTIVE(sc)) {
if (NCRDMA_INTR(sc) == -1) {
device_printf(sc->sc_dev, "DMA error; resetting\n");
goto reset;
}
/* If DMA active here, then go back to work... */
if (NCRDMA_ISACTIVE(sc))
return;
if ((sc->sc_espstat & NCRSTAT_TC) == 0) {
/*
* DMA not completed. If we cannot find an
* acceptable explanation, print a diagnostic.
*/
if (sc->sc_state == NCR_SELECTING)
/*
* This can happen if we are reselected
* while using DMA to select a target.
*/
/*void*/;
else if (sc->sc_prevphase == MESSAGE_OUT_PHASE) {
/*
* Our (multi-byte) message (eg SDTR) was
* interrupted by the target to send
* a MSG REJECT.
* Print diagnostic if current phase
* is not MESSAGE IN.
*/
if (sc->sc_phase != MESSAGE_IN_PHASE)
device_printf(sc->sc_dev,"!TC on MSGOUT"
" [intr %x, stat %x, step %d]"
" prevphase %x, resid %lx\n",
sc->sc_espintr,
sc->sc_espstat,
sc->sc_espstep,
sc->sc_prevphase,
(u_long)sc->sc_omlen);
} else if (sc->sc_dleft == 0) {
/*
* The DMA operation was started for
* a DATA transfer. Print a diagnostic
* if the DMA counter and TC bit
* appear to be out of sync.
*
* XXX This is fatal and usually means that
* the DMA engine is hopelessly out of
* sync with reality. A disk is likely
* getting spammed at this point.
*/
device_printf(sc->sc_dev, "!TC on DATA XFER"
" [intr %x, stat %x, step %d]"
" prevphase %x, resid %x\n",
sc->sc_espintr,
sc->sc_espstat,
sc->sc_espstep,
sc->sc_prevphase,
ecb ? ecb->dleft : -1);
goto reset;
}
}
}
/*
* Check for less serious errors.
*/
if ((sc->sc_espstat & NCRSTAT_PE) != 0) {
device_printf(sc->sc_dev, "SCSI bus parity error\n");
if (sc->sc_prevphase == MESSAGE_IN_PHASE)
ncr53c9x_sched_msgout(SEND_PARITY_ERROR);
else
ncr53c9x_sched_msgout(SEND_INIT_DET_ERR);
}
if ((sc->sc_espintr & NCRINTR_DIS) != 0) {
sc->sc_msgify = 0;
NCR_INTS(("<DISC [intr %x, stat %x, step %d]>",
sc->sc_espintr,sc->sc_espstat,sc->sc_espstep));
if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) {
NCRCMD(sc, NCRCMD_FLUSH);
#if 0
DELAY(1);
#endif
}
/*
* This command must (apparently) be issued within
* 250 ms of a disconnect. So here you are...
*/
NCRCMD(sc, NCRCMD_ENSEL);
switch (sc->sc_state) {
case NCR_RESELECTED:
goto sched;
case NCR_SELECTING:
ecb->ccb->ccb_h.status = CAM_SEL_TIMEOUT;
/* Selection timeout -- discard all LUNs if empty. */
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
li = LIST_FIRST(&ti->luns);
while (li != NULL) {
if (li->untagged == NULL && li->used == 0) {
if (li->lun < NCR_NLUN)
ti->lun[li->lun] = NULL;
LIST_REMOVE(li, link);
free(li, M_DEVBUF);
/*
* Restart the search at the beginning.
*/
li = LIST_FIRST(&ti->luns);
continue;
}
li = LIST_NEXT(li, link);
}
goto finish;
case NCR_CONNECTED:
if (ecb != NULL) {
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
if ((ti->flags & T_SDTRSENT) != 0) {
xpt_print_path(ecb->ccb->ccb_h.path);
printf("sync nego not completed!\n");
ti->flags &= ~T_SDTRSENT;
ti->curr.period = ti->goal.period = 0;
ti->curr.offset = ti->goal.offset = 0;
ncr53c9x_setsync(sc, ti);
}
if ((ti->flags & T_WDTRSENT) != 0) {
xpt_print_path(ecb->ccb->ccb_h.path);
printf("wide nego not completed!\n");
ti->flags &= ~T_WDTRSENT;
ti->curr.width = ti->goal.width =
MSG_EXT_WDTR_BUS_8_BIT;
ncr53c9x_setsync(sc, ti);
}
}
/* It may be OK to disconnect. */
if ((sc->sc_flags & NCR_ABORTING) == 0) {
/*
* Section 5.1.1 of the SCSI 2 spec
* suggests issuing a REQUEST SENSE
* following an unexpected disconnect.
* Some devices go into a contingent
* allegiance condition when
* disconnecting, and this is necessary
* to clean up their state.
*/
device_printf(sc->sc_dev, "unexpected "
"disconnect [state %d, intr %x, stat %x, "
"phase(c %x, p %x)]; ", sc->sc_state,
sc->sc_espintr, sc->sc_espstat,
sc->sc_phase, sc->sc_prevphase);
/*
* XXX This will cause a chip reset and will
* prevent us from finding out the real
* problem with the device. However, it's
* necessary until a way can be found to
* safely cancel the DMA that is in
* progress.
*/
if (1 || (ecb->flags & ECB_SENSE) != 0) {
printf("resetting\n");
goto reset;
}
printf("sending REQUEST SENSE\n");
callout_stop(&ecb->ch);
ncr53c9x_sense(sc, ecb);
return;
} else if (ecb != NULL &&
(ecb->flags & ECB_RESET) != 0) {
ecb->ccb->ccb_h.status = CAM_REQ_CMP;
goto finish;
}
ecb->ccb->ccb_h.status = CAM_CMD_TIMEOUT;
goto finish;
case NCR_DISCONNECT:
sc->sc_nexus = NULL;
goto sched;
case NCR_CMDCOMPLETE:
ecb->ccb->ccb_h.status = CAM_REQ_CMP;
goto finish;
}
}
switch (sc->sc_state) {
case NCR_SBR:
device_printf(sc->sc_dev, "waiting for Bus Reset to happen\n");
return;
case NCR_RESELECTED:
/*
* We must be continuing a message?
*/
device_printf(sc->sc_dev, "unhandled reselect continuation, "
"state %d, intr %02x\n", sc->sc_state, sc->sc_espintr);
goto reset;
break;
case NCR_IDENTIFIED:
ecb = sc->sc_nexus;
if (sc->sc_phase != MESSAGE_IN_PHASE) {
i = NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF;
/*
* Things are seriously screwed up.
* Pull the brakes, i.e. reset.
*/
device_printf(sc->sc_dev, "target didn't send tag: %d "
"bytes in FIFO\n", i);
/* Drain and display FIFO. */
while (i-- > 0)
printf("[%d] ", NCR_READ_REG(sc, NCR_FIFO));
goto reset;
} else
goto msgin;
case NCR_IDLE:
case NCR_SELECTING:
ecb = sc->sc_nexus;
if (sc->sc_espintr & NCRINTR_RESEL) {
sc->sc_msgpriq = sc->sc_msgout = sc->sc_msgoutq = 0;
sc->sc_flags = 0;
/*
* If we're trying to select a
* target ourselves, push our command
* back into the ready list.
*/
if (sc->sc_state == NCR_SELECTING) {
NCR_INTS(("backoff selector "));
callout_stop(&ecb->ch);
ncr53c9x_dequeue(sc, ecb);
TAILQ_INSERT_HEAD(&sc->ready_list, ecb, chain);
ecb->flags |= ECB_READY;
ecb = sc->sc_nexus = NULL;
}
sc->sc_state = NCR_RESELECTED;
if (sc->sc_phase != MESSAGE_IN_PHASE) {
/*
* Things are seriously screwed up.
* Pull the brakes, i.e. reset
*/
device_printf(sc->sc_dev, "target didn't "
"identify\n");
goto reset;
}
/*
* The C90 only inhibits FIFO writes until reselection
* is complete instead of waiting until the interrupt
* status register has been read. So, if the reselect
* happens while we were entering command bytes (for
* another target) some of those bytes can appear in
* the FIFO here, after the interrupt is taken.
*
* To remedy this situation, pull the Selection ID
* and Identify message from the FIFO directly, and
* ignore any extraneous FIFO contents. Also, set
* a flag that allows one Illegal Command Interrupt
* to occur which the chip also generates as a result
* of writing to the FIFO during a reselect.
*/
if (sc->sc_rev == NCR_VARIANT_ESP100) {
nfifo =
NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF;
sc->sc_imess[0] = NCR_READ_REG(sc, NCR_FIFO);
sc->sc_imess[1] = NCR_READ_REG(sc, NCR_FIFO);
sc->sc_imlen = 2;
if (nfifo != 2) {
/* Flush the rest. */
NCRCMD(sc, NCRCMD_FLUSH);
}
sc->sc_flags |= NCR_EXPECT_ILLCMD;
if (nfifo > 2)
nfifo = 2; /* We fixed it... */
} else
nfifo = ncr53c9x_rdfifo(sc, NCR_RDFIFO_START);
if (nfifo != 2) {
device_printf(sc->sc_dev, "RESELECT: %d bytes "
"in FIFO! [intr %x, stat %x, step %d, "
"prevphase %x]\n",
nfifo,
sc->sc_espintr,
sc->sc_espstat,
sc->sc_espstep,
sc->sc_prevphase);
goto reset;
}
sc->sc_selid = sc->sc_imess[0];
NCR_INTS(("selid=%02x ", sc->sc_selid));
/* Handle IDENTIFY message. */
ncr53c9x_msgin(sc);
if (sc->sc_state != NCR_CONNECTED &&
sc->sc_state != NCR_IDENTIFIED) {
/* IDENTIFY fail?! */
device_printf(sc->sc_dev, "identify failed, "
"state %d, intr %02x\n", sc->sc_state,
sc->sc_espintr);
goto reset;
}
goto shortcut; /* i.e. next phase expected soon */
}
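/* Both Function Complete and Bus Service set: the (re)selection sequence ran to completion. */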
#define NCRINTR_DONE (NCRINTR_FC | NCRINTR_BS)
if ((sc->sc_espintr & NCRINTR_DONE) == NCRINTR_DONE) {
/*
* Arbitration won; examine the `step' register
* to determine how far the selection could progress.
*/
if (ecb == NULL) {
/*
* When doing path inquiry during boot, the
* FAS100A triggers a stray interrupt, which
* we just ignore instead of panicking.
*/
if (sc->sc_state == NCR_IDLE &&
sc->sc_espstep == 0)
return;
panic("%s: no nexus", __func__);
}
ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id];
switch (sc->sc_espstep) {
case 0:
/*
* The target did not respond with a
* message out phase - probably an old
* device that doesn't recognize ATN.
* Clear ATN and just continue, the
* target should be in the command
* phase.
* XXX check for command phase?
*/
NCRCMD(sc, NCRCMD_RSTATN);
break;
case 1:
if (ti->curr.period == ti->goal.period &&
ti->curr.offset == ti->goal.offset &&
ti->curr.width == ti->goal.width &&
ecb->tag[0] == 0) {
device_printf(sc->sc_dev, "step 1 "
"and no negotiation to perform "
"or tag to send\n");
goto reset;
}
if (sc->sc_phase != MESSAGE_OUT_PHASE) {
device_printf(sc->sc_dev, "step 1 "
"but not in MESSAGE_OUT_PHASE\n");
goto reset;
}
sc->sc_prevphase = MESSAGE_OUT_PHASE; /* XXX */
if (ecb->flags & ECB_RESET) {
/*
* A DEVICE RESET was scheduled and
* ATNS used. As SEND_DEV_RESET has
* the highest priority, the target
* will reset and disconnect and we
* will end up in ncr53c9x_done w/o
* negotiating or sending a TAG. So
* we just break here in order to
* avoid warnings about negotiation
* not having completed.
*/
ncr53c9x_sched_msgout(SEND_DEV_RESET);
break;
}
if (ti->curr.width != ti->goal.width) {
ti->flags |= T_WDTRSENT | T_SDTRSENT;
ncr53c9x_sched_msgout(SEND_WDTR |
SEND_SDTR);
}
if (ti->curr.period != ti->goal.period ||
ti->curr.offset != ti->goal.offset) {
ti->flags |= T_SDTRSENT;
ncr53c9x_sched_msgout(SEND_SDTR);
}
if (ecb->tag[0] != 0)
/* Could not do ATN3 so send TAG. */
ncr53c9x_sched_msgout(SEND_TAG);
break;
case 3:
/*
* Grr, this is supposed to mean
* "target left command phase prematurely".
* It seems to happen regularly when
* sync mode is on.
* Look at FIFO to see if command went out.
* (Timing problems?)
*/
if (sc->sc_features & NCR_F_DMASELECT) {
if (sc->sc_cmdlen == 0) {
/* Hope for the best... */
break;
}
} else if ((NCR_READ_REG(sc, NCR_FFLAG) &
NCRFIFO_FF) == 0) {
/* Hope for the best... */
break;
}
xpt_print_path(ecb->ccb->ccb_h.path);
printf("selection failed; %d left in FIFO "
"[intr %x, stat %x, step %d]\n",
NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF,
sc->sc_espintr, sc->sc_espstat,
sc->sc_espstep);
NCRCMD(sc, NCRCMD_FLUSH);
ncr53c9x_sched_msgout(SEND_ABORT);
return;
case 2:
/* Select stuck at Command Phase. */
NCRCMD(sc, NCRCMD_FLUSH);
break;
case 4:
if (sc->sc_features & NCR_F_DMASELECT &&
sc->sc_cmdlen != 0) {
xpt_print_path(ecb->ccb->ccb_h.path);
printf("select; %lu left in DMA buffer "
"[intr %x, stat %x, step %d]\n",
(u_long)sc->sc_cmdlen,
sc->sc_espintr,
sc->sc_espstat,
sc->sc_espstep);
}
/* So far, everything went fine. */
break;
}
sc->sc_prevphase = INVALID_PHASE; /* ??? */
/* Do an implicit RESTORE POINTERS. */
sc->sc_dp = ecb->daddr;
sc->sc_dleft = ecb->dleft;
sc->sc_state = NCR_CONNECTED;
break;
} else {
device_printf(sc->sc_dev, "unexpected status after "
"select: [intr %x, stat %x, step %x]\n",
sc->sc_espintr, sc->sc_espstat, sc->sc_espstep);
NCRCMD(sc, NCRCMD_FLUSH);
DELAY(1);
goto reset;
}
if (sc->sc_state == NCR_IDLE) {
device_printf(sc->sc_dev, "stray interrupt\n");
return;
}
break;
case NCR_CONNECTED:
if ((sc->sc_flags & NCR_ICCS) != 0) {
/* "Initiate Command Complete Steps" in progress */
sc->sc_flags &= ~NCR_ICCS;
if ((sc->sc_espintr & NCRINTR_DONE) == 0) {
device_printf(sc->sc_dev, "ICCS: "
": [intr %x, stat %x, step %x]\n",
sc->sc_espintr, sc->sc_espstat,
sc->sc_espstep);
}
ncr53c9x_rdfifo(sc, NCR_RDFIFO_START);
if (sc->sc_imlen < 2)
device_printf(sc->sc_dev, "can't get status, "
"only %d bytes\n", (int)sc->sc_imlen);
ecb->stat = sc->sc_imess[sc->sc_imlen - 2];
msg = sc->sc_imess[sc->sc_imlen - 1];
NCR_PHASE(("<stat:(%x,%x)>", ecb->stat, msg));
if (msg == MSG_CMDCOMPLETE) {
ecb->dleft =
(ecb->flags & ECB_TENTATIVE_DONE) ?
0 : sc->sc_dleft;
if ((ecb->flags & ECB_SENSE) == 0)
ecb->ccb->csio.resid = ecb->dleft;
sc->sc_state = NCR_CMDCOMPLETE;
} else
device_printf(sc->sc_dev, "STATUS_PHASE: "
"msg %d\n", msg);
sc->sc_imlen = 0;
NCRCMD(sc, NCRCMD_MSGOK);
goto shortcut; /* i.e. wait for disconnect */
}
break;
default:
device_printf(sc->sc_dev, "invalid state: %d [intr %x, "
"phase(c %x, p %x)]\n", sc->sc_state,
sc->sc_espintr, sc->sc_phase, sc->sc_prevphase);
goto reset;
}
/*
* Driver is now in state NCR_CONNECTED, i.e. we
* have a current command working the SCSI bus.
*/
if (sc->sc_state != NCR_CONNECTED || ecb == NULL)
panic("%s: no nexus", __func__);
switch (sc->sc_phase) {
case MESSAGE_OUT_PHASE:
NCR_PHASE(("MESSAGE_OUT_PHASE "));
ncr53c9x_msgout(sc);
sc->sc_prevphase = MESSAGE_OUT_PHASE;
break;
case MESSAGE_IN_PHASE:
msgin:
NCR_PHASE(("MESSAGE_IN_PHASE "));
if ((sc->sc_espintr & NCRINTR_BS) != 0) {
if ((sc->sc_rev != NCR_VARIANT_FAS366) ||
(sc->sc_espstat2 & NCRFAS_STAT2_EMPTY) == 0) {
NCRCMD(sc, NCRCMD_FLUSH);
}
sc->sc_flags |= NCR_WAITI;
NCRCMD(sc, NCRCMD_TRANS);
} else if ((sc->sc_espintr & NCRINTR_FC) != 0) {
if ((sc->sc_flags & NCR_WAITI) == 0) {
device_printf(sc->sc_dev, "MSGIN: unexpected "
"FC bit: [intr %x, stat %x, step %x]\n",
sc->sc_espintr, sc->sc_espstat,
sc->sc_espstep);
}
sc->sc_flags &= ~NCR_WAITI;
ncr53c9x_rdfifo(sc,
(sc->sc_prevphase == sc->sc_phase) ?
NCR_RDFIFO_CONTINUE : NCR_RDFIFO_START);
ncr53c9x_msgin(sc);
} else
device_printf(sc->sc_dev, "MSGIN: weird bits: "
"[intr %x, stat %x, step %x]\n",
sc->sc_espintr, sc->sc_espstat, sc->sc_espstep);
sc->sc_prevphase = MESSAGE_IN_PHASE;
goto shortcut; /* i.e. expect data to be ready */
case COMMAND_PHASE:
/*
* Send the command block. Normally we don't see this
* phase because the SEL_ATN command takes care of
* all this. However, we end up here if either the
* target or we wanted to exchange some more messages
* first (e.g. to start negotiations).
*/
NCR_PHASE(("COMMAND_PHASE 0x%02x (%d) ",
ecb->cmd.cmd.opcode, ecb->clen));
if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) {
NCRCMD(sc, NCRCMD_FLUSH);
#if 0
DELAY(1);
#endif
}
/*
* If we have more messages to send, e.g. WDTR or SDTR
* after we've sent a TAG, set ATN so we'll go back to
* MESSAGE_OUT_PHASE.
*/
if (sc->sc_msgpriq) {
NCRCMD(sc, NCRCMD_SETATN);
sc->sc_flags |= NCR_ATN;
}
if (sc->sc_features & NCR_F_DMASELECT) {
/* Setup DMA transfer for command. */
size = ecb->clen;
sc->sc_cmdlen = size;
sc->sc_cmdp = (void *)&ecb->cmd.cmd;
error = NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen,
0, &size);
if (error != 0)
goto cmd;
/* Program the SCSI counter. */
NCR_SET_COUNT(sc, size);
/* Load the count in. */
NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA);
/* Start the command transfer. */
NCRCMD(sc, NCRCMD_TRANS | NCRCMD_DMA);
NCRDMA_GO(sc);
sc->sc_prevphase = COMMAND_PHASE;
break;
}
cmd:
sc->sc_cmdlen = 0;
ncr53c9x_wrfifo(sc, (uint8_t *)&ecb->cmd.cmd, ecb->clen);
NCRCMD(sc, NCRCMD_TRANS);
sc->sc_prevphase = COMMAND_PHASE;
break;
case DATA_OUT_PHASE:
NCR_PHASE(("DATA_OUT_PHASE [%ld] ", (long)sc->sc_dleft));
sc->sc_prevphase = DATA_OUT_PHASE;
NCRCMD(sc, NCRCMD_FLUSH);
size = ulmin(sc->sc_dleft, sc->sc_maxxfer);
error = NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 0, &size);
goto setup_xfer;
case DATA_IN_PHASE:
NCR_PHASE(("DATA_IN_PHASE "));
sc->sc_prevphase = DATA_IN_PHASE;
if (sc->sc_rev == NCR_VARIANT_ESP100)
NCRCMD(sc, NCRCMD_FLUSH);
size = ulmin(sc->sc_dleft, sc->sc_maxxfer);
error = NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 1, &size);
setup_xfer:
if (error != 0) {
switch (error) {
case EFBIG:
ecb->ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
break;
case EINPROGRESS:
panic("%s: cannot deal with deferred DMA",
__func__);
case EINVAL:
ecb->ccb->ccb_h.status |= CAM_REQ_INVALID;
break;
case ENOMEM:
ecb->ccb->ccb_h.status |= CAM_REQUEUE_REQ;
break;
default:
ecb->ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
}
goto finish;
}
/* Target returned to data phase: wipe "done" memory. */
ecb->flags &= ~ECB_TENTATIVE_DONE;
/* Program the SCSI counter. */
NCR_SET_COUNT(sc, size);
/* Load the count in. */
NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA);
/*
* Note that if `size' is 0, we've already transceived
* all the bytes we want but we're still in DATA PHASE.
* Apparently, the device needs padding. Also, a
* transfer size of 0 means "maximum" to the chip
* DMA logic.
*/
NCRCMD(sc,
(size == 0 ? NCRCMD_TRPAD : NCRCMD_TRANS) | NCRCMD_DMA);
NCRDMA_GO(sc);
return;
case STATUS_PHASE:
NCR_PHASE(("STATUS_PHASE "));
sc->sc_flags |= NCR_ICCS;
NCRCMD(sc, NCRCMD_ICCS);
sc->sc_prevphase = STATUS_PHASE;
goto shortcut; /* i.e. expect status results soon */
case INVALID_PHASE:
break;
default:
device_printf(sc->sc_dev,
"unexpected bus phase; resetting\n");
goto reset;
}
return;
reset:
ncr53c9x_init(sc, 1);
return;
finish:
ncr53c9x_done(sc, ecb);
return;
sched:
sc->sc_state = NCR_IDLE;
ncr53c9x_sched(sc);
return;
shortcut:
/*
* The idea is that many of the SCSI operations take very little
* time, and going away and getting interrupted is too high an
* overhead to pay. For example, selecting, sending a message
* and command and then doing some work can be done in one "pass".
*
* The delay is a heuristic: 2 us at 20 MHz, 2 us at 25 MHz and
* 1 us at 40 MHz. This needs testing.
*/
microtime(&wait);
wait.tv_usec += 50 / sc->sc_freq;
if (wait.tv_usec > 1000000) {
wait.tv_sec++;
wait.tv_usec -= 1000000;
}
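/* Poll for a new DMA interrupt until the deadline computed above expires. */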
do {
if (NCRDMA_ISINTR(sc))
goto again;
microtime(&cur);
} while (cur.tv_sec <= wait.tv_sec && cur.tv_usec <= wait.tv_usec);
}
static void
ncr53c9x_abort(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
{
NCR_LOCK_ASSERT(sc, MA_OWNED);
/* 2 secs for the abort */
ecb->timeout = NCR_ABORT_TIMEOUT;
ecb->flags |= ECB_ABORT;
if (ecb == sc->sc_nexus) {
/*
* If we're still selecting, the message will be scheduled
* after selection is complete.
*/
if (sc->sc_state == NCR_CONNECTED)
ncr53c9x_sched_msgout(SEND_ABORT);
/*
* Reschedule callout.
*/
callout_reset(&ecb->ch, mstohz(ecb->timeout),
ncr53c9x_callout, ecb);
} else {
/*
* Just leave the command where it is.
* XXX - what choice do we have but to reset the SCSI
* eventually?
*/
if (sc->sc_state == NCR_IDLE)
ncr53c9x_sched(sc);
}
}
static void
ncr53c9x_callout(void *arg)
{
struct ncr53c9x_ecb *ecb = arg;
union ccb *ccb = ecb->ccb;
struct ncr53c9x_softc *sc = ecb->sc;
struct ncr53c9x_tinfo *ti;
NCR_LOCK_ASSERT(sc, MA_OWNED);
ti = &sc->sc_tinfo[ccb->ccb_h.target_id];
xpt_print_path(ccb->ccb_h.path);
device_printf(sc->sc_dev, "timed out [ecb %p (flags 0x%x, dleft %x, "
"stat %x)], <state %d, nexus %p, phase(l %x, c %x, p %x), "
"resid %lx, msg(q %x,o %x) %s>",
ecb, ecb->flags, ecb->dleft, ecb->stat,
sc->sc_state, sc->sc_nexus,
NCR_READ_REG(sc, NCR_STAT),
sc->sc_phase, sc->sc_prevphase,
(long)sc->sc_dleft, sc->sc_msgpriq, sc->sc_msgout,
NCRDMA_ISACTIVE(sc) ? "DMA active" : "");
#if defined(NCR53C9X_DEBUG) && NCR53C9X_DEBUG > 1
printf("TRACE: %s.", ecb->trace);
#endif
if (ecb->flags & ECB_ABORT) {
/* Abort timed out. */
printf(" AGAIN\n");
ncr53c9x_init(sc, 1);
} else {
/* Abort the operation that has timed out. */
printf("\n");
ccb->ccb_h.status = CAM_CMD_TIMEOUT;
ncr53c9x_abort(sc, ecb);
/* Disable sync mode if stuck in a data phase. */
if (ecb == sc->sc_nexus && ti->curr.offset != 0 &&
(sc->sc_phase & (MSGI | CDI)) == 0) {
/* XXX ASYNC CALLBACK! */
ti->goal.offset = 0;
xpt_print_path(ccb->ccb_h.path);
printf("sync negotiation disabled\n");
}
}
}
static void
ncr53c9x_watch(void *arg)
{
struct ncr53c9x_softc *sc = arg;
struct ncr53c9x_linfo *li;
struct ncr53c9x_tinfo *ti;
time_t old;
int t;
NCR_LOCK_ASSERT(sc, MA_OWNED);
/* Delete any structures that have not been used in 10min. */
old = time_second - (10 * 60);
for (t = 0; t < sc->sc_ntarg; t++) {
ti = &sc->sc_tinfo[t];
li = LIST_FIRST(&ti->luns);
while (li) {
if (li->last_used < old &&
li->untagged == NULL &&
li->used == 0) {
if (li->lun < NCR_NLUN)
ti->lun[li->lun] = NULL;
LIST_REMOVE(li, link);
free(li, M_DEVBUF);
/* Restart the search at the beginning. */
li = LIST_FIRST(&ti->luns);
continue;
}
li = LIST_NEXT(li, link);
}
}
callout_reset(&sc->sc_watchdog, 60 * hz, ncr53c9x_watch, sc);
}
Index: head/sys/dev/fb/splash.c
===================================================================
--- head/sys/dev/fb/splash.c (revision 328217)
+++ head/sys/dev/fb/splash.c (revision 328218)
@@ -1,217 +1,217 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1999 Kazutaka YOKOTA <yokota@zodiac.mech.utsunomiya-u.ac.jp>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer as
* the first lines of this file unmodified.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_splash.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/linker.h>
#include <sys/fbio.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <dev/fb/fbreg.h>
#include <dev/fb/splashreg.h>
MODULE_VERSION(splash, 1);
/* video adapter and image decoder */
static video_adapter_t *splash_adp;
static splash_decoder_t *splash_decoder;
/* decoder candidates */
static int decoders;
static splash_decoder_t **decoder_set;
#define DECODER_ARRAY_DELTA 4
/* console driver callback */
static int (*splash_callback)(int, void *);
static void *splash_arg;
static int
splash_find_data(splash_decoder_t *decoder)
{
caddr_t image_module;
void *ptr;
size_t sz;
if (decoder->data_type == NULL)
return (0);
image_module = preload_search_by_type(decoder->data_type);
if (image_module == NULL)
return (ENOENT);
ptr = preload_fetch_addr(image_module);
sz = preload_fetch_size(image_module);
if (ptr == NULL || sz == 0)
return (ENOENT);
if (bootverbose)
printf("splash: image@%p, size:%zu\n", ptr, sz);
decoder->data = ptr;
decoder->data_size = sz;
return (0);
}
static int
splash_test(splash_decoder_t *decoder)
{
if (splash_find_data(decoder))
return ENOENT; /* XXX */
if (*decoder->init && (*decoder->init)(splash_adp)) {
decoder->data = NULL;
decoder->data_size = 0;
return ENODEV; /* XXX */
}
if (bootverbose)
printf("splash: image decoder found: %s\n", decoder->name);
return 0;
}
static void
splash_new(splash_decoder_t *decoder)
{
splash_decoder = decoder;
if (splash_callback != NULL)
(*splash_callback)(SPLASH_INIT, splash_arg);
}
int
splash_register(splash_decoder_t *decoder)
{
splash_decoder_t **p;
int error;
int i;
if (splash_adp != NULL) {
/*
* If the video card has already been initialized, test
* this decoder immediately.
*/
error = splash_test(decoder);
if (error == 0) {
/* replace the current decoder with new one */
if (splash_decoder != NULL)
error = splash_term(splash_adp);
if (error == 0)
splash_new(decoder);
}
return error;
} else {
/* register the decoder for later use */
for (i = 0; i < decoders; ++i) {
if (decoder_set[i] == NULL)
break;
}
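/* No free slot and every allocated slot is in use; grow the candidate array by DECODER_ARRAY_DELTA entries. */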
if ((i >= decoders) && (decoders % DECODER_ARRAY_DELTA) == 0) {
- p = mallocarray(decoders + DECODER_ARRAY_DELTA,
- sizeof(*p), M_DEVBUF, M_NOWAIT);
+ p = malloc(sizeof(*p)*(decoders + DECODER_ARRAY_DELTA),
+ M_DEVBUF, M_NOWAIT);
if (p == NULL)
return ENOMEM;
if (decoder_set != NULL) {
bcopy(decoder_set, p, sizeof(*p)*decoders);
free(decoder_set, M_DEVBUF);
}
decoder_set = p;
i = decoders++;
}
decoder_set[i] = decoder;
}
return 0;
}
int
splash_unregister(splash_decoder_t *decoder)
{
int error;
if (splash_decoder == decoder) {
if ((error = splash_term(splash_adp)) != 0)
return error;
}
return 0;
}
int
splash_init(video_adapter_t *adp, int (*callback)(int, void *), void *arg)
{
int i;
splash_adp = adp;
splash_callback = callback;
splash_arg = arg;
splash_decoder = NULL;
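/* Try each registered decoder candidate; adopt the first one that accepts this adapter and discard the rest. */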
for (i = 0; i < decoders; ++i) {
if (decoder_set[i] == NULL)
continue;
if (splash_test(decoder_set[i]) == 0) {
splash_new(decoder_set[i]);
break;
}
decoder_set[i] = NULL;
}
for (++i; i < decoders; ++i) {
decoder_set[i] = NULL;
}
return 0;
}
int
splash_term(video_adapter_t *adp)
{
int error = 0;
if (splash_adp != adp)
return EINVAL;
if (splash_decoder != NULL) {
if (splash_callback != NULL)
error = (*splash_callback)(SPLASH_TERM, splash_arg);
if (error == 0 && splash_decoder->term)
error = (*splash_decoder->term)(adp);
if (error == 0)
splash_decoder = NULL;
}
return error;
}
int
splash(video_adapter_t *adp, int on)
{
if (splash_decoder != NULL)
return (*splash_decoder->splash)(adp, on);
return ENODEV;
}
Index: head/sys/dev/gpio/gpiobus.c
===================================================================
--- head/sys/dev/gpio/gpiobus.c (revision 328217)
+++ head/sys/dev/gpio/gpiobus.c (revision 328218)
@@ -1,862 +1,862 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2009 Oleksandr Tymoshenko <gonzo@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/gpio.h>
#ifdef INTRNG
#include <sys/intr.h>
#endif
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <dev/gpio/gpiobusvar.h>
#include "gpiobus_if.h"
#undef GPIOBUS_DEBUG
#ifdef GPIOBUS_DEBUG
#define dprintf printf
#else
#define dprintf(x, arg...)
#endif
static void gpiobus_print_pins(struct gpiobus_ivar *, char *, size_t);
static int gpiobus_parse_pins(struct gpiobus_softc *, device_t, int);
static int gpiobus_probe(device_t);
static int gpiobus_attach(device_t);
static int gpiobus_detach(device_t);
static int gpiobus_suspend(device_t);
static int gpiobus_resume(device_t);
static void gpiobus_probe_nomatch(device_t, device_t);
static int gpiobus_print_child(device_t, device_t);
static int gpiobus_child_location_str(device_t, device_t, char *, size_t);
static int gpiobus_child_pnpinfo_str(device_t, device_t, char *, size_t);
static device_t gpiobus_add_child(device_t, u_int, const char *, int);
static void gpiobus_hinted_child(device_t, const char *, int);
/*
* GPIOBUS interface
*/
static int gpiobus_acquire_bus(device_t, device_t, int);
static void gpiobus_release_bus(device_t, device_t);
static int gpiobus_pin_setflags(device_t, device_t, uint32_t, uint32_t);
static int gpiobus_pin_getflags(device_t, device_t, uint32_t, uint32_t*);
static int gpiobus_pin_getcaps(device_t, device_t, uint32_t, uint32_t*);
static int gpiobus_pin_set(device_t, device_t, uint32_t, unsigned int);
static int gpiobus_pin_get(device_t, device_t, uint32_t, unsigned int*);
static int gpiobus_pin_toggle(device_t, device_t, uint32_t);
/*
* XXX -> Move me to better place - gpio_subr.c?
* Also, this function must be changed when interrupt configuration
* data will be moved into struct resource.
*/
#ifdef INTRNG
struct resource *
gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags,
gpio_pin_t pin, uint32_t intr_mode)
{
u_int irq;
struct intr_map_data_gpio *gpio_data;
struct resource *res;
gpio_data = (struct intr_map_data_gpio *)intr_alloc_map_data(
INTR_MAP_DATA_GPIO, sizeof(*gpio_data), M_WAITOK | M_ZERO);
gpio_data->gpio_pin_num = pin->pin;
gpio_data->gpio_pin_flags = pin->flags;
gpio_data->gpio_intr_mode = intr_mode;
irq = intr_map_irq(pin->dev, 0, (struct intr_map_data *)gpio_data);
res = bus_alloc_resource(consumer_dev, SYS_RES_IRQ, rid, irq, irq, 1,
alloc_flags);
if (res == NULL) {
intr_free_intr_map_data((struct intr_map_data *)gpio_data);
return (NULL);
}
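/* Stash the GPIO map data in the resource's virtual field for later retrieval by the interrupt code. */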
rman_set_virtual(res, gpio_data);
return (res);
}
#else
struct resource *
gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags,
gpio_pin_t pin, uint32_t intr_mode)
{
return (NULL);
}
#endif
int
gpio_check_flags(uint32_t caps, uint32_t flags)
{
/* Filter unwanted flags. */
flags &= caps;
/* Cannot mix input/output together. */
if (flags & GPIO_PIN_INPUT && flags & GPIO_PIN_OUTPUT)
return (EINVAL);
/* Cannot mix pull-up/pull-down together. */
if (flags & GPIO_PIN_PULLUP && flags & GPIO_PIN_PULLDOWN)
return (EINVAL);
return (0);
}
static void
gpiobus_print_pins(struct gpiobus_ivar *devi, char *buf, size_t buflen)
{
char tmp[128];
int i, range_start, range_stop, need_coma;
if (devi->npins == 0)
return;
need_coma = 0;
range_start = range_stop = devi->pins[0];
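/* Coalesce runs of consecutive pin numbers into comma-separated "start-stop" ranges. */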
for (i = 1; i < devi->npins; i++) {
if (devi->pins[i] != (range_stop + 1)) {
if (need_coma)
strlcat(buf, ",", buflen);
memset(tmp, 0, sizeof(tmp));
if (range_start != range_stop)
snprintf(tmp, sizeof(tmp) - 1, "%d-%d",
range_start, range_stop);
else
snprintf(tmp, sizeof(tmp) - 1, "%d",
range_start);
strlcat(buf, tmp, buflen);
range_start = range_stop = devi->pins[i];
need_coma = 1;
}
else
range_stop++;
}
if (need_coma)
strlcat(buf, ",", buflen);
memset(tmp, 0, sizeof(tmp));
if (range_start != range_stop)
snprintf(tmp, sizeof(tmp) - 1, "%d-%d",
range_start, range_stop);
else
snprintf(tmp, sizeof(tmp) - 1, "%d",
range_start);
strlcat(buf, tmp, buflen);
}
device_t
gpiobus_attach_bus(device_t dev)
{
device_t busdev;
busdev = device_add_child(dev, "gpiobus", -1);
if (busdev == NULL)
return (NULL);
if (device_add_child(dev, "gpioc", -1) == NULL) {
device_delete_child(dev, busdev);
return (NULL);
}
#ifdef FDT
ofw_gpiobus_register_provider(dev);
#endif
bus_generic_attach(dev);
return (busdev);
}
int
gpiobus_detach_bus(device_t dev)
{
int err;
#ifdef FDT
ofw_gpiobus_unregister_provider(dev);
#endif
err = bus_generic_detach(dev);
if (err != 0)
return (err);
return (device_delete_children(dev));
}
int
gpiobus_init_softc(device_t dev)
{
struct gpiobus_softc *sc;
sc = GPIOBUS_SOFTC(dev);
sc->sc_busdev = dev;
sc->sc_dev = device_get_parent(dev);
sc->sc_intr_rman.rm_type = RMAN_ARRAY;
sc->sc_intr_rman.rm_descr = "GPIO Interrupts";
if (rman_init(&sc->sc_intr_rman) != 0 ||
rman_manage_region(&sc->sc_intr_rman, 0, ~0) != 0)
panic("%s: failed to set up rman.", __func__);
if (GPIO_PIN_MAX(sc->sc_dev, &sc->sc_npins) != 0)
return (ENXIO);
KASSERT(sc->sc_npins >= 0, ("GPIO device with no pins"));
/* Pins = GPIO_PIN_MAX() + 1 */
sc->sc_npins++;
- sc->sc_pins = mallocarray(sc->sc_npins, sizeof(*sc->sc_pins), M_DEVBUF,
+ sc->sc_pins = malloc(sizeof(*sc->sc_pins) * sc->sc_npins, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (sc->sc_pins == NULL)
return (ENOMEM);
/* Initialize the bus lock. */
GPIOBUS_LOCK_INIT(sc);
return (0);
}
int
gpiobus_alloc_ivars(struct gpiobus_ivar *devi)
{
/* Allocate pins and flags memory. */
- devi->pins = mallocarray(devi->npins, sizeof(uint32_t), M_DEVBUF,
+ devi->pins = malloc(sizeof(uint32_t) * devi->npins, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (devi->pins == NULL)
return (ENOMEM);
- devi->flags = mallocarray(devi->npins, sizeof(uint32_t), M_DEVBUF,
+ devi->flags = malloc(sizeof(uint32_t) * devi->npins, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (devi->flags == NULL) {
free(devi->pins, M_DEVBUF);
return (ENOMEM);
}
return (0);
}
void
gpiobus_free_ivars(struct gpiobus_ivar *devi)
{
if (devi->flags) {
free(devi->flags, M_DEVBUF);
devi->flags = NULL;
}
if (devi->pins) {
free(devi->pins, M_DEVBUF);
devi->pins = NULL;
}
}
int
gpiobus_acquire_pin(device_t bus, uint32_t pin)
{
struct gpiobus_softc *sc;
sc = device_get_softc(bus);
/* Consistency check. */
if (pin >= sc->sc_npins) {
device_printf(bus,
"invalid pin %d, max: %d\n", pin, sc->sc_npins - 1);
return (-1);
}
/* Mark pin as mapped and give warning if it's already mapped. */
if (sc->sc_pins[pin].mapped) {
device_printf(bus, "warning: pin %d is already mapped\n", pin);
return (-1);
}
sc->sc_pins[pin].mapped = 1;
return (0);
}
/* Release mapped pin */
int
gpiobus_release_pin(device_t bus, uint32_t pin)
{
struct gpiobus_softc *sc;
sc = device_get_softc(bus);
/* Consistency check. */
if (pin >= sc->sc_npins) {
device_printf(bus,
"gpiobus_acquire_pin: invalid pin %d, max=%d\n",
pin, sc->sc_npins - 1);
return (-1);
}
if (!sc->sc_pins[pin].mapped) {
device_printf(bus, "gpiobus_acquire_pin: pin %d is not mapped\n", pin);
return (-1);
}
sc->sc_pins[pin].mapped = 0;
return (0);
}
static int
gpiobus_parse_pins(struct gpiobus_softc *sc, device_t child, int mask)
{
struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
int i, npins;
npins = 0;
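/* Count the bits set in the 32-bit hint mask; each set bit selects one pin on the parent controller. */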
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
npins++;
}
if (npins == 0) {
device_printf(child, "empty pin mask\n");
return (EINVAL);
}
devi->npins = npins;
if (gpiobus_alloc_ivars(devi) != 0) {
device_printf(child, "cannot allocate device ivars\n");
return (EINVAL);
}
npins = 0;
for (i = 0; i < 32; i++) {
if ((mask & (1 << i)) == 0)
continue;
/* Reserve the GPIO pin. */
if (gpiobus_acquire_pin(sc->sc_busdev, i) != 0) {
gpiobus_free_ivars(devi);
return (EINVAL);
}
devi->pins[npins++] = i;
/* Use the child name as pin name. */
GPIOBUS_PIN_SETNAME(sc->sc_busdev, i,
device_get_nameunit(child));
}
return (0);
}
static int
gpiobus_probe(device_t dev)
{
device_set_desc(dev, "GPIO bus");
return (BUS_PROBE_GENERIC);
}
static int
gpiobus_attach(device_t dev)
{
int err;
err = gpiobus_init_softc(dev);
if (err != 0)
return (err);
/*
* Get parent's pins and mark them as unmapped
*/
bus_generic_probe(dev);
bus_enumerate_hinted_children(dev);
return (bus_generic_attach(dev));
}
/*
* Since this is not a self-enumerating bus, and since we always add
* children in attach, we have to always delete children here.
*/
static int
gpiobus_detach(device_t dev)
{
struct gpiobus_softc *sc;
struct gpiobus_ivar *devi;
device_t *devlist;
int i, err, ndevs;
sc = GPIOBUS_SOFTC(dev);
KASSERT(mtx_initialized(&sc->sc_mtx),
("gpiobus mutex not initialized"));
GPIOBUS_LOCK_DESTROY(sc);
if ((err = bus_generic_detach(dev)) != 0)
return (err);
if ((err = device_get_children(dev, &devlist, &ndevs)) != 0)
return (err);
for (i = 0; i < ndevs; i++) {
devi = GPIOBUS_IVAR(devlist[i]);
gpiobus_free_ivars(devi);
resource_list_free(&devi->rl);
free(devi, M_DEVBUF);
device_delete_child(dev, devlist[i]);
}
free(devlist, M_TEMP);
rman_fini(&sc->sc_intr_rman);
if (sc->sc_pins) {
for (i = 0; i < sc->sc_npins; i++) {
if (sc->sc_pins[i].name != NULL)
free(sc->sc_pins[i].name, M_DEVBUF);
sc->sc_pins[i].name = NULL;
}
free(sc->sc_pins, M_DEVBUF);
sc->sc_pins = NULL;
}
return (0);
}
static int
gpiobus_suspend(device_t dev)
{
return (bus_generic_suspend(dev));
}
static int
gpiobus_resume(device_t dev)
{
return (bus_generic_resume(dev));
}
static void
gpiobus_probe_nomatch(device_t dev, device_t child)
{
char pins[128];
struct gpiobus_ivar *devi;
devi = GPIOBUS_IVAR(child);
memset(pins, 0, sizeof(pins));
gpiobus_print_pins(devi, pins, sizeof(pins));
if (devi->npins > 1)
device_printf(dev, "<unknown device> at pins %s", pins);
else
device_printf(dev, "<unknown device> at pin %s", pins);
resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd");
printf("\n");
}
static int
gpiobus_print_child(device_t dev, device_t child)
{
char pins[128];
int retval = 0;
struct gpiobus_ivar *devi;
devi = GPIOBUS_IVAR(child);
memset(pins, 0, sizeof(pins));
retval += bus_print_child_header(dev, child);
if (devi->npins > 0) {
if (devi->npins > 1)
retval += printf(" at pins ");
else
retval += printf(" at pin ");
gpiobus_print_pins(devi, pins, sizeof(pins));
retval += printf("%s", pins);
}
resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd");
retval += bus_print_child_footer(dev, child);
return (retval);
}
static int
gpiobus_child_location_str(device_t bus, device_t child, char *buf,
size_t buflen)
{
struct gpiobus_ivar *devi;
devi = GPIOBUS_IVAR(child);
if (devi->npins > 1)
strlcpy(buf, "pins=", buflen);
else
strlcpy(buf, "pin=", buflen);
gpiobus_print_pins(devi, buf, buflen);
return (0);
}
static int
gpiobus_child_pnpinfo_str(device_t bus, device_t child, char *buf,
size_t buflen)
{
*buf = '\0';
return (0);
}
static device_t
gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
{
device_t child;
struct gpiobus_ivar *devi;
child = device_add_child_ordered(dev, order, name, unit);
if (child == NULL)
return (child);
devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO);
if (devi == NULL) {
device_delete_child(dev, child);
return (NULL);
}
resource_list_init(&devi->rl);
device_set_ivars(child, devi);
return (child);
}
static void
gpiobus_hinted_child(device_t bus, const char *dname, int dunit)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(bus);
struct gpiobus_ivar *devi;
device_t child;
int irq, pins;
child = BUS_ADD_CHILD(bus, 0, dname, dunit);
devi = GPIOBUS_IVAR(child);
resource_int_value(dname, dunit, "pins", &pins);
if (gpiobus_parse_pins(sc, child, pins)) {
resource_list_free(&devi->rl);
free(devi, M_DEVBUF);
device_delete_child(bus, child);
}
if (resource_int_value(dname, dunit, "irq", &irq) == 0) {
if (bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1) != 0)
device_printf(bus,
"warning: bus_set_resource() failed\n");
}
}
static int
gpiobus_set_resource(device_t dev, device_t child, int type, int rid,
rman_res_t start, rman_res_t count)
{
struct gpiobus_ivar *devi;
struct resource_list_entry *rle;
dprintf("%s: entry (%p, %p, %d, %d, %p, %ld)\n",
__func__, dev, child, type, rid, (void *)(intptr_t)start, count);
devi = GPIOBUS_IVAR(child);
rle = resource_list_add(&devi->rl, type, rid, start,
start + count - 1, count);
if (rle == NULL)
return (ENXIO);
return (0);
}
static struct resource *
gpiobus_alloc_resource(device_t bus, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
struct gpiobus_softc *sc;
struct resource *rv;
struct resource_list *rl;
struct resource_list_entry *rle;
int isdefault;
if (type != SYS_RES_IRQ)
return (NULL);
isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1);
rle = NULL;
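/* Default (wildcard) request: use the IRQ range stored earlier in the child's resource list. */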
if (isdefault) {
rl = BUS_GET_RESOURCE_LIST(bus, child);
if (rl == NULL)
return (NULL);
rle = resource_list_find(rl, type, *rid);
if (rle == NULL)
return (NULL);
if (rle->res != NULL)
panic("%s: resource entry is busy", __func__);
start = rle->start;
count = rle->count;
end = rle->end;
}
sc = device_get_softc(bus);
rv = rman_reserve_resource(&sc->sc_intr_rman, start, end, count, flags,
child);
if (rv == NULL)
return (NULL);
rman_set_rid(rv, *rid);
if ((flags & RF_ACTIVE) != 0 &&
bus_activate_resource(child, type, *rid, rv) != 0) {
rman_release_resource(rv);
return (NULL);
}
return (rv);
}
static int
gpiobus_release_resource(device_t bus __unused, device_t child, int type,
int rid, struct resource *r)
{
int error;
if (rman_get_flags(r) & RF_ACTIVE) {
error = bus_deactivate_resource(child, type, rid, r);
if (error)
return (error);
}
return (rman_release_resource(r));
}
static struct resource_list *
gpiobus_get_resource_list(device_t bus __unused, device_t child)
{
struct gpiobus_ivar *ivar;
ivar = GPIOBUS_IVAR(child);
return (&ivar->rl);
}
static int
gpiobus_acquire_bus(device_t busdev, device_t child, int how)
{
struct gpiobus_softc *sc;
sc = device_get_softc(busdev);
GPIOBUS_ASSERT_UNLOCKED(sc);
GPIOBUS_LOCK(sc);
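/* Wait (or fail with EWOULDBLOCK) until the current owner releases the bus, then record this child as the new owner. */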
if (sc->sc_owner != NULL) {
if (sc->sc_owner == child)
panic("%s: %s still owns the bus.",
device_get_nameunit(busdev),
device_get_nameunit(child));
if (how == GPIOBUS_DONTWAIT) {
GPIOBUS_UNLOCK(sc);
return (EWOULDBLOCK);
}
while (sc->sc_owner != NULL)
mtx_sleep(sc, &sc->sc_mtx, 0, "gpiobuswait", 0);
}
sc->sc_owner = child;
GPIOBUS_UNLOCK(sc);
return (0);
}
static void
gpiobus_release_bus(device_t busdev, device_t child)
{
struct gpiobus_softc *sc;
sc = device_get_softc(busdev);
GPIOBUS_ASSERT_UNLOCKED(sc);
GPIOBUS_LOCK(sc);
if (sc->sc_owner == NULL)
panic("%s: %s releasing unowned bus.",
device_get_nameunit(busdev),
device_get_nameunit(child));
if (sc->sc_owner != child)
panic("%s: %s trying to release bus owned by %s",
device_get_nameunit(busdev),
device_get_nameunit(child),
device_get_nameunit(sc->sc_owner));
sc->sc_owner = NULL;
wakeup(sc);
GPIOBUS_UNLOCK(sc);
}
static int
gpiobus_pin_setflags(device_t dev, device_t child, uint32_t pin,
uint32_t flags)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
uint32_t caps;
if (pin >= devi->npins)
return (EINVAL);
if (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], &caps) != 0)
return (EINVAL);
if (gpio_check_flags(caps, flags) != 0)
return (EINVAL);
return (GPIO_PIN_SETFLAGS(sc->sc_dev, devi->pins[pin], flags));
}
static int
gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin,
uint32_t *flags)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
if (pin >= devi->npins)
return (EINVAL);
return GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags);
}
static int
gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin,
uint32_t *caps)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
if (pin >= devi->npins)
return (EINVAL);
return GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps);
}
static int
gpiobus_pin_set(device_t dev, device_t child, uint32_t pin,
unsigned int value)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
if (pin >= devi->npins)
return (EINVAL);
return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value);
}
static int
gpiobus_pin_get(device_t dev, device_t child, uint32_t pin,
unsigned int *value)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
if (pin >= devi->npins)
return (EINVAL);
return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value);
}
static int
gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
if (pin >= devi->npins)
return (EINVAL);
return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]);
}
static int
gpiobus_pin_getname(device_t dev, uint32_t pin, char *name)
{
struct gpiobus_softc *sc;
sc = GPIOBUS_SOFTC(dev);
if (pin >= sc->sc_npins)
return (EINVAL);
/* Do we have a name for this pin? */
if (sc->sc_pins[pin].name != NULL) {
memcpy(name, sc->sc_pins[pin].name, GPIOMAXNAME);
return (0);
}
/* Return the default pin name. */
return (GPIO_PIN_GETNAME(device_get_parent(dev), pin, name));
}
static int
gpiobus_pin_setname(device_t dev, uint32_t pin, const char *name)
{
struct gpiobus_softc *sc;
sc = GPIOBUS_SOFTC(dev);
if (pin >= sc->sc_npins)
return (EINVAL);
if (name == NULL)
return (EINVAL);
/* Save the pin name. */
if (sc->sc_pins[pin].name == NULL)
sc->sc_pins[pin].name = malloc(GPIOMAXNAME, M_DEVBUF,
M_WAITOK | M_ZERO);
strlcpy(sc->sc_pins[pin].name, name, GPIOMAXNAME);
return (0);
}
static device_method_t gpiobus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, gpiobus_probe),
DEVMETHOD(device_attach, gpiobus_attach),
DEVMETHOD(device_detach, gpiobus_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, gpiobus_suspend),
DEVMETHOD(device_resume, gpiobus_resume),
/* Bus interface */
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_config_intr, bus_generic_config_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_set_resource, gpiobus_set_resource),
DEVMETHOD(bus_alloc_resource, gpiobus_alloc_resource),
DEVMETHOD(bus_release_resource, gpiobus_release_resource),
DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
DEVMETHOD(bus_get_resource_list, gpiobus_get_resource_list),
DEVMETHOD(bus_add_child, gpiobus_add_child),
DEVMETHOD(bus_probe_nomatch, gpiobus_probe_nomatch),
DEVMETHOD(bus_print_child, gpiobus_print_child),
DEVMETHOD(bus_child_pnpinfo_str, gpiobus_child_pnpinfo_str),
DEVMETHOD(bus_child_location_str, gpiobus_child_location_str),
DEVMETHOD(bus_hinted_child, gpiobus_hinted_child),
/* GPIO protocol */
DEVMETHOD(gpiobus_acquire_bus, gpiobus_acquire_bus),
DEVMETHOD(gpiobus_release_bus, gpiobus_release_bus),
DEVMETHOD(gpiobus_pin_getflags, gpiobus_pin_getflags),
DEVMETHOD(gpiobus_pin_getcaps, gpiobus_pin_getcaps),
DEVMETHOD(gpiobus_pin_setflags, gpiobus_pin_setflags),
DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get),
DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set),
DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle),
DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname),
DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname),
DEVMETHOD_END
};
driver_t gpiobus_driver = {
"gpiobus",
gpiobus_methods,
sizeof(struct gpiobus_softc)
};
devclass_t gpiobus_devclass;
EARLY_DRIVER_MODULE(gpiobus, gpio, gpiobus_driver, gpiobus_devclass, 0, 0,
BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
MODULE_VERSION(gpiobus, 1);
Index: head/sys/dev/if_ndis/if_ndis.c
===================================================================
--- head/sys/dev/if_ndis/if_ndis.c (revision 328217)
+++ head/sys/dev/if_ndis/if_ndis.c (revision 328218)
@@ -1,3426 +1,3426 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2003
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* WPA support originally contributed by Arvind Srinivasan <arvind@celar.us>
* then hacked upon mercilessly by me.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_ioctl.h>
#include <net80211/ieee80211_regdomain.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
#include <compat/ndis/usbd_var.h>
#include <dev/if_ndis/if_ndisvar.h>
#define NDIS_DEBUG
#ifdef NDIS_DEBUG
#define DPRINTF(x) do { if (ndis_debug > 0) printf x; } while (0)
int ndis_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, ndis, CTLFLAG_RW, &ndis_debug, 0,
"if_ndis debug level");
#else
#define DPRINTF(x)
#endif
SYSCTL_DECL(_hw_ndisusb);
int ndisusb_halt = 1;
SYSCTL_INT(_hw_ndisusb, OID_AUTO, halt, CTLFLAG_RW, &ndisusb_halt, 0,
"Halt NDIS USB driver when it's attached");
/* 0.0 - 30.0 dBm to mW conversion table, in 0.5 dBm steps */
static const uint16_t dBm2mW[] = {
1, 1, 1, 1, 2, 2, 2, 2, 3, 3,
3, 4, 4, 4, 5, 6, 6, 7, 8, 9,
10, 11, 13, 14, 16, 18, 20, 22, 25, 28,
32, 35, 40, 45, 50, 56, 63, 71, 79, 89,
100, 112, 126, 141, 158, 178, 200, 224, 251, 282,
316, 355, 398, 447, 501, 562, 631, 708, 794, 891,
1000
};
MODULE_DEPEND(ndis, ether, 1, 1, 1);
MODULE_DEPEND(ndis, wlan, 1, 1, 1);
MODULE_DEPEND(ndis, ndisapi, 1, 1, 1);
MODULE_VERSION(ndis, 1);
int ndis_attach (device_t);
int ndis_detach (device_t);
int ndis_suspend (device_t);
int ndis_resume (device_t);
void ndis_shutdown (device_t);
int ndisdrv_modevent (module_t, int, void *);
static void ndis_txeof (ndis_handle, ndis_packet *, ndis_status);
static void ndis_rxeof (ndis_handle, ndis_packet **, uint32_t);
static void ndis_rxeof_eth (ndis_handle, ndis_handle, char *, void *,
uint32_t, void *, uint32_t, uint32_t);
static void ndis_rxeof_done (ndis_handle);
static void ndis_rxeof_xfr (kdpc *, ndis_handle, void *, void *);
static void ndis_rxeof_xfr_done (ndis_handle, ndis_packet *,
uint32_t, uint32_t);
static void ndis_linksts (ndis_handle, ndis_status, void *, uint32_t);
static void ndis_linksts_done (ndis_handle);
/* We need to wrap these functions for amd64. */
static funcptr ndis_txeof_wrap;
static funcptr ndis_rxeof_wrap;
static funcptr ndis_rxeof_eth_wrap;
static funcptr ndis_rxeof_done_wrap;
static funcptr ndis_rxeof_xfr_wrap;
static funcptr ndis_rxeof_xfr_done_wrap;
static funcptr ndis_linksts_wrap;
static funcptr ndis_linksts_done_wrap;
static funcptr ndis_ticktask_wrap;
static funcptr ndis_ifstarttask_wrap;
static funcptr ndis_resettask_wrap;
static funcptr ndis_inputtask_wrap;
static struct ieee80211vap *ndis_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void ndis_vap_delete (struct ieee80211vap *);
static void ndis_tick (void *);
static void ndis_ticktask (device_object *, void *);
static int ndis_raw_xmit (struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void ndis_update_mcast (struct ieee80211com *);
static void ndis_update_promisc (struct ieee80211com *);
static void ndis_ifstart (struct ifnet *);
static void ndis_ifstarttask (device_object *, void *);
static void ndis_resettask (device_object *, void *);
static void ndis_inputtask (device_object *, void *);
static int ndis_ifioctl (struct ifnet *, u_long, caddr_t);
static int ndis_newstate (struct ieee80211vap *, enum ieee80211_state,
int);
static int ndis_nettype_chan (uint32_t);
static int ndis_nettype_mode (uint32_t);
static void ndis_scan (void *);
static void ndis_scan_results (struct ndis_softc *);
static void ndis_scan_start (struct ieee80211com *);
static void ndis_scan_end (struct ieee80211com *);
static void ndis_set_channel (struct ieee80211com *);
static void ndis_scan_curchan (struct ieee80211_scan_state *, unsigned long);
static void ndis_scan_mindwell (struct ieee80211_scan_state *);
static void ndis_init (void *);
static void ndis_stop (struct ndis_softc *);
static int ndis_ifmedia_upd (struct ifnet *);
static void ndis_ifmedia_sts (struct ifnet *, struct ifmediareq *);
static int ndis_get_bssid_list (struct ndis_softc *,
ndis_80211_bssid_list_ex **);
static int ndis_get_assoc (struct ndis_softc *, ndis_wlan_bssid_ex **);
static int ndis_probe_offload (struct ndis_softc *);
static int ndis_set_offload (struct ndis_softc *);
static void ndis_getstate_80211 (struct ndis_softc *);
static void ndis_setstate_80211 (struct ndis_softc *);
static void ndis_auth_and_assoc (struct ndis_softc *, struct ieee80211vap *);
static void ndis_media_status (struct ifnet *, struct ifmediareq *);
static int ndis_set_cipher (struct ndis_softc *, int);
static int ndis_set_wpa (struct ndis_softc *, void *, int);
static int ndis_add_key (struct ieee80211vap *,
const struct ieee80211_key *);
static int ndis_del_key (struct ieee80211vap *,
const struct ieee80211_key *);
static void ndis_setmulti (struct ndis_softc *);
static void ndis_map_sclist (void *, bus_dma_segment_t *,
int, bus_size_t, int);
static int ndis_ifattach(struct ndis_softc *);
static int ndis_80211attach(struct ndis_softc *);
static int ndis_80211ioctl(struct ieee80211com *, u_long , void *);
static int ndis_80211transmit(struct ieee80211com *, struct mbuf *);
static void ndis_80211parent(struct ieee80211com *);
static int ndisdrv_loaded = 0;
/*
* This routine should call windrv_load() once for each driver
* image. This will do the relocation and dynalinking for the
* image, and create a Windows driver object which will be
* saved in our driver database.
*/
int
ndisdrv_modevent(mod, cmd, arg)
module_t mod;
int cmd;
void *arg;
{
int error = 0;
switch (cmd) {
case MOD_LOAD:
ndisdrv_loaded++;
if (ndisdrv_loaded > 1)
break;
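/* First load: generate stdcall wrappers for our callbacks so the Windows binary driver can invoke them. */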
windrv_wrap((funcptr)ndis_rxeof, &ndis_rxeof_wrap,
3, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_eth, &ndis_rxeof_eth_wrap,
8, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_done, &ndis_rxeof_done_wrap,
1, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_xfr, &ndis_rxeof_xfr_wrap,
4, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_xfr_done,
&ndis_rxeof_xfr_done_wrap, 4, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_txeof, &ndis_txeof_wrap,
3, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_linksts, &ndis_linksts_wrap,
4, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_linksts_done,
&ndis_linksts_done_wrap, 1, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_ticktask, &ndis_ticktask_wrap,
2, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_ifstarttask, &ndis_ifstarttask_wrap,
2, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_resettask, &ndis_resettask_wrap,
2, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_inputtask, &ndis_inputtask_wrap,
2, WINDRV_WRAP_STDCALL);
break;
case MOD_UNLOAD:
ndisdrv_loaded--;
if (ndisdrv_loaded > 0)
break;
/* fallthrough */
case MOD_SHUTDOWN:
windrv_unwrap(ndis_rxeof_wrap);
windrv_unwrap(ndis_rxeof_eth_wrap);
windrv_unwrap(ndis_rxeof_done_wrap);
windrv_unwrap(ndis_rxeof_xfr_wrap);
windrv_unwrap(ndis_rxeof_xfr_done_wrap);
windrv_unwrap(ndis_txeof_wrap);
windrv_unwrap(ndis_linksts_wrap);
windrv_unwrap(ndis_linksts_done_wrap);
windrv_unwrap(ndis_ticktask_wrap);
windrv_unwrap(ndis_ifstarttask_wrap);
windrv_unwrap(ndis_resettask_wrap);
windrv_unwrap(ndis_inputtask_wrap);
break;
default:
error = EINVAL;
break;
}
return (error);
}
/*
* Program the 64-bit multicast hash filter.
*/
static void
ndis_setmulti(sc)
struct ndis_softc *sc;
{
struct ifnet *ifp;
struct ifmultiaddr *ifma;
int len, mclistsz, error;
uint8_t *mclist;
if (!NDIS_INITIALIZED(sc))
return;
if (sc->ndis_80211)
return;
ifp = sc->ifp;
if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
len = sizeof(sc->ndis_filter);
error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &len);
if (error)
device_printf(sc->ndis_dev,
"set allmulti failed: %d\n", error);
return;
}
if (TAILQ_EMPTY(&ifp->if_multiaddrs))
return;
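/* Ask the driver how many multicast addresses it can filter and build a flat list of ETHER_ADDR_LEN-byte entries for OID_802_3_MULTICAST_LIST. */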
len = sizeof(mclistsz);
ndis_get_info(sc, OID_802_3_MAXIMUM_LIST_SIZE, &mclistsz, &len);
mclist = malloc(ETHER_ADDR_LEN * mclistsz, M_TEMP, M_NOWAIT|M_ZERO);
if (mclist == NULL) {
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
goto out;
}
sc->ndis_filter |= NDIS_PACKET_TYPE_MULTICAST;
len = 0;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
mclist + (ETHER_ADDR_LEN * len), ETHER_ADDR_LEN);
len++;
if (len > mclistsz) {
if_maddr_runlock(ifp);
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST;
goto out;
}
}
if_maddr_runlock(ifp);
len = len * ETHER_ADDR_LEN;
error = ndis_set_info(sc, OID_802_3_MULTICAST_LIST, mclist, &len);
if (error) {
device_printf(sc->ndis_dev, "set mclist failed: %d\n", error);
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST;
}
out:
free(mclist, M_TEMP);
len = sizeof(sc->ndis_filter);
error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &len);
if (error)
device_printf(sc->ndis_dev, "set multi failed: %d\n", error);
}
static int
ndis_set_offload(sc)
struct ndis_softc *sc;
{
ndis_task_offload *nto;
ndis_task_offload_hdr *ntoh;
ndis_task_tcpip_csum *nttc;
struct ifnet *ifp;
int len, error;
if (!NDIS_INITIALIZED(sc))
return (EINVAL);
if (sc->ndis_80211)
return (EINVAL);
/* See if there's anything to set. */
ifp = sc->ifp;
error = ndis_probe_offload(sc);
if (error)
return (error);
if (sc->ndis_hwassist == 0 && ifp->if_capabilities == 0)
return (0);
len = sizeof(ndis_task_offload_hdr) + sizeof(ndis_task_offload) +
sizeof(ndis_task_tcpip_csum);
ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO);
if (ntoh == NULL)
return (ENOMEM);
ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION;
ntoh->ntoh_len = sizeof(ndis_task_offload_hdr);
ntoh->ntoh_offset_firsttask = sizeof(ndis_task_offload_hdr);
ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header);
ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3;
ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN;
nto = (ndis_task_offload *)((char *)ntoh +
ntoh->ntoh_offset_firsttask);
nto->nto_vers = NDIS_TASK_OFFLOAD_VERSION;
nto->nto_len = sizeof(ndis_task_offload);
nto->nto_task = NDIS_TASK_TCPIP_CSUM;
nto->nto_offset_nexttask = 0;
nto->nto_taskbuflen = sizeof(ndis_task_tcpip_csum);
nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf;
if (ifp->if_capenable & IFCAP_TXCSUM)
nttc->nttc_v4tx = sc->ndis_v4tx;
if (ifp->if_capenable & IFCAP_RXCSUM)
nttc->nttc_v4rx = sc->ndis_v4rx;
error = ndis_set_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len);
free(ntoh, M_TEMP);
return (error);
}
static int
ndis_probe_offload(sc)
struct ndis_softc *sc;
{
ndis_task_offload *nto;
ndis_task_offload_hdr *ntoh;
ndis_task_tcpip_csum *nttc = NULL;
struct ifnet *ifp;
int len, error, dummy;
ifp = sc->ifp;
len = sizeof(dummy);
error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, &dummy, &len);
if (error != ENOSPC)
return (error);
ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO);
if (ntoh == NULL)
return (ENOMEM);
ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION;
ntoh->ntoh_len = sizeof(ndis_task_offload_hdr);
ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header);
ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3;
ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN;
error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len);
if (error) {
free(ntoh, M_TEMP);
return (error);
}
if (ntoh->ntoh_vers != NDIS_TASK_OFFLOAD_VERSION) {
free(ntoh, M_TEMP);
return (EINVAL);
}
nto = (ndis_task_offload *)((char *)ntoh +
ntoh->ntoh_offset_firsttask);
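/* Walk the chain of task offload descriptors looking for the TCP/IP checksum entry. */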
while (1) {
switch (nto->nto_task) {
case NDIS_TASK_TCPIP_CSUM:
nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf;
break;
/* Don't handle these yet. */
case NDIS_TASK_IPSEC:
case NDIS_TASK_TCP_LARGESEND:
default:
break;
}
if (nto->nto_offset_nexttask == 0)
break;
nto = (ndis_task_offload *)((char *)nto +
nto->nto_offset_nexttask);
}
if (nttc == NULL) {
free(ntoh, M_TEMP);
return (ENOENT);
}
sc->ndis_v4tx = nttc->nttc_v4tx;
sc->ndis_v4rx = nttc->nttc_v4rx;
if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_IP_CSUM)
sc->ndis_hwassist |= CSUM_IP;
if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_TCP_CSUM)
sc->ndis_hwassist |= CSUM_TCP;
if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_UDP_CSUM)
sc->ndis_hwassist |= CSUM_UDP;
if (sc->ndis_hwassist)
ifp->if_capabilities |= IFCAP_TXCSUM;
if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_IP_CSUM)
ifp->if_capabilities |= IFCAP_RXCSUM;
if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_TCP_CSUM)
ifp->if_capabilities |= IFCAP_RXCSUM;
if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_UDP_CSUM)
ifp->if_capabilities |= IFCAP_RXCSUM;
free(ntoh, M_TEMP);
return (0);
}
static int
ndis_nettype_chan(uint32_t type)
{
switch (type) {
case NDIS_80211_NETTYPE_11FH: return (IEEE80211_CHAN_FHSS);
case NDIS_80211_NETTYPE_11DS: return (IEEE80211_CHAN_B);
case NDIS_80211_NETTYPE_11OFDM5: return (IEEE80211_CHAN_A);
case NDIS_80211_NETTYPE_11OFDM24: return (IEEE80211_CHAN_G);
}
DPRINTF(("unknown channel nettype %d\n", type));
return (IEEE80211_CHAN_B); /* Default to 11B chan */
}
static int
ndis_nettype_mode(uint32_t type)
{
switch (type) {
case NDIS_80211_NETTYPE_11FH: return (IEEE80211_MODE_FH);
case NDIS_80211_NETTYPE_11DS: return (IEEE80211_MODE_11B);
case NDIS_80211_NETTYPE_11OFDM5: return (IEEE80211_MODE_11A);
case NDIS_80211_NETTYPE_11OFDM24: return (IEEE80211_MODE_11G);
}
DPRINTF(("unknown mode nettype %d\n", type));
return (IEEE80211_MODE_AUTO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
int
ndis_attach(device_t dev)
{
struct ndis_softc *sc;
driver_object *pdrv;
device_object *pdo;
int error = 0, len;
int i;
sc = device_get_softc(dev);
mtx_init(&sc->ndis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
KeInitializeSpinLock(&sc->ndis_rxlock);
KeInitializeSpinLock(&sc->ndisusb_tasklock);
KeInitializeSpinLock(&sc->ndisusb_xferdonelock);
InitializeListHead(&sc->ndis_shlist);
InitializeListHead(&sc->ndisusb_tasklist);
InitializeListHead(&sc->ndisusb_xferdonelist);
callout_init(&sc->ndis_stat_callout, 1);
mbufq_init(&sc->ndis_rxqueue, INT_MAX); /* XXXGL: sane maximum */
if (sc->ndis_iftype == PCMCIABus) {
error = ndis_alloc_amem(sc);
if (error) {
device_printf(dev, "failed to allocate "
"attribute memory\n");
goto fail;
}
}
/* Create sysctl registry nodes */
ndis_create_sysctls(sc);
/* Find the PDO for this device instance. */
if (sc->ndis_iftype == PCIBus)
pdrv = windrv_lookup(0, "PCI Bus");
else if (sc->ndis_iftype == PCMCIABus)
pdrv = windrv_lookup(0, "PCCARD Bus");
else
pdrv = windrv_lookup(0, "USB Bus");
pdo = windrv_find_pdo(pdrv, dev);
/*
* Create a new functional device object for this
* device. This is what creates the miniport block
* for this device instance.
*/
if (NdisAddDevice(sc->ndis_dobj, pdo) != STATUS_SUCCESS) {
device_printf(dev, "failed to create FDO!\n");
error = ENXIO;
goto fail;
}
/* Tell the user what version of the API the driver is using. */
device_printf(dev, "NDIS API version: %d.%d\n",
sc->ndis_chars->nmc_version_major,
sc->ndis_chars->nmc_version_minor);
/* Do resource conversion. */
if (sc->ndis_iftype == PCMCIABus || sc->ndis_iftype == PCIBus)
ndis_convert_res(sc);
else
sc->ndis_block->nmb_rlist = NULL;
/* Install our RX and TX interrupt handlers. */
sc->ndis_block->nmb_senddone_func = ndis_txeof_wrap;
sc->ndis_block->nmb_pktind_func = ndis_rxeof_wrap;
sc->ndis_block->nmb_ethrxindicate_func = ndis_rxeof_eth_wrap;
sc->ndis_block->nmb_ethrxdone_func = ndis_rxeof_done_wrap;
sc->ndis_block->nmb_tdcond_func = ndis_rxeof_xfr_done_wrap;
/* Override the status handler so we can detect link changes. */
sc->ndis_block->nmb_status_func = ndis_linksts_wrap;
sc->ndis_block->nmb_statusdone_func = ndis_linksts_done_wrap;
/* Set up work item handlers. */
sc->ndis_tickitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndis_startitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndis_resetitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndis_inputitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndisusb_xferdoneitem =
IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndisusb_taskitem =
IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
KeInitializeDpc(&sc->ndis_rxdpc, ndis_rxeof_xfr_wrap, sc->ndis_block);
/* Call driver's init routine. */
if (ndis_init_nic(sc)) {
device_printf(dev, "init handler failed\n");
error = ENXIO;
goto fail;
}
/*
* Figure out how big to make the TX buffer pool.
*/
len = sizeof(sc->ndis_maxpkts);
if (ndis_get_info(sc, OID_GEN_MAXIMUM_SEND_PACKETS,
&sc->ndis_maxpkts, &len)) {
device_printf(dev, "failed to get max TX packets\n");
error = ENXIO;
goto fail;
}
/*
* If this is a deserialized miniport, we don't have
* to honor the OID_GEN_MAXIMUM_SEND_PACKETS result.
*/
if (!NDIS_SERIALIZED(sc->ndis_block))
sc->ndis_maxpkts = NDIS_TXPKTS;
/* Enforce some sanity, just in case. */
if (sc->ndis_maxpkts == 0)
sc->ndis_maxpkts = 10;
- sc->ndis_txarray = mallocarray(sc->ndis_maxpkts,
- sizeof(ndis_packet *), M_DEVBUF, M_NOWAIT|M_ZERO);
+ sc->ndis_txarray = malloc(sizeof(ndis_packet *) *
+ sc->ndis_maxpkts, M_DEVBUF, M_NOWAIT|M_ZERO);
/* Allocate a pool of ndis_packets for TX encapsulation. */
NdisAllocatePacketPool(&i, &sc->ndis_txpool,
sc->ndis_maxpkts, PROTOCOL_RESERVED_SIZE_IN_PACKET);
if (i != NDIS_STATUS_SUCCESS) {
sc->ndis_txpool = NULL;
device_printf(dev, "failed to allocate TX packet pool");
error = ENOMEM;
goto fail;
}
sc->ndis_txpending = sc->ndis_maxpkts;
sc->ndis_oidcnt = 0;
/* Get supported oid list. */
ndis_get_supported_oids(sc, &sc->ndis_oids, &sc->ndis_oidcnt);
/* If the NDIS module requested scatter/gather, init maps. */
if (sc->ndis_sc)
ndis_init_dma(sc);
/*
* See if the OID_802_11_CONFIGURATION OID is
* supported by this driver. If it is, then this is an 802.11
* wireless driver, and we should set up media for wireless.
*/
for (i = 0; i < sc->ndis_oidcnt; i++)
if (sc->ndis_oids[i] == OID_802_11_CONFIGURATION) {
sc->ndis_80211 = 1;
break;
}
if (sc->ndis_80211)
error = ndis_80211attach(sc);
else
error = ndis_ifattach(sc);
fail:
if (error) {
ndis_detach(dev);
return (error);
}
if (sc->ndis_iftype == PNPBus && ndisusb_halt == 0)
return (error);
DPRINTF(("attach done.\n"));
/* We're done talking to the NIC for now; halt it. */
ndis_halt_nic(sc);
DPRINTF(("halting done.\n"));
return (error);
}
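/*
* Attach as an 802.11 device: query the driver for its supported
* PHY types, rates and crypto capabilities, then register with
* net80211.
*/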
static int
ndis_80211attach(struct ndis_softc *sc)
{
struct ieee80211com *ic = &sc->ndis_ic;
ndis_80211_rates_ex rates;
struct ndis_80211_nettype_list *ntl;
uint32_t arg;
int mode, i, r, len, nonettypes = 1;
uint8_t bands[IEEE80211_MODE_BYTES] = { 0 };
callout_init(&sc->ndis_scan_callout, 1);
ic->ic_softc = sc;
ic->ic_ioctl = ndis_80211ioctl;
ic->ic_name = device_get_nameunit(sc->ndis_dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_DS;
ic->ic_caps = IEEE80211_C_8023ENCAP |
IEEE80211_C_STA | IEEE80211_C_IBSS;
setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO);
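/*
* Probe for the supported PHY types: a zero-length query is
* expected to fail with ENOSPC and report the required buffer size.
*/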
len = 0;
r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED, NULL, &len);
if (r != ENOSPC)
goto nonettypes;
ntl = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED, ntl, &len);
if (r != 0) {
free(ntl, M_DEVBUF);
goto nonettypes;
}
for (i = 0; i < ntl->ntl_items; i++) {
mode = ndis_nettype_mode(ntl->ntl_type[i]);
if (mode) {
nonettypes = 0;
setbit(ic->ic_modecaps, mode);
setbit(bands, mode);
} else
device_printf(sc->ndis_dev, "Unknown nettype %d\n",
ntl->ntl_type[i]);
}
free(ntl, M_DEVBUF);
nonettypes:
/* Default to 11b channels if the card did not supply any */
if (nonettypes) {
setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11B);
}
len = sizeof(rates);
bzero((char *)&rates, len);
r = ndis_get_info(sc, OID_802_11_SUPPORTED_RATES, (void *)rates, &len);
if (r != 0)
device_printf(sc->ndis_dev, "get rates failed: 0x%x\n", r);
/*
* Since the driver can only report a limited set of supported
* rates, if this is not an 802.11b device we will have to fake
* up the rest of the rate sets ourselves.
*/
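/*
* TESTSETRATE() appends a rate only if it is not already present in
* the rate set; SETRATE() and INCRATE() append unconditionally.
*/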
#define TESTSETRATE(x, y) \
do { \
int i; \
for (i = 0; i < ic->ic_sup_rates[x].rs_nrates; i++) { \
if (ic->ic_sup_rates[x].rs_rates[i] == (y)) \
break; \
} \
if (i == ic->ic_sup_rates[x].rs_nrates) { \
ic->ic_sup_rates[x].rs_rates[i] = (y); \
ic->ic_sup_rates[x].rs_nrates++; \
} \
} while (0)
#define SETRATE(x, y) \
ic->ic_sup_rates[x].rs_rates[ic->ic_sup_rates[x].rs_nrates] = (y)
#define INCRATE(x) \
ic->ic_sup_rates[x].rs_nrates++
ic->ic_curmode = IEEE80211_MODE_AUTO;
if (isset(ic->ic_modecaps, IEEE80211_MODE_11A))
ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates = 0;
if (isset(ic->ic_modecaps, IEEE80211_MODE_11B))
ic->ic_sup_rates[IEEE80211_MODE_11B].rs_nrates = 0;
if (isset(ic->ic_modecaps, IEEE80211_MODE_11G))
ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates = 0;
for (i = 0; i < len; i++) {
switch (rates[i] & IEEE80211_RATE_VAL) {
case 2:
case 4:
case 11:
case 10:
case 22:
if (isclr(ic->ic_modecaps, IEEE80211_MODE_11B)) {
/* Lazy-init 802.11b. */
setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
ic->ic_sup_rates[IEEE80211_MODE_11B].
rs_nrates = 0;
}
SETRATE(IEEE80211_MODE_11B, rates[i]);
INCRATE(IEEE80211_MODE_11B);
break;
default:
if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) {
SETRATE(IEEE80211_MODE_11A, rates[i]);
INCRATE(IEEE80211_MODE_11A);
}
if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) {
SETRATE(IEEE80211_MODE_11G, rates[i]);
INCRATE(IEEE80211_MODE_11G);
}
break;
}
}
/*
* If the hardware supports 802.11g, it most
* likely supports 802.11b and all of the
* 802.11b and 802.11g speeds, so maybe we can
* just cheat here. Just how in the heck do
* we detect turbo modes, though?
*/
if (isset(ic->ic_modecaps, IEEE80211_MODE_11B)) {
TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|2);
TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|4);
TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|11);
TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|22);
}
if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) {
TESTSETRATE(IEEE80211_MODE_11G, 48);
TESTSETRATE(IEEE80211_MODE_11G, 72);
TESTSETRATE(IEEE80211_MODE_11G, 96);
TESTSETRATE(IEEE80211_MODE_11G, 108);
}
if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) {
TESTSETRATE(IEEE80211_MODE_11A, 48);
TESTSETRATE(IEEE80211_MODE_11A, 72);
TESTSETRATE(IEEE80211_MODE_11A, 96);
TESTSETRATE(IEEE80211_MODE_11A, 108);
}
#undef SETRATE
#undef INCRATE
#undef TESTSETRATE
ieee80211_init_channels(ic, NULL, bands);
/*
* To test for WPA support, we need to see if we can
* set AUTHENTICATION_MODE to WPA and read it back
* successfully.
*/
i = sizeof(arg);
arg = NDIS_80211_AUTHMODE_WPA;
r = ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i);
if (r == 0) {
r = ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i);
if (r == 0 && arg == NDIS_80211_AUTHMODE_WPA)
ic->ic_caps |= IEEE80211_C_WPA;
}
/*
* To test for supported ciphers, we set each
* available encryption type in descending order.
* If ENC3 works, then we have WEP, TKIP and AES.
* If only ENC2 works, then we have WEP and TKIP.
* If only ENC1 works, then we have just WEP.
*/
i = sizeof(arg);
arg = NDIS_80211_WEPSTAT_ENC3ENABLED;
r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
if (r == 0) {
ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
| IEEE80211_CRYPTO_TKIP
| IEEE80211_CRYPTO_AES_CCM;
goto got_crypto;
}
arg = NDIS_80211_WEPSTAT_ENC2ENABLED;
r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
if (r == 0) {
ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
| IEEE80211_CRYPTO_TKIP;
goto got_crypto;
}
arg = NDIS_80211_WEPSTAT_ENC1ENABLED;
r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
if (r == 0)
ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
got_crypto:
i = sizeof(arg);
r = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &i);
if (r == 0)
ic->ic_caps |= IEEE80211_C_PMGT;
r = ndis_get_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &i);
if (r == 0)
ic->ic_caps |= IEEE80211_C_TXPMGT;
/*
* Get station address from the driver.
*/
len = sizeof(ic->ic_macaddr);
ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, &ic->ic_macaddr, &len);
ieee80211_ifattach(ic);
ic->ic_raw_xmit = ndis_raw_xmit;
ic->ic_scan_start = ndis_scan_start;
ic->ic_scan_end = ndis_scan_end;
ic->ic_set_channel = ndis_set_channel;
ic->ic_scan_curchan = ndis_scan_curchan;
ic->ic_scan_mindwell = ndis_scan_mindwell;
ic->ic_bsschan = IEEE80211_CHAN_ANYC;
ic->ic_vap_create = ndis_vap_create;
ic->ic_vap_delete = ndis_vap_delete;
ic->ic_update_mcast = ndis_update_mcast;
ic->ic_update_promisc = ndis_update_promisc;
ic->ic_transmit = ndis_80211transmit;
ic->ic_parent = ndis_80211parent;
if (bootverbose)
ieee80211_announce(ic);
return (0);
}
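/*
* Attach as a plain ethernet device: probe checksum offload support,
* fetch the station address and hook up ifmedia and ether_ifattach().
*/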
static int
ndis_ifattach(struct ndis_softc *sc)
{
struct ifnet *ifp;
u_char eaddr[ETHER_ADDR_LEN];
int len;
ifp = if_alloc(IFT_ETHER);
if (ifp == NULL)
return (ENOSPC);
sc->ifp = ifp;
ifp->if_softc = sc;
/* Check for task offload support. */
ndis_probe_offload(sc);
/*
* Get station address from the driver.
*/
len = sizeof(eaddr);
ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, eaddr, &len);
if_initname(ifp, device_get_name(sc->ndis_dev),
device_get_unit(sc->ndis_dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ndis_ifioctl;
ifp->if_start = ndis_ifstart;
ifp->if_init = ndis_init;
ifp->if_baudrate = 10000000;
IFQ_SET_MAXLEN(&ifp->if_snd, 50);
ifp->if_snd.ifq_drv_maxlen = 25;
IFQ_SET_READY(&ifp->if_snd);
ifp->if_capenable = ifp->if_capabilities;
ifp->if_hwassist = sc->ndis_hwassist;
ifmedia_init(&sc->ifmedia, IFM_IMASK, ndis_ifmedia_upd,
ndis_ifmedia_sts);
ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
ether_ifattach(ifp, eaddr);
return (0);
}
static struct ieee80211vap *
ndis_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ndis_vap *nvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
nvp = malloc(sizeof(struct ndis_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &nvp->vap;
ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override with driver methods */
nvp->newstate = vap->iv_newstate;
vap->iv_newstate = ndis_newstate;
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change, ndis_media_status,
mac);
ic->ic_opmode = opmode;
/* install key handing routines */
vap->iv_key_set = ndis_add_key;
vap->iv_key_delete = ndis_del_key;
return vap;
}
static void
ndis_vap_delete(struct ieee80211vap *vap)
{
struct ndis_vap *nvp = NDIS_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct ndis_softc *sc = ic->ic_softc;
ndis_stop(sc);
callout_drain(&sc->ndis_scan_callout);
ieee80211_vap_detach(vap);
free(nvp, M_80211_VAP);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
int
ndis_detach(device_t dev)
{
struct ifnet *ifp;
struct ndis_softc *sc;
driver_object *drv;
sc = device_get_softc(dev);
NDIS_LOCK(sc);
if (!sc->ndis_80211)
ifp = sc->ifp;
else
ifp = NULL;
if (ifp != NULL)
ifp->if_flags &= ~IFF_UP;
if (device_is_attached(dev)) {
NDIS_UNLOCK(sc);
ndis_stop(sc);
if (sc->ndis_80211)
ieee80211_ifdetach(&sc->ndis_ic);
else if (ifp != NULL)
ether_ifdetach(ifp);
} else
NDIS_UNLOCK(sc);
if (sc->ndis_tickitem != NULL)
IoFreeWorkItem(sc->ndis_tickitem);
if (sc->ndis_startitem != NULL)
IoFreeWorkItem(sc->ndis_startitem);
if (sc->ndis_resetitem != NULL)
IoFreeWorkItem(sc->ndis_resetitem);
if (sc->ndis_inputitem != NULL)
IoFreeWorkItem(sc->ndis_inputitem);
if (sc->ndisusb_xferdoneitem != NULL)
IoFreeWorkItem(sc->ndisusb_xferdoneitem);
if (sc->ndisusb_taskitem != NULL)
IoFreeWorkItem(sc->ndisusb_taskitem);
bus_generic_detach(dev);
ndis_unload_driver(sc);
if (sc->ndis_irq)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ndis_irq);
if (sc->ndis_res_io)
bus_release_resource(dev, SYS_RES_IOPORT,
sc->ndis_io_rid, sc->ndis_res_io);
if (sc->ndis_res_mem)
bus_release_resource(dev, SYS_RES_MEMORY,
sc->ndis_mem_rid, sc->ndis_res_mem);
if (sc->ndis_res_altmem)
bus_release_resource(dev, SYS_RES_MEMORY,
sc->ndis_altmem_rid, sc->ndis_res_altmem);
if (ifp != NULL)
if_free(ifp);
if (sc->ndis_iftype == PCMCIABus)
ndis_free_amem(sc);
if (sc->ndis_sc)
ndis_destroy_dma(sc);
if (sc->ndis_txarray)
free(sc->ndis_txarray, M_DEVBUF);
if (!sc->ndis_80211)
ifmedia_removeall(&sc->ifmedia);
if (sc->ndis_txpool != NULL)
NdisFreePacketPool(sc->ndis_txpool);
/* Destroy the PDO for this device. */
if (sc->ndis_iftype == PCIBus)
drv = windrv_lookup(0, "PCI Bus");
else if (sc->ndis_iftype == PCMCIABus)
drv = windrv_lookup(0, "PCCARD Bus");
else
drv = windrv_lookup(0, "USB Bus");
if (drv == NULL)
panic("couldn't find driver object");
windrv_destroy_pdo(drv, dev);
if (sc->ndis_iftype == PCIBus)
bus_dma_tag_destroy(sc->ndis_parent_tag);
return (0);
}
int
ndis_suspend(dev)
device_t dev;
{
struct ndis_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->ifp;
#ifdef notdef
if (NDIS_INITIALIZED(sc))
ndis_stop(sc);
#endif
return (0);
}
int
ndis_resume(dev)
device_t dev;
{
struct ndis_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->ifp;
if (NDIS_INITIALIZED(sc))
ndis_init(sc);
return (0);
}
/*
* The following bunch of routines are here to support drivers that
* use the NdisMEthIndicateReceive()/MiniportTransferData() mechanism.
* The NdisMEthIndicateReceive() handler runs at DISPATCH_LEVEL for
* serialized miniports, or IRQL <= DISPATCH_LEVEL for deserialized
* miniports.
*/
static void
ndis_rxeof_eth(adapter, ctx, addr, hdr, hdrlen, lookahead, lookaheadlen, pktlen)
ndis_handle adapter;
ndis_handle ctx;
char *addr;
void *hdr;
uint32_t hdrlen;
void *lookahead;
uint32_t lookaheadlen;
uint32_t pktlen;
{
ndis_miniport_block *block;
uint8_t irql = 0;
uint32_t status;
ndis_buffer *b;
ndis_packet *p;
struct mbuf *m;
ndis_ethpriv *priv;
block = adapter;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return;
/* Save the data provided to us so far. */
m->m_len = lookaheadlen + hdrlen;
m->m_pkthdr.len = pktlen + hdrlen;
m->m_next = NULL;
m_copyback(m, 0, hdrlen, hdr);
m_copyback(m, hdrlen, lookaheadlen, lookahead);
/* Now create a fake NDIS_PACKET to hold the data */
NdisAllocatePacket(&status, &p, block->nmb_rxpool);
if (status != NDIS_STATUS_SUCCESS) {
m_freem(m);
return;
}
p->np_m0 = m;
b = IoAllocateMdl(m->m_data, m->m_pkthdr.len, FALSE, FALSE, NULL);
if (b == NULL) {
NdisFreePacket(p);
m_freem(m);
return;
}
p->np_private.npp_head = p->np_private.npp_tail = b;
p->np_private.npp_totlen = m->m_pkthdr.len;
/* Save the packet RX context somewhere. */
priv = (ndis_ethpriv *)&p->np_protocolreserved;
priv->nep_ctx = ctx;
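/*
* Queue the partial packet; ndis_rxeof_done() will schedule a DPC
* that runs ndis_rxeof_xfr() to transfer the remaining data.
*/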
if (!NDIS_SERIALIZED(block))
KeAcquireSpinLock(&block->nmb_lock, &irql);
InsertTailList((&block->nmb_packetlist), (&p->np_list));
if (!NDIS_SERIALIZED(block))
KeReleaseSpinLock(&block->nmb_lock, irql);
}
/*
* NdisMEthIndicateReceiveComplete() handler, runs at DISPATCH_LEVEL
* for serialized miniports, or IRQL <= DISPATCH_LEVEL for deserialized
* miniports.
*/
static void
ndis_rxeof_done(adapter)
ndis_handle adapter;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
block = adapter;
/* Schedule transfer/RX of queued packets. */
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
KeInsertQueueDpc(&sc->ndis_rxdpc, NULL, NULL);
}
/*
* MiniportTransferData() handler, runs at DISPATCH_LEVEL.
*/
static void
ndis_rxeof_xfr(dpc, adapter, sysarg1, sysarg2)
kdpc *dpc;
ndis_handle adapter;
void *sysarg1;
void *sysarg2;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
ndis_packet *p;
list_entry *l;
uint32_t status;
ndis_ethpriv *priv;
struct ifnet *ifp;
struct mbuf *m;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
KeAcquireSpinLockAtDpcLevel(&block->nmb_lock);
l = block->nmb_packetlist.nle_flink;
while (!IsListEmpty(&block->nmb_packetlist)) {
l = RemoveHeadList((&block->nmb_packetlist));
p = CONTAINING_RECORD(l, ndis_packet, np_list);
InitializeListHead((&p->np_list));
priv = (ndis_ethpriv *)&p->np_protocolreserved;
m = p->np_m0;
p->np_softc = sc;
p->np_m0 = NULL;
KeReleaseSpinLockFromDpcLevel(&block->nmb_lock);
status = MSCALL6(sc->ndis_chars->nmc_transferdata_func,
p, &p->np_private.npp_totlen, block, priv->nep_ctx,
m->m_len, m->m_pkthdr.len - m->m_len);
KeAcquireSpinLockAtDpcLevel(&block->nmb_lock);
/*
* If status is NDIS_STATUS_PENDING, do nothing and
* wait for a callback to the ndis_rxeof_xfr_done()
* handler.
*/
m->m_len = m->m_pkthdr.len;
m->m_pkthdr.rcvif = ifp;
if (status == NDIS_STATUS_SUCCESS) {
IoFreeMdl(p->np_private.npp_head);
NdisFreePacket(p);
KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock);
mbufq_enqueue(&sc->ndis_rxqueue, m);
KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock);
IoQueueWorkItem(sc->ndis_inputitem,
(io_workitem_func)ndis_inputtask_wrap,
WORKQUEUE_CRITICAL, sc);
}
if (status == NDIS_STATUS_FAILURE)
m_freem(m);
/* Advance to next packet */
l = block->nmb_packetlist.nle_flink;
}
KeReleaseSpinLockFromDpcLevel(&block->nmb_lock);
}
/*
* NdisMTransferDataComplete() handler, runs at DISPATCH_LEVEL.
*/
static void
ndis_rxeof_xfr_done(adapter, packet, status, len)
ndis_handle adapter;
ndis_packet *packet;
uint32_t status;
uint32_t len;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
m = packet->np_m0;
IoFreeMdl(packet->np_private.npp_head);
NdisFreePacket(packet);
if (status != NDIS_STATUS_SUCCESS) {
m_freem(m);
return;
}
m->m_len = m->m_pkthdr.len;
m->m_pkthdr.rcvif = ifp;
KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock);
mbufq_enqueue(&sc->ndis_rxqueue, m);
KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock);
IoQueueWorkItem(sc->ndis_inputitem,
(io_workitem_func)ndis_inputtask_wrap,
WORKQUEUE_CRITICAL, sc);
}
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*
* When handling received NDIS packets, the 'status' field in the
* out-of-band portion of the ndis_packet has special meaning. In the
* most common case, the underlying NDIS driver will set this field
* to NDIS_STATUS_SUCCESS, which indicates that it's ok for us to
* take possession of it. We then change the status field to
* NDIS_STATUS_PENDING to tell the driver that we now own the packet,
* and that we will return it at some point in the future via the
* return packet handler.
*
* If the driver hands us a packet with a status of NDIS_STATUS_RESOURCES,
* this means the driver is running out of packet/buffer resources and
* wants to maintain ownership of the packet. In this case, we have to
* copy the packet data into local storage and let the driver keep the
* packet.
*/
static void
ndis_rxeof(adapter, packets, pktcnt)
ndis_handle adapter;
ndis_packet **packets;
uint32_t pktcnt;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
ndis_packet *p;
uint32_t s;
ndis_tcpip_csum *csum;
struct ifnet *ifp;
struct mbuf *m0, *m;
int i;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
/*
* There's a slim chance the driver may indicate some packets
* before we're completely ready to handle them. If we detect this,
* we need to return them to the miniport and ignore them.
*/
if (!sc->ndis_running) {
for (i = 0; i < pktcnt; i++) {
p = packets[i];
if (p->np_oob.npo_status == NDIS_STATUS_SUCCESS) {
p->np_refcnt++;
ndis_return_packet(p);
}
}
return;
}
for (i = 0; i < pktcnt; i++) {
p = packets[i];
/* Stash the softc here so ptom can use it. */
p->np_softc = sc;
if (ndis_ptom(&m0, p)) {
device_printf(sc->ndis_dev, "ptom failed\n");
if (p->np_oob.npo_status == NDIS_STATUS_SUCCESS)
ndis_return_packet(p);
} else {
#ifdef notdef
if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES) {
m = m_dup(m0, M_NOWAIT);
/*
* NOTE: we want to destroy the mbuf here, but
* we don't actually want to return it to the
* driver via the return packet handler. By
* bumping np_refcnt, we can prevent the
* ndis_return_packet() routine from actually
* doing anything.
*/
p->np_refcnt++;
m_freem(m0);
if (m == NULL)
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
else
m0 = m;
} else
p->np_oob.npo_status = NDIS_STATUS_PENDING;
#endif
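/*
* Always copy the indicated packet into a fresh mbuf chain; if the
* driver is short on resources, bump np_refcnt so the original is
* not returned to it, otherwise mark it pending and hand it back
* via the return packet handler later.
*/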
m = m_dup(m0, M_NOWAIT);
if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES)
p->np_refcnt++;
else
p->np_oob.npo_status = NDIS_STATUS_PENDING;
m_freem(m0);
if (m == NULL) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
continue;
}
m0 = m;
m0->m_pkthdr.rcvif = ifp;
/* Deal with checksum offload; the csum info is passed by value in the npe_info[] pointer slot. */
if (ifp->if_capenable & IFCAP_RXCSUM &&
p->np_ext.npe_info[ndis_tcpipcsum_info] != NULL) {
s = (uintptr_t)
p->np_ext.npe_info[ndis_tcpipcsum_info];
csum = (ndis_tcpip_csum *)&s;
if (csum->u.ntc_rxflags &
NDIS_RXCSUM_IP_PASSED)
m0->m_pkthdr.csum_flags |=
CSUM_IP_CHECKED|CSUM_IP_VALID;
if (csum->u.ntc_rxflags &
(NDIS_RXCSUM_TCP_PASSED |
NDIS_RXCSUM_UDP_PASSED)) {
m0->m_pkthdr.csum_flags |=
CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
m0->m_pkthdr.csum_data = 0xFFFF;
}
}
KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock);
mbufq_enqueue(&sc->ndis_rxqueue, m0);
KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock);
IoQueueWorkItem(sc->ndis_inputitem,
(io_workitem_func)ndis_inputtask_wrap,
WORKQUEUE_CRITICAL, sc);
}
}
}
/*
* This routine is run at PASSIVE_LEVEL. We use this routine to pass
* packets into the stack in order to avoid calling (*ifp->if_input)()
* with any locks held (at DISPATCH_LEVEL, we'll be holding the
* 'dispatch level' per-cpu sleep lock).
*/
static void
ndis_inputtask(device_object *dobj, void *arg)
{
ndis_miniport_block *block;
struct ndis_softc *sc = arg;
struct mbuf *m;
uint8_t irql;
block = dobj->do_devext;
KeAcquireSpinLock(&sc->ndis_rxlock, &irql);
while ((m = mbufq_dequeue(&sc->ndis_rxqueue)) != NULL) {
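/* Drop the RX lock while the packet is handed up the stack. */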
KeReleaseSpinLock(&sc->ndis_rxlock, irql);
if (sc->ndis_80211 != 0) {
struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
if (vap != NULL)
vap->iv_deliver_data(vap, vap->iv_bss, m);
} else {
struct ifnet *ifp = sc->ifp;
(*ifp->if_input)(ifp, m);
}
KeAcquireSpinLock(&sc->ndis_rxlock, &irql);
}
KeReleaseSpinLock(&sc->ndis_rxlock, irql);
}
/*
* A frame was downloaded to the chip. It's safe for us to clean up
* the list buffers.
*/
static void
ndis_txeof(adapter, packet, status)
ndis_handle adapter;
ndis_packet *packet;
ndis_status status;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
struct ifnet *ifp;
int idx;
struct mbuf *m;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
m = packet->np_m0;
idx = packet->np_txidx;
if (sc->ndis_sc)
bus_dmamap_unload(sc->ndis_ttag, sc->ndis_tmaps[idx]);
ndis_free_packet(packet);
m_freem(m);
NDIS_LOCK(sc);
sc->ndis_txarray[idx] = NULL;
sc->ndis_txpending++;
if (!sc->ndis_80211) {
struct ifnet *ifp = sc->ifp;
if (status == NDIS_STATUS_SUCCESS)
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
else
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
sc->ndis_tx_timer = 0;
NDIS_UNLOCK(sc);
if (!sc->ndis_80211)
IoQueueWorkItem(sc->ndis_startitem,
(io_workitem_func)ndis_ifstarttask_wrap,
WORKQUEUE_CRITICAL, sc);
DPRINTF(("%s: ndis_ifstarttask_wrap sc=%p\n", __func__, sc));
}
static void
ndis_linksts(adapter, status, sbuf, slen)
ndis_handle adapter;
ndis_status status;
void *sbuf;
uint32_t slen;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
sc->ndis_sts = status;
/* Event list is all full up, drop this one. */
NDIS_LOCK(sc);
if (sc->ndis_evt[sc->ndis_evtpidx].ne_sts) {
NDIS_UNLOCK(sc);
return;
}
/* Cache the event. */
if (slen) {
sc->ndis_evt[sc->ndis_evtpidx].ne_buf = malloc(slen,
M_TEMP, M_NOWAIT);
if (sc->ndis_evt[sc->ndis_evtpidx].ne_buf == NULL) {
NDIS_UNLOCK(sc);
return;
}
bcopy((char *)sbuf,
sc->ndis_evt[sc->ndis_evtpidx].ne_buf, slen);
}
sc->ndis_evt[sc->ndis_evtpidx].ne_sts = status;
sc->ndis_evt[sc->ndis_evtpidx].ne_len = slen;
NDIS_EVTINC(sc->ndis_evtpidx);
NDIS_UNLOCK(sc);
}
static void
ndis_linksts_done(adapter)
ndis_handle adapter;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ifnet *ifp;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
if (!NDIS_INITIALIZED(sc))
return;
switch (sc->ndis_sts) {
case NDIS_STATUS_MEDIA_CONNECT:
IoQueueWorkItem(sc->ndis_tickitem,
(io_workitem_func)ndis_ticktask_wrap,
WORKQUEUE_CRITICAL, sc);
if (!sc->ndis_80211)
IoQueueWorkItem(sc->ndis_startitem,
(io_workitem_func)ndis_ifstarttask_wrap,
WORKQUEUE_CRITICAL, sc);
break;
case NDIS_STATUS_MEDIA_DISCONNECT:
if (sc->ndis_link)
IoQueueWorkItem(sc->ndis_tickitem,
(io_workitem_func)ndis_ticktask_wrap,
WORKQUEUE_CRITICAL, sc);
break;
default:
break;
}
}
static void
ndis_tick(xsc)
void *xsc;
{
struct ndis_softc *sc;
sc = xsc;
if (sc->ndis_hang_timer && --sc->ndis_hang_timer == 0) {
IoQueueWorkItem(sc->ndis_tickitem,
(io_workitem_func)ndis_ticktask_wrap,
WORKQUEUE_CRITICAL, sc);
sc->ndis_hang_timer = sc->ndis_block->nmb_checkforhangsecs;
}
if (sc->ndis_tx_timer && --sc->ndis_tx_timer == 0) {
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
device_printf(sc->ndis_dev, "watchdog timeout\n");
IoQueueWorkItem(sc->ndis_resetitem,
(io_workitem_func)ndis_resettask_wrap,
WORKQUEUE_CRITICAL, sc);
if (!sc->ndis_80211)
IoQueueWorkItem(sc->ndis_startitem,
(io_workitem_func)ndis_ifstarttask_wrap,
WORKQUEUE_CRITICAL, sc);
}
callout_reset(&sc->ndis_stat_callout, hz, ndis_tick, sc);
}
static void
ndis_ticktask(device_object *d, void *xsc)
{
struct ndis_softc *sc = xsc;
ndis_checkforhang_handler hangfunc;
uint8_t rval;
NDIS_LOCK(sc);
if (!NDIS_INITIALIZED(sc)) {
NDIS_UNLOCK(sc);
return;
}
NDIS_UNLOCK(sc);
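/*
* Invoke the miniport's CheckForHang handler, if it has one, and
* reset the NIC if it reports that the device is wedged.
*/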
hangfunc = sc->ndis_chars->nmc_checkhang_func;
if (hangfunc != NULL) {
rval = MSCALL1(hangfunc,
sc->ndis_block->nmb_miniportadapterctx);
if (rval == TRUE) {
ndis_reset_nic(sc);
return;
}
}
NDIS_LOCK(sc);
if (sc->ndis_link == 0 &&
sc->ndis_sts == NDIS_STATUS_MEDIA_CONNECT) {
sc->ndis_link = 1;
if (sc->ndis_80211 != 0) {
struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
if (vap != NULL) {
NDIS_UNLOCK(sc);
ndis_getstate_80211(sc);
ieee80211_new_state(vap, IEEE80211_S_RUN, -1);
NDIS_LOCK(sc);
if_link_state_change(vap->iv_ifp,
LINK_STATE_UP);
}
} else
if_link_state_change(sc->ifp, LINK_STATE_UP);
}
if (sc->ndis_link == 1 &&
sc->ndis_sts == NDIS_STATUS_MEDIA_DISCONNECT) {
sc->ndis_link = 0;
if (sc->ndis_80211 != 0) {
struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
if (vap != NULL) {
NDIS_UNLOCK(sc);
ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
NDIS_LOCK(sc);
if_link_state_change(vap->iv_ifp,
LINK_STATE_DOWN);
}
} else
if_link_state_change(sc->ifp, LINK_STATE_DOWN);
}
NDIS_UNLOCK(sc);
}
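/*
* Busdma callback used at transmit time: translate the mbuf's DMA
* segments into the NDIS scatter/gather list handed to the miniport.
*/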
static void
ndis_map_sclist(arg, segs, nseg, mapsize, error)
void *arg;
bus_dma_segment_t *segs;
int nseg;
bus_size_t mapsize;
int error;
{
struct ndis_sc_list *sclist;
int i;
if (error || arg == NULL)
return;
sclist = arg;
sclist->nsl_frags = nseg;
for (i = 0; i < nseg; i++) {
sclist->nsl_elements[i].nse_addr.np_quad = segs[i].ds_addr;
sclist->nsl_elements[i].nse_len = segs[i].ds_len;
}
}
static int
ndis_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
/* no support; just discard */
m_freem(m);
ieee80211_free_node(ni);
return (0);
}
static void
ndis_update_mcast(struct ieee80211com *ic)
{
struct ndis_softc *sc = ic->ic_softc;
ndis_setmulti(sc);
}
static void
ndis_update_promisc(struct ieee80211com *ic)
{
/* not supported */
}
static void
ndis_ifstarttask(device_object *d, void *arg)
{
struct ndis_softc *sc = arg;
DPRINTF(("%s: sc=%p, ifp=%p\n", __func__, sc, sc->ifp));
if (sc->ndis_80211)
return;
struct ifnet *ifp = sc->ifp;
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ndis_ifstart(ifp);
}
/*
* Main transmit routine. To make NDIS drivers happy, we need to
* transform mbuf chains into NDIS packets and feed them to the
* send packet routines. Most drivers allow you to send several
* packets at once (up to the maxpkts limit). Unfortunately, rather
* than accepting them in the form of a linked list, they expect
* a contiguous array of pointers to packets.
*
* For those drivers which use the NDIS scatter/gather DMA mechanism,
* we need to perform busdma work here. Those that use map registers
* will do the mapping themselves on a buffer by buffer basis.
*/
static void
ndis_ifstart(struct ifnet *ifp)
{
struct ndis_softc *sc;
struct mbuf *m = NULL;
ndis_packet **p0 = NULL, *p = NULL;
ndis_tcpip_csum *csum;
int pcnt = 0, status;
sc = ifp->if_softc;
NDIS_LOCK(sc);
if (!sc->ndis_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
NDIS_UNLOCK(sc);
return;
}
p0 = &sc->ndis_txarray[sc->ndis_txidx];
while (sc->ndis_txpending) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
if (m == NULL)
break;
NdisAllocatePacket(&status,
&sc->ndis_txarray[sc->ndis_txidx], sc->ndis_txpool);
if (status != NDIS_STATUS_SUCCESS)
break;
if (ndis_mtop(m, &sc->ndis_txarray[sc->ndis_txidx])) {
IFQ_DRV_PREPEND(&ifp->if_snd, m);
NDIS_UNLOCK(sc);
return;
}
/*
* Save pointer to original mbuf
* so we can free it later.
*/
p = sc->ndis_txarray[sc->ndis_txidx];
p->np_txidx = sc->ndis_txidx;
p->np_m0 = m;
p->np_oob.npo_status = NDIS_STATUS_PENDING;
/*
* Do scatter/gather processing, if driver requested it.
*/
if (sc->ndis_sc) {
bus_dmamap_load_mbuf(sc->ndis_ttag,
sc->ndis_tmaps[sc->ndis_txidx], m,
ndis_map_sclist, &p->np_sclist, BUS_DMA_NOWAIT);
bus_dmamap_sync(sc->ndis_ttag,
sc->ndis_tmaps[sc->ndis_txidx],
BUS_DMASYNC_PREREAD);
p->np_ext.npe_info[ndis_sclist_info] = &p->np_sclist;
}
/* Handle checksum offload. */
if (ifp->if_capenable & IFCAP_TXCSUM &&
m->m_pkthdr.csum_flags) {
csum = (ndis_tcpip_csum *)
&p->np_ext.npe_info[ndis_tcpipcsum_info];
csum->u.ntc_txflags = NDIS_TXCSUM_DO_IPV4;
if (m->m_pkthdr.csum_flags & CSUM_IP)
csum->u.ntc_txflags |= NDIS_TXCSUM_DO_IP;
if (m->m_pkthdr.csum_flags & CSUM_TCP)
csum->u.ntc_txflags |= NDIS_TXCSUM_DO_TCP;
if (m->m_pkthdr.csum_flags & CSUM_UDP)
csum->u.ntc_txflags |= NDIS_TXCSUM_DO_UDP;
p->np_private.npp_flags = NDIS_PROTOCOL_ID_TCP_IP;
}
NDIS_INC(sc);
sc->ndis_txpending--;
pcnt++;
/*
* If there's a BPF listener, bounce a copy of this frame
* to him.
*/
if (!sc->ndis_80211) /* XXX handle 80211 */
BPF_MTAP(ifp, m);
/*
* The array that p0 points to must appear contiguous,
* so we must not wrap past the end of sc->ndis_txarray[].
* If it looks like we're about to wrap, break out here
* so this batch of packets can be transmitted, then
* wait for txeof to ask us to send the rest.
*/
if (sc->ndis_txidx == 0)
break;
}
if (pcnt == 0) {
NDIS_UNLOCK(sc);
return;
}
if (sc->ndis_txpending == 0)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
* Set a timeout in case the chip goes out to lunch.
*/
sc->ndis_tx_timer = 5;
NDIS_UNLOCK(sc);
/*
* According to NDIS documentation, if a driver exports
* a MiniportSendPackets() routine, we prefer that over
* a MiniportSend() routine (which sends just a single
* packet).
*/
if (sc->ndis_chars->nmc_sendmulti_func != NULL)
ndis_send_packets(sc, p0, pcnt);
else
ndis_send_packet(sc, p);
return;
}
static int
ndis_80211transmit(struct ieee80211com *ic, struct mbuf *m)
{
struct ndis_softc *sc = ic->ic_softc;
ndis_packet **p0 = NULL, *p = NULL;
int status;
NDIS_LOCK(sc);
if (!sc->ndis_link || !sc->ndis_running) {
NDIS_UNLOCK(sc);
return (ENXIO);
}
if (sc->ndis_txpending == 0) {
NDIS_UNLOCK(sc);
return (ENOBUFS);
}
p0 = &sc->ndis_txarray[sc->ndis_txidx];
NdisAllocatePacket(&status,
&sc->ndis_txarray[sc->ndis_txidx], sc->ndis_txpool);
if (status != NDIS_STATUS_SUCCESS) {
NDIS_UNLOCK(sc);
return (ENOBUFS);
}
if (ndis_mtop(m, &sc->ndis_txarray[sc->ndis_txidx])) {
NDIS_UNLOCK(sc);
return (ENOBUFS);
}
/*
* Save pointer to original mbuf
* so we can free it later.
*/
p = sc->ndis_txarray[sc->ndis_txidx];
p->np_txidx = sc->ndis_txidx;
p->np_m0 = m;
p->np_oob.npo_status = NDIS_STATUS_PENDING;
/*
* Do scatter/gather processing, if driver requested it.
*/
if (sc->ndis_sc) {
bus_dmamap_load_mbuf(sc->ndis_ttag,
sc->ndis_tmaps[sc->ndis_txidx], m,
ndis_map_sclist, &p->np_sclist, BUS_DMA_NOWAIT);
bus_dmamap_sync(sc->ndis_ttag,
sc->ndis_tmaps[sc->ndis_txidx],
BUS_DMASYNC_PREREAD);
p->np_ext.npe_info[ndis_sclist_info] = &p->np_sclist;
}
NDIS_INC(sc);
sc->ndis_txpending--;
/*
* Set a timeout in case the chip goes out to lunch.
*/
sc->ndis_tx_timer = 5;
NDIS_UNLOCK(sc);
/*
* According to NDIS documentation, if a driver exports
* a MiniportSendPackets() routine, we prefer that over
* a MiniportSend() routine (which sends just a single
* packet).
*/
if (sc->ndis_chars->nmc_sendmulti_func != NULL)
ndis_send_packets(sc, p0, 1);
else
ndis_send_packet(sc, p);
return (0);
}
static void
ndis_80211parent(struct ieee80211com *ic)
{
struct ndis_softc *sc = ic->ic_softc;
/*NDIS_LOCK(sc);*/
if (ic->ic_nrunning > 0) {
if (!sc->ndis_running)
ndis_init(sc);
} else if (sc->ndis_running)
ndis_stop(sc);
/*NDIS_UNLOCK(sc);*/
}
static void
ndis_init(void *xsc)
{
struct ndis_softc *sc = xsc;
int i, len, error;
/*
* Avoid reinitializing the link unnecessarily.
* This should be dealt with in a better way by
* fixing the upper layer modules so they don't
* call ifp->if_init() quite as often.
*/
if (sc->ndis_link)
return;
/*
* Cancel pending I/O and free all RX/TX buffers.
*/
ndis_stop(sc);
if (!(sc->ndis_iftype == PNPBus && ndisusb_halt == 0)) {
error = ndis_init_nic(sc);
if (error != 0) {
device_printf(sc->ndis_dev,
"failed to initialize the device: %d\n", error);
return;
}
}
/* Program the packet filter */
sc->ndis_filter = NDIS_PACKET_TYPE_DIRECTED |
NDIS_PACKET_TYPE_BROADCAST;
if (sc->ndis_80211) {
struct ieee80211com *ic = &sc->ndis_ic;
if (ic->ic_promisc > 0)
sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS;
} else {
struct ifnet *ifp = sc->ifp;
if (ifp->if_flags & IFF_PROMISC)
sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS;
}
len = sizeof(sc->ndis_filter);
error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &len);
if (error)
device_printf(sc->ndis_dev, "set filter failed: %d\n", error);
/*
* Set lookahead.
*/
if (sc->ndis_80211)
i = ETHERMTU;
else
i = sc->ifp->if_mtu;
len = sizeof(i);
ndis_set_info(sc, OID_GEN_CURRENT_LOOKAHEAD, &i, &len);
/*
* Program the multicast filter, if necessary.
*/
ndis_setmulti(sc);
/* Setup task offload. */
ndis_set_offload(sc);
NDIS_LOCK(sc);
sc->ndis_txidx = 0;
sc->ndis_txpending = sc->ndis_maxpkts;
sc->ndis_link = 0;
if (!sc->ndis_80211) {
if_link_state_change(sc->ifp, LINK_STATE_UNKNOWN);
sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
sc->ndis_tx_timer = 0;
/*
* Some drivers don't set this value. The NDIS spec says
* the default checkforhang timeout is "approximately 2
* seconds." We use 3 seconds, because it seems for some
* drivers, exactly 2 seconds is too fast.
*/
if (sc->ndis_block->nmb_checkforhangsecs == 0)
sc->ndis_block->nmb_checkforhangsecs = 3;
sc->ndis_hang_timer = sc->ndis_block->nmb_checkforhangsecs;
callout_reset(&sc->ndis_stat_callout, hz, ndis_tick, sc);
sc->ndis_running = 1;
NDIS_UNLOCK(sc);
/* XXX force handling */
if (sc->ndis_80211)
ieee80211_start_all(&sc->ndis_ic); /* start all vap's */
}
/*
* Set media options.
*/
static int
ndis_ifmedia_upd(ifp)
struct ifnet *ifp;
{
struct ndis_softc *sc;
sc = ifp->if_softc;
if (NDIS_INITIALIZED(sc))
ndis_init(sc);
return (0);
}
/*
* Report current media status.
*/
static void
ndis_ifmedia_sts(ifp, ifmr)
struct ifnet *ifp;
struct ifmediareq *ifmr;
{
struct ndis_softc *sc;
uint32_t media_info;
ndis_media_state linkstate;
int len;
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
sc = ifp->if_softc;
if (!NDIS_INITIALIZED(sc))
return;
len = sizeof(linkstate);
ndis_get_info(sc, OID_GEN_MEDIA_CONNECT_STATUS,
(void *)&linkstate, &len);
len = sizeof(media_info);
ndis_get_info(sc, OID_GEN_LINK_SPEED,
(void *)&media_info, &len);
if (linkstate == nmc_connected)
ifmr->ifm_status |= IFM_ACTIVE;
switch (media_info) {
case 100000:
ifmr->ifm_active |= IFM_10_T;
break;
case 1000000:
ifmr->ifm_active |= IFM_100_TX;
break;
case 10000000:
ifmr->ifm_active |= IFM_1000_T;
break;
default:
device_printf(sc->ndis_dev, "unknown speed: %d\n", media_info);
break;
}
}
static int
ndis_set_cipher(struct ndis_softc *sc, int cipher)
{
struct ieee80211com *ic = &sc->ndis_ic;
int rval = 0, len;
uint32_t arg, save;
len = sizeof(arg);
if (cipher == WPA_CSE_WEP40 || cipher == WPA_CSE_WEP104) {
if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_WEP))
return (ENOTSUP);
arg = NDIS_80211_WEPSTAT_ENC1ENABLED;
}
if (cipher == WPA_CSE_TKIP) {
if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP))
return (ENOTSUP);
arg = NDIS_80211_WEPSTAT_ENC2ENABLED;
}
if (cipher == WPA_CSE_CCMP) {
if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_AES_CCM))
return (ENOTSUP);
arg = NDIS_80211_WEPSTAT_ENC3ENABLED;
}
DPRINTF(("Setting cipher to %d\n", arg));
save = arg;
rval = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len);
if (rval)
return (rval);
/* Check that the cipher was set correctly. */
len = sizeof(save);
rval = ndis_get_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len);
if (rval != 0 || arg != save)
return (ENODEV);
return (0);
}
/*
* WPA is hairy to set up. Do the work in a separate routine
* so we don't clutter the setstate function too much.
* Important yet undocumented fact: first we have to set the
* authentication mode, _then_ we enable the ciphers. If one
* of the WPA authentication modes isn't enabled, the driver
* might not permit the TKIP or AES ciphers to be selected.
*/
static int
ndis_set_wpa(sc, ie, ielen)
struct ndis_softc *sc;
void *ie;
int ielen;
{
struct ieee80211_ie_wpa *w;
struct ndis_ie *n;
char *pos;
uint32_t arg;
int i;
/*
* Apparently, the only way for us to know what ciphers
* and key management/authentication mode to use is for
* us to inspect the optional information element (IE)
* stored in the 802.11 state machine. This IE should be
* supplied by the WPA supplicant.
*/
w = (struct ieee80211_ie_wpa *)ie;
/* Check for the right kind of IE. */
if (w->wpa_id != IEEE80211_ELEMID_VENDOR) {
DPRINTF(("Incorrect IE type %d\n", w->wpa_id));
return (EINVAL);
}
/* Skip over the ucast cipher OIDs. */
pos = (char *)&w->wpa_uciphers[0];
pos += w->wpa_uciphercnt * sizeof(struct ndis_ie);
/* Skip over the authmode count. */
pos += sizeof(u_int16_t);
/*
* Check for the authentication modes. I'm
* pretty sure there's only supposed to be one.
*/
n = (struct ndis_ie *)pos;
if (n->ni_val == WPA_ASE_NONE)
arg = NDIS_80211_AUTHMODE_WPANONE;
if (n->ni_val == WPA_ASE_8021X_UNSPEC)
arg = NDIS_80211_AUTHMODE_WPA;
if (n->ni_val == WPA_ASE_8021X_PSK)
arg = NDIS_80211_AUTHMODE_WPAPSK;
DPRINTF(("Setting WPA auth mode to %d\n", arg));
i = sizeof(arg);
if (ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i))
return (ENOTSUP);
i = sizeof(arg);
ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i);
/* Now configure the desired ciphers. */
/* First, set up the multicast group cipher. */
n = (struct ndis_ie *)&w->wpa_mcipher[0];
if (ndis_set_cipher(sc, n->ni_val))
return (ENOTSUP);
/* Now start looking around for the unicast ciphers. */
pos = (char *)&w->wpa_uciphers[0];
n = (struct ndis_ie *)pos;
for (i = 0; i < w->wpa_uciphercnt; i++) {
if (ndis_set_cipher(sc, n->ni_val))
return (ENOTSUP);
n++;
}
return (0);
}
static void
ndis_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ndis_softc *sc = vap->iv_ic->ic_softc;
uint32_t txrate;
int len;
if (!NDIS_INITIALIZED(sc))
return;
len = sizeof(txrate);
if (ndis_get_info(sc, OID_GEN_LINK_SPEED, &txrate, &len) == 0)
vap->iv_bss->ni_txrate = txrate / 5000;
ieee80211_media_status(ifp, imr);
}
static void
ndis_setstate_80211(struct ndis_softc *sc)
{
struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
ndis_80211_macaddr bssid;
ndis_80211_config config;
int rval = 0, len;
uint32_t arg;
if (!NDIS_INITIALIZED(sc)) {
DPRINTF(("%s: NDIS not initialized\n", __func__));
return;
}
/* Disassociate and turn off radio. */
len = sizeof(arg);
arg = 1;
ndis_set_info(sc, OID_802_11_DISASSOCIATE, &arg, &len);
/* Set network infrastructure mode. */
len = sizeof(arg);
if (ic->ic_opmode == IEEE80211_M_IBSS)
arg = NDIS_80211_NET_INFRA_IBSS;
else
arg = NDIS_80211_NET_INFRA_BSS;
rval = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len);
if (rval)
device_printf(sc->ndis_dev, "set infra failed: %d\n", rval);
/* Set power management */
len = sizeof(arg);
if (vap->iv_flags & IEEE80211_F_PMGTON)
arg = NDIS_80211_POWERMODE_FAST_PSP;
else
arg = NDIS_80211_POWERMODE_CAM;
ndis_set_info(sc, OID_802_11_POWER_MODE, &arg, &len);
/* Set TX power; NDIS wants the level in milliwatts, so convert via the dBm2mW table. */
if ((ic->ic_caps & IEEE80211_C_TXPMGT) &&
ic->ic_txpowlimit < nitems(dBm2mW)) {
arg = dBm2mW[ic->ic_txpowlimit];
len = sizeof(arg);
ndis_set_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &len);
}
/*
* Default encryption mode to off, authentication
* to open and privacy to 'accept everything.'
*/
len = sizeof(arg);
arg = NDIS_80211_WEPSTAT_DISABLED;
ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len);
len = sizeof(arg);
arg = NDIS_80211_AUTHMODE_OPEN;
ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len);
/*
* Note that OID_802_11_PRIVACY_FILTER is optional:
* not all drivers implement it.
*/
len = sizeof(arg);
arg = NDIS_80211_PRIVFILT_8021XWEP;
ndis_set_info(sc, OID_802_11_PRIVACY_FILTER, &arg, &len);
len = sizeof(config);
bzero((char *)&config, len);
config.nc_length = len;
config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh);
rval = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &len);
/*
* Some drivers expect us to initialize these values, so
* provide some defaults.
*/
if (config.nc_beaconperiod == 0)
config.nc_beaconperiod = 100;
if (config.nc_atimwin == 0)
config.nc_atimwin = 100;
if (config.nc_fhconfig.ncf_dwelltime == 0)
config.nc_fhconfig.ncf_dwelltime = 200;
if (rval == 0 && ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
int chan, chanflag;
chan = ieee80211_chan2ieee(ic, ic->ic_bsschan);
chanflag = config.nc_dsconfig > 2500000 ? IEEE80211_CHAN_2GHZ :
IEEE80211_CHAN_5GHZ;
if (chan != ieee80211_mhz2ieee(config.nc_dsconfig / 1000, 0)) {
config.nc_dsconfig =
ic->ic_bsschan->ic_freq * 1000;
len = sizeof(config);
config.nc_length = len;
config.nc_fhconfig.ncf_length =
sizeof(ndis_80211_config_fh);
DPRINTF(("Setting channel to %ukHz\n", config.nc_dsconfig));
rval = ndis_set_info(sc, OID_802_11_CONFIGURATION,
&config, &len);
if (rval)
device_printf(sc->ndis_dev, "couldn't change "
"DS config to %ukHz: %d\n",
config.nc_dsconfig, rval);
}
} else if (rval)
device_printf(sc->ndis_dev, "couldn't retrieve "
"channel info: %d\n", rval);
/* Set the BSSID to our value so the driver doesn't associate */
len = IEEE80211_ADDR_LEN;
bcopy(vap->iv_myaddr, bssid, len);
DPRINTF(("Setting BSSID to %6D\n", (uint8_t *)&bssid, ":"));
rval = ndis_set_info(sc, OID_802_11_BSSID, &bssid, &len);
if (rval)
device_printf(sc->ndis_dev,
"setting BSSID failed: %d\n", rval);
}
static void
ndis_auth_and_assoc(struct ndis_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211_node *ni = vap->iv_bss;
ndis_80211_ssid ssid;
ndis_80211_macaddr bssid;
ndis_80211_wep wep;
int i, rval = 0, len, error;
uint32_t arg;
if (!NDIS_INITIALIZED(sc)) {
DPRINTF(("%s: NDIS not initialized\n", __func__));
return;
}
/* Initial setup */
ndis_setstate_80211(sc);
/* Set network infrastructure mode. */
len = sizeof(arg);
if (vap->iv_opmode == IEEE80211_M_IBSS)
arg = NDIS_80211_NET_INFRA_IBSS;
else
arg = NDIS_80211_NET_INFRA_BSS;
rval = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len);
if (rval)
device_printf(sc->ndis_dev, "set infra failed: %d\n", rval);
/* Set RTS threshold */
len = sizeof(arg);
arg = vap->iv_rtsthreshold;
ndis_set_info(sc, OID_802_11_RTS_THRESHOLD, &arg, &len);
/* Set fragmentation threshold */
len = sizeof(arg);
arg = vap->iv_fragthreshold;
ndis_set_info(sc, OID_802_11_FRAGMENTATION_THRESHOLD, &arg, &len);
/* Set WEP */
if (vap->iv_flags & IEEE80211_F_PRIVACY &&
!(vap->iv_flags & IEEE80211_F_WPA)) {
int keys_set = 0;
if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
len = sizeof(arg);
arg = NDIS_80211_AUTHMODE_SHARED;
DPRINTF(("Setting shared auth\n"));
ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE,
&arg, &len);
}
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
if (vap->iv_nw_keys[i].wk_keylen) {
if (vap->iv_nw_keys[i].wk_cipher->ic_cipher !=
IEEE80211_CIPHER_WEP)
continue;
bzero((char *)&wep, sizeof(wep));
wep.nw_keylen = vap->iv_nw_keys[i].wk_keylen;
/*
* 5, 13 and 16 are the only valid
* key lengths. Anything in between
* will be zero padded out to the
* next highest boundary.
*/
if (vap->iv_nw_keys[i].wk_keylen < 5)
wep.nw_keylen = 5;
else if (vap->iv_nw_keys[i].wk_keylen > 5 &&
vap->iv_nw_keys[i].wk_keylen < 13)
wep.nw_keylen = 13;
else if (vap->iv_nw_keys[i].wk_keylen > 13 &&
vap->iv_nw_keys[i].wk_keylen < 16)
wep.nw_keylen = 16;
wep.nw_keyidx = i;
wep.nw_length = (sizeof(uint32_t) * 3)
+ wep.nw_keylen;
if (i == vap->iv_def_txkey)
wep.nw_keyidx |= NDIS_80211_WEPKEY_TX;
bcopy(vap->iv_nw_keys[i].wk_key,
wep.nw_keydata, wep.nw_length);
len = sizeof(wep);
DPRINTF(("Setting WEP key %d\n", i));
rval = ndis_set_info(sc,
OID_802_11_ADD_WEP, &wep, &len);
if (rval)
device_printf(sc->ndis_dev,
"set wepkey failed: %d\n", rval);
keys_set++;
}
}
if (keys_set) {
DPRINTF(("Setting WEP on\n"));
arg = NDIS_80211_WEPSTAT_ENABLED;
len = sizeof(arg);
rval = ndis_set_info(sc,
OID_802_11_WEP_STATUS, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"enable WEP failed: %d\n", rval);
if (vap->iv_flags & IEEE80211_F_DROPUNENC)
arg = NDIS_80211_PRIVFILT_8021XWEP;
else
arg = NDIS_80211_PRIVFILT_ACCEPTALL;
len = sizeof(arg);
ndis_set_info(sc,
OID_802_11_PRIVACY_FILTER, &arg, &len);
}
}
/* Set up WPA. */
if ((vap->iv_flags & IEEE80211_F_WPA) &&
vap->iv_appie_assocreq != NULL) {
struct ieee80211_appie *ie = vap->iv_appie_assocreq;
error = ndis_set_wpa(sc, ie->ie_data, ie->ie_len);
if (error != 0)
device_printf(sc->ndis_dev, "WPA setup failed\n");
}
#ifdef notyet
/* Set network type. */
arg = 0;
switch (vap->iv_curmode) {
case IEEE80211_MODE_11A:
arg = NDIS_80211_NETTYPE_11OFDM5;
break;
case IEEE80211_MODE_11B:
arg = NDIS_80211_NETTYPE_11DS;
break;
case IEEE80211_MODE_11G:
arg = NDIS_80211_NETTYPE_11OFDM24;
break;
default:
device_printf(sc->ndis_dev, "unknown mode: %d\n",
vap->iv_curmode);
}
if (arg) {
DPRINTF(("Setting network type to %d\n", arg));
len = sizeof(arg);
rval = ndis_set_info(sc, OID_802_11_NETWORK_TYPE_IN_USE,
&arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"set nettype failed: %d\n", rval);
}
#endif
/*
* If the user selected a specific BSSID, try
* to use that one. This is useful in the case where
* there are several APs in range with the same network
* name. To delete the BSSID, we use the broadcast
* address as the BSSID.
* Note that some drivers seem to allow setting a BSSID
* in ad-hoc mode, which has the effect of forcing the
* NIC to create an ad-hoc cell with a specific BSSID,
* instead of a randomly chosen one. However, the net80211
* code makes the assumption that the BSSID setting is invalid
* when you're in ad-hoc mode, so we don't allow that here.
*/
len = IEEE80211_ADDR_LEN;
if (vap->iv_flags & IEEE80211_F_DESBSSID &&
vap->iv_opmode != IEEE80211_M_IBSS)
bcopy(ni->ni_bssid, bssid, len);
else
bcopy(ieee80211broadcastaddr, bssid, len);
DPRINTF(("Setting BSSID to %6D\n", (uint8_t *)&bssid, ":"));
rval = ndis_set_info(sc, OID_802_11_BSSID, &bssid, &len);
if (rval)
device_printf(sc->ndis_dev,
"setting BSSID failed: %d\n", rval);
/* Set SSID -- always do this last. */
#ifdef NDIS_DEBUG
if (ndis_debug > 0) {
printf("Setting ESSID to ");
ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
printf("\n");
}
#endif
len = sizeof(ssid);
bzero((char *)&ssid, len);
ssid.ns_ssidlen = ni->ni_esslen;
if (ssid.ns_ssidlen == 0) {
ssid.ns_ssidlen = 1;
} else
bcopy(ni->ni_essid, ssid.ns_ssid, ssid.ns_ssidlen);
rval = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len);
if (rval)
device_printf(sc->ndis_dev, "set ssid failed: %d\n", rval);
return;
}
static int
ndis_get_bssid_list(sc, bl)
struct ndis_softc *sc;
ndis_80211_bssid_list_ex **bl;
{
int len, error;
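/*
* Start with room for 16 BSSID entries; if the driver returns
* ENOSPC, len comes back with the required size and we retry the
* query once with a larger buffer.
*/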
len = sizeof(uint32_t) + (sizeof(ndis_wlan_bssid_ex) * 16);
*bl = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (*bl == NULL)
return (ENOMEM);
error = ndis_get_info(sc, OID_802_11_BSSID_LIST, *bl, &len);
if (error == ENOSPC) {
free(*bl, M_DEVBUF);
*bl = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (*bl == NULL)
return (ENOMEM);
error = ndis_get_info(sc, OID_802_11_BSSID_LIST, *bl, &len);
}
if (error) {
DPRINTF(("%s: failed to read\n", __func__));
free(*bl, M_DEVBUF);
return (error);
}
return (0);
}
static int
ndis_get_assoc(struct ndis_softc *sc, ndis_wlan_bssid_ex **assoc)
{
struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap;
struct ieee80211_node *ni;
ndis_80211_bssid_list_ex *bl;
ndis_wlan_bssid_ex *bs;
ndis_80211_macaddr bssid;
int i, len, error;
if (!sc->ndis_link)
return (ENOENT);
len = sizeof(bssid);
error = ndis_get_info(sc, OID_802_11_BSSID, &bssid, &len);
if (error) {
device_printf(sc->ndis_dev, "failed to get bssid\n");
return (ENOENT);
}
vap = TAILQ_FIRST(&ic->ic_vaps);
ni = vap->iv_bss;
error = ndis_get_bssid_list(sc, &bl);
if (error)
return (error);
bs = (ndis_wlan_bssid_ex *)&bl->nblx_bssid[0];
for (i = 0; i < bl->nblx_items; i++) {
if (bcmp(bs->nwbx_macaddr, bssid, sizeof(bssid)) == 0) {
*assoc = malloc(bs->nwbx_len, M_TEMP, M_NOWAIT);
if (*assoc == NULL) {
free(bl, M_TEMP);
return (ENOMEM);
}
bcopy((char *)bs, (char *)*assoc, bs->nwbx_len);
free(bl, M_TEMP);
if (ic->ic_opmode == IEEE80211_M_STA)
ni->ni_associd = 1 | 0xc000; /* fake associd */
return (0);
}
bs = (ndis_wlan_bssid_ex *)((char *)bs + bs->nwbx_len);
}
free(bl, M_TEMP);
return (ENOENT);
}
static void
ndis_getstate_80211(struct ndis_softc *sc)
{
struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni = vap->iv_bss;
ndis_wlan_bssid_ex *bs;
int rval, len, i = 0;
int chanflag;
uint32_t arg;
if (!NDIS_INITIALIZED(sc))
return;
if ((rval = ndis_get_assoc(sc, &bs)) != 0)
return;
/* We're associated, retrieve info on the current bssid. */
ic->ic_curmode = ndis_nettype_mode(bs->nwbx_nettype);
chanflag = ndis_nettype_chan(bs->nwbx_nettype);
IEEE80211_ADDR_COPY(ni->ni_bssid, bs->nwbx_macaddr);
/* Get SSID from current association info. */
bcopy(bs->nwbx_ssid.ns_ssid, ni->ni_essid,
bs->nwbx_ssid.ns_ssidlen);
ni->ni_esslen = bs->nwbx_ssid.ns_ssidlen;
if (ic->ic_caps & IEEE80211_C_PMGT) {
len = sizeof(arg);
rval = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"get power mode failed: %d\n", rval);
if (arg == NDIS_80211_POWERMODE_CAM)
vap->iv_flags &= ~IEEE80211_F_PMGTON;
else
vap->iv_flags |= IEEE80211_F_PMGTON;
}
/* Get TX power */
if (ic->ic_caps & IEEE80211_C_TXPMGT) {
len = sizeof(arg);
ndis_get_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &len);
for (i = 0; i < nitems(dBm2mW); i++)
if (dBm2mW[i] >= arg)
break;
ic->ic_txpowlimit = i;
}
/*
* Use the current association information to reflect
* what channel we're on.
*/
ic->ic_curchan = ieee80211_find_channel(ic,
bs->nwbx_config.nc_dsconfig / 1000, chanflag);
if (ic->ic_curchan == NULL)
ic->ic_curchan = &ic->ic_channels[0];
ni->ni_chan = ic->ic_curchan;
ic->ic_bsschan = ic->ic_curchan;
free(bs, M_TEMP);
/*
* Determine current authentication mode.
*/
len = sizeof(arg);
rval = ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"get authmode status failed: %d\n", rval);
else {
vap->iv_flags &= ~IEEE80211_F_WPA;
switch (arg) {
case NDIS_80211_AUTHMODE_OPEN:
ni->ni_authmode = IEEE80211_AUTH_OPEN;
break;
case NDIS_80211_AUTHMODE_SHARED:
ni->ni_authmode = IEEE80211_AUTH_SHARED;
break;
case NDIS_80211_AUTHMODE_AUTO:
ni->ni_authmode = IEEE80211_AUTH_AUTO;
break;
case NDIS_80211_AUTHMODE_WPA:
case NDIS_80211_AUTHMODE_WPAPSK:
case NDIS_80211_AUTHMODE_WPANONE:
ni->ni_authmode = IEEE80211_AUTH_WPA;
vap->iv_flags |= IEEE80211_F_WPA1;
break;
case NDIS_80211_AUTHMODE_WPA2:
case NDIS_80211_AUTHMODE_WPA2PSK:
ni->ni_authmode = IEEE80211_AUTH_WPA;
vap->iv_flags |= IEEE80211_F_WPA2;
break;
default:
ni->ni_authmode = IEEE80211_AUTH_NONE;
break;
}
}
len = sizeof(arg);
rval = ndis_get_info(sc, OID_802_11_WEP_STATUS, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"get wep status failed: %d\n", rval);
if (arg == NDIS_80211_WEPSTAT_ENABLED)
vap->iv_flags |= IEEE80211_F_PRIVACY|IEEE80211_F_DROPUNENC;
else
vap->iv_flags &= ~(IEEE80211_F_PRIVACY|IEEE80211_F_DROPUNENC);
}
static int
ndis_ifioctl(ifp, command, data)
struct ifnet *ifp;
u_long command;
caddr_t data;
{
struct ndis_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
int i, error = 0;
/*NDIS_LOCK(sc);*/
switch (command) {
case SIOCSIFFLAGS:
if (ifp->if_flags & IFF_UP) {
if (sc->ndis_running &&
ifp->if_flags & IFF_PROMISC &&
!(sc->ndis_if_flags & IFF_PROMISC)) {
sc->ndis_filter |=
NDIS_PACKET_TYPE_PROMISCUOUS;
i = sizeof(sc->ndis_filter);
error = ndis_set_info(sc,
OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &i);
} else if (sc->ndis_running &&
!(ifp->if_flags & IFF_PROMISC) &&
sc->ndis_if_flags & IFF_PROMISC) {
sc->ndis_filter &=
~NDIS_PACKET_TYPE_PROMISCUOUS;
i = sizeof(sc->ndis_filter);
error = ndis_set_info(sc,
OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &i);
} else
ndis_init(sc);
} else {
if (sc->ndis_running)
ndis_stop(sc);
}
sc->ndis_if_flags = ifp->if_flags;
error = 0;
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
ndis_setmulti(sc);
error = 0;
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
break;
case SIOCSIFCAP:
ifp->if_capenable = ifr->ifr_reqcap;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist = sc->ndis_hwassist;
else
ifp->if_hwassist = 0;
ndis_set_offload(sc);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
/*NDIS_UNLOCK(sc);*/
return (error);
}
static int
ndis_80211ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
struct ndis_softc *sc = ic->ic_softc;
struct ifreq *ifr = data;
struct ndis_oid_data oid;
struct ndis_evt evt;
void *oidbuf = NULL;
int error = 0;
if ((error = priv_check(curthread, PRIV_DRIVER)) != 0)
return (error);
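/*
* SIOCGDRVSPEC/SIOCSDRVSPEC pass a struct ndis_oid_data header
* followed by oid.len bytes of payload in ifr_data; copy both in
* here and copy the results back out below.
*/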
switch (cmd) {
case SIOCGDRVSPEC:
case SIOCSDRVSPEC:
error = copyin(ifr->ifr_data, &oid, sizeof(oid));
if (error)
break;
oidbuf = malloc(oid.len, M_TEMP, M_WAITOK | M_ZERO);
error = copyin(ifr->ifr_data + sizeof(oid), oidbuf, oid.len);
}
if (error) {
free(oidbuf, M_TEMP);
return (error);
}
switch (cmd) {
case SIOCGDRVSPEC:
error = ndis_get_info(sc, oid.oid, oidbuf, &oid.len);
break;
case SIOCSDRVSPEC:
error = ndis_set_info(sc, oid.oid, oidbuf, &oid.len);
break;
case SIOCGPRIVATE_0:
NDIS_LOCK(sc);
if (sc->ndis_evt[sc->ndis_evtcidx].ne_sts == 0) {
error = ENOENT;
NDIS_UNLOCK(sc);
break;
}
error = copyin(ifr->ifr_data, &evt, sizeof(evt));
if (error) {
NDIS_UNLOCK(sc);
break;
}
if (evt.ne_len < sc->ndis_evt[sc->ndis_evtcidx].ne_len) {
error = ENOSPC;
NDIS_UNLOCK(sc);
break;
}
error = copyout(&sc->ndis_evt[sc->ndis_evtcidx],
ifr->ifr_data, sizeof(uint32_t) * 2);
if (error) {
NDIS_UNLOCK(sc);
break;
}
if (sc->ndis_evt[sc->ndis_evtcidx].ne_len) {
error = copyout(sc->ndis_evt[sc->ndis_evtcidx].ne_buf,
ifr->ifr_data + (sizeof(uint32_t) * 2),
sc->ndis_evt[sc->ndis_evtcidx].ne_len);
if (error) {
NDIS_UNLOCK(sc);
break;
}
free(sc->ndis_evt[sc->ndis_evtcidx].ne_buf, M_TEMP);
sc->ndis_evt[sc->ndis_evtcidx].ne_buf = NULL;
}
sc->ndis_evt[sc->ndis_evtcidx].ne_len = 0;
sc->ndis_evt[sc->ndis_evtcidx].ne_sts = 0;
NDIS_EVTINC(sc->ndis_evtcidx);
NDIS_UNLOCK(sc);
break;
default:
error = ENOTTY;
break;
}
switch (cmd) {
case SIOCGDRVSPEC:
case SIOCSDRVSPEC:
error = copyout(&oid, ifr->ifr_data, sizeof(oid));
if (error)
break;
error = copyout(oidbuf, ifr->ifr_data + sizeof(oid), oid.len);
}
free(oidbuf, M_TEMP);
return (error);
}
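/*
 * Illustrative sketch, not part of the driver: the SIOCGPRIVATE_0 case above
 * copies two 32-bit words (event status and payload length) to ifr_data and
 * then appends the payload itself.  A userland consumer could overlay the
 * reply with a structure like the one below.  The struct and field names are
 * hypothetical; only the two-word header followed by 'len' bytes of data is
 * implied by the copyout() calls above.
 */
struct ndis_evt_hdr_sketch {
	uint32_t	sts;	/* NDIS event/status code */
	uint32_t	len;	/* number of payload bytes that follow */
	/* 'len' bytes of event data follow immediately */
};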
int
ndis_del_key(struct ieee80211vap *vap, const struct ieee80211_key *key)
{
struct ndis_softc *sc = vap->iv_ic->ic_softc;
ndis_80211_key rkey;
int len, error = 0;
bzero((char *)&rkey, sizeof(rkey));
len = sizeof(rkey);
rkey.nk_len = len;
rkey.nk_keyidx = key->wk_keyix;
bcopy(vap->iv_ifp->if_broadcastaddr,
rkey.nk_bssid, IEEE80211_ADDR_LEN);
error = ndis_set_info(sc, OID_802_11_REMOVE_KEY, &rkey, &len);
if (error)
return (0);
return (1);
}
/*
* In theory this could be called for any key, but we'll
* only use it for WPA TKIP or AES keys. These need to be
* set after initial authentication with the AP.
*/
static int
ndis_add_key(struct ieee80211vap *vap, const struct ieee80211_key *key)
{
struct ndis_softc *sc = vap->iv_ic->ic_softc;
ndis_80211_key rkey;
int len, error = 0;
switch (key->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_TKIP:
len = sizeof(ndis_80211_key);
bzero((char *)&rkey, sizeof(rkey));
rkey.nk_len = len;
rkey.nk_keylen = key->wk_keylen;
if (key->wk_flags & IEEE80211_KEY_SWMIC)
rkey.nk_keylen += 16;
/* key index - gets weird in NDIS */
if (key->wk_keyix != IEEE80211_KEYIX_NONE)
rkey.nk_keyidx = key->wk_keyix;
else
rkey.nk_keyidx = 0;
if (key->wk_flags & IEEE80211_KEY_XMIT)
rkey.nk_keyidx |= 1 << 31;
if (key->wk_flags & IEEE80211_KEY_GROUP) {
bcopy(ieee80211broadcastaddr,
rkey.nk_bssid, IEEE80211_ADDR_LEN);
} else {
bcopy(vap->iv_bss->ni_bssid,
rkey.nk_bssid, IEEE80211_ADDR_LEN);
/* pairwise key */
rkey.nk_keyidx |= 1 << 30;
}
/* need to set bit 29 based on keyrsc */
rkey.nk_keyrsc = key->wk_keyrsc[0]; /* XXX need tid */
if (rkey.nk_keyrsc)
rkey.nk_keyidx |= 1 << 29;
if (key->wk_flags & IEEE80211_KEY_SWMIC) {
bcopy(key->wk_key, rkey.nk_keydata, 16);
bcopy(key->wk_key + 24, rkey.nk_keydata + 16, 8);
bcopy(key->wk_key + 16, rkey.nk_keydata + 24, 8);
} else
bcopy(key->wk_key, rkey.nk_keydata, key->wk_keylen);
error = ndis_set_info(sc, OID_802_11_ADD_KEY, &rkey, &len);
break;
case IEEE80211_CIPHER_WEP:
error = 0;
break;
/*
* I don't know how to set up keys for the AES
* cipher yet. Is it the same as TKIP?
*/
case IEEE80211_CIPHER_AES_CCM:
default:
error = ENOTTY;
break;
}
/* We need to return 1 for success, 0 for failure. */
if (error)
return (0);
return (1);
}
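/*
 * Illustrative sketch, not part of the driver: ndis_add_key() above packs
 * per-key flags into the high bits of the 32-bit NDIS key index (bit 31 for
 * transmit keys, bit 30 for pairwise keys, bit 29 when a key RSC value is
 * supplied).  The helper below only restates that packing; its name and
 * parameters are hypothetical.
 */
static __unused uint32_t
ndis_keyidx_sketch(uint32_t keyix, int xmit, int pairwise, int have_rsc)
{
	uint32_t idx = keyix;		/* low bits: key index proper */

	if (xmit)
		idx |= 1U << 31;	/* key is used for transmit */
	if (pairwise)
		idx |= 1U << 30;	/* pairwise key (BSSID is the peer) */
	if (have_rsc)
		idx |= 1U << 29;	/* key RSC (sequence counter) supplied */
	return (idx);
}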
static void
ndis_resettask(device_object *d, void *arg)
{
struct ndis_softc *sc;
sc = arg;
ndis_reset_nic(sc);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
ndis_stop(struct ndis_softc *sc)
{
int i;
callout_drain(&sc->ndis_stat_callout);
NDIS_LOCK(sc);
sc->ndis_tx_timer = 0;
sc->ndis_link = 0;
if (!sc->ndis_80211)
sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
sc->ndis_running = 0;
NDIS_UNLOCK(sc);
if (sc->ndis_iftype != PNPBus ||
(sc->ndis_iftype == PNPBus &&
!(sc->ndisusb_status & NDISUSB_STATUS_DETACH) &&
ndisusb_halt != 0))
ndis_halt_nic(sc);
NDIS_LOCK(sc);
for (i = 0; i < NDIS_EVENTS; i++) {
if (sc->ndis_evt[i].ne_sts && sc->ndis_evt[i].ne_buf != NULL) {
free(sc->ndis_evt[i].ne_buf, M_TEMP);
sc->ndis_evt[i].ne_buf = NULL;
}
sc->ndis_evt[i].ne_sts = 0;
sc->ndis_evt[i].ne_len = 0;
}
sc->ndis_evtcidx = 0;
sc->ndis_evtpidx = 0;
NDIS_UNLOCK(sc);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
void
ndis_shutdown(device_t dev)
{
struct ndis_softc *sc;
sc = device_get_softc(dev);
ndis_stop(sc);
}
static int
ndis_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ndis_vap *nvp = NDIS_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct ndis_softc *sc = ic->ic_softc;
enum ieee80211_state ostate;
DPRINTF(("%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]));
ostate = vap->iv_state;
vap->iv_state = nstate;
switch (nstate) {
/* pass on to net80211 */
case IEEE80211_S_INIT:
case IEEE80211_S_SCAN:
return nvp->newstate(vap, nstate, arg);
case IEEE80211_S_ASSOC:
if (ostate != IEEE80211_S_AUTH) {
IEEE80211_UNLOCK(ic);
ndis_auth_and_assoc(sc, vap);
IEEE80211_LOCK(ic);
}
break;
case IEEE80211_S_AUTH:
IEEE80211_UNLOCK(ic);
ndis_auth_and_assoc(sc, vap);
if (vap->iv_state == IEEE80211_S_AUTH) /* XXX */
ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
IEEE80211_LOCK(ic);
break;
default:
break;
}
return (0);
}
static void
ndis_scan(void *arg)
{
struct ieee80211vap *vap = arg;
ieee80211_scan_done(vap);
}
static void
ndis_scan_results(struct ndis_softc *sc)
{
struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
ndis_80211_bssid_list_ex *bl;
ndis_wlan_bssid_ex *wb;
struct ieee80211_scanparams sp;
struct ieee80211_frame wh;
struct ieee80211_channel *saved_chan;
int i, j;
int rssi, noise, freq, chanflag;
uint8_t ssid[2+IEEE80211_NWID_LEN];
uint8_t rates[2+IEEE80211_RATE_MAXSIZE];
uint8_t *frm, *efrm;
saved_chan = ic->ic_curchan;
noise = -96;
if (ndis_get_bssid_list(sc, &bl))
return;
DPRINTF(("%s: %d results\n", __func__, bl->nblx_items));
wb = &bl->nblx_bssid[0];
for (i = 0; i < bl->nblx_items; i++) {
memset(&sp, 0, sizeof(sp));
memcpy(wh.i_addr2, wb->nwbx_macaddr, sizeof(wh.i_addr2));
memcpy(wh.i_addr3, wb->nwbx_macaddr, sizeof(wh.i_addr3));
rssi = 100 * (wb->nwbx_rssi - noise) / (-32 - noise);
rssi = max(0, min(rssi, 100)); /* limit 0 <= rssi <= 100 */
if (wb->nwbx_privacy)
sp.capinfo |= IEEE80211_CAPINFO_PRIVACY;
sp.bintval = wb->nwbx_config.nc_beaconperiod;
switch (wb->nwbx_netinfra) {
case NDIS_80211_NET_INFRA_IBSS:
sp.capinfo |= IEEE80211_CAPINFO_IBSS;
break;
case NDIS_80211_NET_INFRA_BSS:
sp.capinfo |= IEEE80211_CAPINFO_ESS;
break;
}
sp.rates = &rates[0];
for (j = 0; j < IEEE80211_RATE_MAXSIZE; j++) {
/* XXX - check units */
if (wb->nwbx_supportedrates[j] == 0)
break;
rates[2 + j] =
wb->nwbx_supportedrates[j] & 0x7f;
}
rates[1] = j;
sp.ssid = (uint8_t *)&ssid[0];
memcpy(sp.ssid + 2, &wb->nwbx_ssid.ns_ssid,
wb->nwbx_ssid.ns_ssidlen);
sp.ssid[1] = wb->nwbx_ssid.ns_ssidlen;
chanflag = ndis_nettype_chan(wb->nwbx_nettype);
freq = wb->nwbx_config.nc_dsconfig / 1000;
sp.chan = sp.bchan = ieee80211_mhz2ieee(freq, chanflag);
/* Hack ic->ic_curchan to be in sync with the scan result */
ic->ic_curchan = ieee80211_find_channel(ic, freq, chanflag);
if (ic->ic_curchan == NULL)
ic->ic_curchan = &ic->ic_channels[0];
/* Process extended info from AP */
if (wb->nwbx_len > sizeof(ndis_wlan_bssid)) {
frm = (uint8_t *)&wb->nwbx_ies;
efrm = frm + wb->nwbx_ielen;
if (efrm - frm < 12)
goto done;
sp.tstamp = frm; frm += 8;
sp.bintval = le16toh(*(uint16_t *)frm); frm += 2;
sp.capinfo = le16toh(*(uint16_t *)frm); frm += 2;
sp.ies = frm;
sp.ies_len = efrm - frm;
}
done:
DPRINTF(("scan: bssid %s chan %dMHz (%d/%d) rssi %d\n",
ether_sprintf(wb->nwbx_macaddr), freq, sp.bchan, chanflag,
rssi));
ieee80211_add_scan(vap, ic->ic_curchan, &sp, &wh, 0, rssi, noise);
wb = (ndis_wlan_bssid_ex *)((char *)wb + wb->nwbx_len);
}
free(bl, M_DEVBUF);
/* Restore the channel after messing with it */
ic->ic_curchan = saved_chan;
}
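/*
 * Illustrative sketch, not part of the driver: ndis_scan_results() above
 * maps the reported signal level onto a 0..100 scale by treating -96 dBm as
 * the noise floor and -32 dBm as full strength, then clamping.  For example,
 * -64 dBm gives 100 * (-64 - -96) / (-32 - -96) = 100 * 32 / 64 = 50.  The
 * helper name below is hypothetical.
 */
static __unused int
ndis_rssi_percent_sketch(int rssi_dbm)
{
	const int noise = -96;		/* assumed noise floor, in dBm */
	int rssi;

	rssi = 100 * (rssi_dbm - noise) / (-32 - noise);
	if (rssi < 0)
		rssi = 0;
	else if (rssi > 100)
		rssi = 100;
	return (rssi);
}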
static void
ndis_scan_start(struct ieee80211com *ic)
{
struct ndis_softc *sc = ic->ic_softc;
struct ieee80211vap *vap;
struct ieee80211_scan_state *ss;
ndis_80211_ssid ssid;
int error, len;
ss = ic->ic_scan;
vap = TAILQ_FIRST(&ic->ic_vaps);
if (!NDIS_INITIALIZED(sc)) {
DPRINTF(("%s: scan aborted\n", __func__));
ieee80211_cancel_scan(vap);
return;
}
len = sizeof(ssid);
bzero((char *)&ssid, len);
if (ss->ss_nssid == 0)
ssid.ns_ssidlen = 1;
else {
/* Perform a directed scan */
ssid.ns_ssidlen = ss->ss_ssid[0].len;
bcopy(ss->ss_ssid[0].ssid, ssid.ns_ssid, ssid.ns_ssidlen);
}
error = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len);
if (error)
DPRINTF(("%s: set ESSID failed\n", __func__));
len = 0;
error = ndis_set_info(sc, OID_802_11_BSSID_LIST_SCAN, NULL, &len);
if (error) {
DPRINTF(("%s: scan command failed\n", __func__));
ieee80211_cancel_scan(vap);
return;
}
/* Set a timer to collect the results */
callout_reset(&sc->ndis_scan_callout, hz * 3, ndis_scan, vap);
}
static void
ndis_set_channel(struct ieee80211com *ic)
{
/* ignore */
}
static void
ndis_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
/* ignore */
}
static void
ndis_scan_mindwell(struct ieee80211_scan_state *ss)
{
/* NB: don't try to abort scan; wait for firmware to finish */
}
static void
ndis_scan_end(struct ieee80211com *ic)
{
struct ndis_softc *sc = ic->ic_softc;
ndis_scan_results(sc);
}
Index: head/sys/dev/iwi/if_iwi.c
===================================================================
--- head/sys/dev/iwi/if_iwi.c (revision 328217)
+++ head/sys/dev/iwi/if_iwi.c (revision 328218)
@@ -1,3619 +1,3619 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2004, 2005
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
* Copyright (c) 2005-2006 Sam Leffler, Errno Consulting
* Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Intel(R) PRO/Wireless 2200BG/2225BG/2915ABG driver
* http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_regdomain.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/iwi/if_iwireg.h>
#include <dev/iwi/if_iwivar.h>
#include <dev/iwi/if_iwi_ioctl.h>
#define IWI_DEBUG
#ifdef IWI_DEBUG
#define DPRINTF(x) do { if (iwi_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x) do { if (iwi_debug >= (n)) printf x; } while (0)
int iwi_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, iwi, CTLFLAG_RW, &iwi_debug, 0, "iwi debug level");
static const char *iwi_fw_states[] = {
"IDLE", /* IWI_FW_IDLE */
"LOADING", /* IWI_FW_LOADING */
"ASSOCIATING", /* IWI_FW_ASSOCIATING */
"DISASSOCIATING", /* IWI_FW_DISASSOCIATING */
"SCANNING", /* IWI_FW_SCANNING */
};
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif
MODULE_DEPEND(iwi, pci, 1, 1, 1);
MODULE_DEPEND(iwi, wlan, 1, 1, 1);
MODULE_DEPEND(iwi, firmware, 1, 1, 1);
enum {
IWI_LED_TX,
IWI_LED_RX,
IWI_LED_POLL,
};
struct iwi_ident {
uint16_t vendor;
uint16_t device;
const char *name;
};
static const struct iwi_ident iwi_ident_table[] = {
{ 0x8086, 0x4220, "Intel(R) PRO/Wireless 2200BG" },
{ 0x8086, 0x4221, "Intel(R) PRO/Wireless 2225BG" },
{ 0x8086, 0x4223, "Intel(R) PRO/Wireless 2915ABG" },
{ 0x8086, 0x4224, "Intel(R) PRO/Wireless 2915ABG" },
{ 0, 0, NULL }
};
static const uint8_t def_chan_2ghz[] =
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };
static const uint8_t def_chan_5ghz_band1[] =
{ 36, 40, 44, 48, 52, 56, 60, 64 };
static const uint8_t def_chan_5ghz_band2[] =
{ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 };
static const uint8_t def_chan_5ghz_band3[] =
{ 149, 153, 157, 161, 165 };
static struct ieee80211vap *iwi_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void iwi_vap_delete(struct ieee80211vap *);
static void iwi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int iwi_alloc_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *,
int);
static void iwi_reset_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *);
static void iwi_free_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *);
static int iwi_alloc_tx_ring(struct iwi_softc *, struct iwi_tx_ring *,
int, bus_addr_t, bus_addr_t);
static void iwi_reset_tx_ring(struct iwi_softc *, struct iwi_tx_ring *);
static void iwi_free_tx_ring(struct iwi_softc *, struct iwi_tx_ring *);
static int iwi_alloc_rx_ring(struct iwi_softc *, struct iwi_rx_ring *,
int);
static void iwi_reset_rx_ring(struct iwi_softc *, struct iwi_rx_ring *);
static void iwi_free_rx_ring(struct iwi_softc *, struct iwi_rx_ring *);
static struct ieee80211_node *iwi_node_alloc(struct ieee80211vap *,
const uint8_t [IEEE80211_ADDR_LEN]);
static void iwi_node_free(struct ieee80211_node *);
static void iwi_media_status(struct ifnet *, struct ifmediareq *);
static int iwi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwi_wme_init(struct iwi_softc *);
static int iwi_wme_setparams(struct iwi_softc *);
static int iwi_wme_update(struct ieee80211com *);
static uint16_t iwi_read_prom_word(struct iwi_softc *, uint8_t);
static void iwi_frame_intr(struct iwi_softc *, struct iwi_rx_data *, int,
struct iwi_frame *);
static void iwi_notification_intr(struct iwi_softc *, struct iwi_notif *);
static void iwi_rx_intr(struct iwi_softc *);
static void iwi_tx_intr(struct iwi_softc *, struct iwi_tx_ring *);
static void iwi_intr(void *);
static int iwi_cmd(struct iwi_softc *, uint8_t, void *, uint8_t);
static void iwi_write_ibssnode(struct iwi_softc *, const u_int8_t [], int);
static int iwi_tx_start(struct iwi_softc *, struct mbuf *,
struct ieee80211_node *, int);
static int iwi_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void iwi_start(struct iwi_softc *);
static int iwi_transmit(struct ieee80211com *, struct mbuf *);
static void iwi_watchdog(void *);
static int iwi_ioctl(struct ieee80211com *, u_long, void *);
static void iwi_parent(struct ieee80211com *);
static void iwi_stop_master(struct iwi_softc *);
static int iwi_reset(struct iwi_softc *);
static int iwi_load_ucode(struct iwi_softc *, const struct iwi_fw *);
static int iwi_load_firmware(struct iwi_softc *, const struct iwi_fw *);
static void iwi_release_fw_dma(struct iwi_softc *sc);
static int iwi_config(struct iwi_softc *);
static int iwi_get_firmware(struct iwi_softc *, enum ieee80211_opmode);
static void iwi_put_firmware(struct iwi_softc *);
static void iwi_monitor_scan(void *, int);
static int iwi_scanchan(struct iwi_softc *, unsigned long, int);
static void iwi_scan_start(struct ieee80211com *);
static void iwi_scan_end(struct ieee80211com *);
static void iwi_set_channel(struct ieee80211com *);
static void iwi_scan_curchan(struct ieee80211_scan_state *, unsigned long maxdwell);
static void iwi_scan_mindwell(struct ieee80211_scan_state *);
static int iwi_auth_and_assoc(struct iwi_softc *, struct ieee80211vap *);
static void iwi_disassoc(void *, int);
static int iwi_disassociate(struct iwi_softc *, int quiet);
static void iwi_init_locked(struct iwi_softc *);
static void iwi_init(void *);
static int iwi_init_fw_dma(struct iwi_softc *, int);
static void iwi_stop_locked(void *);
static void iwi_stop(struct iwi_softc *);
static void iwi_restart(void *, int);
static int iwi_getrfkill(struct iwi_softc *);
static void iwi_radio_on(void *, int);
static void iwi_radio_off(void *, int);
static void iwi_sysctlattach(struct iwi_softc *);
static void iwi_led_event(struct iwi_softc *, int);
static void iwi_ledattach(struct iwi_softc *);
static void iwi_collect_bands(struct ieee80211com *, uint8_t [], size_t);
static void iwi_getradiocaps(struct ieee80211com *, int, int *,
struct ieee80211_channel []);
static int iwi_probe(device_t);
static int iwi_attach(device_t);
static int iwi_detach(device_t);
static int iwi_shutdown(device_t);
static int iwi_suspend(device_t);
static int iwi_resume(device_t);
static device_method_t iwi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, iwi_probe),
DEVMETHOD(device_attach, iwi_attach),
DEVMETHOD(device_detach, iwi_detach),
DEVMETHOD(device_shutdown, iwi_shutdown),
DEVMETHOD(device_suspend, iwi_suspend),
DEVMETHOD(device_resume, iwi_resume),
DEVMETHOD_END
};
static driver_t iwi_driver = {
"iwi",
iwi_methods,
sizeof (struct iwi_softc)
};
static devclass_t iwi_devclass;
DRIVER_MODULE(iwi, pci, iwi_driver, iwi_devclass, NULL, NULL);
MODULE_VERSION(iwi, 1);
static __inline uint8_t
MEM_READ_1(struct iwi_softc *sc, uint32_t addr)
{
CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr);
return CSR_READ_1(sc, IWI_CSR_INDIRECT_DATA);
}
static __inline uint32_t
MEM_READ_4(struct iwi_softc *sc, uint32_t addr)
{
CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr);
return CSR_READ_4(sc, IWI_CSR_INDIRECT_DATA);
}
static int
iwi_probe(device_t dev)
{
const struct iwi_ident *ident;
for (ident = iwi_ident_table; ident->name != NULL; ident++) {
if (pci_get_vendor(dev) == ident->vendor &&
pci_get_device(dev) == ident->device) {
device_set_desc(dev, ident->name);
return (BUS_PROBE_DEFAULT);
}
}
return ENXIO;
}
static int
iwi_attach(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
uint16_t val;
int i, error;
sc->sc_dev = dev;
sc->sc_ledevent = ticks;
IWI_LOCK_INIT(sc);
mbufq_init(&sc->sc_snd, ifqmaxlen);
sc->sc_unr = new_unrhdr(1, IWI_MAX_IBSSNODE-1, &sc->sc_mtx);
TASK_INIT(&sc->sc_radiontask, 0, iwi_radio_on, sc);
TASK_INIT(&sc->sc_radiofftask, 0, iwi_radio_off, sc);
TASK_INIT(&sc->sc_restarttask, 0, iwi_restart, sc);
TASK_INIT(&sc->sc_disassoctask, 0, iwi_disassoc, sc);
TASK_INIT(&sc->sc_monitortask, 0, iwi_monitor_scan, sc);
callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_rftimer, &sc->sc_mtx, 0);
pci_write_config(dev, 0x41, 0, 1);
/* enable bus-mastering */
pci_enable_busmaster(dev);
i = PCIR_BAR(0);
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "could not allocate memory resource\n");
goto fail;
}
sc->sc_st = rman_get_bustag(sc->mem);
sc->sc_sh = rman_get_bushandle(sc->mem);
i = 0;
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
RF_ACTIVE | RF_SHAREABLE);
if (sc->irq == NULL) {
device_printf(dev, "could not allocate interrupt resource\n");
goto fail;
}
if (iwi_reset(sc) != 0) {
device_printf(dev, "could not reset adapter\n");
goto fail;
}
/*
* Allocate rings.
*/
if (iwi_alloc_cmd_ring(sc, &sc->cmdq, IWI_CMD_RING_COUNT) != 0) {
device_printf(dev, "could not allocate Cmd ring\n");
goto fail;
}
for (i = 0; i < 4; i++) {
error = iwi_alloc_tx_ring(sc, &sc->txq[i], IWI_TX_RING_COUNT,
IWI_CSR_TX1_RIDX + i * 4,
IWI_CSR_TX1_WIDX + i * 4);
if (error != 0) {
device_printf(dev, "could not allocate Tx ring %d\n",
			    i + 1);
goto fail;
}
}
if (iwi_alloc_rx_ring(sc, &sc->rxq, IWI_RX_RING_COUNT) != 0) {
device_printf(dev, "could not allocate Rx ring\n");
goto fail;
}
iwi_wme_init(sc);
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_phytype = IEEE80211_T_OFDM; /* not the only phy type, but unused */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_IBSS /* IBSS mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
| IEEE80211_C_PMGT /* power save supported */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_WPA /* 802.11i */
| IEEE80211_C_WME /* 802.11e */
#if 0
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#endif
;
/* read MAC address from EEPROM */
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val & 0xff;
ic->ic_macaddr[1] = val >> 8;
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 1);
ic->ic_macaddr[2] = val & 0xff;
ic->ic_macaddr[3] = val >> 8;
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 2);
ic->ic_macaddr[4] = val & 0xff;
ic->ic_macaddr[5] = val >> 8;
iwi_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
ieee80211_ifattach(ic);
/* override default methods */
ic->ic_node_alloc = iwi_node_alloc;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = iwi_node_free;
ic->ic_raw_xmit = iwi_raw_xmit;
ic->ic_scan_start = iwi_scan_start;
ic->ic_scan_end = iwi_scan_end;
ic->ic_set_channel = iwi_set_channel;
ic->ic_scan_curchan = iwi_scan_curchan;
ic->ic_scan_mindwell = iwi_scan_mindwell;
ic->ic_wme.wme_update = iwi_wme_update;
ic->ic_vap_create = iwi_vap_create;
ic->ic_vap_delete = iwi_vap_delete;
ic->ic_ioctl = iwi_ioctl;
ic->ic_transmit = iwi_transmit;
ic->ic_parent = iwi_parent;
ic->ic_getradiocaps = iwi_getradiocaps;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
IWI_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
IWI_RX_RADIOTAP_PRESENT);
iwi_sysctlattach(sc);
iwi_ledattach(sc);
/*
* Hook our interrupt after all initialization is complete.
*/
error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, iwi_intr, sc, &sc->sc_ih);
if (error != 0) {
device_printf(dev, "could not set up interrupt\n");
goto fail;
}
if (bootverbose)
ieee80211_announce(ic);
return 0;
fail:
/* XXX fix */
iwi_detach(dev);
return ENXIO;
}
static int
iwi_detach(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
bus_teardown_intr(dev, sc->irq, sc->sc_ih);
/* NB: do early to drain any pending tasks */
ieee80211_draintask(ic, &sc->sc_radiontask);
ieee80211_draintask(ic, &sc->sc_radiofftask);
ieee80211_draintask(ic, &sc->sc_restarttask);
ieee80211_draintask(ic, &sc->sc_disassoctask);
ieee80211_draintask(ic, &sc->sc_monitortask);
iwi_stop(sc);
ieee80211_ifdetach(ic);
iwi_put_firmware(sc);
iwi_release_fw_dma(sc);
iwi_free_cmd_ring(sc, &sc->cmdq);
iwi_free_tx_ring(sc, &sc->txq[0]);
iwi_free_tx_ring(sc, &sc->txq[1]);
iwi_free_tx_ring(sc, &sc->txq[2]);
iwi_free_tx_ring(sc, &sc->txq[3]);
iwi_free_rx_ring(sc, &sc->rxq);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq);
bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem),
sc->mem);
delete_unrhdr(sc->sc_unr);
mbufq_drain(&sc->sc_snd);
IWI_LOCK_DESTROY(sc);
return 0;
}
static struct ieee80211vap *
iwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct iwi_softc *sc = ic->ic_softc;
struct iwi_vap *ivp;
struct ieee80211vap *vap;
int i;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
/*
* Get firmware image (and possibly dma memory) on mode change.
*/
if (iwi_get_firmware(sc, opmode))
return NULL;
/* allocate DMA memory for mapping firmware image */
i = sc->fw_fw.size;
if (sc->fw_boot.size > i)
i = sc->fw_boot.size;
/* XXX do we dma the ucode as well ? */
if (sc->fw_uc.size > i)
i = sc->fw_uc.size;
if (iwi_init_fw_dma(sc, i))
return NULL;
ivp = malloc(sizeof(struct iwi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &ivp->iwi_vap;
ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override the default; the setting comes from the Linux driver */
vap->iv_bmissthreshold = 24;
/* override with driver methods */
ivp->iwi_newstate = vap->iv_newstate;
vap->iv_newstate = iwi_newstate;
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change, iwi_media_status,
mac);
ic->ic_opmode = opmode;
return vap;
}
static void
iwi_vap_delete(struct ieee80211vap *vap)
{
struct iwi_vap *ivp = IWI_VAP(vap);
ieee80211_vap_detach(vap);
free(ivp, M_80211_VAP);
}
static void
iwi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
iwi_alloc_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring, int count)
{
int error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * IWI_CMD_DESC_SIZE, 1, count * IWI_CMD_DESC_SIZE, 0,
NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * IWI_CMD_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
return 0;
fail: iwi_free_cmd_ring(sc, ring);
return error;
}
static void
iwi_reset_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring)
{
ring->queued = 0;
ring->cur = ring->next = 0;
}
static void
iwi_free_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring)
{
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
}
static int
iwi_alloc_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring, int count,
bus_addr_t csr_ridx, bus_addr_t csr_widx)
{
int i, error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = 0;
ring->csr_ridx = csr_ridx;
ring->csr_widx = csr_widx;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * IWI_TX_DESC_SIZE, 1, count * IWI_TX_DESC_SIZE, 0, NULL,
NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * IWI_TX_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
- ring->data = mallocarray(count, sizeof(struct iwi_tx_data), M_DEVBUF,
+ ring->data = malloc(count * sizeof (struct iwi_tx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
IWI_MAX_NSEG, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
error = bus_dmamap_create(ring->data_dmat, 0,
&ring->data[i].map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
}
return 0;
fail: iwi_free_tx_ring(sc, ring);
return error;
}
static void
iwi_reset_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring)
{
struct iwi_tx_data *data;
int i;
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
ring->queued = 0;
ring->cur = ring->next = 0;
}
static void
iwi_free_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring)
{
struct iwi_tx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->ni != NULL)
ieee80211_free_node(data->ni);
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
iwi_alloc_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring, int count)
{
struct iwi_rx_data *data;
int i, error;
ring->count = count;
ring->cur = 0;
- ring->data = mallocarray(count, sizeof(struct iwi_rx_data), M_DEVBUF,
+ ring->data = malloc(count * sizeof (struct iwi_rx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
data = &ring->data[i];
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr,
&data->physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load rx buf DMA map");
goto fail;
}
data->reg = IWI_CSR_RX_BASE + i * 4;
}
return 0;
fail: iwi_free_rx_ring(sc, ring);
return error;
}
static void
iwi_reset_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring)
{
ring->cur = 0;
}
static void
iwi_free_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring)
{
struct iwi_rx_data *data;
int i;
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
iwi_shutdown(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
iwi_stop(sc);
iwi_put_firmware(sc); /* ??? XXX */
return 0;
}
static int
iwi_suspend(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
ieee80211_suspend_all(ic);
return 0;
}
static int
iwi_resume(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
pci_write_config(dev, 0x41, 0, 1);
ieee80211_resume_all(ic);
return 0;
}
static struct ieee80211_node *
iwi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct iwi_node *in;
in = malloc(sizeof (struct iwi_node), M_80211_NODE, M_NOWAIT | M_ZERO);
if (in == NULL)
return NULL;
/* XXX assign sta table entry for adhoc */
in->in_station = -1;
return &in->in_node;
}
static void
iwi_node_free(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
struct iwi_softc *sc = ic->ic_softc;
struct iwi_node *in = (struct iwi_node *)ni;
if (in->in_station != -1) {
DPRINTF(("%s mac %6D station %u\n", __func__,
ni->ni_macaddr, ":", in->in_station));
free_unr(sc->sc_unr, in->in_station);
}
sc->sc_node_free(ni);
}
/*
* Convert h/w rate code to IEEE rate code.
*/
static int
iwi_cvtrate(int iwirate)
{
switch (iwirate) {
case IWI_RATE_DS1: return 2;
case IWI_RATE_DS2: return 4;
case IWI_RATE_DS5: return 11;
case IWI_RATE_DS11: return 22;
case IWI_RATE_OFDM6: return 12;
case IWI_RATE_OFDM9: return 18;
case IWI_RATE_OFDM12: return 24;
case IWI_RATE_OFDM18: return 36;
case IWI_RATE_OFDM24: return 48;
case IWI_RATE_OFDM36: return 72;
case IWI_RATE_OFDM48: return 96;
case IWI_RATE_OFDM54: return 108;
}
return 0;
}
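/*
 * Illustrative sketch, not part of the driver: the values returned by
 * iwi_cvtrate() are IEEE rate codes in units of 500 kb/s, so IWI_RATE_DS1
 * maps to 2 (1 Mb/s) and IWI_RATE_OFDM54 to 108 (54 Mb/s).  The helper name
 * below is hypothetical.
 */
static __unused int
iwi_rate_mbps_sketch(int iwirate)
{
	return (iwi_cvtrate(iwirate) / 2);	/* 500 kb/s units -> Mb/s */
}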
/*
* The firmware automatically adapts the transmit speed. We report its current
* value here.
*/
static void
iwi_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
struct iwi_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
/* read current transmission rate from adapter */
ni = ieee80211_ref_node(vap->iv_bss);
ni->ni_txrate =
iwi_cvtrate(CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE));
ieee80211_free_node(ni);
ieee80211_media_status(ifp, imr);
}
static int
iwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct iwi_vap *ivp = IWI_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct iwi_softc *sc = ic->ic_softc;
IWI_LOCK_DECL;
DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate], sc->flags));
IEEE80211_UNLOCK(ic);
IWI_LOCK(sc);
switch (nstate) {
case IEEE80211_S_INIT:
/*
* NB: don't try to do this if iwi_stop_master has
		 * shut down the firmware and disabled interrupts.
*/
if (vap->iv_state == IEEE80211_S_RUN &&
(sc->flags & IWI_FLAG_FW_INITED))
iwi_disassociate(sc, 0);
break;
case IEEE80211_S_AUTH:
iwi_auth_and_assoc(sc, vap);
break;
case IEEE80211_S_RUN:
if (vap->iv_opmode == IEEE80211_M_IBSS &&
vap->iv_state == IEEE80211_S_SCAN) {
/*
* XXX when joining an ibss network we are called
* with a SCAN -> RUN transition on scan complete.
* Use that to call iwi_auth_and_assoc. On completing
* the join we are then called again with an
* AUTH -> RUN transition and we want to do nothing.
* This is all totally bogus and needs to be redone.
*/
iwi_auth_and_assoc(sc, vap);
} else if (vap->iv_opmode == IEEE80211_M_MONITOR)
ieee80211_runtask(ic, &sc->sc_monitortask);
break;
case IEEE80211_S_ASSOC:
/*
* If we are transitioning from AUTH then just wait
* for the ASSOC status to come back from the firmware.
* Otherwise we need to issue the association request.
*/
if (vap->iv_state == IEEE80211_S_AUTH)
break;
iwi_auth_and_assoc(sc, vap);
break;
default:
break;
}
IWI_UNLOCK(sc);
IEEE80211_LOCK(ic);
return ivp->iwi_newstate(vap, nstate, arg);
}
/*
* WME parameters coming from IEEE 802.11e specification. These values are
* already declared in ieee80211_proto.c, but they are static so they can't
* be reused here.
*/
static const struct wmeParams iwi_wme_cck_params[WME_NUM_AC] = {
{ 0, 3, 5, 7, 0 }, /* WME_AC_BE */
{ 0, 3, 5, 10, 0 }, /* WME_AC_BK */
{ 0, 2, 4, 5, 188 }, /* WME_AC_VI */
{ 0, 2, 3, 4, 102 } /* WME_AC_VO */
};
static const struct wmeParams iwi_wme_ofdm_params[WME_NUM_AC] = {
{ 0, 3, 4, 6, 0 }, /* WME_AC_BE */
{ 0, 3, 4, 10, 0 }, /* WME_AC_BK */
{ 0, 2, 3, 4, 94 }, /* WME_AC_VI */
{ 0, 2, 2, 3, 47 } /* WME_AC_VO */
};
#define IWI_EXP2(v) htole16((1 << (v)) - 1)
#define IWI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v))
static void
iwi_wme_init(struct iwi_softc *sc)
{
const struct wmeParams *wmep;
int ac;
memset(sc->wme, 0, sizeof sc->wme);
for (ac = 0; ac < WME_NUM_AC; ac++) {
/* set WME values for CCK modulation */
wmep = &iwi_wme_cck_params[ac];
sc->wme[1].aifsn[ac] = wmep->wmep_aifsn;
sc->wme[1].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin);
sc->wme[1].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax);
sc->wme[1].burst[ac] = IWI_USEC(wmep->wmep_txopLimit);
sc->wme[1].acm[ac] = wmep->wmep_acm;
/* set WME values for OFDM modulation */
wmep = &iwi_wme_ofdm_params[ac];
sc->wme[2].aifsn[ac] = wmep->wmep_aifsn;
sc->wme[2].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin);
sc->wme[2].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax);
sc->wme[2].burst[ac] = IWI_USEC(wmep->wmep_txopLimit);
sc->wme[2].acm[ac] = wmep->wmep_acm;
}
}
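/*
 * Illustrative sketch, not part of the driver: IWI_EXP2() and IWI_USEC()
 * above translate the net80211 WME encodings into what the firmware expects.
 * A logarithmic contention window of 3 becomes (1 << 3) - 1 = 7 slots, and a
 * TXOP limit of 94 (expressed in 32-microsecond units) becomes 94 * 32 =
 * 3008 us; both values are then byte-swapped to little-endian.  The helper
 * below restates the contention window arithmetic without the byte swap;
 * its name is hypothetical.
 */
static __unused uint16_t
iwi_cw_sketch(uint8_t logcw)
{
	return ((1 << logcw) - 1);	/* e.g. logcw 3 -> CW of 7 slots */
}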
static int
iwi_wme_setparams(struct iwi_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct chanAccParams chp;
const struct wmeParams *wmep;
int ac;
ieee80211_wme_ic_getparams(ic, &chp);
for (ac = 0; ac < WME_NUM_AC; ac++) {
/* set WME values for current operating mode */
wmep = &chp.cap_wmeParams[ac];
sc->wme[0].aifsn[ac] = wmep->wmep_aifsn;
sc->wme[0].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin);
sc->wme[0].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax);
sc->wme[0].burst[ac] = IWI_USEC(wmep->wmep_txopLimit);
sc->wme[0].acm[ac] = wmep->wmep_acm;
}
DPRINTF(("Setting WME parameters\n"));
return iwi_cmd(sc, IWI_CMD_SET_WME_PARAMS, sc->wme, sizeof sc->wme);
}
#undef IWI_USEC
#undef IWI_EXP2
static int
iwi_wme_update(struct ieee80211com *ic)
{
struct iwi_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
IWI_LOCK_DECL;
/*
* We may be called to update the WME parameters in
* the adapter at various places. If we're already
* associated then initiate the request immediately;
* otherwise we assume the params will get sent down
* to the adapter as part of the work iwi_auth_and_assoc
* does.
*/
if (vap->iv_state == IEEE80211_S_RUN) {
IWI_LOCK(sc);
iwi_wme_setparams(sc);
IWI_UNLOCK(sc);
}
return (0);
}
static int
iwi_wme_setie(struct iwi_softc *sc)
{
struct ieee80211_wme_info wme;
memset(&wme, 0, sizeof wme);
wme.wme_id = IEEE80211_ELEMID_VENDOR;
wme.wme_len = sizeof (struct ieee80211_wme_info) - 2;
wme.wme_oui[0] = 0x00;
wme.wme_oui[1] = 0x50;
wme.wme_oui[2] = 0xf2;
wme.wme_type = WME_OUI_TYPE;
wme.wme_subtype = WME_INFO_OUI_SUBTYPE;
wme.wme_version = WME_VERSION;
wme.wme_info = 0;
DPRINTF(("Setting WME IE (len=%u)\n", wme.wme_len));
return iwi_cmd(sc, IWI_CMD_SET_WMEIE, &wme, sizeof wme);
}
/*
* Read 16 bits at address 'addr' from the serial EEPROM.
*/
static uint16_t
iwi_read_prom_word(struct iwi_softc *sc, uint8_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
/* clock C once before the first command */
IWI_EEPROM_CTL(sc, 0);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
/* write start bit (1) */
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C);
/* write READ opcode (10) */
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C);
/* write address A7-A0 */
for (n = 7; n >= 0; n--) {
IWI_EEPROM_CTL(sc, IWI_EEPROM_S |
(((addr >> n) & 1) << IWI_EEPROM_SHIFT_D));
IWI_EEPROM_CTL(sc, IWI_EEPROM_S |
(((addr >> n) & 1) << IWI_EEPROM_SHIFT_D) | IWI_EEPROM_C);
}
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
tmp = MEM_READ_4(sc, IWI_MEM_EEPROM_CTL);
val |= ((tmp & IWI_EEPROM_Q) >> IWI_EEPROM_SHIFT_Q) << n;
}
IWI_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
IWI_EEPROM_CTL(sc, 0);
IWI_EEPROM_CTL(sc, IWI_EEPROM_C);
return val;
}
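/*
 * Illustrative sketch, not part of the driver: iwi_read_prom_word() above
 * bit-bangs a Microwire-style READ transaction: a start bit (1), the READ
 * opcode (10), eight address bits sent MSB first, then sixteen data bits
 * clocked back MSB first.  The helper below restates that sequence over
 * abstract callbacks; the callback parameters are hypothetical stand-ins
 * for the IWI_EEPROM_CTL() register writes.
 */
static __unused uint16_t
iwi_eeprom_read_sketch(void (*write_bit)(int), int (*read_bit)(void),
    uint8_t addr)
{
	uint16_t val;
	int n;

	write_bit(1);			/* start bit */
	write_bit(1);			/* READ opcode, first bit */
	write_bit(0);			/* READ opcode, second bit */
	for (n = 7; n >= 0; n--)	/* address bits A7-A0, MSB first */
		write_bit((addr >> n) & 1);
	val = 0;
	for (n = 15; n >= 0; n--)	/* data bits Q15-Q0, read_bit() is 0/1 */
		val |= (uint16_t)read_bit() << n;
	return (val);
}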
static void
iwi_setcurchan(struct iwi_softc *sc, int chan)
{
struct ieee80211com *ic = &sc->sc_ic;
sc->curchan = chan;
ieee80211_radiotap_chan_change(ic);
}
static void
iwi_frame_intr(struct iwi_softc *sc, struct iwi_rx_data *data, int i,
struct iwi_frame *frame)
{
struct ieee80211com *ic = &sc->sc_ic;
struct mbuf *mnew, *m;
struct ieee80211_node *ni;
int type, error, framelen;
int8_t rssi, nf;
IWI_LOCK_DECL;
framelen = le16toh(frame->len);
if (framelen < IEEE80211_MIN_LEN || framelen > MCLBYTES) {
/*
* XXX >MCLBYTES is bogus as it means the h/w dma'd
* out of bounds; need to figure out how to limit
* frame size in the firmware
*/
/* XXX stat */
DPRINTFN(1,
("drop rx frame len=%u chan=%u rssi=%u rssi_dbm=%u\n",
le16toh(frame->len), frame->chan, frame->rssi,
frame->rssi_dbm));
return;
}
DPRINTFN(5, ("received frame len=%u chan=%u rssi=%u rssi_dbm=%u\n",
le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm));
if (frame->chan != sc->curchan)
iwi_setcurchan(sc, frame->chan);
/*
* Try to allocate a new mbuf for this ring element and load it before
* processing the current mbuf. If the ring element cannot be loaded,
* drop the received packet and reuse the old mbuf. In the unlikely
* case that the old mbuf can't be reloaded either, explicitly panic.
*/
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
counter_u64_add(ic->ic_ierrors, 1);
return;
}
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(mnew, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr,
0);
if (error != 0) {
m_freem(mnew);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr,
&data->physaddr, 0);
if (error != 0) {
/* very unlikely that it will fail... */
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
counter_u64_add(ic->ic_ierrors, 1);
return;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = data->m;
data->m = mnew;
CSR_WRITE_4(sc, data->reg, data->physaddr);
/* finalize mbuf */
m->m_pkthdr.len = m->m_len = sizeof (struct iwi_hdr) +
sizeof (struct iwi_frame) + framelen;
m_adj(m, sizeof (struct iwi_hdr) + sizeof (struct iwi_frame));
rssi = frame->rssi_dbm;
nf = -95;
if (ieee80211_radiotap_active(ic)) {
struct iwi_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
tap->wr_antsignal = rssi;
tap->wr_antnoise = nf;
tap->wr_rate = iwi_cvtrate(frame->rate);
tap->wr_antenna = frame->antenna;
}
IWI_UNLOCK(sc);
ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *));
if (ni != NULL) {
type = ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
type = ieee80211_input_all(ic, m, rssi, nf);
IWI_LOCK(sc);
if (sc->sc_softled) {
/*
* Blink for any data frame. Otherwise do a
* heartbeat-style blink when idle. The latter
* is mainly for station mode where we depend on
* periodic beacon frames to trigger the poll event.
*/
if (type == IEEE80211_FC0_TYPE_DATA) {
sc->sc_rxrate = frame->rate;
iwi_led_event(sc, IWI_LED_RX);
} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
iwi_led_event(sc, IWI_LED_POLL);
}
}
/*
* Check for an association response frame to see if QoS
* has been negotiated. We parse just enough to figure
* out if we're supposed to use QoS. The proper solution
* is to pass the frame up so ieee80211_input can do the
* work but that's made hard by how things currently are
* done in the driver.
*/
static void
iwi_checkforqos(struct ieee80211vap *vap,
const struct ieee80211_frame *wh, int len)
{
#define SUBTYPE(wh) ((wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK)
const uint8_t *frm, *efrm, *wme;
struct ieee80211_node *ni;
uint16_t capinfo, status, associd;
/* NB: +8 for capinfo, status, associd, and first ie */
if (!(sizeof(*wh)+8 < len && len < IEEE80211_MAX_LEN) ||
SUBTYPE(wh) != IEEE80211_FC0_SUBTYPE_ASSOC_RESP)
return;
/*
* asresp frame format
* [2] capability information
* [2] status
* [2] association ID
* [tlv] supported rates
* [tlv] extended supported rates
* [tlv] WME
*/
frm = (const uint8_t *)&wh[1];
efrm = ((const uint8_t *) wh) + len;
capinfo = le16toh(*(const uint16_t *)frm);
frm += 2;
status = le16toh(*(const uint16_t *)frm);
frm += 2;
associd = le16toh(*(const uint16_t *)frm);
frm += 2;
wme = NULL;
while (efrm - frm > 1) {
IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
switch (*frm) {
case IEEE80211_ELEMID_VENDOR:
if (iswmeoui(frm))
wme = frm;
break;
}
frm += frm[1] + 2;
}
ni = ieee80211_ref_node(vap->iv_bss);
ni->ni_capinfo = capinfo;
ni->ni_associd = associd & 0x3fff;
if (wme != NULL)
ni->ni_flags |= IEEE80211_NODE_QOS;
else
ni->ni_flags &= ~IEEE80211_NODE_QOS;
ieee80211_free_node(ni);
#undef SUBTYPE
}
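/*
 * Illustrative sketch, not part of the driver: iwi_checkforqos() above walks
 * the variable part of the association response as a sequence of information
 * elements, each laid out as a one-byte element ID, a one-byte length and
 * then 'length' bytes of payload.  The helper below restates that walk for
 * an arbitrary element ID; its name is hypothetical.
 */
static __unused const uint8_t *
iwi_find_ie_sketch(const uint8_t *frm, const uint8_t *efrm, uint8_t elemid)
{
	while (efrm - frm > 1 && frm + 2 + frm[1] <= efrm) {
		if (frm[0] == elemid)
			return (frm);
		frm += 2 + frm[1];	/* skip ID, length and payload */
	}
	return (NULL);
}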
static void
iwi_notif_link_quality(struct iwi_softc *sc, struct iwi_notif *notif)
{
struct iwi_notif_link_quality *lq;
int len;
len = le16toh(notif->len);
DPRINTFN(5, ("Notification (%u) - len=%d, sizeof=%zu\n",
notif->type,
len,
sizeof(struct iwi_notif_link_quality)
));
/* enforce length */
if (len != sizeof(struct iwi_notif_link_quality)) {
DPRINTFN(5, ("Notification: (%u) too short (%d)\n",
notif->type,
len));
return;
}
lq = (struct iwi_notif_link_quality *)(notif + 1);
memcpy(&sc->sc_linkqual, lq, sizeof(sc->sc_linkqual));
sc->sc_linkqual_valid = 1;
}
/*
* Task queue callbacks for iwi_notification_intr used to avoid LOR's.
*/
static void
iwi_notification_intr(struct iwi_softc *sc, struct iwi_notif *notif)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct iwi_notif_scan_channel *chan;
struct iwi_notif_scan_complete *scan;
struct iwi_notif_authentication *auth;
struct iwi_notif_association *assoc;
struct iwi_notif_beacon_state *beacon;
switch (notif->type) {
case IWI_NOTIF_TYPE_SCAN_CHANNEL:
chan = (struct iwi_notif_scan_channel *)(notif + 1);
DPRINTFN(3, ("Scan of channel %u complete (%u)\n",
ieee80211_ieee2mhz(chan->nchan, 0), chan->nchan));
/* Reset the timer, the scan is still going */
sc->sc_state_timer = 3;
break;
case IWI_NOTIF_TYPE_SCAN_COMPLETE:
scan = (struct iwi_notif_scan_complete *)(notif + 1);
DPRINTFN(2, ("Scan completed (%u, %u)\n", scan->nchan,
scan->status));
IWI_STATE_END(sc, IWI_FW_SCANNING);
/*
* Monitor mode works by doing a passive scan to set
* the channel and enable rx. Because we don't want
		 * to abort a scan lest the firmware crash, we scan
* for a short period of time and automatically restart
* the scan when notified the sweep has completed.
*/
if (vap->iv_opmode == IEEE80211_M_MONITOR) {
ieee80211_runtask(ic, &sc->sc_monitortask);
break;
}
if (scan->status == IWI_SCAN_COMPLETED) {
/* NB: don't need to defer, net80211 does it for us */
ieee80211_scan_next(vap);
}
break;
case IWI_NOTIF_TYPE_AUTHENTICATION:
auth = (struct iwi_notif_authentication *)(notif + 1);
switch (auth->state) {
case IWI_AUTH_SUCCESS:
DPRINTFN(2, ("Authentication succeeeded\n"));
ieee80211_new_state(vap, IEEE80211_S_ASSOC, -1);
break;
case IWI_AUTH_FAIL:
/*
* These are delivered as an unsolicited deauth
* (e.g. due to inactivity) or in response to an
* associate request.
*/
sc->flags &= ~IWI_FLAG_ASSOCIATED;
if (vap->iv_state != IEEE80211_S_RUN) {
DPRINTFN(2, ("Authentication failed\n"));
vap->iv_stats.is_rx_auth_fail++;
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
} else {
DPRINTFN(2, ("Deauthenticated\n"));
vap->iv_stats.is_rx_deauth++;
}
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
break;
case IWI_AUTH_SENT_1:
case IWI_AUTH_RECV_2:
case IWI_AUTH_SEQ1_PASS:
break;
case IWI_AUTH_SEQ1_FAIL:
DPRINTFN(2, ("Initial authentication handshake failed; "
"you probably need shared key\n"));
vap->iv_stats.is_rx_auth_fail++;
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
/* XXX retry shared key when in auto */
break;
default:
device_printf(sc->sc_dev,
"unknown authentication state %u\n", auth->state);
break;
}
break;
case IWI_NOTIF_TYPE_ASSOCIATION:
assoc = (struct iwi_notif_association *)(notif + 1);
switch (assoc->state) {
case IWI_AUTH_SUCCESS:
/* re-association, do nothing */
break;
case IWI_ASSOC_SUCCESS:
DPRINTFN(2, ("Association succeeded\n"));
sc->flags |= IWI_FLAG_ASSOCIATED;
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
iwi_checkforqos(vap,
(const struct ieee80211_frame *)(assoc+1),
le16toh(notif->len) - sizeof(*assoc) - 1);
ieee80211_new_state(vap, IEEE80211_S_RUN, -1);
break;
case IWI_ASSOC_INIT:
sc->flags &= ~IWI_FLAG_ASSOCIATED;
switch (sc->fw_state) {
case IWI_FW_ASSOCIATING:
DPRINTFN(2, ("Association failed\n"));
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
break;
case IWI_FW_DISASSOCIATING:
DPRINTFN(2, ("Dissassociated\n"));
IWI_STATE_END(sc, IWI_FW_DISASSOCIATING);
vap->iv_stats.is_rx_disassoc++;
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
break;
}
break;
default:
device_printf(sc->sc_dev,
"unknown association state %u\n", assoc->state);
break;
}
break;
case IWI_NOTIF_TYPE_BEACON:
/* XXX check struct length */
beacon = (struct iwi_notif_beacon_state *)(notif + 1);
DPRINTFN(5, ("Beacon state (%u, %u)\n",
beacon->state, le32toh(beacon->number)));
if (beacon->state == IWI_BEACON_MISS) {
/*
* The firmware notifies us of every beacon miss
* so we need to track the count against the
* configured threshold before notifying the
* 802.11 layer.
* XXX try to roam, drop assoc only on much higher count
*/
if (le32toh(beacon->number) >= vap->iv_bmissthreshold) {
DPRINTF(("Beacon miss: %u >= %u\n",
le32toh(beacon->number),
vap->iv_bmissthreshold));
vap->iv_stats.is_beacon_miss++;
/*
* It's pointless to notify the 802.11 layer
* as it'll try to send a probe request (which
* we'll discard) and then timeout and drop us
* into scan state. Instead tell the firmware
* to disassociate and then on completion we'll
* kick the state machine to scan.
*/
ieee80211_runtask(ic, &sc->sc_disassoctask);
}
}
break;
case IWI_NOTIF_TYPE_CALIBRATION:
case IWI_NOTIF_TYPE_NOISE:
/* XXX handle? */
DPRINTFN(5, ("Notification (%u)\n", notif->type));
break;
case IWI_NOTIF_TYPE_LINK_QUALITY:
iwi_notif_link_quality(sc, notif);
break;
default:
DPRINTF(("unknown notification type %u flags 0x%x len %u\n",
notif->type, notif->flags, le16toh(notif->len)));
break;
}
}
static void
iwi_rx_intr(struct iwi_softc *sc)
{
struct iwi_rx_data *data;
struct iwi_hdr *hdr;
uint32_t hw;
hw = CSR_READ_4(sc, IWI_CSR_RX_RIDX);
for (; sc->rxq.cur != hw;) {
data = &sc->rxq.data[sc->rxq.cur];
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
hdr = mtod(data->m, struct iwi_hdr *);
switch (hdr->type) {
case IWI_HDR_TYPE_FRAME:
iwi_frame_intr(sc, data, sc->rxq.cur,
(struct iwi_frame *)(hdr + 1));
break;
case IWI_HDR_TYPE_NOTIF:
iwi_notification_intr(sc,
(struct iwi_notif *)(hdr + 1));
break;
default:
device_printf(sc->sc_dev, "unknown hdr type %u\n",
hdr->type);
}
DPRINTFN(15, ("rx done idx=%u\n", sc->rxq.cur));
sc->rxq.cur = (sc->rxq.cur + 1) % IWI_RX_RING_COUNT;
}
/* tell the firmware what we have processed */
hw = (hw == 0) ? IWI_RX_RING_COUNT - 1 : hw - 1;
CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, hw);
}
static void
iwi_tx_intr(struct iwi_softc *sc, struct iwi_tx_ring *txq)
{
struct iwi_tx_data *data;
uint32_t hw;
hw = CSR_READ_4(sc, txq->csr_ridx);
while (txq->next != hw) {
data = &txq->data[txq->next];
DPRINTFN(15, ("tx done idx=%u\n", txq->next));
bus_dmamap_sync(txq->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->data_dmat, data->map);
ieee80211_tx_complete(data->ni, data->m, 0);
data->ni = NULL;
data->m = NULL;
txq->queued--;
txq->next = (txq->next + 1) % IWI_TX_RING_COUNT;
}
sc->sc_tx_timer = 0;
if (sc->sc_softled)
iwi_led_event(sc, IWI_LED_TX);
iwi_start(sc);
}
static void
iwi_fatal_error_intr(struct iwi_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
device_printf(sc->sc_dev, "firmware error\n");
if (vap != NULL)
ieee80211_cancel_scan(vap);
ieee80211_runtask(ic, &sc->sc_restarttask);
sc->flags &= ~IWI_FLAG_BUSY;
sc->sc_busy_timer = 0;
wakeup(sc);
}
static void
iwi_radio_off_intr(struct iwi_softc *sc)
{
ieee80211_runtask(&sc->sc_ic, &sc->sc_radiofftask);
}
static void
iwi_intr(void *arg)
{
struct iwi_softc *sc = arg;
uint32_t r;
IWI_LOCK_DECL;
IWI_LOCK(sc);
if ((r = CSR_READ_4(sc, IWI_CSR_INTR)) == 0 || r == 0xffffffff) {
IWI_UNLOCK(sc);
return;
}
/* acknowledge interrupts */
CSR_WRITE_4(sc, IWI_CSR_INTR, r);
if (r & IWI_INTR_FATAL_ERROR) {
iwi_fatal_error_intr(sc);
goto done;
}
if (r & IWI_INTR_FW_INITED) {
if (!(r & (IWI_INTR_FATAL_ERROR | IWI_INTR_PARITY_ERROR)))
wakeup(sc);
}
if (r & IWI_INTR_RADIO_OFF)
iwi_radio_off_intr(sc);
if (r & IWI_INTR_CMD_DONE) {
sc->flags &= ~IWI_FLAG_BUSY;
sc->sc_busy_timer = 0;
wakeup(sc);
}
if (r & IWI_INTR_TX1_DONE)
iwi_tx_intr(sc, &sc->txq[0]);
if (r & IWI_INTR_TX2_DONE)
iwi_tx_intr(sc, &sc->txq[1]);
if (r & IWI_INTR_TX3_DONE)
iwi_tx_intr(sc, &sc->txq[2]);
if (r & IWI_INTR_TX4_DONE)
iwi_tx_intr(sc, &sc->txq[3]);
if (r & IWI_INTR_RX_DONE)
iwi_rx_intr(sc);
if (r & IWI_INTR_PARITY_ERROR) {
/* XXX rate-limit */
device_printf(sc->sc_dev, "parity error\n");
}
done:
IWI_UNLOCK(sc);
}
static int
iwi_cmd(struct iwi_softc *sc, uint8_t type, void *data, uint8_t len)
{
struct iwi_cmd_desc *desc;
IWI_LOCK_ASSERT(sc);
if (sc->flags & IWI_FLAG_BUSY) {
device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n",
__func__, type);
return EAGAIN;
}
sc->flags |= IWI_FLAG_BUSY;
sc->sc_busy_timer = 2;
desc = &sc->cmdq.desc[sc->cmdq.cur];
desc->hdr.type = IWI_HDR_TYPE_COMMAND;
desc->hdr.flags = IWI_HDR_FLAG_IRQ;
desc->type = type;
desc->len = len;
memcpy(desc->data, data, len);
bus_dmamap_sync(sc->cmdq.desc_dmat, sc->cmdq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(2, ("sending command idx=%u type=%u len=%u\n", sc->cmdq.cur,
type, len));
sc->cmdq.cur = (sc->cmdq.cur + 1) % IWI_CMD_RING_COUNT;
CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur);
return msleep(sc, &sc->sc_mtx, 0, "iwicmd", hz);
}
static void
iwi_write_ibssnode(struct iwi_softc *sc,
const u_int8_t addr[IEEE80211_ADDR_LEN], int entry)
{
struct iwi_ibssnode node;
/* write node information into NIC memory */
memset(&node, 0, sizeof node);
IEEE80211_ADDR_COPY(node.bssid, addr);
DPRINTF(("%s mac %6D station %u\n", __func__, node.bssid, ":", entry));
CSR_WRITE_REGION_1(sc,
IWI_CSR_NODE_BASE + entry * sizeof node,
(uint8_t *)&node, sizeof node);
}
static int
iwi_tx_start(struct iwi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
int ac)
{
struct ieee80211vap *vap = ni->ni_vap;
struct iwi_node *in = (struct iwi_node *)ni;
const struct ieee80211_frame *wh;
struct ieee80211_key *k;
struct iwi_tx_ring *txq = &sc->txq[ac];
struct iwi_tx_data *data;
struct iwi_tx_desc *desc;
struct mbuf *mnew;
bus_dma_segment_t segs[IWI_MAX_NSEG];
int error, nsegs, hdrlen, i;
int ismcast, flags, xflags, staid;
IWI_LOCK_ASSERT(sc);
wh = mtod(m0, const struct ieee80211_frame *);
/* NB: only data frames use this path */
hdrlen = ieee80211_hdrsize(wh);
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
flags = xflags = 0;
if (!ismcast)
flags |= IWI_DATA_FLAG_NEED_ACK;
if (vap->iv_flags & IEEE80211_F_SHPREAMBLE)
flags |= IWI_DATA_FLAG_SHPREAMBLE;
if (IEEE80211_QOS_HAS_SEQ(wh)) {
xflags |= IWI_DATA_XFLAG_QOS;
if (ieee80211_wme_vap_ac_is_noack(vap, ac))
flags &= ~IWI_DATA_FLAG_NEED_ACK;
}
/*
	 * This is only used in IBSS mode where the firmware expects an index
* in a h/w table instead of a destination address.
*/
if (vap->iv_opmode == IEEE80211_M_IBSS) {
if (!ismcast) {
if (in->in_station == -1) {
in->in_station = alloc_unr(sc->sc_unr);
if (in->in_station == -1) {
/* h/w table is full */
if_inc_counter(ni->ni_vap->iv_ifp,
IFCOUNTER_OERRORS, 1);
m_freem(m0);
ieee80211_free_node(ni);
return 0;
}
iwi_write_ibssnode(sc,
ni->ni_macaddr, in->in_station);
}
staid = in->in_station;
} else {
/*
* Multicast addresses have no associated node
* so there will be no station entry. We reserve
* entry 0 for one mcast address and use that.
* If there are many being used this will be
			 * expensive and we'll need to do a better job,
			 * but for now this handles the broadcast case.
*/
if (!IEEE80211_ADDR_EQ(wh->i_addr1, sc->sc_mcast)) {
IEEE80211_ADDR_COPY(sc->sc_mcast, wh->i_addr1);
iwi_write_ibssnode(sc, sc->sc_mcast, 0);
}
staid = 0;
}
} else
staid = 0;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct iwi_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
ieee80211_radiotap_tx(vap, m0);
}
data = &txq->data[txq->cur];
desc = &txq->desc[txq->cur];
/* save and trim IEEE802.11 header */
m_copydata(m0, 0, hdrlen, (caddr_t)&desc->wh);
m_adj(m0, hdrlen);
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs,
&nsegs, 0);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m0);
return ENOBUFS;
}
m0 = mnew;
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map,
m0, segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(m0);
return error;
}
}
data->m = m0;
data->ni = ni;
desc->hdr.type = IWI_HDR_TYPE_DATA;
desc->hdr.flags = IWI_HDR_FLAG_IRQ;
desc->station = staid;
desc->cmd = IWI_DATA_CMD_TX;
desc->len = htole16(m0->m_pkthdr.len);
desc->flags = flags;
desc->xflags = xflags;
#if 0
if (vap->iv_flags & IEEE80211_F_PRIVACY)
desc->wep_txkey = vap->iv_def_txkey;
else
#endif
desc->flags |= IWI_DATA_FLAG_NO_WEP;
desc->nseg = htole32(nsegs);
for (i = 0; i < nsegs; i++) {
desc->seg_addr[i] = htole32(segs[i].ds_addr);
desc->seg_len[i] = htole16(segs[i].ds_len);
}
bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
DPRINTFN(5, ("sending data frame txq=%u idx=%u len=%u nseg=%u\n",
ac, txq->cur, le16toh(desc->len), nsegs));
txq->queued++;
txq->cur = (txq->cur + 1) % IWI_TX_RING_COUNT;
CSR_WRITE_4(sc, txq->csr_widx, txq->cur);
return 0;
}
static int
iwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
/* no support; just discard */
m_freem(m);
ieee80211_free_node(ni);
return 0;
}
static int
iwi_transmit(struct ieee80211com *ic, struct mbuf *m)
{
struct iwi_softc *sc = ic->ic_softc;
int error;
IWI_LOCK_DECL;
IWI_LOCK(sc);
if (!sc->sc_running) {
IWI_UNLOCK(sc);
return (ENXIO);
}
error = mbufq_enqueue(&sc->sc_snd, m);
if (error) {
IWI_UNLOCK(sc);
return (error);
}
iwi_start(sc);
IWI_UNLOCK(sc);
return (0);
}
static void
iwi_start(struct iwi_softc *sc)
{
struct mbuf *m;
struct ieee80211_node *ni;
int ac;
IWI_LOCK_ASSERT(sc);
while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ac = M_WME_GETAC(m);
if (sc->txq[ac].queued > IWI_TX_RING_COUNT - 8) {
/* there is no place left in this ring; XXX tail drop */
mbufq_prepend(&sc->sc_snd, m);
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (iwi_tx_start(sc, m, ni, ac) != 0) {
if_inc_counter(ni->ni_vap->iv_ifp,
IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
break;
}
sc->sc_tx_timer = 5;
}
}
static void
iwi_watchdog(void *arg)
{
struct iwi_softc *sc = arg;
struct ieee80211com *ic = &sc->sc_ic;
IWI_LOCK_ASSERT(sc);
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
counter_u64_add(ic->ic_oerrors, 1);
ieee80211_runtask(ic, &sc->sc_restarttask);
}
}
if (sc->sc_state_timer > 0) {
if (--sc->sc_state_timer == 0) {
device_printf(sc->sc_dev,
"firmware stuck in state %d, resetting\n",
sc->fw_state);
if (sc->fw_state == IWI_FW_SCANNING)
ieee80211_cancel_scan(TAILQ_FIRST(&ic->ic_vaps));
ieee80211_runtask(ic, &sc->sc_restarttask);
sc->sc_state_timer = 3;
}
}
if (sc->sc_busy_timer > 0) {
if (--sc->sc_busy_timer == 0) {
device_printf(sc->sc_dev,
"firmware command timeout, resetting\n");
ieee80211_runtask(ic, &sc->sc_restarttask);
}
}
callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc);
}
static void
iwi_parent(struct ieee80211com *ic)
{
struct iwi_softc *sc = ic->ic_softc;
int startall = 0;
IWI_LOCK_DECL;
IWI_LOCK(sc);
if (ic->ic_nrunning > 0) {
if (!sc->sc_running) {
iwi_init_locked(sc);
startall = 1;
}
} else if (sc->sc_running)
iwi_stop_locked(sc);
IWI_UNLOCK(sc);
if (startall)
ieee80211_start_all(ic);
}
static int
iwi_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
struct ifreq *ifr = data;
struct iwi_softc *sc = ic->ic_softc;
int error;
IWI_LOCK_DECL;
IWI_LOCK(sc);
switch (cmd) {
case SIOCGIWISTATS:
/* XXX validate permissions/memory/etc? */
error = copyout(&sc->sc_linkqual, ifr->ifr_data,
sizeof(struct iwi_notif_link_quality));
break;
case SIOCZIWISTATS:
memset(&sc->sc_linkqual, 0,
sizeof(struct iwi_notif_link_quality));
error = 0;
break;
default:
error = ENOTTY;
break;
}
IWI_UNLOCK(sc);
return (error);
}
static void
iwi_stop_master(struct iwi_softc *sc)
{
uint32_t tmp;
int ntries;
/* disable interrupts */
CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, 0);
CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_STOP_MASTER);
for (ntries = 0; ntries < 5; ntries++) {
if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED)
break;
DELAY(10);
}
if (ntries == 5)
device_printf(sc->sc_dev, "timeout waiting for master\n");
tmp = CSR_READ_4(sc, IWI_CSR_RST);
CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_PRINCETON_RESET);
sc->flags &= ~IWI_FLAG_FW_INITED;
}
static int
iwi_reset(struct iwi_softc *sc)
{
uint32_t tmp;
int i, ntries;
iwi_stop_master(sc);
tmp = CSR_READ_4(sc, IWI_CSR_CTL);
CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT);
CSR_WRITE_4(sc, IWI_CSR_READ_INT, IWI_READ_INT_INIT_HOST);
/* wait for clock stabilization */
for (ntries = 0; ntries < 1000; ntries++) {
if (CSR_READ_4(sc, IWI_CSR_CTL) & IWI_CTL_CLOCK_READY)
break;
DELAY(200);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for clock stabilization\n");
return EIO;
}
tmp = CSR_READ_4(sc, IWI_CSR_RST);
CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_SOFT_RESET);
DELAY(10);
tmp = CSR_READ_4(sc, IWI_CSR_CTL);
CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT);
/* clear NIC memory */
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0);
for (i = 0; i < 0xc000; i++)
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0);
return 0;
}
static const struct iwi_firmware_ohdr *
iwi_setup_ofw(struct iwi_softc *sc, struct iwi_fw *fw)
{
const struct firmware *fp = fw->fp;
const struct iwi_firmware_ohdr *hdr;
if (fp->datasize < sizeof (struct iwi_firmware_ohdr)) {
device_printf(sc->sc_dev, "image '%s' too small\n", fp->name);
return NULL;
}
hdr = (const struct iwi_firmware_ohdr *)fp->data;
if ((IWI_FW_GET_MAJOR(le32toh(hdr->version)) != IWI_FW_REQ_MAJOR) ||
(IWI_FW_GET_MINOR(le32toh(hdr->version)) != IWI_FW_REQ_MINOR)) {
device_printf(sc->sc_dev, "version for '%s' %d.%d != %d.%d\n",
fp->name, IWI_FW_GET_MAJOR(le32toh(hdr->version)),
IWI_FW_GET_MINOR(le32toh(hdr->version)), IWI_FW_REQ_MAJOR,
IWI_FW_REQ_MINOR);
return NULL;
}
fw->data = ((const char *) fp->data) + sizeof(struct iwi_firmware_ohdr);
fw->size = fp->datasize - sizeof(struct iwi_firmware_ohdr);
fw->name = fp->name;
return hdr;
}
static const struct iwi_firmware_ohdr *
iwi_setup_oucode(struct iwi_softc *sc, struct iwi_fw *fw)
{
const struct iwi_firmware_ohdr *hdr;
hdr = iwi_setup_ofw(sc, fw);
if (hdr != NULL && le32toh(hdr->mode) != IWI_FW_MODE_UCODE) {
device_printf(sc->sc_dev, "%s is not a ucode image\n",
fw->name);
hdr = NULL;
}
return hdr;
}
static void
iwi_getfw(struct iwi_fw *fw, const char *fwname,
struct iwi_fw *uc, const char *ucname)
{
if (fw->fp == NULL)
fw->fp = firmware_get(fwname);
/* NB: pre-3.0 ucode is packaged separately */
if (uc->fp == NULL && fw->fp != NULL && fw->fp->version < 300)
uc->fp = firmware_get(ucname);
}
/*
* Get the required firmware images if not already loaded.
* Note that we hold firmware images so long as the device
* is marked up in case we need to reload them on device init.
* This is necessary because we re-init the device sometimes
* from a context where we cannot read from the filesystem
* (e.g. from the taskqueue thread when rfkill is re-enabled).
* XXX return 0 on success, 1 on error.
*
* NB: the order of get'ing and put'ing images here is
* intentional to support handling firmware images bundled
* by operating mode and/or all together in one file with
* the boot firmware as "master".
*/
static int
iwi_get_firmware(struct iwi_softc *sc, enum ieee80211_opmode opmode)
{
const struct iwi_firmware_hdr *hdr;
const struct firmware *fp;
/* invalidate cached firmware on mode change */
if (sc->fw_mode != opmode)
iwi_put_firmware(sc);
switch (opmode) {
case IEEE80211_M_STA:
iwi_getfw(&sc->fw_fw, "iwi_bss", &sc->fw_uc, "iwi_ucode_bss");
break;
case IEEE80211_M_IBSS:
iwi_getfw(&sc->fw_fw, "iwi_ibss", &sc->fw_uc, "iwi_ucode_ibss");
break;
case IEEE80211_M_MONITOR:
iwi_getfw(&sc->fw_fw, "iwi_monitor",
&sc->fw_uc, "iwi_ucode_monitor");
break;
default:
device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return EINVAL;
}
fp = sc->fw_fw.fp;
if (fp == NULL) {
device_printf(sc->sc_dev, "could not load firmware\n");
goto bad;
}
if (fp->version < 300) {
/*
* Firmware prior to 3.0 was packaged as separate
* boot, firmware, and ucode images. Verify the
* ucode image was read in, retrieve the boot image
* if needed, and check version stamps for consistency.
* The version stamps in the data are also checked
* above; this is a bit paranoid but is a cheap
* safeguard against mis-packaging.
*/
if (sc->fw_uc.fp == NULL) {
device_printf(sc->sc_dev, "could not load ucode\n");
goto bad;
}
if (sc->fw_boot.fp == NULL) {
sc->fw_boot.fp = firmware_get("iwi_boot");
if (sc->fw_boot.fp == NULL) {
device_printf(sc->sc_dev,
"could not load boot firmware\n");
goto bad;
}
}
if (sc->fw_boot.fp->version != sc->fw_fw.fp->version ||
sc->fw_boot.fp->version != sc->fw_uc.fp->version) {
device_printf(sc->sc_dev,
"firmware version mismatch: "
"'%s' is %d, '%s' is %d, '%s' is %d\n",
sc->fw_boot.fp->name, sc->fw_boot.fp->version,
sc->fw_uc.fp->name, sc->fw_uc.fp->version,
sc->fw_fw.fp->name, sc->fw_fw.fp->version
);
goto bad;
}
/*
* Check and setup each image.
*/
if (iwi_setup_oucode(sc, &sc->fw_uc) == NULL ||
iwi_setup_ofw(sc, &sc->fw_boot) == NULL ||
iwi_setup_ofw(sc, &sc->fw_fw) == NULL)
goto bad;
} else {
/*
* Check and setup combined image.
*/
if (fp->datasize < sizeof(struct iwi_firmware_hdr)) {
device_printf(sc->sc_dev, "image '%s' too small\n",
fp->name);
goto bad;
}
hdr = (const struct iwi_firmware_hdr *)fp->data;
if (fp->datasize < sizeof(*hdr) + le32toh(hdr->bsize) + le32toh(hdr->usize)
+ le32toh(hdr->fsize)) {
device_printf(sc->sc_dev, "image '%s' too small (2)\n",
fp->name);
goto bad;
}
sc->fw_boot.data = ((const char *) fp->data) + sizeof(*hdr);
sc->fw_boot.size = le32toh(hdr->bsize);
sc->fw_boot.name = fp->name;
sc->fw_uc.data = sc->fw_boot.data + sc->fw_boot.size;
sc->fw_uc.size = le32toh(hdr->usize);
sc->fw_uc.name = fp->name;
sc->fw_fw.data = sc->fw_uc.data + sc->fw_uc.size;
sc->fw_fw.size = le32toh(hdr->fsize);
sc->fw_fw.name = fp->name;
}
#if 0
device_printf(sc->sc_dev, "boot %d ucode %d fw %d bytes\n",
sc->fw_boot.size, sc->fw_uc.size, sc->fw_fw.size);
#endif
sc->fw_mode = opmode;
return 0;
bad:
iwi_put_firmware(sc);
return 1;
}
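/*
 * Illustrative layout of a combined (version >= 300) firmware image as
 * parsed above; offsets are relative to fp->data and the three sizes
 * come from the little-endian header fields:
 *
 *   +--------------------------+ 0
 *   | struct iwi_firmware_hdr  |
 *   +--------------------------+ sizeof(hdr)
 *   | boot image (hdr->bsize)  |
 *   +--------------------------+ sizeof(hdr) + bsize
 *   | ucode image (hdr->usize) |
 *   +--------------------------+ sizeof(hdr) + bsize + usize
 *   | main fw (hdr->fsize)     |
 *   +--------------------------+ sizeof(hdr) + bsize + usize + fsize
 */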
static void
iwi_put_fw(struct iwi_fw *fw)
{
if (fw->fp != NULL) {
firmware_put(fw->fp, FIRMWARE_UNLOAD);
fw->fp = NULL;
}
fw->data = NULL;
fw->size = 0;
fw->name = NULL;
}
/*
* Release any cached firmware images.
*/
static void
iwi_put_firmware(struct iwi_softc *sc)
{
iwi_put_fw(&sc->fw_uc);
iwi_put_fw(&sc->fw_fw);
iwi_put_fw(&sc->fw_boot);
}
static int
iwi_load_ucode(struct iwi_softc *sc, const struct iwi_fw *fw)
{
uint32_t tmp;
const uint16_t *w;
const char *uc = fw->data;
size_t size = fw->size;
int i, ntries, error;
IWI_LOCK_ASSERT(sc);
error = 0;
CSR_WRITE_4(sc, IWI_CSR_RST, CSR_READ_4(sc, IWI_CSR_RST) |
IWI_RST_STOP_MASTER);
for (ntries = 0; ntries < 5; ntries++) {
if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED)
break;
DELAY(10);
}
if (ntries == 5) {
device_printf(sc->sc_dev, "timeout waiting for master\n");
error = EIO;
goto fail;
}
MEM_WRITE_4(sc, 0x3000e0, 0x80000000);
DELAY(5000);
tmp = CSR_READ_4(sc, IWI_CSR_RST);
tmp &= ~IWI_RST_PRINCETON_RESET;
CSR_WRITE_4(sc, IWI_CSR_RST, tmp);
DELAY(5000);
MEM_WRITE_4(sc, 0x3000e0, 0);
DELAY(1000);
MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 1);
DELAY(1000);
MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 0);
DELAY(1000);
MEM_WRITE_1(sc, 0x200000, 0x00);
MEM_WRITE_1(sc, 0x200000, 0x40);
DELAY(1000);
/* write microcode into adapter memory */
for (w = (const uint16_t *)uc; size > 0; w++, size -= 2)
MEM_WRITE_2(sc, 0x200010, htole16(*w));
MEM_WRITE_1(sc, 0x200000, 0x00);
MEM_WRITE_1(sc, 0x200000, 0x80);
/* wait until we get an answer */
for (ntries = 0; ntries < 100; ntries++) {
if (MEM_READ_1(sc, 0x200000) & 1)
break;
DELAY(100);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"timeout waiting for ucode to initialize\n");
error = EIO;
goto fail;
}
/* read the answer or the firmware will not initialize properly */
for (i = 0; i < 7; i++)
MEM_READ_4(sc, 0x200004);
MEM_WRITE_1(sc, 0x200000, 0x00);
fail:
return error;
}
/* macro to handle unaligned little endian data in firmware image */
#define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24)
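/*
 * Worked example (not compiled): GETLE32() assembles a host-order 32-bit
 * value from four unaligned little-endian bytes, independent of host
 * byte order; the array name below is hypothetical.
 */
#if 0
static const u_char le32_example[4] = { 0x78, 0x56, 0x34, 0x12 };
/* GETLE32(le32_example) == 0x12345678 on both LE and BE hosts */
#endif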
static int
iwi_load_firmware(struct iwi_softc *sc, const struct iwi_fw *fw)
{
u_char *p, *end;
uint32_t sentinel, ctl, src, dst, sum, len, mlen, tmp;
int ntries, error;
IWI_LOCK_ASSERT(sc);
/* copy firmware image to DMA memory */
memcpy(sc->fw_virtaddr, fw->data, fw->size);
/* make sure the adapter will get up-to-date values */
bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_PREWRITE);
/* tell the adapter where the command blocks are stored */
MEM_WRITE_4(sc, 0x3000a0, 0x27000);
/*
* Store command blocks into adapter's internal memory using register
* indirections. The adapter will read the firmware image through DMA
* using information stored in command blocks.
*/
src = sc->fw_physaddr;
p = sc->fw_virtaddr;
end = p + fw->size;
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0x27000);
while (p < end) {
dst = GETLE32(p); p += 4; src += 4;
len = GETLE32(p); p += 4; src += 4;
p += len;
while (len > 0) {
mlen = min(len, IWI_CB_MAXDATALEN);
ctl = IWI_CB_DEFAULT_CTL | mlen;
sum = ctl ^ src ^ dst;
/* write a command block */
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, ctl);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, src);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, dst);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, sum);
src += mlen;
dst += mlen;
len -= mlen;
}
}
/* write a fictive final command block (sentinel) */
sentinel = CSR_READ_4(sc, IWI_CSR_AUTOINC_ADDR);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0);
tmp = CSR_READ_4(sc, IWI_CSR_RST);
tmp &= ~(IWI_RST_MASTER_DISABLED | IWI_RST_STOP_MASTER);
CSR_WRITE_4(sc, IWI_CSR_RST, tmp);
/* tell the adapter to start processing command blocks */
MEM_WRITE_4(sc, 0x3000a4, 0x540100);
/* wait until the adapter reaches the sentinel */
for (ntries = 0; ntries < 400; ntries++) {
if (MEM_READ_4(sc, 0x3000d0) >= sentinel)
break;
DELAY(100);
}
/* sync dma, just in case */
bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_POSTWRITE);
if (ntries == 400) {
device_printf(sc->sc_dev,
"timeout processing command blocks for %s firmware\n",
fw->name);
return EIO;
}
/* we're done with command blocks processing */
MEM_WRITE_4(sc, 0x3000a4, 0x540c00);
/* allow interrupts so we know when the firmware is ready */
CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, IWI_INTR_MASK);
/* tell the adapter to initialize the firmware */
CSR_WRITE_4(sc, IWI_CSR_RST, 0);
tmp = CSR_READ_4(sc, IWI_CSR_CTL);
CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_ALLOW_STANDBY);
/* wait at most one second for firmware initialization to complete */
if ((error = msleep(sc, &sc->sc_mtx, 0, "iwiinit", hz)) != 0) {
device_printf(sc->sc_dev, "timeout waiting for %s firmware "
"initialization to complete\n", fw->name);
}
return error;
}
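/*
 * Each command block pushed through the auto-increment port above is
 * four 32-bit words: a control word (IWI_CB_DEFAULT_CTL | length, with
 * length capped at IWI_CB_MAXDATALEN), the DMA source address, the
 * adapter destination address and an XOR checksum of the other three.
 * A minimal sketch of that encoding, using a hypothetical helper name:
 */
#if 0
static void
iwi_emit_cb(struct iwi_softc *sc, uint32_t src, uint32_t dst, uint32_t mlen)
{
uint32_t ctl = IWI_CB_DEFAULT_CTL | mlen;
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, ctl);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, src);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, dst);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, ctl ^ src ^ dst);
}
#endif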
static int
iwi_setpowermode(struct iwi_softc *sc, struct ieee80211vap *vap)
{
uint32_t data;
if (vap->iv_flags & IEEE80211_F_PMGTON) {
/* XXX set more fine-grained operation */
data = htole32(IWI_POWER_MODE_MAX);
} else
data = htole32(IWI_POWER_MODE_CAM);
DPRINTF(("Setting power mode to %u\n", le32toh(data)));
return iwi_cmd(sc, IWI_CMD_SET_POWER_MODE, &data, sizeof data);
}
static int
iwi_setwepkeys(struct iwi_softc *sc, struct ieee80211vap *vap)
{
struct iwi_wep_key wepkey;
struct ieee80211_key *wk;
int error, i;
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
wk = &vap->iv_nw_keys[i];
wepkey.cmd = IWI_WEP_KEY_CMD_SETKEY;
wepkey.idx = i;
wepkey.len = wk->wk_keylen;
memset(wepkey.key, 0, sizeof wepkey.key);
memcpy(wepkey.key, wk->wk_key, wk->wk_keylen);
DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx,
wepkey.len));
error = iwi_cmd(sc, IWI_CMD_SET_WEP_KEY, &wepkey,
sizeof wepkey);
if (error != 0)
return error;
}
return 0;
}
static int
iwi_config(struct iwi_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct iwi_configuration config;
struct iwi_rateset rs;
struct iwi_txpower power;
uint32_t data;
int error, i;
IWI_LOCK_ASSERT(sc);
DPRINTF(("Setting MAC address to %6D\n", ic->ic_macaddr, ":"));
error = iwi_cmd(sc, IWI_CMD_SET_MAC_ADDRESS, ic->ic_macaddr,
IEEE80211_ADDR_LEN);
if (error != 0)
return error;
memset(&config, 0, sizeof config);
config.bluetooth_coexistence = sc->bluetooth;
config.silence_threshold = 0x1e;
config.antenna = sc->antenna;
config.multicast_enabled = 1;
config.answer_pbreq = (ic->ic_opmode == IEEE80211_M_IBSS) ? 1 : 0;
config.disable_unicast_decryption = 1;
config.disable_multicast_decryption = 1;
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
config.allow_invalid_frames = 1;
config.allow_beacon_and_probe_resp = 1;
config.allow_mgt = 1;
}
DPRINTF(("Configuring adapter\n"));
error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config);
if (error != 0)
return error;
if (ic->ic_opmode == IEEE80211_M_IBSS) {
power.mode = IWI_MODE_11B;
power.nchan = 11;
for (i = 0; i < 11; i++) {
power.chan[i].chan = i + 1;
power.chan[i].power = IWI_TXPOWER_MAX;
}
DPRINTF(("Setting .11b channels tx power\n"));
error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power);
if (error != 0)
return error;
power.mode = IWI_MODE_11G;
DPRINTF(("Setting .11g channels tx power\n"));
error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power);
if (error != 0)
return error;
}
memset(&rs, 0, sizeof rs);
rs.mode = IWI_MODE_11G;
rs.type = IWI_RATESET_TYPE_SUPPORTED;
rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates;
memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11G].rs_rates,
rs.nrates);
DPRINTF(("Setting .11bg supported rates (%u)\n", rs.nrates));
error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs);
if (error != 0)
return error;
memset(&rs, 0, sizeof rs);
rs.mode = IWI_MODE_11A;
rs.type = IWI_RATESET_TYPE_SUPPORTED;
rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates;
memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11A].rs_rates,
rs.nrates);
DPRINTF(("Setting .11a supported rates (%u)\n", rs.nrates));
error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs);
if (error != 0)
return error;
data = htole32(arc4random());
DPRINTF(("Setting initialization vector to %u\n", le32toh(data)));
error = iwi_cmd(sc, IWI_CMD_SET_IV, &data, sizeof data);
if (error != 0)
return error;
/* enable adapter */
DPRINTF(("Enabling adapter\n"));
return iwi_cmd(sc, IWI_CMD_ENABLE, NULL, 0);
}
static __inline void
set_scan_type(struct iwi_scan_ext *scan, int ix, int scan_type)
{
uint8_t *st = &scan->scan_type[ix / 2];
if (ix % 2)
*st = (*st & 0xf0) | ((scan_type & 0xf) << 0);
else
*st = (*st & 0x0f) | ((scan_type & 0xf) << 4);
}
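/*
 * Worked example: scan types are 4-bit values packed two per byte,
 * indexed like scan->channels[]; even slots land in the high nibble and
 * odd slots in the low nibble. Packing slot 2 as broadcast and slot 3
 * as passive therefore leaves scan->scan_type[1] == 0x31 (nibble values
 * as named in the scantype[] debug table below: BCAST == 3, PASV == 1).
 */
#if 0
/* usage sketch; 'scan' is a caller-provided struct iwi_scan_ext */
static void
set_scan_type_example(struct iwi_scan_ext *scan)
{
set_scan_type(scan, 2, IWI_SCAN_TYPE_BROADCAST);	/* high nibble of scan_type[1] */
set_scan_type(scan, 3, IWI_SCAN_TYPE_PASSIVE);	/* low nibble of scan_type[1] */
}
#endif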
static int
scan_type(const struct ieee80211_scan_state *ss,
const struct ieee80211_channel *chan)
{
/* We can only set one essid for a directed scan */
if (ss->ss_nssid != 0)
return IWI_SCAN_TYPE_BDIRECTED;
if ((ss->ss_flags & IEEE80211_SCAN_ACTIVE) &&
(chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
return IWI_SCAN_TYPE_BROADCAST;
return IWI_SCAN_TYPE_PASSIVE;
}
static __inline int
scan_band(const struct ieee80211_channel *c)
{
return IEEE80211_IS_CHAN_5GHZ(c) ? IWI_CHAN_5GHZ : IWI_CHAN_2GHZ;
}
static void
iwi_monitor_scan(void *arg, int npending)
{
struct iwi_softc *sc = arg;
IWI_LOCK_DECL;
IWI_LOCK(sc);
(void) iwi_scanchan(sc, 2000, 0);
IWI_UNLOCK(sc);
}
/*
* Start a scan on the current channel or all channels.
*/
static int
iwi_scanchan(struct iwi_softc *sc, unsigned long maxdwell, int allchan)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_channel *chan;
struct ieee80211_scan_state *ss;
struct iwi_scan_ext scan;
int error = 0;
IWI_LOCK_ASSERT(sc);
if (sc->fw_state == IWI_FW_SCANNING) {
/*
* This should not happen as we only trigger scan_next after
* completion.
*/
DPRINTF(("%s: called too early - still scanning\n", __func__));
return (EBUSY);
}
IWI_STATE_BEGIN(sc, IWI_FW_SCANNING);
ss = ic->ic_scan;
memset(&scan, 0, sizeof scan);
scan.full_scan_index = htole32(++sc->sc_scangen);
scan.dwell_time[IWI_SCAN_TYPE_PASSIVE] = htole16(maxdwell);
if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) {
/*
* Use very short dwell times when we send probe request
* frames. Without this, bg scans hang. Ideally this should
* be handled with early termination as done by net80211, but
* that's not feasible (aborting a scan is problematic).
*/
scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(30);
scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(30);
} else {
scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(maxdwell);
scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(maxdwell);
}
/* We can only set one essid for a directed scan */
if (ss->ss_nssid != 0) {
error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ss->ss_ssid[0].ssid,
ss->ss_ssid[0].len);
if (error)
return (error);
}
if (allchan) {
int i, next, band, b, bstart;
/*
* Convert the scan list to the run-length encoded channel list
* the firmware requires (preserving the order set up by
* net80211). The first entry in each run specifies the
* band and the count of items in the run; see the worked
* example after this function.
*/
next = 0; /* next open slot */
bstart = 0; /* NB: not needed, silence compiler */
band = -1; /* NB: impossible value */
KASSERT(ss->ss_last > 0, ("no channels"));
for (i = 0; i < ss->ss_last; i++) {
chan = ss->ss_chans[i];
b = scan_band(chan);
if (b != band) {
if (band != -1)
scan.channels[bstart] =
(next - bstart) | band;
/* NB: this allocates a slot for the run-len */
band = b, bstart = next++;
}
if (next >= IWI_SCAN_CHANNELS) {
DPRINTF(("truncating scan list\n"));
break;
}
scan.channels[next] = ieee80211_chan2ieee(ic, chan);
set_scan_type(&scan, next, scan_type(ss, chan));
next++;
}
scan.channels[bstart] = (next - bstart) | band;
} else {
/* Scan the current channel only */
chan = ic->ic_curchan;
scan.channels[0] = 1 | scan_band(chan);
scan.channels[1] = ieee80211_chan2ieee(ic, chan);
set_scan_type(&scan, 1, scan_type(ss, chan));
}
#ifdef IWI_DEBUG
if (iwi_debug > 0) {
static const char *scantype[8] =
{ "PSTOP", "PASV", "DIR", "BCAST", "BDIR", "5", "6", "7" };
int i;
printf("Scan request: index %u dwell %d/%d/%d\n"
, le32toh(scan.full_scan_index)
, le16toh(scan.dwell_time[IWI_SCAN_TYPE_PASSIVE])
, le16toh(scan.dwell_time[IWI_SCAN_TYPE_BROADCAST])
, le16toh(scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED])
);
i = 0;
do {
int run = scan.channels[i];
if (run == 0)
break;
printf("Scan %d %s channels:", run & 0x3f,
run & IWI_CHAN_2GHZ ? "2.4GHz" : "5GHz");
for (run &= 0x3f, i++; run > 0; run--, i++) {
uint8_t type = scan.scan_type[i/2];
printf(" %u/%s", scan.channels[i],
scantype[(i & 1 ? type : type>>4) & 7]);
}
printf("\n");
} while (i < IWI_SCAN_CHANNELS);
}
#endif
return (iwi_cmd(sc, IWI_CMD_SCAN_EXT, &scan, sizeof scan));
}
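/*
 * Worked example of the channel encoding built above (channel numbers
 * are illustrative): scanning 2GHz channels 1, 6 and 11 followed by
 * 5GHz channel 36 produces
 *
 *   scan.channels[0] = IWI_CHAN_2GHZ | 4   <- run header (header + 3 channels)
 *   scan.channels[1] = 1
 *   scan.channels[2] = 6
 *   scan.channels[3] = 11
 *   scan.channels[4] = IWI_CHAN_5GHZ | 2   <- header for the second run
 *   scan.channels[5] = 36
 *
 * with one 4-bit scan type per slot packed by set_scan_type(). The
 * count in each header is next - bstart as written by the loop, i.e.
 * it covers the header slot plus its channels.
 */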
static int
iwi_set_sensitivity(struct iwi_softc *sc, int8_t rssi_dbm)
{
struct iwi_sensitivity sens;
DPRINTF(("Setting sensitivity to %d\n", rssi_dbm));
memset(&sens, 0, sizeof sens);
sens.rssi = htole16(rssi_dbm);
return iwi_cmd(sc, IWI_CMD_SET_SENSITIVITY, &sens, sizeof sens);
}
static int
iwi_auth_and_assoc(struct iwi_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211_node *ni;
struct iwi_configuration config;
struct iwi_associate *assoc = &sc->assoc;
struct iwi_rateset rs;
uint16_t capinfo;
uint32_t data;
int error, mode;
IWI_LOCK_ASSERT(sc);
ni = ieee80211_ref_node(vap->iv_bss);
if (sc->flags & IWI_FLAG_ASSOCIATED) {
DPRINTF(("Already associated\n"));
return (-1);
}
IWI_STATE_BEGIN(sc, IWI_FW_ASSOCIATING);
error = 0;
mode = 0;
if (IEEE80211_IS_CHAN_A(ic->ic_curchan))
mode = IWI_MODE_11A;
else if (IEEE80211_IS_CHAN_G(ic->ic_curchan))
mode = IWI_MODE_11G;
if (IEEE80211_IS_CHAN_B(ic->ic_curchan))
mode = IWI_MODE_11B;
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
memset(&config, 0, sizeof config);
config.bluetooth_coexistence = sc->bluetooth;
config.antenna = sc->antenna;
config.multicast_enabled = 1;
if (mode == IWI_MODE_11G)
config.use_protection = 1;
config.answer_pbreq =
(vap->iv_opmode == IEEE80211_M_IBSS) ? 1 : 0;
config.disable_unicast_decryption = 1;
config.disable_multicast_decryption = 1;
DPRINTF(("Configuring adapter\n"));
error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config);
if (error != 0)
goto done;
}
#ifdef IWI_DEBUG
if (iwi_debug > 0) {
printf("Setting ESSID to ");
ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
printf("\n");
}
#endif
error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ni->ni_essid, ni->ni_esslen);
if (error != 0)
goto done;
error = iwi_setpowermode(sc, vap);
if (error != 0)
goto done;
data = htole32(vap->iv_rtsthreshold);
DPRINTF(("Setting RTS threshold to %u\n", le32toh(data)));
error = iwi_cmd(sc, IWI_CMD_SET_RTS_THRESHOLD, &data, sizeof data);
if (error != 0)
goto done;
data = htole32(vap->iv_fragthreshold);
DPRINTF(("Setting fragmentation threshold to %u\n", le32toh(data)));
error = iwi_cmd(sc, IWI_CMD_SET_FRAG_THRESHOLD, &data, sizeof data);
if (error != 0)
goto done;
/* the rate set has already been "negotiated" */
memset(&rs, 0, sizeof rs);
rs.mode = mode;
rs.type = IWI_RATESET_TYPE_NEGOTIATED;
rs.nrates = ni->ni_rates.rs_nrates;
if (rs.nrates > IWI_RATESET_SIZE) {
DPRINTF(("Truncating negotiated rate set from %u\n",
rs.nrates));
rs.nrates = IWI_RATESET_SIZE;
}
memcpy(rs.rates, ni->ni_rates.rs_rates, rs.nrates);
DPRINTF(("Setting negotiated rates (%u)\n", rs.nrates));
error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs);
if (error != 0)
goto done;
memset(assoc, 0, sizeof *assoc);
if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) {
/* NB: don't treat WME setup as failure */
if (iwi_wme_setparams(sc) == 0 && iwi_wme_setie(sc) == 0)
assoc->policy |= htole16(IWI_POLICY_WME);
/* XXX complain on failure? */
}
if (vap->iv_appie_wpa != NULL) {
struct ieee80211_appie *ie = vap->iv_appie_wpa;
DPRINTF(("Setting optional IE (len=%u)\n", ie->ie_len));
error = iwi_cmd(sc, IWI_CMD_SET_OPTIE, ie->ie_data, ie->ie_len);
if (error != 0)
goto done;
}
error = iwi_set_sensitivity(sc, ic->ic_node_getrssi(ni));
if (error != 0)
goto done;
assoc->mode = mode;
assoc->chan = ic->ic_curchan->ic_ieee;
/*
* NB: do not arrange for shared key auth w/o privacy
* (i.e. a wep key); it causes a firmware error.
*/
if ((vap->iv_flags & IEEE80211_F_PRIVACY) &&
ni->ni_authmode == IEEE80211_AUTH_SHARED) {
assoc->auth = IWI_AUTH_SHARED;
/*
* It's possible to have privacy marked but no default
* key set up. This typically is due to a user app bug,
* but if we blindly grab the key the firmware will
* barf, so avoid it for now.
*/
if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE)
assoc->auth |= vap->iv_def_txkey << 4;
error = iwi_setwepkeys(sc, vap);
if (error != 0)
goto done;
}
if (vap->iv_flags & IEEE80211_F_WPA)
assoc->policy |= htole16(IWI_POLICY_WPA);
if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf == 0)
assoc->type = IWI_HC_IBSS_START;
else
assoc->type = IWI_HC_ASSOC;
memcpy(assoc->tstamp, ni->ni_tstamp.data, 8);
if (vap->iv_opmode == IEEE80211_M_IBSS)
capinfo = IEEE80211_CAPINFO_IBSS;
else
capinfo = IEEE80211_CAPINFO_ESS;
if (vap->iv_flags & IEEE80211_F_PRIVACY)
capinfo |= IEEE80211_CAPINFO_PRIVACY;
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
if (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME)
capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
assoc->capinfo = htole16(capinfo);
assoc->lintval = htole16(ic->ic_lintval);
assoc->intval = htole16(ni->ni_intval);
IEEE80211_ADDR_COPY(assoc->bssid, ni->ni_bssid);
if (vap->iv_opmode == IEEE80211_M_IBSS)
IEEE80211_ADDR_COPY(assoc->dst, ifp->if_broadcastaddr);
else
IEEE80211_ADDR_COPY(assoc->dst, ni->ni_bssid);
DPRINTF(("%s bssid %6D dst %6D channel %u policy 0x%x "
"auth %u capinfo 0x%x lintval %u bintval %u\n",
assoc->type == IWI_HC_IBSS_START ? "Start" : "Join",
assoc->bssid, ":", assoc->dst, ":",
assoc->chan, le16toh(assoc->policy), assoc->auth,
le16toh(assoc->capinfo), le16toh(assoc->lintval),
le16toh(assoc->intval)));
error = iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc);
done:
ieee80211_free_node(ni);
if (error)
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
return (error);
}
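/*
 * Worked example of the assoc->auth packing above: shared-key auth with
 * default WEP transmit key index 2 stores IWI_AUTH_SHARED | (2 << 4),
 * i.e. the auth type in the low bits and the key index shifted into the
 * high nibble; this is the "auth %u" value in the debug printf.
 */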
static void
iwi_disassoc(void *arg, int pending)
{
struct iwi_softc *sc = arg;
IWI_LOCK_DECL;
IWI_LOCK(sc);
iwi_disassociate(sc, 0);
IWI_UNLOCK(sc);
}
static int
iwi_disassociate(struct iwi_softc *sc, int quiet)
{
struct iwi_associate *assoc = &sc->assoc;
if ((sc->flags & IWI_FLAG_ASSOCIATED) == 0) {
DPRINTF(("Not associated\n"));
return (-1);
}
IWI_STATE_BEGIN(sc, IWI_FW_DISASSOCIATING);
if (quiet)
assoc->type = IWI_HC_DISASSOC_QUIET;
else
assoc->type = IWI_HC_DISASSOC;
DPRINTF(("Trying to disassociate from %6D channel %u\n",
assoc->bssid, ":", assoc->chan));
return iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc);
}
/*
* release dma resources for the firmware
*/
static void
iwi_release_fw_dma(struct iwi_softc *sc)
{
if (sc->fw_flags & IWI_FW_HAVE_PHY)
bus_dmamap_unload(sc->fw_dmat, sc->fw_map);
if (sc->fw_flags & IWI_FW_HAVE_MAP)
bus_dmamem_free(sc->fw_dmat, sc->fw_virtaddr, sc->fw_map);
if (sc->fw_flags & IWI_FW_HAVE_DMAT)
bus_dma_tag_destroy(sc->fw_dmat);
sc->fw_flags = 0;
sc->fw_dma_size = 0;
sc->fw_dmat = NULL;
sc->fw_map = NULL;
sc->fw_physaddr = 0;
sc->fw_virtaddr = NULL;
}
/*
* allocate the dma descriptor for the firmware.
* Return 0 on success, 1 on error.
* Must be called unlocked, protected by IWI_FLAG_FW_LOADING.
*/
static int
iwi_init_fw_dma(struct iwi_softc *sc, int size)
{
if (sc->fw_dma_size >= size)
return 0;
if (bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
size, 1, size, 0, NULL, NULL, &sc->fw_dmat) != 0) {
device_printf(sc->sc_dev,
"could not create firmware DMA tag\n");
goto error;
}
sc->fw_flags |= IWI_FW_HAVE_DMAT;
if (bus_dmamem_alloc(sc->fw_dmat, &sc->fw_virtaddr, 0,
&sc->fw_map) != 0) {
device_printf(sc->sc_dev,
"could not allocate firmware DMA memory\n");
goto error;
}
sc->fw_flags |= IWI_FW_HAVE_MAP;
if (bus_dmamap_load(sc->fw_dmat, sc->fw_map, sc->fw_virtaddr,
size, iwi_dma_map_addr, &sc->fw_physaddr, 0) != 0) {
device_printf(sc->sc_dev, "could not load firmware DMA map\n");
goto error;
}
sc->fw_flags |= IWI_FW_HAVE_PHY;
sc->fw_dma_size = size;
return 0;
error:
iwi_release_fw_dma(sc);
return 1;
}
static void
iwi_init_locked(struct iwi_softc *sc)
{
struct iwi_rx_data *data;
int i;
IWI_LOCK_ASSERT(sc);
if (sc->fw_state == IWI_FW_LOADING) {
device_printf(sc->sc_dev, "%s: already loading\n", __func__);
return; /* XXX: condvar? */
}
iwi_stop_locked(sc);
IWI_STATE_BEGIN(sc, IWI_FW_LOADING);
if (iwi_reset(sc) != 0) {
device_printf(sc->sc_dev, "could not reset adapter\n");
goto fail;
}
if (iwi_load_firmware(sc, &sc->fw_boot) != 0) {
device_printf(sc->sc_dev,
"could not load boot firmware %s\n", sc->fw_boot.name);
goto fail;
}
if (iwi_load_ucode(sc, &sc->fw_uc) != 0) {
device_printf(sc->sc_dev,
"could not load microcode %s\n", sc->fw_uc.name);
goto fail;
}
iwi_stop_master(sc);
CSR_WRITE_4(sc, IWI_CSR_CMD_BASE, sc->cmdq.physaddr);
CSR_WRITE_4(sc, IWI_CSR_CMD_SIZE, sc->cmdq.count);
CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur);
CSR_WRITE_4(sc, IWI_CSR_TX1_BASE, sc->txq[0].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX1_SIZE, sc->txq[0].count);
CSR_WRITE_4(sc, IWI_CSR_TX1_WIDX, sc->txq[0].cur);
CSR_WRITE_4(sc, IWI_CSR_TX2_BASE, sc->txq[1].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX2_SIZE, sc->txq[1].count);
CSR_WRITE_4(sc, IWI_CSR_TX2_WIDX, sc->txq[1].cur);
CSR_WRITE_4(sc, IWI_CSR_TX3_BASE, sc->txq[2].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX3_SIZE, sc->txq[2].count);
CSR_WRITE_4(sc, IWI_CSR_TX3_WIDX, sc->txq[2].cur);
CSR_WRITE_4(sc, IWI_CSR_TX4_BASE, sc->txq[3].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX4_SIZE, sc->txq[3].count);
CSR_WRITE_4(sc, IWI_CSR_TX4_WIDX, sc->txq[3].cur);
for (i = 0; i < sc->rxq.count; i++) {
data = &sc->rxq.data[i];
CSR_WRITE_4(sc, data->reg, data->physaddr);
}
CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, sc->rxq.count - 1);
if (iwi_load_firmware(sc, &sc->fw_fw) != 0) {
device_printf(sc->sc_dev,
"could not load main firmware %s\n", sc->fw_fw.name);
goto fail;
}
sc->flags |= IWI_FLAG_FW_INITED;
IWI_STATE_END(sc, IWI_FW_LOADING);
if (iwi_config(sc) != 0) {
device_printf(sc->sc_dev, "unable to enable adapter\n");
goto fail2;
}
callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc);
sc->sc_running = 1;
return;
fail:
IWI_STATE_END(sc, IWI_FW_LOADING);
fail2:
iwi_stop_locked(sc);
}
static void
iwi_init(void *priv)
{
struct iwi_softc *sc = priv;
struct ieee80211com *ic = &sc->sc_ic;
IWI_LOCK_DECL;
IWI_LOCK(sc);
iwi_init_locked(sc);
IWI_UNLOCK(sc);
if (sc->sc_running)
ieee80211_start_all(ic);
}
static void
iwi_stop_locked(void *priv)
{
struct iwi_softc *sc = priv;
IWI_LOCK_ASSERT(sc);
sc->sc_running = 0;
if (sc->sc_softled) {
callout_stop(&sc->sc_ledtimer);
sc->sc_blinking = 0;
}
callout_stop(&sc->sc_wdtimer);
callout_stop(&sc->sc_rftimer);
iwi_stop_master(sc);
CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_SOFT_RESET);
/* reset rings */
iwi_reset_cmd_ring(sc, &sc->cmdq);
iwi_reset_tx_ring(sc, &sc->txq[0]);
iwi_reset_tx_ring(sc, &sc->txq[1]);
iwi_reset_tx_ring(sc, &sc->txq[2]);
iwi_reset_tx_ring(sc, &sc->txq[3]);
iwi_reset_rx_ring(sc, &sc->rxq);
sc->sc_tx_timer = 0;
sc->sc_state_timer = 0;
sc->sc_busy_timer = 0;
sc->flags &= ~(IWI_FLAG_BUSY | IWI_FLAG_ASSOCIATED);
sc->fw_state = IWI_FW_IDLE;
wakeup(sc);
}
static void
iwi_stop(struct iwi_softc *sc)
{
IWI_LOCK_DECL;
IWI_LOCK(sc);
iwi_stop_locked(sc);
IWI_UNLOCK(sc);
}
static void
iwi_restart(void *arg, int npending)
{
struct iwi_softc *sc = arg;
iwi_init(sc);
}
/*
* Return whether or not the radio is enabled in hardware
* (i.e. the rfkill switch is "off").
*/
static int
iwi_getrfkill(struct iwi_softc *sc)
{
return (CSR_READ_4(sc, IWI_CSR_IO) & IWI_IO_RADIO_ENABLED) == 0;
}
static void
iwi_radio_on(void *arg, int pending)
{
struct iwi_softc *sc = arg;
struct ieee80211com *ic = &sc->sc_ic;
device_printf(sc->sc_dev, "radio turned on\n");
iwi_init(sc);
ieee80211_notify_radio(ic, 1);
}
static void
iwi_rfkill_poll(void *arg)
{
struct iwi_softc *sc = arg;
IWI_LOCK_ASSERT(sc);
/*
* Check for a change in rfkill state. We get an
* interrupt when a radio is disabled but not when
* it is enabled so we must poll for the latter.
*/
if (!iwi_getrfkill(sc)) {
ieee80211_runtask(&sc->sc_ic, &sc->sc_radiontask);
return;
}
callout_reset(&sc->sc_rftimer, 2*hz, iwi_rfkill_poll, sc);
}
static void
iwi_radio_off(void *arg, int pending)
{
struct iwi_softc *sc = arg;
struct ieee80211com *ic = &sc->sc_ic;
IWI_LOCK_DECL;
device_printf(sc->sc_dev, "radio turned off\n");
ieee80211_notify_radio(ic, 0);
IWI_LOCK(sc);
iwi_stop_locked(sc);
iwi_rfkill_poll(sc);
IWI_UNLOCK(sc);
}
static int
iwi_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
struct iwi_softc *sc = arg1;
uint32_t size, buf[128];
memset(buf, 0, sizeof buf);
if (!(sc->flags & IWI_FLAG_FW_INITED))
return SYSCTL_OUT(req, buf, sizeof buf);
size = min(CSR_READ_4(sc, IWI_CSR_TABLE0_SIZE), 128 - 1);
CSR_READ_REGION_4(sc, IWI_CSR_TABLE0_BASE, &buf[1], size);
return SYSCTL_OUT(req, buf, size);
}
static int
iwi_sysctl_radio(SYSCTL_HANDLER_ARGS)
{
struct iwi_softc *sc = arg1;
int val = !iwi_getrfkill(sc);
return SYSCTL_OUT(req, &val, sizeof val);
}
/*
* Add sysctl knobs.
*/
static void
iwi_sysctlattach(struct iwi_softc *sc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "radio",
CTLTYPE_INT | CTLFLAG_RD, sc, 0, iwi_sysctl_radio, "I",
"radio transmitter switch state (0=off, 1=on)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, iwi_sysctl_stats, "S",
"statistics");
sc->bluetooth = 0;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "bluetooth",
CTLFLAG_RW, &sc->bluetooth, 0, "bluetooth coexistence");
sc->antenna = IWI_ANTENNA_AUTO;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "antenna",
CTLFLAG_RW, &sc->antenna, 0, "antenna (0=auto)");
}
/*
* LED support.
*
* Different cards have different capabilities. Some have three
* led's while others have only one. The linux ipw driver defines
* led's for link state (associated or not), band (11a, 11g, 11b),
* and for link activity. We use one led and vary the blink rate
* according to the tx/rx traffic a la the ath driver.
*/
static __inline uint32_t
iwi_toggle_event(uint32_t r)
{
return r &~ (IWI_RST_STANDBY | IWI_RST_GATE_ODMA |
IWI_RST_GATE_IDMA | IWI_RST_GATE_ADMA);
}
static uint32_t
iwi_read_event(struct iwi_softc *sc)
{
return MEM_READ_4(sc, IWI_MEM_EEPROM_EVENT);
}
static void
iwi_write_event(struct iwi_softc *sc, uint32_t v)
{
MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, v);
}
static void
iwi_led_done(void *arg)
{
struct iwi_softc *sc = arg;
sc->sc_blinking = 0;
}
/*
* Turn the activity LED off: flip the pin and then set a timer so no
* update will happen for the specified duration.
*/
static void
iwi_led_off(void *arg)
{
struct iwi_softc *sc = arg;
uint32_t v;
v = iwi_read_event(sc);
v &= ~sc->sc_ledpin;
iwi_write_event(sc, iwi_toggle_event(v));
callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, iwi_led_done, sc);
}
/*
* Blink the LED according to the specified on/off times.
*/
static void
iwi_led_blink(struct iwi_softc *sc, int on, int off)
{
uint32_t v;
v = iwi_read_event(sc);
v |= sc->sc_ledpin;
iwi_write_event(sc, iwi_toggle_event(v));
sc->sc_blinking = 1;
sc->sc_ledoff = off;
callout_reset(&sc->sc_ledtimer, on, iwi_led_off, sc);
}
static void
iwi_led_event(struct iwi_softc *sc, int event)
{
/* NB: on/off times from the Atheros NDIS driver, w/ permission */
static const struct {
u_int rate; /* tx/rx iwi rate */
u_int16_t timeOn; /* LED on time (ms) */
u_int16_t timeOff; /* LED off time (ms) */
} blinkrates[] = {
{ IWI_RATE_OFDM54, 40, 10 },
{ IWI_RATE_OFDM48, 44, 11 },
{ IWI_RATE_OFDM36, 50, 13 },
{ IWI_RATE_OFDM24, 57, 14 },
{ IWI_RATE_OFDM18, 67, 16 },
{ IWI_RATE_OFDM12, 80, 20 },
{ IWI_RATE_DS11, 100, 25 },
{ IWI_RATE_OFDM9, 133, 34 },
{ IWI_RATE_OFDM6, 160, 40 },
{ IWI_RATE_DS5, 200, 50 },
{ 6, 240, 58 }, /* XXX 3Mb/s if it existed */
{ IWI_RATE_DS2, 267, 66 },
{ IWI_RATE_DS1, 400, 100 },
{ 0, 500, 130 }, /* unknown rate/polling */
};
uint32_t txrate;
int j = 0; /* XXX silence compiler */
sc->sc_ledevent = ticks; /* time of last event */
if (sc->sc_blinking) /* don't interrupt active blink */
return;
switch (event) {
case IWI_LED_POLL:
j = nitems(blinkrates)-1;
break;
case IWI_LED_TX:
/* read current transmission rate from adapter */
txrate = CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE);
if (blinkrates[sc->sc_txrix].rate != txrate) {
for (j = 0; j < nitems(blinkrates)-1; j++)
if (blinkrates[j].rate == txrate)
break;
sc->sc_txrix = j;
} else
j = sc->sc_txrix;
break;
case IWI_LED_RX:
if (blinkrates[sc->sc_rxrix].rate != sc->sc_rxrate) {
for (j = 0; j < nitems(blinkrates)-1; j++)
if (blinkrates[j].rate == sc->sc_rxrate)
break;
sc->sc_rxrix = j;
} else
j = sc->sc_rxrix;
break;
}
/* XXX beware of overflow */
iwi_led_blink(sc, (blinkrates[j].timeOn * hz) / 1000,
(blinkrates[j].timeOff * hz) / 1000);
}
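/*
 * Worked example of the blink arithmetic above: with hz == 1000 (the
 * common default) the IWI_RATE_DS1 entry (400ms on / 100ms off) yields
 * callout times of 400 and 100 ticks, i.e. roughly two full blink
 * cycles per second while 1Mb/s traffic flows. The overflow note
 * concerns the timeOn * hz product, which is computed in int arithmetic
 * and is safe at typical hz values.
 */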
static int
iwi_sysctl_softled(SYSCTL_HANDLER_ARGS)
{
struct iwi_softc *sc = arg1;
int softled = sc->sc_softled;
int error;
error = sysctl_handle_int(oidp, &softled, 0, req);
if (error || !req->newptr)
return error;
softled = (softled != 0);
if (softled != sc->sc_softled) {
if (softled) {
uint32_t v = iwi_read_event(sc);
v &= ~sc->sc_ledpin;
iwi_write_event(sc, iwi_toggle_event(v));
}
sc->sc_softled = softled;
}
return 0;
}
static void
iwi_ledattach(struct iwi_softc *sc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
sc->sc_blinking = 0;
sc->sc_ledstate = 1;
sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
callout_init_mtx(&sc->sc_ledtimer, &sc->sc_mtx, 0);
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
iwi_sysctl_softled, "I", "enable/disable software LED support");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0,
"pin setting to turn activity LED on");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
"idle time for inactivity LED (ticks)");
/* XXX for debugging */
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"nictype", CTLFLAG_RD, &sc->sc_nictype, 0,
"NIC type from EEPROM");
sc->sc_ledpin = IWI_RST_LED_ACTIVITY;
sc->sc_softled = 1;
sc->sc_nictype = (iwi_read_prom_word(sc, IWI_EEPROM_NIC) >> 8) & 0xff;
if (sc->sc_nictype == 1) {
/*
* NB: LEDs are reversed.
*/
sc->sc_ledpin = IWI_RST_LED_ASSOCIATED;
}
}
static void
iwi_scan_start(struct ieee80211com *ic)
{
/* ignore */
}
static void
iwi_set_channel(struct ieee80211com *ic)
{
struct iwi_softc *sc = ic->ic_softc;
if (sc->fw_state == IWI_FW_IDLE)
iwi_setcurchan(sc, ic->ic_curchan->ic_ieee);
}
static void
iwi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
struct ieee80211vap *vap = ss->ss_vap;
struct iwi_softc *sc = vap->iv_ic->ic_softc;
IWI_LOCK_DECL;
IWI_LOCK(sc);
if (iwi_scanchan(sc, maxdwell, 0))
ieee80211_cancel_scan(vap);
IWI_UNLOCK(sc);
}
static void
iwi_scan_mindwell(struct ieee80211_scan_state *ss)
{
/* NB: don't try to abort scan; wait for firmware to finish */
}
static void
iwi_scan_end(struct ieee80211com *ic)
{
struct iwi_softc *sc = ic->ic_softc;
IWI_LOCK_DECL;
IWI_LOCK(sc);
sc->flags &= ~IWI_FLAG_CHANNEL_SCAN;
/* NB: make sure we're still scanning */
if (sc->fw_state == IWI_FW_SCANNING)
iwi_cmd(sc, IWI_CMD_ABORT_SCAN, NULL, 0);
IWI_UNLOCK(sc);
}
static void
iwi_collect_bands(struct ieee80211com *ic, uint8_t bands[], size_t bands_sz)
{
struct iwi_softc *sc = ic->ic_softc;
device_t dev = sc->sc_dev;
memset(bands, 0, bands_sz);
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
if (pci_get_device(dev) >= 0x4223)
setbit(bands, IEEE80211_MODE_11A);
}
static void
iwi_getradiocaps(struct ieee80211com *ic,
int maxchans, int *nchans, struct ieee80211_channel chans[])
{
uint8_t bands[IEEE80211_MODE_BYTES];
iwi_collect_bands(ic, bands, sizeof(bands));
*nchans = 0;
if (isset(bands, IEEE80211_MODE_11B) || isset(bands, IEEE80211_MODE_11G))
ieee80211_add_channel_list_2ghz(chans, maxchans, nchans,
def_chan_2ghz, nitems(def_chan_2ghz), bands, 0);
if (isset(bands, IEEE80211_MODE_11A)) {
ieee80211_add_channel_list_5ghz(chans, maxchans, nchans,
def_chan_5ghz_band1, nitems(def_chan_5ghz_band1),
bands, 0);
ieee80211_add_channel_list_5ghz(chans, maxchans, nchans,
def_chan_5ghz_band2, nitems(def_chan_5ghz_band2),
bands, 0);
ieee80211_add_channel_list_5ghz(chans, maxchans, nchans,
def_chan_5ghz_band3, nitems(def_chan_5ghz_band3),
bands, 0);
}
}
Index: head/sys/dev/ixl/if_ixlv.c
===================================================================
--- head/sys/dev/ixl/if_ixlv.c (revision 328217)
+++ head/sys/dev/ixl/if_ixlv.c (revision 328218)
@@ -1,3104 +1,3104 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#include "ixl.h"
#include "ixlv.h"
/*********************************************************************
* Driver version
*********************************************************************/
char ixlv_driver_version[] = "1.4.12-k";
/*********************************************************************
* PCI Device ID Table
*
* Used by probe to select devices to load on
* Last field stores an index into ixlv_strings
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
*********************************************************************/
static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
/*********************************************************************
* Table of branding strings
*********************************************************************/
static char *ixlv_strings[] = {
"Intel(R) Ethernet Connection XL710/X722 VF Driver"
};
/*********************************************************************
* Function prototypes
*********************************************************************/
static int ixlv_probe(device_t);
static int ixlv_attach(device_t);
static int ixlv_detach(device_t);
static int ixlv_shutdown(device_t);
static void ixlv_init_locked(struct ixlv_sc *);
static int ixlv_allocate_pci_resources(struct ixlv_sc *);
static void ixlv_free_pci_resources(struct ixlv_sc *);
static int ixlv_assign_msix(struct ixlv_sc *);
static int ixlv_init_msix(struct ixlv_sc *);
static int ixlv_init_taskqueue(struct ixlv_sc *);
static int ixlv_setup_queues(struct ixlv_sc *);
static void ixlv_config_rss(struct ixlv_sc *);
static void ixlv_stop(struct ixlv_sc *);
static void ixlv_add_multi(struct ixl_vsi *);
static void ixlv_del_multi(struct ixl_vsi *);
static void ixlv_free_queues(struct ixl_vsi *);
static int ixlv_setup_interface(device_t, struct ixlv_sc *);
static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
static int ixlv_media_change(struct ifnet *);
static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
static void ixlv_local_timer(void *);
static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void ixlv_init_filters(struct ixlv_sc *);
static void ixlv_free_filters(struct ixlv_sc *);
static void ixlv_msix_que(void *);
static void ixlv_msix_adminq(void *);
static void ixlv_do_adminq(void *, int);
static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void ixlv_handle_que(void *, int);
static int ixlv_reset(struct ixlv_sc *);
static int ixlv_reset_complete(struct i40e_hw *);
static void ixlv_set_queue_rx_itr(struct ixl_queue *);
static void ixlv_set_queue_tx_itr(struct ixl_queue *);
static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
enum i40e_status_code);
static void ixlv_configure_itr(struct ixlv_sc *);
static void ixlv_enable_adminq_irq(struct i40e_hw *);
static void ixlv_disable_adminq_irq(struct i40e_hw *);
static void ixlv_enable_queue_irq(struct i40e_hw *, int);
static void ixlv_disable_queue_irq(struct i40e_hw *, int);
static void ixlv_setup_vlan_filters(struct ixlv_sc *);
static void ixlv_register_vlan(void *, struct ifnet *, u16);
static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
static void ixlv_init_hw(struct ixlv_sc *);
static int ixlv_setup_vc(struct ixlv_sc *);
static int ixlv_vf_config(struct ixlv_sc *);
static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
static void ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif
/*********************************************************************
* FreeBSD Device Interface Entry Points
*********************************************************************/
static device_method_t ixlv_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ixlv_probe),
DEVMETHOD(device_attach, ixlv_attach),
DEVMETHOD(device_detach, ixlv_detach),
DEVMETHOD(device_shutdown, ixlv_shutdown),
{0, 0}
};
static driver_t ixlv_driver = {
"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};
devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/
static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
"IXLV driver parameters");
/*
** Number of descriptors per ring:
** - TX and RX are the same size
*/
static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixlv_ringsz, 0, "Descriptor Ring Size");
/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
&ixlv_max_queues, 0, "Number of Queues");
/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
&ixlv_txbrsz, 0, "TX Buf Ring Size");
/*
** Controls for Interrupt Throttling
** - true/false for dynamic adjustment
** - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
&ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
&ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
&ixlv_rx_itr, 0, "RX Interrupt Rate");
int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&ixlv_tx_itr, 0, "TX Interrupt Rate");
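/*
 * Usage sketch: the knobs above are CTLFLAG_RDTUN, so they are set as
 * loader tunables before the module initializes, e.g. in
 * /boot/loader.conf (values are illustrative only):
 *
 *   hw.ixlv.ringsz="2048"
 *   hw.ixlv.max_queues="4"
 *   hw.ixlv.dynamic_rx_itr="1"
 */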
/*********************************************************************
* Device identification routine
*
* ixlv_probe determines if the driver should be loaded on
* the hardware based on PCI vendor/device id of the device.
*
* return BUS_PROBE_DEFAULT on success, positive on failure
*********************************************************************/
static int
ixlv_probe(device_t dev)
{
ixl_vendor_info_t *ent;
u16 pci_vendor_id, pci_device_id;
u16 pci_subvendor_id, pci_subdevice_id;
char device_name[256];
#if 0
INIT_DEBUGOUT("ixlv_probe: begin");
#endif
pci_vendor_id = pci_get_vendor(dev);
if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
return (ENXIO);
pci_device_id = pci_get_device(dev);
pci_subvendor_id = pci_get_subvendor(dev);
pci_subdevice_id = pci_get_subdevice(dev);
ent = ixlv_vendor_info_array;
while (ent->vendor_id != 0) {
if ((pci_vendor_id == ent->vendor_id) &&
(pci_device_id == ent->device_id) &&
((pci_subvendor_id == ent->subvendor_id) ||
(ent->subvendor_id == 0)) &&
((pci_subdevice_id == ent->subdevice_id) ||
(ent->subdevice_id == 0))) {
sprintf(device_name, "%s, Version - %s",
ixlv_strings[ent->index],
ixlv_driver_version);
device_set_desc_copy(dev, device_name);
return (BUS_PROBE_DEFAULT);
}
ent++;
}
return (ENXIO);
}
/*********************************************************************
* Device initialization routine
*
* The attach entry point is called when the driver is being loaded.
* This routine identifies the type of hardware, allocates all resources
* and initializes the hardware.
*
* return 0 on success, positive on failure
*********************************************************************/
static int
ixlv_attach(device_t dev)
{
struct ixlv_sc *sc;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
int error = 0;
INIT_DBG_DEV(dev, "begin");
/* Allocate, clear, and link in our primary soft structure */
sc = device_get_softc(dev);
sc->dev = sc->osdep.dev = dev;
hw = &sc->hw;
vsi = &sc->vsi;
vsi->dev = dev;
/* Initialize hw struct */
ixlv_init_hw(sc);
/* Allocate filter lists */
ixlv_init_filters(sc);
/* Core Lock Init */
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
/* Set up the timer callout */
callout_init_mtx(&sc->timer, &sc->mtx, 0);
/* Do PCI setup - map BAR0, etc */
if (ixlv_allocate_pci_resources(sc)) {
device_printf(dev, "%s: Allocation of PCI resources failed\n",
__func__);
error = ENXIO;
goto err_early;
}
INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
error = i40e_set_mac_type(hw);
if (error) {
device_printf(dev, "%s: set_mac_type failed: %d\n",
__func__, error);
goto err_pci_res;
}
error = ixlv_reset_complete(hw);
if (error) {
device_printf(dev, "%s: Device is still being reset\n",
__func__);
goto err_pci_res;
}
INIT_DBG_DEV(dev, "VF Device is ready for configuration");
error = ixlv_setup_vc(sc);
if (error) {
device_printf(dev, "%s: Error setting up PF comms, %d\n",
__func__, error);
goto err_pci_res;
}
INIT_DBG_DEV(dev, "PF API version verified");
/* Need API version before sending reset message */
error = ixlv_reset(sc);
if (error) {
device_printf(dev, "VF reset failed; reload the driver\n");
goto err_aq;
}
INIT_DBG_DEV(dev, "VF reset complete");
/* Ask for VF config from PF */
error = ixlv_vf_config(sc);
if (error) {
device_printf(dev, "Error getting configuration from PF: %d\n",
error);
goto err_aq;
}
device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
sc->vf_res->num_vsis,
sc->vf_res->num_queue_pairs,
sc->vf_res->max_vectors,
sc->vf_res->rss_key_size,
sc->vf_res->rss_lut_size);
#ifdef IXL_DEBUG
device_printf(dev, "Offload flags: 0x%b\n",
sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
#endif
/* got VF config message back from PF, now we can parse it */
for (int i = 0; i < sc->vf_res->num_vsis; i++) {
if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
sc->vsi_res = &sc->vf_res->vsi_res[i];
}
if (!sc->vsi_res) {
device_printf(dev, "%s: no LAN VSI found\n", __func__);
error = EIO;
goto err_res_buf;
}
INIT_DBG_DEV(dev, "Resource Acquisition complete");
/* If no mac address was assigned just make a random one */
if (!ixlv_check_ether_addr(hw->mac.addr)) {
u8 addr[ETHER_ADDR_LEN];
arc4rand(&addr, sizeof(addr), 0);
addr[0] &= 0xFE;
addr[0] |= 0x02;
bcopy(addr, hw->mac.addr, sizeof(addr));
}
/* Now that the number of queues for this VF is known, set up interrupts */
sc->msix = ixlv_init_msix(sc);
/* We fail without MSIX support */
if (sc->msix == 0) {
error = ENXIO;
goto err_res_buf;
}
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
sc->link_up = TRUE;
/* This allocates the memory and early settings */
if (ixlv_setup_queues(sc) != 0) {
device_printf(dev, "%s: setup queues failed!\n",
__func__);
error = EIO;
goto out;
}
/* Setup the stack interface */
if (ixlv_setup_interface(dev, sc) != 0) {
device_printf(dev, "%s: setup interface failed!\n",
__func__);
error = EIO;
goto out;
}
INIT_DBG_DEV(dev, "Queue memory and interface setup");
/* Do queue interrupt setup */
if (ixlv_assign_msix(sc) != 0) {
device_printf(dev, "%s: allocating queue interrupts failed!\n",
__func__);
error = ENXIO;
goto out;
}
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
/* Initialize stats */
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
ixlv_add_sysctls(sc);
/* Register for VLAN events */
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
/* We want AQ enabled early */
ixlv_enable_adminq_irq(hw);
/* Set things up to run init */
sc->init_state = IXLV_INIT_READY;
ixl_vc_init_mgr(sc, &sc->vc_mgr);
INIT_DBG_DEV(dev, "end");
return (error);
out:
ixlv_free_queues(vsi);
err_res_buf:
free(sc->vf_res, M_DEVBUF);
err_aq:
i40e_shutdown_adminq(hw);
err_pci_res:
ixlv_free_pci_resources(sc);
err_early:
mtx_destroy(&sc->mtx);
ixlv_free_filters(sc);
INIT_DBG_DEV(dev, "end: error %d", error);
return (error);
}
/*********************************************************************
* Device removal routine
*
* The detach entry point is called when the driver is being removed.
* This routine stops the adapter and deallocates all the resources
* that were allocated for driver operation.
*
* return 0 on success, positive on failure
*********************************************************************/
static int
ixlv_detach(device_t dev)
{
struct ixlv_sc *sc = device_get_softc(dev);
struct ixl_vsi *vsi = &sc->vsi;
struct i40e_hw *hw = &sc->hw;
enum i40e_status_code status;
INIT_DBG_DEV(dev, "begin");
/* Make sure VLANS are not using driver */
if (vsi->ifp->if_vlantrunk != NULL) {
if_printf(vsi->ifp, "Vlan in use, detach first\n");
return (EBUSY);
}
/* Stop driver */
ether_ifdetach(vsi->ifp);
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
mtx_lock(&sc->mtx);
ixlv_stop(sc);
mtx_unlock(&sc->mtx);
}
/* Unregister VLAN events */
if (vsi->vlan_attach != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
if (vsi->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
/* Drain VC mgr */
callout_drain(&sc->vc_mgr.callout);
ixlv_disable_adminq_irq(hw);
ixlv_teardown_adminq_msix(sc);
/* Drain admin queue taskqueue */
taskqueue_free(sc->tq);
status = i40e_shutdown_adminq(&sc->hw);
if (status != I40E_SUCCESS) {
device_printf(dev,
"i40e_shutdown_adminq() failed with status %s\n",
i40e_stat_str(hw, status));
}
if_free(vsi->ifp);
free(sc->vf_res, M_DEVBUF);
ixlv_free_pci_resources(sc);
ixlv_free_queues(vsi);
ixlv_free_filters(sc);
bus_generic_detach(dev);
mtx_destroy(&sc->mtx);
INIT_DBG_DEV(dev, "end");
return (0);
}
/*********************************************************************
*
* Shutdown entry point
*
**********************************************************************/
static int
ixlv_shutdown(device_t dev)
{
struct ixlv_sc *sc = device_get_softc(dev);
INIT_DBG_DEV(dev, "begin");
mtx_lock(&sc->mtx);
ixlv_stop(sc);
mtx_unlock(&sc->mtx);
INIT_DBG_DEV(dev, "end");
return (0);
}
/*
* Configure TXCSUM(IPV6) and TSO(4/6)
* - the hardware handles these together so we
* need to tweak them
*/
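/*
* The invariant maintained below is that TSO4 is never enabled without
* TXCSUM, and TSO6 is never enabled without TXCSUM_IPV6. The
* IXL_FLAGS_KEEP_TSO* flags remember that TSO was turned off only as a
* side effect of disabling checksum offload, so it can be restored when
* checksum offload is re-enabled.
*/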
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
/* Enable/disable TXCSUM/TSO4 */
if (!(ifp->if_capenable & IFCAP_TXCSUM)
&& !(ifp->if_capenable & IFCAP_TSO4)) {
if (mask & IFCAP_TXCSUM) {
ifp->if_capenable |= IFCAP_TXCSUM;
/* enable TXCSUM, restore TSO if previously enabled */
if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
ifp->if_capenable |= IFCAP_TSO4;
}
}
else if (mask & IFCAP_TSO4) {
ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
if_printf(ifp,
"TSO4 requires txcsum, enabling both...\n");
}
} else if((ifp->if_capenable & IFCAP_TXCSUM)
&& !(ifp->if_capenable & IFCAP_TSO4)) {
if (mask & IFCAP_TXCSUM)
ifp->if_capenable &= ~IFCAP_TXCSUM;
else if (mask & IFCAP_TSO4)
ifp->if_capenable |= IFCAP_TSO4;
} else if((ifp->if_capenable & IFCAP_TXCSUM)
&& (ifp->if_capenable & IFCAP_TSO4)) {
if (mask & IFCAP_TXCSUM) {
vsi->flags |= IXL_FLAGS_KEEP_TSO4;
ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
if_printf(ifp,
"TSO4 requires txcsum, disabling both...\n");
} else if (mask & IFCAP_TSO4)
ifp->if_capenable &= ~IFCAP_TSO4;
}
/* Enable/disable TXCSUM_IPV6/TSO6 */
if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
&& !(ifp->if_capenable & IFCAP_TSO6)) {
if (mask & IFCAP_TXCSUM_IPV6) {
ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
ifp->if_capenable |= IFCAP_TSO6;
}
} else if (mask & IFCAP_TSO6) {
ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
if_printf(ifp,
"TSO6 requires txcsum6, enabling both...\n");
}
} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
&& !(ifp->if_capenable & IFCAP_TSO6)) {
if (mask & IFCAP_TXCSUM_IPV6)
ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
else if (mask & IFCAP_TSO6)
ifp->if_capenable |= IFCAP_TSO6;
} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
&& (ifp->if_capenable & IFCAP_TSO6)) {
if (mask & IFCAP_TXCSUM_IPV6) {
vsi->flags |= IXL_FLAGS_KEEP_TSO6;
ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
if_printf(ifp,
"TSO6 requires txcsum6, disabling both...\n");
} else if (mask & IFCAP_TSO6)
ifp->if_capenable &= ~IFCAP_TSO6;
}
}
/*********************************************************************
* Ioctl entry point
*
* ixlv_ioctl is called when the user wants to configure the
* interface.
*
* return 0 on success, positive on failure
**********************************************************************/
static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ixlv_sc *sc = vsi->back;
struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
struct ifaddr *ifa = (struct ifaddr *)data;
bool avoid_reset = FALSE;
#endif
int error = 0;
switch (command) {
case SIOCSIFADDR:
#ifdef INET
if (ifa->ifa_addr->sa_family == AF_INET)
avoid_reset = TRUE;
#endif
#ifdef INET6
if (ifa->ifa_addr->sa_family == AF_INET6)
avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
/*
** Calling init results in link renegotiation,
** so we avoid doing it when possible.
*/
if (avoid_reset) {
ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
ixlv_init(vsi);
#ifdef INET
if (!(ifp->if_flags & IFF_NOARP))
arp_ifinit(ifp, ifa);
#endif
} else
error = ether_ioctl(ifp, command, data);
break;
#endif
case SIOCSIFMTU:
IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
mtx_lock(&sc->mtx);
if (ifr->ifr_mtu > IXL_MAX_FRAME -
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
error = EINVAL;
IOCTL_DBG_IF(ifp, "mtu too large");
} else {
IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
// ERJ: Interestingly enough, these types don't match
ifp->if_mtu = (u_long)ifr->ifr_mtu;
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixlv_init_locked(sc);
}
mtx_unlock(&sc->mtx);
break;
case SIOCSIFFLAGS:
IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
mtx_lock(&sc->mtx);
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
ixlv_init_locked(sc);
} else
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixlv_stop(sc);
sc->if_flags = ifp->if_flags;
mtx_unlock(&sc->mtx);
break;
case SIOCADDMULTI:
IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
mtx_lock(&sc->mtx);
ixlv_disable_intr(vsi);
ixlv_add_multi(vsi);
ixlv_enable_intr(vsi);
mtx_unlock(&sc->mtx);
}
break;
case SIOCDELMULTI:
IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
if (sc->init_state == IXLV_RUNNING) {
mtx_lock(&sc->mtx);
ixlv_disable_intr(vsi);
ixlv_del_multi(vsi);
ixlv_enable_intr(vsi);
mtx_unlock(&sc->mtx);
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
break;
case SIOCSIFCAP:
{
int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
ixlv_cap_txcsum_tso(vsi, ifp, mask);
if (mask & IFCAP_RXCSUM)
ifp->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
if (mask & IFCAP_LRO)
ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (mask & IFCAP_VLAN_HWFILTER)
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
ixlv_init(vsi);
}
VLAN_CAPABILITIES(ifp);
break;
}
default:
IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
/*
** Doing a reinit on the VF is unfortunately more complicated than
** on a physical device: the PF must more or less completely
** recreate our memory, so many things that were done only once at
** attach in traditional drivers must now be redone at each
** reinitialization. This function does that 'prelude' so we can
** then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
struct ifnet *ifp = vsi->ifp;
struct ixlv_mac_filter *mf, *mf_temp;
struct ixlv_vlan_filter *vf;
int error = 0;
INIT_DBG_IF(ifp, "begin");
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixlv_stop(sc);
error = ixlv_reset(sc);
INIT_DBG_IF(ifp, "VF was reset");
/* set the state in case we went thru RESET */
sc->init_state = IXLV_RUNNING;
/*
** Resetting the VF drops all filters from hardware;
** we need to mark them to be re-added in init.
*/
SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
if (mf->flags & IXL_FILTER_DEL) {
SLIST_REMOVE(sc->mac_filters, mf,
ixlv_mac_filter, next);
free(mf, M_DEVBUF);
} else
mf->flags |= IXL_FILTER_ADD;
}
if (vsi->num_vlans != 0)
SLIST_FOREACH(vf, sc->vlan_filters, next)
vf->flags = IXL_FILTER_ADD;
else { /* clean any stale filters */
while (!SLIST_EMPTY(sc->vlan_filters)) {
vf = SLIST_FIRST(sc->vlan_filters);
SLIST_REMOVE_HEAD(sc->vlan_filters, next);
free(vf, M_DEVBUF);
}
}
ixlv_enable_adminq_irq(hw);
ixl_vc_flush(&sc->vc_mgr);
INIT_DBG_IF(ifp, "end");
return (error);
}
static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
enum i40e_status_code code)
{
struct ixlv_sc *sc;
sc = arg;
/*
* Ignore "Adapter Stopped" message as that happens if an ifconfig down
* happens while a command is in progress, so we don't print an error
* in that case.
*/
if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
if_printf(sc->vsi.ifp,
"Error %s waiting for PF to complete operation %d\n",
i40e_stat_str(&sc->hw, code), cmd->request);
}
}
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
struct ifnet *ifp = vsi->ifp;
int error = 0;
INIT_DBG_IF(ifp, "begin");
IXLV_CORE_LOCK_ASSERT(sc);
/* Do a reinit first if an init has already been done */
if ((sc->init_state == IXLV_RUNNING) ||
(sc->init_state == IXLV_RESET_REQUIRED) ||
(sc->init_state == IXLV_RESET_PENDING))
error = ixlv_reinit_locked(sc);
/* Don't bother with init if we failed reinit */
if (error)
goto init_done;
/* Remove existing MAC filter if new MAC addr is set */
if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
error = ixlv_del_mac_filter(sc, hw->mac.addr);
if (error == 0)
ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
sc);
}
/* Check for an LAA mac address... */
bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TSO)
ifp->if_hwassist |= CSUM_TSO;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
/* Add mac filter for this VF to PF */
if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
if (!error || error == EEXIST)
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
sc);
}
/* Setup vlans if needed */
ixlv_setup_vlan_filters(sc);
/* Prepare the queues for operation */
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
ixl_init_tx_ring(que);
if (vsi->max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
ixl_init_rx_ring(que);
}
/* Set initial ITR values */
ixlv_configure_itr(sc);
/* Configure queues */
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
/* Set up RSS */
ixlv_config_rss(sc);
/* Map vectors */
ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
/* Enable queues */
ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
/* Start the local timer */
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
sc->init_state = IXLV_RUNNING;
init_done:
INIT_DBG_IF(ifp, "end");
return;
}
/*
** Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
struct ixlv_sc *sc = vsi->back;
int retries = 0;
/* Prevent init from running again while waiting for AQ calls
* made in init_locked() to complete. */
mtx_lock(&sc->mtx);
if (sc->init_in_progress) {
mtx_unlock(&sc->mtx);
return;
} else
sc->init_in_progress = true;
ixlv_init_locked(sc);
mtx_unlock(&sc->mtx);
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
&& ++retries < IXLV_MAX_INIT_WAIT) {
i40e_msec_pause(25);
}
if (retries >= IXLV_MAX_INIT_WAIT) {
if_printf(vsi->ifp,
"Init failed to complete in allotted time!\n");
}
mtx_lock(&sc->mtx);
sc->init_in_progress = false;
mtx_unlock(&sc->mtx);
}
/*
* ixlv_attach() helper function; gathers information about
* the (virtual) hardware for use elsewhere in the driver.
*/
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
/* Save off the information about this board */
hw->vendor_id = pci_get_vendor(dev);
hw->device_id = pci_get_device(dev);
hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
hw->subsystem_vendor_id =
pci_read_config(dev, PCIR_SUBVEND_0, 2);
hw->subsystem_device_id =
pci_read_config(dev, PCIR_SUBDEV_0, 2);
hw->bus.device = pci_get_slot(dev);
hw->bus.func = pci_get_function(dev);
}
/*
* ixlv_attach() helper function; initializes the admin queue
* and attempts to establish contact with the PF by
* retrying the initial "API version" message several times
* or until the PF responds.
*/
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
int error = 0, ret_error = 0, asq_retries = 0;
bool send_api_ver_retried = 0;
/* Need to set these AQ parameters before initializing AQ */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
/* Initialize admin queue */
error = i40e_init_adminq(hw);
if (error) {
device_printf(dev, "%s: init_adminq failed: %d\n",
__func__, error);
ret_error = 1;
continue;
}
INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
" send_api_ver attempt %d", i+1);
retry_send:
/* Send VF's API version */
error = ixlv_send_api_ver(sc);
if (error) {
i40e_shutdown_adminq(hw);
ret_error = 2;
device_printf(dev, "%s: unable to send api"
" version to PF on attempt %d, error %d\n",
__func__, i+1, error);
}
asq_retries = 0;
while (!i40e_asq_done(hw)) {
if (++asq_retries > IXLV_AQ_MAX_ERR) {
i40e_shutdown_adminq(hw);
device_printf(dev, "Admin Queue timeout "
"(waiting for send_api_ver), %d more tries...\n",
IXLV_AQ_MAX_ERR - (i + 1));
ret_error = 3;
break;
}
i40e_msec_pause(10);
}
if (asq_retries > IXLV_AQ_MAX_ERR)
continue;
INIT_DBG_DEV(dev, "Sent API version message to PF");
/* Verify that the VF accepts the PF's API version */
error = ixlv_verify_api_ver(sc);
if (error == ETIMEDOUT) {
if (!send_api_ver_retried) {
/* Resend message, one more time */
send_api_ver_retried = true;
device_printf(dev,
"%s: Timeout while verifying API version on first"
" try!\n", __func__);
goto retry_send;
} else {
device_printf(dev,
"%s: Timeout while verifying API version on second"
" try!\n", __func__);
ret_error = 4;
break;
}
}
if (error) {
device_printf(dev,
"%s: Unable to verify API version,"
" error %s\n", __func__, i40e_stat_str(hw, error));
ret_error = 5;
}
break;
}
if (ret_error >= 4)
i40e_shutdown_adminq(hw);
return (ret_error);
}
/*
* ixlv_attach() helper function; asks the PF for this VF's
* configuration, and saves the information if it receives it.
*/
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
int bufsz, error = 0, ret_error = 0;
int asq_retries, retried = 0;
retry_config:
error = ixlv_send_vf_config_msg(sc);
if (error) {
device_printf(dev,
"%s: Unable to send VF config request, attempt %d,"
" error %d\n", __func__, retried + 1, error);
ret_error = 2;
}
asq_retries = 0;
while (!i40e_asq_done(hw)) {
if (++asq_retries > IXLV_AQ_MAX_ERR) {
device_printf(dev, "%s: Admin Queue timeout "
"(waiting for send_vf_config_msg), attempt %d\n",
__func__, retried + 1);
ret_error = 3;
goto fail;
}
i40e_msec_pause(10);
}
INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
retried + 1);
if (!sc->vf_res) {
bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
(I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
if (!sc->vf_res) {
device_printf(dev,
"%s: Unable to allocate memory for VF configuration"
" message from PF on attempt %d\n", __func__, retried + 1);
ret_error = 1;
goto fail;
}
}
/* Check for VF config response */
error = ixlv_get_vf_config(sc);
if (error == ETIMEDOUT) {
/* The 1st time we timeout, send the configuration message again */
if (!retried) {
retried++;
goto retry_config;
}
device_printf(dev,
"%s: ixlv_get_vf_config() timed out waiting for a response\n",
__func__);
}
if (error) {
device_printf(dev,
"%s: Unable to get VF configuration from PF after %d tries!\n",
__func__, retried + 1);
ret_error = 4;
}
goto done;
fail:
free(sc->vf_res, M_DEVBUF);
done:
return (ret_error);
}
/*
* Allocate MSI/X vectors, setup the AQ vector early
*/
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
int rid, want, vectors, queues, available;
int auto_max_queues;
rid = PCIR_BAR(IXL_MSIX_BAR);
sc->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (!sc->msix_mem) {
/* May not be enabled */
device_printf(sc->dev,
"Unable to map MSIX table\n");
goto fail;
}
available = pci_msix_count(dev);
if (available == 0) { /* system has msix disabled */
bus_release_resource(dev, SYS_RES_MEMORY,
rid, sc->msix_mem);
sc->msix_mem = NULL;
goto fail;
}
/* Clamp queues to number of CPUs and # of MSI-X vectors available */
auto_max_queues = min(mp_ncpus, available - 1);
/* Clamp queues to # assigned to VF by PF */
auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);
/* Override with tunable value if tunable is less than autoconfig count */
if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
queues = ixlv_max_queues;
/* Use autoconfig amount if that's lower */
else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
device_printf(dev, "ixlv_max_queues (%d) is too large, using "
"autoconfig amount (%d)...\n",
ixlv_max_queues, auto_max_queues);
queues = auto_max_queues;
}
/* Limit maximum auto-configured queues to 8 if no user value is set */
else
queues = min(auto_max_queues, 8);
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
/*
** Want one vector (RX/TX pair) per queue
** plus an additional for the admin queue.
*/
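/*
** For example (numbers illustrative): with 4 CPUs, 4 VF queue pairs
** and 8 MSI-X vectors available, auto_max_queues is min(4, 8 - 1) = 4,
** so queues = 4 and want = 5: one vector per RX/TX pair plus one for
** the admin queue.
*/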
want = queues + 1;
if (want <= available) /* Have enough */
vectors = want;
else {
device_printf(sc->dev,
"MSIX Configuration Problem, "
"%d vectors available but %d wanted!\n",
available, want);
goto fail;
}
#ifdef RSS
/*
* If we're doing RSS, the number of queues needs to
* match the number of RSS buckets that are configured.
*
* + If there's more queues than RSS buckets, we'll end
* up with queues that get no traffic.
*
* + If there's more RSS buckets than queues, we'll end
* up having multiple RSS buckets map to the same queue,
* so there'll be some contention.
*/
if (queues != rss_getnumbuckets()) {
device_printf(dev,
"%s: queues (%d) != RSS buckets (%d)"
"; performance will be impacted.\n",
__func__, queues, rss_getnumbuckets());
}
#endif
if (pci_alloc_msix(dev, &vectors) == 0) {
device_printf(sc->dev,
"Using MSIX interrupts with %d vectors\n", vectors);
sc->msix = vectors;
sc->vsi.num_queues = queues;
}
/* Next we need to setup the vector for the Admin Queue */
rid = 1; /* zero vector + 1 */
sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&rid, RF_SHAREABLE | RF_ACTIVE);
if (sc->res == NULL) {
device_printf(dev, "Unable to allocate"
" bus resource: AQ interrupt\n");
goto fail;
}
if (bus_setup_intr(dev, sc->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixlv_msix_adminq, sc, &sc->tag)) {
sc->res = NULL;
device_printf(dev, "Failed to register AQ handler");
goto fail;
}
bus_describe_intr(dev, sc->res, sc->tag, "adminq");
return (vectors);
fail:
/* The VF driver MUST use MSIX */
return (0);
}
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
int rid;
device_t dev = sc->dev;
rid = PCIR_BAR(0);
sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (!(sc->pci_mem)) {
device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
sc->osdep.mem_bus_space_tag =
rman_get_bustag(sc->pci_mem);
sc->osdep.mem_bus_space_handle =
rman_get_bushandle(sc->pci_mem);
sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
sc->hw.back = &sc->osdep;
/*
** Explicitly set the guest PCI BUSMASTER capability, and rewrite
** the ENABLE bit in the MSIX control register at this point so
** that the host can successfully initialize us.
**
** This must be set before accessing any registers.
*/
{
u16 pci_cmd_word;
int msix_ctrl;
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
}
/* Disable adminq interrupts (just in case) */
ixlv_disable_adminq_irq(&sc->hw);
return (0);
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
/* We may get here before stations are setup */
if (que == NULL)
goto early;
/*
** Release all msix queue resources:
*/
for (int i = 0; i < vsi->num_queues; i++, que++) {
int rid = que->msix + 1;
if (que->tag != NULL) {
bus_teardown_intr(dev, que->res, que->tag);
que->tag = NULL;
}
if (que->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
que->res = NULL;
}
}
early:
pci_release_msi(dev);
if (sc->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
if (sc->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), sc->pci_mem);
}
/*
* Create taskqueue and tasklet for Admin Queue interrupts.
*/
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
int error = 0;
TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
taskqueue_thread_enqueue, &sc->tq);
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
device_get_nameunit(sc->dev));
return (error);
}
/*********************************************************************
*
* Setup MSIX Interrupt resources and handlers for the VSI queues
*
**********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
struct tx_ring *txr;
int error, rid, vector = 1;
#ifdef RSS
cpuset_t cpu_mask;
#endif
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
int cpu_id = i;
rid = vector + 1;
txr = &que->txr;
que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (que->res == NULL) {
device_printf(dev,"Unable to allocate"
" bus resource: que interrupt [%d]\n", vector);
return (ENXIO);
}
/* Set the handler function */
error = bus_setup_intr(dev, que->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixlv_msix_que, que, &que->tag);
if (error) {
que->res = NULL;
device_printf(dev, "Failed to register que handler");
return (error);
}
bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* Bind the vector to a CPU */
#ifdef RSS
cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
bus_bind_intr(dev, que->res, cpu_id);
que->msix = vector;
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixlv_handle_que, que);
que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
CPU_SETOF(cpu_id, &cpu_mask);
taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
&cpu_mask, "%s (bucket %d)",
device_get_nameunit(dev), cpu_id);
#else
taskqueue_start_threads(&que->tq, 1, PI_NET,
"%s que", device_get_nameunit(dev));
#endif
}
return (0);
}
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
int error = 0;
/* Ask the PF to reset us if we are initiating */
if (sc->init_state != IXLV_RESET_PENDING)
ixlv_request_reset(sc);
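/* Pause briefly before polling for reset completion */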
i40e_msec_pause(100);
error = ixlv_reset_complete(hw);
if (error) {
device_printf(dev, "%s: VF reset failed\n",
__func__);
return (error);
}
error = i40e_shutdown_adminq(hw);
if (error) {
device_printf(dev, "%s: shutdown_adminq failed: %d\n",
__func__, error);
return (error);
}
error = i40e_init_adminq(hw);
if (error) {
device_printf(dev, "%s: init_adminq failed: %d\n",
__func__, error);
return(error);
}
return (0);
}
static int
ixlv_reset_complete(struct i40e_hw *hw)
{
u32 reg;
/* Wait up to ~10 seconds */
for (int i = 0; i < 100; i++) {
reg = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((reg == I40E_VFR_VFACTIVE) ||
(reg == I40E_VFR_COMPLETED))
return (0);
i40e_msec_pause(100);
}
return (EBUSY);
}
/*********************************************************************
*
* Setup networking device structure and register an interface.
*
**********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
struct ifnet *ifp;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
INIT_DBG_DEV(dev, "begin");
ifp = vsi->ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "%s: could not allocate ifnet"
" structure!\n", __func__);
return (-1);
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
ifp->if_baudrate = IF_Gbps(40);
ifp->if_init = ixlv_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixlv_ioctl;
#if __FreeBSD_version >= 1100000
if_setgetcounterfn(ifp, ixl_get_counter);
#endif
ifp->if_transmit = ixl_mq_start;
ifp->if_qflush = ixl_qflush;
ifp->if_snd.ifq_maxlen = que->num_desc - 2;
ether_ifattach(ifp, sc->hw.mac.addr);
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
/*
* Tell the upper layer(s) we support long frames.
*/
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_HWCSUM;
ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
ifp->if_capabilities |= IFCAP_TSO;
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_HWTSO
| IFCAP_VLAN_MTU
| IFCAP_VLAN_HWCSUM
| IFCAP_LRO;
ifp->if_capenable = ifp->if_capabilities;
/*
** Don't turn this on by default: if vlans are created on another
** pseudo device (e.g. lagg), vlan events are not passed through,
** breaking operation, but with HW FILTER off it works. If you use
** vlans directly on the ixl driver you can enable this and get
** full hardware tag filtering.
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
*/
ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
ixlv_media_status);
// JFV Add media types later?
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
INIT_DBG_DEV(dev, "end");
return (0);
}
/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi;
struct ixl_queue *que;
struct tx_ring *txr;
struct rx_ring *rxr;
int rsize, tsize;
int error = I40E_SUCCESS;
vsi = &sc->vsi;
vsi->back = (void *)sc;
vsi->hw = &sc->hw;
vsi->num_vlans = 0;
/* Get memory for the station queues */
if (!(vsi->queues =
- (struct ixl_queue *) mallocarray(vsi->num_queues,
- sizeof(struct ixl_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+ vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate queue memory\n");
error = ENOMEM;
goto early;
}
for (int i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
que->num_desc = ixlv_ringsz;
que->me = i;
que->vsi = vsi;
txr = &que->txr;
txr->que = que;
txr->tail = I40E_QTX_TAIL1(que->me);
/* Initialize the TX lock */
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
/*
** Create the TX descriptor ring; the extra u32 is
** added as the location for the HEAD writeback (WB).
*/
tsize = roundup2((que->num_desc *
sizeof(struct i40e_tx_desc)) +
sizeof(u32), DBA_ALIGN);
if (i40e_allocate_dma_mem(&sc->hw,
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
device_printf(dev,
"Unable to allocate TX Descriptor memory\n");
error = ENOMEM;
goto fail;
}
txr->base = (struct i40e_tx_desc *)txr->dma.va;
bzero((void *)txr->base, tsize);
/* Now allocate transmit soft structs for the ring */
if (ixl_allocate_tx_data(que)) {
device_printf(dev,
"Critical Failure setting up TX structures\n");
error = ENOMEM;
goto fail;
}
/* Allocate a buf ring */
txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
M_WAITOK, &txr->mtx);
if (txr->br == NULL) {
device_printf(dev,
"Critical Failure setting up TX buf ring\n");
error = ENOMEM;
goto fail;
}
/*
* Next the RX queues...
*/
rsize = roundup2(que->num_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
rxr = &que->rxr;
rxr->que = que;
rxr->tail = I40E_QRX_TAIL1(que->me);
/* Initialize the RX side lock */
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
if (i40e_allocate_dma_mem(&sc->hw,
&rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
device_printf(dev,
"Unable to allocate RX Descriptor memory\n");
error = ENOMEM;
goto fail;
}
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
/* Allocate receive soft structs for the ring */
if (ixl_allocate_rx_data(que)) {
device_printf(dev,
"Critical Failure setting up receive structs\n");
error = ENOMEM;
goto fail;
}
}
return (0);
fail:
for (int i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
rxr = &que->rxr;
txr = &que->txr;
if (rxr->base)
i40e_free_dma_mem(&sc->hw, &rxr->dma);
if (txr->base)
i40e_free_dma_mem(&sc->hw, &txr->dma);
}
free(vsi->queues, M_DEVBUF);
early:
return (error);
}
/*
** This routine is run via a vlan config EVENT; it enables us to
** use the HW Filter table since we can get the vlan id. This just
** creates the entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
struct ixl_vsi *vsi = arg;
struct ixlv_sc *sc = vsi->back;
struct ixlv_vlan_filter *v;
if (ifp->if_softc != arg) /* Not our event */
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
/* Sanity check - make sure it doesn't already exist */
SLIST_FOREACH(v, sc->vlan_filters, next) {
if (v->vlan == vtag)
return;
}
mtx_lock(&sc->mtx);
v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
if (v == NULL) {
/* M_NOWAIT allocation failed; drop the event rather than dereference NULL */
mtx_unlock(&sc->mtx);
return;
}
++vsi->num_vlans;
v->vlan = vtag;
v->flags = IXL_FILTER_ADD;
SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
mtx_unlock(&sc->mtx);
return;
}
/*
** This routine is run via a vlan unconfig EVENT;
** it removes our entry in the soft VFTA.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
struct ixl_vsi *vsi = arg;
struct ixlv_sc *sc = vsi->back;
struct ixlv_vlan_filter *v;
int i = 0;
if (ifp->if_softc != arg)
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
mtx_lock(&sc->mtx);
SLIST_FOREACH(v, sc->vlan_filters, next) {
if (v->vlan == vtag) {
v->flags = IXL_FILTER_DEL;
++i;
--vsi->num_vlans;
}
}
if (i)
ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
mtx_unlock(&sc->mtx);
return;
}
/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)
{
struct ixlv_mac_filter *f;
f = malloc(sizeof(struct ixlv_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (f)
SLIST_INSERT_HEAD(sc->mac_filters, f, next);
return (f);
}
/*
** Find the filter with matching MAC address
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
struct ixlv_mac_filter *f;
bool match = FALSE;
SLIST_FOREACH(f, sc->mac_filters, next) {
if (cmp_etheraddr(f->macaddr, macaddr)) {
match = TRUE;
break;
}
}
if (!match)
f = NULL;
return (f);
}
static int
ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
int error = 0;
if (sc->tag != NULL) {
error = bus_teardown_intr(dev, sc->res, sc->tag);
if (error) {
device_printf(dev, "bus_teardown_intr() for"
" interrupt 0 failed\n");
// return (ENXIO);
}
sc->tag = NULL;
}
if (sc->res != NULL) {
error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
if (error) {
device_printf(dev, "bus_release_resource() for"
" interrupt 0 failed\n");
// return (ENXIO);
}
sc->res = NULL;
}
return (0);
}
/*
** Admin Queue interrupt handler
*/
static void
ixlv_msix_adminq(void *arg)
{
struct ixlv_sc *sc = arg;
struct i40e_hw *hw = &sc->hw;
u32 reg, mask;
reg = rd32(hw, I40E_VFINT_ICR01);
mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
reg = rd32(hw, I40E_VFINT_DYN_CTL01);
reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, reg);
/* schedule task */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
return;
}
void
ixlv_enable_intr(struct ixl_vsi *vsi)
{
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
ixlv_enable_adminq_irq(hw);
for (int i = 0; i < vsi->num_queues; i++, que++)
ixlv_enable_queue_irq(hw, que->me);
}
void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
ixlv_disable_adminq_irq(hw);
for (int i = 0; i < vsi->num_queues; i++, que++)
ixlv_disable_queue_irq(hw, que->me);
}
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
wr32(hw, I40E_VFINT_DYN_CTL01, 0);
wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
/* flush */
rd32(hw, I40E_VFGEN_RSTAT);
return;
}
static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
wr32(hw, I40E_VFINT_DYN_CTL01,
I40E_VFINT_DYN_CTL01_INTENA_MASK |
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
/* flush */
rd32(hw, I40E_VFGEN_RSTAT);
return;
}
static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
u32 reg;
reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
wr32(hw, I40E_VFINT_DYN_CTLN1(id),
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
rd32(hw, I40E_VFGEN_RSTAT);
return;
}
/*
* Get initial ITR values from tunable values.
*/
static void
ixlv_configure_itr(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
vsi->rx_itr_setting = ixlv_rx_itr;
vsi->tx_itr_setting = ixlv_tx_itr;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
vsi->rx_itr_setting);
rxr->itr = vsi->rx_itr_setting;
rxr->latency = IXL_AVE_LATENCY;
wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
vsi->tx_itr_setting);
txr->itr = vsi->tx_itr_setting;
txr->latency = IXL_AVE_LATENCY;
}
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct rx_ring *rxr = &que->rxr;
u16 rx_itr;
u16 rx_latency = 0;
int rx_bytes;
/* Idle, do nothing */
if (rxr->bytes == 0)
return;
if (ixlv_dynamic_rx_itr) {
rx_bytes = rxr->bytes/rxr->itr;
rx_itr = rxr->itr;
/* Adjust latency range */
switch (rxr->latency) {
case IXL_LOW_LATENCY:
if (rx_bytes > 10) {
rx_latency = IXL_AVE_LATENCY;
rx_itr = IXL_ITR_20K;
}
break;
case IXL_AVE_LATENCY:
if (rx_bytes > 20) {
rx_latency = IXL_BULK_LATENCY;
rx_itr = IXL_ITR_8K;
} else if (rx_bytes <= 10) {
rx_latency = IXL_LOW_LATENCY;
rx_itr = IXL_ITR_100K;
}
break;
case IXL_BULK_LATENCY:
if (rx_bytes <= 20) {
rx_latency = IXL_AVE_LATENCY;
rx_itr = IXL_ITR_20K;
}
break;
}
rxr->latency = rx_latency;
if (rx_itr != rxr->itr) {
/* do an exponential smoothing */
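/*
* More precisely, the expression below is a 9:1-weighted harmonic
* mean of the previous ITR (weight 9) and the newly computed target
* (weight 1), so the interval drifts toward the target rather than
* jumping to it in a single step.
*/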
rx_itr = (10 * rx_itr * rxr->itr) /
((9 * rx_itr) + rxr->itr);
rxr->itr = min(rx_itr, IXL_MAX_ITR);
wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
que->me), rxr->itr);
}
} else { /* We may have toggled to non-dynamic */
if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
vsi->rx_itr_setting = ixlv_rx_itr;
/* Update the hardware if needed */
if (rxr->itr != vsi->rx_itr_setting) {
rxr->itr = vsi->rx_itr_setting;
wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
que->me), rxr->itr);
}
}
rxr->bytes = 0;
rxr->packets = 0;
return;
}
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
u16 tx_itr;
u16 tx_latency = 0;
int tx_bytes;
/* Idle, do nothing */
if (txr->bytes == 0)
return;
if (ixlv_dynamic_tx_itr) {
tx_bytes = txr->bytes/txr->itr;
tx_itr = txr->itr;
switch (txr->latency) {
case IXL_LOW_LATENCY:
if (tx_bytes > 10) {
tx_latency = IXL_AVE_LATENCY;
tx_itr = IXL_ITR_20K;
}
break;
case IXL_AVE_LATENCY:
if (tx_bytes > 20) {
tx_latency = IXL_BULK_LATENCY;
tx_itr = IXL_ITR_8K;
} else if (tx_bytes <= 10) {
tx_latency = IXL_LOW_LATENCY;
tx_itr = IXL_ITR_100K;
}
break;
case IXL_BULK_LATENCY:
if (tx_bytes <= 20) {
tx_latency = IXL_AVE_LATENCY;
tx_itr = IXL_ITR_20K;
}
break;
}
txr->latency = tx_latency;
if (tx_itr != txr->itr) {
/* do an exponential smoothing */
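/* Same 9:1-weighted harmonic-mean blend as on the RX side. */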
tx_itr = (10 * tx_itr * txr->itr) /
((9 * tx_itr) + txr->itr);
txr->itr = min(tx_itr, IXL_MAX_ITR);
wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
que->me), txr->itr);
}
} else { /* We may have toggled to non-dynamic */
if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
vsi->tx_itr_setting = ixlv_tx_itr;
/* Update the hardware if needed */
if (txr->itr != vsi->tx_itr_setting) {
txr->itr = vsi->tx_itr_setting;
wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
que->me), txr->itr);
}
}
txr->bytes = 0;
txr->packets = 0;
return;
}
/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixlv_handle_que(void *context, int pending)
{
struct ixl_queue *que = context;
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
struct ifnet *ifp = vsi->ifp;
bool more;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
more = ixl_rxeof(que, IXL_RX_LIMIT);
mtx_lock(&txr->mtx);
ixl_txeof(que);
if (!drbr_empty(ifp, txr->br))
ixl_mq_start_locked(ifp, txr);
mtx_unlock(&txr->mtx);
if (more) {
taskqueue_enqueue(que->tq, &que->task);
return;
}
}
/* Reenable this interrupt - hmmm */
ixlv_enable_queue_irq(hw, que->me);
return;
}
/*********************************************************************
*
* MSIX Queue Interrupt Service routine
*
**********************************************************************/
static void
ixlv_msix_que(void *arg)
{
struct ixl_queue *que = arg;
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
bool more_tx, more_rx;
/* Spurious interrupts are ignored */
if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
++que->irqs;
more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
mtx_lock(&txr->mtx);
more_tx = ixl_txeof(que);
/*
** Make certain that if the stack
** has anything queued the task gets
** scheduled to handle it.
*/
if (!drbr_empty(vsi->ifp, txr->br))
more_tx = 1;
mtx_unlock(&txr->mtx);
ixlv_set_queue_rx_itr(que);
ixlv_set_queue_tx_itr(que);
if (more_tx || more_rx)
taskqueue_enqueue(que->tq, &que->task);
else
ixlv_enable_queue_irq(hw, que->me);
return;
}
/*********************************************************************
*
* Media Ioctl callback
*
* This routine is called whenever the user queries the status of
* the interface using ifconfig.
*
**********************************************************************/
static void
ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ixlv_sc *sc = vsi->back;
INIT_DBG_IF(ifp, "begin");
mtx_lock(&sc->mtx);
ixlv_update_link_status(sc);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
if (!sc->link_up) {
mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end: link not up");
return;
}
ifmr->ifm_status |= IFM_ACTIVE;
/* Hardware is always full-duplex */
ifmr->ifm_active |= IFM_FDX;
mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end");
return;
}
/*********************************************************************
*
* Media Ioctl callback
*
* This routine is called when the user changes speed/duplex using
* media/mediaopt option with ifconfig.
*
**********************************************************************/
static int
ixlv_media_change(struct ifnet * ifp)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ifmedia *ifm = &vsi->media;
INIT_DBG_IF(ifp, "begin");
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
INIT_DBG_IF(ifp, "end");
return (0);
}
/*********************************************************************
* Multicast Initialization
*
* This routine is called by init to reset the multicast filter state.
*
**********************************************************************/
static void
ixlv_init_multi(struct ixl_vsi *vsi)
{
struct ixlv_mac_filter *f;
struct ixlv_sc *sc = vsi->back;
int mcnt = 0;
IOCTL_DBG_IF(vsi->ifp, "begin");
/* First clear any multicast filters */
SLIST_FOREACH(f, sc->mac_filters, next) {
if ((f->flags & IXL_FILTER_USED)
&& (f->flags & IXL_FILTER_MC)) {
f->flags |= IXL_FILTER_DEL;
mcnt++;
}
}
if (mcnt > 0)
ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
sc);
IOCTL_DBG_IF(vsi->ifp, "end");
}
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
struct ifmultiaddr *ifma;
struct ifnet *ifp = vsi->ifp;
struct ixlv_sc *sc = vsi->back;
int mcnt = 0;
IOCTL_DBG_IF(ifp, "begin");
if_maddr_rlock(ifp);
/*
** Get a count to decide whether we should
** simply use multicast promiscuous.
*/
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
mcnt++;
}
if_maddr_runlock(ifp);
/* TODO: Remove -- cannot set promiscuous mode in a VF */
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
sc);
IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
return;
}
mcnt = 0;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
if (!ixlv_add_mac_filter(sc,
(u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
IXL_FILTER_MC))
mcnt++;
}
if_maddr_runlock(ifp);
/*
** Notify AQ task that sw filters need to be
** added to hw list
*/
if (mcnt > 0)
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
sc);
IOCTL_DBG_IF(ifp, "end");
}
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
struct ixlv_mac_filter *f;
struct ifmultiaddr *ifma;
struct ifnet *ifp = vsi->ifp;
struct ixlv_sc *sc = vsi->back;
int mcnt = 0;
bool match = FALSE;
IOCTL_DBG_IF(ifp, "begin");
/* Search for removed multicast addresses */
if_maddr_rlock(ifp);
SLIST_FOREACH(f, sc->mac_filters, next) {
if ((f->flags & IXL_FILTER_USED)
&& (f->flags & IXL_FILTER_MC)) {
/* check if mac address in filter is in sc's list */
match = FALSE;
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
u8 *mc_addr =
(u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
if (cmp_etheraddr(f->macaddr, mc_addr)) {
match = TRUE;
break;
}
}
/* if this filter is not in the sc's list, remove it */
if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
f->flags |= IXL_FILTER_DEL;
mcnt++;
IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
MAC_FORMAT_ARGS(f->macaddr));
}
else if (match == FALSE)
IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
MAC_FORMAT_ARGS(f->macaddr));
}
}
if_maddr_runlock(ifp);
if (mcnt > 0)
ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
sc);
IOCTL_DBG_IF(ifp, "end");
}
/*********************************************************************
* Timer routine
*
* This routine checks for link status, updates statistics,
* and runs the watchdog check.
*
**********************************************************************/
static void
ixlv_local_timer(void *arg)
{
struct ixlv_sc *sc = arg;
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
struct tx_ring *txr;
int hung = 0;
u32 mask, val;
s32 timer, new_timer;
IXLV_CORE_LOCK_ASSERT(sc);
/* If Reset is in progress just bail */
if (sc->init_state == IXLV_RESET_PENDING)
return;
/* Check for when PF triggers a VF reset */
val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (val != I40E_VFR_VFACTIVE
&& val != I40E_VFR_COMPLETED) {
DDPRINTF(dev, "reset in progress! (%d)", val);
return;
}
ixlv_request_stats(sc);
/* clean and process any events */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
/*
** Check status on the queues for a hang
*/
mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
for (int i = 0; i < vsi->num_queues; i++, que++) {
txr = &que->txr;
timer = atomic_load_acq_32(&txr->watchdog_timer);
if (timer > 0) {
new_timer = timer - hz;
if (new_timer <= 0) {
atomic_store_rel_32(&txr->watchdog_timer, -1);
device_printf(dev, "WARNING: queue %d "
"appears to be hung!\n", que->me);
++hung;
} else {
/*
* If this fails, something in the TX path has updated
* the watchdog, which means the TX path is still working
* and the watchdog doesn't need to count down.
*/
atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
/* Any queues with outstanding work get a sw irq */
wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
}
}
}
/* Reset when a queue shows hung */
if (hung)
goto hung;
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
return;
hung:
device_printf(dev, "WARNING: Resetting!\n");
sc->init_state = IXLV_RESET_REQUIRED;
sc->watchdog_events++;
ixlv_stop(sc);
ixlv_init_locked(sc);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
struct ixl_vsi *vsi = &sc->vsi;
struct ifnet *ifp = vsi->ifp;
if (sc->link_up){
if (vsi->link_active == FALSE) {
if (bootverbose)
if_printf(ifp,"Link is Up, %d Gbps\n",
(sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
vsi->link_active = TRUE;
if_link_state_change(ifp, LINK_STATE_UP);
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
if (bootverbose)
if_printf(ifp,"Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
vsi->link_active = FALSE;
}
}
return;
}
/*********************************************************************
*
* This routine disables all traffic on the adapter by flushing
* pending virtual channel commands, disabling the VSI queues, and
* stopping the local timer.
*
**********************************************************************/
static void
ixlv_stop(struct ixlv_sc *sc)
{
struct ifnet *ifp;
int start;
ifp = sc->vsi.ifp;
INIT_DBG_IF(ifp, "begin");
IXLV_CORE_LOCK_ASSERT(sc);
ixl_vc_flush(&sc->vc_mgr);
ixlv_disable_queues(sc);
start = ticks;
while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
((ticks - start) < hz/10))
ixlv_do_adminq_locked(sc);
/* Stop the local timer */
callout_stop(&sc->timer);
INIT_DBG_IF(ifp, "end");
}
/*********************************************************************
*
* Free all station queue structs.
*
**********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
struct ixl_queue *que = vsi->queues;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
if (!mtx_initialized(&txr->mtx)) /* uninitialized */
continue;
IXL_TX_LOCK(txr);
ixl_free_que_tx(que);
if (txr->base)
i40e_free_dma_mem(&sc->hw, &txr->dma);
IXL_TX_UNLOCK(txr);
IXL_TX_LOCK_DESTROY(txr);
if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
continue;
IXL_RX_LOCK(rxr);
ixl_free_que_rx(que);
if (rxr->base)
i40e_free_dma_mem(&sc->hw, &rxr->dma);
IXL_RX_UNLOCK(rxr);
IXL_RX_LOCK_DESTROY(rxr);
}
free(vsi->queues, M_DEVBUF);
}
static void
ixlv_config_rss_reg(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
u32 lut = 0;
u64 set_hena = 0, hena;
int i, j, que_id;
u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
u32 rss_hash_config;
#endif
/* Don't set up RSS if using a single queue */
if (vsi->num_queues == 1) {
wr32(hw, I40E_VFQF_HENA(0), 0);
wr32(hw, I40E_VFQF_HENA(1), 0);
ixl_flush(hw);
return;
}
#ifdef RSS
/* Fetch the configured RSS key */
rss_getkey((uint8_t *) &rss_seed);
#else
ixl_get_default_rss_key(rss_seed);
#endif
/* Fill out hash function seed */
for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
/* Enable PCTYPES for RSS: */
#ifdef RSS
rss_hash_config = rss_gethashconfig();
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
hena |= set_hena;
wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
if (j == vsi->num_queues)
j = 0;
#ifdef RSS
/*
* Fetch the RSS bucket id for the given indirection entry.
* Cap it at the number of configured buckets (which is
* num_queues.)
*/
que_id = rss_get_indirection_to_bucket(i);
que_id = que_id % vsi->num_queues;
#else
que_id = j;
#endif
/* lut = 4-byte sliding window of 4 lut entries */
lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
/* On i = 3, we have 4 entries in lut; write to the register */
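/*
* For example (illustrative), with 4 queues in round-robin order the
* first write is HLUT(0) = 0x00010203: entry 0 ends up in the high
* byte and entry 3 in the low byte.
*/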
if ((i & 3) == 3) {
wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
}
}
ixl_flush(hw);
}
static void
ixlv_config_rss_pf(struct ixlv_sc *sc)
{
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
}
/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
ixlv_config_rss_reg(sc);
} else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
ixlv_config_rss_pf(sc);
} else
device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
}
/*
** This routine refreshes vlan filters; called by init,
** it scans the filter table and then updates the AQ.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
struct ixl_vsi *vsi = &sc->vsi;
struct ixlv_vlan_filter *f;
int cnt = 0;
if (vsi->num_vlans == 0)
return;
/*
** Scan the filter table for vlan entries,
** and if any are found, call for the AQ update.
*/
SLIST_FOREACH(f, sc->vlan_filters, next)
if (f->flags & IXL_FILTER_ADD)
cnt++;
if (cnt > 0)
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}
/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
struct ixlv_mac_filter *f;
/* Does one already exist? */
f = ixlv_find_mac_filter(sc, macaddr);
if (f != NULL) {
IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
MAC_FORMAT_ARGS(macaddr));
return (EEXIST);
}
/* If not, get a new empty filter */
f = ixlv_get_mac_filter(sc);
if (f == NULL) {
if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
__func__);
return (ENOMEM);
}
IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
MAC_FORMAT_ARGS(macaddr));
bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
f->flags |= flags;
return (0);
}
/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
struct ixlv_mac_filter *f;
f = ixlv_find_mac_filter(sc, macaddr);
if (f == NULL)
return (ENOENT);
f->flags |= IXL_FILTER_DEL;
return (0);
}
/*
** Tasklet handler for MSIX Adminq interrupts
** - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
struct ixlv_sc *sc = context;
mtx_lock(&sc->mtx);
ixlv_do_adminq_locked(sc);
mtx_unlock(&sc->mtx);
return;
}
static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg;
device_t dev = sc->dev;
u16 result = 0;
u32 reg, oldreg;
i40e_status ret;
bool aq_error = false;
IXLV_CORE_LOCK_ASSERT(sc);
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = sc->aq_buffer;
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
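/*
* Drain the admin receive queue, handing each virtchnl message to
* the completion handler until no events remain.
*/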
do {
ret = i40e_clean_arq_element(hw, &event, &result);
if (ret)
break;
ixlv_vc_completion(sc, v_msg->v_opcode,
v_msg->v_retval, event.msg_buf, event.msg_len);
if (result != 0)
bzero(event.msg_buf, IXL_AQ_BUF_SZ);
} while (result);
/* check for Admin queue errors */
oldreg = reg = rd32(hw, hw->aq.arq.len);
if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
device_printf(dev, "ARQ VF Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
aq_error = true;
}
if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
device_printf(dev, "ARQ Overflow Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
aq_error = true;
}
if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
device_printf(dev, "ARQ Critical Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
aq_error = true;
}
if (oldreg != reg)
wr32(hw, hw->aq.arq.len, reg);
oldreg = reg = rd32(hw, hw->aq.asq.len);
if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
device_printf(dev, "ASQ VF Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
aq_error = true;
}
if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
device_printf(dev, "ASQ Overflow Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
aq_error = true;
}
if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
device_printf(dev, "ASQ Critical Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
aq_error = true;
}
if (oldreg != reg)
wr32(hw, hw->aq.asq.len, reg);
if (aq_error) {
/* Need to reset adapter */
device_printf(dev, "WARNING: Resetting!\n");
sc->init_state = IXLV_RESET_REQUIRED;
ixlv_stop(sc);
ixlv_init_locked(sc);
}
ixlv_enable_adminq_irq(hw);
}
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
struct i40e_eth_stats *es = &vsi->eth_stats;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct sysctl_oid *vsi_node, *queue_node;
struct sysctl_oid_list *vsi_list, *queue_list;
#define QUEUE_NAME_LEN 32
char queue_namebuf[QUEUE_NAME_LEN];
struct ixl_queue *queues = vsi->queues;
struct tx_ring *txr;
struct rx_ring *rxr;
/* Driver statistics sysctls */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &sc->watchdog_events,
"Watchdog timeouts");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &sc->admin_irq,
"Admin Queue IRQ Handled");
/* VSI statistics sysctls */
vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
CTLFLAG_RD, NULL, "VSI-specific statistics");
vsi_list = SYSCTL_CHILDREN(vsi_node);
struct ixl_sysctl_info ctls[] =
{
{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
{&es->rx_unicast, "ucast_pkts_rcvd",
"Unicast Packets Received"},
{&es->rx_multicast, "mcast_pkts_rcvd",
"Multicast Packets Received"},
{&es->rx_broadcast, "bcast_pkts_rcvd",
"Broadcast Packets Received"},
{&es->rx_discards, "rx_discards", "Discarded RX packets"},
{&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
{&es->tx_multicast, "mcast_pkts_txd",
"Multicast Packets Transmitted"},
{&es->tx_broadcast, "bcast_pkts_txd",
"Broadcast Packets Transmitted"},
{&es->tx_errors, "tx_errors", "TX packet errors"},
// end
{0,0,0}
};
struct ixl_sysctl_info *entry = ctls;
while (entry->stat != NULL)
{
SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
CTLFLAG_RD, entry->stat,
entry->description);
entry++;
}
/* Queue sysctls */
for (int q = 0; q < vsi->num_queues; q++) {
snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
txr = &(queues[q].txr);
rxr = &(queues[q].rxr);
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
"m_defrag() failed");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
CTLFLAG_RD, &(queues[q].dropped_pkts),
"Driver dropped packets");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(queues[q].irqs),
"irqs on this queue");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &(queues[q].tso),
"TSO");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
"Driver tx dma failure in xmit");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
"Queue No Descriptor Available");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &(txr->total_packets),
"Queue Packets Transmitted");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
CTLFLAG_RD, &(txr->tx_bytes),
"Queue Bytes Transmitted");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &(rxr->rx_packets),
"Queue Packets Received");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
CTLFLAG_RD, &(rxr->itr), 0,
"Queue Rx ITR Interval");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
#ifdef IXL_DEBUG
/* Examine queue state */
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixlv_sysctl_qtx_tail_handler, "IU",
"Queue Transmit Descriptor Tail");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixlv_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
CTLFLAG_RD, &(txr.watchdog_timer), 0,
"Ticks before watchdog event is triggered");
#endif
}
}
static void
ixlv_init_filters(struct ixlv_sc *sc)
{
sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
SLIST_INIT(sc->mac_filters);
sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
SLIST_INIT(sc->vlan_filters);
return;
}
static void
ixlv_free_filters(struct ixlv_sc *sc)
{
struct ixlv_mac_filter *f;
struct ixlv_vlan_filter *v;
while (!SLIST_EMPTY(sc->mac_filters)) {
f = SLIST_FIRST(sc->mac_filters);
SLIST_REMOVE_HEAD(sc->mac_filters, next);
free(f, M_DEVBUF);
}
while (!SLIST_EMPTY(sc->vlan_filters)) {
v = SLIST_FIRST(sc->vlan_filters);
SLIST_REMOVE_HEAD(sc->vlan_filters, next);
free(v, M_DEVBUF);
}
return;
}
#ifdef IXL_DEBUG
/**
* ixlv_sysctl_qtx_tail_handler
* Retrieves I40E_QTX_TAIL1 value from hardware
* for a sysctl.
*/
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
int error;
u32 val;
que = ((struct ixl_queue *)oidp->oid_arg1);
if (!que) return 0;
val = rd32(que->vsi->hw, que->txr.tail);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
return (0);
}
/**
* ixlv_sysctl_qrx_tail_handler
* Retrieves I40E_QRX_TAIL1 value from hardware
* for a sysctl.
*/
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
int error;
u32 val;
que = ((struct ixl_queue *)oidp->oid_arg1);
if (!que) return 0;
val = rd32(que->vsi->hw, que->rxr.tail);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
return (0);
}
#endif
Index: head/sys/dev/ixl/ixl_pf_iov.c
===================================================================
--- head/sys/dev/ixl/ixl_pf_iov.c (revision 328217)
+++ head/sys/dev/ixl/ixl_pf_iov.c (revision 328218)
@@ -1,1865 +1,1865 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#include "ixl_pf_iov.h"
/* Private functions */
static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
static bool ixl_zero_mac(const uint8_t *addr);
static bool ixl_bcast_mac(const uint8_t *addr);
static int ixl_vc_opcode_level(uint16_t opcode);
static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
enum i40e_queue_type *last_type, uint16_t *last_queue);
static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
void
ixl_initialize_sriov(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
nvlist_t *pf_schema, *vf_schema;
int iov_error;
/* SR-IOV is only supported when MSI-X is in use. */
if (pf->msix <= 1)
return;
pf_schema = pci_iov_schema_alloc_node();
vf_schema = pci_iov_schema_alloc_node();
pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
IOV_SCHEMA_HASDEFAULT, TRUE);
pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
IOV_SCHEMA_HASDEFAULT, FALSE);
pci_iov_schema_add_bool(vf_schema, "allow-promisc",
IOV_SCHEMA_HASDEFAULT, FALSE);
pci_iov_schema_add_uint16(vf_schema, "num-queues",
IOV_SCHEMA_HASDEFAULT,
max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);
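/*
* Illustrative note: the default assumes one queue pair per data
* interrupt, with MSI-X vector 0 of each VF reserved for its admin
* queue (hence the "- 1" above).
*/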
iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
if (iov_error != 0) {
device_printf(dev,
"Failed to initialize SR-IOV (error=%d)\n",
iov_error);
} else
device_printf(dev, "SR-IOV ready\n");
pf->vc_debug_lvl = 1;
}
/*
* Allocate the VSI for a VF.
*/
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
device_t dev;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
struct i40e_vsi_context vsi_ctx;
int i;
enum i40e_status_code code;
hw = &pf->hw;
vsi = &pf->vsi;
dev = pf->dev;
vsi_ctx.pf_num = hw->pf_id;
vsi_ctx.uplink_seid = pf->veb_seid;
vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
vsi_ctx.info.switch_id = htole16(0);
vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
vsi_ctx.info.sec_flags = 0;
if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
vsi_ctx.info.valid_sections |=
htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
/* ERJ: Only scattered allocation is supported for VFs right now */
for (i = 0; i < vf->qtag.num_active; i++)
vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
vsi_ctx.info.tc_mapping[0] = htole16(
(0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
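/*
* The TC queue-count field is a power-of-two exponent: bsrl() returns
* the index of the highest set bit, so e.g. num_allocated == 4 gives
* bsrl(4) == 2, sizing TC0 for 2^2 == 4 queues; a non-power-of-two
* allocation is rounded down by this encoding.
*/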
code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
if (code != I40E_SUCCESS)
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
vf->vsi.seid = vsi_ctx.seid;
vf->vsi.vsi_num = vsi_ctx.vsi_number;
// vf->vsi.first_queue = vf->qtag.qidx[0];
vf->vsi.num_queues = vf->qtag.num_active;
code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
if (code != I40E_SUCCESS)
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
if (code != I40E_SUCCESS) {
device_printf(dev, "Failed to disable BW limit: %d\n",
ixl_adminq_err_to_errno(hw->aq.asq_last_status));
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
}
memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
return (0);
}
static int
ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct i40e_hw *hw;
int error;
hw = &pf->hw;
error = ixl_vf_alloc_vsi(pf, vf);
if (error != 0)
return (error);
vf->vsi.hw_filters_add = 0;
vf->vsi.hw_filters_del = 0;
ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
ixl_reconfigure_filters(&vf->vsi);
return (0);
}
static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
uint32_t val)
{
uint32_t qtable;
int index, shift;
/*
* Two queues are mapped in a single register, so we have to do some
* gymnastics to convert the queue number into a register index and
* shift.
*/
index = qnum / 2;
shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
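/*
* For example, qnum == 5 yields index == 2 (the third QTABLE register)
* and shift == I40E_VSILAN_QTABLE_QINDEX_1_SHIFT, selecting the upper
* queue slot; even queue numbers land in the lower (QINDEX_0) slot with
* shift == 0.
*/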
qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
qtable |= val << shift;
i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}
static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct i40e_hw *hw;
uint32_t qtable;
int i;
hw = &pf->hw;
/*
* Contiguous mappings aren't actually supported by the hardware,
* so we have to use non-contiguous mappings.
*/
i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
/* Enable LAN traffic on this VF */
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
/* Program index of each VF queue into PF queue space
* (This is only needed if QTABLE is enabled) */
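/*
* For instance, a VF granted PF queues 12 and 13 would have VPLAN_QTABLE
* entry 0 point at PF queue 12 and entry 1 at PF queue 13, with the
* remaining entries marked unused below.
*/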
for (i = 0; i < vf->vsi.num_queues; i++) {
qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
I40E_VPLAN_QTABLE_QINDEX_SHIFT;
wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
}
for (; i < IXL_MAX_VSI_QUEUES; i++)
wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
I40E_VPLAN_QTABLE_QINDEX_MASK);
/* Map queues allocated to VF to its VSI;
* This mapping matches the VF-wide mapping since the VF
* is only given a single VSI */
for (i = 0; i < vf->vsi.num_queues; i++)
ixl_vf_map_vsi_queue(hw, vf, i,
ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
/* Set rest of VSI queues as unused. */
for (; i < IXL_MAX_VSI_QUEUES; i++)
ixl_vf_map_vsi_queue(hw, vf, i,
I40E_VSILAN_QTABLE_QINDEX_0_MASK);
ixl_flush(hw);
}
static void
ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
{
struct i40e_hw *hw;
hw = &pf->hw;
if (vsi->seid == 0)
return;
i40e_aq_delete_element(hw, vsi->seid, NULL);
}
static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{
wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
ixl_flush(hw);
}
static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{
wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
ixl_flush(hw);
}
static void
ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct i40e_hw *hw;
uint32_t vfint_reg, vpint_reg;
int i;
hw = &pf->hw;
ixl_vf_vsi_release(pf, &vf->vsi);
/* Index 0 has a special register. */
ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
ixl_vf_disable_queue_intr(hw, vfint_reg);
}
/* Index 0 has a special register. */
ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
ixl_vf_unregister_intr(hw, vpint_reg);
}
vf->vsi.num_queues = 0;
}
static int
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct i40e_hw *hw;
int i;
uint16_t global_vf_num;
uint32_t ciad;
hw = &pf->hw;
global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
(global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
ciad = rd32(hw, I40E_PF_PCI_CIAD);
if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
return (0);
DELAY(1);
}
return (ETIMEDOUT);
}
static void
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct i40e_hw *hw;
uint32_t vfrtrig;
hw = &pf->hw;
vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
ixl_flush(hw);
ixl_reinit_vf(pf, vf);
}
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct i40e_hw *hw;
uint32_t vfrstat, vfrtrig;
int i, error;
hw = &pf->hw;
error = ixl_flush_pcie(pf, vf);
if (error != 0)
device_printf(pf->dev,
"Timed out waiting for PCIe activity to stop on VF-%d\n",
vf->vf_num);
for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
DELAY(10);
vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
break;
}
if (i == IXL_VF_RESET_TIMEOUT)
device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
if (vf->vsi.seid != 0)
ixl_disable_rings(&vf->vsi);
ixl_vf_release_resources(pf, vf);
ixl_vf_setup_vsi(pf, vf);
ixl_vf_map_queues(pf, vf);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
ixl_flush(hw);
}
static int
ixl_vc_opcode_level(uint16_t opcode)
{
switch (opcode) {
case I40E_VIRTCHNL_OP_GET_STATS:
return (10);
default:
return (5);
}
}
static void
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
enum i40e_status_code status, void *msg, uint16_t len)
{
struct i40e_hw *hw;
int global_vf_id;
hw = &pf->hw;
global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
"Sending msg (op=%s[%d], status=%d) to VF-%d\n",
ixl_vc_opcode_str(op), op, status, vf->vf_num);
i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
}
static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{
ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}
static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
enum i40e_status_code status, const char *file, int line)
{
I40E_VC_DEBUG(pf, 1,
"Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
status, vf->vf_num, file, line);
ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}
static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_version_info reply;
if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
I40E_ERR_PARAM);
return;
}
vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
sizeof(reply));
}
static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
if (msg_size != 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
I40E_ERR_PARAM);
return;
}
ixl_reset_vf(pf, vf);
/* No response to a reset message. */
}
static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vf_resource reply;
if ((vf->version == 0 && msg_size != 0) ||
(vf->version == 1 && msg_size != 4)) {
device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
" for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
vf->version);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_ERR_PARAM);
return;
}
bzero(&reply, sizeof(reply));
if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
else
/* Force VF RSS setup by PF in 1.1+ VFs */
reply.vf_offload_flags = *(u32 *)msg & (
I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
reply.num_vsis = 1;
reply.num_queue_pairs = vf->vsi.num_queues;
reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
reply.rss_key_size = 52;
reply.rss_lut_size = 64;
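/*
* These sizes mirror the per-VF register space: 52 bytes of hash key
* (13 VFQF_HKEY registers of 4 bytes each, assuming the usual register
* layout) and a 64-entry RSS lookup table (16 VFQF_HLUT registers).
*/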
reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_SUCCESS, &reply, sizeof(reply));
}
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
struct i40e_virtchnl_txq_info *info)
{
struct i40e_hw *hw;
struct i40e_hmc_obj_txq txq;
uint16_t global_queue_num, global_vf_num;
enum i40e_status_code status;
uint32_t qtx_ctl;
hw = &pf->hw;
global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
bzero(&txq, sizeof(txq));
DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
if (status != I40E_SUCCESS)
return (EINVAL);
txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
txq.head_wb_ena = info->headwb_enabled;
txq.head_wb_addr = info->dma_headwb_addr;
txq.qlen = info->ring_len;
txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
txq.rdylist_act = 0;
status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
if (status != I40E_SUCCESS)
return (EINVAL);
qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
(hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
(global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
ixl_flush(hw);
ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
return (0);
}
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
struct i40e_virtchnl_rxq_info *info)
{
struct i40e_hw *hw;
struct i40e_hmc_obj_rxq rxq;
uint16_t global_queue_num;
enum i40e_status_code status;
hw = &pf->hw;
global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
bzero(&rxq, sizeof(rxq));
DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
vf->vf_num, global_queue_num, info->queue_id);
if (info->databuffer_size > IXL_VF_MAX_BUFFER)
return (EINVAL);
if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
info->max_pkt_size < ETHER_MIN_LEN)
return (EINVAL);
if (info->splithdr_enabled) {
if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
return (EINVAL);
rxq.hsplit_0 = info->rx_split_pos &
(I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
rxq.dtype = 2;
}
status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
if (status != I40E_SUCCESS)
return (EINVAL);
rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
rxq.qlen = info->ring_len;
rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
rxq.dsize = 1;
rxq.crcstrip = 1;
rxq.l2tsel = 1;
rxq.rxmax = info->max_pkt_size;
rxq.tphrdesc_ena = 1;
rxq.tphwdesc_ena = 1;
rxq.tphdata_ena = 1;
rxq.tphhead_ena = 1;
rxq.lrxqthresh = 2;
rxq.prefena = 1;
status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
if (status != I40E_SUCCESS)
return (EINVAL);
ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
return (0);
}
static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vsi_queue_config_info *info;
struct i40e_virtchnl_queue_pair_info *pair;
uint16_t expected_msg_size;
int i;
if (msg_size < sizeof(*info)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
info = msg;
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
if (msg_size != expected_msg_size) {
device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
vf->vf_num, msg_size, expected_msg_size);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
if (info->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < info->num_queue_pairs; i++) {
pair = &info->qpair[i];
if (pair->txq.vsi_id != vf->vsi.vsi_num ||
pair->rxq.vsi_id != vf->vsi.vsi_num ||
pair->txq.queue_id != pair->rxq.queue_id ||
pair->txq.queue_id >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
const struct i40e_virtchnl_vector_map *vector,
enum i40e_queue_type cur_type, uint16_t cur_queue,
enum i40e_queue_type *last_type, uint16_t *last_queue)
{
uint32_t offset, qctl;
uint16_t itr_indx;
if (cur_type == I40E_QUEUE_TYPE_RX) {
offset = I40E_QINT_RQCTL(cur_queue);
itr_indx = vector->rxitr_idx;
} else {
offset = I40E_QINT_TQCTL(cur_queue);
itr_indx = vector->txitr_idx;
}
qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
(*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
wr32(&pf->hw, offset, qctl);
*last_type = cur_type;
*last_queue = cur_queue;
}
static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
const struct i40e_virtchnl_vector_map *vector)
{
struct i40e_hw *hw;
u_int qindex;
enum i40e_queue_type type, last_type;
uint32_t lnklst_reg;
uint16_t rxq_map, txq_map, cur_queue, last_queue;
hw = &pf->hw;
rxq_map = vector->rxq_map;
txq_map = vector->txq_map;
last_queue = IXL_END_OF_INTR_LNKLST;
last_type = I40E_QUEUE_TYPE_RX;
/*
* The datasheet says to optimize performance, RX queues and TX queues
* should be interleaved in the interrupt linked list, so we process
* both at once here.
*/
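/*
* Sketch of the result: with txq_map == 0x3 and rxq_map == 0x3 the loop
* processes TX0, RX0, TX1, RX1; each queue's NEXTQ field records the
* queue handled just before it and the LNKLST register written at the
* end points at the last one, so the hardware walks
* RX1 -> TX1 -> RX0 -> TX0 -> end-of-list.
*/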
while ((rxq_map != 0) || (txq_map != 0)) {
if (txq_map != 0) {
qindex = ffs(txq_map) - 1;
type = I40E_QUEUE_TYPE_TX;
cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
ixl_vf_set_qctl(pf, vector, type, cur_queue,
&last_type, &last_queue);
txq_map &= ~(1 << qindex);
}
if (rxq_map != 0) {
qindex = ffs(rxq_map) - 1;
type = I40E_QUEUE_TYPE_RX;
cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
ixl_vf_set_qctl(pf, vector, type, cur_queue,
&last_type, &last_queue);
rxq_map &= ~(1 << qindex);
}
}
if (vector->vector_id == 0)
lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
else
lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
vf->vf_num);
wr32(hw, lnklst_reg,
(last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
(last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
ixl_flush(hw);
}
static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_irq_map_info *map;
struct i40e_virtchnl_vector_map *vector;
struct i40e_hw *hw;
int i, largest_txq, largest_rxq;
hw = &pf->hw;
if (msg_size < sizeof(*map)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
map = msg;
if (map->num_vectors == 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < map->num_vectors; i++) {
vector = &map->vecmap[i];
if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
vector->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
return;
}
if (vector->rxq_map != 0) {
largest_rxq = fls(vector->rxq_map) - 1;
if (largest_rxq >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
}
if (vector->txq_map != 0) {
largest_txq = fls(vector->txq_map) - 1;
if (largest_txq >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
}
if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
vector->txitr_idx > IXL_MAX_ITR_IDX) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
ixl_vf_config_vector(pf, vf, vector);
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
}
static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_queue_select *select;
int error = 0;
if (msg_size != sizeof(*select)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
select = msg;
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
/* Enable TX rings selected by the VF */
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->tx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
}
/* Skip this queue if it hasn't been configured */
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
continue;
/* Warn if this queue is already marked as enabled */
if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
vf->vf_num, i);
error = ixl_enable_tx_ring(pf, &vf->qtag, i);
if (error)
break;
else
ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
}
}
/* Enable RX rings selected by the VF */
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->rx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
}
/* Skip this queue if it hasn't been configured */
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
continue;
/* Warn if this queue is already marked as enabled */
if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
vf->vf_num, i);
error = ixl_enable_rx_ring(pf, &vf->qtag, i);
if (error)
break;
else
ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
}
}
if (error) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_TIMEOUT);
return;
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
}
static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
void *msg, uint16_t msg_size)
{
struct i40e_virtchnl_queue_select *select;
int error = 0;
if (msg_size != sizeof(*select)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
select = msg;
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
/* Disable TX rings selected by the VF */
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->tx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
}
/* Skip this queue if it hasn't been configured */
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
continue;
/* Warn if this queue is already marked as disabled */
if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
vf->vf_num, i);
continue;
}
error = ixl_disable_tx_ring(pf, &vf->qtag, i);
if (error)
break;
else
ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
}
}
/* Disable RX rings selected by the VF */
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->rx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
}
/* Skip this queue if it hasn't been configured */
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
continue;
/* Warn if this queue is already marked as disabled */
if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
vf->vf_num, i);
continue;
}
error = ixl_disable_rx_ring(pf, &vf->qtag, i);
if (error)
break;
else
ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
}
}
if (error) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_TIMEOUT);
return;
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
}
static bool
ixl_zero_mac(const uint8_t *addr)
{
uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
return (cmp_etheraddr(addr, zero));
}
static bool
ixl_bcast_mac(const uint8_t *addr)
{
return (cmp_etheraddr(addr, ixl_bcast_addr));
}
static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{
if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
return (EINVAL);
/*
* If the VF is not allowed to change its MAC address, don't let it
* set a MAC filter for an address that is not a multicast address and
* is not its assigned MAC.
*/
if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
!(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
return (EPERM);
return (0);
}
static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_ether_addr_list *addr_list;
struct i40e_virtchnl_ether_addr *addr;
struct ixl_vsi *vsi;
int i;
size_t expected_size;
vsi = &vf->vsi;
if (msg_size < sizeof(*addr_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
I40E_ERR_PARAM);
return;
}
addr_list = msg;
expected_size = sizeof(*addr_list) +
addr_list->num_elements * sizeof(*addr);
if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vsi->vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < addr_list->num_elements; i++) {
if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
return;
}
}
for (i = 0; i < addr_list->num_elements; i++) {
addr = &addr_list->list[i];
ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
}
static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_ether_addr_list *addr_list;
struct i40e_virtchnl_ether_addr *addr;
size_t expected_size;
int i;
if (msg_size < sizeof(*addr_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
I40E_ERR_PARAM);
return;
}
addr_list = msg;
expected_size = sizeof(*addr_list) +
addr_list->num_elements * sizeof(*addr);
if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < addr_list->num_elements; i++) {
addr = &addr_list->list[i];
if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
return;
}
}
for (i = 0; i < addr_list->num_elements; i++) {
addr = &addr_list->list[i];
ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
}
static enum i40e_status_code
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct i40e_vsi_context vsi_ctx;
vsi_ctx.seid = vf->vsi.seid;
bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
}
static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vlan_filter_list *filter_list;
enum i40e_status_code code;
size_t expected_size;
int i;
if (msg_size < sizeof(*filter_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
filter_list = msg;
expected_size = sizeof(*filter_list) +
filter_list->num_elements * sizeof(uint16_t);
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < filter_list->num_elements; i++) {
if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
}
code = ixl_vf_enable_vlan_strip(pf, vf);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
}
for (i = 0; i < filter_list->num_elements; i++)
ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
}
static void
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vlan_filter_list *filter_list;
int i;
size_t expected_size;
if (msg_size < sizeof(*filter_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
filter_list = msg;
expected_size = sizeof(*filter_list) +
filter_list->num_elements * sizeof(uint16_t);
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < filter_list->num_elements; i++) {
if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
}
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < filter_list->num_elements; i++)
ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
}
static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
void *msg, uint16_t msg_size)
{
struct i40e_virtchnl_promisc_info *info;
enum i40e_status_code code;
if (msg_size != sizeof(*info)) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
info = msg;
if (info->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
return;
}
code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
return;
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}
static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_queue_select *queue;
if (msg_size != sizeof(*queue)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
I40E_ERR_PARAM);
return;
}
queue = msg;
if (queue->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
I40E_ERR_PARAM);
return;
}
ixl_update_eth_stats(&vf->vsi);
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}
static void
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
struct i40e_virtchnl_rss_key *key;
struct i40e_aqc_get_set_rss_key_data key_data;
enum i40e_status_code status;
hw = &pf->hw;
if (msg_size < sizeof(*key)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
key = msg;
if (key->key_len > 52) {
device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
vf->vf_num, key->key_len, 52);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
if (key->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
/* Fill out hash using MAC-dependent method */
if (hw->mac.type == I40E_MAC_X722) {
bzero(&key_data, sizeof(key_data));
if (key->key_len <= 40)
bcopy(key->key, key_data.standard_rss_key, key->key_len);
else {
bcopy(key->key, key_data.standard_rss_key, 40);
bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
}
status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
if (status) {
device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_ADMIN_QUEUE_ERROR);
return;
}
} else {
for (int i = 0; i < (key->key_len / 4); i++)
i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)key->key)[i]);
}
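/*
* On pre-X722 MACs the key is written one 32-bit word at a time above,
* e.g. a 40-byte key becomes ten VFQF_HKEY1 writes; key_len / 4 drops
* any trailing partial word.
*/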
DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
vf->vf_num, key->key[0]);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
}
static void
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
struct i40e_virtchnl_rss_lut *lut;
enum i40e_status_code status;
hw = &pf->hw;
if (msg_size < sizeof(*lut)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
lut = msg;
if (lut->lut_entries > 64) {
device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
vf->vf_num, lut->lut_entries, 64);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
if (lut->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
/* Fill out LUT using MAC-dependent method */
if (hw->mac.type == I40E_MAC_X722) {
status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
if (status) {
device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_ADMIN_QUEUE_ERROR);
return;
}
} else {
for (int i = 0; i < (lut->lut_entries / 4); i++)
i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)lut->lut)[i]);
}
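/*
* Similarly, four one-byte LUT entries are packed per register above,
* so 64 entries translate into 16 VFQF_HLUT1 writes.
*/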
DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
vf->vf_num, lut->lut[0], lut->lut_entries);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
}
static void
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
struct i40e_virtchnl_rss_hena *hena;
hw = &pf->hw;
if (msg_size < sizeof(*hena)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
I40E_ERR_PARAM);
return;
}
hena = msg;
/* Set HENA */
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)hena->hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)(hena->hena >> 32));
DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
vf->vf_num, hena->hena);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
}
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
struct ixl_vf *vf;
void *msg;
uint16_t vf_num, msg_size;
uint32_t opcode;
vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
opcode = le32toh(event->desc.cookie_high);
if (vf_num >= pf->num_vfs) {
device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
return;
}
vf = &pf->vfs[vf_num];
msg = event->msg_buf;
msg_size = event->msg_len;
I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
"Got msg %s(%d) from%sVF-%d of size %d\n",
ixl_vc_opcode_str(opcode), opcode,
(vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
vf_num, msg_size);
/* This must be a stray msg from a previously destroyed VF. */
if (!(vf->vf_flags & VF_FLAG_ENABLED))
return;
switch (opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ixl_vf_version_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
ixl_vf_reset_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_SET_RSS_HENA:
ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
break;
/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
default:
i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
break;
}
}
/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
void
ixl_handle_vflr(void *arg, int pending)
{
struct ixl_pf *pf;
struct ixl_vf *vf;
struct i40e_hw *hw;
uint16_t global_vf_num;
uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
int i;
pf = arg;
hw = &pf->hw;
IXL_PF_LOCK(pf);
for (i = 0; i < pf->num_vfs; i++) {
global_vf_num = hw->func_caps.vf_base_id + i;
vf = &pf->vfs[i];
if (!(vf->vf_flags & VF_FLAG_ENABLED))
continue;
vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
if (vflrstat & vflrstat_mask) {
wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
vflrstat_mask);
ixl_reinit_vf(pf, vf);
}
}
icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
ixl_flush(hw);
IXL_PF_UNLOCK(pf);
}
static int
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
{
switch (err) {
case I40E_AQ_RC_EPERM:
return (EPERM);
case I40E_AQ_RC_ENOENT:
return (ENOENT);
case I40E_AQ_RC_ESRCH:
return (ESRCH);
case I40E_AQ_RC_EINTR:
return (EINTR);
case I40E_AQ_RC_EIO:
return (EIO);
case I40E_AQ_RC_ENXIO:
return (ENXIO);
case I40E_AQ_RC_E2BIG:
return (E2BIG);
case I40E_AQ_RC_EAGAIN:
return (EAGAIN);
case I40E_AQ_RC_ENOMEM:
return (ENOMEM);
case I40E_AQ_RC_EACCES:
return (EACCES);
case I40E_AQ_RC_EFAULT:
return (EFAULT);
case I40E_AQ_RC_EBUSY:
return (EBUSY);
case I40E_AQ_RC_EEXIST:
return (EEXIST);
case I40E_AQ_RC_EINVAL:
return (EINVAL);
case I40E_AQ_RC_ENOTTY:
return (ENOTTY);
case I40E_AQ_RC_ENOSPC:
return (ENOSPC);
case I40E_AQ_RC_ENOSYS:
return (ENOSYS);
case I40E_AQ_RC_ERANGE:
return (ERANGE);
case I40E_AQ_RC_EFLUSHED:
return (EINVAL); /* No exact equivalent in errno.h */
case I40E_AQ_RC_BAD_ADDR:
return (EFAULT);
case I40E_AQ_RC_EMODE:
return (EPERM);
case I40E_AQ_RC_EFBIG:
return (EFBIG);
default:
return (EINVAL);
}
}
int
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *pf_vsi;
enum i40e_status_code ret;
int i, error;
pf = device_get_softc(dev);
hw = &pf->hw;
pf_vsi = &pf->vsi;
IXL_PF_LOCK(pf);
- pf->vfs = mallocarray(num_vfs, sizeof(struct ixl_vf), M_IXL,
- M_NOWAIT | M_ZERO);
+ pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
+ M_ZERO);
if (pf->vfs == NULL) {
error = ENOMEM;
goto fail;
}
for (i = 0; i < num_vfs; i++)
sysctl_ctx_init(&pf->vfs[i].ctx);
ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1, FALSE, &pf->veb_seid, FALSE, NULL);
if (ret != I40E_SUCCESS) {
error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
device_printf(dev, "add_veb failed; code=%d error=%d", ret,
error);
goto fail;
}
pf->num_vfs = num_vfs;
IXL_PF_UNLOCK(pf);
return (0);
fail:
free(pf->vfs, M_IXL);
pf->vfs = NULL;
IXL_PF_UNLOCK(pf);
return (error);
}
void
ixl_iov_uninit(device_t dev)
{
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
struct ifnet *ifp;
struct ixl_vf *vfs;
int i, num_vfs;
pf = device_get_softc(dev);
hw = &pf->hw;
vsi = &pf->vsi;
ifp = vsi->ifp;
IXL_PF_LOCK(pf);
for (i = 0; i < pf->num_vfs; i++) {
if (pf->vfs[i].vsi.seid != 0)
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
DDPRINTF(dev, "VF %d: %d released\n",
i, pf->vfs[i].qtag.num_allocated);
DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
}
if (pf->veb_seid != 0) {
i40e_aq_delete_element(hw, pf->veb_seid, NULL);
pf->veb_seid = 0;
}
vfs = pf->vfs;
num_vfs = pf->num_vfs;
pf->vfs = NULL;
pf->num_vfs = 0;
IXL_PF_UNLOCK(pf);
/* Do this after the unlock as sysctl_ctx_free might sleep. */
for (i = 0; i < num_vfs; i++)
sysctl_ctx_free(&vfs[i].ctx);
free(vfs, M_IXL);
}
static int
ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
{
device_t dev = pf->dev;
int error;
/* Validate, and clamp value if invalid */
if (num_queues < 1 || num_queues > 16)
device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
num_queues, vf->vf_num);
if (num_queues < 1) {
device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
num_queues = 1;
} else if (num_queues > 16) {
device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
num_queues = 16;
}
error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
if (error) {
device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
num_queues, vf->vf_num);
return (ENOSPC);
}
DDPRINTF(dev, "VF %d: %d allocated, %d active",
vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
return (0);
}
int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
char sysctl_name[QUEUE_NAME_LEN];
struct ixl_pf *pf;
struct ixl_vf *vf;
const void *mac;
size_t size;
int error;
int vf_num_queues;
pf = device_get_softc(dev);
vf = &pf->vfs[vfnum];
IXL_PF_LOCK(pf);
vf->vf_num = vfnum;
vf->vsi.back = pf;
vf->vf_flags = VF_FLAG_ENABLED;
SLIST_INIT(&vf->vsi.ftl);
/* Reserve queue allocation from PF */
vf_num_queues = nvlist_get_number(params, "num-queues");
error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
if (error != 0)
goto out;
error = ixl_vf_setup_vsi(pf, vf);
if (error != 0)
goto out;
if (nvlist_exists_binary(params, "mac-addr")) {
mac = nvlist_get_binary(params, "mac-addr", &size);
bcopy(mac, vf->mac, ETHER_ADDR_LEN);
if (nvlist_get_bool(params, "allow-set-mac"))
vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
} else
/*
* If the administrator has not specified a MAC address then
* we must allow the VF to choose one.
*/
vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
if (nvlist_get_bool(params, "mac-anti-spoof"))
vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
if (nvlist_get_bool(params, "allow-promisc"))
vf->vf_flags |= VF_FLAG_PROMISC_CAP;
vf->vf_flags |= VF_FLAG_VLAN_CAP;
ixl_reset_vf(pf, vf);
out:
IXL_PF_UNLOCK(pf);
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
}
return (error);
}
Index: head/sys/dev/ixl/ixl_pf_main.c
===================================================================
--- head/sys/dev/ixl/ixl_pf_main.c (revision 328217)
+++ head/sys/dev/ixl/ixl_pf_main.c (revision 328218)
@@ -1,6091 +1,6090 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#include "ixl_pf.h"
#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif
#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif
#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
static u64 ixl_max_aq_speed_to_value(u8);
static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
/* Sysctls */
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
/* Debug Sysctls */
static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif
#ifdef IXL_IW
extern int ixl_enable_iwarp;
#endif
const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const char * const ixl_fc_string[6] = {
"None",
"Rx",
"Tx",
"Full",
"Priority",
"Default"
};
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
va_list args;
if (!(mask & pf->dbg_mask))
return;
/* Re-implement device_printf() */
device_print_prettyname(pf->dev);
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
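/* Decompose the packed OEM version from the NVM into major/build/patch */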
sbuf_printf(buf,
"fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
IXL_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
IXL_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack,
oem_ver, oem_build, oem_patch);
}
void
ixl_print_nvm_version(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct sbuf *sbuf;
sbuf = sbuf_new_auto();
ixl_nvm_version_str(hw, sbuf);
sbuf_finish(sbuf);
device_printf(dev, "%s\n", sbuf_data(sbuf));
sbuf_delete(sbuf);
}
static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
vsi->tx_itr_setting = pf->tx_itr;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
vsi->tx_itr_setting);
txr->itr = vsi->tx_itr_setting;
txr->latency = IXL_AVE_LATENCY;
}
}
static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
vsi->rx_itr_setting = pf->rx_itr;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
vsi->rx_itr_setting);
rxr->itr = vsi->rx_itr_setting;
rxr->latency = IXL_AVE_LATENCY;
}
}
/*
* Write PF ITR values to queue ITR registers.
*/
void
ixl_configure_itr(struct ixl_pf *pf)
{
ixl_configure_tx_itr(pf);
ixl_configure_rx_itr(pf);
}
/*********************************************************************
* Init entry point
*
* This routine is used in two ways: the stack calls it as the
* init entry point in the network interface structure, and the
* driver calls it as a hw/sw initialization routine to bring the
* adapter back to a consistent state.
**********************************************************************/
void
ixl_init_locked(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ifnet *ifp = vsi->ifp;
device_t dev = pf->dev;
struct i40e_filter_control_settings filter;
u8 tmpaddr[ETHER_ADDR_LEN];
int ret;
INIT_DEBUGOUT("ixl_init_locked: begin");
IXL_PF_LOCK_ASSERT(pf);
ixl_stop_locked(pf);
/*
* If the aq is dead here, it probably means something outside of the driver
* did something to the adapter, like a PF reset.
* So rebuild the driver's state here if that occurs.
*/
if (!i40e_check_asq_alive(&pf->hw)) {
device_printf(dev, "Admin Queue is down; resetting...\n");
ixl_teardown_hw_structs(pf);
ixl_reset(pf);
}
/* Get the latest mac address... User might use a LAA */
bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
I40E_ETH_LENGTH_OF_ADDRESS);
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
(i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
bcopy(tmpaddr, hw->mac.addr,
I40E_ETH_LENGTH_OF_ADDRESS);
ret = i40e_aq_mac_address_write(hw,
I40E_AQC_WRITE_TYPE_LAA_ONLY,
hw->mac.addr, NULL);
if (ret) {
device_printf(dev, "LLA address"
"change failed!!\n");
return;
}
}
ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
/* Set the various hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TSO)
ifp->if_hwassist |= CSUM_TSO;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
/* Set up the device filtering */
bzero(&filter, sizeof(filter));
filter.enable_ethtype = TRUE;
filter.enable_macvlan = TRUE;
filter.enable_fdir = FALSE;
filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
if (i40e_set_filter_control(hw, &filter))
device_printf(dev, "i40e_set_filter_control() failed\n");
/* Prepare the VSI: rings, hmc contexts, etc... */
if (ixl_initialize_vsi(vsi)) {
device_printf(dev, "initialize vsi failed!!\n");
return;
}
/* Set up RSS */
ixl_config_rss(pf);
/* Add protocol filters to list */
ixl_init_filters(vsi);
/* Setup vlan's if needed */
ixl_setup_vlan_filters(vsi);
/* Set up MSI/X routing and the ITR settings */
if (pf->msix > 1) {
ixl_configure_queue_intr_msix(pf);
ixl_configure_itr(pf);
} else
ixl_configure_legacy(pf);
ixl_enable_rings(vsi);
i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
ixl_reconfigure_filters(vsi);
/* And now turn on interrupts */
ixl_enable_intr(vsi);
/* Get link info */
hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
/* Start the local timer */
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/* Now inform the stack we're ready */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef IXL_IW
if (ixl_enable_iwarp && pf->iw_enabled) {
ret = ixl_iw_pf_init(pf);
if (ret)
device_printf(dev,
"initialize iwarp failed, code %d\n", ret);
}
#endif
}
/*********************************************************************
*
* Get the hardware capabilities
*
**********************************************************************/
int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
struct i40e_aqc_list_capabilities_element_resp *buf;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
int error, len;
u16 needed;
bool again = TRUE;
len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
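/*
 * Start with room for 40 capability entries; if the firmware reports
 * ENOMEM, the retry below reallocates using the exact size it returned.
 */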
retry:
if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate cap memory\n");
return (ENOMEM);
}
/* This populates the hw struct */
error = i40e_aq_discover_capabilities(hw, buf, len,
&needed, i40e_aqc_opc_list_func_capabilities, NULL);
free(buf, M_DEVBUF);
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
(again == TRUE)) {
/* retry once with a larger buffer */
again = FALSE;
len = needed;
goto retry;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
device_printf(dev, "capability discovery failed: %d\n",
pf->hw.aq.asq_last_status);
return (ENODEV);
}
/* Capture this PF's starting queue pair */
pf->qbase = hw->func_caps.base_queue;
#ifdef IXL_DEBUG
device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
"msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
hw->pf_id, hw->func_caps.num_vfs,
hw->func_caps.num_msix_vectors,
hw->func_caps.num_msix_vectors_vf,
hw->func_caps.fd_filters_guaranteed,
hw->func_caps.fd_filters_best_effort,
hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
hw->func_caps.base_queue);
#endif
/* Print a subset of the capability information. */
device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
(hw->func_caps.mdio_port_mode == 2) ? "I2C" :
(hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
"MDIO shared");
struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
if (osdep->i2c_intfc_num != -1)
pf->has_i2c = true;
return (error);
}
void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
device_t dev = vsi->dev;
/* Enable/disable TXCSUM/TSO4 */
if (!(ifp->if_capenable & IFCAP_TXCSUM)
&& !(ifp->if_capenable & IFCAP_TSO4)) {
if (mask & IFCAP_TXCSUM) {
ifp->if_capenable |= IFCAP_TXCSUM;
/* enable TXCSUM, restore TSO if previously enabled */
if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
ifp->if_capenable |= IFCAP_TSO4;
}
}
else if (mask & IFCAP_TSO4) {
ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
device_printf(dev,
"TSO4 requires txcsum, enabling both...\n");
}
} else if((ifp->if_capenable & IFCAP_TXCSUM)
&& !(ifp->if_capenable & IFCAP_TSO4)) {
if (mask & IFCAP_TXCSUM)
ifp->if_capenable &= ~IFCAP_TXCSUM;
else if (mask & IFCAP_TSO4)
ifp->if_capenable |= IFCAP_TSO4;
} else if((ifp->if_capenable & IFCAP_TXCSUM)
&& (ifp->if_capenable & IFCAP_TSO4)) {
if (mask & IFCAP_TXCSUM) {
vsi->flags |= IXL_FLAGS_KEEP_TSO4;
ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
device_printf(dev,
"TSO4 requires txcsum, disabling both...\n");
} else if (mask & IFCAP_TSO4)
ifp->if_capenable &= ~IFCAP_TSO4;
}
/* Enable/disable TXCSUM_IPV6/TSO6 */
if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
&& !(ifp->if_capenable & IFCAP_TSO6)) {
if (mask & IFCAP_TXCSUM_IPV6) {
ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
ifp->if_capenable |= IFCAP_TSO6;
}
} else if (mask & IFCAP_TSO6) {
ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
device_printf(dev,
"TSO6 requires txcsum6, enabling both...\n");
}
} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
&& !(ifp->if_capenable & IFCAP_TSO6)) {
if (mask & IFCAP_TXCSUM_IPV6)
ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
else if (mask & IFCAP_TSO6)
ifp->if_capenable |= IFCAP_TSO6;
} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
&& (ifp->if_capenable & IFCAP_TSO6)) {
if (mask & IFCAP_TXCSUM_IPV6) {
vsi->flags |= IXL_FLAGS_KEEP_TSO6;
ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
device_printf(dev,
"TSO6 requires txcsum6, disabling both...\n");
} else if (mask & IFCAP_TSO6)
ifp->if_capenable &= ~IFCAP_TSO6;
}
}
/* For the set_advertise sysctl */
void
ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp abilities;
/* Set initial sysctl values */
status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
NULL);
if (status) {
/* Non-fatal error */
device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
__func__, status);
return;
}
pf->advertised_speed =
ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
}
int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
enum i40e_status_code status = 0;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
/* Shutdown LAN HMC */
if (hw->hmc.hmc_obj) {
status = i40e_shutdown_lan_hmc(hw);
if (status) {
device_printf(dev,
"init: LAN HMC shutdown failure; status %d\n", status);
goto err_out;
}
}
// XXX: This gets called when we know the adminq is inactive;
// so we already know it's setup when we get here.
/* Shutdown admin queue */
status = i40e_shutdown_adminq(hw);
if (status)
device_printf(dev,
"init: Admin Queue shutdown failure; status %d\n", status);
err_out:
return (status);
}
int
ixl_reset(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
u8 set_fc_err_mask;
int error = 0;
// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
i40e_clear_hw(hw);
error = i40e_pf_reset(hw);
if (error) {
device_printf(dev, "init: PF reset failure");
error = EIO;
goto err_out;
}
error = i40e_init_adminq(hw);
if (error) {
device_printf(dev, "init: Admin queue init failure;"
" status code %d", error);
error = EIO;
goto err_out;
}
i40e_clear_pxe_mode(hw);
error = ixl_get_hw_capabilities(pf);
if (error) {
device_printf(dev, "init: Error retrieving HW capabilities;"
" status code %d\n", error);
goto err_out;
}
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
device_printf(dev, "init: LAN HMC init failed; status code %d\n",
error);
error = EIO;
goto err_out;
}
error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (error) {
device_printf(dev, "init: LAN HMC config failed; status code %d\n",
error);
error = EIO;
goto err_out;
}
// XXX: possible fix for panic, but our failure recovery is still broken
error = ixl_switch_config(pf);
if (error) {
device_printf(dev, "init: ixl_switch_config() failed: %d\n",
error);
goto err_out;
}
error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
NULL);
if (error) {
device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
" aq_err %d\n", error, hw->aq.asq_last_status);
error = EIO;
goto err_out;
}
error = i40e_set_fc(hw, &set_fc_err_mask, true);
if (error) {
device_printf(dev, "init: setting link flow control failed; retcode %d,"
" fc_err_mask 0x%02x\n", error, set_fc_err_mask);
goto err_out;
}
// XXX: (Rebuild VSIs?)
/* Firmware delay workaround */
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
(hw->aq.fw_maj_ver < 4)) {
i40e_msec_delay(75);
error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
if (error) {
device_printf(dev, "init: link restart failed, aq_err %d\n",
hw->aq.asq_last_status);
goto err_out;
}
}
err_out:
return (error);
}
/*
** MSIX Interrupt Handlers and Tasklets
*/
void
ixl_handle_que(void *context, int pending)
{
struct ixl_queue *que = context;
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
struct ifnet *ifp = vsi->ifp;
bool more;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
more = ixl_rxeof(que, IXL_RX_LIMIT);
IXL_TX_LOCK(txr);
ixl_txeof(que);
if (!drbr_empty(ifp, txr->br))
ixl_mq_start_locked(ifp, txr);
IXL_TX_UNLOCK(txr);
if (more) {
taskqueue_enqueue(que->tq, &que->task);
return;
}
}
/* Re-enable this queue interrupt */
ixl_enable_queue(hw, que->me);
return;
}
/*********************************************************************
*
* Legacy Interrupt Service routine
*
**********************************************************************/
void
ixl_intr(void *arg)
{
struct ixl_pf *pf = arg;
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
struct ifnet *ifp = vsi->ifp;
struct tx_ring *txr = &que->txr;
u32 icr0;
bool more_tx, more_rx;
pf->admin_irq++;
/* Protect against spurious interrupts */
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
icr0 = rd32(hw, I40E_PFINT_ICR0);
#ifdef PCI_IOV
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
taskqueue_enqueue(pf->tq, &pf->adminq);
}
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
++que->irqs;
more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
IXL_TX_LOCK(txr);
more_tx = ixl_txeof(que);
if (!drbr_empty(vsi->ifp, txr->br))
more_tx = 1;
IXL_TX_UNLOCK(txr);
}
ixl_enable_intr0(hw);
}
/*********************************************************************
*
* MSIX VSI Interrupt Service routine
*
**********************************************************************/
void
ixl_msix_que(void *arg)
{
struct ixl_queue *que = arg;
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
bool more_tx, more_rx;
/* Protect against spurious interrupts */
if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
++que->irqs;
more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
IXL_TX_LOCK(txr);
more_tx = ixl_txeof(que);
/*
** Make certain that if the stack
** has anything queued the task gets
** scheduled to handle it.
*/
if (!drbr_empty(vsi->ifp, txr->br))
more_tx = 1;
IXL_TX_UNLOCK(txr);
ixl_set_queue_rx_itr(que);
ixl_set_queue_tx_itr(que);
if (more_tx || more_rx)
taskqueue_enqueue(que->tq, &que->task);
else
ixl_enable_queue(hw, que->me);
return;
}
/*********************************************************************
*
* MSIX Admin Queue Interrupt Service routine
*
**********************************************************************/
void
ixl_msix_adminq(void *arg)
{
struct ixl_pf *pf = arg;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
u32 reg, mask, rstat_reg;
bool do_task = FALSE;
++pf->admin_irq;
reg = rd32(hw, I40E_PFINT_ICR0);
mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* Check on the cause */
if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
do_task = TRUE;
}
if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
ixl_handle_mdd_event(pf);
mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
}
if (reg & I40E_PFINT_ICR0_GRST_MASK) {
device_printf(dev, "Reset Requested!\n");
rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
device_printf(dev, "Reset type: ");
switch (rstat_reg) {
/* These others might be handled similarly to an EMPR reset */
case I40E_RESET_CORER:
printf("CORER\n");
break;
case I40E_RESET_GLOBR:
printf("GLOBR\n");
break;
case I40E_RESET_EMPR:
printf("EMPR\n");
atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
break;
default:
printf("POR\n");
break;
}
/* overload admin queue task to check reset progress */
do_task = TRUE;
}
if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
device_printf(dev, "ECC Error detected!\n");
}
if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
reg = rd32(hw, I40E_PFHMC_ERRORINFO);
if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
device_printf(dev, "HMC Error detected!\n");
device_printf(dev, "INFO 0x%08x\n", reg);
reg = rd32(hw, I40E_PFHMC_ERRORDATA);
device_printf(dev, "DATA 0x%08x\n", reg);
wr32(hw, I40E_PFHMC_ERRORINFO, 0);
}
}
if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
device_printf(dev, "PCI Exception detected!\n");
}
#ifdef PCI_IOV
if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
taskqueue_enqueue(pf->tq, &pf->vflr_task);
}
#endif
if (do_task)
taskqueue_enqueue(pf->tq, &pf->adminq);
else
ixl_enable_intr0(hw);
}
void
ixl_set_promisc(struct ixl_vsi *vsi)
{
struct ifnet *ifp = vsi->ifp;
struct i40e_hw *hw = vsi->hw;
int err, mcnt = 0;
bool uni = FALSE, multi = FALSE;
if (ifp->if_flags & IFF_ALLMULTI)
multi = TRUE;
else { /* Need to count the multicast addresses */
struct ifmultiaddr *ifma;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
if (mcnt == MAX_MULTICAST_ADDR)
break;
mcnt++;
}
if_maddr_runlock(ifp);
}
if (mcnt >= MAX_MULTICAST_ADDR)
multi = TRUE;
if (ifp->if_flags & IFF_PROMISC)
uni = TRUE;
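/*
 * Unicast promiscuous mode tracks IFF_PROMISC; multicast promiscuous
 * mode is enabled for IFF_ALLMULTI or when the multicast list would
 * overflow MAX_MULTICAST_ADDR filters.
 */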
err = i40e_aq_set_vsi_unicast_promiscuous(hw,
vsi->seid, uni, NULL, TRUE);
err = i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, multi, NULL);
return;
}
/*********************************************************************
* Filter Routines
*
* Routines for multicast and vlan filter management.
*
*********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
struct ifmultiaddr *ifma;
struct ifnet *ifp = vsi->ifp;
struct i40e_hw *hw = vsi->hw;
int mcnt = 0, flags;
IOCTL_DEBUGOUT("ixl_add_multi: begin");
if_maddr_rlock(ifp);
/*
** First just get a count, to decide if we
** should simply use multicast promiscuous mode.
*/
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
mcnt++;
}
if_maddr_runlock(ifp);
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete existing MC filters */
ixl_del_hw_filters(vsi, mcnt);
i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, TRUE, NULL);
return;
}
mcnt = 0;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
ixl_add_mc_filter(vsi,
(u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
mcnt++;
}
if_maddr_runlock(ifp);
if (mcnt > 0) {
flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
ixl_add_hw_filters(vsi, flags, mcnt);
}
IOCTL_DEBUGOUT("ixl_add_multi: end");
return;
}
void
ixl_del_multi(struct ixl_vsi *vsi)
{
struct ifnet *ifp = vsi->ifp;
struct ifmultiaddr *ifma;
struct ixl_mac_filter *f;
int mcnt = 0;
bool match = FALSE;
IOCTL_DEBUGOUT("ixl_del_multi: begin");
/* Search for removed multicast addresses */
if_maddr_rlock(ifp);
SLIST_FOREACH(f, &vsi->ftl, next) {
if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
match = FALSE;
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
if (cmp_etheraddr(f->macaddr, mc_addr)) {
match = TRUE;
break;
}
}
if (match == FALSE) {
f->flags |= IXL_FILTER_DEL;
mcnt++;
}
}
}
if_maddr_runlock(ifp);
if (mcnt > 0)
ixl_del_hw_filters(vsi, mcnt);
}
/*********************************************************************
* Timer routine
*
* This routine checks for link status, updates statistics,
* and runs the watchdog check.
*
* Only runs when the driver is configured UP and RUNNING.
*
**********************************************************************/
void
ixl_local_timer(void *arg)
{
struct ixl_pf *pf = arg;
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = pf->dev;
struct tx_ring *txr;
int hung = 0;
u32 mask;
s32 timer, new_timer;
IXL_PF_LOCK_ASSERT(pf);
/* Fire off the adminq task */
taskqueue_enqueue(pf->tq, &pf->adminq);
/* Update stats */
ixl_update_stats_counters(pf);
/* Check status of the queues */
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
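/*
 * Writing this mask to I40E_PFINT_DYN_CTLN below re-enables the queue
 * interrupt and triggers a software interrupt, so a queue that still
 * has outstanding work gets serviced even without a new hardware event.
 */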
for (int i = 0; i < vsi->num_queues; i++, que++) {
txr = &que->txr;
timer = atomic_load_acq_32(&txr->watchdog_timer);
if (timer > 0) {
new_timer = timer - hz;
if (new_timer <= 0) {
atomic_store_rel_32(&txr->watchdog_timer, -1);
device_printf(dev, "WARNING: queue %d "
"appears to be hung!\n", que->me);
++hung;
} else {
/*
* If this fails, that means something in the TX path has updated
* the watchdog, so it means the TX path is still working and
* the watchdog doesn't need to countdown.
*/
atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
/* Any queues with outstanding work get a sw irq */
wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
}
}
}
/* Reset when a queue shows hung */
if (hung)
goto hung;
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
return;
hung:
device_printf(dev, "WARNING: Resetting!\n");
pf->watchdog_events++;
ixl_init_locked(pf);
}
void
ixl_link_up_msg(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ifnet *ifp = pf->vsi.ifp;
log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
ifp->if_xname,
ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
(hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
"Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
"Clause 108 RS-FEC" : "None",
(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
(hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
ixl_fc_string[1] : ixl_fc_string[0]);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixl_update_link_status(struct ixl_pf *pf)
{
struct ixl_vsi *vsi = &pf->vsi;
struct ifnet *ifp = vsi->ifp;
device_t dev = pf->dev;
if (pf->link_up) {
if (vsi->link_active == FALSE) {
vsi->link_active = TRUE;
ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
if_link_state_change(ifp, LINK_STATE_UP);
ixl_link_up_msg(pf);
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
if (bootverbose)
device_printf(dev, "Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
vsi->link_active = FALSE;
}
}
return;
}
/*********************************************************************
*
* This routine disables all traffic on the adapter: it stops the
* local timer, disables the rings and their interrupts, and marks
* the interface as no longer running.
*
**********************************************************************/
void
ixl_stop_locked(struct ixl_pf *pf)
{
struct ixl_vsi *vsi = &pf->vsi;
struct ifnet *ifp = vsi->ifp;
INIT_DEBUGOUT("ixl_stop: begin\n");
IXL_PF_LOCK_ASSERT(pf);
#ifdef IXL_IW
/* Stop iWARP device */
if (ixl_enable_iwarp && pf->iw_enabled)
ixl_iw_pf_stop(pf);
#endif
/* Stop the local timer */
callout_stop(&pf->timer);
ixl_disable_rings_intr(vsi);
ixl_disable_rings(vsi);
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
void
ixl_stop(struct ixl_pf *pf)
{
IXL_PF_LOCK(pf);
ixl_stop_locked(pf);
IXL_PF_UNLOCK(pf);
}
/*********************************************************************
*
* Setup the legacy/MSI interrupt resource and handler
*
**********************************************************************/
int
ixl_setup_legacy(struct ixl_pf *pf)
{
device_t dev = pf->dev;
int error, rid = 0;
if (pf->msix == 1)
rid = 1;
pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&rid, RF_SHAREABLE | RF_ACTIVE);
if (pf->res == NULL) {
device_printf(dev, "bus_alloc_resource_any() for"
" legacy/msi interrupt\n");
return (ENXIO);
}
/* Set the handler function */
error = bus_setup_intr(dev, pf->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixl_intr, pf, &pf->tag);
if (error) {
pf->res = NULL;
device_printf(dev, "bus_setup_intr() for legacy/msi"
" interrupt handler failed, error %d\n", error);
return (ENXIO);
}
error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
if (error) {
/* non-fatal */
device_printf(dev, "bus_describe_intr() for Admin Queue"
" interrupt name failed, error %d\n", error);
}
return (0);
}
int
ixl_setup_adminq_tq(struct ixl_pf *pf)
{
device_t dev = pf->dev;
int error = 0;
/* Tasklet for Admin Queue interrupts */
TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
/* VFLR Tasklet */
TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
/* Create and start Admin Queue taskqueue */
pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
taskqueue_thread_enqueue, &pf->tq);
if (!pf->tq) {
device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
return (ENOMEM);
}
error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
device_get_nameunit(dev));
if (error) {
device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
error);
taskqueue_free(pf->tq);
return (error);
}
return (0);
}
int
ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
struct ixl_queue *que = vsi->queues;
device_t dev = vsi->dev;
#ifdef RSS
int cpu_id = 0;
cpuset_t cpu_mask;
#endif
/* Create queue tasks and start queue taskqueues */
for (int i = 0; i < vsi->num_queues; i++, que++) {
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixl_handle_que, que);
que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
CPU_SETOF(cpu_id, &cpu_mask);
taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
&cpu_mask, "%s (bucket %d)",
device_get_nameunit(dev), cpu_id);
#else
taskqueue_start_threads(&que->tq, 1, PI_NET,
"%s (que %d)", device_get_nameunit(dev), que->me);
#endif
}
return (0);
}
void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
if (pf->tq) {
taskqueue_free(pf->tq);
pf->tq = NULL;
}
}
void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
struct ixl_queue *que = vsi->queues;
for (int i = 0; i < vsi->num_queues; i++, que++) {
if (que->tq) {
taskqueue_free(que->tq);
que->tq = NULL;
}
}
}
int
ixl_setup_adminq_msix(struct ixl_pf *pf)
{
device_t dev = pf->dev;
int rid, error = 0;
/* Admin IRQ rid is 1, vector is 0 */
rid = 1;
/* Get interrupt resource from bus */
pf->res = bus_alloc_resource_any(dev,
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (!pf->res) {
device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
" interrupt failed [rid=%d]\n", rid);
return (ENXIO);
}
/* Then associate interrupt with handler */
error = bus_setup_intr(dev, pf->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixl_msix_adminq, pf, &pf->tag);
if (error) {
pf->res = NULL;
device_printf(dev, "bus_setup_intr() for Admin Queue"
" interrupt handler failed, error %d\n", error);
return (ENXIO);
}
error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
if (error) {
/* non-fatal */
device_printf(dev, "bus_describe_intr() for Admin Queue"
" interrupt name failed, error %d\n", error);
}
pf->admvec = 0;
return (0);
}
/*
* Allocate interrupt resources from bus and associate an interrupt handler
* to those for the VSI's queues.
*/
int
ixl_setup_queue_msix(struct ixl_vsi *vsi)
{
device_t dev = vsi->dev;
struct ixl_queue *que = vsi->queues;
struct tx_ring *txr;
int error, rid, vector = 1;
/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
int cpu_id = i;
rid = vector + 1;
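/*
 * SYS_RES_IRQ rids are 1-based: rid 1 belongs to the admin queue
 * vector, so queue vector N maps to rid N + 1.
 */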
txr = &que->txr;
que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (!que->res) {
device_printf(dev, "bus_alloc_resource_any() for"
" Queue %d interrupt failed [rid=%d]\n",
que->me, rid);
return (ENXIO);
}
/* Set the handler function */
error = bus_setup_intr(dev, que->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixl_msix_que, que, &que->tag);
if (error) {
device_printf(dev, "bus_setup_intr() for Queue %d"
" interrupt handler failed, error %d\n",
que->me, error);
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
return (error);
}
error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
if (error) {
device_printf(dev, "bus_describe_intr() for Queue %d"
" interrupt name failed, error %d\n",
que->me, error);
}
/* Bind the vector to a CPU */
#ifdef RSS
cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
error = bus_bind_intr(dev, que->res, cpu_id);
if (error) {
device_printf(dev, "bus_bind_intr() for Queue %d"
" to CPU %d failed, error %d\n",
que->me, cpu_id, error);
}
que->msix = vector;
}
return (0);
}
/*
* When used in a virtualized environment, the PCI BUSMASTER capability may
* not be set, so explicitly set it here; the MSI-X ENABLE bit is rewritten
* separately in ixl_set_msix_enable() so that the host can initialize us
* successfully.
*/
void
ixl_set_busmaster(device_t dev)
{
u16 pci_cmd_word;
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
}
/*
* Rewrite the ENABLE bit in the MSI-X control register
* so that the host can initialize us successfully.
*/
void
ixl_set_msix_enable(device_t dev)
{
int msix_ctrl, rid;
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
}
/*
* Allocate MSI/X vectors from the OS.
* Returns 0 for legacy, 1 for MSI, >1 for MSIX.
*/
int
ixl_init_msix(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
int auto_max_queues;
int rid, want, vectors, queues, available;
#ifdef IXL_IW
int iw_want, iw_vectors;
pf->iw_msix = 0;
#endif
/* Override by tuneable */
if (!pf->enable_msix)
goto no_msix;
/* Ensure proper operation in virtualized environment */
ixl_set_busmaster(dev);
/* First try MSI/X */
rid = PCIR_BAR(IXL_MSIX_BAR);
pf->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (!pf->msix_mem) {
/* May not be enabled */
device_printf(pf->dev,
"Unable to map MSIX table\n");
goto no_msix;
}
available = pci_msix_count(dev);
if (available < 2) {
/* system has msix disabled (0), or only one vector (1) */
bus_release_resource(dev, SYS_RES_MEMORY,
rid, pf->msix_mem);
pf->msix_mem = NULL;
goto no_msix;
}
/* Clamp max number of queues based on:
* - # of MSI-X vectors available
* - # of cpus available
* - # of queues that can be assigned to the LAN VSI
*/
auto_max_queues = min(mp_ncpus, available - 1);
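/* Clamp further to the per-MAC queue limit used here: 128 queue pairs on X722, 64 otherwise */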
if (hw->mac.type == I40E_MAC_X722)
auto_max_queues = min(auto_max_queues, 128);
else
auto_max_queues = min(auto_max_queues, 64);
/* Override with tunable value if tunable is less than autoconfig count */
if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
queues = pf->max_queues;
/* Use autoconfig amount if that's lower */
else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
device_printf(dev, "ixl_max_queues (%d) is too large, using "
"autoconfig amount (%d)...\n",
pf->max_queues, auto_max_queues);
queues = auto_max_queues;
}
/* Limit maximum auto-configured queues to 8 if no user value is set */
else
queues = min(auto_max_queues, 8);
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
/*
** Want one vector (RX/TX pair) per queue
** plus an additional one for the admin queue.
*/
want = queues + 1;
if (want <= available) /* Have enough */
vectors = want;
else {
device_printf(pf->dev,
"MSIX Configuration Problem, "
"%d vectors available but %d wanted!\n",
available, want);
pf->msix_mem = NULL;
goto no_msix; /* Will go to Legacy setup */
}
#ifdef IXL_IW
if (ixl_enable_iwarp) {
/* iWARP wants additional vector for CQP */
iw_want = mp_ncpus + 1;
available -= vectors;
if (available > 0) {
iw_vectors = (available >= iw_want) ?
iw_want : available;
vectors += iw_vectors;
} else
iw_vectors = 0;
}
#endif
ixl_set_msix_enable(dev);
if (pci_alloc_msix(dev, &vectors) == 0) {
device_printf(pf->dev,
"Using MSIX interrupts with %d vectors\n", vectors);
pf->msix = vectors;
#ifdef IXL_IW
if (ixl_enable_iwarp)
pf->iw_msix = iw_vectors;
#endif
pf->vsi.num_queues = queues;
#ifdef RSS
/*
* If we're doing RSS, the number of queues needs to
* match the number of RSS buckets that are configured.
*
* + If there's more queues than RSS buckets, we'll end
* up with queues that get no traffic.
*
* + If there's more RSS buckets than queues, we'll end
* up having multiple RSS buckets map to the same queue,
* so there'll be some contention.
*/
if (queues != rss_getnumbuckets()) {
device_printf(dev,
"%s: queues (%d) != RSS buckets (%d)"
"; performance will be impacted.\n",
__func__, queues, rss_getnumbuckets());
}
#endif
return (vectors);
}
no_msix:
vectors = pci_msi_count(dev);
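/* Without MSI-X there is only a single interrupt vector, so run with one queue */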
pf->vsi.num_queues = 1;
pf->max_queues = 1;
if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
device_printf(pf->dev, "Using an MSI interrupt\n");
else {
vectors = 0;
device_printf(pf->dev, "Using a Legacy interrupt\n");
}
return (vectors);
}
/*
* Configure admin queue/misc interrupt cause registers in hardware.
*/
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 reg;
/* First set up the adminq - vector 0 */
wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
rd32(hw, I40E_PFINT_ICR0); /* read to clear */
reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
I40E_PFINT_ICR0_ENA_GRST_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
* 0x7FF is the end of the queue list.
* This means we won't use MSI-X vector 0 for a queue interrupt
* in MSIX mode.
*/
wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
wr32(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
/*
* Configure queue interrupt cause registers in hardware.
*/
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
u32 reg;
u16 vector = 1;
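/*
 * Each queue vector gets a two-entry interrupt cause list:
 * LNKLSTN(i) starts at RX queue i, whose NEXTQ field points at TX
 * queue i, and the TX entry terminates the list with IXL_QUEUE_EOL.
 */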
for (int i = 0; i < vsi->num_queues; i++, vector++) {
wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
/* First queue type is RX / 0 */
wr32(hw, I40E_PFINT_LNKLSTN(i), i);
reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(i), reg);
reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_TQCTL(i), reg);
}
}
/*
* Configure for MSI single vector operation
*/
void
ixl_configure_legacy(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
struct rx_ring *rxr = &que->rxr;
struct tx_ring *txr = &que->txr;
u32 reg;
/* Configure ITR */
vsi->tx_itr_setting = pf->tx_itr;
wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
vsi->tx_itr_setting);
txr->itr = vsi->tx_itr_setting;
vsi->rx_itr_setting = pf->rx_itr;
wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
vsi->rx_itr_setting);
rxr->itr = vsi->rx_itr_setting;
/* Setup "other" causes */
reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
| I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
| I40E_PFINT_ICR0_ENA_GRST_MASK
| I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
| I40E_PFINT_ICR0_ENA_GPIO_MASK
| I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
| I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
| I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
| I40E_PFINT_ICR0_ENA_VFLR_MASK
| I40E_PFINT_ICR0_ENA_ADMINQ_MASK
;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* No ITR for non-queue interrupts */
wr32(hw, I40E_PFINT_STAT_CTL0,
IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
/* Associate the queue pair to the vector and enable the q int */
reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
| (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
| (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), reg);
reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
| (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
| (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(0), reg);
}
int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
int rid;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
/* Map BAR0 */
rid = PCIR_BAR(0);
pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (!(pf->pci_mem)) {
device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
return (ENXIO);
}
/* Save off the PCI information */
hw->vendor_id = pci_get_vendor(dev);
hw->device_id = pci_get_device(dev);
hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
hw->subsystem_vendor_id =
pci_read_config(dev, PCIR_SUBVEND_0, 2);
hw->subsystem_device_id =
pci_read_config(dev, PCIR_SUBDEV_0, 2);
hw->bus.device = pci_get_slot(dev);
hw->bus.func = pci_get_function(dev);
/* Save off register access information */
pf->osdep.mem_bus_space_tag =
rman_get_bustag(pf->pci_mem);
pf->osdep.mem_bus_space_handle =
rman_get_bushandle(pf->pci_mem);
pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
pf->osdep.flush_reg = I40E_GLGEN_STAT;
pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
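/*
 * Note: hw_addr stores a pointer to the bus-space handle rather than a
 * direct register mapping; register access goes through the bus_space
 * tag/handle saved in the osdep structure above.
 */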
pf->hw.back = &pf->osdep;
return (0);
}
/*
* Teardown and release the admin queue/misc vector
* interrupt.
*/
int
ixl_teardown_adminq_msix(struct ixl_pf *pf)
{
device_t dev = pf->dev;
int rid, error = 0;
if (pf->admvec) /* we are doing MSIX */
rid = pf->admvec + 1;
else
rid = (pf->msix != 0) ? 1 : 0;
if (pf->tag != NULL) {
error = bus_teardown_intr(dev, pf->res, pf->tag);
if (error) {
device_printf(dev, "bus_teardown_intr() for"
" interrupt 0 failed\n");
// return (ENXIO);
}
pf->tag = NULL;
}
if (pf->res != NULL) {
error = bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
if (error) {
device_printf(dev, "bus_release_resource() for"
" interrupt 0 failed [rid=%d]\n", rid);
// return (ENXIO);
}
pf->res = NULL;
}
return (0);
}
int
ixl_teardown_queue_msix(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct ixl_queue *que = vsi->queues;
device_t dev = vsi->dev;
int rid, error = 0;
/* We may get here before stations are setup */
if ((pf->msix < 2) || (que == NULL))
return (0);
/* Release all MSIX queue resources */
for (int i = 0; i < vsi->num_queues; i++, que++) {
rid = que->msix + 1;
if (que->tag != NULL) {
error = bus_teardown_intr(dev, que->res, que->tag);
if (error) {
device_printf(dev, "bus_teardown_intr() for"
" Queue %d interrupt failed\n",
que->me);
// return (ENXIO);
}
que->tag = NULL;
}
if (que->res != NULL) {
error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
if (error) {
device_printf(dev, "bus_release_resource() for"
" Queue %d interrupt failed [rid=%d]\n",
que->me, rid);
// return (ENXIO);
}
que->res = NULL;
}
}
return (0);
}
void
ixl_free_pci_resources(struct ixl_pf *pf)
{
device_t dev = pf->dev;
int memrid;
ixl_teardown_queue_msix(&pf->vsi);
ixl_teardown_adminq_msix(pf);
if (pf->msix > 0)
pci_release_msi(dev);
memrid = PCIR_BAR(IXL_MSIX_BAR);
if (pf->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
memrid, pf->msix_mem);
if (pf->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), pf->pci_mem);
return;
}
void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
/* Display supported media types */
if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
|| phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
}
/*********************************************************************
*
* Setup networking device structure and register an interface.
*
**********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct ifnet *ifp;
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
struct i40e_aq_get_phy_abilities_resp abilities;
enum i40e_status_code aq_error = 0;
INIT_DEBUGOUT("ixl_setup_interface: begin");
ifp = vsi->ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "can not allocate ifnet structure\n");
return (-1);
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
ifp->if_init = ixl_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixl_ioctl;
#if __FreeBSD_version >= 1100036
if_setgetcounterfn(ifp, ixl_get_counter);
#endif
ifp->if_transmit = ixl_mq_start;
ifp->if_qflush = ixl_qflush;
ifp->if_snd.ifq_maxlen = que->num_desc - 2;
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
/* Set TSO limits */
ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
/*
* Tell the upper layer(s) we support long frames.
*/
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_HWCSUM;
ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
ifp->if_capabilities |= IFCAP_TSO;
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
ifp->if_capabilities |= IFCAP_LRO;
/* VLAN capabilties */
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_HWTSO
| IFCAP_VLAN_MTU
| IFCAP_VLAN_HWCSUM;
ifp->if_capenable = ifp->if_capabilities;
/*
** Don't turn this on by default: if vlans are
** created on another pseudo device (e.g. lagg),
** vlan events are not passed through, breaking
** operation, but with HW FILTER off it works.
** If using vlans directly on the ixl driver you
** can enable this and get full hardware tag filtering.
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
*/
ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
ixl_media_status);
aq_error = i40e_aq_get_phy_capabilities(hw,
FALSE, TRUE, &abilities, NULL);
/* May need delay to detect fiber correctly */
if (aq_error == I40E_ERR_UNKNOWN_PHY) {
i40e_msec_delay(200);
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
TRUE, &abilities, NULL);
}
if (aq_error) {
if (aq_error == I40E_ERR_UNKNOWN_PHY)
device_printf(dev, "Unknown PHY type detected!\n");
else
device_printf(dev,
"Error getting supported media types, err %d,"
" AQ error %d\n", aq_error, hw->aq.asq_last_status);
return (0);
}
pf->supported_speeds = abilities.link_speed;
ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
ixl_add_ifmedia(vsi, hw->phy.phy_types);
/* Use autoselect media by default */
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
ether_ifattach(ifp, hw->mac.addr);
return (0);
}
/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct i40e_aqc_get_link_status *status =
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
/* Request link status from adapter */
hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
/* Print out message if an unqualified module is found */
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
device_printf(dev, "Link failed because "
"an unqualified module was detected!\n");
/* Update OS link info */
ixl_update_link_status(pf);
}
/*********************************************************************
*
* Get Firmware Switch configuration
* - this will need to be more robust when more complex
* switch configurations are enabled.
*
**********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = vsi->dev;
struct i40e_aqc_get_switch_config_resp *sw_config;
u8 aq_buf[I40E_AQ_LARGE_BUF];
int ret;
u16 next = 0;
memset(&aq_buf, 0, sizeof(aq_buf));
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
ret = i40e_aq_get_switch_config(hw, sw_config,
sizeof(aq_buf), &next, NULL);
if (ret) {
device_printf(dev, "aq_get_switch_config() failed, error %d,"
" aq_error %d\n", ret, pf->hw.aq.asq_last_status);
return (ret);
}
if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
device_printf(dev,
"Switch config: header reported: %d in structure, %d total\n",
sw_config->header.num_reported, sw_config->header.num_total);
for (int i = 0; i < sw_config->header.num_reported; i++) {
device_printf(dev,
"-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
sw_config->element[i].element_type,
sw_config->element[i].seid,
sw_config->element[i].uplink_seid,
sw_config->element[i].downlink_seid);
}
}
/* Simplified due to a single VSI */
vsi->uplink_seid = sw_config->element[0].uplink_seid;
vsi->downlink_seid = sw_config->element[0].downlink_seid;
vsi->seid = sw_config->element[0].seid;
return (ret);
}
/*********************************************************************
*
* Initialize the VSI: set up the queue contexts (descriptor counts,
* buffer sizes, etc.) and initialize the TX/RX rings through this
* function as well.
*
**********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = vsi->back;
struct ixl_queue *que = vsi->queues;
device_t dev = vsi->dev;
struct i40e_hw *hw = vsi->hw;
struct i40e_vsi_context ctxt;
int tc_queues;
int err = 0;
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = vsi->seid;
if (pf->veb_seid != 0)
ctxt.uplink_seid = pf->veb_seid;
ctxt.pf_num = hw->pf_id;
err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
if (err) {
device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
" aq_error %d\n", err, hw->aq.asq_last_status);
return (err);
}
ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
"get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
"vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
"pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
ctxt.uplink_seid, ctxt.vsi_number,
ctxt.vsis_allocated, ctxt.vsis_unallocated,
ctxt.flags, ctxt.pf_num, ctxt.vf_num,
ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
/*
** Set the queue and traffic class bits
** - when multiple traffic classes are supported
** this will need to be more robust.
*/
ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
/* In contig mode, queue_mapping[0] is the first queue index used by this VSI */
ctxt.info.queue_mapping[0] = 0;
/*
* This VSI will only use traffic class 0; start traffic class 0's
* queue allocation at queue 0, and assign it 2^tc_queues queues (though
* the driver may not use all of them).
*/
tc_queues = bsrl(pf->qtag.num_allocated);
ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
& I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
& I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
/* Set VLAN receive stripping mode */
ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
else
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
#ifdef IXL_IW
/* Set TCP Enable for iWARP capable VSI */
if (ixl_enable_iwarp && pf->iw_enabled) {
ctxt.info.valid_sections |=
htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
}
#endif
/* Save VSI number and info for use later */
vsi->vsi_num = ctxt.vsi_number;
bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
/* Reset VSI statistics */
ixl_vsi_reset_stats(vsi);
vsi->hw_filters_add = 0;
vsi->hw_filters_del = 0;
ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (err) {
device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
" aq_error %d\n", err, hw->aq.asq_last_status);
return (err);
}
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
struct i40e_hmc_obj_txq tctx;
struct i40e_hmc_obj_rxq rctx;
u32 txctl;
u16 size;
/* Setup the HMC TX Context */
size = que->num_desc * sizeof(struct i40e_tx_desc);
memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
tctx.new_context = 1;
tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
tctx.qlen = que->num_desc;
tctx.fc_ena = 0;
tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
/* Enable HEAD writeback */
tctx.head_wb_ena = 1;
tctx.head_wb_addr = txr->dma.pa +
(que->num_desc * sizeof(struct i40e_tx_desc));
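/*
 * The write-back area is the extra u32 that ixl_setup_queue() allocates
 * in the same DMA buffer, immediately past the last TX descriptor.
 */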
tctx.rdylist_act = 0;
err = i40e_clear_lan_tx_queue_context(hw, i);
if (err) {
device_printf(dev, "Unable to clear TX context\n");
break;
}
err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
if (err) {
device_printf(dev, "Unable to set TX context\n");
break;
}
/* Associate the ring with this PF */
txctl = I40E_QTX_CTL_PF_QUEUE;
txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(i), txctl);
ixl_flush(hw);
/* Do ring (re)init */
ixl_init_tx_ring(que);
/* Next setup the HMC RX Context */
if (vsi->max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
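/*
 * The hardware can chain up to rx_buf_chain_len receive buffers per
 * frame, so cap rxmax at the smaller of the interface's max frame size
 * and mbuf_sz * rx_buf_chain_len.
 */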
/* Set up an RX context for the HMC */
memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
/* ignore header split for now */
rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
vsi->max_frame_size : max_rxmax;
rctx.dtype = 0;
rctx.dsize = 1; /* do 32byte descriptors */
rctx.hsplit_0 = 0; /* no HDR split initially */
rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
rctx.qlen = que->num_desc;
rctx.tphrdesc_ena = 1;
rctx.tphwdesc_ena = 1;
rctx.tphdata_ena = 0;
rctx.tphhead_ena = 0;
rctx.lrxqthresh = 2;
rctx.crcstrip = 1;
rctx.l2tsel = 1;
rctx.showiv = 1;
rctx.fc_ena = 0;
rctx.prefena = 1;
err = i40e_clear_lan_rx_queue_context(hw, i);
if (err) {
device_printf(dev,
"Unable to clear RX context %d\n", i);
break;
}
err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
if (err) {
device_printf(dev, "Unable to set RX context %d\n", i);
break;
}
err = ixl_init_rx_ring(que);
if (err) {
device_printf(dev, "Fail in init_rx_ring %d\n", i);
break;
}
#ifdef DEV_NETMAP
/* preserve queue */
if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
struct netmap_adapter *na = NA(vsi->ifp);
struct netmap_kring *kring = &na->rx_rings[i];
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
} else
#endif /* DEV_NETMAP */
wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
}
return (err);
}
/*********************************************************************
*
* Free all VSI structs.
*
**********************************************************************/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct ixl_queue *que = vsi->queues;
/* Free station queues */
if (!vsi->queues)
goto free_filters;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
if (!mtx_initialized(&txr->mtx)) /* uninitialized */
continue;
IXL_TX_LOCK(txr);
ixl_free_que_tx(que);
if (txr->base)
i40e_free_dma_mem(&pf->hw, &txr->dma);
IXL_TX_UNLOCK(txr);
IXL_TX_LOCK_DESTROY(txr);
if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
continue;
IXL_RX_LOCK(rxr);
ixl_free_que_rx(que);
if (rxr->base)
i40e_free_dma_mem(&pf->hw, &rxr->dma);
IXL_RX_UNLOCK(rxr);
IXL_RX_LOCK_DESTROY(rxr);
}
free(vsi->queues, M_DEVBUF);
free_filters:
/* Free VSI filter list */
ixl_free_mac_filters(vsi);
}
void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
struct ixl_mac_filter *f;
while (!SLIST_EMPTY(&vsi->ftl)) {
f = SLIST_FIRST(&vsi->ftl);
SLIST_REMOVE_HEAD(&vsi->ftl, next);
free(f, M_DEVBUF);
}
}
/*
* Fill out fields in queue struct and setup tx/rx memory and structs
*/
static int
ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
int error = 0;
int rsize, tsize;
que->num_desc = pf->ringsz;
que->me = index;
que->vsi = vsi;
txr->que = que;
txr->tail = I40E_QTX_TAIL(que->me);
/* Initialize the TX lock */
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
/* Create the TX descriptor ring */
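/*
 * The ring size reserves an extra u32 past the last descriptor, used
 * as the TX head write-back location, and is rounded up to the
 * descriptor base-address alignment (DBA_ALIGN).
 */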
tsize = roundup2((que->num_desc *
sizeof(struct i40e_tx_desc)) +
sizeof(u32), DBA_ALIGN);
if (i40e_allocate_dma_mem(hw,
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
device_printf(dev,
"Unable to allocate TX Descriptor memory\n");
error = ENOMEM;
goto fail;
}
txr->base = (struct i40e_tx_desc *)txr->dma.va;
bzero((void *)txr->base, tsize);
/* Now allocate transmit soft structs for the ring */
if (ixl_allocate_tx_data(que)) {
device_printf(dev,
"Critical Failure setting up TX structures\n");
error = ENOMEM;
goto fail;
}
/* Allocate a buf ring */
txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
M_NOWAIT, &txr->mtx);
if (txr->br == NULL) {
device_printf(dev,
"Critical Failure setting up TX buf ring\n");
error = ENOMEM;
goto fail;
}
rsize = roundup2(que->num_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
rxr->que = que;
rxr->tail = I40E_QRX_TAIL(que->me);
/* Initialize the RX side lock */
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
if (i40e_allocate_dma_mem(hw,
&rxr->dma, i40e_mem_reserved, rsize, 4096)) {
device_printf(dev,
"Unable to allocate RX Descriptor memory\n");
error = ENOMEM;
goto fail;
}
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
/* Allocate receive soft structs for the ring*/
if (ixl_allocate_rx_data(que)) {
device_printf(dev,
"Critical Failure setting up receive structs\n");
error = ENOMEM;
goto fail;
}
return (0);
fail:
if (rxr->base)
i40e_free_dma_mem(&pf->hw, &rxr->dma);
if (mtx_initialized(&rxr->mtx))
mtx_destroy(&rxr->mtx);
if (txr->br) {
buf_ring_free(txr->br, M_DEVBUF);
txr->br = NULL;
}
if (txr->base)
i40e_free_dma_mem(&pf->hw, &txr->dma);
if (mtx_initialized(&txr->mtx))
mtx_destroy(&txr->mtx);
return (error);
}
/*********************************************************************
*
* Allocate memory for the VSI (virtual station interface) and their
* associated queues, rings and the descriptors associated with each,
* called only once at attach.
*
**********************************************************************/
int
ixl_setup_stations(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct ixl_vsi *vsi;
struct ixl_queue *que;
int error = 0;
vsi = &pf->vsi;
vsi->back = (void *)pf;
vsi->hw = &pf->hw;
vsi->id = 0;
vsi->num_vlans = 0;
vsi->back = pf;
/* Get memory for the station queues */
if (!(vsi->queues =
- (struct ixl_queue *) mallocarray(vsi->num_queues,
- sizeof(struct ixl_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+ vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate queue memory\n");
error = ENOMEM;
return (error);
}
/* Then setup each queue */
for (int i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
error = ixl_setup_queue(que, pf, i);
if (error)
return (error);
}
return (0);
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_queue *que)
{
struct ixl_vsi *vsi = que->vsi;
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
struct rx_ring *rxr = &que->rxr;
u16 rx_itr;
u16 rx_latency = 0;
int rx_bytes;
/* Idle, do nothing */
if (rxr->bytes == 0)
return;
if (pf->dynamic_rx_itr) {
rx_bytes = rxr->bytes/rxr->itr;
rx_itr = rxr->itr;
/* Adjust latency range */
switch (rxr->latency) {
case IXL_LOW_LATENCY:
if (rx_bytes > 10) {
rx_latency = IXL_AVE_LATENCY;
rx_itr = IXL_ITR_20K;
}
break;
case IXL_AVE_LATENCY:
if (rx_bytes > 20) {
rx_latency = IXL_BULK_LATENCY;
rx_itr = IXL_ITR_8K;
} else if (rx_bytes <= 10) {
rx_latency = IXL_LOW_LATENCY;
rx_itr = IXL_ITR_100K;
}
break;
case IXL_BULK_LATENCY:
if (rx_bytes <= 20) {
rx_latency = IXL_AVE_LATENCY;
rx_itr = IXL_ITR_20K;
}
break;
}
rxr->latency = rx_latency;
if (rx_itr != rxr->itr) {
/* do an exponential smoothing */
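/*
 * Blend the newly selected target ITR with the current value:
 * new = (10 * target * current) / (9 * target + current),
 * which moves the interval gradually toward the target instead
 * of jumping there in one step.
 */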
rx_itr = (10 * rx_itr * rxr->itr) /
((9 * rx_itr) + rxr->itr);
rxr->itr = min(rx_itr, IXL_MAX_ITR);
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
que->me), rxr->itr);
}
} else { /* We may have toggled to non-dynamic */
if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
vsi->rx_itr_setting = pf->rx_itr;
/* Update the hardware if needed */
if (rxr->itr != vsi->rx_itr_setting) {
rxr->itr = vsi->rx_itr_setting;
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
que->me), rxr->itr);
}
}
rxr->bytes = 0;
rxr->packets = 0;
return;
}
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_queue *que)
{
struct ixl_vsi *vsi = que->vsi;
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
u16 tx_itr;
u16 tx_latency = 0;
int tx_bytes;
/* Idle, do nothing */
if (txr->bytes == 0)
return;
if (pf->dynamic_tx_itr) {
tx_bytes = txr->bytes/txr->itr;
tx_itr = txr->itr;
switch (txr->latency) {
case IXL_LOW_LATENCY:
if (tx_bytes > 10) {
tx_latency = IXL_AVE_LATENCY;
tx_itr = IXL_ITR_20K;
}
break;
case IXL_AVE_LATENCY:
if (tx_bytes > 20) {
tx_latency = IXL_BULK_LATENCY;
tx_itr = IXL_ITR_8K;
} else if (tx_bytes <= 10) {
tx_latency = IXL_LOW_LATENCY;
tx_itr = IXL_ITR_100K;
}
break;
case IXL_BULK_LATENCY:
if (tx_bytes <= 20) {
tx_latency = IXL_AVE_LATENCY;
tx_itr = IXL_ITR_20K;
}
break;
}
txr->latency = tx_latency;
if (tx_itr != txr->itr) {
/* do an exponential smoothing */
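/* Same weighted blend as the RX path: new = (10*target*cur)/(9*target+cur). */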
tx_itr = (10 * tx_itr * txr->itr) /
((9 * tx_itr) + txr->itr);
txr->itr = min(tx_itr, IXL_MAX_ITR);
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
que->me), txr->itr);
}
} else { /* We may have toggled to non-dynamic */
if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
vsi->tx_itr_setting = pf->tx_itr;
/* Update the hardware if needed */
if (txr->itr != vsi->tx_itr_setting) {
txr->itr = vsi->tx_itr_setting;
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
que->me), txr->itr);
}
}
txr->bytes = 0;
txr->packets = 0;
return;
}
void
ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
struct sysctl_ctx_list *ctx, const char *sysctl_name)
{
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
struct sysctl_oid_list *vsi_list;
tree = device_get_sysctl_tree(pf->dev);
child = SYSCTL_CHILDREN(tree);
vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
CTLFLAG_RD, NULL, "VSI Number");
vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
}
#ifdef IXL_DEBUG
/**
* ixl_sysctl_qtx_tail_handler
* Retrieves I40E_QTX_TAIL value from hardware
* for a sysctl.
*/
static int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
int error;
u32 val;
que = ((struct ixl_queue *)oidp->oid_arg1);
if (!que) return 0;
val = rd32(que->vsi->hw, que->txr.tail);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
return (0);
}
/**
* ixl_sysctl_qrx_tail_handler
* Retrieves I40E_QRX_TAIL value from hardware
* for a sysctl.
*/
static int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
int error;
u32 val;
que = ((struct ixl_queue *)oidp->oid_arg1);
if (!que) return 0;
val = rd32(que->vsi->hw, que->rxr.tail);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
return (0);
}
#endif
/*
* Used to set the Tx ITR value for all of the PF LAN VSI's queues.
* Writes to the ITR registers immediately.
*/
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
device_t dev = pf->dev;
int error = 0;
int requested_tx_itr;
requested_tx_itr = pf->tx_itr;
error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if (pf->dynamic_tx_itr) {
device_printf(dev,
"Cannot set TX itr value while dynamic TX itr is enabled\n");
return (EINVAL);
}
if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
device_printf(dev,
"Invalid TX itr value; value must be between 0 and %d\n",
IXL_MAX_ITR);
return (EINVAL);
}
pf->tx_itr = requested_tx_itr;
ixl_configure_tx_itr(pf);
return (error);
}
/*
* Used to set the Rx ITR value for all of the PF LAN VSI's queues.
* Writes to the ITR registers immediately.
*/
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
device_t dev = pf->dev;
int error = 0;
int requested_rx_itr;
requested_rx_itr = pf->rx_itr;
error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if (pf->dynamic_rx_itr) {
device_printf(dev,
"Cannot set RX itr value while dynamic RX itr is enabled\n");
return (EINVAL);
}
if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
device_printf(dev,
"Invalid RX itr value; value must be between 0 and %d\n",
IXL_MAX_ITR);
return (EINVAL);
}
pf->rx_itr = requested_rx_itr;
ixl_configure_rx_itr(pf);
return (error);
}
void
ixl_add_hw_stats(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *queues = vsi->queues;
struct i40e_hw_port_stats *pf_stats = &pf->stats;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct sysctl_oid_list *vsi_list;
struct sysctl_oid *queue_node;
struct sysctl_oid_list *queue_list;
struct tx_ring *txr;
struct rx_ring *rxr;
char queue_namebuf[QUEUE_NAME_LEN];
/* Driver statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &pf->watchdog_events,
"Watchdog timeouts");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &pf->admin_irq,
"Admin Queue IRQ Handled");
ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
/* Queue statistics */
for (int q = 0; q < vsi->num_queues; q++) {
snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
queue_list = SYSCTL_CHILDREN(queue_node);
txr = &(queues[q].txr);
rxr = &(queues[q].rxr);
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
"m_defrag() failed");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(queues[q].irqs),
"irqs on this queue");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &(queues[q].tso),
"TSO");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
"Driver tx dma failure in xmit");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
CTLFLAG_RD, &(queues[q].mss_too_small),
"TSO sends with an MSS less than 64");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
"Queue No Descriptor Available");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &(txr->total_packets),
"Queue Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
CTLFLAG_RD, &(txr->tx_bytes),
"Queue Bytes Transmitted");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &(rxr->rx_packets),
"Queue Packets Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
CTLFLAG_RD, &(rxr->desc_errs),
"Queue Rx Descriptor Errors");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
CTLFLAG_RD, &(rxr->itr), 0,
"Queue Rx ITR Interval");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
#ifdef IXL_DEBUG
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
CTLFLAG_RD, &(rxr->not_done),
"Queue Rx Descriptors not Done");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
CTLFLAG_RD, &(rxr->next_refresh), 0,
"Queue Rx Descriptors not Done");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
CTLFLAG_RD, &(rxr->next_check), 0,
"Queue Rx Descriptors not Done");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixl_sysctl_qtx_tail_handler, "IU",
"Queue Transmit Descriptor Tail");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixl_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
#endif
}
/* MAC stats */
ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}
void
ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_eth_stats *eth_stats)
{
struct ixl_sysctl_info ctls[] =
{
{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
"Unicast Packets Received"},
{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
"Multicast Packets Received"},
{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
"Broadcast Packets Received"},
{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
{&eth_stats->tx_multicast, "mcast_pkts_txd",
"Multicast Packets Transmitted"},
{&eth_stats->tx_broadcast, "bcast_pkts_txd",
"Broadcast Packets Transmitted"},
// end
{0,0,0}
};
struct ixl_sysctl_info *entry = ctls;
while (entry->stat != 0)
{
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
CTLFLAG_RD, entry->stat,
entry->description);
entry++;
}
}
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_hw_port_stats *stats)
{
struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
CTLFLAG_RD, NULL, "Mac Statistics");
struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
struct i40e_eth_stats *eth_stats = &stats->eth;
ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
struct ixl_sysctl_info ctls[] =
{
{&stats->crc_errors, "crc_errors", "CRC Errors"},
{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
/* Packet Reception Stats */
{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
/* Packet Transmission Stats */
{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
/* Flow control */
{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* End */
{0,0,0}
};
struct ixl_sysctl_info *entry = ctls;
while (entry->stat != 0)
{
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
CTLFLAG_RD, entry->stat,
entry->description);
entry++;
}
}
void
ixl_set_rss_key(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
enum i40e_status_code status;
#ifdef RSS
u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#else
u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c,
0x35897377, 0x328b25e1, 0x4fa98922,
0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
0x0, 0x0, 0x0};
#endif
#ifdef RSS
/* Fetch the configured RSS key */
rss_getkey((uint8_t *) &rss_seed);
#endif
/* Fill out hash function seed */
if (hw->mac.type == I40E_MAC_X722) {
struct i40e_aqc_get_set_rss_key_data key_data;
bcopy(rss_seed, key_data.standard_rss_key, 40);
status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
if (status)
device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
}
}
/*
* Configure enabled PCTYPES for RSS.
*/
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u64 set_hena = 0, hena;
#ifdef RSS
u32 rss_hash_config;
rss_hash_config = rss_gethashconfig();
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
if (hw->mac.type == I40E_MAC_X722)
set_hena = IXL_DEFAULT_RSS_HENA_X722;
else
set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
hena |= set_hena;
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}
void
ixl_set_rss_hlut(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct ixl_vsi *vsi = &pf->vsi;
int i, que_id;
int lut_entry_width;
u32 lut = 0;
enum i40e_status_code status;
if (hw->mac.type == I40E_MAC_X722)
lut_entry_width = 7;
else
lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
/* Populate the LUT with the configured number of queues in a round-robin fashion */
u8 hlut_buf[512];
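/*
 * Each LUT entry maps a hash bucket to a VSI queue index; entries are
 * masked to lut_entry_width bits before being written out.
 */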
for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
#ifdef RSS
/*
* Fetch the RSS bucket id for the given indirection entry.
* Cap it at the number of configured buckets (which is
* num_queues.)
*/
que_id = rss_get_indirection_to_bucket(i);
que_id = que_id % vsi->num_queues;
#else
que_id = i % vsi->num_queues;
#endif
lut = (que_id & ((0x1 << lut_entry_width) - 1));
hlut_buf[i] = lut;
}
if (hw->mac.type == I40E_MAC_X722) {
status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
if (status)
device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
ixl_flush(hw);
}
}
/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
ixl_set_rss_key(pf);
ixl_set_rss_pctypes(pf);
ixl_set_rss_hlut(pf);
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
void
ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct i40e_hw *hw = vsi->hw;
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
if (ifp->if_softc != arg) /* Not our event */
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
IXL_PF_LOCK(pf);
++vsi->num_vlans;
ixl_add_filter(vsi, hw->mac.addr, vtag);
IXL_PF_UNLOCK(pf);
}
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
void
ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct i40e_hw *hw = vsi->hw;
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
if (ifp->if_softc != arg)
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
IXL_PF_LOCK(pf);
--vsi->num_vlans;
ixl_del_filter(vsi, hw->mac.addr, vtag);
IXL_PF_UNLOCK(pf);
}
/*
** This routine updates vlan filters; called by init,
** it scans the filter table and then updates the hw
** after a soft reset.
*/
void
ixl_setup_vlan_filters(struct ixl_vsi *vsi)
{
struct ixl_mac_filter *f;
int cnt = 0, flags;
if (vsi->num_vlans == 0)
return;
/*
** Scan the filter list for vlan entries,
** mark them for addition and then call
** for the AQ update.
*/
SLIST_FOREACH(f, &vsi->ftl, next) {
if (f->flags & IXL_FILTER_VLAN) {
f->flags |=
(IXL_FILTER_ADD |
IXL_FILTER_USED);
cnt++;
}
}
if (cnt == 0) {
printf("setup vlan: no filters found!\n");
return;
}
flags = IXL_FILTER_VLAN;
flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
ixl_add_hw_filters(vsi, flags, cnt);
return;
}
/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's filter list & seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
/* Add broadcast address */
ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
/*
* Prevent Tx flow control frames from being sent out by
* non-firmware transmitters.
* This affects every VSI in the PF.
*/
if (pf->enable_tx_fc_filter)
i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
}
/*
** This routine adds multicast filters
*/
void
ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
{
struct ixl_mac_filter *f;
/* Does one already exist */
f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
if (f != NULL)
return;
f = ixl_get_filter(vsi);
if (f == NULL) {
printf("WARNING: no filter available!!\n");
return;
}
bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
f->vlan = IXL_VLAN_ANY;
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
| IXL_FILTER_MC);
return;
}
void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
}
/*
** This routine adds macvlan filters
*/
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f, *tmp;
struct ixl_pf *pf;
device_t dev;
DEBUGOUT("ixl_add_filter: begin");
pf = vsi->back;
dev = pf->dev;
/* Does one already exist */
f = ixl_find_filter(vsi, macaddr, vlan);
if (f != NULL)
return;
/*
** Is this the first vlan being registered, if so we
** need to remove the ANY filter that indicates we are
** not in a vlan, and replace that with a 0 filter.
*/
if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
if (tmp != NULL) {
ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
ixl_add_filter(vsi, macaddr, 0);
}
}
f = ixl_get_filter(vsi);
if (f == NULL) {
device_printf(dev, "WARNING: no filter available!!\n");
return;
}
bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
f->vlan = vlan;
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
if (f->vlan != IXL_VLAN_ANY)
f->flags |= IXL_FILTER_VLAN;
else
vsi->num_macs++;
ixl_add_hw_filters(vsi, f->flags, 1);
return;
}
void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f;
f = ixl_find_filter(vsi, macaddr, vlan);
if (f == NULL)
return;
f->flags |= IXL_FILTER_DEL;
ixl_del_hw_filters(vsi, 1);
vsi->num_macs--;
/* Check if this is the last vlan removal */
if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
/* Switch back to a non-vlan filter */
ixl_del_filter(vsi, macaddr, 0);
ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}
return;
}
/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f;
bool match = FALSE;
SLIST_FOREACH(f, &vsi->ftl, next) {
if (!cmp_etheraddr(f->macaddr, macaddr))
continue;
if (f->vlan == vlan) {
match = TRUE;
break;
}
}
if (!match)
f = NULL;
return (f);
}
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
struct i40e_aqc_add_macvlan_element_data *a, *b;
struct ixl_mac_filter *f;
struct ixl_pf *pf;
struct i40e_hw *hw;
device_t dev;
int err, j = 0;
pf = vsi->back;
dev = pf->dev;
hw = &pf->hw;
IXL_PF_LOCK_ASSERT(pf);
- a = mallocarray(cnt, sizeof(struct i40e_aqc_add_macvlan_element_data),
+ a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (a == NULL) {
device_printf(dev, "add_hw_filters failed to get memory\n");
return;
}
/*
** Scan the filter list, each time we find one
** we add it to the admin queue array and turn off
** the add bit.
*/
SLIST_FOREACH(f, &vsi->ftl, next) {
if (f->flags == flags) {
b = &a[j]; // a pox on fvl long names :)
bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
if (f->vlan == IXL_VLAN_ANY) {
b->vlan_tag = 0;
b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
} else {
b->vlan_tag = f->vlan;
b->flags = 0;
}
b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
f->flags &= ~IXL_FILTER_ADD;
j++;
}
if (j == cnt)
break;
}
if (j > 0) {
err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
if (err)
device_printf(dev, "aq_add_macvlan err %d, "
"aq_error %d\n", err, hw->aq.asq_last_status);
else
vsi->hw_filters_add += j;
}
free(a, M_DEVBUF);
return;
}
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
struct i40e_aqc_remove_macvlan_element_data *d, *e;
struct ixl_pf *pf;
struct i40e_hw *hw;
device_t dev;
struct ixl_mac_filter *f, *f_temp;
int err, j = 0;
DEBUGOUT("ixl_del_hw_filters: begin\n");
pf = vsi->back;
hw = &pf->hw;
dev = pf->dev;
- d = mallocarray(cnt,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
+ d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (d == NULL) {
printf("del hw filter failed to get memory\n");
return;
}
SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
if (f->flags & IXL_FILTER_DEL) {
e = &d[j]; // a pox on fvl long names :)
bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
/* delete entry from vsi list */
SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
free(f, M_DEVBUF);
j++;
}
if (j == cnt)
break;
}
if (j > 0) {
err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
int sc = 0;
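/*
 * The firmware reports a per-entry error code; count the entries
 * that were actually removed (error_code == 0) so the filter
 * accounting only credits successful deletions.
 */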
for (int i = 0; i < j; i++)
sc += (!d[i].error_code);
vsi->hw_filters_del += sc;
device_printf(dev,
"Failed to remove %d/%d filters, aq error %d\n",
j - sc, j, hw->aq.asq_last_status);
} else
vsi->hw_filters_del += j;
}
free(d, M_DEVBUF);
DEBUGOUT("ixl_del_hw_filters: end\n");
return;
}
int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
struct i40e_hw *hw = &pf->hw;
int error = 0;
u32 reg;
u16 pf_qidx;
pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
ixl_dbg(pf, IXL_DBG_EN_DIS,
"Enabling PF TX ring %4d / VSI TX ring %4d...\n",
pf_qidx, vsi_qidx);
i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
reg |= I40E_QTX_ENA_QENA_REQ_MASK |
I40E_QTX_ENA_QENA_STAT_MASK;
wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
/* Verify the enable took */
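/* Poll the queue-enable status bit up to 10 times, 10 ms apart. */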
for (int j = 0; j < 10; j++) {
reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
break;
i40e_msec_delay(10);
}
if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
device_printf(pf->dev, "TX queue %d still disabled!\n",
pf_qidx);
error = ETIMEDOUT;
}
return (error);
}
int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
struct i40e_hw *hw = &pf->hw;
int error = 0;
u32 reg;
u16 pf_qidx;
pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
ixl_dbg(pf, IXL_DBG_EN_DIS,
"Enabling PF RX ring %4d / VSI RX ring %4d...\n",
pf_qidx, vsi_qidx);
reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
reg |= I40E_QRX_ENA_QENA_REQ_MASK |
I40E_QRX_ENA_QENA_STAT_MASK;
wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
/* Verify the enable took */
for (int j = 0; j < 10; j++) {
reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
break;
i40e_msec_delay(10);
}
if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
device_printf(pf->dev, "RX queue %d still disabled!\n",
pf_qidx);
error = ETIMEDOUT;
}
return (error);
}
int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
int error = 0;
error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
/* Called function already prints error message */
if (error)
return (error);
error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
return (error);
}
/* For PF VSI only */
int
ixl_enable_rings(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = vsi->back;
int error = 0;
for (int i = 0; i < vsi->num_queues; i++) {
error = ixl_enable_ring(pf, &pf->qtag, i);
if (error)
return (error);
}
return (error);
}
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
struct i40e_hw *hw = &pf->hw;
int error = 0;
u32 reg;
u16 pf_qidx;
pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
i40e_usec_delay(500);
reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
/* Verify the disable took */
for (int j = 0; j < 10; j++) {
reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
break;
i40e_msec_delay(10);
}
if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
device_printf(pf->dev, "TX queue %d still enabled!\n",
pf_qidx);
error = ETIMEDOUT;
}
return (error);
}
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
struct i40e_hw *hw = &pf->hw;
int error = 0;
u32 reg;
u16 pf_qidx;
pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
/* Verify the disable took */
for (int j = 0; j < 10; j++) {
reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
break;
i40e_msec_delay(10);
}
if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
device_printf(pf->dev, "RX queue %d still enabled!\n",
pf_qidx);
error = ETIMEDOUT;
}
return (error);
}
int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
int error = 0;
error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
/* Called function already prints error message */
if (error)
return (error);
error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
return (error);
}
/* For PF VSI only */
int
ixl_disable_rings(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = vsi->back;
int error = 0;
for (int i = 0; i < vsi->num_queues; i++) {
error = ixl_disable_ring(pf, &pf->qtag, i);
if (error)
return (error);
}
return (error);
}
/**
* ixl_handle_mdd_event
*
* Called from the interrupt handler to identify possibly malicious VFs
* (it also detects events from the PF)
**/
void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
bool mdd_detected = false;
bool pf_mdd_detected = false;
u32 reg;
/* find what triggered the MDD event */
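/*
 * The global MDET registers encode which PF, event type and queue
 * triggered the event; extract each field with its mask and shift,
 * then write all ones to clear the register.
 */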
reg = rd32(hw, I40E_GL_MDET_TX);
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
I40E_GL_MDET_TX_PF_NUM_SHIFT;
u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
I40E_GL_MDET_TX_EVENT_SHIFT;
u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT;
device_printf(dev,
"Malicious Driver Detection event %d"
" on TX queue %d, pf number %d\n",
event, queue, pf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
}
reg = rd32(hw, I40E_GL_MDET_RX);
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
I40E_GL_MDET_RX_FUNCTION_SHIFT;
u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
I40E_GL_MDET_RX_EVENT_SHIFT;
u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT;
device_printf(dev,
"Malicious Driver Detection event %d"
" on RX queue %d, pf number %d\n",
event, queue, pf_num);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
}
if (mdd_detected) {
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
device_printf(dev,
"MDD TX event is for this function!");
pf_mdd_detected = true;
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
device_printf(dev,
"MDD RX event is for this function!");
pf_mdd_detected = true;
}
}
/* re-enable mdd interrupt cause */
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
ixl_flush(hw);
}
void
ixl_enable_intr(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
if (pf->msix > 1) {
for (int i = 0; i < vsi->num_queues; i++, que++)
ixl_enable_queue(hw, que->me);
} else
ixl_enable_intr0(hw);
}
void
ixl_disable_rings_intr(struct ixl_vsi *vsi)
{
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
for (int i = 0; i < vsi->num_queues; i++, que++)
ixl_disable_queue(hw, que->me);
}
void
ixl_enable_intr0(struct i40e_hw *hw)
{
u32 reg;
/* Use IXL_ITR_NONE so ITR isn't updated here */
reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
(IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}
void
ixl_disable_intr0(struct i40e_hw *hw)
{
u32 reg;
reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
wr32(hw, I40E_PFINT_DYN_CTL0, reg);
ixl_flush(hw);
}
void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
u32 reg;
reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}
void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
u32 reg;
reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}
void
ixl_update_stats_counters(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_vf *vf;
struct i40e_hw_port_stats *nsd = &pf->stats;
struct i40e_hw_port_stats *osd = &pf->stats_offsets;
/* Update hw stats */
ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
pf->stat_offsets_loaded,
&osd->crc_errors, &nsd->crc_errors);
ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
pf->stat_offsets_loaded,
&osd->illegal_bytes, &nsd->illegal_bytes);
ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
I40E_GLPRT_GORCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_bytes, &nsd->eth.rx_bytes);
ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
I40E_GLPRT_GOTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_bytes, &nsd->eth.tx_bytes);
ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_discards,
&nsd->eth.rx_discards);
ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
I40E_GLPRT_UPRCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_unicast,
&nsd->eth.rx_unicast);
ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
I40E_GLPRT_UPTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_unicast,
&nsd->eth.tx_unicast);
ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
I40E_GLPRT_MPRCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_multicast,
&nsd->eth.rx_multicast);
ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
I40E_GLPRT_MPTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_multicast,
&nsd->eth.tx_multicast);
ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
I40E_GLPRT_BPRCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_broadcast,
&nsd->eth.rx_broadcast);
ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
I40E_GLPRT_BPTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_broadcast,
&nsd->eth.tx_broadcast);
ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
pf->stat_offsets_loaded,
&osd->tx_dropped_link_down,
&nsd->tx_dropped_link_down);
ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
pf->stat_offsets_loaded,
&osd->mac_local_faults,
&nsd->mac_local_faults);
ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
pf->stat_offsets_loaded,
&osd->mac_remote_faults,
&nsd->mac_remote_faults);
ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_length_errors,
&nsd->rx_length_errors);
/* Flow control (LFC) stats */
ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
pf->stat_offsets_loaded,
&osd->link_xon_rx, &nsd->link_xon_rx);
ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
pf->stat_offsets_loaded,
&osd->link_xon_tx, &nsd->link_xon_tx);
ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
pf->stat_offsets_loaded,
&osd->link_xoff_rx, &nsd->link_xoff_rx);
ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
pf->stat_offsets_loaded,
&osd->link_xoff_tx, &nsd->link_xoff_tx);
/* Packet size stats rx */
ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
I40E_GLPRT_PRC64L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_64, &nsd->rx_size_64);
ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
I40E_GLPRT_PRC127L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_127, &nsd->rx_size_127);
ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
I40E_GLPRT_PRC255L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_255, &nsd->rx_size_255);
ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
I40E_GLPRT_PRC511L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_511, &nsd->rx_size_511);
ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
I40E_GLPRT_PRC1023L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_1023, &nsd->rx_size_1023);
ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
I40E_GLPRT_PRC1522L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_1522, &nsd->rx_size_1522);
ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
I40E_GLPRT_PRC9522L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_big, &nsd->rx_size_big);
/* Packet size stats tx */
ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
I40E_GLPRT_PTC64L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_64, &nsd->tx_size_64);
ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
I40E_GLPRT_PTC127L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_127, &nsd->tx_size_127);
ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
I40E_GLPRT_PTC255L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_255, &nsd->tx_size_255);
ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
I40E_GLPRT_PTC511L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_511, &nsd->tx_size_511);
ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
I40E_GLPRT_PTC1023L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_1023, &nsd->tx_size_1023);
ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
I40E_GLPRT_PTC1522L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_1522, &nsd->tx_size_1522);
ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
I40E_GLPRT_PTC9522L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_big, &nsd->tx_size_big);
ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_undersize, &nsd->rx_undersize);
ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_fragments, &nsd->rx_fragments);
ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_oversize, &nsd->rx_oversize);
ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_jabber, &nsd->rx_jabber);
pf->stat_offsets_loaded = true;
/* End hw stats */
/* Update vsi stats */
ixl_update_vsi_stats(vsi);
for (int i = 0; i < pf->num_vfs; i++) {
vf = &pf->vfs[i];
if (vf->vf_flags & VF_FLAG_ENABLED)
ixl_update_eth_stats(&pf->vfs[i].vsi);
}
}
int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
bool is_up = false;
int error = 0;
is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
/* Teardown */
if (is_up)
ixl_stop(pf);
error = i40e_shutdown_lan_hmc(hw);
if (error)
device_printf(dev,
"Shutdown LAN HMC failed with code %d\n", error);
ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
error = i40e_shutdown_adminq(hw);
if (error)
device_printf(dev,
"Shutdown Admin queue failed with code %d\n", error);
/* Setup */
error = i40e_init_adminq(hw);
if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
error);
}
error = ixl_setup_adminq_msix(pf);
if (error) {
device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
error);
}
ixl_configure_intr0_msix(pf);
ixl_enable_intr0(hw);
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
device_printf(dev, "init_lan_hmc failed: %d\n", error);
}
error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (error) {
device_printf(dev, "configure_lan_hmc failed: %d\n", error);
}
if (is_up)
ixl_init(pf);
return (0);
}
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
int count = 0;
u32 reg;
/* Typically finishes within 3-4 seconds */
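/* Poll the device reset state up to 100 times, 100 ms apart (10 s cap). */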
while (count++ < 100) {
reg = rd32(hw, I40E_GLGEN_RSTAT)
& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
if (reg)
i40e_msec_delay(100);
else
break;
}
ixl_dbg(pf, IXL_DBG_INFO,
"EMPR reset wait count: %d\n", count);
device_printf(dev, "Rebuilding driver state...\n");
ixl_rebuild_hw_structs_after_reset(pf);
device_printf(dev, "Rebuilding driver state done.\n");
atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
}
/*
** Tasklet handler for MSIX Adminq interrupts
** - do outside interrupt since it might sleep
*/
void
ixl_do_adminq(void *context, int pending)
{
struct ixl_pf *pf = context;
struct i40e_hw *hw = &pf->hw;
struct i40e_arq_event_info event;
i40e_status ret;
device_t dev = pf->dev;
u32 loop = 0;
u16 opcode, result;
if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
/* Flag cleared at end of this function */
ixl_handle_empr_reset(pf);
return;
}
/* Admin Queue handling */
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!event.msg_buf) {
device_printf(dev, "%s: Unable to allocate memory for Admin"
" Queue event!\n", __func__);
return;
}
IXL_PF_LOCK(pf);
/* clean and process any events */
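/*
 * i40e_clean_arq_element() reports the number of events still pending
 * in 'result'; keep looping until the queue is drained or the
 * per-pass limit is reached.
 */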
do {
ret = i40e_clean_arq_element(hw, &event, &result);
if (ret)
break;
opcode = LE16_TO_CPU(event.desc.opcode);
ixl_dbg(pf, IXL_DBG_AQ,
"Admin Queue event: %#06x\n", opcode);
switch (opcode) {
case i40e_aqc_opc_get_link_status:
ixl_link_event(pf, &event);
break;
case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
ixl_handle_vf_msg(pf, &event);
#endif
break;
case i40e_aqc_opc_event_lan_overflow:
default:
break;
}
} while (result && (loop++ < IXL_ADM_LIMIT));
free(event.msg_buf, M_DEVBUF);
/*
* If there are still messages to process, reschedule ourselves.
* Otherwise, re-enable our interrupt.
*/
if (result > 0)
taskqueue_enqueue(pf->tq, &pf->adminq);
else
ixl_enable_intr0(hw);
IXL_PF_UNLOCK(pf);
}
/**
* Update VSI-specific ethernet statistics counters.
**/
void
ixl_update_eth_stats(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_eth_stats *es;
struct i40e_eth_stats *oes;
struct i40e_hw_port_stats *nsd;
u16 stat_idx = vsi->info.stat_counter_idx;
es = &vsi->eth_stats;
oes = &vsi->eth_stats_offsets;
nsd = &pf->stats;
/* Gather up the stats that the hw collects */
ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_errors, &es->tx_errors);
ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_discards, &es->rx_discards);
ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
I40E_GLV_GORCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
I40E_GLV_UPRCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_unicast, &es->rx_unicast);
ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
I40E_GLV_MPRCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_multicast, &es->rx_multicast);
ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
I40E_GLV_BPRCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_broadcast, &es->rx_broadcast);
ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
I40E_GLV_GOTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_bytes, &es->tx_bytes);
ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
I40E_GLV_UPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_unicast, &es->tx_unicast);
ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
I40E_GLV_MPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_multicast, &es->tx_multicast);
ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
I40E_GLV_BPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
vsi->stat_offsets_loaded = true;
}
void
ixl_update_vsi_stats(struct ixl_vsi *vsi)
{
struct ixl_pf *pf;
struct ifnet *ifp;
struct i40e_eth_stats *es;
u64 tx_discards;
struct i40e_hw_port_stats *nsd;
pf = vsi->back;
ifp = vsi->ifp;
es = &vsi->eth_stats;
nsd = &pf->stats;
ixl_update_eth_stats(vsi);
tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
for (int i = 0; i < vsi->num_queues; i++)
tx_discards += vsi->queues[i].txr.br->br_drops;
/* Update ifnet stats */
IXL_SET_IPACKETS(vsi, es->rx_unicast +
es->rx_multicast +
es->rx_broadcast);
IXL_SET_OPACKETS(vsi, es->tx_unicast +
es->tx_multicast +
es->tx_broadcast);
IXL_SET_IBYTES(vsi, es->rx_bytes);
IXL_SET_OBYTES(vsi, es->tx_bytes);
IXL_SET_IMCASTS(vsi, es->rx_multicast);
IXL_SET_OMCASTS(vsi, es->tx_multicast);
IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
nsd->rx_jabber);
IXL_SET_OERRORS(vsi, es->tx_errors);
IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
IXL_SET_OQDROPS(vsi, tx_discards);
IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
IXL_SET_COLLISIONS(vsi, 0);
}
/**
* Reset all of the stats for the given pf
**/
void
ixl_pf_reset_stats(struct ixl_pf *pf)
{
bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
pf->stat_offsets_loaded = false;
}
/**
* Resets all stats of the given vsi
**/
void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
vsi->stat_offsets_loaded = false;
}
/**
* Read and update a 48 bit stat from the hw
*
* Since the device stats are not reset at PFReset, they likely will not
* be zeroed when the driver starts. We'll save the first values read
* and use them as offsets to be subtracted from the raw values in order
* to report stats that count from zero.
**/
void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
bool offset_loaded, u64 *offset, u64 *stat)
{
u64 new_data;
#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
new_data = rd64(hw, loreg);
#else
/*
* Use two rd32's instead of one rd64; FreeBSD versions before
* 10 don't support 64-bit bus reads/writes.
*/
new_data = rd32(hw, loreg);
new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
#endif
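/*
 * The hardware counter is 48 bits wide and wraps; if the new reading
 * is below the saved offset, the counter rolled over, so add 2^48
 * before subtracting the offset.
 */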
if (!offset_loaded)
*offset = new_data;
if (new_data >= *offset)
*stat = new_data - *offset;
else
*stat = (new_data + ((u64)1 << 48)) - *offset;
*stat &= 0xFFFFFFFFFFFFULL;
}
/**
* Read and update a 32 bit stat from the hw
**/
void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
bool offset_loaded, u64 *offset, u64 *stat)
{
u32 new_data;
new_data = rd32(hw, reg);
if (!offset_loaded)
*offset = new_data;
if (new_data >= *offset)
*stat = (u32)(new_data - *offset);
else
*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid_list *ctx_list =
SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
struct sysctl_oid *debug_node;
struct sysctl_oid_list *debug_list;
struct sysctl_oid *fec_node;
struct sysctl_oid_list *fec_list;
/* Set up sysctls */
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_current_speed, "A", "Current Port Speed");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
pf, 0, ixl_sysctl_unallocated_queues, "I",
"Queues not allocated to a PF or VF");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_pf_tx_itr, "I",
"Immediately set TX ITR value for all queues");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_pf_rx_itr, "I",
"Immediately set RX ITR value for all queues");
SYSCTL_ADD_INT(ctx, ctx_list,
OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
&pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
SYSCTL_ADD_INT(ctx, ctx_list,
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
/* Add FEC sysctls for 25G adapters */
/*
* XXX: These settings can be changed, but that isn't supported,
* so these are read-only for now.
*/
if (hw->device_id == I40E_DEV_ID_25G_B
|| hw->device_id == I40E_DEV_ID_25G_SFP28) {
fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
fec_list = SYSCTL_CHILDREN(fec_node);
SYSCTL_ADD_PROC(ctx, fec_list,
OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
SYSCTL_ADD_PROC(ctx, fec_list,
OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
SYSCTL_ADD_PROC(ctx, fec_list,
OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
SYSCTL_ADD_PROC(ctx, fec_list,
OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
SYSCTL_ADD_PROC(ctx, fec_list,
OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
}
/* Add sysctls meant to print debug information, but don't list them
* in "sysctl -a" output. */
debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
debug_list = SYSCTL_CHILDREN(debug_node);
SYSCTL_ADD_UINT(ctx, debug_list,
OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
&pf->hw.debug_mask, 0, "Shared code debug message level");
SYSCTL_ADD_UINT(ctx, debug_list,
OID_AUTO, "core_debug_mask", CTLFLAG_RW,
&pf->dbg_mask, 0, "Non-hared code debug message level");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
if (pf->has_i2c) {
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
}
#ifdef PCI_IOV
SYSCTL_ADD_UINT(ctx, debug_list,
OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
0, "PF/VF Virtual Channel debug level");
#endif
}
/*
* Primarily for finding out how many queues can be assigned to VFs
* at runtime.
*/
static int
ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int queues;
IXL_PF_LOCK(pf);
queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
IXL_PF_UNLOCK(pf);
return sysctl_handle_int(oidp, NULL, queues, req);
}
/*
** Set flow control using sysctl:
** 0 - off
** 1 - rx pause
** 2 - tx pause
** 3 - full
*/
int
ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
int requested_fc, error = 0;
enum i40e_status_code aq_error = 0;
u8 fc_aq_err = 0;
/* Get request */
requested_fc = pf->fc;
error = sysctl_handle_int(oidp, &requested_fc, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if (requested_fc < 0 || requested_fc > 3) {
device_printf(dev,
"Invalid fc mode; valid modes are 0 through 3\n");
return (EINVAL);
}
/* Set fc ability for port */
hw->fc.requested_mode = requested_fc;
aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
if (aq_error) {
device_printf(dev,
"%s: Error setting new fc mode %d; fc_err %#x\n",
__func__, aq_error, fc_aq_err);
return (EIO);
}
pf->fc = requested_fc;
/* Get new link state */
i40e_msec_delay(250);
hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
return (0);
}
char *
ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
{
int index;
char *speeds[] = {
"Unknown",
"100 Mbps",
"1 Gbps",
"10 Gbps",
"40 Gbps",
"20 Gbps",
"25 Gbps",
};
switch (link_speed) {
case I40E_LINK_SPEED_100MB:
index = 1;
break;
case I40E_LINK_SPEED_1GB:
index = 2;
break;
case I40E_LINK_SPEED_10GB:
index = 3;
break;
case I40E_LINK_SPEED_40GB:
index = 4;
break;
case I40E_LINK_SPEED_20GB:
index = 5;
break;
case I40E_LINK_SPEED_25GB:
index = 6;
break;
case I40E_LINK_SPEED_UNKNOWN:
default:
index = 0;
break;
}
return speeds[index];
}
int
ixl_current_speed(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
int error = 0;
ixl_update_link_status(pf);
error = sysctl_handle_string(oidp,
ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
8, req);
return (error);
}
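/*
* Translate between the AQ link speed flags (kept in the low byte of each
* speedmap entry) and the bitmap used by the advertise_speed sysctl (kept
* in the high byte). For example, the sysctl flag 0x4 (10G) maps to
* I40E_LINK_SPEED_10GB and back, so a sysctl value of 0x24 converts to
* the 10G and 40G AQ flags.
*/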
static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
{
static u16 speedmap[6] = {
(I40E_LINK_SPEED_100MB | (0x1 << 8)),
(I40E_LINK_SPEED_1GB | (0x2 << 8)),
(I40E_LINK_SPEED_10GB | (0x4 << 8)),
(I40E_LINK_SPEED_20GB | (0x8 << 8)),
(I40E_LINK_SPEED_25GB | (0x10 << 8)),
(I40E_LINK_SPEED_40GB | (0x20 << 8))
};
u8 retval = 0;
for (int i = 0; i < 6; i++) {
if (to_aq)
retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
else
retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
}
return (retval);
}
int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
enum i40e_status_code aq_error = 0;
/* Get current capability information */
aq_error = i40e_aq_get_phy_capabilities(hw,
FALSE, FALSE, &abilities, NULL);
if (aq_error) {
device_printf(dev,
"%s: Error getting phy capabilities %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EIO);
}
/* Prepare new config */
bzero(&config, sizeof(config));
config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
config.phy_type = abilities.phy_type;
config.phy_type_ext = abilities.phy_type_ext;
config.abilities = abilities.abilities
| I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
if (aq_error) {
device_printf(dev,
"%s: Error setting new phy config %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EIO);
}
return (0);
}
/*
** Control link advertise speed:
** Flags:
** 0x1 - advertise 100 Mb
** 0x2 - advertise 1G
** 0x4 - advertise 10G
** 0x8 - advertise 20G
** 0x10 - advertise 25G
** 0x20 - advertise 40G
**
** Set to 0 to disable link
*/
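/*
* Example: with the handler exposed as the `advertise_speed' sysctl noted
* in ixl_media_change() below, `sysctl dev.ixl.0.advertise_speed=0x24'
* would request that only 10G (0x4) and 40G (0x20) be advertised, subject
* to the supported-speeds check in the handler.
*/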
int
ixl_set_advertise(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
u8 converted_speeds;
int requested_ls = 0;
int error = 0;
/* Read in new mode */
requested_ls = pf->advertised_speed;
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
/* Check if changing speeds is supported */
switch (hw->device_id) {
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
device_printf(dev, "Changing advertised speeds not supported"
" on this device.\n");
return (EINVAL);
}
/* Range-check before narrowing to u8 below */
if (requested_ls < 0 || requested_ls > 0xff) {
device_printf(dev, "Invalid advertised speed; "
"valid flags are: 0x%02x\n",
ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
return (EINVAL);
}
/* Check for valid value */
converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
device_printf(dev, "Invalid advertised speed; "
"valid flags are: 0x%02x\n",
ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
return (EINVAL);
}
error = ixl_set_advertised_speeds(pf, requested_ls);
if (error)
return (error);
pf->advertised_speed = requested_ls;
ixl_update_link_status(pf);
return (0);
}
/*
* Input: bitmap of enum i40e_aq_link_speed
*/
static u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
if (link_speeds & I40E_LINK_SPEED_40GB)
return IF_Gbps(40);
if (link_speeds & I40E_LINK_SPEED_25GB)
return IF_Gbps(25);
if (link_speeds & I40E_LINK_SPEED_20GB)
return IF_Gbps(20);
if (link_speeds & I40E_LINK_SPEED_10GB)
return IF_Gbps(10);
if (link_speeds & I40E_LINK_SPEED_1GB)
return IF_Gbps(1);
if (link_speeds & I40E_LINK_SPEED_100MB)
return IF_Mbps(100);
/* Minimum supported link speed */
return IF_Mbps(100);
}
/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
void
ixl_get_bus_info(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
u16 link;
u32 offset, num_ports;
u64 max_speed;
/* Some devices don't use PCIE */
if (hw->mac.type == I40E_MAC_X722)
return;
/* Read PCI Express Capabilities Link Status Register */
pci_find_cap(dev, PCIY_EXPRESS, &offset);
link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Fill out hw struct with PCIE info */
i40e_set_pci_config_data(hw, link);
/* Use info to print out bandwidth messages */
device_printf(dev,"PCI Express Bus: Speed %s %s\n",
((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
(hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
(hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
(hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
(hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
(hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
(hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
("Unknown"));
/*
* If adapter is in slot with maximum supported speed,
* no warning message needs to be printed out.
*/
if (hw->bus.speed >= i40e_bus_speed_8000
&& hw->bus.width >= i40e_bus_width_pcie_x8)
return;
num_ports = bitcount32(hw->func_caps.valid_functions);
max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
device_printf(dev, "PCI-Express bandwidth available"
" for this device may be insufficient for"
" optimal performance.\n");
device_printf(dev, "Please move the device to a different"
" PCI-e link with more lanes and/or higher"
" transfer rate.\n");
}
}
static int
ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
struct sbuf *sbuf;
sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
ixl_nvm_version_str(hw, sbuf);
sbuf_finish(sbuf);
sbuf_delete(sbuf);
return 0;
}
void
ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
{
if ((nvma->command == I40E_NVM_READ) &&
((nvma->config & 0xFF) == 0xF) &&
(((nvma->config & 0xF00) >> 8) == 0xF) &&
(nvma->offset == 0) &&
(nvma->data_size == 1)) {
// device_printf(dev, "- Get Driver Status Command\n");
}
else if (nvma->command == I40E_NVM_READ) {
/* Deliberately empty: ordinary NVM reads are not logged */
}
else {
switch (nvma->command) {
case 0xB:
device_printf(dev, "- command: I40E_NVM_READ\n");
break;
case 0xC:
device_printf(dev, "- command: I40E_NVM_WRITE\n");
break;
default:
device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
break;
}
device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
}
}
int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_nvm_access *nvma;
device_t dev = pf->dev;
enum i40e_status_code status = 0;
int perrno;
DEBUGFUNC("ixl_handle_nvmupd_cmd");
/* Sanity checks */
if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
ifd->ifd_data == NULL) {
device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
__func__);
device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
__func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
device_printf(dev, "%s: data pointer: %p\n", __func__,
ifd->ifd_data);
return (EINVAL);
}
nvma = (struct i40e_nvm_access *)ifd->ifd_data;
if (pf->dbg_mask & IXL_DBG_NVMUPD)
ixl_print_nvm_cmd(dev, nvma);
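/*
* If an EMP reset is in progress, poll for up to ~10 seconds
* (100 x 100ms) for it to finish before issuing the NVM update command;
* otherwise the request fails below with EBUSY.
*/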
if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
int count = 0;
while (count++ < 100) {
i40e_msec_delay(100);
if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
break;
}
}
if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
IXL_PF_LOCK(pf);
status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
IXL_PF_UNLOCK(pf);
} else {
perrno = -EBUSY;
}
if (status)
device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
i40e_stat_str(hw, status), perrno);
/*
* -EPERM is actually ERESTART, which the kernel interprets as a request
* to restart this ioctl; return -EACCES in place of -EPERM instead.
*/
if (perrno == -EPERM)
return (-EACCES);
else
return (perrno);
}
/*********************************************************************
*
* Media Ioctl callback
*
* This routine is called whenever the user queries the status of
* the interface using ifconfig.
*
**********************************************************************/
void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ixl_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
INIT_DEBUGOUT("ixl_media_status: begin");
IXL_PF_LOCK(pf);
hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
if (!pf->link_up) {
IXL_PF_UNLOCK(pf);
return;
}
ifmr->ifm_status |= IFM_ACTIVE;
/* Hardware always does full-duplex */
ifmr->ifm_active |= IFM_FDX;
switch (hw->phy.link_info.phy_type) {
/* 100 M */
case I40E_PHY_TYPE_100BASE_TX:
ifmr->ifm_active |= IFM_100_TX;
break;
/* 1 G */
case I40E_PHY_TYPE_1000BASE_T:
ifmr->ifm_active |= IFM_1000_T;
break;
case I40E_PHY_TYPE_1000BASE_SX:
ifmr->ifm_active |= IFM_1000_SX;
break;
case I40E_PHY_TYPE_1000BASE_LX:
ifmr->ifm_active |= IFM_1000_LX;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
ifmr->ifm_active |= IFM_OTHER;
break;
/* 10 G */
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
ifmr->ifm_active |= IFM_10G_TWINAX;
break;
case I40E_PHY_TYPE_10GBASE_SR:
ifmr->ifm_active |= IFM_10G_SR;
break;
case I40E_PHY_TYPE_10GBASE_LR:
ifmr->ifm_active |= IFM_10G_LR;
break;
case I40E_PHY_TYPE_10GBASE_T:
ifmr->ifm_active |= IFM_10G_T;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
case I40E_PHY_TYPE_10GBASE_AOC:
ifmr->ifm_active |= IFM_OTHER;
break;
/* 25 G */
case I40E_PHY_TYPE_25GBASE_KR:
ifmr->ifm_active |= IFM_25G_KR;
break;
case I40E_PHY_TYPE_25GBASE_CR:
ifmr->ifm_active |= IFM_25G_CR;
break;
case I40E_PHY_TYPE_25GBASE_SR:
ifmr->ifm_active |= IFM_25G_SR;
break;
case I40E_PHY_TYPE_25GBASE_LR:
ifmr->ifm_active |= IFM_UNKNOWN;
break;
/* 40 G */
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
ifmr->ifm_active |= IFM_40G_CR4;
break;
case I40E_PHY_TYPE_40GBASE_SR4:
ifmr->ifm_active |= IFM_40G_SR4;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ifmr->ifm_active |= IFM_40G_LR4;
break;
case I40E_PHY_TYPE_XLAUI:
ifmr->ifm_active |= IFM_OTHER;
break;
case I40E_PHY_TYPE_1000BASE_KX:
ifmr->ifm_active |= IFM_1000_KX;
break;
case I40E_PHY_TYPE_SGMII:
ifmr->ifm_active |= IFM_1000_SGMII;
break;
/* ERJ: What's the difference between these? */
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
ifmr->ifm_active |= IFM_10G_CR1;
break;
case I40E_PHY_TYPE_10GBASE_KX4:
ifmr->ifm_active |= IFM_10G_KX4;
break;
case I40E_PHY_TYPE_10GBASE_KR:
ifmr->ifm_active |= IFM_10G_KR;
break;
case I40E_PHY_TYPE_SFI:
ifmr->ifm_active |= IFM_10G_SFI;
break;
/* Our single 20G media type */
case I40E_PHY_TYPE_20GBASE_KR2:
ifmr->ifm_active |= IFM_20G_KR2;
break;
case I40E_PHY_TYPE_40GBASE_KR4:
ifmr->ifm_active |= IFM_40G_KR4;
break;
case I40E_PHY_TYPE_XLPPI:
case I40E_PHY_TYPE_40GBASE_AOC:
ifmr->ifm_active |= IFM_40G_XLPPI;
break;
/* Unknown to driver */
default:
ifmr->ifm_active |= IFM_UNKNOWN;
break;
}
/* Report flow control status as well */
if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
ifmr->ifm_active |= IFM_ETH_TXPAUSE;
if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
ifmr->ifm_active |= IFM_ETH_RXPAUSE;
IXL_PF_UNLOCK(pf);
}
void
ixl_init(void *arg)
{
struct ixl_pf *pf = arg;
IXL_PF_LOCK(pf);
ixl_init_locked(pf);
IXL_PF_UNLOCK(pf);
}
/*
* NOTE: Fortville does not support forcing media speeds. Instead,
* use the set_advertise sysctl to set the speeds Fortville
* will advertise or be allowed to operate at.
*/
int
ixl_media_change(struct ifnet * ifp)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ifmedia *ifm = &vsi->media;
INIT_DEBUGOUT("ixl_media_change: begin");
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
return (ENODEV);
}
/*********************************************************************
* Ioctl entry point
*
* ixl_ioctl is called when the user wants to configure the
* interface.
*
* return 0 on success, positive on failure
**********************************************************************/
int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ixl_pf *pf = vsi->back;
struct ifreq *ifr = (struct ifreq *)data;
struct ifdrv *ifd = (struct ifdrv *)data;
#if defined(INET) || defined(INET6)
struct ifaddr *ifa = (struct ifaddr *)data;
bool avoid_reset = FALSE;
#endif
int error = 0;
switch (command) {
case SIOCSIFADDR:
IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
#ifdef INET
if (ifa->ifa_addr->sa_family == AF_INET)
avoid_reset = TRUE;
#endif
#ifdef INET6
if (ifa->ifa_addr->sa_family == AF_INET6)
avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
/*
** Calling init results in link renegotiation,
** so we avoid doing it when possible.
*/
if (avoid_reset) {
ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
ixl_init(pf);
#ifdef INET
if (!(ifp->if_flags & IFF_NOARP))
arp_ifinit(ifp, ifa);
#endif
} else
error = ether_ioctl(ifp, command, data);
break;
#endif
case SIOCSIFMTU:
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (ifr->ifr_mtu > IXL_MAX_FRAME -
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
error = EINVAL;
} else {
IXL_PF_LOCK(pf);
ifp->if_mtu = ifr->ifr_mtu;
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixl_init_locked(pf);
IXL_PF_UNLOCK(pf);
}
break;
case SIOCSIFFLAGS:
IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
IXL_PF_LOCK(pf);
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
if ((ifp->if_flags ^ pf->if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
ixl_set_promisc(vsi);
}
} else {
IXL_PF_UNLOCK(pf);
ixl_init(pf);
IXL_PF_LOCK(pf);
}
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
ixl_stop_locked(pf);
}
}
pf->if_flags = ifp->if_flags;
IXL_PF_UNLOCK(pf);
break;
case SIOCSDRVSPEC:
case SIOCGDRVSPEC:
IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
"Info)\n");
/* NVM update command */
if (ifd->ifd_cmd == I40E_NVM_ACCESS)
error = ixl_handle_nvmupd_cmd(pf, ifd);
else
error = EINVAL;
break;
case SIOCADDMULTI:
IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXL_PF_LOCK(pf);
ixl_disable_rings_intr(vsi);
ixl_add_multi(vsi);
ixl_enable_intr(vsi);
IXL_PF_UNLOCK(pf);
}
break;
case SIOCDELMULTI:
IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXL_PF_LOCK(pf);
ixl_disable_rings_intr(vsi);
ixl_del_multi(vsi);
ixl_enable_intr(vsi);
IXL_PF_UNLOCK(pf);
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
case SIOCGIFXMEDIA:
IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
break;
case SIOCSIFCAP:
{
int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
ixl_cap_txcsum_tso(vsi, ifp, mask);
if (mask & IFCAP_RXCSUM)
ifp->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
if (mask & IFCAP_LRO)
ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (mask & IFCAP_VLAN_HWFILTER)
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXL_PF_LOCK(pf);
ixl_init_locked(pf);
IXL_PF_UNLOCK(pf);
}
VLAN_CAPABILITIES(ifp);
break;
}
#if __FreeBSD_version >= 1003000
case SIOCGI2C:
{
struct ifi2creq i2c;
int i;
IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
if (!pf->has_i2c)
return (ENOTTY);
error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
if (error != 0)
break;
if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
error = EINVAL;
break;
}
if (i2c.len > sizeof(i2c.data)) {
error = EINVAL;
break;
}
for (i = 0; i < i2c.len; i++)
if (ixl_read_i2c_byte(pf, i2c.offset + i,
i2c.dev_addr, &i2c.data[i]))
return (EIO);
error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
break;
}
#endif
default:
IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
int
ixl_find_i2c_interface(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
bool i2c_en, port_matched;
u32 reg;
for (int i = 0; i < 4; i++) {
reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
>> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
& BIT(hw->port);
if (i2c_en && port_matched)
return (i);
}
return (-1);
}
static char *
ixl_phy_type_string(u32 bit_pos, bool ext)
{
static char * phy_types_str[32] = {
"SGMII",
"1000BASE-KX",
"10GBASE-KX4",
"10GBASE-KR",
"40GBASE-KR4",
"XAUI",
"XFI",
"SFI",
"XLAUI",
"XLPPI",
"40GBASE-CR4",
"10GBASE-CR1",
"Reserved (12)",
"Reserved (13)",
"Reserved (14)",
"Reserved (15)",
"Reserved (16)",
"100BASE-TX",
"1000BASE-T",
"10GBASE-T",
"10GBASE-SR",
"10GBASE-LR",
"10GBASE-SFP+Cu",
"10GBASE-CR1",
"40GBASE-CR4",
"40GBASE-SR4",
"40GBASE-LR4",
"1000BASE-SX",
"1000BASE-LX",
"1000BASE-T Optical",
"20GBASE-KR2",
"Reserved (31)"
};
static char * ext_phy_types_str[4] = {
"25GBASE-KR",
"25GBASE-CR",
"25GBASE-SR",
"25GBASE-LR"
};
if (ext && bit_pos > 3) return "Invalid_Ext";
if (bit_pos > 31) return "Invalid";
return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
}
int
ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
struct i40e_aq_desc desc;
enum i40e_status_code status;
struct i40e_aqc_get_link_status *aq_link_status =
(struct i40e_aqc_get_link_status *)&desc.params.raw;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
if (status) {
device_printf(dev,
"%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
__func__, i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
return (EIO);
}
bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
return (0);
}
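/*
* Map the PHY type from the link status to a printable name; values at or
* above 0x1F are the extended (25G) types, so they are offset into the
* extended string table (e.g. 0x20 resolves to the second extended entry).
*/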
static char *
ixl_phy_type_string_ls(u8 val)
{
if (val >= 0x1F)
return ixl_phy_type_string(val - 0x1F, true);
else
return ixl_phy_type_string(val, false);
}
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
device_t dev = pf->dev;
struct sbuf *buf;
int error = 0;
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
return (ENOMEM);
}
struct i40e_aqc_get_link_status link_status;
error = ixl_aq_get_link_status(pf, &link_status);
if (error) {
sbuf_delete(buf);
return (error);
}
/* TODO: Add 25G types */
sbuf_printf(buf, "\n"
"PHY Type : 0x%02x<%s>\n"
"Speed : 0x%02x\n"
"Link info: 0x%02x\n"
"AN info : 0x%02x\n"
"Ext info : 0x%02x\n"
"Loopback : 0x%02x\n"
"Max Frame: %d\n"
"Config : 0x%02x\n"
"Power : 0x%02x",
link_status.phy_type,
ixl_phy_type_string_ls(link_status.phy_type),
link_status.link_speed,
link_status.link_info,
link_status.an_info,
link_status.ext_info,
link_status.loopback,
link_status.max_frame_size,
link_status.config,
link_status.power_desc);
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
sbuf_delete(buf);
return (error);
}
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp abilities;
struct sbuf *buf;
int error = 0;
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
return (ENOMEM);
}
status = i40e_aq_get_phy_capabilities(hw,
FALSE, FALSE, &abilities, NULL);
if (status) {
device_printf(dev,
"%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
__func__, i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
sbuf_delete(buf);
return (EIO);
}
sbuf_printf(buf, "\n"
"PHY Type : %08x",
abilities.phy_type);
if (abilities.phy_type != 0) {
sbuf_printf(buf, "<");
for (int i = 0; i < 32; i++)
if ((1 << i) & abilities.phy_type)
sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
sbuf_printf(buf, ">\n");
}
sbuf_printf(buf, "PHY Ext : %02x",
abilities.phy_type_ext);
if (abilities.phy_type_ext != 0) {
sbuf_printf(buf, "<");
for (int i = 0; i < 4; i++)
if ((1 << i) & abilities.phy_type_ext)
sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
sbuf_printf(buf, ">");
}
sbuf_printf(buf, "\n");
sbuf_printf(buf,
"Speed : %02x\n"
"Abilities: %02x\n"
"EEE cap : %04x\n"
"EEER reg : %08x\n"
"D3 Lpan : %02x\n"
"ID : %02x %02x %02x %02x\n"
"ModType : %02x %02x %02x\n"
"ModType E: %01x\n"
"FEC Cfg : %02x\n"
"Ext CC : %02x",
abilities.link_speed,
abilities.abilities, abilities.eee_capability,
abilities.eeer_val, abilities.d3_lpan,
abilities.phy_id[0], abilities.phy_id[1],
abilities.phy_id[2], abilities.phy_id[3],
abilities.module_type[0], abilities.module_type[1],
abilities.module_type[2], abilities.phy_type_ext >> 5,
abilities.phy_type_ext & 0x1F,
abilities.ext_comp_code);
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
sbuf_delete(buf);
return (error);
}
static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_mac_filter *f;
char *buf, *buf_i;
int error = 0;
int ftl_len = 0;
int ftl_counter = 0;
int buf_len = 0;
int entry_len = 42;
SLIST_FOREACH(f, &vsi->ftl, next) {
ftl_len++;
}
if (ftl_len < 1) {
sysctl_handle_string(oidp, "(none)", 6, req);
return (0);
}
buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
if (buf == NULL)
return (ENOMEM);
sprintf(buf_i++, "\n");
SLIST_FOREACH(f, &vsi->ftl, next) {
sprintf(buf_i,
MAC_FORMAT ", vlan %4d, flags %#06x",
MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
buf_i += entry_len;
/* don't print '\n' for last entry */
if (++ftl_counter != ftl_len) {
sprintf(buf_i, "\n");
buf_i++;
}
}
error = sysctl_handle_string(oidp, buf, strlen(buf), req);
if (error)
printf("sysctl error: %d\n", error);
free(buf, M_DEVBUF);
return error;
}
#define IXL_SW_RES_SIZE 0x14
int
ixl_res_alloc_cmp(const void *a, const void *b)
{
const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
return ((int)one->resource_type - (int)two->resource_type);
}
/*
* Longest string length: 25
*/
char *
ixl_switch_res_type_string(u8 type)
{
static char * ixl_switch_res_type_strings[0x14] = {
"VEB",
"VSI",
"Perfect Match MAC address",
"S-tag",
"(Reserved)",
"Multicast hash entry",
"Unicast hash entry",
"VLAN",
"VSI List entry",
"(Reserved)",
"VLAN Statistic Pool",
"Mirror Rule",
"Queue Set",
"Inner VLAN Forward filter",
"(Reserved)",
"Inner MAC",
"IP",
"GRE/VN1 Key",
"VN2 Key",
"Tunneling Port"
};
if (type < 0x14)
return ixl_switch_res_type_strings[type];
else
return "(Reserved)";
}
static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct sbuf *buf;
enum i40e_status_code status;
int error = 0;
u8 num_entries;
struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for output.\n");
return (ENOMEM);
}
bzero(resp, sizeof(resp));
status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
resp,
IXL_SW_RES_SIZE,
NULL);
if (status) {
device_printf(dev,
"%s: get_switch_resource_alloc() error %s, aq error %s\n",
__func__, i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
sbuf_delete(buf);
return (error);
}
/* Sort entries by type for display */
qsort(resp, num_entries,
sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
&ixl_res_alloc_cmp);
sbuf_cat(buf, "\n");
sbuf_printf(buf, "# of entries: %d\n", num_entries);
sbuf_printf(buf,
" Type | Guaranteed | Total | Used | Un-allocated\n"
" | (this) | (all) | (this) | (all) \n");
for (int i = 0; i < num_entries; i++) {
sbuf_printf(buf,
"%25s | %10d %5d %6d %12d",
ixl_switch_res_type_string(resp[i].resource_type),
resp[i].guaranteed,
resp[i].total,
resp[i].used,
resp[i].total_unalloced);
if (i < num_entries - 1)
sbuf_cat(buf, "\n");
}
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
sbuf_delete(buf);
return (error);
}
/*
** Caller must init and delete sbuf; this function will clear and
** finish it for caller.
**
** XXX: Cannot use the SEID for this, since there is no longer a
** fixed mapping between SEID and element type.
*/
char *
ixl_switch_element_string(struct sbuf *s,
struct i40e_aqc_switch_config_element_resp *element)
{
sbuf_clear(s);
switch (element->element_type) {
case I40E_AQ_SW_ELEM_TYPE_MAC:
sbuf_printf(s, "MAC %3d", element->element_info);
break;
case I40E_AQ_SW_ELEM_TYPE_PF:
sbuf_printf(s, "PF %3d", element->element_info);
break;
case I40E_AQ_SW_ELEM_TYPE_VF:
sbuf_printf(s, "VF %3d", element->element_info);
break;
case I40E_AQ_SW_ELEM_TYPE_EMP:
sbuf_cat(s, "EMP");
break;
case I40E_AQ_SW_ELEM_TYPE_BMC:
sbuf_cat(s, "BMC");
break;
case I40E_AQ_SW_ELEM_TYPE_PV:
sbuf_cat(s, "PV");
break;
case I40E_AQ_SW_ELEM_TYPE_VEB:
sbuf_cat(s, "VEB");
break;
case I40E_AQ_SW_ELEM_TYPE_PA:
sbuf_cat(s, "PA");
break;
case I40E_AQ_SW_ELEM_TYPE_VSI:
sbuf_printf(s, "VSI %3d", element->element_info);
break;
default:
sbuf_cat(s, "?");
break;
}
sbuf_finish(s);
return sbuf_data(s);
}
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct sbuf *buf;
struct sbuf *nmbuf;
enum i40e_status_code status;
int error = 0;
u16 next = 0;
u8 aq_buf[I40E_AQ_LARGE_BUF];
struct i40e_aqc_get_switch_config_resp *sw_config;
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
return (ENOMEM);
}
status = i40e_aq_get_switch_config(hw, sw_config,
sizeof(aq_buf), &next, NULL);
if (status) {
device_printf(dev,
"%s: aq_get_switch_config() error %s, aq error %s\n",
__func__, i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
sbuf_delete(buf);
return error;
}
if (next)
device_printf(dev, "%s: TODO: get more config with SEID %d\n",
__func__, next);
nmbuf = sbuf_new_auto();
if (!nmbuf) {
device_printf(dev, "Could not allocate sbuf for name output.\n");
sbuf_delete(buf);
return (ENOMEM);
}
sbuf_cat(buf, "\n");
/* Assuming <= 255 elements in switch */
sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
/* Exclude:
** Revision -- all elements are revision 1 for now
*/
sbuf_printf(buf,
"SEID ( Name ) | Uplink | Downlink | Conn Type\n"
" | | | (uplink)\n");
for (int i = 0; i < sw_config->header.num_reported; i++) {
// "%4d (%8s) | %8s %8s %#8x",
sbuf_printf(buf, "%4d", sw_config->element[i].seid);
sbuf_cat(buf, " ");
sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
&sw_config->element[i]));
sbuf_cat(buf, " | ");
sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
sbuf_cat(buf, " ");
sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
sbuf_cat(buf, " ");
sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
if (i < sw_config->header.num_reported - 1)
sbuf_cat(buf, "\n");
}
sbuf_delete(nmbuf);
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
sbuf_delete(buf);
return (error);
}
static int
ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct sbuf *buf;
int error = 0;
enum i40e_status_code status;
u32 reg;
struct i40e_aqc_get_set_rss_key_data key_data;
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for output.\n");
return (ENOMEM);
}
sbuf_cat(buf, "\n");
if (hw->mac.type == I40E_MAC_X722) {
bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
if (status)
device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
} else {
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
sbuf_printf(buf, "%4D", (u_char *)&reg, "");
}
}
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
sbuf_delete(buf);
return (error);
}
static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct sbuf *buf;
int error = 0;
enum i40e_status_code status;
u8 hlut[512];
u32 reg;
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for output.\n");
return (ENOMEM);
}
sbuf_cat(buf, "\n");
if (hw->mac.type == I40E_MAC_X722) {
bzero(hlut, sizeof(hlut));
status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
if (status)
device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
sbuf_printf(buf, "%512D", (u_char *)hlut, "");
} else {
for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
reg = rd32(hw, I40E_PFQF_HLUT(i));
sbuf_printf(buf, "%4D", (u_char *)&reg, "");
}
}
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
sbuf_delete(buf);
return (error);
}
static int
ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
u64 hena;
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
return sysctl_handle_long(oidp, NULL, hena, req);
}
/*
* Sysctl to disable firmware's link management
*
* 1 - Disable link management on this port
* 0 - Re-enable link management
*
* On normal NVMs, firmware manages link by default.
*/
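/*
* Example: this handler is registered above as `disable_fw_link_management'
* in the debug sysctl list, so (assuming the usual dev.ixl.<unit>.debug
* path) `sysctl dev.ixl.0.debug.disable_fw_link_management=1' would turn
* firmware link management off for this port.
*/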
static int
ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
int requested_mode = -1;
enum i40e_status_code status = 0;
int error = 0;
/* Read in new mode */
error = sysctl_handle_int(oidp, &requested_mode, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
/* Check for sane value */
if (requested_mode < 0 || requested_mode > 1) {
device_printf(dev, "Valid modes are 0 or 1\n");
return (EINVAL);
}
/* Set new mode */
status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
if (status) {
device_printf(dev,
"%s: Error setting new phy debug mode %s,"
" aq error: %s\n", __func__, i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
return (EIO);
}
return (0);
}
/*
* Sysctl to read a byte from I2C bus.
*
* Input: 32-bit value:
* bits 0-7: device address (0xA0 or 0xA2)
* bits 8-15: offset (0-255)
* bits 16-31: unused
* Output: 8-bit value read
*/
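/*
* Example: to read offset 0x60 of the module EEPROM at address 0xA0, the
* 32-bit input would be (0x60 << 8) | 0xA0 == 0x60A0; the handler prints
* the byte that was read to the console.
*/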
static int
ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
device_t dev = pf->dev;
int input = -1, error = 0;
u8 dev_addr, offset, output;
device_printf(dev, "%s: start\n", __func__);
/* Read in I2C read parameters */
error = sysctl_handle_int(oidp, &input, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
/* Validate device address */
dev_addr = input & 0xFF;
if (dev_addr != 0xA0 && dev_addr != 0xA2) {
return (EINVAL);
}
offset = (input >> 8) & 0xFF;
error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
if (error)
return (error);
device_printf(dev, "%02X\n", output);
return (0);
}
/*
* Sysctl to write a byte to the I2C bus.
*
* Input: 32-bit value:
* bits 0-7: device address (0xA0 or 0xA2)
* bits 8-15: offset (0-255)
* bits 16-23: value to write
* bits 24-31: unused
* Output: 8-bit value written
*/
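/*
* Example: to write the value 0x08 to offset 0x5E of the device at
* address 0xA2, the 32-bit input would be
* (0x08 << 16) | (0x5E << 8) | 0xA2; the handler echoes the value written.
*/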
static int
ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
device_t dev = pf->dev;
int input = -1, error = 0;
u8 dev_addr, offset, value;
/* Read in I2C write parameters */
error = sysctl_handle_int(oidp, &input, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
/* Validate device address */
dev_addr = input & 0xFF;
if (dev_addr != 0xA0 && dev_addr != 0xA2) {
return (EINVAL);
}
offset = (input >> 8) & 0xFF;
value = (input >> 16) & 0xFF;
error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
if (error)
return (error);
device_printf(dev, "%02X written\n", value);
return (0);
}
static int
ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
u8 bit_pos, int *is_set)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
enum i40e_status_code status;
status = i40e_aq_get_phy_capabilities(hw,
FALSE, FALSE, abilities, NULL);
if (status) {
device_printf(dev,
"%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
__func__, i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
return (EIO);
}
*is_set = !!(abilities->phy_type_ext & bit_pos);
return (0);
}
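/*
* Update the FEC-related bit selected by bit_pos in the current PHY
* configuration and, only if that actually changes something, push the new
* config to the firmware; the remaining PHY settings are copied from the
* abilities response so they are preserved.
*/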
static int
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
u8 bit_pos, int set)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
struct i40e_aq_set_phy_config config;
enum i40e_status_code status;
/* Set new PHY config */
memset(&config, 0, sizeof(config));
config.fec_config = abilities->phy_type_ext & ~(bit_pos);
if (set)
config.fec_config |= bit_pos;
if (config.fec_config != abilities->phy_type_ext) {
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.phy_type = abilities->phy_type;
config.phy_type_ext = abilities->phy_type_ext;
config.link_speed = abilities->link_speed;
config.eee_capability = abilities->eee_capability;
config.eeer = abilities->eeer_val;
config.low_power_ctrl = abilities->d3_lpan;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
device_printf(dev,
"%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
__func__, i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
return (EIO);
}
}
return (0);
}
static int
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
if (error)
return (error);
/* Read in new mode */
error = sysctl_handle_int(oidp, &mode, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
}
static int
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
if (error)
return (error);
/* Read in new mode */
error = sysctl_handle_int(oidp, &mode, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
}
static int
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
if (error)
return (error);
/* Read in new mode */
error = sysctl_handle_int(oidp, &mode, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
}
static int
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
if (error)
return (error);
/* Read in new mode */
error = sysctl_handle_int(oidp, &mode, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
}
static int
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
if (error)
return (error);
/* Read in new mode */
error = sysctl_handle_int(oidp, &mode, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
}
Index: head/sys/dev/kbd/kbd.c
===================================================================
--- head/sys/dev/kbd/kbd.c (revision 328217)
+++ head/sys/dev/kbd/kbd.c (revision 328218)
@@ -1,1478 +1,1477 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1999 Kazutaka YOKOTA <yokota@zodiac.mech.utsunomiya-u.ac.jp>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer as
* the first lines of this file unmodified.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_kbd.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/kbio.h>
#include <dev/kbd/kbdreg.h>
#define KBD_INDEX(dev) dev2unit(dev)
#define KB_QSIZE 512
#define KB_BUFSIZE 64
typedef struct genkbd_softc {
int gkb_flags; /* flag/status bits */
#define KB_ASLEEP (1 << 0)
struct selinfo gkb_rsel;
char gkb_q[KB_QSIZE]; /* input queue */
unsigned int gkb_q_start;
unsigned int gkb_q_length;
} genkbd_softc_t;
static SLIST_HEAD(, keyboard_driver) keyboard_drivers =
SLIST_HEAD_INITIALIZER(keyboard_drivers);
SET_DECLARE(kbddriver_set, const keyboard_driver_t);
/* local arrays */
/*
* We need at least one entry each in order to initialize a keyboard
* for the kernel console. The arrays will be increased dynamically
* when necessary.
*/
static int keyboards = 1;
static keyboard_t *kbd_ini;
static keyboard_t **keyboard = &kbd_ini;
static keyboard_switch_t *kbdsw_ini;
keyboard_switch_t **kbdsw = &kbdsw_ini;
static int keymap_restrict_change;
static SYSCTL_NODE(_hw, OID_AUTO, kbd, CTLFLAG_RD, 0, "kbd");
SYSCTL_INT(_hw_kbd, OID_AUTO, keymap_restrict_change, CTLFLAG_RW,
&keymap_restrict_change, 0, "restrict ability to change keymap");
#define ARRAY_DELTA 4
static int
kbd_realloc_array(void)
{
keyboard_t **new_kbd;
keyboard_switch_t **new_kbdsw;
- u_int newsize;
+ int newsize;
int s;
s = spltty();
newsize = rounddown(keyboards + ARRAY_DELTA, ARRAY_DELTA);
- new_kbd = mallocarray(newsize, sizeof(*new_kbd), M_DEVBUF,
- M_NOWAIT|M_ZERO);
+ new_kbd = malloc(sizeof(*new_kbd)*newsize, M_DEVBUF, M_NOWAIT|M_ZERO);
if (new_kbd == NULL) {
splx(s);
return (ENOMEM);
}
- new_kbdsw = mallocarray(newsize, sizeof(*new_kbdsw), M_DEVBUF,
+ new_kbdsw = malloc(sizeof(*new_kbdsw)*newsize, M_DEVBUF,
M_NOWAIT|M_ZERO);
if (new_kbdsw == NULL) {
free(new_kbd, M_DEVBUF);
splx(s);
return (ENOMEM);
}
bcopy(keyboard, new_kbd, sizeof(*keyboard)*keyboards);
bcopy(kbdsw, new_kbdsw, sizeof(*kbdsw)*keyboards);
if (keyboards > 1) {
free(keyboard, M_DEVBUF);
free(kbdsw, M_DEVBUF);
}
keyboard = new_kbd;
kbdsw = new_kbdsw;
keyboards = newsize;
splx(s);
if (bootverbose)
printf("kbd: new array size %d\n", keyboards);
return (0);
}
/*
* Low-level keyboard driver functions
* Keyboard subdrivers, such as the AT keyboard driver and the USB keyboard
* driver, call these functions to initialize the keyboard_t structure
* and register it to the virtual keyboard driver `kbd'.
*/
/* initialize the keyboard_t structure */
void
kbd_init_struct(keyboard_t *kbd, char *name, int type, int unit, int config,
int port, int port_size)
{
kbd->kb_flags = KB_NO_DEVICE; /* device has not been found */
kbd->kb_name = name;
kbd->kb_type = type;
kbd->kb_unit = unit;
kbd->kb_config = config & ~KB_CONF_PROBE_ONLY;
kbd->kb_led = 0; /* unknown */
kbd->kb_io_base = port;
kbd->kb_io_size = port_size;
kbd->kb_data = NULL;
kbd->kb_keymap = NULL;
kbd->kb_accentmap = NULL;
kbd->kb_fkeytab = NULL;
kbd->kb_fkeytab_size = 0;
kbd->kb_delay1 = KB_DELAY1; /* these values are advisory only */
kbd->kb_delay2 = KB_DELAY2;
kbd->kb_count = 0L;
bzero(kbd->kb_lastact, sizeof(kbd->kb_lastact));
}
void
kbd_set_maps(keyboard_t *kbd, keymap_t *keymap, accentmap_t *accmap,
fkeytab_t *fkeymap, int fkeymap_size)
{
kbd->kb_keymap = keymap;
kbd->kb_accentmap = accmap;
kbd->kb_fkeytab = fkeymap;
kbd->kb_fkeytab_size = fkeymap_size;
}
/* declare a new keyboard driver */
int
kbd_add_driver(keyboard_driver_t *driver)
{
if (SLIST_NEXT(driver, link))
return (EINVAL);
SLIST_INSERT_HEAD(&keyboard_drivers, driver, link);
return (0);
}
int
kbd_delete_driver(keyboard_driver_t *driver)
{
SLIST_REMOVE(&keyboard_drivers, driver, keyboard_driver, link);
SLIST_NEXT(driver, link) = NULL;
return (0);
}
/* register a keyboard and associate it with a function table */
int
kbd_register(keyboard_t *kbd)
{
const keyboard_driver_t **list;
const keyboard_driver_t *p;
keyboard_t *mux;
keyboard_info_t ki;
int index;
mux = kbd_get_keyboard(kbd_find_keyboard("kbdmux", -1));
for (index = 0; index < keyboards; ++index) {
if (keyboard[index] == NULL)
break;
}
if (index >= keyboards) {
if (kbd_realloc_array())
return (-1);
}
kbd->kb_index = index;
KBD_UNBUSY(kbd);
KBD_VALID(kbd);
kbd->kb_active = 0; /* disabled until someone calls kbd_enable() */
kbd->kb_token = NULL;
kbd->kb_callback.kc_func = NULL;
kbd->kb_callback.kc_arg = NULL;
SLIST_FOREACH(p, &keyboard_drivers, link) {
if (strcmp(p->name, kbd->kb_name) == 0) {
keyboard[index] = kbd;
kbdsw[index] = p->kbdsw;
if (mux != NULL) {
bzero(&ki, sizeof(ki));
strcpy(ki.kb_name, kbd->kb_name);
ki.kb_unit = kbd->kb_unit;
(void)kbdd_ioctl(mux, KBADDKBD, (caddr_t) &ki);
}
return (index);
}
}
SET_FOREACH(list, kbddriver_set) {
p = *list;
if (strcmp(p->name, kbd->kb_name) == 0) {
keyboard[index] = kbd;
kbdsw[index] = p->kbdsw;
if (mux != NULL) {
bzero(&ki, sizeof(ki));
strcpy(ki.kb_name, kbd->kb_name);
ki.kb_unit = kbd->kb_unit;
(void)kbdd_ioctl(mux, KBADDKBD, (caddr_t) &ki);
}
return (index);
}
}
return (-1);
}
int
kbd_unregister(keyboard_t *kbd)
{
int error;
int s;
if ((kbd->kb_index < 0) || (kbd->kb_index >= keyboards))
return (ENOENT);
if (keyboard[kbd->kb_index] != kbd)
return (ENOENT);
s = spltty();
if (KBD_IS_BUSY(kbd)) {
error = (*kbd->kb_callback.kc_func)(kbd, KBDIO_UNLOADING,
kbd->kb_callback.kc_arg);
if (error) {
splx(s);
return (error);
}
if (KBD_IS_BUSY(kbd)) {
splx(s);
return (EBUSY);
}
}
KBD_INVALID(kbd);
keyboard[kbd->kb_index] = NULL;
kbdsw[kbd->kb_index] = NULL;
splx(s);
return (0);
}
/* find a function table by the driver name */
keyboard_switch_t *
kbd_get_switch(char *driver)
{
const keyboard_driver_t **list;
const keyboard_driver_t *p;
SLIST_FOREACH(p, &keyboard_drivers, link) {
if (strcmp(p->name, driver) == 0)
return (p->kbdsw);
}
SET_FOREACH(list, kbddriver_set) {
p = *list;
if (strcmp(p->name, driver) == 0)
return (p->kbdsw);
}
return (NULL);
}
/*
* Keyboard client functions
* Keyboard clients, such as the console driver `syscons' and the keyboard
* cdev driver, use these functions to claim and release a keyboard for
* exclusive use.
*/
/*
* find the keyboard specified by a driver name and a unit number
* starting at given index
*/
int
kbd_find_keyboard2(char *driver, int unit, int index)
{
int i;
if ((index < 0) || (index >= keyboards))
return (-1);
for (i = index; i < keyboards; ++i) {
if (keyboard[i] == NULL)
continue;
if (!KBD_IS_VALID(keyboard[i]))
continue;
if (strcmp("*", driver) && strcmp(keyboard[i]->kb_name, driver))
continue;
if ((unit != -1) && (keyboard[i]->kb_unit != unit))
continue;
return (i);
}
return (-1);
}
/* find the keyboard specified by a driver name and a unit number */
int
kbd_find_keyboard(char *driver, int unit)
{
return (kbd_find_keyboard2(driver, unit, 0));
}
/* allocate a keyboard */
int
kbd_allocate(char *driver, int unit, void *id, kbd_callback_func_t *func,
void *arg)
{
int index;
int s;
if (func == NULL)
return (-1);
s = spltty();
index = kbd_find_keyboard(driver, unit);
if (index >= 0) {
if (KBD_IS_BUSY(keyboard[index])) {
splx(s);
return (-1);
}
keyboard[index]->kb_token = id;
KBD_BUSY(keyboard[index]);
keyboard[index]->kb_callback.kc_func = func;
keyboard[index]->kb_callback.kc_arg = arg;
kbdd_clear_state(keyboard[index]);
}
splx(s);
return (index);
}
int
kbd_release(keyboard_t *kbd, void *id)
{
int error;
int s;
s = spltty();
if (!KBD_IS_VALID(kbd) || !KBD_IS_BUSY(kbd)) {
error = EINVAL;
} else if (kbd->kb_token != id) {
error = EPERM;
} else {
kbd->kb_token = NULL;
KBD_UNBUSY(kbd);
kbd->kb_callback.kc_func = NULL;
kbd->kb_callback.kc_arg = NULL;
kbdd_clear_state(kbd);
error = 0;
}
splx(s);
return (error);
}
int
kbd_change_callback(keyboard_t *kbd, void *id, kbd_callback_func_t *func,
void *arg)
{
int error;
int s;
s = spltty();
if (!KBD_IS_VALID(kbd) || !KBD_IS_BUSY(kbd)) {
error = EINVAL;
} else if (kbd->kb_token != id) {
error = EPERM;
} else if (func == NULL) {
error = EINVAL;
} else {
kbd->kb_callback.kc_func = func;
kbd->kb_callback.kc_arg = arg;
error = 0;
}
splx(s);
return (error);
}
/* get a keyboard structure */
keyboard_t *
kbd_get_keyboard(int index)
{
if ((index < 0) || (index >= keyboards))
return (NULL);
if (keyboard[index] == NULL)
return (NULL);
if (!KBD_IS_VALID(keyboard[index]))
return (NULL);
return (keyboard[index]);
}
/*
* The back door for the console driver; configure keyboards
* This function is for the kernel console to initialize keyboards
* at a very early stage.
*/
int
kbd_configure(int flags)
{
const keyboard_driver_t **list;
const keyboard_driver_t *p;
SLIST_FOREACH(p, &keyboard_drivers, link) {
if (p->configure != NULL)
(*p->configure)(flags);
}
SET_FOREACH(list, kbddriver_set) {
p = *list;
if (p->configure != NULL)
(*p->configure)(flags);
}
return (0);
}
#ifdef KBD_INSTALL_CDEV
/*
* Virtual keyboard cdev driver functions
* The virtual keyboard driver dispatches driver functions to
* appropriate subdrivers.
*/
#define KBD_UNIT(dev) dev2unit(dev)
static d_open_t genkbdopen;
static d_close_t genkbdclose;
static d_read_t genkbdread;
static d_write_t genkbdwrite;
static d_ioctl_t genkbdioctl;
static d_poll_t genkbdpoll;
static struct cdevsw kbd_cdevsw = {
.d_version = D_VERSION,
.d_flags = D_NEEDGIANT,
.d_open = genkbdopen,
.d_close = genkbdclose,
.d_read = genkbdread,
.d_write = genkbdwrite,
.d_ioctl = genkbdioctl,
.d_poll = genkbdpoll,
.d_name = "kbd",
};
int
kbd_attach(keyboard_t *kbd)
{
if (kbd->kb_index >= keyboards)
return (EINVAL);
if (keyboard[kbd->kb_index] != kbd)
return (EINVAL);
kbd->kb_dev = make_dev(&kbd_cdevsw, kbd->kb_index, UID_ROOT, GID_WHEEL,
0600, "%s%r", kbd->kb_name, kbd->kb_unit);
make_dev_alias(kbd->kb_dev, "kbd%r", kbd->kb_index);
kbd->kb_dev->si_drv1 = malloc(sizeof(genkbd_softc_t), M_DEVBUF,
M_WAITOK | M_ZERO);
printf("kbd%d at %s%d\n", kbd->kb_index, kbd->kb_name, kbd->kb_unit);
return (0);
}
int
kbd_detach(keyboard_t *kbd)
{
if (kbd->kb_index >= keyboards)
return (EINVAL);
if (keyboard[kbd->kb_index] != kbd)
return (EINVAL);
free(kbd->kb_dev->si_drv1, M_DEVBUF);
destroy_dev(kbd->kb_dev);
return (0);
}
/*
* Generic keyboard cdev driver functions
* Keyboard subdrivers may call these functions to implement common
* driver functions.
*/
static void
genkbd_putc(genkbd_softc_t *sc, char c)
{
unsigned int p;
if (sc->gkb_q_length == KB_QSIZE)
return;
p = (sc->gkb_q_start + sc->gkb_q_length) % KB_QSIZE;
sc->gkb_q[p] = c;
sc->gkb_q_length++;
}
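/*
* Copy up to len bytes out of the circular input queue. At most one
* contiguous run is copied per call (the read never wraps past the end of
* gkb_q), so callers such as genkbdread() loop until this returns 0.
*/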
static size_t
genkbd_getc(genkbd_softc_t *sc, char *buf, size_t len)
{
/* Determine copy size. */
if (sc->gkb_q_length == 0)
return (0);
if (len >= sc->gkb_q_length)
len = sc->gkb_q_length;
if (len >= KB_QSIZE - sc->gkb_q_start)
len = KB_QSIZE - sc->gkb_q_start;
/* Copy out data and progress offset. */
memcpy(buf, sc->gkb_q + sc->gkb_q_start, len);
sc->gkb_q_start = (sc->gkb_q_start + len) % KB_QSIZE;
sc->gkb_q_length -= len;
return (len);
}
static kbd_callback_func_t genkbd_event;
static int
genkbdopen(struct cdev *dev, int mode, int flag, struct thread *td)
{
keyboard_t *kbd;
genkbd_softc_t *sc;
int s;
int i;
s = spltty();
sc = dev->si_drv1;
kbd = kbd_get_keyboard(KBD_INDEX(dev));
if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) {
splx(s);
return (ENXIO);
}
i = kbd_allocate(kbd->kb_name, kbd->kb_unit, sc,
genkbd_event, (void *)sc);
if (i < 0) {
splx(s);
return (EBUSY);
}
/* assert(i == kbd->kb_index) */
/* assert(kbd == kbd_get_keyboard(i)) */
/*
* NOTE: even when we have successfully claimed a keyboard,
* the device may still be missing (!KBD_HAS_DEVICE(kbd)).
*/
sc->gkb_q_length = 0;
splx(s);
return (0);
}
static int
genkbdclose(struct cdev *dev, int mode, int flag, struct thread *td)
{
keyboard_t *kbd;
genkbd_softc_t *sc;
int s;
/*
* NOTE: the device may have already become invalid.
* kbd == NULL || !KBD_IS_VALID(kbd)
*/
s = spltty();
sc = dev->si_drv1;
kbd = kbd_get_keyboard(KBD_INDEX(dev));
if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) {
/* XXX: we will be forgiving and not report an error... */
} else {
kbd_release(kbd, (void *)sc);
}
splx(s);
return (0);
}
static int
genkbdread(struct cdev *dev, struct uio *uio, int flag)
{
keyboard_t *kbd;
genkbd_softc_t *sc;
u_char buffer[KB_BUFSIZE];
int len;
int error;
int s;
/* wait for input */
s = spltty();
sc = dev->si_drv1;
kbd = kbd_get_keyboard(KBD_INDEX(dev));
if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) {
splx(s);
return (ENXIO);
}
while (sc->gkb_q_length == 0) {
if (flag & O_NONBLOCK) {
splx(s);
return (EWOULDBLOCK);
}
sc->gkb_flags |= KB_ASLEEP;
error = tsleep(sc, PZERO | PCATCH, "kbdrea", 0);
kbd = kbd_get_keyboard(KBD_INDEX(dev));
if ((kbd == NULL) || !KBD_IS_VALID(kbd)) {
splx(s);
return (ENXIO); /* our keyboard has gone... */
}
if (error) {
sc->gkb_flags &= ~KB_ASLEEP;
splx(s);
return (error);
}
}
splx(s);
/* copy as much input as possible */
error = 0;
while (uio->uio_resid > 0) {
len = imin(uio->uio_resid, sizeof(buffer));
len = genkbd_getc(sc, buffer, len);
if (len <= 0)
break;
error = uiomove(buffer, len, uio);
if (error)
break;
}
return (error);
}
static int
genkbdwrite(struct cdev *dev, struct uio *uio, int flag)
{
keyboard_t *kbd;
kbd = kbd_get_keyboard(KBD_INDEX(dev));
if ((kbd == NULL) || !KBD_IS_VALID(kbd))
return (ENXIO);
return (ENODEV);
}
static int
genkbdioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
keyboard_t *kbd;
int error;
kbd = kbd_get_keyboard(KBD_INDEX(dev));
if ((kbd == NULL) || !KBD_IS_VALID(kbd))
return (ENXIO);
error = kbdd_ioctl(kbd, cmd, arg);
if (error == ENOIOCTL)
error = ENODEV;
return (error);
}
static int
genkbdpoll(struct cdev *dev, int events, struct thread *td)
{
keyboard_t *kbd;
genkbd_softc_t *sc;
int revents;
int s;
revents = 0;
s = spltty();
sc = dev->si_drv1;
kbd = kbd_get_keyboard(KBD_INDEX(dev));
if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) {
revents = POLLHUP; /* the keyboard has gone */
} else if (events & (POLLIN | POLLRDNORM)) {
if (sc->gkb_q_length > 0)
revents = events & (POLLIN | POLLRDNORM);
else
selrecord(td, &sc->gkb_rsel);
}
splx(s);
return (revents);
}
static int
genkbd_event(keyboard_t *kbd, int event, void *arg)
{
genkbd_softc_t *sc;
size_t len;
u_char *cp;
int mode;
u_int c;
/* assert(KBD_IS_VALID(kbd)) */
sc = (genkbd_softc_t *)arg;
switch (event) {
case KBDIO_KEYINPUT:
break;
case KBDIO_UNLOADING:
/* the keyboard is going... */
kbd_release(kbd, (void *)sc);
if (sc->gkb_flags & KB_ASLEEP) {
sc->gkb_flags &= ~KB_ASLEEP;
wakeup(sc);
}
selwakeuppri(&sc->gkb_rsel, PZERO);
return (0);
default:
return (EINVAL);
}
/* obtain the current key input mode */
if (kbdd_ioctl(kbd, KDGKBMODE, (caddr_t)&mode))
mode = K_XLATE;
/* read all pending input */
while (kbdd_check_char(kbd)) {
c = kbdd_read_char(kbd, FALSE);
if (c == NOKEY)
continue;
if (c == ERRKEY) /* XXX: ring bell? */
continue;
if (!KBD_IS_BUSY(kbd))
/* the device is not open, discard the input */
continue;
/* store the byte as is for K_RAW and K_CODE modes */
if (mode != K_XLATE) {
genkbd_putc(sc, KEYCHAR(c));
continue;
}
/* K_XLATE */
if (c & RELKEY) /* key release is ignored */
continue;
/* process special keys; most of them are just ignored... */
if (c & SPCLKEY) {
switch (KEYCHAR(c)) {
default:
/* ignore them... */
continue;
case BTAB: /* a backtab: ESC [ Z */
genkbd_putc(sc, 0x1b);
genkbd_putc(sc, '[');
genkbd_putc(sc, 'Z');
continue;
}
}
/* normal chars, normal chars with the META, function keys */
switch (KEYFLAGS(c)) {
case 0: /* a normal char */
genkbd_putc(sc, KEYCHAR(c));
break;
case MKEY: /* the META flag: prepend ESC */
genkbd_putc(sc, 0x1b);
genkbd_putc(sc, KEYCHAR(c));
break;
case FKEY | SPCLKEY: /* a function key, return string */
cp = kbdd_get_fkeystr(kbd, KEYCHAR(c), &len);
if (cp != NULL) {
while (len-- > 0)
genkbd_putc(sc, *cp++);
}
break;
}
}
/* wake up sleeping/polling processes */
if (sc->gkb_q_length > 0) {
if (sc->gkb_flags & KB_ASLEEP) {
sc->gkb_flags &= ~KB_ASLEEP;
wakeup(sc);
}
selwakeuppri(&sc->gkb_rsel, PZERO);
}
return (0);
}
#endif /* KBD_INSTALL_CDEV */
/*
* Generic low-level keyboard functions
* The low-level functions in the keyboard subdriver may use these
* functions.
*/
#ifndef KBD_DISABLE_KEYMAP_LOAD
static int key_change_ok(struct keyent_t *, struct keyent_t *, struct thread *);
static int keymap_change_ok(keymap_t *, keymap_t *, struct thread *);
static int accent_change_ok(accentmap_t *, accentmap_t *, struct thread *);
static int fkey_change_ok(fkeytab_t *, fkeyarg_t *, struct thread *);
#endif
int
genkbd_commonioctl(keyboard_t *kbd, u_long cmd, caddr_t arg)
{
keymap_t *mapp;
okeymap_t *omapp;
keyarg_t *keyp;
fkeyarg_t *fkeyp;
int s;
int i, j;
int error;
s = spltty();
switch (cmd) {
case KDGKBINFO: /* get keyboard information */
((keyboard_info_t *)arg)->kb_index = kbd->kb_index;
i = imin(strlen(kbd->kb_name) + 1,
sizeof(((keyboard_info_t *)arg)->kb_name));
bcopy(kbd->kb_name, ((keyboard_info_t *)arg)->kb_name, i);
((keyboard_info_t *)arg)->kb_unit = kbd->kb_unit;
((keyboard_info_t *)arg)->kb_type = kbd->kb_type;
((keyboard_info_t *)arg)->kb_config = kbd->kb_config;
((keyboard_info_t *)arg)->kb_flags = kbd->kb_flags;
break;
case KDGKBTYPE: /* get keyboard type */
*(int *)arg = kbd->kb_type;
break;
case KDGETREPEAT: /* get keyboard repeat rate */
((int *)arg)[0] = kbd->kb_delay1;
((int *)arg)[1] = kbd->kb_delay2;
break;
case GIO_KEYMAP: /* get keyboard translation table */
error = copyout(kbd->kb_keymap, *(void **)arg,
sizeof(keymap_t));
splx(s);
return (error);
case OGIO_KEYMAP: /* get keyboard translation table (compat) */
mapp = kbd->kb_keymap;
omapp = (okeymap_t *)arg;
omapp->n_keys = mapp->n_keys;
for (i = 0; i < NUM_KEYS; i++) {
for (j = 0; j < NUM_STATES; j++)
omapp->key[i].map[j] =
mapp->key[i].map[j];
omapp->key[i].spcl = mapp->key[i].spcl;
omapp->key[i].flgs = mapp->key[i].flgs;
}
break;
case PIO_KEYMAP: /* set keyboard translation table */
case OPIO_KEYMAP: /* set keyboard translation table (compat) */
#ifndef KBD_DISABLE_KEYMAP_LOAD
mapp = malloc(sizeof *mapp, M_TEMP, M_WAITOK);
if (cmd == OPIO_KEYMAP) {
omapp = (okeymap_t *)arg;
mapp->n_keys = omapp->n_keys;
for (i = 0; i < NUM_KEYS; i++) {
for (j = 0; j < NUM_STATES; j++)
mapp->key[i].map[j] =
omapp->key[i].map[j];
mapp->key[i].spcl = omapp->key[i].spcl;
mapp->key[i].flgs = omapp->key[i].flgs;
}
} else {
error = copyin(*(void **)arg, mapp, sizeof *mapp);
if (error != 0) {
splx(s);
free(mapp, M_TEMP);
return (error);
}
}
error = keymap_change_ok(kbd->kb_keymap, mapp, curthread);
if (error != 0) {
splx(s);
free(mapp, M_TEMP);
return (error);
}
bzero(kbd->kb_accentmap, sizeof(*kbd->kb_accentmap));
bcopy(mapp, kbd->kb_keymap, sizeof(*kbd->kb_keymap));
free(mapp, M_TEMP);
break;
#else
splx(s);
return (ENODEV);
#endif
case GIO_KEYMAPENT: /* get keyboard translation table entry */
keyp = (keyarg_t *)arg;
if (keyp->keynum >= sizeof(kbd->kb_keymap->key) /
sizeof(kbd->kb_keymap->key[0])) {
splx(s);
return (EINVAL);
}
bcopy(&kbd->kb_keymap->key[keyp->keynum], &keyp->key,
sizeof(keyp->key));
break;
case PIO_KEYMAPENT: /* set keyboard translation table entry */
#ifndef KBD_DISABLE_KEYMAP_LOAD
keyp = (keyarg_t *)arg;
if (keyp->keynum >= sizeof(kbd->kb_keymap->key) /
sizeof(kbd->kb_keymap->key[0])) {
splx(s);
return (EINVAL);
}
error = key_change_ok(&kbd->kb_keymap->key[keyp->keynum],
&keyp->key, curthread);
if (error != 0) {
splx(s);
return (error);
}
bcopy(&keyp->key, &kbd->kb_keymap->key[keyp->keynum],
sizeof(keyp->key));
break;
#else
splx(s);
return (ENODEV);
#endif
case GIO_DEADKEYMAP: /* get accent key translation table */
bcopy(kbd->kb_accentmap, arg, sizeof(*kbd->kb_accentmap));
break;
case PIO_DEADKEYMAP: /* set accent key translation table */
#ifndef KBD_DISABLE_KEYMAP_LOAD
error = accent_change_ok(kbd->kb_accentmap,
(accentmap_t *)arg, curthread);
if (error != 0) {
splx(s);
return (error);
}
bcopy(arg, kbd->kb_accentmap, sizeof(*kbd->kb_accentmap));
break;
#else
splx(s);
return (ENODEV);
#endif
case GETFKEY: /* get function key string */
fkeyp = (fkeyarg_t *)arg;
if (fkeyp->keynum >= kbd->kb_fkeytab_size) {
splx(s);
return (EINVAL);
}
bcopy(kbd->kb_fkeytab[fkeyp->keynum].str, fkeyp->keydef,
kbd->kb_fkeytab[fkeyp->keynum].len);
fkeyp->flen = kbd->kb_fkeytab[fkeyp->keynum].len;
break;
case SETFKEY: /* set function key string */
#ifndef KBD_DISABLE_KEYMAP_LOAD
fkeyp = (fkeyarg_t *)arg;
if (fkeyp->keynum >= kbd->kb_fkeytab_size) {
splx(s);
return (EINVAL);
}
error = fkey_change_ok(&kbd->kb_fkeytab[fkeyp->keynum],
fkeyp, curthread);
if (error != 0) {
splx(s);
return (error);
}
kbd->kb_fkeytab[fkeyp->keynum].len = min(fkeyp->flen, MAXFK);
bcopy(fkeyp->keydef, kbd->kb_fkeytab[fkeyp->keynum].str,
kbd->kb_fkeytab[fkeyp->keynum].len);
break;
#else
splx(s);
return (ENODEV);
#endif
default:
splx(s);
return (ENOIOCTL);
}
splx(s);
return (0);
}
#ifndef KBD_DISABLE_KEYMAP_LOAD
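/*
 * A key state is "restricted" when it is a special action (the spcl bit
 * for that state is set) that affects the whole system: reboot, suspend,
 * standby, debugger, panic, halt or power-down.  Loading keymap entries
 * that add or remove such actions is governed by keymap_restrict_change.
 */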
#define RESTRICTED_KEY(key, i) \
((key->spcl & (0x80 >> i)) && \
(key->map[i] == RBT || key->map[i] == SUSP || \
key->map[i] == STBY || key->map[i] == DBG || \
key->map[i] == PNC || key->map[i] == HALT || \
key->map[i] == PDWN))
static int
key_change_ok(struct keyent_t *oldkey, struct keyent_t *newkey, struct thread *td)
{
int i;
/* Low keymap_restrict_change means any changes are OK. */
if (keymap_restrict_change <= 0)
return (0);
/* High keymap_restrict_change means only root can change the keymap. */
if (keymap_restrict_change >= 2) {
for (i = 0; i < NUM_STATES; i++)
if (oldkey->map[i] != newkey->map[i])
return priv_check(td, PRIV_KEYBOARD);
if (oldkey->spcl != newkey->spcl)
return priv_check(td, PRIV_KEYBOARD);
if (oldkey->flgs != newkey->flgs)
return priv_check(td, PRIV_KEYBOARD);
return (0);
}
/* Otherwise we have to see if any special keys are being changed. */
for (i = 0; i < NUM_STATES; i++) {
/*
* If either the oldkey or the newkey action is restricted
* then we must make sure that the action doesn't change.
*/
if (!RESTRICTED_KEY(oldkey, i) && !RESTRICTED_KEY(newkey, i))
continue;
if ((oldkey->spcl & (0x80 >> i)) == (newkey->spcl & (0x80 >> i))
&& oldkey->map[i] == newkey->map[i])
continue;
return priv_check(td, PRIV_KEYBOARD);
}
return (0);
}
static int
keymap_change_ok(keymap_t *oldmap, keymap_t *newmap, struct thread *td)
{
int keycode, error;
for (keycode = 0; keycode < NUM_KEYS; keycode++) {
if ((error = key_change_ok(&oldmap->key[keycode],
&newmap->key[keycode], td)) != 0)
return (error);
}
return (0);
}
static int
accent_change_ok(accentmap_t *oldmap, accentmap_t *newmap, struct thread *td)
{
struct acc_t *oldacc, *newacc;
int accent, i;
if (keymap_restrict_change <= 2)
return (0);
if (oldmap->n_accs != newmap->n_accs)
return priv_check(td, PRIV_KEYBOARD);
for (accent = 0; accent < oldmap->n_accs; accent++) {
oldacc = &oldmap->acc[accent];
newacc = &newmap->acc[accent];
if (oldacc->accchar != newacc->accchar)
return priv_check(td, PRIV_KEYBOARD);
for (i = 0; i < NUM_ACCENTCHARS; ++i) {
if (oldacc->map[i][0] != newacc->map[i][0])
return priv_check(td, PRIV_KEYBOARD);
if (oldacc->map[i][0] == 0) /* end of table */
break;
if (oldacc->map[i][1] != newacc->map[i][1])
return priv_check(td, PRIV_KEYBOARD);
}
}
return (0);
}
static int
fkey_change_ok(fkeytab_t *oldkey, fkeyarg_t *newkey, struct thread *td)
{
if (keymap_restrict_change <= 3)
return (0);
if (oldkey->len != newkey->flen ||
bcmp(oldkey->str, newkey->keydef, oldkey->len) != 0)
return priv_check(td, PRIV_KEYBOARD);
return (0);
}
#endif
/* get a pointer to the string associated with the given function key */
u_char *
genkbd_get_fkeystr(keyboard_t *kbd, int fkey, size_t *len)
{
if (kbd == NULL)
return (NULL);
fkey -= F_FN;
if (fkey > kbd->kb_fkeytab_size)
return (NULL);
*len = kbd->kb_fkeytab[fkey].len;
return (kbd->kb_fkeytab[fkey].str);
}
/* diagnostic dump */
static char *
get_kbd_type_name(int type)
{
static struct {
int type;
char *name;
} name_table[] = {
{ KB_84, "AT 84" },
{ KB_101, "AT 101/102" },
{ KB_OTHER, "generic" },
};
int i;
for (i = 0; i < nitems(name_table); ++i) {
if (type == name_table[i].type)
return (name_table[i].name);
}
return ("unknown");
}
void
genkbd_diag(keyboard_t *kbd, int level)
{
if (level > 0) {
printf("kbd%d: %s%d, %s (%d), config:0x%x, flags:0x%x",
kbd->kb_index, kbd->kb_name, kbd->kb_unit,
get_kbd_type_name(kbd->kb_type), kbd->kb_type,
kbd->kb_config, kbd->kb_flags);
if (kbd->kb_io_base > 0)
printf(", port:0x%x-0x%x", kbd->kb_io_base,
kbd->kb_io_base + kbd->kb_io_size - 1);
printf("\n");
}
}
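/*
 * Handle the press of a lock key (Num/Caps/Scroll/Alt lock): on the first
 * DOWN event latch the DOWN bit, toggle the corresponding locked state and
 * push the resulting LED state to the keyboard with KDSETLED.
 */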
#define set_lockkey_state(k, s, l) \
if (!((s) & l ## DOWN)) { \
int i; \
(s) |= l ## DOWN; \
(s) ^= l ## ED; \
i = (s) & LOCK_MASK; \
(void)kbdd_ioctl((k), KDSETLED, (caddr_t)&i); \
}
static u_int
save_accent_key(keyboard_t *kbd, u_int key, int *accents)
{
int i;
/* make an index into the accent map */
i = key - F_ACC + 1;
if ((i > kbd->kb_accentmap->n_accs)
|| (kbd->kb_accentmap->acc[i - 1].accchar == 0)) {
/* the index is out of range or pointing to an empty entry */
*accents = 0;
return (ERRKEY);
}
/*
* If the same accent key has been hit twice, produce the accent
* char itself.
*/
if (i == *accents) {
key = kbd->kb_accentmap->acc[i - 1].accchar;
*accents = 0;
return (key);
}
/* remember the index and wait for the next key */
*accents = i;
return (NOKEY);
}
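/*
 * Combine the pending accent (dead key) with the following character by
 * scanning the accent map; a space yields the accent character itself,
 * and ERRKEY is returned when the character cannot be accented.
 */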
static u_int
make_accent_char(keyboard_t *kbd, u_int ch, int *accents)
{
struct acc_t *acc;
int i;
acc = &kbd->kb_accentmap->acc[*accents - 1];
*accents = 0;
/*
* If the accent key is followed by the space key,
* produce the accent char itself.
*/
if (ch == ' ')
return (acc->accchar);
/* scan the accent map */
for (i = 0; i < NUM_ACCENTCHARS; ++i) {
if (acc->map[i][0] == 0) /* end of table */
break;
if (acc->map[i][0] == ch)
return (acc->map[i][1]);
}
/* this char cannot be accented... */
return (ERRKEY);
}
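/*
 * Translate a keycode and press/release event into an action using the
 * keymap, updating *shiftstate (shift/ctrl/alt/meta/lock bits) and
 * *accents (pending dead-key index) along the way.  Returns NOKEY for
 * events that produce no input, ERRKEY on error, a plain character
 * (possibly with the MKEY flag), or SPCLKEY | action (with RELKEY on
 * release) for special keys.
 */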
int
genkbd_keyaction(keyboard_t *kbd, int keycode, int up, int *shiftstate,
int *accents)
{
struct keyent_t *key;
int state = *shiftstate;
int action;
int f;
int i;
i = keycode;
f = state & (AGRS | ALKED);
if ((f == AGRS1) || (f == AGRS2) || (f == ALKED))
i += ALTGR_OFFSET;
key = &kbd->kb_keymap->key[i];
i = ((state & SHIFTS) ? 1 : 0)
| ((state & CTLS) ? 2 : 0)
| ((state & ALTS) ? 4 : 0);
if (((key->flgs & FLAG_LOCK_C) && (state & CLKED))
|| ((key->flgs & FLAG_LOCK_N) && (state & NLKED)) )
i ^= 1;
if (up) { /* break: key released */
action = kbd->kb_lastact[keycode];
kbd->kb_lastact[keycode] = NOP;
switch (action) {
case LSHA:
if (state & SHIFTAON) {
set_lockkey_state(kbd, state, ALK);
state &= ~ALKDOWN;
}
action = LSH;
/* FALL THROUGH */
case LSH:
state &= ~SHIFTS1;
break;
case RSHA:
if (state & SHIFTAON) {
set_lockkey_state(kbd, state, ALK);
state &= ~ALKDOWN;
}
action = RSH;
/* FALL THROUGH */
case RSH:
state &= ~SHIFTS2;
break;
case LCTRA:
if (state & SHIFTAON) {
set_lockkey_state(kbd, state, ALK);
state &= ~ALKDOWN;
}
action = LCTR;
/* FALL THROUGH */
case LCTR:
state &= ~CTLS1;
break;
case RCTRA:
if (state & SHIFTAON) {
set_lockkey_state(kbd, state, ALK);
state &= ~ALKDOWN;
}
action = RCTR;
/* FALL THROUGH */
case RCTR:
state &= ~CTLS2;
break;
case LALTA:
if (state & SHIFTAON) {
set_lockkey_state(kbd, state, ALK);
state &= ~ALKDOWN;
}
action = LALT;
/* FALL THROUGH */
case LALT:
state &= ~ALTS1;
break;
case RALTA:
if (state & SHIFTAON) {
set_lockkey_state(kbd, state, ALK);
state &= ~ALKDOWN;
}
action = RALT;
/* FALL THROUGH */
case RALT:
state &= ~ALTS2;
break;
case ASH:
state &= ~AGRS1;
break;
case META:
state &= ~METAS1;
break;
case NLK:
state &= ~NLKDOWN;
break;
case CLK:
state &= ~CLKDOWN;
break;
case SLK:
state &= ~SLKDOWN;
break;
case ALK:
state &= ~ALKDOWN;
break;
case NOP:
/* release events of regular keys are not reported */
*shiftstate &= ~SHIFTAON;
return (NOKEY);
}
*shiftstate = state & ~SHIFTAON;
return (SPCLKEY | RELKEY | action);
} else { /* make: key pressed */
action = key->map[i];
state &= ~SHIFTAON;
if (key->spcl & (0x80 >> i)) {
/* special keys */
if (kbd->kb_lastact[keycode] == NOP)
kbd->kb_lastact[keycode] = action;
if (kbd->kb_lastact[keycode] != action)
action = NOP;
switch (action) {
/* LOCKING KEYS */
case NLK:
set_lockkey_state(kbd, state, NLK);
break;
case CLK:
set_lockkey_state(kbd, state, CLK);
break;
case SLK:
set_lockkey_state(kbd, state, SLK);
break;
case ALK:
set_lockkey_state(kbd, state, ALK);
break;
/* NON-LOCKING KEYS */
case SPSC: case RBT: case SUSP: case STBY:
case DBG: case NEXT: case PREV: case PNC:
case HALT: case PDWN:
*accents = 0;
break;
case BTAB:
*accents = 0;
action |= BKEY;
break;
case LSHA:
state |= SHIFTAON;
action = LSH;
/* FALL THROUGH */
case LSH:
state |= SHIFTS1;
break;
case RSHA:
state |= SHIFTAON;
action = RSH;
/* FALL THROUGH */
case RSH:
state |= SHIFTS2;
break;
case LCTRA:
state |= SHIFTAON;
action = LCTR;
/* FALL THROUGH */
case LCTR:
state |= CTLS1;
break;
case RCTRA:
state |= SHIFTAON;
action = RCTR;
/* FALL THROUGH */
case RCTR:
state |= CTLS2;
break;
case LALTA:
state |= SHIFTAON;
action = LALT;
/* FALL THROUGH */
case LALT:
state |= ALTS1;
break;
case RALTA:
state |= SHIFTAON;
action = RALT;
/* FALL THROUGH */
case RALT:
state |= ALTS2;
break;
case ASH:
state |= AGRS1;
break;
case META:
state |= METAS1;
break;
case NOP:
*shiftstate = state;
return (NOKEY);
default:
/* is this an accent (dead) key? */
*shiftstate = state;
if (action >= F_ACC && action <= L_ACC) {
action = save_accent_key(kbd, action,
accents);
switch (action) {
case NOKEY:
case ERRKEY:
return (action);
default:
if (state & METAS)
return (action | MKEY);
else
return (action);
}
/* NOT REACHED */
}
/* other special keys */
if (*accents > 0) {
*accents = 0;
return (ERRKEY);
}
if (action >= F_FN && action <= L_FN)
action |= FKEY;
/* XXX: return fkey string for the FKEY? */
return (SPCLKEY | action);
}
*shiftstate = state;
return (SPCLKEY | action);
} else {
/* regular keys */
kbd->kb_lastact[keycode] = NOP;
*shiftstate = state;
if (*accents > 0) {
/* make an accented char */
action = make_accent_char(kbd, action, accents);
if (action == ERRKEY)
return (action);
}
if (state & METAS)
action |= MKEY;
return (action);
}
}
/* NOT REACHED */
}
Index: head/sys/dev/liquidio/base/lio_request_manager.c
===================================================================
--- head/sys/dev/liquidio/base/lio_request_manager.c (revision 328217)
+++ head/sys/dev/liquidio/base/lio_request_manager.c (revision 328218)
@@ -1,858 +1,858 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_rxtx.h"
struct lio_iq_post_status {
int status;
int index;
};
static void lio_check_db_timeout(void *arg, int pending);
static void __lio_check_db_timeout(struct octeon_device *oct,
uint64_t iq_no);
/* Return 0 on success, 1 on failure */
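/*
 * Set up one instruction (TX) queue: create the DMA tag, allocate the
 * descriptor ring and the per-descriptor request list with DMA maps,
 * program the IQ registers, and start the doorbell-timeout taskqueue and
 * the buf_ring used for transmit.
 */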
int
lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
uint32_t num_descs)
{
struct lio_instr_queue *iq;
struct lio_iq_config *conf = NULL;
struct lio_tq *db_tq;
struct lio_request_list *request_buf;
bus_size_t max_size;
uint32_t iq_no = (uint32_t)txpciq.s.q_no;
uint32_t q_size;
int error, i;
if (LIO_CN23XX_PF(oct))
conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)));
if (conf == NULL) {
lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id);
return (1);
}
q_size = (uint32_t)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
iq->oct_dev = oct;
max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs;
error = bus_dma_tag_create(bus_get_dma_tag(oct->device), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
max_size, /* maxsize */
LIO_MAX_SG, /* nsegments */
PAGE_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&iq->txtag);
if (error) {
lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
iq_no);
return (1);
}
iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma);
if (!iq->base_addr) {
lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
iq_no);
return (1);
}
iq->max_count = num_descs;
/*
* Initialize a list to hold requests that have been posted to
* Octeon but have yet to be fetched by Octeon.
*/
- iq->request_list = mallocarray(num_descs, sizeof(*iq->request_list),
+ iq->request_list = malloc(sizeof(*iq->request_list) * num_descs,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (iq->request_list == NULL) {
lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n",
iq_no);
return (1);
}
lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n",
iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma),
iq->max_count);
/* Create the descriptor buffer dma maps */
request_buf = iq->request_list;
for (i = 0; i < num_descs; i++, request_buf++) {
error = bus_dmamap_create(iq->txtag, 0, &request_buf->map);
if (error) {
lio_dev_err(oct, "Unable to create TX DMA map\n");
return (1);
}
}
iq->txpciq.txpciq64 = txpciq.txpciq64;
iq->fill_cnt = 0;
iq->host_write_index = 0;
iq->octeon_read_index = 0;
iq->flush_index = 0;
iq->last_db_time = 0;
iq->db_timeout = (uint32_t)conf->db_timeout;
atomic_store_rel_int(&iq->instr_pending, 0);
/* Initialize the lock for this instruction queue */
mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF);
mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF);
mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF);
mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL,
MTX_DEF);
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
db_tq = &oct->check_db_tq[iq_no];
db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
taskqueue_thread_enqueue, &db_tq->tq);
if (db_tq->tq == NULL) {
lio_dev_err(oct, "check db wq create failed for iq %d\n",
iq_no);
return (1);
}
TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
(void *)db_tq);
db_tq->ctxul = iq_no;
db_tq->ctxptr = oct;
taskqueue_start_threads(&db_tq->tq, 1, PI_NET,
"lio%d_check_db_timeout:%d",
oct->octeon_id, iq_no);
taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1);
/* Allocate a buf ring */
oct->instr_queue[iq_no]->br =
buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
&oct->instr_queue[iq_no]->enq_lock);
if (oct->instr_queue[iq_no]->br == NULL) {
lio_dev_err(oct, "Critical Failure setting up buf ring\n");
return (1);
}
return (0);
}
int
lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no)
{
struct lio_instr_queue *iq = oct->instr_queue[iq_no];
struct lio_request_list *request_buf;
struct lio_mbuf_free_info *finfo;
uint64_t desc_size = 0, q_size;
int i;
lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no);
if (oct->check_db_tq[iq_no].tq != NULL) {
while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq,
&oct->check_db_tq[iq_no].work,
NULL))
taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq,
&oct->check_db_tq[iq_no].work);
taskqueue_free(oct->check_db_tq[iq_no].tq);
oct->check_db_tq[iq_no].tq = NULL;
}
if (LIO_CN23XX_PF(oct))
desc_size =
LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf));
request_buf = iq->request_list;
for (i = 0; i < iq->max_count; i++, request_buf++) {
if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) ||
(request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) {
if (request_buf->buf != NULL) {
finfo = request_buf->buf;
bus_dmamap_sync(iq->txtag, request_buf->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(iq->txtag,
request_buf->map);
m_freem(finfo->mb);
request_buf->buf = NULL;
if (request_buf->map != NULL) {
bus_dmamap_destroy(iq->txtag,
request_buf->map);
request_buf->map = NULL;
}
} else if (request_buf->map != NULL) {
bus_dmamap_unload(iq->txtag, request_buf->map);
bus_dmamap_destroy(iq->txtag, request_buf->map);
request_buf->map = NULL;
}
}
}
if (iq->br != NULL) {
buf_ring_free(iq->br, M_DEVBUF);
iq->br = NULL;
}
if (iq->request_list != NULL) {
free(iq->request_list, M_DEVBUF);
iq->request_list = NULL;
}
if (iq->txtag != NULL) {
bus_dma_tag_destroy(iq->txtag);
iq->txtag = NULL;
}
if (iq->base_addr) {
q_size = iq->max_count * desc_size;
lio_dma_free((uint32_t)q_size, iq->base_addr);
oct->io_qmask.iq &= ~(1ULL << iq_no);
bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue));
oct->num_iqs--;
return (0);
}
return (1);
}
/* Return 0 on success, 1 on failure */
int
lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index,
union octeon_txpciq txpciq, uint32_t num_descs)
{
uint32_t iq_no = (uint32_t)txpciq.s.q_no;
if (oct->instr_queue[iq_no]->oct_dev != NULL) {
lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n",
iq_no);
oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
return (0);
}
oct->instr_queue[iq_no]->q_index = q_index;
oct->instr_queue[iq_no]->ifidx = ifidx;
if (lio_init_instr_queue(oct, txpciq, num_descs)) {
lio_delete_instr_queue(oct, iq_no);
return (1);
}
oct->num_iqs++;
if (oct->fn_list.enable_io_queues(oct))
return (1);
return (0);
}
int
lio_wait_for_instr_fetch(struct octeon_device *oct)
{
int i, retry = 1000, pending, instr_cnt = 0;
do {
instr_cnt = 0;
for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
pending = atomic_load_acq_int(
&oct->instr_queue[i]->instr_pending);
if (pending)
__lio_check_db_timeout(oct, i);
instr_cnt += pending;
}
if (instr_cnt == 0)
break;
lio_sleep_timeout(1);
} while (retry-- && instr_cnt);
return (instr_cnt);
}
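/*
 * Ring the hardware doorbell: write the accumulated fill count to the
 * queue's doorbell CSR (only while the device is running), then reset
 * fill_cnt and record the time for the doorbell-timeout check.
 */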
static inline void
lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq)
{
if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) {
lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt);
/* make sure doorbell write goes through */
__compiler_membar();
iq->fill_cnt = 0;
iq->last_db_time = ticks;
return;
}
}
static inline void
__lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
uint8_t *iqptr, cmdsize;
cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
memcpy(iqptr, cmd, cmdsize);
}
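/*
 * Post a single command: fail when only one free slot remains so the read
 * index cannot wrap onto the write index, report LIO_IQ_SEND_STOP when the
 * queue is nearly full, otherwise copy the command in, advance
 * host_write_index and bump instr_pending.
 */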
static inline struct lio_iq_post_status
__lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
struct lio_iq_post_status st;
st.status = LIO_IQ_SEND_OK;
/*
* This ensures that the read index does not wrap around to the same
* position if the queue gets full before Octeon can fetch any instructions.
*/
if (atomic_load_acq_int(&iq->instr_pending) >=
(int32_t)(iq->max_count - 1)) {
st.status = LIO_IQ_SEND_FAILED;
st.index = -1;
return (st);
}
if (atomic_load_acq_int(&iq->instr_pending) >=
(int32_t)(iq->max_count - 2))
st.status = LIO_IQ_SEND_STOP;
__lio_copy_cmd_into_iq(iq, cmd);
/* "index" is returned, host_write_index is modified. */
st.index = iq->host_write_index;
iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
iq->max_count);
iq->fill_cnt++;
/*
* Flush the command into memory. We need to be sure the data is in
* memory before indicating that the instruction is pending.
*/
wmb();
atomic_add_int(&iq->instr_pending, 1);
return (st);
}
static inline void
__lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf,
int reqtype)
{
iq->request_list[idx].buf = buf;
iq->request_list[idx].reqtype = reqtype;
}
/* Can only run in process context */
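/*
 * Walk the request list from flush_index up to the index Octeon has
 * already fetched, freeing mbufs for no-response requests and completing
 * (or queueing on the ordered response list) soft commands; returns the
 * number of entries processed, bounded by the optional budget.
 */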
int
lio_process_iq_request_list(struct octeon_device *oct,
struct lio_instr_queue *iq, uint32_t budget)
{
struct lio_soft_command *sc;
struct octeon_instr_irh *irh = NULL;
struct lio_mbuf_free_info *finfo;
void *buf;
uint32_t inst_count = 0;
uint32_t old = iq->flush_index;
int reqtype;
while (old != iq->octeon_read_index) {
reqtype = iq->request_list[old].reqtype;
buf = iq->request_list[old].buf;
finfo = buf;
if (reqtype == LIO_REQTYPE_NONE)
goto skip_this;
switch (reqtype) {
case LIO_REQTYPE_NORESP_NET:
lio_free_mbuf(iq, buf);
break;
case LIO_REQTYPE_NORESP_NET_SG:
lio_free_sgmbuf(iq, buf);
break;
case LIO_REQTYPE_RESP_NET:
case LIO_REQTYPE_SOFT_COMMAND:
sc = buf;
if (LIO_CN23XX_PF(oct))
irh = (struct octeon_instr_irh *)
&sc->cmd.cmd3.irh;
if (irh->rflag) {
/*
* We're expecting a response from Octeon.
* It's up to lio_process_ordered_list() to
* process sc. Add sc to the ordered soft
* command response list because we expect
* a response from Octeon.
*/
mtx_lock(&oct->response_list
[LIO_ORDERED_SC_LIST].lock);
atomic_add_int(&oct->response_list
[LIO_ORDERED_SC_LIST].
pending_req_count, 1);
STAILQ_INSERT_TAIL(&oct->response_list
[LIO_ORDERED_SC_LIST].
head, &sc->node, entries);
mtx_unlock(&oct->response_list
[LIO_ORDERED_SC_LIST].lock);
} else {
if (sc->callback != NULL) {
/* This callback must not sleep */
sc->callback(oct, LIO_REQUEST_DONE,
sc->callback_arg);
}
}
break;
default:
lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n",
__func__, reqtype, buf, old);
}
iq->request_list[old].buf = NULL;
iq->request_list[old].reqtype = 0;
skip_this:
inst_count++;
old = lio_incr_index(old, 1, iq->max_count);
if ((budget) && (inst_count >= budget))
break;
}
iq->flush_index = old;
return (inst_count);
}
/* Can only be called from process context */
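/*
 * Reclaim completed instructions under iq->lock.  A non-zero budget caps
 * the amount of work done; the return value is 0 when the budget was
 * exhausted (more completions may remain) and 1 otherwise.
 */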
int
lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
uint32_t budget)
{
uint32_t inst_processed = 0;
uint32_t tot_inst_processed = 0;
int tx_done = 1;
if (!mtx_trylock(&iq->iq_flush_running_lock))
return (tx_done);
mtx_lock(&iq->lock);
iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
do {
/* Process any outstanding IQ packets. */
if (iq->flush_index == iq->octeon_read_index)
break;
if (budget)
inst_processed =
lio_process_iq_request_list(oct, iq,
budget -
tot_inst_processed);
else
inst_processed =
lio_process_iq_request_list(oct, iq, 0);
if (inst_processed) {
atomic_subtract_int(&iq->instr_pending, inst_processed);
iq->stats.instr_processed += inst_processed;
}
tot_inst_processed += inst_processed;
inst_processed = 0;
} while (tot_inst_processed < budget);
if (budget && (tot_inst_processed >= budget))
tx_done = 0;
iq->last_db_time = ticks;
mtx_unlock(&iq->lock);
mtx_unlock(&iq->iq_flush_running_lock);
return (tx_done);
}
/*
* Process instruction queue after timeout.
* This routine gets called from a taskqueue or when removing the module.
*/
static void
__lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no)
{
struct lio_instr_queue *iq;
uint64_t next_time;
if (oct == NULL)
return;
iq = oct->instr_queue[iq_no];
if (iq == NULL)
return;
if (atomic_load_acq_int(&iq->instr_pending)) {
/* If ticks - last_db_time < db_timeout do nothing */
next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout);
if (!lio_check_timeout(ticks, next_time))
return;
iq->last_db_time = ticks;
/* Flush the instruction queue */
lio_flush_iq(oct, iq, 0);
lio_enable_irq(NULL, iq);
}
if (oct->props.ifp != NULL && iq->br != NULL) {
if (mtx_trylock(&iq->enq_lock)) {
if (!drbr_empty(oct->props.ifp, iq->br))
lio_mq_start_locked(oct->props.ifp, iq);
mtx_unlock(&iq->enq_lock);
}
}
}
/*
* Called by the Poll thread at regular intervals to check the instruction
* queue for commands to be posted and for commands that were fetched by Octeon.
*/
static void
lio_check_db_timeout(void *arg, int pending)
{
struct lio_tq *db_tq = (struct lio_tq *)arg;
struct octeon_device *oct = db_tq->ctxptr;
uint64_t iq_no = db_tq->ctxul;
uint32_t delay = 10;
__lio_check_db_timeout(oct, iq_no);
taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work,
lio_ms_to_ticks(delay));
}
int
lio_send_command(struct octeon_device *oct, uint32_t iq_no,
uint32_t force_db, void *cmd, void *buf,
uint32_t datasize, uint32_t reqtype)
{
struct lio_iq_post_status st;
struct lio_instr_queue *iq = oct->instr_queue[iq_no];
/*
* Get the lock and prevent other tasks and tx interrupt handler
* from running.
*/
mtx_lock(&iq->post_lock);
st = __lio_post_command2(iq, cmd);
if (st.status != LIO_IQ_SEND_FAILED) {
__lio_add_to_request_list(iq, st.index, buf, reqtype);
LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
if (force_db || (st.status == LIO_IQ_SEND_STOP))
lio_ring_doorbell(oct, iq);
} else {
LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
mtx_unlock(&iq->post_lock);
/*
* This is only done here to expedite packets being flushed for
* cases where there are no IQ completion interrupts.
*/
return (st.status);
}
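/*
 * Typical soft-command flow (sketch): lio_alloc_soft_command() takes a
 * buffer from the pool, lio_prepare_soft_command() fills the IH3/IRH
 * headers, lio_send_soft_command() posts it on the IQ, and the result is
 * delivered either through sc->callback or, when irh->rflag is set, via
 * the ordered response list.
 */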
void
lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc,
uint8_t opcode, uint8_t subcode, uint32_t irh_ossp,
uint64_t ossp0, uint64_t ossp1)
{
struct lio_config *lio_cfg;
struct octeon_instr_ih3 *ih3;
struct octeon_instr_pki_ih3 *pki_ih3;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__));
KASSERT(subcode <= 127, ("%s, %d, subcode > 127", __func__, __LINE__));
lio_cfg = lio_get_conf(oct);
if (LIO_CN23XX_PF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
pki_ih3->w = 1;
pki_ih3->raw = 1;
pki_ih3->utag = 1;
pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
pki_ih3->utt = 1;
pki_ih3->tag = LIO_CONTROL;
pki_ih3->tagtype = LIO_ATOMIC_TAG;
pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
pki_ih3->pm = 0x7;
pki_ih3->sl = 8;
if (sc->datasize)
ih3->dlengsz = sc->datasize;
irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
irh->opcode = opcode;
irh->subcode = subcode;
/* opcode/subcode specific parameters (ossp) */
irh->ossp = irh_ossp;
sc->cmd.cmd3.ossp[0] = ossp0;
sc->cmd.cmd3.ossp[1] = ossp1;
if (sc->rdatasize) {
rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = sc->rdatasize;
irh->rflag = 1;
/* PKI IH3 */
/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
ih3->fsz = LIO_SOFTCMDRESP_IH3;
} else {
irh->rflag = 0;
/* PKI IH3 */
/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
ih3->fsz = LIO_PCICMD_O3;
}
}
}
int
lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc)
{
struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
uint32_t len = 0;
if (LIO_CN23XX_PF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
if (ih3->dlengsz) {
KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL",
__func__, __LINE__));
sc->cmd.cmd3.dptr = sc->dmadptr;
}
irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
if (irh->rflag) {
KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL",
__func__, __LINE__));
KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL",
__func__, __LINE__));
*sc->status_word = COMPLETION_WORD_INIT;
sc->cmd.cmd3.rptr = sc->dmarptr;
}
len = (uint32_t)ih3->dlengsz;
}
if (sc->wait_time)
sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time);
return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
len, LIO_REQTYPE_SOFT_COMMAND));
}
int
lio_setup_sc_buffer_pool(struct octeon_device *oct)
{
struct lio_soft_command *sc;
uint64_t dma_addr;
int i;
STAILQ_INIT(&oct->sc_buf_pool.head);
mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF);
atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0);
for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) {
sc = (struct lio_soft_command *)
lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE, (vm_paddr_t *)&dma_addr);
if (sc == NULL) {
lio_free_sc_buffer_pool(oct);
return (1);
}
sc->dma_addr = dma_addr;
sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
}
return (0);
}
int
lio_free_sc_buffer_pool(struct octeon_device *oct)
{
struct lio_stailq_node *tmp, *tmp2;
struct lio_soft_command *sc;
mtx_lock(&oct->sc_buf_pool.lock);
STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) {
sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head,
struct lio_soft_command, node);
STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries);
lio_dma_free(sc->size, sc);
}
STAILQ_INIT(&oct->sc_buf_pool.head);
mtx_unlock(&oct->sc_buf_pool.lock);
return (0);
}
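/*
 * Carve a pooled soft-command buffer into its parts: the command structure
 * itself, an optional context area, and data/response areas that are each
 * aligned to the next 128-byte boundary.
 */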
struct lio_soft_command *
lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize,
uint32_t rdatasize, uint32_t ctxsize)
{
struct lio_soft_command *sc = NULL;
struct lio_stailq_node *tmp;
uint64_t dma_addr;
uint32_t size;
uint32_t offset = sizeof(struct lio_soft_command);
KASSERT((offset + datasize + rdatasize + ctxsize) <=
LIO_SOFT_COMMAND_BUFFER_SIZE,
("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE",
__func__, __LINE__));
mtx_lock(&oct->sc_buf_pool.lock);
if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) {
mtx_unlock(&oct->sc_buf_pool.lock);
return (NULL);
}
tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries);
STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries);
atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1);
mtx_unlock(&oct->sc_buf_pool.lock);
sc = (struct lio_soft_command *)tmp;
dma_addr = sc->dma_addr;
size = sc->size;
bzero(sc, sc->size);
sc->dma_addr = dma_addr;
sc->size = size;
if (ctxsize) {
sc->ctxptr = (uint8_t *)sc + offset;
sc->ctxsize = ctxsize;
}
/* Start data at 128 byte boundary */
offset = (offset + ctxsize + 127) & 0xffffff80;
if (datasize) {
sc->virtdptr = (uint8_t *)sc + offset;
sc->dmadptr = dma_addr + offset;
sc->datasize = datasize;
}
/* Start rdata at 128 byte boundary */
offset = (offset + datasize + 127) & 0xffffff80;
if (rdatasize) {
KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__,
__LINE__));
sc->virtrptr = (uint8_t *)sc + offset;
sc->dmarptr = dma_addr + offset;
sc->rdatasize = rdatasize;
sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
rdatasize - 8);
}
return (sc);
}
void
lio_free_soft_command(struct octeon_device *oct,
struct lio_soft_command *sc)
{
mtx_lock(&oct->sc_buf_pool.lock);
STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1);
mtx_unlock(&oct->sc_buf_pool.lock);
}
Index: head/sys/dev/liquidio/lio_main.c
===================================================================
--- head/sys/dev/liquidio/lio_main.c (revision 328217)
+++ head/sys/dev/liquidio/lio_main.c (revision 328218)
@@ -1,2310 +1,2310 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_ioctl.h"
#include "lio_rxtx.h"
#include "lio_rss.h"
/* Number of milliseconds to wait for DDR initialization */
#define LIO_DDR_TIMEOUT 10000
#define LIO_MAX_FW_TYPE_LEN 8
static char fw_type[LIO_MAX_FW_TYPE_LEN];
TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));
/*
* Integers that specify number of queues per PF.
* Valid range is 0 to 64.
* Use 0 to derive from CPU count.
*/
static int num_queues_per_pf0;
static int num_queues_per_pf1;
TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);
#ifdef RSS
static int lio_rss = 1;
TUNABLE_INT("hw.lio.rss", &lio_rss);
#endif /* RSS */
/* Hardware LRO */
unsigned int lio_hwlro = 0;
TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);
/*
* Bitmask indicating which consoles have debug
* output redirected to syslog.
*/
static unsigned long console_bitmask;
TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);
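/* e.g. hw.lio.console_bitmask="1" in /boot/loader.conf enables debug output for console 0. */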
/*
* \brief determines if a given console has debug enabled.
* @param console console to check
* @returns 1 = enabled. 0 otherwise
*/
int
lio_console_debug_enabled(uint32_t console)
{
return (console_bitmask >> (console)) & 0x1;
}
static int lio_detach(device_t dev);
static int lio_device_init(struct octeon_device *octeon_dev);
static int lio_chip_specific_setup(struct octeon_device *oct);
static void lio_watchdog(void *param);
static int lio_load_firmware(struct octeon_device *oct);
static int lio_nic_starter(struct octeon_device *oct);
static int lio_init_nic_module(struct octeon_device *oct);
static int lio_setup_nic_devices(struct octeon_device *octeon_dev);
static int lio_link_info(struct lio_recv_info *recv_info, void *ptr);
static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
void *buf);
static int lio_set_rxcsum_command(struct ifnet *ifp, int command,
uint8_t rx_cmd);
static int lio_setup_glists(struct octeon_device *oct, struct lio *lio,
int num_iqs);
static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
static inline void lio_update_link_status(struct ifnet *ifp,
union octeon_link_status *ls);
static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
static int lio_stop_nic_module(struct octeon_device *oct);
static void lio_destroy_resources(struct octeon_device *oct);
static int lio_setup_rx_oom_poll_fn(struct ifnet *ifp);
static void lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid);
static void lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp,
uint16_t vid);
static struct octeon_device *
lio_get_other_octeon_device(struct octeon_device *oct);
static int lio_wait_for_oq_pkts(struct octeon_device *oct);
int lio_send_rss_param(struct lio *lio);
static int lio_dbg_console_print(struct octeon_device *oct,
uint32_t console_num, char *prefix,
char *suffix);
/* Polling interval for determining when NIC application is alive */
#define LIO_STARTER_POLL_INTERVAL_MS 100
/*
* vendor_info_array.
* This array contains the list of IDs on which the driver should load.
*/
struct lio_vendor_info {
uint16_t vendor_id;
uint16_t device_id;
uint16_t subdevice_id;
uint8_t revision_id;
uint8_t index;
};
static struct lio_vendor_info lio_pci_tbl[] = {
/* CN2350 10G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
0x02, 0},
/* CN2350 10G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
0x02, 0},
/* CN2360 10G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
0x02, 1},
/* CN2350 25G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
0x02, 2},
/* CN2360 25G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
0x02, 3},
{0, 0, 0, 0, 0}
};
static char *lio_strings[] = {
"LiquidIO 2350 10GbE Server Adapter",
"LiquidIO 2360 10GbE Server Adapter",
"LiquidIO 2350 25GbE Server Adapter",
"LiquidIO 2360 25GbE Server Adapter",
};
struct lio_if_cfg_resp {
uint64_t rh;
struct octeon_if_cfg_info cfg_info;
uint64_t status;
};
struct lio_if_cfg_context {
int octeon_id;
volatile int cond;
};
struct lio_rx_ctl_context {
int octeon_id;
volatile int cond;
};
static int
lio_probe(device_t dev)
{
struct lio_vendor_info *tbl;
uint16_t vendor_id;
uint16_t device_id;
uint16_t subdevice_id;
uint8_t revision_id;
char device_ver[256];
vendor_id = pci_get_vendor(dev);
if (vendor_id != PCI_VENDOR_ID_CAVIUM)
return (ENXIO);
device_id = pci_get_device(dev);
subdevice_id = pci_get_subdevice(dev);
revision_id = pci_get_revid(dev);
tbl = lio_pci_tbl;
while (tbl->vendor_id) {
if ((vendor_id == tbl->vendor_id) &&
(device_id == tbl->device_id) &&
(subdevice_id == tbl->subdevice_id) &&
(revision_id == tbl->revision_id)) {
sprintf(device_ver, "%s, Version - %s",
lio_strings[tbl->index], LIO_VERSION);
device_set_desc_copy(dev, device_ver);
return (BUS_PROBE_DEFAULT);
}
tbl++;
}
return (ENXIO);
}
static int
lio_attach(device_t device)
{
struct octeon_device *oct_dev = NULL;
uint64_t scratch1;
uint32_t error;
int timeout, ret = 1;
uint8_t bus, dev, function;
oct_dev = lio_allocate_device(device);
if (oct_dev == NULL) {
device_printf(device, "Error: Unable to allocate device\n");
return (-ENOMEM);
}
oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
oct_dev->device = device;
bus = pci_get_bus(device);
dev = pci_get_slot(device);
function = pci_get_function(device);
lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
pci_get_vendor(device), pci_get_device(device), bus, dev,
function);
if (lio_device_init(oct_dev)) {
lio_dev_err(oct_dev, "Failed to init device\n");
lio_detach(device);
return (-ENOMEM);
}
scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
if (!(scratch1 & 4ULL)) {
/*
* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
* the lio watchdog kernel thread is running for this
* NIC. Each NIC gets one watchdog kernel thread.
*/
scratch1 |= 4ULL;
lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
error = kproc_create(lio_watchdog, oct_dev,
&oct_dev->watchdog_task, 0, 0,
"liowd/%02hhx:%02hhx.%hhx", bus,
dev, function);
if (!error) {
kproc_resume(oct_dev->watchdog_task);
} else {
oct_dev->watchdog_task = NULL;
lio_dev_err(oct_dev,
"failed to create kernel_thread\n");
lio_detach(device);
return (-1);
}
}
oct_dev->rx_pause = 1;
oct_dev->tx_pause = 1;
timeout = 0;
while (timeout < LIO_NIC_STARTER_TIMEOUT) {
lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
timeout += LIO_STARTER_POLL_INTERVAL_MS;
/*
* During the boot process interrupts are not available,
* so poll for the first control message from the firmware.
*/
if (cold)
lio_droq_bh(oct_dev->droq[0], 0);
if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
ret = lio_nic_starter(oct_dev);
break;
}
}
if (ret) {
lio_dev_err(oct_dev, "Firmware failed to start\n");
lio_detach(device);
return (-EIO);
}
lio_dev_dbg(oct_dev, "Device is ready\n");
return (0);
}
static int
lio_detach(device_t dev)
{
struct octeon_device *oct_dev = device_get_softc(dev);
lio_dev_dbg(oct_dev, "Stopping device\n");
if (oct_dev->watchdog_task) {
uint64_t scratch1;
kproc_suspend(oct_dev->watchdog_task, 0);
scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
scratch1 &= ~4ULL;
lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
}
if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
lio_stop_nic_module(oct_dev);
/*
* Reset the octeon device and cleanup all memory allocated for
* the octeon device by driver.
*/
lio_destroy_resources(oct_dev);
lio_dev_info(oct_dev, "Device removed\n");
/*
* This octeon device has been removed. Update the global
* data structure to reflect this. Free the device structure.
*/
lio_free_device_mem(oct_dev);
return (0);
}
static int
lio_shutdown(device_t dev)
{
struct octeon_device *oct_dev = device_get_softc(dev);
struct lio *lio = if_getsoftc(oct_dev->props.ifp);
lio_send_rx_ctrl_cmd(lio, 0);
return (0);
}
static int
lio_suspend(device_t dev)
{
return (ENXIO);
}
static int
lio_resume(device_t dev)
{
return (ENXIO);
}
static int
lio_event(struct module *mod, int event, void *junk)
{
switch (event) {
case MOD_LOAD:
lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
break;
default:
break;
}
return (0);
}
/*********************************************************************
* FreeBSD Device Interface Entry Points
* *******************************************************************/
static device_method_t lio_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, lio_probe),
DEVMETHOD(device_attach, lio_attach),
DEVMETHOD(device_detach, lio_detach),
DEVMETHOD(device_shutdown, lio_shutdown),
DEVMETHOD(device_suspend, lio_suspend),
DEVMETHOD(device_resume, lio_resume),
DEVMETHOD_END
};
static driver_t lio_driver = {
LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
};
devclass_t lio_devclass;
DRIVER_MODULE(lio, pci, lio_driver, lio_devclass, lio_event, 0);
MODULE_DEPEND(lio, pci, 1, 1, 1);
MODULE_DEPEND(lio, ether, 1, 1, 1);
MODULE_DEPEND(lio, firmware, 1, 1, 1);
static bool
fw_type_is_none(void)
{
return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
}
/*
* \brief Device initialization for each Octeon device that is probed
* @param octeon_dev octeon device
*/
static int
lio_device_init(struct octeon_device *octeon_dev)
{
unsigned long ddr_timeout = LIO_DDR_TIMEOUT;
char *dbg_enb = NULL;
int fw_loaded = 0;
int i, j, ret;
uint8_t bus, dev, function;
char bootcmd[] = "\n";
bus = pci_get_bus(octeon_dev->device);
dev = pci_get_slot(octeon_dev->device);
function = pci_get_function(octeon_dev->device);
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);
/* Enable access to the octeon device */
if (pci_enable_busmaster(octeon_dev->device)) {
lio_dev_err(octeon_dev, "pci_enable_device failed\n");
return (1);
}
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);
/* Identify the Octeon type and map the BAR address space. */
if (lio_chip_specific_setup(octeon_dev)) {
lio_dev_err(octeon_dev, "Chip specific setup failed\n");
return (1);
}
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);
/*
* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
* since that is what is required for the reference to be removed
* during de-initialization (see 'octeon_destroy_resources').
*/
lio_register_device(octeon_dev, bus, dev, function, true);
octeon_dev->app_mode = LIO_DRV_INVALID_APP;
if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
fw_loaded = 0;
/* Do a soft reset of the Octeon device. */
if (octeon_dev->fn_list.soft_reset(octeon_dev))
return (1);
/* things might have changed */
if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
fw_loaded = 0;
else
fw_loaded = 1;
} else {
fw_loaded = 1;
}
/*
* Initialize the dispatch mechanism used to push packets arriving on
* Octeon Output queues.
*/
if (lio_init_dispatch_list(octeon_dev))
return (1);
lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
lio_core_drv_init, octeon_dev);
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);
ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
if (ret) {
lio_dev_err(octeon_dev,
"Failed to configure device registers\n");
return (ret);
}
/* Initialize soft command buffer pool */
if (lio_setup_sc_buffer_pool(octeon_dev)) {
lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
return (1);
}
atomic_store_rel_int(&octeon_dev->status,
LIO_DEV_SC_BUFF_POOL_INIT_DONE);
if (lio_allocate_ioq_vector(octeon_dev)) {
lio_dev_err(octeon_dev,
"IOQ vector allocation failed\n");
return (1);
}
atomic_store_rel_int(&octeon_dev->status,
LIO_DEV_MSIX_ALLOC_VECTOR_DONE);
for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
octeon_dev->instr_queue[i] =
malloc(sizeof(struct lio_instr_queue),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (octeon_dev->instr_queue[i] == NULL)
return (1);
}
/* Setup the data structures that manage this Octeon's Input queues. */
if (lio_setup_instr_queue0(octeon_dev)) {
lio_dev_err(octeon_dev,
"Instruction queue initialization failed\n");
return (1);
}
atomic_store_rel_int(&octeon_dev->status,
LIO_DEV_INSTR_QUEUE_INIT_DONE);
/*
* Initialize lists to manage the requests of different types that
* arrive from user & kernel applications for this octeon device.
*/
if (lio_setup_response_list(octeon_dev)) {
lio_dev_err(octeon_dev, "Response list allocation failed\n");
return (1);
}
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);
for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (octeon_dev->droq[i] == NULL)
return (1);
}
if (lio_setup_output_queue0(octeon_dev)) {
lio_dev_err(octeon_dev, "Output queue initialization failed\n");
return (1);
}
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);
/*
* Setup the interrupt handler and record the INT SUM register address
*/
if (lio_setup_interrupt(octeon_dev,
octeon_dev->sriov_info.num_pf_rings))
return (1);
/* Enable Octeon device interrupts */
octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);
/*
* Send Credit for Octeon Output queues. Credits are always sent BEFORE
* the output queue is enabled.
* This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
* case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
* Otherwise, it is possible that the DRV_ACTIVE message will be sent
* before any credits have been issued, causing the ring to be reset
* (and the f/w appear to never have started).
*/
for (j = 0; j < octeon_dev->num_oqs; j++)
lio_write_csr32(octeon_dev,
octeon_dev->droq[j]->pkts_credit_reg,
octeon_dev->droq[j]->max_count);
/* Enable the input and output queues for this Octeon device */
ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
if (ret) {
lio_dev_err(octeon_dev, "Failed to enable input/output queues");
return (ret);
}
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);
if (!fw_loaded) {
lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
if (!ddr_timeout) {
lio_dev_info(octeon_dev,
"WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
}
lio_sleep_timeout(LIO_RESET_MSECS);
/*
* Wait for the octeon to initialize DDR after the
* soft-reset.
*/
while (!ddr_timeout) {
if (pause("-", lio_ms_to_ticks(100))) {
/* user probably pressed Control-C */
return (1);
}
}
ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
if (ret) {
lio_dev_err(octeon_dev,
"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
ret);
return (1);
}
if (lio_wait_for_bootloader(octeon_dev, 1100)) {
lio_dev_err(octeon_dev, "Board not responding\n");
return (1);
}
/* Divert uboot to take commands from host instead. */
ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);
lio_dev_dbg(octeon_dev, "Initializing consoles\n");
ret = lio_init_consoles(octeon_dev);
if (ret) {
lio_dev_err(octeon_dev, "Could not access board consoles\n");
return (1);
}
/*
* If console debugging is enabled, pass an empty string to use the
* default enablement; otherwise pass NULL to leave it disabled.
*/
dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
ret = lio_add_console(octeon_dev, 0, dbg_enb);
if (ret) {
lio_dev_err(octeon_dev, "Could not access board console\n");
return (1);
} else if (lio_console_debug_enabled(0)) {
/*
* If console was added AND we're logging console output
* then set our console print function.
*/
octeon_dev->console[0].print = lio_dbg_console_print;
}
atomic_store_rel_int(&octeon_dev->status,
LIO_DEV_CONSOLE_INIT_DONE);
lio_dev_dbg(octeon_dev, "Loading firmware\n");
ret = lio_load_firmware(octeon_dev);
if (ret) {
lio_dev_err(octeon_dev, "Could not load firmware to board\n");
return (1);
}
}
atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);
return (0);
}
/*
* \brief PCI FLR for each Octeon device.
* @param oct octeon device
*/
static void
lio_pci_flr(struct octeon_device *oct)
{
uint32_t exppos, status;
pci_find_cap(oct->device, PCIY_EXPRESS, &exppos);
pci_save_state(oct->device);
/* Quiesce the device completely */
pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);
/* Wait for Transaction Pending bit clean */
lio_mdelay(100);
status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
if (status & PCIEM_STA_TRANSACTION_PND) {
lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
lio_mdelay(5000);
status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
if (status & PCIEM_STA_TRANSACTION_PND)
lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
}
pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
lio_mdelay(100);
pci_restore_state(oct->device);
}
/*
* \brief Debug console print function
* @param octeon_dev octeon device
* @param console_num console number
* @param prefix first portion of line to display
* @param suffix second portion of line to display
*
* The OCTEON debug console outputs entire lines (excluding '\n').
* Normally, the line will be passed in the 'prefix' parameter.
* However, due to buffering, it is possible for a line to be split into two
* parts, in which case they will be passed as the 'prefix' parameter and
* 'suffix' parameter.
*/
static int
lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
char *prefix, char *suffix)
{
if (prefix != NULL && suffix != NULL)
lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
else if (prefix != NULL)
lio_dev_info(oct, "%u: %s\n", console_num, prefix);
else if (suffix != NULL)
lio_dev_info(oct, "%u: %s\n", console_num, suffix);
return (0);
}
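/*
 * Watchdog kproc: every two seconds read the crashed/stuck-core mask from
 * SLI_SCRATCH2 and log each affected core once, also marking the partner
 * PF (if any) as having crashed cores.
 */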
static void
lio_watchdog(void *param)
{
int core_num;
uint16_t mask_of_crashed_or_stuck_cores = 0;
struct octeon_device *oct = param;
bool err_msg_was_printed[12];
bzero(err_msg_was_printed, sizeof(err_msg_was_printed));
while (1) {
kproc_suspend_check(oct->watchdog_task);
mask_of_crashed_or_stuck_cores =
(uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
if (mask_of_crashed_or_stuck_cores) {
struct octeon_device *other_oct;
oct->cores_crashed = true;
other_oct = lio_get_other_octeon_device(oct);
if (other_oct != NULL)
other_oct->cores_crashed = true;
for (core_num = 0; core_num < LIO_MAX_CORES;
core_num++) {
bool core_crashed_or_got_stuck;
core_crashed_or_got_stuck =
(mask_of_crashed_or_stuck_cores >>
core_num) & 1;
if (core_crashed_or_got_stuck &&
!err_msg_was_printed[core_num]) {
lio_dev_err(oct,
"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
core_num);
err_msg_was_printed[core_num] = true;
}
}
}
/* sleep for two seconds */
pause("-", lio_ms_to_ticks(2000));
}
}
static int
lio_chip_specific_setup(struct octeon_device *oct)
{
char *s;
uint32_t dev_id, rev_id;
int ret = 1;
dev_id = lio_read_pci_cfg(oct, 0);
rev_id = pci_get_revid(oct->device);
oct->subdevice_id = pci_get_subdevice(oct->device);
switch (dev_id) {
case LIO_CN23XX_PF_PCIID:
oct->chip_id = LIO_CN23XX_PF_VID;
if (pci_get_function(oct->device) == 0) {
if (num_queues_per_pf0 < 0) {
lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
num_queues_per_pf0);
num_queues_per_pf0 = 0;
}
oct->sriov_info.num_pf_rings = num_queues_per_pf0;
} else {
if (num_queues_per_pf1 < 0) {
lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
num_queues_per_pf1);
num_queues_per_pf1 = 0;
}
oct->sriov_info.num_pf_rings = num_queues_per_pf1;
}
ret = lio_cn23xx_pf_setup_device(oct);
s = "CN23XX";
break;
default:
s = "?";
lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
}
if (!ret)
lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
lio_get_conf(oct)->card_name, LIO_VERSION);
return (ret);
}
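/*
 * Find the other PF of the same physical adapter, i.e. the next octeon
 * device that sits on the same PCI bus and slot, if it has been probed.
 */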
static struct octeon_device *
lio_get_other_octeon_device(struct octeon_device *oct)
{
struct octeon_device *other_oct;
other_oct = lio_get_device(oct->octeon_id + 1);
if ((other_oct != NULL) && other_oct->device) {
int oct_busnum, other_oct_busnum;
oct_busnum = pci_get_bus(oct->device);
other_oct_busnum = pci_get_bus(other_oct->device);
if (oct_busnum == other_oct_busnum) {
int oct_slot, other_oct_slot;
oct_slot = pci_get_slot(oct->device);
other_oct_slot = pci_get_slot(other_oct->device);
if (oct_slot == other_oct_slot)
return (other_oct);
}
}
return (NULL);
}
/*
* \brief Load firmware to device
* @param oct octeon device
*
* Maps device to firmware filename, requests firmware, and downloads it
*/
static int
lio_load_firmware(struct octeon_device *oct)
{
const struct firmware *fw;
char *tmp_fw_type = NULL;
int ret = 0;
char fw_name[LIO_MAX_FW_FILENAME_LEN];
if (fw_type[0] == '\0')
tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
else
tmp_fw_type = fw_type;
sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);
fw = firmware_get(fw_name);
if (fw == NULL) {
lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
fw_name);
return (EINVAL);
}
ret = lio_download_firmware(oct, fw->data, fw->datasize);
firmware_put(fw, FIRMWARE_UNLOAD);
return (ret);
}
static int
lio_nic_starter(struct octeon_device *oct)
{
int ret = 0;
atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);
if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
if (lio_init_nic_module(oct)) {
lio_dev_err(oct, "NIC initialization failed\n");
ret = -1;
#ifdef CAVIUM_ONLY_23XX_VF
} else {
if (octeon_enable_sriov(oct) < 0)
ret = -1;
#endif
}
} else {
lio_dev_err(oct,
"Unexpected application running on NIC (%d). Check firmware.\n",
oct->app_mode);
ret = -1;
}
return (ret);
}
static int
lio_init_nic_module(struct octeon_device *oct)
{
int num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
int retval = 0;
lio_dev_dbg(oct, "Initializing network interfaces\n");
/*
* Only the default iq and oq were initialized;
* initialize the rest as well.
*/
/* run port_config command for each port */
oct->ifcount = num_nic_ports;
bzero(&oct->props, sizeof(struct lio_if_props));
oct->props.gmxport = -1;
retval = lio_setup_nic_devices(oct);
if (retval) {
lio_dev_err(oct, "Setup NIC devices failed\n");
goto lio_init_failure;
}
lio_dev_dbg(oct, "Network interfaces ready\n");
return (retval);
lio_init_failure:
oct->ifcount = 0;
return (retval);
}
static int
lio_ifmedia_update(struct ifnet *ifp)
{
struct lio *lio = if_getsoftc(ifp);
struct ifmedia *ifm;
ifm = &lio->ifmedia;
/* We only support Ethernet media type. */
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
break;
case IFM_10G_CX4:
case IFM_10G_SR:
case IFM_10G_T:
case IFM_10G_TWINAX:
default:
/* We don't support changing the media type. */
lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
IFM_SUBTYPE(ifm->ifm_media));
return (EINVAL);
}
return (0);
}
static int
lio_get_media_subtype(struct octeon_device *oct)
{
switch(oct->subdevice_id) {
case LIO_CN2350_10G_SUBDEVICE:
case LIO_CN2350_10G_SUBDEVICE1:
case LIO_CN2360_10G_SUBDEVICE:
return (IFM_10G_SR);
case LIO_CN2350_25G_SUBDEVICE:
case LIO_CN2360_25G_SUBDEVICE:
return (IFM_25G_SR);
}
return (IFM_10G_SR);
}
static uint64_t
lio_get_baudrate(struct octeon_device *oct)
{
switch(oct->subdevice_id) {
case LIO_CN2350_10G_SUBDEVICE:
case LIO_CN2350_10G_SUBDEVICE1:
case LIO_CN2360_10G_SUBDEVICE:
return (IF_Gbps(10));
case LIO_CN2350_25G_SUBDEVICE:
case LIO_CN2360_25G_SUBDEVICE:
return (IF_Gbps(25));
}
return (IF_Gbps(10));
}
static void
lio_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct lio *lio = if_getsoftc(ifp);
/* Report link down if the driver isn't running. */
if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
ifmr->ifm_active |= IFM_NONE;
return;
}
/* Setup the default interface info. */
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
if (lio->linfo.link.s.link_up) {
ifmr->ifm_status |= IFM_ACTIVE;
} else {
ifmr->ifm_active |= IFM_NONE;
return;
}
ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);
if (lio->linfo.link.s.duplex)
ifmr->ifm_active |= IFM_FDX;
else
ifmr->ifm_active |= IFM_HDX;
}
static uint64_t
lio_get_counter(if_t ifp, ift_counter cnt)
{
struct lio *lio = if_getsoftc(ifp);
struct octeon_device *oct = lio->oct_dev;
uint64_t counter = 0;
int i, q_no;
switch (cnt) {
case IFCOUNTER_IPACKETS:
for (i = 0; i < oct->num_oqs; i++) {
q_no = lio->linfo.rxpciq[i].s.q_no;
counter += oct->droq[q_no]->stats.rx_pkts_received;
}
break;
case IFCOUNTER_OPACKETS:
for (i = 0; i < oct->num_iqs; i++) {
q_no = lio->linfo.txpciq[i].s.q_no;
counter += oct->instr_queue[q_no]->stats.tx_done;
}
break;
case IFCOUNTER_IBYTES:
for (i = 0; i < oct->num_oqs; i++) {
q_no = lio->linfo.rxpciq[i].s.q_no;
counter += oct->droq[q_no]->stats.rx_bytes_received;
}
break;
case IFCOUNTER_OBYTES:
for (i = 0; i < oct->num_iqs; i++) {
q_no = lio->linfo.txpciq[i].s.q_no;
counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
}
break;
case IFCOUNTER_IQDROPS:
for (i = 0; i < oct->num_oqs; i++) {
q_no = lio->linfo.rxpciq[i].s.q_no;
counter += oct->droq[q_no]->stats.rx_dropped;
}
break;
case IFCOUNTER_OQDROPS:
for (i = 0; i < oct->num_iqs; i++) {
q_no = lio->linfo.txpciq[i].s.q_no;
counter += oct->instr_queue[q_no]->stats.tx_dropped;
}
break;
case IFCOUNTER_IMCASTS:
counter = oct->link_stats.fromwire.total_mcst;
break;
case IFCOUNTER_OMCASTS:
counter = oct->link_stats.fromhost.mcast_pkts_sent;
break;
case IFCOUNTER_COLLISIONS:
counter = oct->link_stats.fromhost.total_collisions;
break;
case IFCOUNTER_IERRORS:
counter = oct->link_stats.fromwire.fcs_err +
oct->link_stats.fromwire.l2_err +
oct->link_stats.fromwire.frame_err;
break;
default:
return (if_get_counter_default(ifp, cnt));
}
return (counter);
}
static int
lio_init_ifnet(struct lio *lio)
{
struct octeon_device *oct = lio->oct_dev;
if_t ifp = lio->ifp;
/* ifconfig entrypoint for media type/status reporting */
ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
lio_ifmedia_status);
/* set the default interface values */
ifmedia_add(&lio->ifmedia,
(IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
0, NULL);
ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));
lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);
if_initname(ifp, device_get_name(oct->device),
device_get_unit(oct->device));
if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
if_setioctlfn(ifp, lio_ioctl);
if_setgetcounterfn(ifp, lio_get_counter);
if_settransmitfn(ifp, lio_mq_start);
if_setqflushfn(ifp, lio_qflush);
if_setinitfn(ifp, lio_open);
if_setmtu(ifp, lio->linfo.link.s.mtu);
lio->mtu = lio->linfo.link.s.mtu;
if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
IFCAP_TSO | IFCAP_LRO |
IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
if_setbaudrate(ifp, lio_get_baudrate(oct));
return (0);
}
static void
lio_tcp_lro_free(struct octeon_device *octeon_dev, struct ifnet *ifp)
{
struct lio *lio = if_getsoftc(ifp);
struct lio_droq *droq;
int q_no;
int i;
for (i = 0; i < octeon_dev->num_oqs; i++) {
q_no = lio->linfo.rxpciq[i].s.q_no;
droq = octeon_dev->droq[q_no];
if (droq->lro.ifp) {
tcp_lro_free(&droq->lro);
droq->lro.ifp = NULL;
}
}
}
static int
lio_tcp_lro_init(struct octeon_device *octeon_dev, struct ifnet *ifp)
{
struct lio *lio = if_getsoftc(ifp);
struct lio_droq *droq;
struct lro_ctrl *lro;
int i, q_no, ret = 0;
for (i = 0; i < octeon_dev->num_oqs; i++) {
q_no = lio->linfo.rxpciq[i].s.q_no;
droq = octeon_dev->droq[q_no];
lro = &droq->lro;
ret = tcp_lro_init(lro);
if (ret) {
lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
ret);
goto lro_init_failed;
}
lro->ifp = ifp;
}
return (ret);
lro_init_failed:
lio_tcp_lro_free(octeon_dev, ifp);
return (ret);
}
static int
lio_setup_nic_devices(struct octeon_device *octeon_dev)
{
union octeon_if_cfg if_cfg;
struct lio *lio = NULL;
struct ifnet *ifp = NULL;
struct lio_version *vdata;
struct lio_soft_command *sc;
struct lio_if_cfg_context *ctx;
struct lio_if_cfg_resp *resp;
struct lio_if_props *props;
int num_iqueues, num_oqueues, retval;
unsigned int base_queue;
unsigned int gmx_port_id;
uint32_t ctx_size, data_size;
uint32_t ifidx_or_pfnum, resp_size;
uint8_t mac[ETHER_HDR_LEN], i, j;
/* This is to handle link status changes */
lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
LIO_OPCODE_NIC_INFO,
lio_link_info, octeon_dev);
for (i = 0; i < octeon_dev->ifcount; i++) {
resp_size = sizeof(struct lio_if_cfg_resp);
ctx_size = sizeof(struct lio_if_cfg_context);
data_size = sizeof(struct lio_version);
sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
ctx_size);
if (sc == NULL)
return (ENOMEM);
resp = (struct lio_if_cfg_resp *)sc->virtrptr;
ctx = (struct lio_if_cfg_context *)sc->ctxptr;
vdata = (struct lio_version *)sc->virtdptr;
*((uint64_t *)vdata) = 0;
vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);
num_iqueues = octeon_dev->sriov_info.num_pf_rings;
num_oqueues = octeon_dev->sriov_info.num_pf_rings;
base_queue = octeon_dev->sriov_info.pf_srn;
gmx_port_id = octeon_dev->pf_num;
ifidx_or_pfnum = octeon_dev->pf_num;
lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
ifidx_or_pfnum, num_iqueues, num_oqueues);
ctx->cond = 0;
ctx->octeon_id = lio_get_device_id(octeon_dev);
if_cfg.if_cfg64 = 0;
if_cfg.s.num_iqueues = num_iqueues;
if_cfg.s.num_oqueues = num_oqueues;
if_cfg.s.base_queue = base_queue;
if_cfg.s.gmx_port_id = gmx_port_id;
sc->iq_no = 0;
lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
LIO_OPCODE_NIC_IF_CFG, 0,
if_cfg.if_cfg64, 0);
sc->callback = lio_if_cfg_callback;
sc->callback_arg = sc;
sc->wait_time = 3000;
retval = lio_send_soft_command(octeon_dev, sc);
if (retval == LIO_IQ_SEND_FAILED) {
lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
retval);
/* Soft instr is freed by driver in case of failure. */
goto setup_nic_dev_fail;
}
/*
* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
lio_sleep_cond(octeon_dev, &ctx->cond);
retval = resp->status;
if (retval) {
lio_dev_err(octeon_dev, "iq/oq config failed\n");
goto setup_nic_dev_fail;
}
lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
(sizeof(struct octeon_if_cfg_info)) >> 3);
num_iqueues = bitcount64(resp->cfg_info.iqmask);
num_oqueues = bitcount64(resp->cfg_info.oqmask);
if (!(num_iqueues) || !(num_oqueues)) {
lio_dev_err(octeon_dev,
"Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
LIO_CAST64(resp->cfg_info.iqmask),
LIO_CAST64(resp->cfg_info.oqmask));
goto setup_nic_dev_fail;
}
lio_dev_dbg(octeon_dev,
"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
i, LIO_CAST64(resp->cfg_info.iqmask),
LIO_CAST64(resp->cfg_info.oqmask),
num_iqueues, num_oqueues);
ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
lio_dev_err(octeon_dev, "Device allocation failed\n");
goto setup_nic_dev_fail;
}
lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);
if (lio == NULL) {
lio_dev_err(octeon_dev, "Lio allocation failed\n");
goto setup_nic_dev_fail;
}
if_setsoftc(ifp, lio);
ifp->if_hw_tsomax = LIO_MAX_FRAME_SIZE;
ifp->if_hw_tsomaxsegcount = LIO_MAX_SG;
ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
lio->ifidx = ifidx_or_pfnum;
props = &octeon_dev->props;
props->gmxport = resp->cfg_info.linfo.gmxport;
props->ifp = ifp;
lio->linfo.num_rxpciq = num_oqueues;
lio->linfo.num_txpciq = num_iqueues;
for (j = 0; j < num_oqueues; j++) {
lio->linfo.rxpciq[j].rxpciq64 =
resp->cfg_info.linfo.rxpciq[j].rxpciq64;
}
for (j = 0; j < num_iqueues; j++) {
lio->linfo.txpciq[j].txpciq64 =
resp->cfg_info.linfo.txpciq[j].txpciq64;
}
lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
lio->linfo.link.link_status64 =
resp->cfg_info.linfo.link.link_status64;
/*
* Point to the properties for octeon device to which this
* interface belongs.
*/
lio->oct_dev = octeon_dev;
lio->ifp = ifp;
lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
lio_init_ifnet(lio);
/* 64-bit swap required on LE machines */
lio_swap_8B_data(&lio->linfo.hw_addr, 1);
for (j = 0; j < 6; j++)
mac[j] = *((uint8_t *)(
((uint8_t *)&lio->linfo.hw_addr) + 2 + j));
ether_ifattach(ifp, mac);
/*
* By default, all interfaces on a single Octeon use the same
* tx and rx queues.
*/
lio->txq = lio->linfo.txpciq[0].s.q_no;
lio->rxq = lio->linfo.rxpciq[0].s.q_no;
if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
lio->linfo.num_rxpciq)) {
lio_dev_err(octeon_dev, "I/O queues creation failed\n");
goto setup_nic_dev_fail;
}
lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);
if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
lio_dev_err(octeon_dev, "Gather list allocation failed\n");
goto setup_nic_dev_fail;
}
if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
goto setup_nic_dev_fail;
if (lio_hwlro &&
(if_getcapenable(ifp) & IFCAP_LRO) &&
(if_getcapenable(ifp) & IFCAP_RXCSUM) &&
(if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
LIO_LROIPV4 | LIO_LROIPV6);
if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
else
lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);
if (lio_setup_rx_oom_poll_fn(ifp))
goto setup_nic_dev_fail;
lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
lio->link_changes++;
lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);
/*
* Send a command to the firmware to enable Rx checksum offload
* by default when the LiquidIO driver is set up for this
* device.
*/
lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
LIO_CMD_RXCSUM_ENABLE);
lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
LIO_CMD_TXCSUM_ENABLE);
#ifdef RSS
if (lio_rss) {
if (lio_send_rss_param(lio))
goto setup_nic_dev_fail;
} else
#endif /* RSS */
lio_set_feature(ifp, LIO_CMD_SET_FNV,
LIO_CMD_FNV_ENABLE);
lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);
lio_free_soft_command(octeon_dev, sc);
lio->vlan_attach =
EVENTHANDLER_REGISTER(vlan_config,
lio_vlan_rx_add_vid, lio,
EVENTHANDLER_PRI_FIRST);
lio->vlan_detach =
EVENTHANDLER_REGISTER(vlan_unconfig,
lio_vlan_rx_kill_vid, lio,
EVENTHANDLER_PRI_FIRST);
/* Update stats periodically */
callout_init(&lio->stats_timer, 0);
lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;
lio_add_hw_stats(lio);
}
return (0);
setup_nic_dev_fail:
lio_free_soft_command(octeon_dev, sc);
while (i--) {
lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
lio_destroy_nic_device(octeon_dev, i);
}
return (ENODEV);
}
static int
lio_link_info(struct lio_recv_info *recv_info, void *ptr)
{
struct octeon_device *oct = (struct octeon_device *)ptr;
struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt;
union octeon_link_status *ls;
int gmxport = 0, i;
lio_dev_dbg(oct, "%s Called\n", __func__);
if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
recv_pkt->rh.r_nic_info.gmxport);
goto nic_info_err;
}
gmxport = recv_pkt->rh.r_nic_info.gmxport;
ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
LIO_DROQ_INFO_SIZE);
lio_swap_8B_data((uint64_t *)ls,
(sizeof(union octeon_link_status)) >> 3);
if (oct->props.gmxport == gmxport)
lio_update_link_status(oct->props.ifp, ls);
nic_info_err:
for (i = 0; i < recv_pkt->buffer_count; i++)
lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);
lio_free_recv_info(recv_info);
return (0);
}
void
lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
{
bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(iq->txtag, finfo->map);
m_freem(finfo->mb);
}
void
lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
{
struct lio_gather *g;
struct octeon_device *oct;
struct lio *lio;
int iq_no;
g = finfo->g;
iq_no = iq->txpciq.s.q_no;
oct = iq->oct_dev;
lio = if_getsoftc(oct->props.ifp);
mtx_lock(&lio->glist_lock[iq_no]);
STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
mtx_unlock(&lio->glist_lock[iq_no]);
bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(iq->txtag, finfo->map);
m_freem(finfo->mb);
}
static void
lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
struct lio_soft_command *sc = (struct lio_soft_command *)buf;
struct lio_if_cfg_resp *resp;
struct lio_if_cfg_context *ctx;
resp = (struct lio_if_cfg_resp *)sc->virtrptr;
ctx = (struct lio_if_cfg_context *)sc->ctxptr;
oct = lio_get_device(ctx->octeon_id);
if (resp->status)
lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
LIO_CAST64(resp->status), status);
ctx->cond = 1;
snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
resp->cfg_info.lio_firmware_version);
/*
* This barrier is required to be sure that the response has been
* written fully before waking up the handler
*/
wmb();
}
static int
lio_is_mac_changed(uint8_t *new, uint8_t *old)
{
return ((new[0] != old[0]) || (new[1] != old[1]) ||
(new[2] != old[2]) || (new[3] != old[3]) ||
(new[4] != old[4]) || (new[5] != old[5]));
}
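/*
 * The open-coded comparison above is equivalent to a single memcmp() over
 * the six address bytes.  A minimal alternative sketch, shown only for
 * illustration (the function name below is hypothetical):
 */
static int
lio_is_mac_changed_memcmp(const uint8_t *new, const uint8_t *old)
{

	/* Non-zero when any of the ETHER_ADDR_LEN bytes differ. */
	return (memcmp(new, old, ETHER_ADDR_LEN) != 0);
}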
void
lio_open(void *arg)
{
struct lio *lio = arg;
struct ifnet *ifp = lio->ifp;
struct octeon_device *oct = lio->oct_dev;
uint8_t *mac_new, mac_old[ETHER_HDR_LEN];
int ret = 0;
lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);
/* Ready for link status updates */
lio->intf_open = 1;
lio_dev_info(oct, "Interface Open, ready for traffic\n");
/* tell Octeon to start forwarding packets to host */
lio_send_rx_ctrl_cmd(lio, 1);
mac_new = IF_LLADDR(ifp);
memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN);
if (lio_is_mac_changed(mac_new, mac_old)) {
ret = lio_set_mac(ifp, mac_new);
if (ret)
lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
}
/* Now inform the stack we're ready */
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
lio_dev_info(oct, "Interface is opened\n");
}
static int
lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd)
{
struct lio_ctrl_pkt nctrl;
struct lio *lio = if_getsoftc(ifp);
struct octeon_device *oct = lio->oct_dev;
int ret = 0;
nctrl.ncmd.cmd64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.lio = lio;
nctrl.cb_fn = lio_ctrl_cmd_completion;
ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
ret);
}
return (ret);
}
static int
lio_stop_nic_module(struct octeon_device *oct)
{
int i, j;
struct lio *lio;
lio_dev_dbg(oct, "Stopping network interfaces\n");
if (!oct->ifcount) {
lio_dev_err(oct, "Init for Octeon was not completed\n");
return (1);
}
mtx_lock(&oct->cmd_resp_wqlock);
oct->cmd_resp_state = LIO_DRV_OFFLINE;
mtx_unlock(&oct->cmd_resp_wqlock);
for (i = 0; i < oct->ifcount; i++) {
lio = if_getsoftc(oct->props.ifp);
for (j = 0; j < oct->num_oqs; j++)
lio_unregister_droq_ops(oct,
lio->linfo.rxpciq[j].s.q_no);
}
callout_drain(&lio->stats_timer);
for (i = 0; i < oct->ifcount; i++)
lio_destroy_nic_device(oct, i);
lio_dev_dbg(oct, "Network interface stopped\n");
return (0);
}
static void
lio_delete_glists(struct octeon_device *oct, struct lio *lio)
{
struct lio_gather *g;
int i;
if (lio->glist_lock != NULL) {
free((void *)lio->glist_lock, M_DEVBUF);
lio->glist_lock = NULL;
}
if (lio->ghead == NULL)
return;
for (i = 0; i < lio->linfo.num_txpciq; i++) {
do {
g = (struct lio_gather *)
lio_delete_first_node(&lio->ghead[i]);
free(g, M_DEVBUF);
} while (g);
if ((lio->glists_virt_base != NULL) &&
(lio->glists_virt_base[i] != NULL)) {
lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
lio->glists_virt_base[i]);
}
}
free(lio->glists_virt_base, M_DEVBUF);
lio->glists_virt_base = NULL;
free(lio->glists_dma_base, M_DEVBUF);
lio->glists_dma_base = NULL;
free(lio->ghead, M_DEVBUF);
lio->ghead = NULL;
}
static int
lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
struct lio_gather *g;
int i, j;
- lio->glist_lock = mallocarray(num_iqs, sizeof(*lio->glist_lock),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
if (lio->glist_lock == NULL)
return (1);
- lio->ghead = mallocarray(num_iqs, sizeof(*lio->ghead), M_DEVBUF,
+ lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (lio->ghead == NULL) {
free((void *)lio->glist_lock, M_DEVBUF);
lio->glist_lock = NULL;
return (1);
}
lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
LIO_SG_ENTRY_SIZE);
/*
* Allocate memory to store the virtual and DMA base addresses
* of the per-glist consistent memory.
*/
- lio->glists_virt_base = mallocarray(num_iqs, sizeof(void *), M_DEVBUF,
+ lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
M_NOWAIT | M_ZERO);
- lio->glists_dma_base = mallocarray(num_iqs, sizeof(vm_paddr_t),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
lio_delete_glists(oct, lio);
return (1);
}
for (i = 0; i < num_iqs; i++) {
mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);
STAILQ_INIT(&lio->ghead[i]);
lio->glists_virt_base[i] =
lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
(vm_paddr_t *)&lio->glists_dma_base[i]);
if (lio->glists_virt_base[i] == NULL) {
lio_delete_glists(oct, lio);
return (1);
}
for (j = 0; j < lio->tx_qsize; j++) {
g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
if (g == NULL)
break;
g->sg = (struct lio_sg_entry *)
((uint64_t)lio->glists_virt_base[i] +
(j * lio->glist_entry_size));
g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
(j * lio->glist_entry_size);
STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
}
if (j != lio->tx_qsize) {
lio_delete_glists(oct, lio);
return (1);
}
}
return (0);
}
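/*
 * The hunk in lio_setup_glists() above swaps mallocarray(9) for plain
 * malloc(9); the only behavioral difference is the overflow check on the
 * nmemb * size multiplication.  A minimal sketch of that check, shown for
 * comparison only (the helper name is hypothetical, and SIZE_MAX is assumed
 * to be visible here):
 */
static void *
lio_calloc_nowait_sketch(size_t nmemb, size_t size)
{

	/* Refuse the allocation if nmemb * size would wrap around. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size, M_DEVBUF, M_NOWAIT | M_ZERO));
}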
void
lio_stop(struct ifnet *ifp)
{
struct lio *lio = if_getsoftc(ifp);
struct octeon_device *oct = lio->oct_dev;
lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
if_link_state_change(ifp, LINK_STATE_DOWN);
lio->intf_open = 0;
lio->linfo.link.s.link_up = 0;
lio->link_changes++;
lio_send_rx_ctrl_cmd(lio, 0);
/* Tell the stack that the interface is no longer active */
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
lio_dev_info(oct, "Interface is stopped\n");
}
static void
lio_check_rx_oom_status(struct lio *lio)
{
struct lio_droq *droq;
struct octeon_device *oct = lio->oct_dev;
int desc_refilled;
int q, q_no = 0;
for (q = 0; q < oct->num_oqs; q++) {
q_no = lio->linfo.rxpciq[q].s.q_no;
droq = oct->droq[q_no];
if (droq == NULL)
continue;
if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
mtx_lock(&droq->lock);
desc_refilled = lio_droq_refill(oct, droq);
/*
* Flush the droq descriptor data to memory to be sure
* that when we update the credits the data in memory
* is accurate.
*/
wmb();
lio_write_csr32(oct, droq->pkts_credit_reg,
desc_refilled);
/* make sure mmio write completes */
__compiler_membar();
mtx_unlock(&droq->lock);
}
}
}
static void
lio_poll_check_rx_oom_status(void *arg, int pending __unused)
{
struct lio_tq *rx_status_tq = arg;
struct lio *lio = rx_status_tq->ctxptr;
if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
lio_check_rx_oom_status(lio);
taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
lio_ms_to_ticks(50));
}
static int
lio_setup_rx_oom_poll_fn(struct ifnet *ifp)
{
struct lio *lio = if_getsoftc(ifp);
struct octeon_device *oct = lio->oct_dev;
struct lio_tq *rx_status_tq;
rx_status_tq = &lio->rx_status_tq;
rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
taskqueue_thread_enqueue,
&rx_status_tq->tq);
if (rx_status_tq->tq == NULL) {
lio_dev_err(oct, "unable to create lio rx oom status tq\n");
return (-1);
}
TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
lio_poll_check_rx_oom_status, (void *)rx_status_tq);
rx_status_tq->ctxptr = lio;
taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
"lio%d_rx_oom_status",
oct->octeon_id);
taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
lio_ms_to_ticks(50));
return (0);
}
static void
lio_cleanup_rx_oom_poll_fn(struct ifnet *ifp)
{
struct lio *lio = if_getsoftc(ifp);
if (lio->rx_status_tq.tq != NULL) {
while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
&lio->rx_status_tq.work, NULL))
taskqueue_drain_timeout(lio->rx_status_tq.tq,
&lio->rx_status_tq.work);
taskqueue_free(lio->rx_status_tq.tq);
lio->rx_status_tq.tq = NULL;
}
}
static void
lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct ifnet *ifp = oct->props.ifp;
struct lio *lio;
if (ifp == NULL) {
lio_dev_err(oct, "%s No ifp ptr for index %d\n",
__func__, ifidx);
return;
}
lio = if_getsoftc(ifp);
lio_ifstate_set(lio, LIO_IFSTATE_DETACH);
lio_dev_dbg(oct, "NIC device cleanup\n");
if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
lio_stop(ifp);
if (lio_wait_for_pending_requests(oct))
lio_dev_err(oct, "There were pending requests\n");
if (lio_wait_for_instr_fetch(oct))
lio_dev_err(oct, "IQ had pending instructions\n");
if (lio_wait_for_oq_pkts(oct))
lio_dev_err(oct, "OQ had pending packets\n");
if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
ether_ifdetach(ifp);
lio_tcp_lro_free(oct, ifp);
lio_cleanup_rx_oom_poll_fn(ifp);
lio_delete_glists(oct, lio);
EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);
free(lio, M_DEVBUF);
if_free(ifp);
oct->props.gmxport = -1;
oct->props.ifp = NULL;
}
static void
print_link_info(struct ifnet *ifp)
{
struct lio *lio = if_getsoftc(ifp);
if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
struct octeon_link_info *linfo = &lio->linfo;
if (linfo->link.s.link_up) {
lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
linfo->link.s.speed,
(linfo->link.s.duplex) ? "Full" : "Half");
} else {
lio_dev_info(lio->oct_dev, "Link Down\n");
}
}
}
static inline void
lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls)
{
struct lio *lio = if_getsoftc(ifp);
int changed = (lio->linfo.link.link_status64 != ls->link_status64);
lio->linfo.link.link_status64 = ls->link_status64;
if ((lio->intf_open) && (changed)) {
print_link_info(ifp);
lio->link_changes++;
if (lio->linfo.link.s.link_up)
if_link_state_change(ifp, LINK_STATE_UP);
else
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
/*
* \brief Callback for rx ctrl
* @param status status of request
* @param buf pointer to resp structure
*/
static void
lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
struct lio_soft_command *sc = (struct lio_soft_command *)buf;
struct lio_rx_ctl_context *ctx;
ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
oct = lio_get_device(ctx->octeon_id);
if (status)
lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
LIO_CAST64(status));
ctx->cond = 1;
/*
* This barrier is required to be sure that the response has been
* written fully before waking up the handler
*/
wmb();
}
static void
lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct lio_soft_command *sc;
struct lio_rx_ctl_context *ctx;
union octeon_cmd *ncmd;
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
int ctx_size = sizeof(struct lio_rx_ctl_context);
int retval;
if (oct->props.rx_on == start_stop)
return;
sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
if (sc == NULL)
return;
ncmd = (union octeon_cmd *)sc->virtdptr;
ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
ctx->cond = 0;
ctx->octeon_id = lio_get_device_id(oct);
ncmd->cmd64 = 0;
ncmd->s.cmd = LIO_CMD_RX_CTL;
ncmd->s.param1 = start_stop;
lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));
sc->iq_no = lio->linfo.txpciq[0].s.q_no;
lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
0, 0);
sc->callback = lio_rx_ctl_callback;
sc->callback_arg = sc;
sc->wait_time = 5000;
retval = lio_send_soft_command(oct, sc);
if (retval == LIO_IQ_SEND_FAILED) {
lio_dev_err(oct, "Failed to send RX Control message\n");
} else {
/*
* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
lio_sleep_cond(oct, &ctx->cond);
oct->props.rx_on = start_stop;
}
lio_free_soft_command(oct, sc);
}
static void
lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid)
{
struct lio_ctrl_pkt nctrl;
struct lio *lio = if_getsoftc(ifp);
struct octeon_device *oct = lio->oct_dev;
int ret = 0;
if (if_getsoftc(ifp) != arg) /* Not our event */
return;
if ((vid == 0) || (vid > 4095)) /* Invalid */
return;
bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
nctrl.ncmd.cmd64 = 0;
nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.lio = lio;
nctrl.cb_fn = lio_ctrl_cmd_completion;
ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
}
static void
lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid)
{
struct lio_ctrl_pkt nctrl;
struct lio *lio = if_getsoftc(ifp);
struct octeon_device *oct = lio->oct_dev;
int ret = 0;
if (if_getsoftc(ifp) != arg) /* Not our event */
return;
if ((vid == 0) || (vid > 4095)) /* Invalid */
return;
bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
nctrl.ncmd.cmd64 = 0;
nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.lio = lio;
nctrl.cb_fn = lio_ctrl_cmd_completion;
ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
lio_dev_err(oct,
"Kill VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
}
static int
lio_wait_for_oq_pkts(struct octeon_device *oct)
{
int i, pending_pkts, pkt_cnt = 0, retry = 100;
do {
pending_pkts = 0;
for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
if (!(oct->io_qmask.oq & BIT_ULL(i)))
continue;
pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
&oct->droq[i]->droq_task);
}
}
pkt_cnt = 0;
lio_sleep_timeout(1);
} while (retry-- && pending_pkts);
return (pkt_cnt);
}
static void
lio_destroy_resources(struct octeon_device *oct)
{
int i, refcount;
switch (atomic_load_acq_int(&oct->status)) {
case LIO_DEV_RUNNING:
case LIO_DEV_CORE_OK:
/* No more instructions will be forwarded. */
atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);
oct->app_mode = LIO_DRV_INVALID_APP;
lio_dev_dbg(oct, "Device state is now %s\n",
lio_get_state_string(&oct->status));
lio_sleep_timeout(100);
/* fallthrough */
case LIO_DEV_HOST_OK:
/* fallthrough */
case LIO_DEV_CONSOLE_INIT_DONE:
/* Remove any consoles */
lio_remove_consoles(oct);
/* fallthrough */
case LIO_DEV_IO_QUEUES_DONE:
if (lio_wait_for_pending_requests(oct))
lio_dev_err(oct, "There were pending requests\n");
if (lio_wait_for_instr_fetch(oct))
lio_dev_err(oct, "IQ had pending instructions\n");
/*
* Disable the input and output queues now. No more packets will
* arrive from Octeon, but we should wait for all packet
* processing to finish.
*/
oct->fn_list.disable_io_queues(oct);
if (lio_wait_for_oq_pkts(oct))
lio_dev_err(oct, "OQ had pending packets\n");
/* fallthrough */
case LIO_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
if (oct->msix_on) {
for (i = 0; i < oct->num_msix_irqs - 1; i++) {
if (oct->ioq_vector[i].tag != NULL) {
bus_teardown_intr(oct->device,
oct->ioq_vector[i].msix_res,
oct->ioq_vector[i].tag);
oct->ioq_vector[i].tag = NULL;
}
if (oct->ioq_vector[i].msix_res != NULL) {
bus_release_resource(oct->device,
SYS_RES_IRQ,
oct->ioq_vector[i].vector,
oct->ioq_vector[i].msix_res);
oct->ioq_vector[i].msix_res = NULL;
}
}
/* The non-IOV vector's argument is the oct struct. */
if (oct->tag != NULL) {
bus_teardown_intr(oct->device, oct->msix_res,
oct->tag);
oct->tag = NULL;
}
if (oct->msix_res != NULL) {
bus_release_resource(oct->device, SYS_RES_IRQ,
oct->aux_vector,
oct->msix_res);
oct->msix_res = NULL;
}
pci_release_msi(oct->device);
}
/* fallthrough */
case LIO_DEV_IN_RESET:
case LIO_DEV_DROQ_INIT_DONE:
/* Wait for any pending operations */
lio_mdelay(100);
for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
if (!(oct->io_qmask.oq & BIT_ULL(i)))
continue;
lio_delete_droq(oct, i);
}
/* fallthrough */
case LIO_DEV_RESP_LIST_INIT_DONE:
for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
if (oct->droq[i] != NULL) {
free(oct->droq[i], M_DEVBUF);
oct->droq[i] = NULL;
}
}
lio_delete_response_list(oct);
/* fallthrough */
case LIO_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
lio_delete_instr_queue(oct, i);
}
/* fallthrough */
case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
if (oct->instr_queue[i] != NULL) {
free(oct->instr_queue[i], M_DEVBUF);
oct->instr_queue[i] = NULL;
}
}
lio_free_ioq_vector(oct);
/* fallthrough */
case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
lio_free_sc_buffer_pool(oct);
/* fallthrough */
case LIO_DEV_DISPATCH_INIT_DONE:
lio_delete_dispatch_list(oct);
/* fallthrough */
case LIO_DEV_PCI_MAP_DONE:
refcount = lio_deregister_device(oct);
if (fw_type_is_none())
lio_pci_flr(oct);
if (!refcount)
oct->fn_list.soft_reset(oct);
lio_unmap_pci_barx(oct, 0);
lio_unmap_pci_barx(oct, 1);
/* fallthrough */
case LIO_DEV_PCI_ENABLE_DONE:
/* Disable the device, releasing the PCI INT */
pci_disable_busmaster(oct->device);
/* fallthrough */
case LIO_DEV_BEGIN_STATE:
break;
} /* end switch (oct->status) */
}
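/*
 * The switch above is the usual staged-teardown idiom: each case undoes one
 * initialization stage and deliberately falls through to the next-lower
 * stage, so a single call unwinds from whatever state the device reached.
 * A compact sketch of the pattern follows; the stage names and undo_*()
 * helpers are hypothetical and only illustrate the structure.
 */
extern void undo_a(void), undo_b(void), undo_c(void);	/* hypothetical */

enum sketch_stage { SKETCH_NONE, SKETCH_A_DONE, SKETCH_B_DONE, SKETCH_C_DONE };

static void
sketch_teardown(enum sketch_stage reached)
{

	switch (reached) {
	case SKETCH_C_DONE:
		undo_c();		/* undo the most recent stage first */
		/* FALLTHROUGH */
	case SKETCH_B_DONE:
		undo_b();
		/* FALLTHROUGH */
	case SKETCH_A_DONE:
		undo_a();
		/* FALLTHROUGH */
	case SKETCH_NONE:
		break;
	}
}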
Index: head/sys/dev/mpr/mpr.c
===================================================================
--- head/sys/dev/mpr/mpr.c (revision 328217)
+++ head/sys/dev/mpr/mpr.c (revision 328218)
@@ -1,3831 +1,3831 @@
/*-
* Copyright (c) 2009 Yahoo! Inc.
* Copyright (c) 2011-2015 LSI Corp.
* Copyright (c) 2013-2016 Avago Technologies
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Communications core for Avago Technologies (LSI) MPT3 */
/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/sbuf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <dev/pci/pcivar.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_all.h>
#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_pci.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>
static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static void mpr_resize_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_hw_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS);
static void mpr_parse_debug(struct mpr_softc *sc, char *list);
SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");
MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");
/*
* Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of
* any state and back to its initialization state machine.
*/
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
/*
* This union was added to smoothly convert cm->cm_desc.Words with le64toh().
* The compiler only accepts a plain uint64_t as the argument;
* otherwise it will throw this error:
* "aggregate value used where an integer was expected"
*/
typedef union _reply_descriptor {
u64 word;
struct {
u32 low;
u32 high;
} u;
} reply_descriptor, request_descriptor;
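/*
 * A minimal usage sketch for the union above (the function name and the two
 * register-offset parameters are placeholders for illustration): byte-swap
 * the 64-bit descriptor once as a plain uint64_t, then post it as two
 * 32-bit register writes.
 */
static __inline void
mpr_post_descriptor_sketch(struct mpr_softc *sc, uint64_t desc, int low_off,
    int high_off)
{
	request_descriptor rd;

	rd.word = htole64(desc);	/* or le64toh(); both need a uint64_t */
	mpr_regwrite(sc, low_off, rd.u.low);
	mpr_regwrite(sc, high_off, rd.u.high);
}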
/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };
/*
* sleep_flag can be either CAN_SLEEP or NO_SLEEP.
* If this function is called from process context, it can sleep
* and there is no harm in sleeping.  If it is called from an
* interrupt handler, we cannot sleep and need the NO_SLEEP flag set.
* Based on the sleep flag, the driver will call msleep, pause or DELAY.
* msleep and pause behave the same, but pause is used when mpr_mtx
* is not held by the driver.
*/
static int
mpr_diag_reset(struct mpr_softc *sc,int sleep_flag)
{
uint32_t reg;
int i, error, tries = 0;
uint8_t first_wait_done = FALSE;
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
/* Clear any pending interrupts */
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/*
* Force NO_SLEEP for threads that are prohibited from sleeping,
* e.g. threads running in an interrupt handler.
*/
#if __FreeBSD_version >= 1000029
if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
sleep_flag = NO_SLEEP;
mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
/* Push the magic sequence */
error = ETIMEDOUT;
while (tries++ < 20) {
for (i = 0; i < sizeof(mpt2_reset_magic); i++)
mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
mpt2_reset_magic[i]);
/* wait 100 msec */
if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
"mprdiag", hz/10);
else if (sleep_flag == CAN_SLEEP)
pause("mprdiag", hz/10);
else
DELAY(100 * 1000);
reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
error = 0;
break;
}
}
if (error) {
mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
error);
return (error);
}
/* Send the actual reset. XXX need to refresh the reg? */
reg |= MPI2_DIAG_RESET_ADAPTER;
mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
reg);
mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);
/* Wait up to 300 seconds in 50ms intervals */
error = ETIMEDOUT;
for (i = 0; i < 6000; i++) {
/*
* Wait 50 msec. If this is the first time through, wait 256
* msec to satisfy Diag Reset timing requirements.
*/
if (first_wait_done) {
if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
"mprdiag", hz/20);
else if (sleep_flag == CAN_SLEEP)
pause("mprdiag", hz/20);
else
DELAY(50 * 1000);
} else {
DELAY(256 * 1000);
first_wait_done = TRUE;
}
/*
* Check for the RESET_ADAPTER bit to be cleared first, then
* wait for the RESET state to be cleared, which takes a little
* longer.
*/
reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
if (reg & MPI2_DIAG_RESET_ADAPTER) {
continue;
}
reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
error = 0;
break;
}
}
if (error) {
mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n",
error);
return (error);
}
mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n");
return (0);
}
static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{
int error;
MPR_FUNCTRACE(sc);
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
error = 0;
mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
MPI2_DOORBELL_FUNCTION_SHIFT);
if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT,
"Doorbell handshake failed\n");
error = ETIMEDOUT;
}
mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
return (error);
}
static int
mpr_transition_ready(struct mpr_softc *sc)
{
uint32_t reg, state;
int error, tries = 0;
int sleep_flags;
MPR_FUNCTRACE(sc);
/* If we are in attach call, do not sleep */
sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
? CAN_SLEEP : NO_SLEEP;
error = 0;
mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n",
__func__, sleep_flags);
while (tries++ < 1200) {
reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg);
/*
* Ensure the IOC is ready to talk. If it's not, try
* resetting it.
*/
if (reg & MPI2_DOORBELL_USED) {
mpr_dprint(sc, MPR_INIT, " Not ready, sending diag "
"reset\n");
mpr_diag_reset(sc, sleep_flags);
DELAY(50000);
continue;
}
/* Is the adapter owned by another peer? */
if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
(MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the "
"control of another peer host, aborting "
"initialization.\n");
error = ENXIO;
break;
}
state = reg & MPI2_IOC_STATE_MASK;
if (state == MPI2_IOC_STATE_READY) {
/* Ready to go! */
error = 0;
break;
} else if (state == MPI2_IOC_STATE_FAULT) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault "
"state 0x%x, resetting\n",
state & MPI2_DOORBELL_FAULT_CODE_MASK);
mpr_diag_reset(sc, sleep_flags);
} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
/* Need to take ownership */
mpr_message_unit_reset(sc, sleep_flags);
} else if (state == MPI2_IOC_STATE_RESET) {
/* Wait a bit, IOC might be in transition */
mpr_dprint(sc, MPR_INIT|MPR_FAULT,
"IOC in unexpected reset state\n");
} else {
mpr_dprint(sc, MPR_INIT|MPR_FAULT,
"IOC in unknown state 0x%x\n", state);
error = EINVAL;
break;
}
/* Wait 50ms for things to settle down. */
DELAY(50000);
}
if (error)
mpr_dprint(sc, MPR_INIT|MPR_FAULT,
"Cannot transition IOC to ready\n");
mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
return (error);
}
static int
mpr_transition_operational(struct mpr_softc *sc)
{
uint32_t reg, state;
int error;
MPR_FUNCTRACE(sc);
error = 0;
reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);
state = reg & MPI2_IOC_STATE_MASK;
if (state != MPI2_IOC_STATE_READY) {
mpr_dprint(sc, MPR_INIT, "IOC not ready\n");
if ((error = mpr_transition_ready(sc)) != 0) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT,
"failed to transition ready, exit\n");
return (error);
}
}
error = mpr_send_iocinit(sc);
mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
return (error);
}
static void
mpr_resize_queues(struct mpr_softc *sc)
{
int reqcr, prireqcr;
/*
* Size the queues. Since the reply queues always need one free
* entry, we'll deduct one reply message here. The LSI documents
* suggest instead to add a count to the request queue, but I think
* that it's better to deduct from reply queue.
*/
prireqcr = MAX(1, sc->max_prireqframes);
prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);
reqcr = MAX(2, sc->max_reqframes);
reqcr = MIN(reqcr, sc->facts->RequestCredit);
sc->num_reqs = prireqcr + reqcr;
sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
/*
* Figure out the number of MSIx-based queues. If the firmware or
* user has done something crazy and not allowed enough credit for
* the queues to be useful then don't enable multi-queue.
*/
if (sc->facts->MaxMSIxVectors < 2)
sc->msi_msgs = 1;
if (sc->msi_msgs > 1) {
sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
if (sc->num_reqs / sc->msi_msgs < 2)
sc->msi_msgs = 1;
}
mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
sc->msi_msgs, sc->num_reqs, sc->num_replies);
}
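/*
 * A worked example with hypothetical tunable/firmware values, to make the
 * clamping above concrete: with max_prireqframes = 128 and
 * HighPriorityCredit = 112, prireqcr = MIN(MAX(1, 128), 112) = 112; with
 * max_reqframes = 2048 and RequestCredit = 1024, reqcr =
 * MIN(MAX(2, 2048), 1024) = 1024, so num_reqs = 112 + 1024 = 1136.
 * Likewise, with max_replyframes + max_evtframes = 2080 and
 * MaxReplyDescriptorPostQueueDepth = 2048, num_replies = 2048 - 1 = 2047.
 */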
/*
* This is called during attach and when re-initializing due to a Diag Reset.
* IOC Facts is used to allocate many of the structures needed by the driver.
* If called from attach, de-allocation is not required because the driver has
* not allocated any structures yet, but if called from a Diag Reset, previously
* allocated structures based on IOC Facts will need to be freed and
* reallocated based on the latest IOC Facts.
*/
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
int error;
Mpi2IOCFactsReply_t saved_facts;
uint8_t saved_mode, reallocating;
mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__);
/* Save old IOC Facts and then only reallocate if Facts have changed */
if (!attaching) {
bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
}
/*
* Get IOC Facts. In all cases throughout this function, panic if doing
* a re-initialization and only return the error if attaching so the OS
* can handle it.
*/
if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
if (attaching) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get "
"IOC Facts with error %d, exit\n", error);
return (error);
} else {
panic("%s failed to get IOC Facts with error %d\n",
__func__, error);
}
}
MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);
snprintf(sc->fw_version, sizeof(sc->fw_version),
"%02d.%02d.%02d.%02d",
sc->facts->FWVersion.Struct.Major,
sc->facts->FWVersion.Struct.Minor,
sc->facts->FWVersion.Struct.Unit,
sc->facts->FWVersion.Struct.Dev);
mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
MPR_DRIVER_VERSION);
mpr_dprint(sc, MPR_INFO,
"IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
"\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
"\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
"\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
"\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV");
/*
* If the chip doesn't support event replay then a hard reset will be
* required to trigger a full discovery. Do the reset here then
* retransition to Ready. A hard reset might have already been done,
* but it doesn't hurt to do it again. Only do this if attaching, not
* for a Diag Reset.
*/
if (attaching && ((sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n");
mpr_diag_reset(sc, NO_SLEEP);
if ((error = mpr_transition_ready(sc)) != 0) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
"transition to ready with error %d, exit\n",
error);
return (error);
}
}
/*
* Set flag if IR Firmware is loaded. If the RAID Capability has
* changed from the previous IOC Facts, log a warning, but only if
* checking this after a Diag Reset and not during attach.
*/
saved_mode = sc->ir_firmware;
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
sc->ir_firmware = 1;
if (!attaching) {
if (sc->ir_firmware != saved_mode) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode "
"in IOC Facts does not match previous mode\n");
}
}
/* Only deallocate and reallocate if relevant IOC Facts have changed */
reallocating = FALSE;
sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED;
if ((!attaching) &&
((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
(saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
(saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
(saved_facts.RequestCredit != sc->facts->RequestCredit) ||
(saved_facts.ProductID != sc->facts->ProductID) ||
(saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
(saved_facts.IOCRequestFrameSize !=
sc->facts->IOCRequestFrameSize) ||
(saved_facts.IOCMaxChainSegmentSize !=
sc->facts->IOCMaxChainSegmentSize) ||
(saved_facts.MaxTargets != sc->facts->MaxTargets) ||
(saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
(saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
(saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
(saved_facts.MaxReplyDescriptorPostQueueDepth !=
sc->facts->MaxReplyDescriptorPostQueueDepth) ||
(saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
(saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
(saved_facts.MaxPersistentEntries !=
sc->facts->MaxPersistentEntries))) {
reallocating = TRUE;
/* Record that we reallocated everything */
sc->mpr_flags |= MPR_FLAGS_REALLOCATED;
}
/*
* Some things should be done if attaching or re-allocating after a Diag
* Reset, but are not needed after a Diag Reset if the FW has not
* changed.
*/
if (attaching || reallocating) {
/*
* Check if controller supports FW diag buffers and set flag to
* enable each type.
*/
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
enabled = TRUE;
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
enabled = TRUE;
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
enabled = TRUE;
/*
* Set flags for some supported items.
*/
if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
sc->eedp_enabled = TRUE;
if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
sc->control_TLR = TRUE;
if (sc->facts->IOCCapabilities &
MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
sc->atomic_desc_capable = TRUE;
mpr_resize_queues(sc);
/*
* Initialize all Tail Queues
*/
TAILQ_INIT(&sc->req_list);
TAILQ_INIT(&sc->high_priority_req_list);
TAILQ_INIT(&sc->chain_list);
TAILQ_INIT(&sc->prp_page_list);
TAILQ_INIT(&sc->tm_list);
}
/*
* If doing a Diag Reset and the FW is significantly different
* (reallocating will be set above in IOC Facts comparison), then all
* buffers based on the IOC Facts will need to be freed before they are
* reallocated.
*/
if (reallocating) {
mpr_iocfacts_free(sc);
mprsas_realloc_targets(sc, saved_facts.MaxTargets +
saved_facts.MaxVolumes);
}
/*
* Any deallocation has been completed. Now start reallocating
* if needed. Will only need to reallocate if attaching or if the new
* IOC Facts are different from the previous IOC Facts after a Diag
* Reset. Targets have already been allocated above if needed.
*/
error = 0;
while (attaching || reallocating) {
if ((error = mpr_alloc_hw_queues(sc)) != 0)
break;
if ((error = mpr_alloc_replies(sc)) != 0)
break;
if ((error = mpr_alloc_requests(sc)) != 0)
break;
if ((error = mpr_alloc_queues(sc)) != 0)
break;
break;
}
if (error) {
mpr_dprint(sc, MPR_INIT|MPR_ERROR,
"Failed to alloc queues with error %d\n", error);
mpr_free(sc);
return (error);
}
/* Always initialize the queues */
bzero(sc->free_queue, sc->fqdepth * 4);
mpr_init_queues(sc);
/*
* Always get the chip out of the reset state, but only panic if not
* attaching. If attaching and there is an error, that is handled by
* the OS.
*/
error = mpr_transition_operational(sc);
if (error != 0) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
"transition to operational with error %d\n", error);
mpr_free(sc);
return (error);
}
/*
* Finish the queue initialization.
* These are set here instead of in mpr_init_queues() because the
* IOC resets these values during the state transition in
* mpr_transition_operational(). The free index is set to 1
* because the corresponding index in the IOC is set to 0, and the
* IOC treats the queues as full if both are set to the same value.
* Hence the reason that the queue can't hold all of the possible
* replies.
*/
sc->replypostindex = 0;
mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
/*
* Attach the subsystems so they can prepare their event masks.
* XXX Should be dynamic so that IM/IR and user modules can attach
*/
error = 0;
while (attaching) {
mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n");
if ((error = mpr_attach_log(sc)) != 0)
break;
if ((error = mpr_attach_sas(sc)) != 0)
break;
if ((error = mpr_attach_user(sc)) != 0)
break;
break;
}
if (error) {
mpr_dprint(sc, MPR_INIT|MPR_ERROR,
"Failed to attach all subsystems: error %d\n", error);
mpr_free(sc);
return (error);
}
/*
* XXX If the number of MSI-X vectors changes during re-init, this
* won't see it and adjust.
*/
if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) {
mpr_dprint(sc, MPR_INIT|MPR_ERROR,
"Failed to setup interrupts\n");
mpr_free(sc);
return (error);
}
return (error);
}
/*
* This is called if memory is being free (during detach for example) and when
* buffers need to be reallocated due to a Diag Reset.
*/
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
struct mpr_command *cm;
int i;
mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
if (sc->free_busaddr != 0)
bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
if (sc->free_queue != NULL)
bus_dmamem_free(sc->queues_dmat, sc->free_queue,
sc->queues_map);
if (sc->queues_dmat != NULL)
bus_dma_tag_destroy(sc->queues_dmat);
if (sc->chain_busaddr != 0)
bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
if (sc->chain_frames != NULL)
bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
sc->chain_map);
if (sc->chain_dmat != NULL)
bus_dma_tag_destroy(sc->chain_dmat);
if (sc->sense_busaddr != 0)
bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
if (sc->sense_frames != NULL)
bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
sc->sense_map);
if (sc->sense_dmat != NULL)
bus_dma_tag_destroy(sc->sense_dmat);
if (sc->prp_page_busaddr != 0)
bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
if (sc->prp_pages != NULL)
bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
sc->prp_page_map);
if (sc->prp_page_dmat != NULL)
bus_dma_tag_destroy(sc->prp_page_dmat);
if (sc->reply_busaddr != 0)
bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
if (sc->reply_frames != NULL)
bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
sc->reply_map);
if (sc->reply_dmat != NULL)
bus_dma_tag_destroy(sc->reply_dmat);
if (sc->req_busaddr != 0)
bus_dmamap_unload(sc->req_dmat, sc->req_map);
if (sc->req_frames != NULL)
bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
if (sc->req_dmat != NULL)
bus_dma_tag_destroy(sc->req_dmat);
if (sc->chains != NULL)
free(sc->chains, M_MPR);
if (sc->prps != NULL)
free(sc->prps, M_MPR);
if (sc->commands != NULL) {
for (i = 1; i < sc->num_reqs; i++) {
cm = &sc->commands[i];
bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
}
free(sc->commands, M_MPR);
}
if (sc->buffer_dmat != NULL)
bus_dma_tag_destroy(sc->buffer_dmat);
mpr_pci_free_interrupts(sc);
free(sc->queues, M_MPR);
sc->queues = NULL;
}
/*
* The terms diag reset and hard reset are used interchangeably in the MPI
* docs to mean resetting the controller chip. In this code diag reset
* cleans everything up, and the hard reset function just sends the reset
* sequence to the chip. This should probably be refactored so that every
* subsystem gets a reset notification of some sort, and can clean up
* appropriately.
*/
int
mpr_reinit(struct mpr_softc *sc)
{
int error;
struct mprsas_softc *sassc;
sassc = sc->sassc;
MPR_FUNCTRACE(sc);
mtx_assert(&sc->mpr_mtx, MA_OWNED);
mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
return 0;
}
/*
* Make sure the completion callbacks can recognize they're getting
* a NULL cm_reply due to a reset.
*/
sc->mpr_flags |= MPR_FLAGS_DIAGRESET;
/*
* Mask interrupts here.
*/
mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
mpr_mask_intr(sc);
error = mpr_diag_reset(sc, CAN_SLEEP);
if (error != 0) {
panic("%s hard reset failed with error %d\n", __func__, error);
}
/* Restore the PCI state, including the MSI-X registers */
mpr_pci_restore(sc);
/* Give the I/O subsystem special priority to get itself prepared */
mprsas_handle_reinit(sc);
/*
* Get IOC Facts and allocate all structures based on this information.
* The attach function will also call mpr_iocfacts_allocate at startup.
* If relevant values have changed in IOC Facts, this function will free
* all of the memory based on IOC Facts and reallocate that memory.
*/
if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
panic("%s IOC Facts based allocation failed with error %d\n",
__func__, error);
}
/*
* Mapping structures will be re-allocated after getting IOC Page8, so
* free these structures here.
*/
mpr_mapping_exit(sc);
/*
* The only static config page currently read is IOC Page8; others can
* be added in the future. The values in IOC Page8 may have changed
* after a Diag Reset due to user modification, so always re-read them.
* Interrupts are masked, so unmask them before fetching the config
* pages.
*/
mpr_unmask_intr(sc);
sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
mpr_base_static_config_pages(sc);
/*
* Some mapping info is based in IOC Page8 data, so re-initialize the
* mapping tables.
*/
mpr_mapping_initialize(sc);
/*
* Restart will reload the event masks clobbered by the reset, and
* then enable the port.
*/
mpr_reregister_events(sc);
/* the end of discovery will release the simq, so we're done. */
mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
sc, sc->replypostindex, sc->replyfreeindex);
mprsas_release_simq_reinit(sassc);
mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);
return 0;
}
/* Wait for the chip to ACK a word that we've put into its FIFO.
* Wait for <timeout> seconds. In a single loop iteration the busy
* path waits for 500 microseconds, so the total is
* [ 0.5 * (2000 * <timeout>) ] milliseconds.
*/
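/*
* Illustrative arithmetic (not part of the original comment): with
* timeout = 5, CAN_SLEEP gives cntdn = 1000 * 5 = 5000 iterations of
* roughly 1 ms each, and NO_SLEEP gives cntdn = 2000 * 5 = 10000
* iterations of 500 us each; both work out to about 5 seconds of
* total wait.
*/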
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
u32 cntdn, count;
u32 int_status;
u32 doorbell;
count = 0;
cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
do {
int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), "
"timeout(%d)\n", __func__, count, timeout);
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
mpr_dprint(sc, MPR_FAULT,
"fault_state(0x%04x)!\n", doorbell);
return (EFAULT);
}
} else if (int_status == 0xFFFFFFFF)
goto out;
/*
* If it can sleep, sleep for 1 millisecond, else busy-wait for
* 0.5 milliseconds.
*/
if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba",
hz/1000);
else if (sleep_flag == CAN_SLEEP)
pause("mprdba", hz/1000);
else
DELAY(500);
count++;
} while (--cntdn);
out:
mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
"int_status(%x)!\n", __func__, count, int_status);
return (ETIMEDOUT);
}
/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
int retry;
for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
return (0);
DELAY(2000);
}
return (ETIMEDOUT);
}
/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
int req_sz, int reply_sz, int timeout)
{
uint32_t *data32;
uint16_t *data16;
int i, count, ioc_sz, residual;
int sleep_flags = CAN_SLEEP;
#if __FreeBSD_version >= 1000029
if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
sleep_flags = NO_SLEEP;
/* Step 1 */
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/* Step 2 */
if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
return (EBUSY);
/* Step 3
* Announce that a message is coming through the doorbell. Messages
* are pushed as 32-bit words, so round up if needed.
*/
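/*
* Illustrative example (hypothetical request size, not taken from the
* MPI headers): a 26-byte request would be announced as
* count = (26 + 3) / 4 = 7 dwords.
*/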
count = (req_sz + 3) / 4;
mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
(MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
(count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
/* Step 4 */
if (mpr_wait_db_int(sc) ||
(mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
return (ENXIO);
}
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
return (ENXIO);
}
/* Step 5 */
/* Clock out the message data synchronously in 32-bit dwords*/
data32 = (uint32_t *)req;
for (i = 0; i < count; i++) {
mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
mpr_dprint(sc, MPR_FAULT,
"Timeout while writing doorbell\n");
return (ENXIO);
}
}
/* Step 6 */
/* Clock in the reply in 16-bit words. The total length of the
* message is always in the 4th byte, so clock in the first 2 words
* manually, then loop over the rest.
*/
data16 = (uint16_t *)reply;
if (mpr_wait_db_int(sc) != 0) {
mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
return (ENXIO);
}
data16[0] =
mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
if (mpr_wait_db_int(sc) != 0) {
mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
return (ENXIO);
}
data16[1] =
mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/* Number of 32-bit words in the message */
ioc_sz = reply->MsgLength;
/*
* Figure out how many 16bit words to clock in without overrunning.
* The precision loss with dividing reply_sz can safely be
* ignored because the messages can only be multiples of 32 bits.
*/
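/*
* Illustrative example (hypothetical sizes): with reply_sz = 64 bytes
* and ioc_sz = 20 dwords, count = MIN(64 / 4, 20) * 2 = 32 16-bit
* words and residual = 20 * 2 - 32 = 8 words, which are drained by the
* residual loop below.
*/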
residual = 0;
count = MIN((reply_sz / 4), ioc_sz) * 2;
if (count < ioc_sz * 2) {
residual = ioc_sz * 2 - count;
mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
"residual message words\n", residual);
}
for (i = 2; i < count; i++) {
if (mpr_wait_db_int(sc) != 0) {
mpr_dprint(sc, MPR_FAULT,
"Timeout reading doorbell %d\n", i);
return (ENXIO);
}
data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
MPI2_DOORBELL_DATA_MASK;
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
}
/*
* Pull out residual words that won't fit into the provided buffer.
* This keeps the chip from hanging due to a driver programming
* error.
*/
while (residual--) {
if (mpr_wait_db_int(sc) != 0) {
mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
return (ENXIO);
}
(void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
}
/* Step 7 */
if (mpr_wait_db_int(sc) != 0) {
mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
return (ENXIO);
}
if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
return (0);
}
static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
request_descriptor rd;
MPR_FUNCTRACE(sc);
mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
MPR_FLAGS_SHUTDOWN))
mtx_assert(&sc->mpr_mtx, MA_OWNED);
if (++sc->io_cmds_active > sc->io_cmds_highwater)
sc->io_cmds_highwater++;
if (sc->atomic_desc_capable) {
rd.u.low = cm->cm_desc.Words.Low;
mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
rd.u.low);
} else {
rd.u.low = cm->cm_desc.Words.Low;
rd.u.high = cm->cm_desc.Words.High;
rd.word = htole64(rd.word);
mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
rd.u.low);
mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
rd.u.high);
}
}
/*
* Just the FACTS, ma'am.
*/
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
MPI2_DEFAULT_REPLY *reply;
MPI2_IOC_FACTS_REQUEST request;
int error, req_sz, reply_sz;
MPR_FUNCTRACE(sc);
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
reply = (MPI2_DEFAULT_REPLY *)facts;
bzero(&request, req_sz);
request.Function = MPI2_FUNCTION_IOC_FACTS;
error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
return (error);
}
static int
mpr_send_iocinit(struct mpr_softc *sc)
{
MPI2_IOC_INIT_REQUEST init;
MPI2_DEFAULT_REPLY reply;
int req_sz, reply_sz, error;
struct timeval now;
uint64_t time_in_msec;
MPR_FUNCTRACE(sc);
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
bzero(&init, req_sz);
bzero(&reply, reply_sz);
/*
* Fill in the init block. Note that most addresses are
* deliberately in the lower 32 bits of memory. This is a micro-
* optimization for PCI/PCI-X, though it's not clear if it helps PCIe.
*/
init.Function = MPI2_FUNCTION_IOC_INIT;
init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
init.MsgVersion = htole16(MPI2_VERSION);
init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize);
init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
init.SenseBufferAddressHigh = 0;
init.SystemReplyAddressHigh = 0;
init.SystemRequestFrameBaseAddress.High = 0;
init.SystemRequestFrameBaseAddress.Low =
htole32((uint32_t)sc->req_busaddr);
init.ReplyDescriptorPostQueueAddress.High = 0;
init.ReplyDescriptorPostQueueAddress.Low =
htole32((uint32_t)sc->post_busaddr);
init.ReplyFreeQueueAddress.High = 0;
init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
getmicrotime(&now);
time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
init.HostPageSize = HOST_PAGE_SIZE_4K;
error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
error = ENXIO;
mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
return (error);
}
void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
bus_addr_t *addr;
addr = arg;
*addr = segs[0].ds_addr;
}
static int
mpr_alloc_queues(struct mpr_softc *sc)
{
struct mpr_queue *q;
int nq, i;
nq = sc->msi_msgs;
mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq);
- sc->queues = mallocarray(nq, sizeof(struct mpr_queue), M_MPR,
+ sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR,
M_NOWAIT|M_ZERO);
if (sc->queues == NULL)
return (ENOMEM);
for (i = 0; i < nq; i++) {
q = &sc->queues[i];
mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q);
q->sc = sc;
q->qnum = i;
}
return (0);
}
static int
mpr_alloc_hw_queues(struct mpr_softc *sc)
{
bus_addr_t queues_busaddr;
uint8_t *queues;
int qsize, fqsize, pqsize;
/*
* The reply free queue contains 4 byte entries in multiples of 16 and
* aligned on a 16 byte boundary. There must always be an unused entry.
* This queue supplies fresh reply frames for the firmware to use.
*
* The reply descriptor post queue contains 8 byte entries in
* multiples of 16 and aligned on a 16 byte boundary. This queue
* contains filled-in reply frames sent from the firmware to the host.
*
* These two queues are allocated together for simplicity.
*/
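/*
* Illustrative sizing (hypothetical reply count): with
* sc->num_replies = 1000, both depths become roundup2(1001, 16) =
* 1008, so fqsize = 1008 * 4 = 4032 bytes, pqsize = 1008 * 8 = 8064
* bytes, and qsize = 12096 bytes in a single DMA allocation.
*/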
sc->fqdepth = roundup2(sc->num_replies + 1, 16);
sc->pqdepth = roundup2(sc->num_replies + 1, 16);
fqsize= sc->fqdepth * 4;
pqsize = sc->pqdepth * 8;
qsize = fqsize + pqsize;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
qsize, /* maxsize */
1, /* nsegments */
qsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->queues_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
&sc->queues_map)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n");
return (ENOMEM);
}
bzero(queues, qsize);
bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
mpr_memaddr_cb, &queues_busaddr, 0);
sc->free_queue = (uint32_t *)queues;
sc->free_busaddr = queues_busaddr;
sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
sc->post_busaddr = queues_busaddr + fqsize;
return (0);
}
static int
mpr_alloc_replies(struct mpr_softc *sc)
{
int rsize, num_replies;
/*
* sc->num_replies should be one less than sc->fqdepth. We need to
* allocate space for sc->fqdepth replies, but only sc->num_replies
* replies can be used at once.
*/
num_replies = max(sc->fqdepth, sc->num_replies);
rsize = sc->facts->ReplyFrameSize * num_replies * 4;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
4, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->reply_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
BUS_DMA_NOWAIT, &sc->reply_map)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
return (ENOMEM);
}
bzero(sc->reply_frames, rsize);
bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
mpr_memaddr_cb, &sc->reply_busaddr, 0);
return (0);
}
static int
mpr_alloc_requests(struct mpr_softc *sc)
{
struct mpr_command *cm;
struct mpr_chain *chain;
int i, rsize, nsegs;
rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->req_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
BUS_DMA_NOWAIT, &sc->req_map)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n");
return (ENOMEM);
}
bzero(sc->req_frames, rsize);
bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
mpr_memaddr_cb, &sc->req_busaddr, 0);
/*
* Gen3 and beyond use the IOCMaxChainSegmentSize from IOC Facts to
* get the size of a Chain Frame. Previous generations use the Request
* Frame size as the Chain Frame size. If IOCMaxChainSegmentSize is 0,
* use the default value. IOCMaxChainSegmentSize is the number of
* 16-byte elements (the size of an IEEE Simple SGE) that can fit in a
* Chain Frame.
*/
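/*
* Illustrative example (hypothetical IOC Facts values, and assuming
* MPR_MAX_CHAIN_ELEMENT_SIZE is the 16-byte IEEE Simple SGE size noted
* above): an IOCMaxChainSegmentSize of 8 yields a chain frame of
* 8 * 16 = 128 bytes, while a pre-Gen3 IOCRequestFrameSize of 32
* dwords yields 32 * 4 = 128 bytes.
*/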
if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
sc->chain_seg_size =
htole16(sc->facts->IOCMaxChainSegmentSize);
if (sc->chain_seg_size == 0) {
sc->chain_frame_size = MPR_DEFAULT_CHAIN_SEG_SIZE *
MPR_MAX_CHAIN_ELEMENT_SIZE;
} else {
sc->chain_frame_size = sc->chain_seg_size *
MPR_MAX_CHAIN_ELEMENT_SIZE;
}
} else {
sc->chain_frame_size = sc->facts->IOCRequestFrameSize * 4;
}
rsize = sc->chain_frame_size * sc->max_chains;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->chain_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
BUS_DMA_NOWAIT, &sc->chain_map)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
return (ENOMEM);
}
bzero(sc->chain_frames, rsize);
bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
mpr_memaddr_cb, &sc->chain_busaddr, 0);
rsize = MPR_SENSE_LEN * sc->num_reqs;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sense_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
BUS_DMA_NOWAIT, &sc->sense_map)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n");
return (ENOMEM);
}
bzero(sc->sense_frames, rsize);
bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
mpr_memaddr_cb, &sc->sense_busaddr, 0);
sc->chains = malloc(sizeof(struct mpr_chain) * sc->max_chains, M_MPR,
M_WAITOK | M_ZERO);
if (!sc->chains) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
return (ENOMEM);
}
for (i = 0; i < sc->max_chains; i++) {
chain = &sc->chains[i];
chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
i * sc->chain_frame_size);
chain->chain_busaddr = sc->chain_busaddr +
i * sc->chain_frame_size;
mpr_free_chain(sc, chain);
sc->chain_free_lowwater++;
}
/*
* Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
* these devices.
*/
if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
(sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
return (ENOMEM);
}
/* XXX Need to pick a more precise value */
nsegs = (MAXPHYS / PAGE_SIZE) + 1;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
nsegs, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, /* lockfunc */
&sc->mpr_mtx, /* lockarg */
&sc->buffer_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n");
return (ENOMEM);
}
/*
* SMID 0 cannot be used as a free command per the firmware spec.
* Just drop that command instead of risking accounting bugs.
*/
sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
M_MPR, M_WAITOK | M_ZERO);
if (!sc->commands) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n");
return (ENOMEM);
}
for (i = 1; i < sc->num_reqs; i++) {
cm = &sc->commands[i];
cm->cm_req = sc->req_frames +
i * sc->facts->IOCRequestFrameSize * 4;
cm->cm_req_busaddr = sc->req_busaddr +
i * sc->facts->IOCRequestFrameSize * 4;
cm->cm_sense = &sc->sense_frames[i];
cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
cm->cm_desc.Default.SMID = i;
cm->cm_sc = sc;
TAILQ_INIT(&cm->cm_chain_list);
TAILQ_INIT(&cm->cm_prp_page_list);
callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);
/* XXX Is a failure here a critical problem? */
if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
== 0) {
if (i <= sc->facts->HighPriorityCredit)
mpr_free_high_priority_command(sc, cm);
else
mpr_free_command(sc, cm);
} else {
panic("failed to allocate command %d\n", i);
sc->num_reqs = i;
break;
}
}
return (0);
}
/*
* Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
* which are scatter/gather lists for NVMe devices.
*
* This buffer must be contiguous due to the nature of how NVMe PRPs are built
* and translated by FW.
*
* returns ENOMEM if memory could not be allocated, otherwise returns 0.
*/
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
int PRPs_per_page, PRPs_required, pages_required;
int rsize, i;
struct mpr_prp_page *prp_page;
/*
* Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
* of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
* MAX_IO_SIZE / PAGE_SIZE = 256
*
* 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
* required for the remainder of the 1MB I/O. 512 PRPs can fit into one
* page (4096 / 8 = 512), so only one page is required for each I/O.
*
* Each of these buffers will need to be contiguous. For simplicity,
* only one buffer is allocated here, which has all of the space
* required for the NVMe Queue Depth. If there are problems allocating
* this one buffer, this function will need to change to allocate
* individual, contiguous NVME_QDEPTH buffers.
*
* The real calculation will use the real max io size. Above is just an
* example.
*
*/
PRPs_required = sc->maxio / PAGE_SIZE;
PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
pages_required = (PRPs_required / PRPs_per_page) + 1;
sc->prp_buffer_size = PAGE_SIZE * pages_required;
rsize = sc->prp_buffer_size * NVME_QDEPTH;
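/*
* Worked values for the 1MB / 4KB example in the comment above,
* assuming an 8-byte PRP entry (the 4096 / 8 = 512 figure):
* PRPs_required = 1MB / 4KB = 256, PRPs_per_page = (4096 / 8) - 1 =
* 511, pages_required = 256 / 511 + 1 = 1, so prp_buffer_size is
* 4096 bytes and rsize = 4096 * NVME_QDEPTH.
*/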
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
4, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->prp_page_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
"tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
BUS_DMA_NOWAIT, &sc->prp_page_map)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n");
return (ENOMEM);
}
bzero(sc->prp_pages, rsize);
bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);
sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
M_WAITOK | M_ZERO);
for (i = 0; i < NVME_QDEPTH; i++) {
prp_page = &sc->prps[i];
prp_page->prp_page = (uint64_t *)(sc->prp_pages +
i * sc->prp_buffer_size);
prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
i * sc->prp_buffer_size);
mpr_free_prp_page(sc, prp_page);
sc->prp_pages_free_lowwater++;
}
return (0);
}
static int
mpr_init_queues(struct mpr_softc *sc)
{
int i;
memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
/*
* According to the spec, we need to use one less reply than we
* have space for on the queue. So sc->num_replies (the number we
* use) should be less than sc->fqdepth (allocated size).
*/
if (sc->num_replies >= sc->fqdepth)
return (EINVAL);
/*
* Initialize all of the free queue entries.
*/
for (i = 0; i < sc->fqdepth; i++) {
sc->free_queue[i] = sc->reply_busaddr +
(i * sc->facts->ReplyFrameSize * 4);
}
sc->replyfreeindex = sc->num_replies;
return (0);
}
/* Get the driver parameter tunables. Lowest priority are the driver defaults.
* Next are the global settings, if they exist. Highest are the per-unit
* settings, if they exist.
*/
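/*
* Illustrative loader.conf usage (values are made up; tunable names
* are the ones fetched below):
*
*   hw.mpr.debug_level="info,fault"   # global default for all units
*   dev.mpr.0.max_chains=4096         # per-unit override for mpr0
*
* A per-unit dev.mpr.<unit>.* setting overrides the matching hw.mpr.*
* global, which in turn overrides the compiled-in default.
*/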
void
mpr_get_tunables(struct mpr_softc *sc)
{
char tmpstr[80], mpr_debug[80];
/* XXX default to some debugging for now */
sc->mpr_debug = MPR_INFO | MPR_FAULT;
sc->disable_msix = 0;
sc->disable_msi = 0;
sc->max_msix = MPR_MSIX_MAX;
sc->max_chains = MPR_CHAIN_FRAMES;
sc->max_io_pages = MPR_MAXIO_PAGES;
sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
sc->use_phynum = 1;
sc->max_reqframes = MPR_REQ_FRAMES;
sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
sc->max_replyframes = MPR_REPLY_FRAMES;
sc->max_evtframes = MPR_EVT_REPLY_FRAMES;
/*
* Grab the global variables.
*/
bzero(mpr_debug, 80);
if (TUNABLE_STR_FETCH("hw.mpr.debug_level", mpr_debug, 80) != 0)
mpr_parse_debug(sc, mpr_debug);
TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix);
TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes);
/* Grab the unit-instance variables */
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
device_get_unit(sc->mpr_dev));
bzero(mpr_debug, 80);
if (TUNABLE_STR_FETCH(tmpstr, mpr_debug, 80) != 0)
mpr_parse_debug(sc, mpr_debug);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_msix",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);
bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
device_get_unit(sc->mpr_dev));
TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
}
static void
mpr_setup_sysctl(struct mpr_softc *sc)
{
struct sysctl_ctx_list *sysctl_ctx = NULL;
struct sysctl_oid *sysctl_tree = NULL;
char tmpstr[80], tmpstr2[80];
/*
* Setup the sysctl variable so the user can change the debug level
* on the fly.
*/
snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d",
device_get_unit(sc->mpr_dev));
snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev));
sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev);
if (sysctl_ctx != NULL)
sysctl_tree = device_get_sysctl_tree(sc->mpr_dev);
if (sysctl_tree == NULL) {
sysctl_ctx_init(&sc->sysctl_ctx);
sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2,
CTLFLAG_RD, 0, tmpstr);
if (sc->sysctl_tree == NULL)
return;
sysctl_ctx = &sc->sysctl_ctx;
sysctl_tree = sc->sysctl_tree;
}
SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
sc, 0, mpr_debug_sysctl, "A", "mpr debug level");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
"Disable the use of MSI-X interrupts");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0,
"User-defined maximum number of MSIX queues");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0,
"Negotiated number of MSIX queues");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0,
"Total number of allocated request frames");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0,
"Total number of allocated high priority request frames");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0,
"Total number of allocated reply frames");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0,
"Total number of event frames allocated");
SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version,
strlen(sc->fw_version), "firmware version");
SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION,
strlen(MPR_DRIVER_VERSION), "driver version");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "io_cmds_active", CTLFLAG_RD,
&sc->io_cmds_active, 0, "number of currently active commands");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
&sc->io_cmds_highwater, 0, "maximum active commands seen");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_free", CTLFLAG_RD,
&sc->chain_free, 0, "number of free chain elements");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
&sc->chain_free_lowwater, 0,"lowest number of free chain elements");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_chains", CTLFLAG_RD,
&sc->max_chains, 0,"maximum chain frames that will be allocated");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_io_pages", CTLFLAG_RD,
&sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use "
"IOCFacts)");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
"enable SSU to SATA SSD/HDD at shutdown");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
&sc->chain_alloc_fail, "chain allocation failures");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
&sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
"spinup after SATA ID error");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
"Use the phy number for enumeration");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "prp_pages_free", CTLFLAG_RD,
&sc->prp_pages_free, 0, "number of free PRP pages");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD,
&sc->prp_pages_free_lowwater, 0,"lowest number of free PRP pages");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
&sc->prp_page_alloc_fail, "PRP page allocation failures");
}
static struct mpr_debug_string {
char *name;
int flag;
} mpr_debug_strings[] = {
{"info", MPR_INFO},
{"fault", MPR_FAULT},
{"event", MPR_EVENT},
{"log", MPR_LOG},
{"recovery", MPR_RECOVERY},
{"error", MPR_ERROR},
{"init", MPR_INIT},
{"xinfo", MPR_XINFO},
{"user", MPR_USER},
{"mapping", MPR_MAPPING},
{"trace", MPR_TRACE}
};
enum mpr_debug_level_combiner {
COMB_NONE,
COMB_ADD,
COMB_SUB
};
static int
mpr_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
struct mpr_softc *sc;
struct mpr_debug_string *string;
struct sbuf *sbuf;
char *buffer;
size_t sz;
int i, len, debug, error;
sc = (struct mpr_softc *)arg1;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
debug = sc->mpr_debug;
sbuf_printf(sbuf, "%#x", debug);
sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]);
for (i = 0; i < sz; i++) {
string = &mpr_debug_strings[i];
if (debug & string->flag)
sbuf_printf(sbuf, ",%s", string->name);
}
error = sbuf_finish(sbuf);
sbuf_delete(sbuf);
if (error || req->newptr == NULL)
return (error);
len = req->newlen - req->newidx;
if (len == 0)
return (0);
buffer = malloc(len, M_MPR, M_ZERO|M_WAITOK);
error = SYSCTL_IN(req, buffer, len);
mpr_parse_debug(sc, buffer);
free(buffer, M_MPR);
return (error);
}
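/*
* Illustrative sysctl usage (the unit number is hypothetical):
*
*   sysctl dev.mpr.0.debug_level=info,fault   # replace the mask
*   sysctl dev.mpr.0.debug_level=+trace       # add a flag
*   sysctl dev.mpr.0.debug_level=-trace       # remove a flag
*   sysctl dev.mpr.0.debug_level=0x3          # numeric mask also works
*
* Tokens are separated by ',' or ':' and may be names from
* mpr_debug_strings or numbers accepted by strtol(..., 0).
*/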
static void
mpr_parse_debug(struct mpr_softc *sc, char *list)
{
struct mpr_debug_string *string;
enum mpr_debug_level_combiner op;
char *token, *endtoken;
size_t sz;
int flags, i;
if (list == NULL || *list == '\0')
return;
if (*list == '+') {
op = COMB_ADD;
list++;
} else if (*list == '-') {
op = COMB_SUB;
list++;
} else
op = COMB_NONE;
if (*list == '\0')
return;
flags = 0;
sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]);
while ((token = strsep(&list, ":,")) != NULL) {
/* Handle integer flags */
flags |= strtol(token, &endtoken, 0);
if (token != endtoken)
continue;
/* Handle text flags */
for (i = 0; i < sz; i++) {
string = &mpr_debug_strings[i];
if (strcasecmp(token, string->name) == 0) {
flags |= string->flag;
break;
}
}
}
switch (op) {
case COMB_NONE:
sc->mpr_debug = flags;
break;
case COMB_ADD:
sc->mpr_debug |= flags;
break;
case COMB_SUB:
sc->mpr_debug &= (~flags);
break;
}
return;
}
int
mpr_attach(struct mpr_softc *sc)
{
int error;
MPR_FUNCTRACE(sc);
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF);
callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0);
callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0);
TAILQ_INIT(&sc->event_list);
timevalclear(&sc->lastfail);
if ((error = mpr_transition_ready(sc)) != 0) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT,
"Failed to transition ready\n");
return (error);
}
sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR,
M_ZERO|M_NOWAIT);
if (!sc->facts) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT,
"Cannot allocate memory, exit\n");
return (ENOMEM);
}
/*
* Get IOC Facts and allocate all structures based on this information.
* A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC
* Facts. If relevant values have changed in IOC Facts, this function
* will free all of the memory based on IOC Facts and reallocate that
* memory. If this fails, any allocated memory should already be freed.
*/
if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation "
"failed with error %d\n", error);
return (error);
}
/* Start the periodic watchdog check on the IOC Doorbell */
mpr_periodic(sc);
/*
* The portenable will kick off discovery events that will drive the
* rest of the initialization process. The CAM/SAS module will
* hold up the boot sequence until discovery is complete.
*/
sc->mpr_ich.ich_func = mpr_startup;
sc->mpr_ich.ich_arg = sc;
if (config_intrhook_establish(&sc->mpr_ich) != 0) {
mpr_dprint(sc, MPR_INIT|MPR_ERROR,
"Cannot establish MPR config hook\n");
error = EINVAL;
}
/*
* Allow IR to shutdown gracefully when shutdown occurs.
*/
sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
if (sc->shutdown_eh == NULL)
mpr_dprint(sc, MPR_INIT|MPR_ERROR,
"shutdown event registration failed\n");
mpr_setup_sysctl(sc);
sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE;
mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);
return (error);
}
/* Run through any late-start handlers. */
static void
mpr_startup(void *arg)
{
struct mpr_softc *sc;
sc = (struct mpr_softc *)arg;
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
mpr_lock(sc);
mpr_unmask_intr(sc);
/* initialize device mapping tables */
mpr_base_static_config_pages(sc);
mpr_mapping_initialize(sc);
mprsas_startup(sc);
mpr_unlock(sc);
mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n");
config_intrhook_disestablish(&sc->mpr_ich);
sc->mpr_ich.ich_arg = NULL;
mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
}
/* Periodic watchdog. Called with the driver lock already held. */
static void
mpr_periodic(void *arg)
{
struct mpr_softc *sc;
uint32_t db;
sc = (struct mpr_softc *)arg;
if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN)
return;
db = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) ==
IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) {
panic("TEMPERATURE FAULT: STOPPING.");
}
mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
mpr_reinit(sc);
}
callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc);
}
static void
mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data,
MPI2_EVENT_NOTIFICATION_REPLY *event)
{
MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
MPR_DPRINT_EVENT(sc, generic, event);
switch (event->Event) {
case MPI2_EVENT_LOG_DATA:
mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n");
if (sc->mpr_debug & MPR_EVENT)
hexdump(event->EventData, event->EventDataLength, NULL,
0);
break;
case MPI2_EVENT_LOG_ENTRY_ADDED:
entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
"0x%x Sequence %d:\n", entry->LogEntryQualifier,
entry->LogSequence);
break;
default:
break;
}
return;
}
static int
mpr_attach_log(struct mpr_softc *sc)
{
uint8_t events[16];
bzero(events, 16);
setbit(events, MPI2_EVENT_LOG_DATA);
setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
mpr_register_events(sc, events, mpr_log_evt_handler, NULL,
&sc->mpr_log_eh);
return (0);
}
static int
mpr_detach_log(struct mpr_softc *sc)
{
if (sc->mpr_log_eh != NULL)
mpr_deregister_events(sc, sc->mpr_log_eh);
return (0);
}
/*
* Free all of the driver resources and detach submodules. Should be called
* without the lock held.
*/
int
mpr_free(struct mpr_softc *sc)
{
int error;
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
/* Turn off the watchdog */
mpr_lock(sc);
sc->mpr_flags |= MPR_FLAGS_SHUTDOWN;
mpr_unlock(sc);
/* Lock must not be held for this */
callout_drain(&sc->periodic);
callout_drain(&sc->device_check_callout);
if (((error = mpr_detach_log(sc)) != 0) ||
((error = mpr_detach_sas(sc)) != 0)) {
mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach "
"subsystems, error= %d, exit\n", error);
return (error);
}
mpr_detach_user(sc);
/* Put the IOC back in the READY state. */
mpr_lock(sc);
if ((error = mpr_transition_ready(sc)) != 0) {
mpr_unlock(sc);
return (error);
}
mpr_unlock(sc);
if (sc->facts != NULL)
free(sc->facts, M_MPR);
/*
* Free all buffers that are based on IOC Facts. A Diag Reset may need
* to free these buffers too.
*/
mpr_iocfacts_free(sc);
if (sc->sysctl_tree != NULL)
sysctl_ctx_free(&sc->sysctl_ctx);
/* Deregister the shutdown function */
if (sc->shutdown_eh != NULL)
EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
mtx_destroy(&sc->mpr_mtx);
mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
return (0);
}
static __inline void
mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm)
{
MPR_FUNCTRACE(sc);
if (cm == NULL) {
mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n");
return;
}
if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
if (cm->cm_complete != NULL) {
mpr_dprint(sc, MPR_TRACE,
"%s cm %p calling cm_complete %p data %p reply %p\n",
__func__, cm, cm->cm_complete, cm->cm_complete_data,
cm->cm_reply);
cm->cm_complete(sc, cm);
}
if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm);
wakeup(cm);
}
if (sc->io_cmds_active != 0) {
sc->io_cmds_active--;
} else {
mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is "
"out of sync - resynching to 0\n");
}
}
static void
mpr_sas_log_info(struct mpr_softc *sc , u32 log_info)
{
union loginfo_type {
u32 loginfo;
struct {
u32 subcode:16;
u32 code:8;
u32 originator:4;
u32 bus_type:4;
} dw;
};
union loginfo_type sas_loginfo;
char *originator_str = NULL;
sas_loginfo.loginfo = log_info;
if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
return;
/* each nexus loss loginfo */
if (log_info == 0x31170000)
return;
/* eat the loginfos associated with task aborts */
if ((log_info == 0x30050000) || (log_info == 0x31140000) ||
(log_info == 0x31130000))
return;
switch (sas_loginfo.dw.originator) {
case 0:
originator_str = "IOP";
break;
case 1:
originator_str = "PL";
break;
case 2:
originator_str = "IR";
break;
}
mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), "
"code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str,
sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
static void
mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply)
{
MPI2DefaultReply_t *mpi_reply;
u16 sc_status;
mpi_reply = (MPI2DefaultReply_t*)reply;
sc_status = le16toh(mpi_reply->IOCStatus);
if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
}
void
mpr_intr(void *data)
{
struct mpr_softc *sc;
uint32_t status;
sc = (struct mpr_softc *)data;
mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
/*
* Check interrupt status register to flush the bus. This is
* needed for both INTx interrupts and driver-driven polling.
*/
status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
return;
mpr_lock(sc);
mpr_intr_locked(data);
mpr_unlock(sc);
return;
}
/*
* In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
* chip. Hopefully this theory is correct.
*/
void
mpr_intr_msi(void *data)
{
struct mpr_softc *sc;
sc = (struct mpr_softc *)data;
mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
mpr_lock(sc);
mpr_intr_locked(data);
mpr_unlock(sc);
return;
}
/*
* The locking is overly broad and simplistic, but easy to deal with for now.
*/
void
mpr_intr_locked(void *data)
{
MPI2_REPLY_DESCRIPTORS_UNION *desc;
struct mpr_softc *sc;
struct mpr_command *cm = NULL;
uint8_t flags;
u_int pq;
MPI2_DIAG_RELEASE_REPLY *rel_rep;
mpr_fw_diagnostic_buffer_t *pBuffer;
sc = (struct mpr_softc *)data;
pq = sc->replypostindex;
mpr_dprint(sc, MPR_TRACE,
"%s sc %p starting with replypostindex %u\n",
__func__, sc, sc->replypostindex);
for ( ;; ) {
cm = NULL;
desc = &sc->post_queue[sc->replypostindex];
flags = desc->Default.ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) ||
(le32toh(desc->Words.High) == 0xffffffff))
break;
/* Increment replypostindex now, so that event handlers and cm
* completion handlers that decide to do a diag reset can zero it
* without it being incremented again afterwards. We then break out
* of this loop on the next iteration, since the reply post queue has
* been cleared to 0xFF and all descriptors look unused (which they
* are).
*/
if (++sc->replypostindex >= sc->pqdepth)
sc->replypostindex = 0;
switch (flags) {
case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS:
case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS:
cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
cm->cm_reply = NULL;
break;
case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
{
uint32_t baddr;
uint8_t *reply;
/*
* Re-compose the reply address from the address
* sent back from the chip. The ReplyFrameAddress
* is the lower 32 bits of the physical address of
* particular reply frame. Convert that address to
* host format, and then use that to provide the
* offset against the virtual address base
* (sc->reply_frames).
*/
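/*
* Illustrative addresses (made up): if the low 32 bits of
* sc->reply_busaddr are 0x00200000 and the chip hands back
* 0x00200080, the reply lives at sc->reply_frames + 0x80.
*/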
baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
reply = sc->reply_frames +
(baddr - ((uint32_t)sc->reply_busaddr));
/*
* Make sure the reply we got back is in a valid
* range. If not, go ahead and panic here, since
* we'll probably panic as soon as we dereference the
* reply pointer anyway.
*/
if ((reply < sc->reply_frames)
|| (reply > (sc->reply_frames +
(sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) {
printf("%s: WARNING: reply %p out of range!\n",
__func__, reply);
printf("%s: reply_frames %p, fqdepth %d, "
"frame size %d\n", __func__,
sc->reply_frames, sc->fqdepth,
sc->facts->ReplyFrameSize * 4);
printf("%s: baddr %#x,\n", __func__, baddr);
/* LSI-TODO. See Linux Code for Graceful exit */
panic("Reply address out of range");
}
if (le16toh(desc->AddressReply.SMID) == 0) {
if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
MPI2_FUNCTION_DIAG_BUFFER_POST) {
/*
* If SMID is 0 for Diag Buffer Post,
* this implies that the reply is due to
* a release function with a status that
* the buffer has been released. Set
* the buffer flags accordingly.
*/
rel_rep =
(MPI2_DIAG_RELEASE_REPLY *)reply;
if ((le16toh(rel_rep->IOCStatus) &
MPI2_IOCSTATUS_MASK) ==
MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
{
pBuffer =
&sc->fw_diag_buffer_list[
rel_rep->BufferType];
pBuffer->valid_data = TRUE;
pBuffer->owned_by_firmware =
FALSE;
pBuffer->immediate = FALSE;
}
} else
mpr_dispatch_event(sc, baddr,
(MPI2_EVENT_NOTIFICATION_REPLY *)
reply);
} else {
cm = &sc->commands[
le16toh(desc->AddressReply.SMID)];
cm->cm_reply = reply;
cm->cm_reply_data =
le32toh(desc->AddressReply.
ReplyFrameAddress);
}
break;
}
case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
default:
/* Unhandled */
mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n",
desc->Default.ReplyFlags);
cm = NULL;
break;
}
if (cm != NULL) {
// Print Error reply frame
if (cm->cm_reply)
mpr_display_reply_info(sc,cm->cm_reply);
mpr_complete_command(sc, cm);
}
desc->Words.Low = 0xffffffff;
desc->Words.High = 0xffffffff;
}
if (pq != sc->replypostindex) {
mpr_dprint(sc, MPR_TRACE,
"%s sc %p writing postindex %d\n",
__func__, sc, sc->replypostindex);
mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
sc->replypostindex);
}
return;
}
static void
mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
MPI2_EVENT_NOTIFICATION_REPLY *reply)
{
struct mpr_event_handle *eh;
int event, handled = 0;
event = le16toh(reply->Event);
TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
if (isset(eh->mask, event)) {
eh->callback(sc, data, reply);
handled++;
}
}
if (handled == 0)
mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n",
le16toh(event));
/*
* This is the only place that the event/reply should be freed.
* Anything wanting to hold onto the event data should have
* already copied it into their own storage.
*/
mpr_free_reply(sc, data);
}
static void
mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
if (cm->cm_reply)
MPR_DPRINT_EVENT(sc, generic,
(MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
mpr_free_command(sc, cm);
/* next, send a port enable */
mprsas_startup(sc);
}
/*
* For both register_events and update_events, the caller supplies a bitmap
* of events that it _wants_. These functions then turn that into a bitmask
* suitable for the controller.
*/
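/*
* Minimal usage sketch, mirroring mpr_attach_log() earlier in this
* file ("my_handler" and "handle" are placeholder names):
*
*   uint8_t events[16];
*
*   bzero(events, 16);
*   setbit(events, MPI2_EVENT_LOG_DATA);
*   error = mpr_register_events(sc, events, my_handler, NULL, &handle);
*
* mpr_update_events() then clears the union of all handlers' wanted
* bits out of an all-ones sc->event_mask before sending it to the
* controller.
*/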
int
mpr_register_events(struct mpr_softc *sc, uint8_t *mask,
mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle)
{
struct mpr_event_handle *eh;
int error = 0;
eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO);
if (!eh) {
mpr_dprint(sc, MPR_EVENT|MPR_ERROR,
"Cannot allocate event memory\n");
return (ENOMEM);
}
eh->callback = cb;
eh->data = data;
TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
if (mask != NULL)
error = mpr_update_events(sc, eh, mask);
*handle = eh;
return (error);
}
int
mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle,
uint8_t *mask)
{
MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL;
struct mpr_command *cm = NULL;
struct mpr_event_handle *eh;
int error, i;
mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
if ((mask != NULL) && (handle != NULL))
bcopy(mask, &handle->mask[0], 16);
memset(sc->event_mask, 0xff, 16);
TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
for (i = 0; i < 16; i++)
sc->event_mask[i] &= ~eh->mask[i];
}
if ((cm = mpr_alloc_command(sc)) == NULL)
return (EBUSY);
evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
evtreq->MsgFlags = 0;
evtreq->SASBroadcastPrimitiveMasks = 0;
#ifdef MPR_DEBUG_ALL_EVENTS
{
u_char fullmask[16];
memset(fullmask, 0x00, 16);
bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
}
#else
bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
#endif
cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
cm->cm_data = NULL;
error = mpr_request_polled(sc, &cm);
if (cm != NULL)
reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
if ((reply == NULL) ||
(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
error = ENXIO;
if (reply)
MPR_DPRINT_EVENT(sc, generic, reply);
mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error);
if (cm != NULL)
mpr_free_command(sc, cm);
return (error);
}
static int
mpr_reregister_events(struct mpr_softc *sc)
{
MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
struct mpr_command *cm;
struct mpr_event_handle *eh;
int error, i;
mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
/* first, reregister events */
memset(sc->event_mask, 0xff, 16);
TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
for (i = 0; i < 16; i++)
sc->event_mask[i] &= ~eh->mask[i];
}
if ((cm = mpr_alloc_command(sc)) == NULL)
return (EBUSY);
evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
evtreq->MsgFlags = 0;
evtreq->SASBroadcastPrimitiveMasks = 0;
#ifdef MPR_DEBUG_ALL_EVENTS
{
u_char fullmask[16];
memset(fullmask, 0x00, 16);
bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
}
#else
bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
#endif
cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
cm->cm_data = NULL;
cm->cm_complete = mpr_reregister_events_complete;
error = mpr_map_command(sc, cm);
mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__,
error);
return (error);
}
int
mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle)
{
TAILQ_REMOVE(&sc->event_list, handle, eh_list);
free(handle, M_MPR);
return (mpr_update_events(sc, NULL, NULL));
}
/**
* mpr_build_nvme_prp - This function is called for NVMe end devices to build a
* native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry
* of the NVMe message (PRP1). If the data buffer is small enough to be described
* entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to
* describe a larger data buffer. If the data buffer is too large to describe
* using the two PRP entries inside the NVMe message, then PRP1 describes the
* first data memory segment, and PRP2 contains a pointer to a PRP list located
* elsewhere in memory to describe the remaining data memory segments. The PRP
* list will be contiguous.
* The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
* consists of a list of PRP entries to describe a number of non-contiguous
* physical memory segments as a single memory buffer, just as an SGL does. Note
* however, that this function is only used by the IOCTL call, so the memory
* given will be guaranteed to be contiguous. There is no need to translate
* non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous
* space that is one page size each.
*
* Each NVMe message contains two PRP entries. The first (PRP1) either contains
* a PRP list pointer or a PRP element, depending upon the command. PRP2 contains
* the second PRP element if the memory being described fits within 2 PRP
* entries, or a PRP list pointer if the PRP spans more than two entries.
*
* A PRP list pointer contains the address of a PRP list, structured as a linear
* array of PRP entries. Each PRP entry in this list describes a segment of
* physical memory.
*
* Each 64-bit PRP entry comprises an address and an offset field. The address
* always points to the beginning of a PAGE_SIZE physical memory page, and the
* offset describes where within that page the memory segment begins. Only the
* first element in a PRP list may contain a non-zero offset, implying that all
* memory segments following the first begin at the start of a PAGE_SIZE page.
*
* Each PRP element normally describes a chunk of PAGE_SIZE physical memory,
* with exceptions for the first and last elements in the list. If the memory
* being described by the list begins at a non-zero offset within the first page,
* then the first PRP element will contain a non-zero offset indicating where the
* region begins within the page. The last memory segment may end before the end
* of the PAGE_SIZE segment, depending upon the overall size of the memory being
* described by the PRP list.
*
* Since PRP entries lack any indication of size, the overall data buffer length
* is used to determine where the end of the data memory buffer is located, and
* how many PRP entries are required to describe it.
*
* Returns nothing.
*/
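/*
* Worked example (illustrative numbers, assuming 4KB pages): for a
* 10 KiB buffer whose physical address starts 512 bytes into a page,
* PRP1 covers the first 4096 - 512 = 3584 bytes. Since 6656 bytes
* remain (more than one page), PRP2 becomes a PRP list pointer and the
* list holds two entries: one page-aligned entry covering 4096 bytes
* and a final entry covering the last 2560 bytes.
*/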
void
mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm,
Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data,
uint32_t data_in_sz, uint32_t data_out_sz)
{
int prp_size = PRP_ENTRY_SIZE;
uint64_t *prp_entry, *prp1_entry, *prp2_entry;
uint64_t *prp_entry_phys, *prp_page, *prp_page_phys;
uint32_t offset, entry_len, page_mask_result, page_mask;
bus_addr_t paddr;
size_t length;
struct mpr_prp_page *prp_page_info = NULL;
/*
* Not all commands require a data transfer. If no data, just return
* without constructing any PRP.
*/
if (!data_in_sz && !data_out_sz)
return;
/*
* Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is
* located at a 24 byte offset from the start of the NVMe command. Then
* set the current PRP entry pointer to PRP1.
*/
prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
NVME_CMD_PRP1_OFFSET);
prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
NVME_CMD_PRP2_OFFSET);
prp_entry = prp1_entry;
/*
* For the PRP entries, use the specially allocated buffer of
* contiguous memory. PRP Page allocation failures should not happen
* because there should be enough PRP page buffers to account for the
* possible NVMe QDepth.
*/
prp_page_info = mpr_alloc_prp_page(sc);
KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
"used for building a native NVMe SGL.\n", __func__));
prp_page = (uint64_t *)prp_page_info->prp_page;
prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
/*
* Insert the allocated PRP page into the command's PRP page list. This
* will be freed when the command is freed.
*/
TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
/*
* Check if we are within 1 entry of a page boundary; we don't want our
* first entry to be a PRP List entry.
*/
page_mask = PAGE_SIZE - 1;
page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) &
page_mask;
if (!page_mask_result)
{
/* Bump up to next page boundary. */
prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size);
prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys +
prp_size);
}
/*
* Set PRP physical pointer, which initially points to the current PRP
* DMA memory page.
*/
prp_entry_phys = prp_page_phys;
/* Get physical address and length of the data buffer. */
paddr = (bus_addr_t)data;
if (data_in_sz)
length = data_in_sz;
else
length = data_out_sz;
/* Loop while the length is not zero. */
while (length)
{
/*
* Check if we need to put a list pointer here, i.e. if we are at
* (page boundary - prp_size), 8 bytes before a page boundary.
*/
page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys +
prp_size) & page_mask;
if (!page_mask_result)
{
/*
* This is the last entry in a PRP List, so we need to
* put a PRP list pointer here. What this does is:
* - bump the current memory pointer to the next
* address, which will be the next full page.
* - set the PRP Entry to point to that page. This is
* now the PRP List pointer.
* - bump the PRP Entry pointer to the start of the next
* page. Since all of this PRP memory is contiguous,
* no need to get a new page - it's just the next
* address.
*/
prp_entry_phys++;
*prp_entry =
htole64((uint64_t)(uintptr_t)prp_entry_phys);
prp_entry++;
}
/* Need to handle if entry will be part of a page. */
offset = (uint32_t)paddr & page_mask;
entry_len = PAGE_SIZE - offset;
if (prp_entry == prp1_entry)
{
/*
* Must fill in the first PRP pointer (PRP1) before
* moving on.
*/
*prp1_entry = htole64((uint64_t)paddr);
/*
* Now point to the second PRP entry within the
* command (PRP2).
*/
prp_entry = prp2_entry;
}
else if (prp_entry == prp2_entry)
{
/*
* Should the PRP2 entry be a PRP List pointer or just a
* regular PRP pointer? If there is more than one more
* page of data, must use a PRP List pointer.
*/
if (length > PAGE_SIZE)
{
/*
* PRP2 will contain a PRP List pointer because
* more PRP's are needed with this command. The
* list will start at the beginning of the
* contiguous buffer.
*/
*prp2_entry =
htole64(
(uint64_t)(uintptr_t)prp_entry_phys);
/*
* The next PRP Entry will be the start of the
* first PRP List.
*/
prp_entry = prp_page;
}
else
{
/*
* After this, the PRP Entries are complete.
* This command uses 2 PRP's and no PRP list.
*/
*prp2_entry = htole64((uint64_t)paddr);
}
}
else
{
/*
* Put entry in list and bump the addresses.
*
* After PRP1 and PRP2 are filled in, this will fill in
* all remaining PRP entries in a PRP List, one per each
* time through the loop.
*/
*prp_entry = htole64((uint64_t)paddr);
prp_entry++;
prp_entry_phys++;
}
/*
* Bump the phys address of the command's data buffer by the
* entry_len.
*/
paddr += entry_len;
/* Decrement length accounting for last partial page. */
if (entry_len > length)
length = 0;
else
length -= entry_len;
}
}
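/*
 * Illustrative sketch (not part of the driver): the loop above carves a
 * transfer into PRP entries by always consuming the remainder of the
 * current physical page. The helper below repeats just that arithmetic
 * to show how many PRP entries a buffer needs; the name and the fixed
 * 4 KB page size are assumptions for the example only.
 */
static u_int
prp_entries_needed(uint64_t paddr, uint32_t length)
{
const uint32_t page_size = 4096, page_mask = page_size - 1;
uint32_t offset, entry_len;
u_int entries = 0;
while (length) {
/* Each entry covers at most the rest of the current page. */
offset = (uint32_t)paddr & page_mask;
entry_len = page_size - offset;
entries++;
paddr += entry_len;
length = (entry_len > length) ? 0 : length - entry_len;
}
return (entries);
}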
/*
* mpr_check_pcie_native_sgl - This function is called for PCIe end devices to
* determine if the driver needs to build a native SGL. If so, that native SGL
* is built in the contiguous buffers allocated especially for PCIe SGL
* creation. If the driver will not build a native SGL, return TRUE and a
* normal IEEE SGL will be built. Currently this routine supports NVMe devices
* only.
*
* Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built.
*/
static int
mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm,
bus_dma_segment_t *segs, int segs_left)
{
uint32_t i, sge_dwords, length, offset, entry_len;
uint32_t num_entries, buff_len = 0, sges_in_segment;
uint32_t page_mask, page_mask_result, *curr_buff;
uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset;
uint32_t first_page_data_size, end_residual;
uint64_t *msg_phys;
bus_addr_t paddr;
int build_native_sgl = 0, first_prp_entry;
int prp_size = PRP_ENTRY_SIZE;
Mpi25IeeeSgeChain64_t *main_chain_element = NULL;
struct mpr_prp_page *prp_page_info = NULL;
mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
/*
* Add up the sizes of each segment length to get the total transfer
* size, which will be checked against the Maximum Data Transfer Size.
* If the data transfer length exceeds the MDTS for this device, just
* return 1 so a normal IEEE SGL will be built. F/W will break the I/O
* up into multiple I/O's. [nvme_mdts = 0 means unlimited]
*/
for (i = 0; i < segs_left; i++)
buff_len += htole32(segs[i].ds_len);
if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS))
return 1;
/* Create page_mask (to get offset within page) */
page_mask = PAGE_SIZE - 1;
/*
* Check if the number of elements exceeds the max number that can be
* put in the main message frame (H/W can only translate an SGL that
* is contained entirely in the main message frame).
*/
sges_in_segment = (sc->facts->IOCRequestFrameSize -
offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION);
if (segs_left > sges_in_segment)
build_native_sgl = 1;
else
{
/*
* NVMe uses one PRP for each physical page (or part of a physical
* page):
* - 4 pages or less: IEEE is OK
* - more than 5 pages: a native SGL must be built
* - more than 4 and at most 5 pages: check the physical address of
*   the first SG entry; if the data available in its first page is
*   >= the residual beyond 4 pages, use IEEE, otherwise use a
*   native SGL
*/
if (buff_len > (PAGE_SIZE * 5))
build_native_sgl = 1;
else if ((buff_len > (PAGE_SIZE * 4)) &&
(buff_len <= (PAGE_SIZE * 5)) )
{
msg_phys = (uint64_t *)segs[0].ds_addr;
first_page_offset =
((uint32_t)(uint64_t)(uintptr_t)msg_phys &
page_mask);
first_page_data_size = PAGE_SIZE - first_page_offset;
end_residual = buff_len % PAGE_SIZE;
/*
* If offset into first page pushes the end of the data
* beyond end of the 5th page, we need the extra PRP
* list.
*/
if (first_page_data_size < end_residual)
build_native_sgl = 1;
/*
* Check if first SG entry size is < residual beyond 4
* pages.
*/
if (htole32(segs[0].ds_len) <
(buff_len - (PAGE_SIZE * 4)))
build_native_sgl = 1;
}
}
/* check if native SGL is needed */
if (!build_native_sgl)
return 1;
/*
* Native SGL is needed.
* Put a chain element in main message frame that points to the first
* chain buffer.
*
* NOTE: The ChainOffset field must be 0 when using a chain pointer to
* a native SGL.
*/
/* Set main message chain element pointer */
main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge;
/*
* For NVMe the chain element needs to be the 2nd SGL entry in the main
* message.
*/
main_chain_element = (Mpi25IeeeSgeChain64_t *)
((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
/*
* For the PRP entries, use the specially allocated buffer of
* contiguous memory. PRP Page allocation failures should not happen
* because there should be enough PRP page buffers to account for the
* possible NVMe QDepth.
*/
prp_page_info = mpr_alloc_prp_page(sc);
KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
"used for building a native NVMe SGL.\n", __func__));
curr_buff = (uint32_t *)prp_page_info->prp_page;
msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
/*
* Insert the allocated PRP page into the command's PRP page list. This
* will be freed when the command is freed.
*/
TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
/*
* Check if we are within 1 entry of a page boundary. If so, bump to the
* next page because we don't want the first entry to be a PRP List entry.
*/
page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) &
page_mask;
if (!page_mask_result) {
/* Bump up to next page boundary. */
curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size);
msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size);
}
/* Fill in the chain element and make it an NVMe segment type. */
main_chain_element->Address.High =
htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32));
main_chain_element->Address.Low =
htole32((uint32_t)(uintptr_t)msg_phys);
main_chain_element->NextChainOffset = 0;
main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
/* Set SGL pointer to start of contiguous PCIe buffer. */
ptr_sgl = curr_buff;
sge_dwords = 2;
num_entries = 0;
/*
* NVMe has a very convoluted PRP format. One PRP is required for each
* page or partial page. We need to split up OS SG entries if they are
* longer than one page or cross a page boundary. We also have to insert
* a PRP list pointer entry as the last entry in each physical page of
* the PRP list.
*
* NOTE: The first PRP "entry" is actually placed in the first SGL entry
* in the main message in IEEE 64 format. The 2nd entry in the main
* message is the chain element, and the rest of the PRP entries are
* built in the contiguous PCIe buffer.
*/
first_prp_entry = 1;
ptr_first_sgl = (uint32_t *)cm->cm_sge;
for (i = 0; i < segs_left; i++) {
/* Get physical address and length of this SG entry. */
paddr = segs[i].ds_addr;
length = segs[i].ds_len;
/*
* Check whether this SGE buffer starts on a page boundary. Every
* SGE after the first must be page aligned; if it is not, this
* layout is unexpected, so fall back to a normal IEEE SGL.
*/
if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) {
mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while "
"building NVMe PRPs, low address is 0x%x\n",
(uint32_t)paddr);
return 1;
}
/* Apart from the last SGE, if any other SGE does not end on a page
* boundary then there is a hole in the buffer. A hole would lead to
* data corruption, so fall back to IEEE SGEs.
*/
if (i != (segs_left - 1)) {
if (((uint32_t)paddr + length) & page_mask) {
mpr_dprint(sc, MPR_ERROR, "Unaligned SGE "
"boundary while building NVMe PRPs, low "
"address: 0x%x and length: %u\n",
(uint32_t)paddr, length);
return 1;
}
}
/* Loop while the length is not zero. */
while (length) {
/*
* Check if we need to put a list pointer here: we do if this
* entry sits at (page boundary - prp_size).
*/
page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl +
prp_size) & page_mask;
if (!page_mask_result) {
/*
* Need to put a PRP list pointer here.
*/
msg_phys = (uint64_t *)((uint8_t *)msg_phys +
prp_size);
*ptr_sgl = htole32((uintptr_t)msg_phys);
*(ptr_sgl+1) = htole32((uint64_t)(uintptr_t)
msg_phys >> 32);
ptr_sgl += sge_dwords;
num_entries++;
}
/* An entry covers at most the remainder of the current page. */
offset = (uint32_t)paddr & page_mask;
entry_len = PAGE_SIZE - offset;
if (first_prp_entry) {
/*
* Put IEEE entry in first SGE in main message.
* (Simple element, System addr, not end of
* list.)
*/
*ptr_first_sgl = htole32((uint32_t)paddr);
*(ptr_first_sgl + 1) =
htole32((uint32_t)((uint64_t)paddr >> 32));
*(ptr_first_sgl + 2) = htole32(entry_len);
*(ptr_first_sgl + 3) = 0;
/* No longer the first PRP entry. */
first_prp_entry = 0;
} else {
/* Put entry in list. */
*ptr_sgl = htole32((uint32_t)paddr);
*(ptr_sgl + 1) =
htole32((uint32_t)((uint64_t)paddr >> 32));
/* Bump ptr_sgl, msg_phys, and num_entries. */
ptr_sgl += sge_dwords;
msg_phys = (uint64_t *)((uint8_t *)msg_phys +
prp_size);
num_entries++;
}
/* Bump the phys address by the entry_len. */
paddr += entry_len;
/* Decrement length accounting for last partial page. */
if (entry_len > length)
length = 0;
else
length -= entry_len;
}
}
/* Set chain element Length. */
main_chain_element->Length = htole32(num_entries * prp_size);
/* Return 0, indicating we built a native SGL. */
return 0;
}
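/*
 * Illustrative sketch (not part of the driver): the 4-vs-5 page decision
 * above in isolation. Given the total transfer length plus the physical
 * address and length of the first SG entry, decide whether an IEEE SGL
 * is still acceptable (returns 0) or a native SGL is required (returns
 * 1). The name and the fixed 4 KB page size are assumptions for the
 * example only.
 */
static int
nvme_needs_native_sgl(uint32_t buff_len, uint64_t first_paddr,
uint32_t first_len)
{
const uint32_t page_size = 4096, page_mask = page_size - 1;
uint32_t first_page_offset, first_page_data_size, end_residual;
if (buff_len > (page_size * 5))
return (1);
if (buff_len <= (page_size * 4))
return (0);
first_page_offset = (uint32_t)first_paddr & page_mask;
first_page_data_size = page_size - first_page_offset;
end_residual = buff_len % page_size;
/* Data spilling past the 5th page forces a PRP list. */
if (first_page_data_size < end_residual)
return (1);
/* The first segment must cover the residual beyond 4 pages. */
if (first_len < (buff_len - (page_size * 4)))
return (1);
return (0);
}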
/*
* Add a chain element as the next SGE for the specified command.
* Reset cm_sge and cm_sglsize to indicate all the available space. Chains are
* only required for IEEE commands. Therefore there is no code for commands
* that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands
* shouldn't be requesting chains).
*/
static int
mpr_add_chain(struct mpr_command *cm, int segsleft)
{
struct mpr_softc *sc = cm->cm_sc;
MPI2_REQUEST_HEADER *req;
MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
struct mpr_chain *chain;
int sgc_size, current_segs, rem_segs, segs_per_frame;
uint8_t next_chain_offset = 0;
/*
* Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3
* only IEEE commands should be requesting chains. Return some error
* code other than 0.
*/
if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) {
mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to "
"an MPI SGL.\n");
return(ENOBUFS);
}
sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64);
if (cm->cm_sglsize < sgc_size)
panic("MPR: Need SGE Error Code\n");
chain = mpr_alloc_chain(cm->cm_sc);
if (chain == NULL)
return (ENOBUFS);
/*
* Note: a double-linked list is used to make it easier to walk for
* debugging.
*/
TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
/*
* Need to know if the number of frames left is more than 1 or not. If
* more than 1 frame is required, NextChainOffset will need to be set,
* which will just be the last segment of the frame.
*/
rem_segs = 0;
if (cm->cm_sglsize < (sgc_size * segsleft)) {
/*
* rem_segs is the number of segments remaining after the
* segments that will go into the current frame. Since it is
* known that at least one more frame is required, account for
* the chain element. To know if more than one more frame is
* required, just check if there will be a remainder after using
* the current frame (with this chain) and the next frame. If
* so the NextChainOffset must be the last element of the next
* frame.
*/
current_segs = (cm->cm_sglsize / sgc_size) - 1;
rem_segs = segsleft - current_segs;
segs_per_frame = sc->chain_frame_size / sgc_size;
if (rem_segs > segs_per_frame) {
next_chain_offset = segs_per_frame - 1;
}
}
ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain;
ieee_sgc->Length = next_chain_offset ?
htole32((uint32_t)sc->chain_frame_size) :
htole32((uint32_t)rem_segs * (uint32_t)sgc_size);
ieee_sgc->NextChainOffset = next_chain_offset;
ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
ieee_sgc->Address.Low = htole32(chain->chain_busaddr);
ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32);
cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple;
req = (MPI2_REQUEST_HEADER *)cm->cm_req;
req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4;
cm->cm_sglsize = sc->chain_frame_size;
return (0);
}
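/*
 * Illustrative sketch (not part of the driver): the NextChainOffset
 * computation above, in isolation. Given the space left in the current
 * frame, the chain frame size, the IEEE chain SGE size, and the number
 * of segments still to add, it returns the NextChainOffset to program
 * (0 when the next chain frame can hold everything). The name is an
 * assumption for the example only.
 */
static uint8_t
chain_next_offset(int sglsize, int chain_frame_size, int sgc_size,
int segsleft)
{
int current_segs, rem_segs, segs_per_frame;
if (sglsize >= sgc_size * segsleft)
return (0); /* Everything fits in the current frame. */
/* One slot of the current frame is consumed by the chain SGE. */
current_segs = (sglsize / sgc_size) - 1;
rem_segs = segsleft - current_segs;
segs_per_frame = chain_frame_size / sgc_size;
/* Another chain is needed only if the next frame overflows too. */
return (rem_segs > segs_per_frame ? segs_per_frame - 1 : 0);
}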
/*
* Add one scatter-gather element to the scatter-gather list for a command.
* Maintain cm_sglsize and cm_sge as the remaining size and pointer to the
* next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a
* chain, so don't consider any chain additions.
*/
int
mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len,
int segsleft)
{
uint32_t saved_buf_len, saved_address_low, saved_address_high;
u32 sge_flags;
/*
* case 1: >=1 more segment, no room for anything (error)
* case 2: 1 more segment and enough room for it
*/
if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) {
mpr_dprint(cm->cm_sc, MPR_ERROR,
"%s: warning: Not enough room for MPI SGL in frame.\n",
__func__);
return(ENOBUFS);
}
KASSERT(segsleft == 1,
("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n",
segsleft));
/*
* There is one more segment left to add for the MPI SGL and there is
* enough room in the frame to add it. This is the normal case because
* MPI SGL's don't have chains, otherwise something is wrong.
*
* If this is a bi-directional request, need to account for that
* here. Save the pre-filled sge values. These will be used
* either for the 2nd SGL or for a single direction SGL. If
* cm_out_len is non-zero, this is a bi-directional request, so
* fill in the OUT SGL first, then the IN SGL, otherwise just
* fill in the IN SGL. Note that at this time, when filling in
* 2 SGL's for a bi-directional request, they both use the same
* DMA buffer (same cm command).
*/
saved_buf_len = sge->FlagsLength & 0x00FFFFFF;
saved_address_low = sge->Address.Low;
saved_address_high = sge->Address.High;
if (cm->cm_out_len) {
sge->FlagsLength = cm->cm_out_len |
((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_HOST_TO_IOC |
MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
MPI2_SGE_FLAGS_SHIFT);
cm->cm_sglsize -= len;
/* Endian Safe code */
sge_flags = sge->FlagsLength;
sge->FlagsLength = htole32(sge_flags);
sge->Address.High = htole32(sge->Address.High);
sge->Address.Low = htole32(sge->Address.Low);
bcopy(sge, cm->cm_sge, len);
cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
}
sge->FlagsLength = saved_buf_len |
((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_LAST_ELEMENT |
MPI2_SGE_FLAGS_END_OF_LIST |
MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
MPI2_SGE_FLAGS_SHIFT);
if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
sge->FlagsLength |=
((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
MPI2_SGE_FLAGS_SHIFT);
} else {
sge->FlagsLength |=
((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
MPI2_SGE_FLAGS_SHIFT);
}
sge->Address.Low = saved_address_low;
sge->Address.High = saved_address_high;
cm->cm_sglsize -= len;
/* Endian Safe code */
sge_flags = sge->FlagsLength;
sge->FlagsLength = htole32(sge_flags);
sge->Address.High = htole32(sge->Address.High);
sge->Address.Low = htole32(sge->Address.Low);
bcopy(sge, cm->cm_sge, len);
cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
return (0);
}
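/*
 * Illustrative sketch (not part of the driver): an MPI2 simple SGE packs
 * the byte count into the low 24 bits of FlagsLength and the flag byte
 * above MPI2_SGE_FLAGS_SHIFT, which is why the code above masks with
 * 0x00FFFFFF when saving the length. A minimal encoder; the name is an
 * assumption for the example only.
 */
static uint32_t
mpi_sge_flagslength(uint32_t flags, uint32_t len)
{
/* Length occupies bits 0..23, flags sit above the shift. */
return ((len & 0x00FFFFFF) | (flags << MPI2_SGE_FLAGS_SHIFT));
}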
/*
* Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter-
* gather list for a command. Maintain cm_sglsize and cm_sge as the
* remaining size and pointer to the next SGE to fill in, respectively.
*/
int
mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft)
{
MPI2_IEEE_SGE_SIMPLE64 *sge = sgep;
int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION);
uint32_t saved_buf_len, saved_address_low, saved_address_high;
uint32_t sge_length;
/*
* case 1: No room for chain or segment (error).
* case 2: Two or more segments left but only room for chain.
* case 3: Last segment and room for it, so set flags.
*/
/*
* There should be room for at least one element, or there is a big
* problem.
*/
if (cm->cm_sglsize < ieee_sge_size)
panic("MPR: Need SGE Error Code\n");
if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) {
if ((error = mpr_add_chain(cm, segsleft)) != 0)
return (error);
}
if (segsleft == 1) {
/*
* If this is a bi-directional request, need to account for that
* here. Save the pre-filled sge values. These will be used
* either for the 2nd SGL or for a single direction SGL. If
* cm_out_len is non-zero, this is a bi-directional request, so
* fill in the OUT SGL first, then the IN SGL, otherwise just
* fill in the IN SGL. Note that at this time, when filling in
* 2 SGL's for a bi-directional request, they both use the same
* DMA buffer (same cm command).
*/
saved_buf_len = sge->Length;
saved_address_low = sge->Address.Low;
saved_address_high = sge->Address.High;
if (cm->cm_out_len) {
sge->Length = cm->cm_out_len;
sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
cm->cm_sglsize -= ieee_sge_size;
/* Endian Safe code */
sge_length = sge->Length;
sge->Length = htole32(sge_length);
sge->Address.High = htole32(sge->Address.High);
sge->Address.Low = htole32(sge->Address.Low);
bcopy(sgep, cm->cm_sge, ieee_sge_size);
cm->cm_sge =
(MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
ieee_sge_size);
}
sge->Length = saved_buf_len;
sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
sge->Address.Low = saved_address_low;
sge->Address.High = saved_address_high;
}
cm->cm_sglsize -= ieee_sge_size;
/* Endian Safe code */
sge_length = sge->Length;
sge->Length = htole32(sge_length);
sge->Address.High = htole32(sge->Address.High);
sge->Address.Low = htole32(sge->Address.Low);
bcopy(sgep, cm->cm_sge, ieee_sge_size);
cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
ieee_sge_size);
return (0);
}
/*
* Add one dma segment to the scatter-gather list for a command.
*/
int
mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags,
int segsleft)
{
MPI2_SGE_SIMPLE64 sge;
MPI2_IEEE_SGE_SIMPLE64 ieee_sge;
if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
ieee_sge.Length = len;
mpr_from_u64(pa, &ieee_sge.Address);
return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft));
} else {
/*
* This driver always uses 64-bit address elements for
* simplicity.
*/
flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
/* Set Endian safe macro in mpr_push_sge */
sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
mpr_from_u64(pa, &sge.Address);
return (mpr_push_sge(cm, &sge, sizeof sge, segsleft));
}
}
static void
mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct mpr_softc *sc;
struct mpr_command *cm;
u_int i, dir, sflags;
cm = (struct mpr_command *)arg;
sc = cm->cm_sc;
/*
* In this case, just print out a warning and let the chip tell the
* user they did the wrong thing.
*/
if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d "
"segments, more than the %d allowed\n", __func__, nsegs,
cm->cm_max_segs);
}
/*
* Set up DMA direction flags. Bi-directional requests are also handled
* here. In that case, both direction flags will be set.
*/
sflags = 0;
if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
/*
* We have to add a special case for SMP passthrough, there
* is no easy way to generically handle it. The first
* S/G element is used for the command (therefore the
* direction bit needs to be set). The second one is used
* for the reply. We'll leave it to the caller to make
* sure we only have two buffers.
*/
/*
* Even though the busdma man page says it doesn't make
* sense to have both direction flags, it does in this case.
* We have one s/g element being accessed in each direction.
*/
dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
/*
* Set the direction flag on the first buffer in the SMP
* passthrough request. We'll clear it for the second one.
*/
sflags |= MPI2_SGE_FLAGS_DIRECTION |
MPI2_SGE_FLAGS_END_OF_BUFFER;
} else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
dir = BUS_DMASYNC_PREWRITE;
} else
dir = BUS_DMASYNC_PREREAD;
/* Check if a native SG list is needed for an NVMe PCIe device. */
if (cm->cm_targ && cm->cm_targ->is_nvme &&
mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) {
/* A native SG list was built, skip to end. */
goto out;
}
for (i = 0; i < nsegs; i++) {
if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
}
error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
sflags, nsegs - i);
if (error != 0) {
/* Resource shortage, roll back! */
if (ratecheck(&sc->lastfail, &mpr_chainfail_interval))
mpr_dprint(sc, MPR_INFO, "Out of chain frames, "
"consider increasing hw.mpr.max_chains.\n");
cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
mpr_complete_command(sc, cm);
return;
}
}
out:
bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
mpr_enqueue_request(sc, cm);
return;
}
static void
mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
int error)
{
mpr_data_cb(arg, segs, nsegs, error);
}
/*
* This is the routine to enqueue commands asynchronously.
* Note that the only error path here is from bus_dmamap_load(), which can
* return EINPROGRESS if it is waiting for resources. Other than this, it's
* assumed that if you have a command in-hand, then you have enough credits
* to use it.
*/
int
mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
{
int error = 0;
if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
&cm->cm_uio, mpr_data_cb2, cm, 0);
} else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
cm->cm_data, mpr_data_cb, cm, 0);
} else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
} else {
/* Add a zero-length element as needed */
if (cm->cm_sge != NULL)
mpr_add_dmaseg(cm, 0, 0, 0, 1);
mpr_enqueue_request(sc, cm);
}
return (error);
}
/*
* This is the routine to enqueue commands synchronously. An error of
* EINPROGRESS from mpr_map_command() is ignored since the command will
* be executed and enqueued automatically. Other errors come from msleep().
*/
int
mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout,
int sleep_flag)
{
int error, rc;
struct timeval cur_time, start_time;
struct mpr_command *cm = *cmp;
if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
return EBUSY;
cm->cm_complete = NULL;
cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED);
error = mpr_map_command(sc, cm);
if ((error != 0) && (error != EINPROGRESS))
return (error);
/*
* Check for context and wait for 50 ms at a time until time has
* expired or the command has finished. If msleep can't be used, need
* to poll.
*/
#if __FreeBSD_version >= 1000029
if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
sleep_flag = NO_SLEEP;
getmicrouptime(&start_time);
if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) {
error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz);
if (error == EWOULDBLOCK) {
/*
* Record the actual elapsed time in the case of a
* timeout for the message below.
*/
getmicrouptime(&cur_time);
timevalsub(&cur_time, &start_time);
}
} else {
while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
mpr_intr_locked(sc);
if (sleep_flag == CAN_SLEEP)
pause("mprwait", hz/20);
else
DELAY(50000);
getmicrouptime(&cur_time);
timevalsub(&cur_time, &start_time);
if (cur_time.tv_sec > timeout) {
error = EWOULDBLOCK;
break;
}
}
}
if (error == EWOULDBLOCK) {
mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d,"
" elapsed=%jd\n", __func__, timeout,
(intmax_t)cur_time.tv_sec);
rc = mpr_reinit(sc);
mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
"failed");
if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
/*
* Tell the caller that we freed the command in a
* reinit.
*/
*cmp = NULL;
}
error = ETIMEDOUT;
}
return (error);
}
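/*
 * Illustrative sketch (not part of the driver): the polled fallback used
 * above when sleeping is not allowed, reduced to its skeleton. The
 * 'done' and 'poll_hw' callbacks are placeholders for the example only
 * (in the driver they correspond to checking MPR_CM_FLAGS_COMPLETE and
 * calling mpr_intr_locked()).
 */
static int
poll_until_done(int (*done)(void *), void (*poll_hw)(void *), void *arg,
int timeout_secs)
{
struct timeval cur_time, start_time;
getmicrouptime(&start_time);
while (!done(arg)) {
poll_hw(arg);
DELAY(50000); /* 50 ms between polls */
getmicrouptime(&cur_time);
timevalsub(&cur_time, &start_time);
if (cur_time.tv_sec > timeout_secs)
return (EWOULDBLOCK);
}
return (0);
}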
/*
* This is the routine to enqueue a command synchronously and poll for
* completion. Its use should be rare.
*/
int
mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp)
{
int error, rc;
struct timeval cur_time, start_time;
struct mpr_command *cm = *cmp;
error = 0;
cm->cm_flags |= MPR_CM_FLAGS_POLLED;
cm->cm_complete = NULL;
mpr_map_command(sc, cm);
getmicrouptime(&start_time);
while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
mpr_intr_locked(sc);
if (mtx_owned(&sc->mpr_mtx))
msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
"mprpoll", hz/20);
else
pause("mprpoll", hz/20);
/*
* Check for real-time timeout and fail if more than 60 seconds.
*/
getmicrouptime(&cur_time);
timevalsub(&cur_time, &start_time);
if (cur_time.tv_sec > 60) {
mpr_dprint(sc, MPR_FAULT, "polling failed\n");
error = ETIMEDOUT;
break;
}
}
if (error) {
mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
rc = mpr_reinit(sc);
mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
"failed");
if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
/*
* Tell the caller that we freed the command in a
* reinit.
*/
*cmp = NULL;
}
}
return (error);
}
/*
* The MPT driver had a verbose interface for config pages. In this driver,
* reduce it to much simpler terms, similar to the Linux driver.
*/
int
mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
{
MPI2_CONFIG_REQUEST *req;
struct mpr_command *cm;
int error;
if (sc->mpr_flags & MPR_FLAGS_BUSY) {
return (EBUSY);
}
cm = mpr_alloc_command(sc);
if (cm == NULL) {
return (EBUSY);
}
req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
req->Function = MPI2_FUNCTION_CONFIG;
req->Action = params->action;
req->SGLFlags = 0;
req->ChainOffset = 0;
req->PageAddress = params->page_address;
if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
hdr = &params->hdr.Ext;
req->ExtPageType = hdr->ExtPageType;
req->ExtPageLength = hdr->ExtPageLength;
req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
req->Header.PageLength = 0; /* Must be set to zero */
req->Header.PageNumber = hdr->PageNumber;
req->Header.PageVersion = hdr->PageVersion;
} else {
MPI2_CONFIG_PAGE_HEADER *hdr;
hdr = &params->hdr.Struct;
req->Header.PageType = hdr->PageType;
req->Header.PageNumber = hdr->PageNumber;
req->Header.PageLength = hdr->PageLength;
req->Header.PageVersion = hdr->PageVersion;
}
cm->cm_data = params->buffer;
cm->cm_length = params->length;
if (cm->cm_data != NULL) {
cm->cm_sge = &req->PageBufferSGE;
cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
} else
cm->cm_sge = NULL;
cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
cm->cm_complete_data = params;
if (params->callback != NULL) {
cm->cm_complete = mpr_config_complete;
return (mpr_map_command(sc, cm));
} else {
error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP);
if (error) {
mpr_dprint(sc, MPR_FAULT,
"Error %d reading config page\n", error);
if (cm != NULL)
mpr_free_command(sc, cm);
return (error);
}
mpr_config_complete(sc, cm);
}
return (0);
}
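/*
 * Illustrative sketch (not part of the driver): how a caller might fill
 * in struct mpr_config_params for a synchronous read (callback == NULL)
 * through mpr_read_config_page(). The helper name and the choice of IO
 * Unit page 1 are placeholders; real callers normally obtain the header
 * fields from a prior PAGE_HEADER request instead of hard-coding them.
 */
static int
read_config_page_example(struct mpr_softc *sc, void *buf, size_t buflen)
{
struct mpr_config_params params;
bzero(&params, sizeof(params));
params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
params.page_address = 0;
params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
params.hdr.Struct.PageNumber = 1;
params.hdr.Struct.PageVersion = 0;
params.buffer = buf;
params.length = buflen;
params.callback = NULL; /* NULL means wait for completion here. */
return (mpr_read_config_page(sc, &params));
}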
int
mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
{
return (EINVAL);
}
static void
mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
MPI2_CONFIG_REPLY *reply;
struct mpr_config_params *params;
MPR_FUNCTRACE(sc);
params = cm->cm_complete_data;
if (cm->cm_data != NULL) {
bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
}
/*
* XXX KDM need to do more error recovery? This results in the
* device in question not getting probed.
*/
if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
params->status = MPI2_IOCSTATUS_BUSY;
goto done;
}
reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
if (reply == NULL) {
params->status = MPI2_IOCSTATUS_BUSY;
goto done;
}
params->status = reply->IOCStatus;
if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
params->hdr.Ext.ExtPageType = reply->ExtPageType;
params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
params->hdr.Ext.PageType = reply->Header.PageType;
params->hdr.Ext.PageNumber = reply->Header.PageNumber;
params->hdr.Ext.PageVersion = reply->Header.PageVersion;
} else {
params->hdr.Struct.PageType = reply->Header.PageType;
params->hdr.Struct.PageNumber = reply->Header.PageNumber;
params->hdr.Struct.PageLength = reply->Header.PageLength;
params->hdr.Struct.PageVersion = reply->Header.PageVersion;
}
done:
mpr_free_command(sc, cm);
if (params->callback != NULL)
params->callback(sc, params);
return;
}
Index: head/sys/dev/mpr/mpr_mapping.c
===================================================================
--- head/sys/dev/mpr/mpr_mapping.c (revision 328217)
+++ head/sys/dev/mpr/mpr_mapping.c (revision 328218)
@@ -1,3131 +1,3131 @@
/*-
* Copyright (c) 2011-2015 LSI Corp.
* Copyright (c) 2013-2016 Avago Technologies
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_pci.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_mapping.h>
/**
* _mapping_clear_map_entry - Clear a particular mapping entry.
* @map_entry: map table entry
*
* Returns nothing.
*/
static inline void
_mapping_clear_map_entry(struct dev_mapping_table *map_entry)
{
map_entry->physical_id = 0;
map_entry->device_info = 0;
map_entry->phy_bits = 0;
map_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
map_entry->dev_handle = 0;
map_entry->id = -1;
map_entry->missing_count = 0;
map_entry->init_complete = 0;
map_entry->TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
}
/**
* _mapping_clear_enc_entry - Clear a particular enclosure table entry.
* @enc_entry: enclosure table entry
*
* Returns nothing.
*/
static inline void
_mapping_clear_enc_entry(struct enc_mapping_table *enc_entry)
{
enc_entry->enclosure_id = 0;
enc_entry->start_index = MPR_MAPTABLE_BAD_IDX;
enc_entry->phy_bits = 0;
enc_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
enc_entry->enc_handle = 0;
enc_entry->num_slots = 0;
enc_entry->start_slot = 0;
enc_entry->missing_count = 0;
enc_entry->removal_flag = 0;
enc_entry->skip_search = 0;
enc_entry->init_complete = 0;
}
/**
* _mapping_commit_enc_entry - write a particular enc entry in DPM page0.
* @sc: per adapter object
* @et_entry: enclosure table entry
*
* Returns 0 for success, non-zero for failure.
*/
static int
_mapping_commit_enc_entry(struct mpr_softc *sc,
struct enc_mapping_table *et_entry)
{
Mpi2DriverMap0Entry_t *dpm_entry;
struct dev_mapping_table *mt_entry;
Mpi2ConfigReply_t mpi_reply;
Mpi2DriverMappingPage0_t config_page;
if (!sc->is_dpm_enable)
return 0;
memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
memcpy(&config_page.Header, (u8 *) sc->dpm_pg0,
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += et_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low =
( 0xFFFFFFFF & et_entry->enclosure_id);
dpm_entry->PhysicalIdentifier.High =
( et_entry->enclosure_id >> 32);
mt_entry = &sc->mapping_table[et_entry->start_index];
dpm_entry->DeviceIndex = htole16(mt_entry->id);
dpm_entry->MappingInformation = et_entry->num_slots;
dpm_entry->MappingInformation <<= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
dpm_entry->MappingInformation |= et_entry->missing_count;
dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation);
dpm_entry->PhysicalBitsMapping = htole32(et_entry->phy_bits);
dpm_entry->Reserved1 = 0;
mpr_dprint(sc, MPR_MAPPING, "%s: Writing DPM entry %d for enclosure.\n",
__func__, et_entry->dpm_entry_num);
memcpy(&config_page.Entry, (u8 *)dpm_entry,
sizeof(Mpi2DriverMap0Entry_t));
if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
et_entry->dpm_entry_num)) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Write of DPM "
"entry %d for enclosure failed.\n", __func__,
et_entry->dpm_entry_num);
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping =
le32toh(dpm_entry->PhysicalBitsMapping);
return -1;
}
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping =
le32toh(dpm_entry->PhysicalBitsMapping);
return 0;
}
/**
* _mapping_commit_map_entry - write a particular map table entry in DPM page0.
* @sc: per adapter object
* @mt_entry: mapping table entry
*
* Returns 0 for success, non-zero for failure.
*/
static int
_mapping_commit_map_entry(struct mpr_softc *sc,
struct dev_mapping_table *mt_entry)
{
Mpi2DriverMap0Entry_t *dpm_entry;
Mpi2ConfigReply_t mpi_reply;
Mpi2DriverMappingPage0_t config_page;
if (!sc->is_dpm_enable)
return 0;
/*
* It's possible that this Map Entry points to a BAD DPM index. This
* can happen if the Map Entry is for a missing device and the DPM
* entry that was being used by this device is now being used by some
* new device. So, check for a BAD DPM index and just return if so.
*/
if (mt_entry->dpm_entry_num == MPR_DPM_BAD_IDX) {
mpr_dprint(sc, MPR_MAPPING, "%s: DPM entry location for target "
"%d is invalid. DPM will not be written.\n", __func__,
mt_entry->id);
return 0;
}
memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *) sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = dpm_entry + mt_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF &
mt_entry->physical_id);
dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = htole16(mt_entry->id);
dpm_entry->MappingInformation = htole16(mt_entry->missing_count);
dpm_entry->PhysicalBitsMapping = 0;
dpm_entry->Reserved1 = 0;
memcpy(&config_page.Entry, (u8 *)dpm_entry,
sizeof(Mpi2DriverMap0Entry_t));
mpr_dprint(sc, MPR_MAPPING, "%s: Writing DPM entry %d for target %d.\n",
__func__, mt_entry->dpm_entry_num, mt_entry->id);
if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
mt_entry->dpm_entry_num)) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Write of DPM "
"entry %d for target %d failed.\n", __func__,
mt_entry->dpm_entry_num, mt_entry->id);
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
return -1;
}
dpm_entry->MappingInformation = le16toh(dpm_entry->MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
return 0;
}
/**
* _mapping_get_ir_maprange - get start and end index for IR map range.
* @sc: per adapter object
* @start_idx: place holder for start index
* @end_idx: place holder for end index
*
* The IR volumes can be mapped either at the start or at the end of the
* mapping table. This function returns where the IR volume mapping starts
* and ends in the device mapping table.
*
* Returns nothing.
*/
static void
_mapping_get_ir_maprange(struct mpr_softc *sc, u32 *start_idx, u32 *end_idx)
{
u16 volume_mapping_flags;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
*start_idx = 0;
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
*start_idx = 1;
} else
*start_idx = sc->max_devices - sc->max_volumes;
*end_idx = *start_idx + sc->max_volumes - 1;
}
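/*
 * Illustrative sketch (not part of the driver): a worked example of the
 * range computation above. With max_devices = 1024, max_volumes = 2 and
 * no reserved target ID 0, low volume mapping yields [0, 1] while high
 * volume mapping yields [1022, 1023]. The helper below repeats the
 * arithmetic outside of the softc; the names are assumptions for the
 * example only.
 */
static void
ir_maprange_example(u32 max_devices, u32 max_volumes, int low_mapping,
int reserve_tid0, u32 *start_idx, u32 *end_idx)
{
if (low_mapping)
*start_idx = reserve_tid0 ? 1 : 0;
else
*start_idx = max_devices - max_volumes;
*end_idx = *start_idx + max_volumes - 1;
}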
/**
* _mapping_get_enc_idx_from_id - get enclosure index from enclosure ID
* @sc: per adapter object
* @enc_id: enclosure logical identifier
* @phy_bits: PHY bits to match against the enclosure entry (an entry with
* no PHY bits matches any)
*
* Returns the index of enclosure entry on success or bad index.
*/
static u8
_mapping_get_enc_idx_from_id(struct mpr_softc *sc, u64 enc_id,
u64 phy_bits)
{
struct enc_mapping_table *et_entry;
u8 enc_idx = 0;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
et_entry = &sc->enclosure_table[enc_idx];
if ((et_entry->enclosure_id == le64toh(enc_id)) &&
(!et_entry->phy_bits || (et_entry->phy_bits &
le32toh(phy_bits))))
return enc_idx;
}
return MPR_ENCTABLE_BAD_IDX;
}
/**
* _mapping_get_enc_idx_from_handle - get enclosure index from handle
* @sc: per adapter object
* @handle: enclosure handle
*
* Returns the index of enclosure entry on success or bad index.
*/
static u8
_mapping_get_enc_idx_from_handle(struct mpr_softc *sc, u16 handle)
{
struct enc_mapping_table *et_entry;
u8 enc_idx = 0;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->missing_count)
continue;
if (et_entry->enc_handle == handle)
return enc_idx;
}
return MPR_ENCTABLE_BAD_IDX;
}
/**
* _mapping_get_high_missing_et_idx - get missing enclosure index
* @sc: per adapter object
*
* Search through the enclosure table and identify the enclosure entry
* with the highest missing count and return its index.
*
* Returns the index of enclosure entry on success or bad index.
*/
static u8
_mapping_get_high_missing_et_idx(struct mpr_softc *sc)
{
struct enc_mapping_table *et_entry;
u8 high_missing_count = 0;
u8 enc_idx, high_idx = MPR_ENCTABLE_BAD_IDX;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
et_entry = &sc->enclosure_table[enc_idx];
if ((et_entry->missing_count > high_missing_count) &&
!et_entry->skip_search) {
high_missing_count = et_entry->missing_count;
high_idx = enc_idx;
}
}
return high_idx;
}
/**
* _mapping_get_high_missing_mt_idx - get missing map table index
* @sc: per adapter object
*
* Search through the map table and identify the device entry with the
* highest missing count and return its index.
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_high_missing_mt_idx(struct mpr_softc *sc)
{
u32 map_idx, high_idx = MPR_MAPTABLE_BAD_IDX;
u8 high_missing_count = 0;
u32 start_idx, end_idx, start_idx_ir, end_idx_ir;
struct dev_mapping_table *mt_entry;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
start_idx = 0;
start_idx_ir = 0;
end_idx_ir = 0;
end_idx = sc->max_devices;
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
start_idx = 1;
if (sc->ir_firmware) {
_mapping_get_ir_maprange(sc, &start_idx_ir, &end_idx_ir);
if (start_idx == start_idx_ir)
start_idx = end_idx_ir + 1;
else
end_idx = start_idx_ir;
}
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx < end_idx; map_idx++, mt_entry++) {
if (mt_entry->missing_count > high_missing_count) {
high_missing_count = mt_entry->missing_count;
high_idx = map_idx;
}
}
return high_idx;
}
/**
* _mapping_get_ir_mt_idx_from_wwid - get map table index from volume WWID
* @sc: per adapter object
* @wwid: world wide unique ID of the volume
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_ir_mt_idx_from_wwid(struct mpr_softc *sc, u64 wwid)
{
u32 start_idx, end_idx, map_idx;
struct dev_mapping_table *mt_entry;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
if (mt_entry->physical_id == wwid)
return map_idx;
return MPR_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_mt_idx_from_id - get map table index from a device ID
* @sc: per adapter object
* @dev_id: device identifier (SAS Address)
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_mt_idx_from_id(struct mpr_softc *sc, u64 dev_id)
{
u32 map_idx;
struct dev_mapping_table *mt_entry;
for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->physical_id == dev_id)
return map_idx;
}
return MPR_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_ir_mt_idx_from_handle - get map table index from volume handle
* @sc: per adapter object
* @volHandle: volume device handle
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_ir_mt_idx_from_handle(struct mpr_softc *sc, u16 volHandle)
{
u32 start_idx, end_idx, map_idx;
struct dev_mapping_table *mt_entry;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
if (mt_entry->dev_handle == volHandle)
return map_idx;
return MPR_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_mt_idx_from_handle - get map table index from handle
* @sc: per adapter object
* @handle: device handle
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_mt_idx_from_handle(struct mpr_softc *sc, u16 handle)
{
u32 map_idx;
struct dev_mapping_table *mt_entry;
for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->dev_handle == handle)
return map_idx;
}
return MPR_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_free_ir_mt_idx - get first free index for a volume
* @sc: per adapter object
*
* Search through the mapping table for a free volume index. If no free
* index is found, return the index of the volume entry with the highest
* missing count.
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_free_ir_mt_idx(struct mpr_softc *sc)
{
u8 high_missing_count = 0;
u32 start_idx, end_idx, map_idx;
u32 high_idx = MPR_MAPTABLE_BAD_IDX;
struct dev_mapping_table *mt_entry;
/*
* The IN_USE flag should be clear if the entry is available to use.
* This flag is cleared on initialization and when a volume is
* deleted. All other times this flag should be set. If, for some
* reason, a free entry cannot be found, look for the entry with the
* highest missing count just in case there is one.
*/
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
if (!(mt_entry->device_info & MPR_MAP_IN_USE))
return map_idx;
if (mt_entry->missing_count > high_missing_count) {
high_missing_count = mt_entry->missing_count;
high_idx = map_idx;
}
}
if (high_idx == MPR_MAPTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Could not find a "
"free entry in the mapping table for a Volume. The mapping "
"table is probably corrupt.\n", __func__);
}
return high_idx;
}
/**
* _mapping_get_free_mt_idx - get first free index for a device
* @sc: per adapter object
* @start_idx: offset in the table to start search
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_free_mt_idx(struct mpr_softc *sc, u32 start_idx)
{
u32 map_idx, max_idx = sc->max_devices;
struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx];
u16 volume_mapping_flags;
volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
if (sc->ir_firmware && (volume_mapping_flags ==
MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING))
max_idx -= sc->max_volumes;
for (map_idx = start_idx; map_idx < max_idx; map_idx++, mt_entry++)
if (!(mt_entry->device_info & (MPR_MAP_IN_USE |
MPR_DEV_RESERVED)))
return map_idx;
return MPR_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_dpm_idx_from_id - get DPM index from ID
* @sc: per adapter object
* @id: volume WWID or enclosure ID or device ID
* @phy_bits: PHY bits to match against the DPM entry (0 matches any)
*
* Returns the index of DPM entry on success or bad index.
*/
static u16
_mapping_get_dpm_idx_from_id(struct mpr_softc *sc, u64 id, u32 phy_bits)
{
u16 entry_num;
uint64_t PhysicalIdentifier;
Mpi2DriverMap0Entry_t *dpm_entry;
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
dpm_entry++) {
/*
* Compute the identifier for each DPM entry rather than only the
* first, so every entry is compared against the requested ID.
*/
PhysicalIdentifier = dpm_entry->PhysicalIdentifier.High;
PhysicalIdentifier = (PhysicalIdentifier << 32) |
dpm_entry->PhysicalIdentifier.Low;
if ((id == PhysicalIdentifier) &&
(!phy_bits || !dpm_entry->PhysicalBitsMapping ||
(phy_bits & dpm_entry->PhysicalBitsMapping)))
return entry_num;
}
return MPR_DPM_BAD_IDX;
}
/**
* _mapping_get_free_dpm_idx - get first available DPM index
* @sc: per adapter object
*
* Returns the index of DPM entry on success or bad index.
*/
static u32
_mapping_get_free_dpm_idx(struct mpr_softc *sc)
{
u16 entry_num;
Mpi2DriverMap0Entry_t *dpm_entry;
u16 current_entry = MPR_DPM_BAD_IDX, missing_cnt, high_missing_cnt = 0;
u64 physical_id;
struct dev_mapping_table *mt_entry;
u32 map_idx;
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += entry_num;
missing_cnt = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
/*
* If entry is used and not missing, then this entry can't be
* used. Look at next one.
*/
if (sc->dpm_entry_used[entry_num] && !missing_cnt)
continue;
/*
* If this entry is not used at all, then the missing count
* doesn't matter. Just use this one. Otherwise, keep looking
* and make sure the entry with the highest missing count is
* used.
*/
if (!sc->dpm_entry_used[entry_num]) {
current_entry = entry_num;
break;
}
if ((current_entry == MPR_DPM_BAD_IDX) ||
(missing_cnt > high_missing_cnt)) {
current_entry = entry_num;
high_missing_cnt = missing_cnt;
}
}
/*
* If an entry has been found to use and it's already marked as used
* it means that some device was already using this entry but it's
* missing, and that means that the connection between the missing
* device's DPM entry and the mapping table needs to be cleared. To do
* this, use the Physical ID of the old device still in the DPM entry
* to find its mapping table entry, then mark its DPM entry as BAD.
*/
if ((current_entry != MPR_DPM_BAD_IDX) &&
sc->dpm_entry_used[current_entry]) {
dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += current_entry;
physical_id = dpm_entry->PhysicalIdentifier.High;
physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
map_idx = _mapping_get_mt_idx_from_id(sc, physical_id);
if (map_idx != MPR_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
mt_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
}
}
return current_entry;
}
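/*
 * Illustrative sketch (not part of the driver): the selection policy
 * above, reduced to plain arrays. Prefer a completely unused DPM entry;
 * failing that, reuse the used-but-missing entry with the highest
 * missing count. The names are assumptions for the example only.
 */
static int
pick_free_dpm_entry(const int *used, const int *missing_cnt, int nentries)
{
int i, best = -1, best_missing = 0;
for (i = 0; i < nentries; i++) {
if (!used[i])
return (i); /* First unused entry wins. */
if (missing_cnt[i] == 0)
continue; /* Used and present: cannot reuse. */
if (best == -1 || missing_cnt[i] > best_missing) {
best = i;
best_missing = missing_cnt[i];
}
}
return (best); /* -1 when nothing is available. */
}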
/**
* _mapping_update_ir_missing_cnt - Updates missing count for a volume
* @sc: per adapter object
* @map_idx: map table index of the volume
* @element: IR configuration change element
* @wwid: IR volume ID.
*
* Updates the missing count in the map table and in the DPM entry for a volume
*
* Returns nothing.
*/
static void
_mapping_update_ir_missing_cnt(struct mpr_softc *sc, u32 map_idx,
Mpi2EventIrConfigElement_t *element, u64 wwid)
{
struct dev_mapping_table *mt_entry;
u8 missing_cnt, reason = element->ReasonCode, update_dpm = 1;
u16 dpm_idx;
Mpi2DriverMap0Entry_t *dpm_entry;
/*
* Depending on the reason code, update the missing count. Always set
* the init_complete flag when here, so just do it first. That flag is
* used for volumes to make sure that the DPM entry has been updated.
* When a volume is deleted, clear the map entry's IN_USE flag so that
* the entry can be used again if another volume is created. Also clear
* its dev_handle entry so that other functions can't find this volume
* by the handle, since it's not defined any longer.
*/
mt_entry = &sc->mapping_table[map_idx];
mt_entry->init_complete = 1;
if ((reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) ||
(reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED)) {
mt_entry->missing_count = 0;
} else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) {
if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT)
mt_entry->missing_count++;
mt_entry->device_info &= ~MPR_MAP_IN_USE;
mt_entry->dev_handle = 0;
}
/*
* If persistent mapping is enabled, update the DPM with the new missing
* count for the volume. If the DPM index is bad, get a free one. If
* it's bad for a volume that's being deleted do nothing because that
* volume doesn't have a DPM entry.
*/
if (!sc->is_dpm_enable)
return;
dpm_idx = mt_entry->dpm_entry_num;
if (dpm_idx == MPR_DPM_BAD_IDX) {
if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED)
{
mpr_dprint(sc, MPR_MAPPING, "%s: Volume being deleted "
"is not in DPM so DPM missing count will not be "
"updated.\n", __func__);
return;
}
}
if (dpm_idx == MPR_DPM_BAD_IDX)
dpm_idx = _mapping_get_free_dpm_idx(sc);
/*
* Got the DPM entry for the volume or found a free DPM entry if this is
* a new volume. Check if the current information is outdated.
*/
if (dpm_idx != MPR_DPM_BAD_IDX) {
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += dpm_idx;
missing_cnt = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
if ((mt_entry->physical_id ==
le64toh(((u64)dpm_entry->PhysicalIdentifier.High << 32) |
(u64)dpm_entry->PhysicalIdentifier.Low)) && (missing_cnt ==
mt_entry->missing_count)) {
mpr_dprint(sc, MPR_MAPPING, "%s: DPM entry for volume "
"with target ID %d does not require an update.\n",
__func__, mt_entry->id);
update_dpm = 0;
}
}
/*
* Update the volume's persistent info if it's new or the ID or missing
* count has changed. If a good DPM index has not been found by now,
* there is no space left in the DPM table.
*/
if ((dpm_idx != MPR_DPM_BAD_IDX) && update_dpm) {
mpr_dprint(sc, MPR_MAPPING, "%s: Update DPM entry for volume "
"with target ID %d.\n", __func__, mt_entry->id);
mt_entry->dpm_entry_num = dpm_idx;
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += dpm_idx;
dpm_entry->PhysicalIdentifier.Low =
(0xFFFFFFFF & mt_entry->physical_id);
dpm_entry->PhysicalIdentifier.High =
(mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = map_idx;
dpm_entry->MappingInformation = mt_entry->missing_count;
dpm_entry->PhysicalBitsMapping = 0;
dpm_entry->Reserved1 = 0;
sc->dpm_flush_entry[dpm_idx] = 1;
sc->dpm_entry_used[dpm_idx] = 1;
} else if (dpm_idx == MPR_DPM_BAD_IDX) {
mpr_dprint(sc, MPR_INFO | MPR_MAPPING, "%s: No space to add an "
"entry in the DPM table for volume with target ID %d.\n",
__func__, mt_entry->id);
}
}
/**
* _mapping_add_to_removal_table - add DPM index to the removal table
* @sc: per adapter object
* @dpm_idx: Index of DPM entry to remove
*
* Adds a DPM entry number to the removal table.
*
* Returns nothing.
*/
static void
_mapping_add_to_removal_table(struct mpr_softc *sc, u16 dpm_idx)
{
struct map_removal_table *remove_entry;
u32 i;
/*
* This is only used to remove entries from the DPM in the controller.
* If DPM is not enabled, just return.
*/
if (!sc->is_dpm_enable)
return;
/*
* Find the first available removal_table entry and add the new entry
* there.
*/
remove_entry = sc->removal_table;
for (i = 0; i < sc->max_devices; i++, remove_entry++) {
if (remove_entry->dpm_entry_num != MPR_DPM_BAD_IDX)
continue;
mpr_dprint(sc, MPR_MAPPING, "%s: Adding DPM entry %d to table "
"for removal.\n", __func__, dpm_idx);
remove_entry->dpm_entry_num = dpm_idx;
break;
}
}
/**
* _mapping_inc_missing_count
* @sc: per adapter object
* @map_idx: index into the mapping table for the device that is missing
*
* Increment the missing count in the mapping table for a SAS, SATA, or PCIe
* device that is not responding. If Persistent Mapping is used, increment the
* DPM entry as well. Currently, this function is only called if the target
* goes missing, so after initialization has completed. This means that the
* missing count can only go from 0 to 1 here. The missing count is incremented
* during initialization as well, so that's where a target's missing count can
* go past 1.
*
* Returns nothing.
*/
static void
_mapping_inc_missing_count(struct mpr_softc *sc, u32 map_idx)
{
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
struct dev_mapping_table *mt_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
if (map_idx == MPR_MAPTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_INFO | MPR_MAPPING, "%s: device is already "
"removed from mapping table\n", __func__);
return;
}
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT)
mt_entry->missing_count++;
/*
* When using Enc/Slot mapping, when a device is removed, its mapping
* table information should be cleared. Otherwise, the target ID will
* be incorrect if this same device is re-added to a different slot.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
_mapping_clear_map_entry(mt_entry);
}
/*
* When using device mapping, update the missing count in the DPM entry,
* but only if the missing count has changed.
*/
if (((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) &&
sc->is_dpm_enable &&
mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += mt_entry->dpm_entry_num;
if (dpm_entry->MappingInformation != mt_entry->missing_count) {
dpm_entry->MappingInformation = mt_entry->missing_count;
sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1;
}
}
}
/**
* _mapping_update_missing_count - Update missing count for a device
* @sc: per adapter object
* @topo_change: Topology change event entry
*
* Search through the topology change list. If any device is found not
* responding, its associated map table entry and DPM entry are updated.
*
* Returns nothing.
*/
static void
_mapping_update_missing_count(struct mpr_softc *sc,
struct _map_topology_change *topo_change)
{
u8 entry;
struct _map_phy_change *phy_change;
u32 map_idx;
for (entry = 0; entry < topo_change->num_entries; entry++) {
phy_change = &topo_change->phy_details[entry];
if (!phy_change->dev_handle || (phy_change->reason !=
MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
continue;
map_idx = _mapping_get_mt_idx_from_handle(sc, phy_change->
dev_handle);
phy_change->is_processed = 1;
_mapping_inc_missing_count(sc, map_idx);
}
}
/**
* _mapping_update_pcie_missing_count - Update missing count for a PCIe device
* @sc: per adapter object
* @topo_change: Topology change event entry
*
* Search through the PCIe topology change list. If any device is found not
* responding, its associated map table entry and DPM entry are updated.
*
* Returns nothing.
*/
static void
_mapping_update_pcie_missing_count(struct mpr_softc *sc,
struct _map_pcie_topology_change *topo_change)
{
u8 entry;
struct _map_port_change *port_change;
u32 map_idx;
for (entry = 0; entry < topo_change->num_entries; entry++) {
port_change = &topo_change->port_details[entry];
if (!port_change->dev_handle || (port_change->reason !=
MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING))
continue;
map_idx = _mapping_get_mt_idx_from_handle(sc, port_change->
dev_handle);
port_change->is_processed = 1;
_mapping_inc_missing_count(sc, map_idx);
}
}
/**
* _mapping_find_enc_map_space - find map table entries for an enclosure
* @sc: per adapter object
* @et_entry: enclosure entry
*
* Search through the mapping table, defragment it, and provide contiguous
* space in the map table for a particular enclosure entry.
*
* Returns start index in map table or bad index.
*/
static u32
_mapping_find_enc_map_space(struct mpr_softc *sc,
struct enc_mapping_table *et_entry)
{
u16 vol_mapping_flags;
u32 skip_count, end_of_table, map_idx, enc_idx;
u16 num_found;
u32 start_idx = MPR_MAPTABLE_BAD_IDX;
struct dev_mapping_table *mt_entry;
struct enc_mapping_table *enc_entry;
unsigned char done_flag = 0, found_space;
u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
skip_count = sc->num_rsvd_entries;
num_found = 0;
vol_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
/*
* The end of the mapping table depends on where volumes are kept, if
* IR is enabled.
*/
if (!sc->ir_firmware)
end_of_table = sc->max_devices;
else if (vol_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING)
end_of_table = sc->max_devices;
else
end_of_table = sc->max_devices - sc->max_volumes;
/*
* The skip_count is the number of entries that are reserved at the
* beginning of the mapping table. But, it does not include the number
* of Physical IDs that are reserved for direct attached devices. Look
* through the mapping table after these reserved entries to see if
* the devices for this enclosure are already mapped. The PHY bit check
* is used to make sure that at least one PHY bit is common between the
* enclosure and the device that is already mapped.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Looking for space in the mapping "
"table for added enclosure.\n", __func__);
for (map_idx = (max_num_phy_ids + skip_count);
map_idx < end_of_table; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if ((et_entry->enclosure_id == mt_entry->physical_id) &&
(!mt_entry->phy_bits || (mt_entry->phy_bits &
et_entry->phy_bits))) {
num_found += 1;
if (num_found == et_entry->num_slots) {
start_idx = (map_idx - num_found) + 1;
mpr_dprint(sc, MPR_MAPPING, "%s: Found space "
"in the mapping for enclosure at map index "
"%d.\n", __func__, start_idx);
return start_idx;
}
} else
num_found = 0;
}
/*
* If the enclosure's devices are not mapped already, look for
* contiguous entries in the mapping table that are not reserved. If
* enough entries are found, return the starting index for that space.
*/
num_found = 0;
for (map_idx = (max_num_phy_ids + skip_count);
map_idx < end_of_table; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (!(mt_entry->device_info & MPR_DEV_RESERVED)) {
num_found += 1;
if (num_found == et_entry->num_slots) {
start_idx = (map_idx - num_found) + 1;
mpr_dprint(sc, MPR_MAPPING, "%s: Found space "
"in the mapping for enclosure at map index "
"%d.\n", __func__, start_idx);
return start_idx;
}
} else
num_found = 0;
}
/*
* If here, it means that not enough space in the mapping table was
* found to support this enclosure, so go through the enclosure table to
* see if any enclosure entries have a missing count. If so, get the
* enclosure with the highest missing count and check it to see if there
* is enough space for the new enclosure.
*/
while (!done_flag) {
enc_idx = _mapping_get_high_missing_et_idx(sc);
if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_MAPPING, "%s: Not enough space was "
"found in the mapping for the added enclosure.\n",
__func__);
return MPR_MAPTABLE_BAD_IDX;
}
/*
* Found a missing enclosure. Set the skip_search flag so this
* enclosure is not checked again for a high missing count if
* the loop continues. This way, all missing enclosures can
* have their space added together to find enough space in the
* mapping table for the added enclosure. The space must be
* contiguous.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Space from a missing "
"enclosure was found.\n", __func__);
enc_entry = &sc->enclosure_table[enc_idx];
enc_entry->skip_search = 1;
/*
* Unmark all of the missing enclosure's device's reserved
* space. These will be remarked as reserved if this missing
* enclosure's space is not used.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Clear the reserved flag for "
"all of the map entries for the enclosure.\n", __func__);
mt_entry = &sc->mapping_table[enc_entry->start_index];
for (map_idx = enc_entry->start_index; map_idx <
(enc_entry->start_index + enc_entry->num_slots); map_idx++,
mt_entry++)
mt_entry->device_info &= ~MPR_DEV_RESERVED;
/*
* Now that space has been unreserved, check again to see if
* enough space is available for the new enclosure.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Check if new mapping space is "
"enough for the new enclosure.\n", __func__);
found_space = 0;
num_found = 0;
for (map_idx = (max_num_phy_ids + skip_count);
map_idx < end_of_table; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (!(mt_entry->device_info & MPR_DEV_RESERVED)) {
num_found += 1;
if (num_found == et_entry->num_slots) {
start_idx = (map_idx - num_found) + 1;
found_space = 1;
break;
}
} else
num_found = 0;
}
if (!found_space)
continue;
/*
* If enough space was found, all of the missing enclosures that
* will be used for the new enclosure must be added to the
* removal table. Then all mappings for the enclosure's devices
* and for the enclosure itself need to be cleared. There may be
* more than one enclosure to add to the removal table and
* clear.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Found space in the mapping "
"for enclosure at map index %d.\n", __func__, start_idx);
for (map_idx = start_idx; map_idx < (start_idx + num_found);
map_idx++) {
enc_entry = sc->enclosure_table;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
enc_idx++, enc_entry++) {
if (map_idx < enc_entry->start_index ||
map_idx > (enc_entry->start_index +
enc_entry->num_slots))
continue;
if (!enc_entry->removal_flag) {
mpr_dprint(sc, MPR_MAPPING, "%s: "
"Enclosure %d will be removed from "
"the mapping table.\n", __func__,
enc_idx);
enc_entry->removal_flag = 1;
_mapping_add_to_removal_table(sc,
enc_entry->dpm_entry_num);
}
mt_entry = &sc->mapping_table[map_idx];
_mapping_clear_map_entry(mt_entry);
if (map_idx == (enc_entry->start_index +
enc_entry->num_slots - 1))
_mapping_clear_enc_entry(et_entry);
}
}
/*
* During the search for space for this enclosure, some entries
* in the mapping table may have been unreserved. Go back and
* change all of these to reserved again. Only the enclosures
* with the removal_flag set should be left as unreserved. The
* skip_search flag needs to be cleared as well so that the
* enclosure's space will be looked at the next time space is
* needed.
*/
enc_entry = sc->enclosure_table;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
enc_idx++, enc_entry++) {
if (!enc_entry->removal_flag) {
mpr_dprint(sc, MPR_MAPPING, "%s: Reset the "
"reserved flag for all of the map entries "
"for enclosure %d.\n", __func__, enc_idx);
mt_entry = &sc->mapping_table[enc_entry->
start_index];
for (map_idx = enc_entry->start_index; map_idx <
(enc_entry->start_index +
enc_entry->num_slots); map_idx++,
mt_entry++)
mt_entry->device_info |=
MPR_DEV_RESERVED;
et_entry->skip_search = 0;
}
}
done_flag = 1;
}
return start_idx;
}
/**
 * _mapping_get_dev_info - get information about newly added devices
* @sc: per adapter object
* @topo_change: Topology change event entry
*
 * Search through the topology change event list, issue SAS Device Page 0
 * requests for the newly added devices, and reserve entries in the tables.
 *
 * Returns nothing.
*/
static void
_mapping_get_dev_info(struct mpr_softc *sc,
struct _map_topology_change *topo_change)
{
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
u8 entry, enc_idx, phy_idx;
u32 map_idx, index, device_info;
struct _map_phy_change *phy_change, *tmp_phy_change;
uint64_t sas_address;
struct enc_mapping_table *et_entry;
struct dev_mapping_table *mt_entry;
u8 add_code = MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED;
int rc = 1;
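/*
 * Walk each PHY entry in the topology event. Only unprocessed target-add
 * entries with a valid device handle need SAS Device Page 0 data.
 */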
for (entry = 0; entry < topo_change->num_entries; entry++) {
phy_change = &topo_change->phy_details[entry];
if (phy_change->is_processed || !phy_change->dev_handle ||
phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED)
continue;
if (mpr_config_get_sas_device_pg0(sc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
phy_change->dev_handle)) {
phy_change->is_processed = 1;
continue;
}
/*
* Always get SATA Identify information because this is used
* to determine if Start/Stop Unit should be sent to the drive
* when the system is shutdown.
*/
device_info = le32toh(sas_device_pg0.DeviceInfo);
sas_address = le32toh(sas_device_pg0.SASAddress.High);
sas_address = (sas_address << 32) |
le32toh(sas_device_pg0.SASAddress.Low);
if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) {
rc = mprsas_get_sas_address_for_sata_disk(sc,
&sas_address, phy_change->dev_handle, device_info,
&phy_change->is_SATA_SSD);
if (rc) {
mpr_dprint(sc, MPR_ERROR, "%s: failed to get "
"disk type (SSD or HDD) and SAS Address "
"for SATA device with handle 0x%04x\n",
__func__, phy_change->dev_handle);
}
}
phy_change->physical_id = sas_address;
phy_change->slot = le16toh(sas_device_pg0.Slot);
phy_change->device_info = device_info;
/*
* When using Enc/Slot mapping, if this device is an enclosure
* make sure that all of its slots can fit into the mapping
* table.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
/*
* The enclosure should already be in the enclosure
* table due to the Enclosure Add event. If not, just
* continue, nothing can be done.
*/
enc_idx = _mapping_get_enc_idx_from_handle(sc,
topo_change->enc_handle);
if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
phy_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because the enclosure is not in "
"the mapping table\n", __func__,
phy_change->dev_handle);
continue;
}
if (!((phy_change->device_info &
MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
(phy_change->device_info &
(MPI2_SAS_DEVICE_INFO_SSP_TARGET |
MPI2_SAS_DEVICE_INFO_STP_TARGET |
MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))) {
phy_change->is_processed = 1;
continue;
}
et_entry = &sc->enclosure_table[enc_idx];
/*
* If the enclosure already has a start_index, it's been
* mapped, so go to the next Topo change.
*/
if (et_entry->start_index != MPR_MAPTABLE_BAD_IDX)
continue;
/*
* If the Expander Handle is 0, the devices are direct
* attached. In that case, the start_index must be just
* after the reserved entries. Otherwise, find space in
* the mapping table for the enclosure's devices.
*/
if (!topo_change->exp_handle) {
map_idx = sc->num_rsvd_entries;
et_entry->start_index = map_idx;
} else {
map_idx = _mapping_find_enc_map_space(sc,
et_entry);
et_entry->start_index = map_idx;
/*
* If space cannot be found to hold all of the
* enclosure's devices in the mapping table,
* there's no need to continue checking the
* other devices in this event. Set all of the
* phy_details for this event (if the change is
* for an add) as already processed because none
* of these devices can be added to the mapping
* table.
*/
if (et_entry->start_index ==
MPR_MAPTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING,
"%s: failed to add the enclosure "
"with ID 0x%016jx because there is "
"no free space available in the "
"mapping table for all of the "
"enclosure's devices.\n", __func__,
(uintmax_t)et_entry->enclosure_id);
phy_change->is_processed = 1;
for (phy_idx = 0; phy_idx <
topo_change->num_entries;
phy_idx++) {
tmp_phy_change =
&topo_change->phy_details
[phy_idx];
if (tmp_phy_change->reason ==
add_code)
tmp_phy_change->
is_processed = 1;
}
break;
}
}
/*
* Found space in the mapping table for this enclosure.
* Initialize each mapping table entry for the
* enclosure.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Initialize %d map "
"entries for the enclosure, starting at map index "
"%d.\n", __func__, et_entry->num_slots, map_idx);
mt_entry = &sc->mapping_table[map_idx];
for (index = map_idx; index < (et_entry->num_slots
+ map_idx); index++, mt_entry++) {
mt_entry->device_info = MPR_DEV_RESERVED;
mt_entry->physical_id = et_entry->enclosure_id;
mt_entry->phy_bits = et_entry->phy_bits;
mt_entry->missing_count = 0;
}
}
}
}
/**
 * _mapping_get_pcie_dev_info - get information about newly added PCIe devices
* @sc: per adapter object
* @topo_change: Topology change event entry
*
 * Search through the PCIe topology change event list and issue PCIe Device
 * Page 0 requests for the newly added PCIe devices. If a device is in an
 * enclosure, search for available space in the enclosure mapping table for
 * the device and reserve that space.
 *
 * Returns nothing.
*/
static void
_mapping_get_pcie_dev_info(struct mpr_softc *sc,
struct _map_pcie_topology_change *topo_change)
{
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
Mpi2ConfigReply_t mpi_reply;
Mpi26PCIeDevicePage0_t pcie_device_pg0;
u8 entry, enc_idx, port_idx;
u32 map_idx, index;
struct _map_port_change *port_change, *tmp_port_change;
uint64_t pcie_wwid;
struct enc_mapping_table *et_entry;
struct dev_mapping_table *mt_entry;
u8 add_code = MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
for (entry = 0; entry < topo_change->num_entries; entry++) {
port_change = &topo_change->port_details[entry];
if (port_change->is_processed || !port_change->dev_handle ||
port_change->reason != MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED)
continue;
if (mpr_config_get_pcie_device_pg0(sc, &mpi_reply,
&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE,
port_change->dev_handle)) {
port_change->is_processed = 1;
continue;
}
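/*
 * Build the 64-bit WWID from the High and Low halves reported in PCIe
 * Device Page 0.
 */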
pcie_wwid = pcie_device_pg0.WWID.High;
pcie_wwid = (pcie_wwid << 32) | pcie_device_pg0.WWID.Low;
port_change->physical_id = pcie_wwid;
port_change->slot = le16toh(pcie_device_pg0.Slot);
port_change->device_info = le32toh(pcie_device_pg0.DeviceInfo);
/*
* When using Enc/Slot mapping, if this device is an enclosure
* make sure that all of its slots can fit into the mapping
* table.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
/*
* The enclosure should already be in the enclosure
* table due to the Enclosure Add event. If not, just
* continue, nothing can be done.
*/
enc_idx = _mapping_get_enc_idx_from_handle(sc,
topo_change->enc_handle);
if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
port_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because the enclosure is not in "
"the mapping table\n", __func__,
port_change->dev_handle);
continue;
}
if (!(port_change->device_info &
MPI26_PCIE_DEVINFO_NVME)) {
port_change->is_processed = 1;
continue;
}
et_entry = &sc->enclosure_table[enc_idx];
/*
* If the enclosure already has a start_index, it's been
* mapped, so go to the next Topo change.
*/
if (et_entry->start_index != MPR_MAPTABLE_BAD_IDX)
continue;
/*
* If the Switch Handle is 0, the devices are direct
* attached. In that case, the start_index must be just
* after the reserved entries. Otherwise, find space in
* the mapping table for the enclosure's devices.
*/
if (!topo_change->switch_dev_handle) {
map_idx = sc->num_rsvd_entries;
et_entry->start_index = map_idx;
} else {
map_idx = _mapping_find_enc_map_space(sc,
et_entry);
et_entry->start_index = map_idx;
/*
* If space cannot be found to hold all of the
* enclosure's devices in the mapping table,
* there's no need to continue checking the
* other devices in this event. Set all of the
* port_details for this event (if the change is
* for an add) as already processed because none
* of these devices can be added to the mapping
* table.
*/
if (et_entry->start_index ==
MPR_MAPTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING,
"%s: failed to add the enclosure "
"with ID 0x%016jx because there is "
"no free space available in the "
"mapping table for all of the "
"enclosure's devices.\n", __func__,
(uintmax_t)et_entry->enclosure_id);
port_change->is_processed = 1;
for (port_idx = 0; port_idx <
topo_change->num_entries;
port_idx++) {
tmp_port_change =
&topo_change->port_details
[port_idx];
if (tmp_port_change->reason ==
add_code)
tmp_port_change->
is_processed = 1;
}
break;
}
}
/*
* Found space in the mapping table for this enclosure.
* Initialize each mapping table entry for the
* enclosure.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Initialize %d map "
"entries for the enclosure, starting at map index "
"%d.\n", __func__, et_entry->num_slots, map_idx);
mt_entry = &sc->mapping_table[map_idx];
for (index = map_idx; index < (et_entry->num_slots
+ map_idx); index++, mt_entry++) {
mt_entry->device_info = MPR_DEV_RESERVED;
mt_entry->physical_id = et_entry->enclosure_id;
mt_entry->phy_bits = et_entry->phy_bits;
mt_entry->missing_count = 0;
}
}
}
}
/**
 * _mapping_set_mid_to_eid - set map table data from enclosure table
* @sc: per adapter object
* @et_entry: enclosure entry
*
* Returns nothing
*/
static inline void
_mapping_set_mid_to_eid(struct mpr_softc *sc,
struct enc_mapping_table *et_entry)
{
struct dev_mapping_table *mt_entry;
u16 slots = et_entry->num_slots, map_idx;
u32 start_idx = et_entry->start_index;
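/*
 * Only copy the enclosure ID into the map table entries if the enclosure
 * has already been given space in the mapping table.
 */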
if (start_idx != MPR_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++)
mt_entry->physical_id = et_entry->enclosure_id;
}
}
/**
* _mapping_clear_removed_entries - mark the entries to be cleared
* @sc: per adapter object
*
 * Search through the removal table and mark the entries which need to be
 * flushed to the DPM. Also update the map table and enclosure table by
 * clearing the corresponding entries.
 *
 * Returns nothing.
*/
static void
_mapping_clear_removed_entries(struct mpr_softc *sc)
{
u32 remove_idx;
struct map_removal_table *remove_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
u8 done_flag = 0, num_entries, m, i;
struct enc_mapping_table *et_entry, *from, *to;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
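/*
 * First, wipe the DPM entries for every device in the removal table and
 * mark those entries to be flushed to the controller.
 */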
if (sc->is_dpm_enable) {
remove_entry = sc->removal_table;
for (remove_idx = 0; remove_idx < sc->max_devices;
remove_idx++, remove_entry++) {
if (remove_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *) sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += remove_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low = 0;
dpm_entry->PhysicalIdentifier.High = 0;
dpm_entry->DeviceIndex = 0;
dpm_entry->MappingInformation = 0;
dpm_entry->PhysicalBitsMapping = 0;
sc->dpm_flush_entry[remove_entry->
dpm_entry_num] = 1;
sc->dpm_entry_used[remove_entry->dpm_entry_num]
= 0;
remove_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
}
}
}
/*
* When using Enc/Slot mapping, if a new enclosure was added and old
* enclosure space was needed, the enclosure table may now have gaps
* that need to be closed. All enclosure mappings need to be contiguous
* so that space can be reused correctly if available.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
num_entries = sc->num_enc_table_entries;
while (!done_flag) {
done_flag = 1;
et_entry = sc->enclosure_table;
for (i = 0; i < num_entries; i++, et_entry++) {
if (!et_entry->enc_handle && et_entry->
init_complete) {
done_flag = 0;
if (i != (num_entries - 1)) {
from = &sc->enclosure_table
[i+1];
to = &sc->enclosure_table[i];
for (m = i; m < (num_entries -
1); m++, from++, to++) {
_mapping_set_mid_to_eid
(sc, to);
*to = *from;
}
_mapping_clear_enc_entry(to);
sc->num_enc_table_entries--;
num_entries =
sc->num_enc_table_entries;
} else {
_mapping_clear_enc_entry
(et_entry);
sc->num_enc_table_entries--;
num_entries =
sc->num_enc_table_entries;
}
}
}
}
}
}
/**
 * _mapping_add_new_device - add the new device into mapping table
* @sc: per adapter object
* @topo_change: Topology change event entry
*
 * Search through the topology change event list and update the map table,
 * enclosure table, and DPM pages for the newly added devices.
 *
 * Returns nothing.
*/
static void
_mapping_add_new_device(struct mpr_softc *sc,
struct _map_topology_change *topo_change)
{
u8 enc_idx, missing_cnt, is_removed = 0;
u16 dpm_idx;
u32 search_idx, map_idx;
u32 entry;
struct dev_mapping_table *mt_entry;
struct enc_mapping_table *et_entry;
struct _map_phy_change *phy_change;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
Mpi2DriverMap0Entry_t *dpm_entry;
uint64_t temp64_var;
u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER);
u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
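/*
 * Walk each PHY entry in the topology event and map the newly added
 * devices according to the mapping mode (Enc/Slot or Device Persistence).
 */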
for (entry = 0; entry < topo_change->num_entries; entry++) {
phy_change = &topo_change->phy_details[entry];
if (phy_change->is_processed)
continue;
if (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED ||
!phy_change->dev_handle) {
phy_change->is_processed = 1;
continue;
}
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
enc_idx = _mapping_get_enc_idx_from_handle
(sc, topo_change->enc_handle);
if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
phy_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because the enclosure is not in "
"the mapping table\n", __func__,
phy_change->dev_handle);
continue;
}
/*
* If the enclosure's start_index is BAD here, it means
* that there is no room in the mapping table to cover
* all of the devices that could be in the enclosure.
* There's no reason to process any of the devices for
* this enclosure since they can't be mapped.
*/
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) {
phy_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because there is no free space "
"available in the mapping table\n",
__func__, phy_change->dev_handle);
continue;
}
/*
* Add this device to the mapping table at the correct
* offset where space was found to map the enclosure.
* Then setup the DPM entry information if being used.
*/
map_idx = et_entry->start_index + phy_change->slot -
et_entry->start_slot;
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id = phy_change->physical_id;
mt_entry->id = map_idx;
mt_entry->dev_handle = phy_change->dev_handle;
mt_entry->missing_count = 0;
mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
mt_entry->device_info = phy_change->device_info |
(MPR_DEV_RESERVED | MPR_MAP_IN_USE);
if (sc->is_dpm_enable) {
dpm_idx = et_entry->dpm_entry_num;
if (dpm_idx == MPR_DPM_BAD_IDX)
dpm_idx = _mapping_get_dpm_idx_from_id
(sc, et_entry->enclosure_id,
et_entry->phy_bits);
if (dpm_idx == MPR_DPM_BAD_IDX) {
dpm_idx = _mapping_get_free_dpm_idx(sc);
if (dpm_idx != MPR_DPM_BAD_IDX) {
dpm_entry =
(Mpi2DriverMap0Entry_t *)
((u8 *) sc->dpm_pg0 +
hdr_sz);
dpm_entry += dpm_idx;
dpm_entry->
PhysicalIdentifier.Low =
(0xFFFFFFFF &
et_entry->enclosure_id);
dpm_entry->
PhysicalIdentifier.High =
(et_entry->enclosure_id
>> 32);
dpm_entry->DeviceIndex =
(U16)et_entry->start_index;
dpm_entry->MappingInformation =
et_entry->num_slots;
dpm_entry->MappingInformation
<<= map_shift;
dpm_entry->PhysicalBitsMapping
= et_entry->phy_bits;
et_entry->dpm_entry_num =
dpm_idx;
sc->dpm_entry_used[dpm_idx] = 1;
sc->dpm_flush_entry[dpm_idx] =
1;
phy_change->is_processed = 1;
} else {
phy_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR |
MPR_MAPPING, "%s: failed "
"to add the device with "
"handle 0x%04x to "
"persistent table because "
"there is no free space "
"available\n", __func__,
phy_change->dev_handle);
}
} else {
et_entry->dpm_entry_num = dpm_idx;
mt_entry->dpm_entry_num = dpm_idx;
}
}
et_entry->init_complete = 1;
} else if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
/*
* Get the mapping table index for this device. If it's
* not in the mapping table yet, find a free entry if
* one is available. If there are no free entries, look
* for the entry that has the highest missing count. If
* none of that works to find an entry in the mapping
* table, there is a problem. Log a message and just
* continue on.
*/
map_idx = _mapping_get_mt_idx_from_id
(sc, phy_change->physical_id);
if (map_idx == MPR_MAPTABLE_BAD_IDX) {
search_idx = sc->num_rsvd_entries;
if (topo_change->exp_handle)
search_idx += max_num_phy_ids;
map_idx = _mapping_get_free_mt_idx(sc,
search_idx);
}
/*
* If an entry will be used that has a missing device,
* clear its entry from the DPM in the controller.
*/
if (map_idx == MPR_MAPTABLE_BAD_IDX) {
map_idx = _mapping_get_high_missing_mt_idx(sc);
if (map_idx != MPR_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
_mapping_add_to_removal_table(sc,
mt_entry->dpm_entry_num);
is_removed = 1;
mt_entry->init_complete = 0;
}
}
if (map_idx != MPR_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id = phy_change->physical_id;
mt_entry->id = map_idx;
mt_entry->dev_handle = phy_change->dev_handle;
mt_entry->missing_count = 0;
mt_entry->device_info = phy_change->device_info
| (MPR_DEV_RESERVED | MPR_MAP_IN_USE);
} else {
phy_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because there is no free space "
"available in the mapping table\n",
__func__, phy_change->dev_handle);
continue;
}
if (sc->is_dpm_enable) {
if (mt_entry->dpm_entry_num !=
MPR_DPM_BAD_IDX) {
dpm_idx = mt_entry->dpm_entry_num;
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *)sc->dpm_pg0 + hdr_sz);
dpm_entry += dpm_idx;
missing_cnt = dpm_entry->
MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
temp64_var = dpm_entry->
PhysicalIdentifier.High;
temp64_var = (temp64_var << 32) |
dpm_entry->PhysicalIdentifier.Low;
/*
* If the Mapping Table's info is not
* the same as the DPM entry, clear the
* init_complete flag so that it's
* updated.
*/
if ((mt_entry->physical_id ==
temp64_var) && !missing_cnt)
mt_entry->init_complete = 1;
else
mt_entry->init_complete = 0;
} else {
dpm_idx = _mapping_get_free_dpm_idx(sc);
mt_entry->init_complete = 0;
}
if (dpm_idx != MPR_DPM_BAD_IDX &&
!mt_entry->init_complete) {
mt_entry->dpm_entry_num = dpm_idx;
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *)sc->dpm_pg0 + hdr_sz);
dpm_entry += dpm_idx;
dpm_entry->PhysicalIdentifier.Low =
(0xFFFFFFFF &
mt_entry->physical_id);
dpm_entry->PhysicalIdentifier.High =
(mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = (U16) map_idx;
dpm_entry->MappingInformation = 0;
dpm_entry->PhysicalBitsMapping = 0;
sc->dpm_entry_used[dpm_idx] = 1;
sc->dpm_flush_entry[dpm_idx] = 1;
phy_change->is_processed = 1;
} else if (dpm_idx == MPR_DPM_BAD_IDX) {
phy_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING,
"%s: failed to add the device with "
"handle 0x%04x to persistent table "
"because there is no free space "
"available\n", __func__,
phy_change->dev_handle);
}
}
mt_entry->init_complete = 1;
}
phy_change->is_processed = 1;
}
if (is_removed)
_mapping_clear_removed_entries(sc);
}
/**
 * _mapping_add_new_pcie_device - add the new PCIe device into mapping table
* @sc: per adapter object
* @topo_change: Topology change event entry
*
 * Search through the PCIe topology change event list and update the map
 * table, enclosure table, and DPM pages for the newly added devices.
 *
 * Returns nothing.
*/
static void
_mapping_add_new_pcie_device(struct mpr_softc *sc,
struct _map_pcie_topology_change *topo_change)
{
u8 enc_idx, missing_cnt, is_removed = 0;
u16 dpm_idx;
u32 search_idx, map_idx;
u32 entry;
struct dev_mapping_table *mt_entry;
struct enc_mapping_table *et_entry;
struct _map_port_change *port_change;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
Mpi2DriverMap0Entry_t *dpm_entry;
uint64_t temp64_var;
u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER);
u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
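/*
 * Walk each port entry in the PCIe topology event and map the newly added
 * devices according to the mapping mode (Enc/Slot or Device Persistence).
 */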
for (entry = 0; entry < topo_change->num_entries; entry++) {
port_change = &topo_change->port_details[entry];
if (port_change->is_processed)
continue;
if (port_change->reason != MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED ||
!port_change->dev_handle) {
port_change->is_processed = 1;
continue;
}
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
enc_idx = _mapping_get_enc_idx_from_handle
(sc, topo_change->enc_handle);
if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
port_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because the enclosure is not in "
"the mapping table\n", __func__,
port_change->dev_handle);
continue;
}
/*
* If the enclosure's start_index is BAD here, it means
* that there is no room in the mapping table to cover
* all of the devices that could be in the enclosure.
* There's no reason to process any of the devices for
* this enclosure since they can't be mapped.
*/
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) {
port_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because there is no free space "
"available in the mapping table\n",
__func__, port_change->dev_handle);
continue;
}
/*
* Add this device to the mapping table at the correct
* offset where space was found to map the enclosure.
* Then setup the DPM entry information if being used.
*/
map_idx = et_entry->start_index + port_change->slot -
et_entry->start_slot;
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id = port_change->physical_id;
mt_entry->id = map_idx;
mt_entry->dev_handle = port_change->dev_handle;
mt_entry->missing_count = 0;
mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
mt_entry->device_info = port_change->device_info |
(MPR_DEV_RESERVED | MPR_MAP_IN_USE);
if (sc->is_dpm_enable) {
dpm_idx = et_entry->dpm_entry_num;
if (dpm_idx == MPR_DPM_BAD_IDX)
dpm_idx = _mapping_get_dpm_idx_from_id
(sc, et_entry->enclosure_id,
et_entry->phy_bits);
if (dpm_idx == MPR_DPM_BAD_IDX) {
dpm_idx = _mapping_get_free_dpm_idx(sc);
if (dpm_idx != MPR_DPM_BAD_IDX) {
dpm_entry =
(Mpi2DriverMap0Entry_t *)
((u8 *) sc->dpm_pg0 +
hdr_sz);
dpm_entry += dpm_idx;
dpm_entry->
PhysicalIdentifier.Low =
(0xFFFFFFFF &
et_entry->enclosure_id);
dpm_entry->
PhysicalIdentifier.High =
(et_entry->enclosure_id
>> 32);
dpm_entry->DeviceIndex =
(U16)et_entry->start_index;
dpm_entry->MappingInformation =
et_entry->num_slots;
dpm_entry->MappingInformation
<<= map_shift;
dpm_entry->PhysicalBitsMapping
= et_entry->phy_bits;
et_entry->dpm_entry_num =
dpm_idx;
sc->dpm_entry_used[dpm_idx] = 1;
sc->dpm_flush_entry[dpm_idx] =
1;
port_change->is_processed = 1;
} else {
port_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR |
MPR_MAPPING, "%s: failed "
"to add the device with "
"handle 0x%04x to "
"persistent table because "
"there is no free space "
"available\n", __func__,
port_change->dev_handle);
}
} else {
et_entry->dpm_entry_num = dpm_idx;
mt_entry->dpm_entry_num = dpm_idx;
}
}
et_entry->init_complete = 1;
} else if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
/*
* Get the mapping table index for this device. If it's
* not in the mapping table yet, find a free entry if
* one is available. If there are no free entries, look
* for the entry that has the highest missing count. If
* none of that works to find an entry in the mapping
* table, there is a problem. Log a message and just
* continue on.
*/
map_idx = _mapping_get_mt_idx_from_id
(sc, port_change->physical_id);
if (map_idx == MPR_MAPTABLE_BAD_IDX) {
search_idx = sc->num_rsvd_entries;
if (topo_change->switch_dev_handle)
search_idx += max_num_phy_ids;
map_idx = _mapping_get_free_mt_idx(sc,
search_idx);
}
/*
* If an entry will be used that has a missing device,
* clear its entry from the DPM in the controller.
*/
if (map_idx == MPR_MAPTABLE_BAD_IDX) {
map_idx = _mapping_get_high_missing_mt_idx(sc);
if (map_idx != MPR_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
_mapping_add_to_removal_table(sc,
mt_entry->dpm_entry_num);
is_removed = 1;
mt_entry->init_complete = 0;
}
}
if (map_idx != MPR_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id =
port_change->physical_id;
mt_entry->id = map_idx;
mt_entry->dev_handle = port_change->dev_handle;
mt_entry->missing_count = 0;
mt_entry->device_info =
port_change->device_info |
(MPR_DEV_RESERVED | MPR_MAP_IN_USE);
} else {
port_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because there is no free space "
"available in the mapping table\n",
__func__, port_change->dev_handle);
continue;
}
if (sc->is_dpm_enable) {
if (mt_entry->dpm_entry_num !=
MPR_DPM_BAD_IDX) {
dpm_idx = mt_entry->dpm_entry_num;
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *)sc->dpm_pg0 + hdr_sz);
dpm_entry += dpm_idx;
missing_cnt = dpm_entry->
MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
temp64_var = dpm_entry->
PhysicalIdentifier.High;
temp64_var = (temp64_var << 32) |
dpm_entry->PhysicalIdentifier.Low;
/*
* If the Mapping Table's info is not
* the same as the DPM entry, clear the
* init_complete flag so that it's
* updated.
*/
if ((mt_entry->physical_id ==
temp64_var) && !missing_cnt)
mt_entry->init_complete = 1;
else
mt_entry->init_complete = 0;
} else {
dpm_idx = _mapping_get_free_dpm_idx(sc);
mt_entry->init_complete = 0;
}
if (dpm_idx != MPR_DPM_BAD_IDX &&
!mt_entry->init_complete) {
mt_entry->dpm_entry_num = dpm_idx;
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *)sc->dpm_pg0 + hdr_sz);
dpm_entry += dpm_idx;
dpm_entry->PhysicalIdentifier.Low =
(0xFFFFFFFF &
mt_entry->physical_id);
dpm_entry->PhysicalIdentifier.High =
(mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = (U16) map_idx;
dpm_entry->MappingInformation = 0;
dpm_entry->PhysicalBitsMapping = 0;
sc->dpm_entry_used[dpm_idx] = 1;
sc->dpm_flush_entry[dpm_idx] = 1;
port_change->is_processed = 1;
} else if (dpm_idx == MPR_DPM_BAD_IDX) {
port_change->is_processed = 1;
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING,
"%s: failed to add the device with "
"handle 0x%04x to persistent table "
"because there is no free space "
"available\n", __func__,
port_change->dev_handle);
}
}
mt_entry->init_complete = 1;
}
port_change->is_processed = 1;
}
if (is_removed)
_mapping_clear_removed_entries(sc);
}
/**
 * _mapping_flush_dpm_pages - flush the DPM pages to NVRAM
* @sc: per adapter object
*
* Returns nothing
*/
static void
_mapping_flush_dpm_pages(struct mpr_softc *sc)
{
Mpi2DriverMap0Entry_t *dpm_entry;
Mpi2ConfigReply_t mpi_reply;
Mpi2DriverMappingPage0_t config_page;
u16 entry_num;
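/*
 * Write back only the DPM entries that have been marked for a flush since
 * the last write, converting each entry to little-endian before the write
 * and back to host order afterwards.
 */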
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
if (!sc->dpm_flush_entry[entry_num])
continue;
memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += entry_num;
dpm_entry->MappingInformation = htole16(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = htole16(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping = htole32(dpm_entry->
PhysicalBitsMapping);
memcpy(&config_page.Entry, (u8 *)dpm_entry,
sizeof(Mpi2DriverMap0Entry_t));
/* TODO: How to handle failed writes? */
mpr_dprint(sc, MPR_MAPPING, "%s: Flushing DPM entry %d.\n",
__func__, entry_num);
if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
entry_num)) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Flush of "
"DPM entry %d for device failed\n", __func__,
entry_num);
} else
sc->dpm_flush_entry[entry_num] = 0;
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->
PhysicalBitsMapping);
}
}
/**
 * mpr_mapping_allocate_memory - allocate the memory required for mapping tables
* @sc: per adapter object
*
* Allocates the memory for all the tables required for host mapping
*
* Return 0 on success or non-zero on failure.
*/
int
mpr_mapping_allocate_memory(struct mpr_softc *sc)
{
uint32_t dpm_pg0_sz;
- sc->mapping_table = mallocarray(sc->max_devices,
- sizeof(struct dev_mapping_table), M_MPR, M_ZERO|M_NOWAIT);
+ sc->mapping_table = malloc((sizeof(struct dev_mapping_table) *
+ sc->max_devices), M_MPR, M_ZERO|M_NOWAIT);
if (!sc->mapping_table)
goto free_resources;
- sc->removal_table = mallocarray(sc->max_devices,
- sizeof(struct map_removal_table), M_MPR, M_ZERO|M_NOWAIT);
+ sc->removal_table = malloc((sizeof(struct map_removal_table) *
+ sc->max_devices), M_MPR, M_ZERO|M_NOWAIT);
if (!sc->removal_table)
goto free_resources;
- sc->enclosure_table = mallocarray(sc->max_enclosures,
- sizeof(struct enc_mapping_table), M_MPR, M_ZERO|M_NOWAIT);
+ sc->enclosure_table = malloc((sizeof(struct enc_mapping_table) *
+ sc->max_enclosures), M_MPR, M_ZERO|M_NOWAIT);
if (!sc->enclosure_table)
goto free_resources;
- sc->dpm_entry_used = mallocarray(sc->max_dpm_entries, sizeof(u8),
+ sc->dpm_entry_used = malloc((sizeof(u8) * sc->max_dpm_entries),
M_MPR, M_ZERO|M_NOWAIT);
if (!sc->dpm_entry_used)
goto free_resources;
- sc->dpm_flush_entry = mallocarray(sc->max_dpm_entries, sizeof(u8),
+ sc->dpm_flush_entry = malloc((sizeof(u8) * sc->max_dpm_entries),
M_MPR, M_ZERO|M_NOWAIT);
if (!sc->dpm_flush_entry)
goto free_resources;
dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
(sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
sc->dpm_pg0 = malloc(dpm_pg0_sz, M_MPR, M_ZERO|M_NOWAIT);
if (!sc->dpm_pg0) {
printf("%s: memory alloc failed for dpm page; disabling dpm\n",
__func__);
sc->is_dpm_enable = 0;
}
return 0;
free_resources:
free(sc->mapping_table, M_MPR);
free(sc->removal_table, M_MPR);
free(sc->enclosure_table, M_MPR);
free(sc->dpm_entry_used, M_MPR);
free(sc->dpm_flush_entry, M_MPR);
free(sc->dpm_pg0, M_MPR);
printf("%s: device initialization failed due to failure in mapping "
"table memory allocation\n", __func__);
return -1;
}
/**
 * mpr_mapping_free_memory - frees the memory allocated for mapping tables
* @sc: per adapter object
*
* Returns nothing.
*/
void
mpr_mapping_free_memory(struct mpr_softc *sc)
{
free(sc->mapping_table, M_MPR);
free(sc->removal_table, M_MPR);
free(sc->enclosure_table, M_MPR);
free(sc->dpm_entry_used, M_MPR);
free(sc->dpm_flush_entry, M_MPR);
free(sc->dpm_pg0, M_MPR);
}
static bool
_mapping_process_dpm_pg0(struct mpr_softc *sc)
{
u8 missing_cnt, enc_idx;
u16 slot_id, entry_num, num_slots;
u32 map_idx, dev_idx, start_idx, end_idx;
struct dev_mapping_table *mt_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
struct enc_mapping_table *et_entry;
u64 physical_id;
u32 phy_bits = 0;
/*
* start_idx and end_idx are only used for IR.
*/
if (sc->ir_firmware)
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
/*
* Look through all of the DPM entries that were read from the
* controller and copy them over to the driver's internal table if they
* have a non-zero ID. At this point, any ID with a value of 0 would be
* invalid, so don't copy it.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Start copy of %d DPM entries into the "
"mapping table.\n", __func__, sc->max_dpm_entries);
dpm_entry = (Mpi2DriverMap0Entry_t *) ((uint8_t *) sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
dpm_entry++) {
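/*
 * Reassemble the 64-bit physical identifier. A value of zero marks the
 * DPM entry as unused, so skip it.
 */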
physical_id = dpm_entry->PhysicalIdentifier.High;
physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
if (!physical_id) {
sc->dpm_entry_used[entry_num] = 0;
continue;
}
sc->dpm_entry_used[entry_num] = 1;
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
missing_cnt = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
dev_idx = le16toh(dpm_entry->DeviceIndex);
phy_bits = le32toh(dpm_entry->PhysicalBitsMapping);
/*
* Volumes are at special locations in the mapping table so
* account for that. Volume mapping table entries do not depend
* on the type of mapping, so continue the loop after adding
* volumes to the mapping table.
*/
if (sc->ir_firmware && (dev_idx >= start_idx) &&
(dev_idx <= end_idx)) {
mt_entry = &sc->mapping_table[dev_idx];
mt_entry->physical_id =
dpm_entry->PhysicalIdentifier.High;
mt_entry->physical_id = (mt_entry->physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
mt_entry->id = dev_idx;
mt_entry->missing_count = missing_cnt;
mt_entry->dpm_entry_num = entry_num;
mt_entry->device_info = MPR_DEV_RESERVED;
continue;
}
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
/*
* The dev_idx for an enclosure is the start index. If
* the start index is within the controller's default
* enclosure area, set the number of slots for this
* enclosure to the max allowed. Otherwise, it should be
* a normal enclosure and the number of slots is in the
* DPM entry's Mapping Information.
*/
if (dev_idx < (sc->num_rsvd_entries +
max_num_phy_ids)) {
slot_id = 0;
if (ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1)
slot_id = 1;
num_slots = max_num_phy_ids;
} else {
slot_id = 0;
num_slots = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_SLOT_MASK;
num_slots >>= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
}
enc_idx = sc->num_enc_table_entries;
if (enc_idx >= sc->max_enclosures) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"Number of enclosure entries in DPM exceeds "
"the max allowed of %d.\n", __func__,
sc->max_enclosures);
break;
}
sc->num_enc_table_entries++;
et_entry = &sc->enclosure_table[enc_idx];
physical_id = dpm_entry->PhysicalIdentifier.High;
et_entry->enclosure_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
et_entry->start_index = dev_idx;
et_entry->dpm_entry_num = entry_num;
et_entry->num_slots = num_slots;
et_entry->start_slot = slot_id;
et_entry->missing_count = missing_cnt;
et_entry->phy_bits = phy_bits;
/*
* Initialize all entries for this enclosure in the
* mapping table and mark them as reserved. The actual
* devices have not been processed yet but when they are
* they will use these entries. If an entry is found
* that already has a valid DPM index, the mapping table
* is corrupt. This can happen if the mapping type is
* changed without clearing all of the DPM entries in
* the controller.
*/
mt_entry = &sc->mapping_table[dev_idx];
for (map_idx = dev_idx; map_idx < (dev_idx + num_slots);
map_idx++, mt_entry++) {
if (mt_entry->dpm_entry_num !=
MPR_DPM_BAD_IDX) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING,
"%s: Conflict in mapping table for "
"enclosure %d\n", __func__,
enc_idx);
goto fail;
}
physical_id =
dpm_entry->PhysicalIdentifier.High;
mt_entry->physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
mt_entry->phy_bits = phy_bits;
mt_entry->id = dev_idx;
mt_entry->dpm_entry_num = entry_num;
mt_entry->missing_count = missing_cnt;
mt_entry->device_info = MPR_DEV_RESERVED;
}
} else if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
/*
* Device mapping, so simply copy the DPM entries to the
* mapping table, but check for a corrupt mapping table
* (as described above in Enc/Slot mapping).
*/
map_idx = dev_idx;
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"Conflict in mapping table for device %d\n",
__func__, map_idx);
goto fail;
}
physical_id = dpm_entry->PhysicalIdentifier.High;
mt_entry->physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
mt_entry->phy_bits = phy_bits;
mt_entry->id = dev_idx;
mt_entry->missing_count = missing_cnt;
mt_entry->dpm_entry_num = entry_num;
mt_entry->device_info = MPR_DEV_RESERVED;
}
} /* close the loop for the DPM table */
return (true);
fail:
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
sc->dpm_entry_used[entry_num] = 0;
/*
 * For IR firmware, it may be necessary to wipe out the
 * sc->mapping_table volumes too.
*/
}
sc->num_enc_table_entries = 0;
return (false);
}
/*
 * mpr_mapping_check_devices - start-of-day check for device availability
* @sc: per adapter object
*
* Returns nothing.
*/
void
mpr_mapping_check_devices(void *data)
{
u32 i;
struct dev_mapping_table *mt_entry;
struct mpr_softc *sc = (struct mpr_softc *)data;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
struct enc_mapping_table *et_entry;
u32 start_idx = 0, end_idx = 0;
u8 stop_device_checks = 0;
MPR_FUNCTRACE(sc);
/*
* Clear this flag so that this function is never called again except
* within this function if the check needs to be done again. The
* purpose is to check for missing devices that are currently in the
* mapping table so do this only at driver init after discovery.
*/
sc->track_mapping_events = 0;
/*
* callout synchronization
* This is used to prevent race conditions for the callout.
*/
mpr_dprint(sc, MPR_MAPPING, "%s: Start check for missing devices.\n",
__func__);
mtx_assert(&sc->mpr_mtx, MA_OWNED);
if ((callout_pending(&sc->device_check_callout)) ||
(!callout_active(&sc->device_check_callout))) {
mpr_dprint(sc, MPR_MAPPING, "%s: Device Check Callout is "
"already pending or not active.\n", __func__);
return;
}
callout_deactivate(&sc->device_check_callout);
/*
* Use callout to check if any devices in the mapping table have been
* processed yet. If ALL devices are marked as not init_complete, no
* devices have been processed and mapped. Until devices are mapped
* there's no reason to mark them as missing. Continue resetting this
* callout until devices have been mapped.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
et_entry = sc->enclosure_table;
for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) {
if (et_entry->init_complete) {
stop_device_checks = 1;
break;
}
}
} else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
mt_entry = sc->mapping_table;
for (i = 0; i < sc->max_devices; i++, mt_entry++) {
if (mt_entry->init_complete) {
stop_device_checks = 1;
break;
}
}
}
/*
* Setup another callout check after a delay. Keep doing this until
* devices are mapped.
*/
if (!stop_device_checks) {
mpr_dprint(sc, MPR_MAPPING, "%s: No devices have been mapped. "
"Reset callout to check again after a %d second delay.\n",
__func__, MPR_MISSING_CHECK_DELAY);
callout_reset(&sc->device_check_callout,
MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
sc);
return;
}
mpr_dprint(sc, MPR_MAPPING, "%s: Device check complete.\n", __func__);
/*
* Depending on the mapping type, check if devices have been processed
* and update their missing counts if not processed.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
et_entry = sc->enclosure_table;
for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) {
if (!et_entry->init_complete) {
if (et_entry->missing_count <
MPR_MAX_MISSING_COUNT) {
mpr_dprint(sc, MPR_MAPPING, "%s: "
"Enclosure %d is missing from the "
"topology. Update its missing "
"count.\n", __func__, i);
et_entry->missing_count++;
if (et_entry->dpm_entry_num !=
MPR_DPM_BAD_IDX) {
_mapping_commit_enc_entry(sc,
et_entry);
}
}
et_entry->init_complete = 1;
}
}
if (!sc->ir_firmware)
return;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
} else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
start_idx = 0;
end_idx = sc->max_devices - 1;
mt_entry = sc->mapping_table;
}
/*
* The start and end indices have been set above according to the
* mapping type. Go through these mappings and update any entries that
* do not have the init_complete flag set, which means they are missing.
*/
if (end_idx == 0)
return;
for (i = start_idx; i < (end_idx + 1); i++, mt_entry++) {
if (mt_entry->device_info & MPR_DEV_RESERVED
&& !mt_entry->physical_id)
mt_entry->init_complete = 1;
else if (mt_entry->device_info & MPR_DEV_RESERVED) {
if (!mt_entry->init_complete) {
mpr_dprint(sc, MPR_MAPPING, "%s: Device in "
"mapping table at index %d is missing from "
"topology. Update its missing count.\n",
__func__, i);
if (mt_entry->missing_count <
MPR_MAX_MISSING_COUNT) {
mt_entry->missing_count++;
if (mt_entry->dpm_entry_num !=
MPR_DPM_BAD_IDX) {
_mapping_commit_map_entry(sc,
mt_entry);
}
}
mt_entry->init_complete = 1;
}
}
}
}
/**
* mpr_mapping_initialize - initialize mapping tables
* @sc: per adapter object
*
 * Read the controller's persistent mapping tables into the internal data area.
*
* Return 0 for success or non-zero for failure.
*/
int
mpr_mapping_initialize(struct mpr_softc *sc)
{
uint16_t volume_mapping_flags, dpm_pg0_sz;
uint32_t i;
Mpi2ConfigReply_t mpi_reply;
int error;
uint8_t retry_count;
uint16_t ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
/* The additional 1 accounts for the virtual enclosure
* created for the controller
*/
sc->max_enclosures = sc->facts->MaxEnclosures + 1;
sc->max_expanders = sc->facts->MaxSasExpanders;
sc->max_volumes = sc->facts->MaxVolumes;
sc->max_devices = sc->facts->MaxTargets + sc->max_volumes;
sc->pending_map_events = 0;
sc->num_enc_table_entries = 0;
sc->num_rsvd_entries = 0;
sc->max_dpm_entries = sc->ioc_pg8.MaxPersistentEntries;
sc->is_dpm_enable = (sc->max_dpm_entries) ? 1 : 0;
sc->track_mapping_events = 0;
mpr_dprint(sc, MPR_MAPPING, "%s: Mapping table has a max of %d entries "
"and DPM has a max of %d entries.\n", __func__, sc->max_devices,
sc->max_dpm_entries);
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING)
sc->is_dpm_enable = 0;
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
sc->num_rsvd_entries = 1;
volume_mapping_flags = sc->ioc_pg8.IRVolumeMappingFlags &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
if (sc->ir_firmware && (volume_mapping_flags ==
MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING))
sc->num_rsvd_entries += sc->max_volumes;
error = mpr_mapping_allocate_memory(sc);
if (error)
return (error);
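/*
 * Start with clean mapping, enclosure, and removal tables before any
 * persistent data is read from the controller.
 */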
for (i = 0; i < sc->max_devices; i++)
_mapping_clear_map_entry(sc->mapping_table + i);
for (i = 0; i < sc->max_enclosures; i++)
_mapping_clear_enc_entry(sc->enclosure_table + i);
for (i = 0; i < sc->max_devices; i++) {
sc->removal_table[i].dev_handle = 0;
sc->removal_table[i].dpm_entry_num = MPR_DPM_BAD_IDX;
}
memset(sc->dpm_entry_used, 0, sc->max_dpm_entries);
memset(sc->dpm_flush_entry, 0, sc->max_dpm_entries);
if (sc->is_dpm_enable) {
dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
(sc->max_dpm_entries *
sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
retry_count = 0;
retry_read_dpm:
if (mpr_config_get_dpm_pg0(sc, &mpi_reply, sc->dpm_pg0,
dpm_pg0_sz)) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: DPM page "
"read failed.\n", __func__);
if (retry_count < 3) {
retry_count++;
goto retry_read_dpm;
}
sc->is_dpm_enable = 0;
}
}
if (sc->is_dpm_enable) {
if (!_mapping_process_dpm_pg0(sc))
sc->is_dpm_enable = 0;
}
if (! sc->is_dpm_enable) {
mpr_dprint(sc, MPR_MAPPING, "%s: DPM processing is disabled. "
"Device mappings will not persist across reboots or "
"resets.\n", __func__);
}
sc->track_mapping_events = 1;
return 0;
}
/**
* mpr_mapping_exit - clear mapping table and associated memory
* @sc: per adapter object
*
* Returns nothing.
*/
void
mpr_mapping_exit(struct mpr_softc *sc)
{
_mapping_flush_dpm_pages(sc);
mpr_mapping_free_memory(sc);
}
/**
* mpr_mapping_get_tid - return the target id for sas device and handle
* @sc: per adapter object
* @sas_address: sas address of the device
* @handle: device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mpr_mapping_get_tid(struct mpr_softc *sc, uint64_t sas_address, u16 handle)
{
u32 map_idx;
struct dev_mapping_table *mt_entry;
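/*
 * Linear search of the mapping table for an entry that matches both the
 * device handle and the SAS address.
 */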
for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->dev_handle == handle && mt_entry->physical_id ==
sas_address)
return mt_entry->id;
}
return MPR_MAP_BAD_ID;
}
/**
* mpr_mapping_get_tid_from_handle - find a target id in mapping table using
* only the dev handle. This is just a wrapper function for the local function
* _mapping_get_mt_idx_from_handle.
* @sc: per adapter object
* @handle: device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mpr_mapping_get_tid_from_handle(struct mpr_softc *sc, u16 handle)
{
return (_mapping_get_mt_idx_from_handle(sc, handle));
}
/**
* mpr_mapping_get_raid_tid - return the target id for raid device
* @sc: per adapter object
* @wwid: world wide identifier for raid volume
* @volHandle: volume device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mpr_mapping_get_raid_tid(struct mpr_softc *sc, u64 wwid, u16 volHandle)
{
u32 start_idx, end_idx, map_idx;
struct dev_mapping_table *mt_entry;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
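/*
 * Volumes occupy the IR range of the mapping table, so only that window
 * needs to be searched.
 */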
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
if (mt_entry->dev_handle == volHandle &&
mt_entry->physical_id == wwid)
return mt_entry->id;
}
return MPR_MAP_BAD_ID;
}
/**
* mpr_mapping_get_raid_tid_from_handle - find raid device in mapping table
* using only the volume dev handle. This is just a wrapper function for the
* local function _mapping_get_ir_mt_idx_from_handle.
* @sc: per adapter object
* @volHandle: volume device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mpr_mapping_get_raid_tid_from_handle(struct mpr_softc *sc, u16 volHandle)
{
return (_mapping_get_ir_mt_idx_from_handle(sc, volHandle));
}
/**
* mpr_mapping_enclosure_dev_status_change_event - handle enclosure events
* @sc: per adapter object
* @event_data: event data payload
*
 * Returns nothing.
*/
void
mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
u8 enc_idx, missing_count;
struct enc_mapping_table *et_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
u8 update_phy_bits = 0;
u32 saved_phy_bits;
uint64_t temp64_var;
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) !=
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING)
goto out;
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_ADDED) {
if (!event_data->NumSlots) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Enclosure "
"with handle = 0x%x reported 0 slots.\n", __func__,
le16toh(event_data->EnclosureHandle));
goto out;
}
temp64_var = event_data->EnclosureLogicalID.High;
temp64_var = (temp64_var << 32) |
event_data->EnclosureLogicalID.Low;
enc_idx = _mapping_get_enc_idx_from_id(sc, temp64_var,
event_data->PhyBits);
/*
* If the Added enclosure is already in the Enclosure Table,
 * make sure that all the enclosure info is up to date. If
* the enclosure was missing and has just been added back, or if
* the enclosure's Phy Bits have changed, clear the missing
* count and update the Phy Bits in the mapping table and in the
* DPM, if it's being used.
*/
if (enc_idx != MPR_ENCTABLE_BAD_IDX) {
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->init_complete &&
!et_entry->missing_count) {
mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d "
"is already present with handle = 0x%x\n",
__func__, enc_idx, et_entry->enc_handle);
goto out;
}
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->start_slot = le16toh(event_data->StartSlot);
saved_phy_bits = et_entry->phy_bits;
et_entry->phy_bits |= le32toh(event_data->PhyBits);
if (saved_phy_bits != et_entry->phy_bits)
update_phy_bits = 1;
if (et_entry->missing_count || update_phy_bits) {
et_entry->missing_count = 0;
if (sc->is_dpm_enable &&
et_entry->dpm_entry_num !=
MPR_DPM_BAD_IDX) {
dpm_entry += et_entry->dpm_entry_num;
missing_count =
(u8)(dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK);
if (missing_count || update_phy_bits) {
dpm_entry->MappingInformation
= et_entry->num_slots;
dpm_entry->MappingInformation
<<= map_shift;
dpm_entry->PhysicalBitsMapping
= et_entry->phy_bits;
sc->dpm_flush_entry[et_entry->
dpm_entry_num] = 1;
}
}
}
} else {
/*
* This is a new enclosure that is being added.
* Initialize the Enclosure Table entry. It will be
* finalized when a device is added for the enclosure
* and the enclosure has enough space in the Mapping
* Table to map its devices.
*/
enc_idx = sc->num_enc_table_entries;
if (enc_idx >= sc->max_enclosures) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: "
"Enclosure cannot be added to mapping "
"table because it's full.\n", __func__);
goto out;
}
sc->num_enc_table_entries++;
et_entry = &sc->enclosure_table[enc_idx];
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->enclosure_id = le64toh(event_data->
EnclosureLogicalID.High);
et_entry->enclosure_id =
((et_entry->enclosure_id << 32) |
le64toh(event_data->EnclosureLogicalID.Low));
et_entry->start_index = MPR_MAPTABLE_BAD_IDX;
et_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
et_entry->num_slots = le16toh(event_data->NumSlots);
et_entry->start_slot = le16toh(event_data->StartSlot);
et_entry->phy_bits = le32toh(event_data->PhyBits);
}
et_entry->init_complete = 1;
} else if (event_data->ReasonCode ==
MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING) {
/*
* An enclosure was removed. Update its missing count and then
* update the DPM entry with the new missing count for the
* enclosure.
*/
enc_idx = _mapping_get_enc_idx_from_handle(sc,
le16toh(event_data->EnclosureHandle));
if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Cannot "
"unmap enclosure %d because it has already been "
"deleted.\n", __func__, enc_idx);
goto out;
}
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->missing_count < MPR_MAX_MISSING_COUNT)
et_entry->missing_count++;
if (sc->is_dpm_enable &&
et_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
dpm_entry += et_entry->dpm_entry_num;
dpm_entry->MappingInformation = et_entry->num_slots;
dpm_entry->MappingInformation <<= map_shift;
dpm_entry->MappingInformation |=
et_entry->missing_count;
sc->dpm_flush_entry[et_entry->dpm_entry_num] = 1;
}
et_entry->init_complete = 1;
}
out:
_mapping_flush_dpm_pages(sc);
if (sc->pending_map_events)
sc->pending_map_events--;
}
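A side note on the pattern used above: the firmware reports the enclosure logical ID as two 32-bit words, and the handler reassembles them into a single 64-bit value (temp64_var, et_entry->enclosure_id) before doing lookups. A minimal standalone sketch of that reassembly, with a hypothetical struct standing in for the event-data field and byte-order conversion omitted for brevity:
#include <stdint.h>
/* Hypothetical stand-in for the two-word EnclosureLogicalID field. */
struct logical_id {
        uint32_t High;
        uint32_t Low;
};
static uint64_t
combine_logical_id(const struct logical_id *id)
{
        uint64_t v;

        /* High word into the upper 32 bits, then OR in the low word. */
        v = (uint64_t)id->High << 32;
        v |= id->Low;
        return (v);
}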
/**
* mpr_mapping_topology_change_event - handle topology change events
* @sc: per adapter object
* @event_data: event data payload
*
* Returns nothing.
*/
void
mpr_mapping_topology_change_event(struct mpr_softc *sc,
Mpi2EventDataSasTopologyChangeList_t *event_data)
{
struct _map_topology_change topo_change;
struct _map_phy_change *phy_change;
Mpi2EventSasTopoPhyEntry_t *event_phy_change;
u8 i, num_entries;
topo_change.enc_handle = le16toh(event_data->EnclosureHandle);
topo_change.exp_handle = le16toh(event_data->ExpanderDevHandle);
num_entries = event_data->NumEntries;
topo_change.num_entries = num_entries;
topo_change.start_phy_num = event_data->StartPhyNum;
topo_change.num_phys = event_data->NumPhys;
topo_change.exp_status = event_data->ExpStatus;
event_phy_change = event_data->PHY;
topo_change.phy_details = NULL;
if (!num_entries)
goto out;
- phy_change = mallocarray(num_entries, sizeof(struct _map_phy_change),
+ phy_change = malloc(sizeof(struct _map_phy_change) * num_entries,
M_MPR, M_NOWAIT|M_ZERO);
topo_change.phy_details = phy_change;
if (!phy_change)
goto out;
for (i = 0; i < num_entries; i++, event_phy_change++, phy_change++) {
phy_change->dev_handle = le16toh(event_phy_change->
AttachedDevHandle);
phy_change->reason = event_phy_change->PhyStatus &
MPI2_EVENT_SAS_TOPO_RC_MASK;
}
_mapping_update_missing_count(sc, &topo_change);
_mapping_get_dev_info(sc, &topo_change);
_mapping_clear_removed_entries(sc);
_mapping_add_new_device(sc, &topo_change);
out:
free(topo_change.phy_details, M_MPR);
_mapping_flush_dpm_pages(sc);
if (sc->pending_map_events)
sc->pending_map_events--;
}
/**
* mpr_mapping_pcie_topology_change_event - handle PCIe topology change events
* @sc: per adapter object
* @event_data: event data payload
*
* Returns nothing.
*/
void
mpr_mapping_pcie_topology_change_event(struct mpr_softc *sc,
Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
struct _map_pcie_topology_change topo_change;
struct _map_port_change *port_change;
Mpi26EventPCIeTopoPortEntry_t *event_port_change;
u8 i, num_entries;
topo_change.switch_dev_handle = le16toh(event_data->SwitchDevHandle);
topo_change.enc_handle = le16toh(event_data->EnclosureHandle);
num_entries = event_data->NumEntries;
topo_change.num_entries = num_entries;
topo_change.start_port_num = event_data->StartPortNum;
topo_change.num_ports = event_data->NumPorts;
topo_change.switch_status = event_data->SwitchStatus;
event_port_change = event_data->PortEntry;
topo_change.port_details = NULL;
if (!num_entries)
goto out;
- port_change = mallocarray(num_entries, sizeof(struct _map_port_change),
+ port_change = malloc(sizeof(struct _map_port_change) * num_entries,
M_MPR, M_NOWAIT|M_ZERO);
topo_change.port_details = port_change;
if (!port_change)
goto out;
for (i = 0; i < num_entries; i++, event_port_change++, port_change++) {
port_change->dev_handle = le16toh(event_port_change->
AttachedDevHandle);
port_change->reason = event_port_change->PortStatus;
}
_mapping_update_pcie_missing_count(sc, &topo_change);
_mapping_get_pcie_dev_info(sc, &topo_change);
_mapping_clear_removed_entries(sc);
_mapping_add_new_pcie_device(sc, &topo_change);
out:
free(topo_change.port_details, M_MPR);
_mapping_flush_dpm_pages(sc);
if (sc->pending_map_events)
sc->pending_map_events--;
}
/**
* mpr_mapping_ir_config_change_event - handle IR config change list events
* @sc: per adapter object
* @event_data: event data payload
*
* Returns nothing.
*/
void
mpr_mapping_ir_config_change_event(struct mpr_softc *sc,
Mpi2EventDataIrConfigChangeList_t *event_data)
{
Mpi2EventIrConfigElement_t *element;
int i;
u64 *wwid_table;
u32 map_idx, flags;
struct dev_mapping_table *mt_entry;
u16 element_flags;
- wwid_table = mallocarray(event_data->NumElements, sizeof(u64), M_MPR,
+ wwid_table = malloc(sizeof(u64) * event_data->NumElements, M_MPR,
M_NOWAIT | M_ZERO);
if (!wwid_table)
goto out;
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
flags = le32toh(event_data->Flags);
/*
* For volume changes, get the WWID for the volume and put it in a
* table to be used in the processing of the IR change event.
*/
for (i = 0; i < event_data->NumElements; i++, element++) {
element_flags = le16toh(element->ElementFlags);
if ((element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_ADDED) &&
(element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_REMOVED) &&
(element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE)
&& (element->ReasonCode !=
MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED))
continue;
if ((element_flags &
MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) ==
MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) {
mpr_config_get_volume_wwid(sc,
le16toh(element->VolDevHandle), &wwid_table[i]);
}
}
/*
* Check the ReasonCode for each element in the IR event and Add/Remove
* Volumes or Physical Disks of Volumes to/from the mapping table. Use
* the WWIDs gotten above in wwid_table.
*/
if (flags == MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
goto out;
else {
element = (Mpi2EventIrConfigElement_t *)&event_data->
ConfigElement[0];
for (i = 0; i < event_data->NumElements; i++, element++) {
if (element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_ADDED ||
element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
map_idx = _mapping_get_ir_mt_idx_from_wwid
(sc, wwid_table[i]);
if (map_idx != MPR_MAPTABLE_BAD_IDX) {
/*
* The volume is already in the mapping
* table. Just update its info.
*/
mt_entry = &sc->mapping_table[map_idx];
mt_entry->id = map_idx;
mt_entry->dev_handle = le16toh
(element->VolDevHandle);
mt_entry->device_info =
MPR_DEV_RESERVED | MPR_MAP_IN_USE;
_mapping_update_ir_missing_cnt(sc,
map_idx, element, wwid_table[i]);
continue;
}
/*
* Volume is not in mapping table yet. Find a
* free entry in the mapping table at the
* volume mapping locations. If no entries are
* available, this is an error because it means
* there are more volumes than can be mapped
* and that should never happen for volumes.
*/
map_idx = _mapping_get_free_ir_mt_idx(sc);
if (map_idx == MPR_MAPTABLE_BAD_IDX)
{
mpr_dprint(sc, MPR_ERROR | MPR_MAPPING,
"%s: failed to add the volume with "
"handle 0x%04x because there is no "
"free space available in the "
"mapping table\n", __func__,
le16toh(element->VolDevHandle));
continue;
}
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id = wwid_table[i];
mt_entry->id = map_idx;
mt_entry->dev_handle = le16toh(element->
VolDevHandle);
mt_entry->device_info = MPR_DEV_RESERVED |
MPR_MAP_IN_USE;
_mapping_update_ir_missing_cnt(sc, map_idx,
element, wwid_table[i]);
} else if (element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
map_idx = _mapping_get_ir_mt_idx_from_wwid(sc,
wwid_table[i]);
if (map_idx == MPR_MAPTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_MAPPING,"%s: Failed "
"to remove a volume because it has "
"already been removed.\n",
__func__);
continue;
}
_mapping_update_ir_missing_cnt(sc, map_idx,
element, wwid_table[i]);
} else if (element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) {
map_idx = _mapping_get_mt_idx_from_handle(sc,
le16toh(element->VolDevHandle));
if (map_idx == MPR_MAPTABLE_BAD_IDX) {
mpr_dprint(sc, MPR_MAPPING,"%s: Failed "
"to remove volume with handle "
"0x%04x because it has already "
"been removed.\n", __func__,
le16toh(element->VolDevHandle));
continue;
}
mt_entry = &sc->mapping_table[map_idx];
_mapping_update_ir_missing_cnt(sc, map_idx,
element, mt_entry->physical_id);
}
}
}
out:
_mapping_flush_dpm_pages(sc);
free(wwid_table, M_MPR);
if (sc->pending_map_events)
sc->pending_map_events--;
}
Index: head/sys/dev/mps/mps.c
===================================================================
--- head/sys/dev/mps/mps.c (revision 328217)
+++ head/sys/dev/mps/mps.c (revision 328218)
@@ -1,3055 +1,3055 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2009 Yahoo! Inc.
* Copyright (c) 2011-2015 LSI Corp.
* Copyright (c) 2013-2015 Avago Technologies
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Communications core for Avago Technologies (LSI) MPT2 */
/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/sbuf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <dev/pci/pcivar.h>
#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_sas.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
#include <dev/mps/mpi/mpi2_init.h>
#include <dev/mps/mpi/mpi2_tool.h>
#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
static int mps_diag_reset(struct mps_softc *sc, int sleep_flag);
static int mps_init_queues(struct mps_softc *sc);
static void mps_resize_queues(struct mps_softc *sc);
static int mps_message_unit_reset(struct mps_softc *sc, int sleep_flag);
static int mps_transition_operational(struct mps_softc *sc);
static int mps_iocfacts_allocate(struct mps_softc *sc, uint8_t attaching);
static void mps_iocfacts_free(struct mps_softc *sc);
static void mps_startup(void *arg);
static int mps_send_iocinit(struct mps_softc *sc);
static int mps_alloc_queues(struct mps_softc *sc);
static int mps_alloc_hw_queues(struct mps_softc *sc);
static int mps_alloc_replies(struct mps_softc *sc);
static int mps_alloc_requests(struct mps_softc *sc);
static int mps_attach_log(struct mps_softc *sc);
static __inline void mps_complete_command(struct mps_softc *sc,
struct mps_command *cm);
static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm);
static void mps_periodic(void *);
static int mps_reregister_events(struct mps_softc *sc);
static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm);
static int mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag);
static int mps_debug_sysctl(SYSCTL_HANDLER_ARGS);
static void mps_parse_debug(struct mps_softc *sc, char *list);
SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters");
MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory");
/*
* Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of
* any state and back to its initialization state machine.
*/
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
/* Added this union to smoothly convert cm->cm_desc.Words for le64toh.
* The compiler only supports a uint64_t being passed as the argument;
* otherwise it throws the error
* "aggregate value used where an integer was expected"
*/
typedef union _reply_descriptor {
u64 word;
struct {
u32 low;
u32 high;
} u;
}reply_descriptor,address_descriptor;
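A minimal usage sketch of the union idea above, with standalone types rather than the driver's own: the two host-order halves are written through the struct view and the whole 64-bit value is then converted in one htole64() call, which is exactly the aggregate-to-integer conversion the comment describes (mps_enqueue_request below does the same thing with cm->cm_desc.Words).
#include <sys/endian.h>
#include <stdint.h>
typedef union {
        uint64_t word;
        struct {
                uint32_t low;
                uint32_t high;
        } u;
} descriptor64;
static uint64_t
build_descriptor(uint32_t low, uint32_t high)
{
        descriptor64 rd;

        /* Fill the halves, then convert the aggregate as one integer. */
        rd.u.low = low;
        rd.u.high = high;
        return (htole64(rd.word));
}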
/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mps_chainfail_interval = { 60, 0 };
/*
* sleep_flag can be either CAN_SLEEP or NO_SLEEP.
* If this function is called from process context it can sleep and there
* is no harm in sleeping, but if it is called from an interrupt handler
* it cannot sleep and the NO_SLEEP flag must be set.
* Based on the sleep flag the driver will call either msleep, pause or
* DELAY; msleep and pause behave the same, but pause is used when
* mps_mtx is not held by the driver.
*/
static int
mps_diag_reset(struct mps_softc *sc,int sleep_flag)
{
uint32_t reg;
int i, error, tries = 0;
uint8_t first_wait_done = FALSE;
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
/* Clear any pending interrupts */
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/*
* Force NO_SLEEP for threads that are prohibited from sleeping,
* e.g. threads running in an interrupt handler.
*/
if (curthread->td_no_sleeping != 0)
sleep_flag = NO_SLEEP;
mps_dprint(sc, MPS_INIT, "sequence start, sleep_flag= %d\n", sleep_flag);
/* Push the magic sequence */
error = ETIMEDOUT;
while (tries++ < 20) {
for (i = 0; i < sizeof(mpt2_reset_magic); i++)
mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
mpt2_reset_magic[i]);
/* wait 100 msec */
if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
"mpsdiag", hz/10);
else if (sleep_flag == CAN_SLEEP)
pause("mpsdiag", hz/10);
else
DELAY(100 * 1000);
reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
error = 0;
break;
}
}
if (error) {
mps_dprint(sc, MPS_INIT, "sequence failed, error=%d, exit\n",
error);
return (error);
}
/* Send the actual reset. XXX need to refresh the reg? */
reg |= MPI2_DIAG_RESET_ADAPTER;
mps_dprint(sc, MPS_INIT, "sequence success, sending reset, reg= 0x%x\n",
reg);
mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);
/* Wait up to 300 seconds in 50ms intervals */
error = ETIMEDOUT;
for (i = 0; i < 6000; i++) {
/*
* Wait 50 msec. If this is the first time through, wait 256
* msec to satisfy Diag Reset timing requirements.
*/
if (first_wait_done) {
if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
"mpsdiag", hz/20);
else if (sleep_flag == CAN_SLEEP)
pause("mpsdiag", hz/20);
else
DELAY(50 * 1000);
} else {
DELAY(256 * 1000);
first_wait_done = TRUE;
}
/*
* Check for the RESET_ADAPTER bit to be cleared first, then
* wait for the RESET state to be cleared, which takes a little
* longer.
*/
reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
if (reg & MPI2_DIAG_RESET_ADAPTER) {
continue;
}
reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
error = 0;
break;
}
}
if (error) {
mps_dprint(sc, MPS_INIT, "reset failed, error= %d, exit\n",
error);
return (error);
}
mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
mps_dprint(sc, MPS_INIT, "diag reset success, exit\n");
return (0);
}
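The wait idiom inside the loops above appears twice in this function. Shown in isolation it reads as follows; this is only a sketch of the existing pattern under a hypothetical helper name, not a helper the driver provides: sleep on the softc mutex when it is held and sleeping is allowed, pause() when sleeping is allowed without the mutex, and busy-wait with DELAY() otherwise.
static void
mps_wait_100ms(struct mps_softc *sc, int sleep_flag)
{

        if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
                msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, "mpsdiag",
                    hz / 10);
        else if (sleep_flag == CAN_SLEEP)
                pause("mpsdiag", hz / 10);
        else
                DELAY(100 * 1000);      /* 100 msec busy-wait */
}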
static int
mps_message_unit_reset(struct mps_softc *sc, int sleep_flag)
{
int error;
MPS_FUNCTRACE(sc);
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
error = 0;
mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
MPI2_DOORBELL_FUNCTION_SHIFT);
if (mps_wait_db_ack(sc, 5, sleep_flag) != 0) {
mps_dprint(sc, MPS_INIT|MPS_FAULT,
"Doorbell handshake failed\n");
error = ETIMEDOUT;
}
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
return (error);
}
static int
mps_transition_ready(struct mps_softc *sc)
{
uint32_t reg, state;
int error, tries = 0;
int sleep_flags;
MPS_FUNCTRACE(sc);
/* If we are in attach call, do not sleep */
sleep_flags = (sc->mps_flags & MPS_FLAGS_ATTACH_DONE)
? CAN_SLEEP:NO_SLEEP;
error = 0;
mps_dprint(sc, MPS_INIT, "%s entered, sleep_flags= %d\n",
__func__, sleep_flags);
while (tries++ < 1200) {
reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
mps_dprint(sc, MPS_INIT, " Doorbell= 0x%x\n", reg);
/*
* Ensure the IOC is ready to talk. If it's not, try
* resetting it.
*/
if (reg & MPI2_DOORBELL_USED) {
mps_dprint(sc, MPS_INIT, " Not ready, sending diag "
"reset\n");
mps_diag_reset(sc, sleep_flags);
DELAY(50000);
continue;
}
/* Is the adapter owned by another peer? */
if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
(MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC is under the "
"control of another peer host, aborting "
"initialization.\n");
error = ENXIO;
break;
}
state = reg & MPI2_IOC_STATE_MASK;
if (state == MPI2_IOC_STATE_READY) {
/* Ready to go! */
error = 0;
break;
} else if (state == MPI2_IOC_STATE_FAULT) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC in fault "
"state 0x%x, resetting\n",
state & MPI2_DOORBELL_FAULT_CODE_MASK);
mps_diag_reset(sc, sleep_flags);
} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
/* Need to take ownership */
mps_message_unit_reset(sc, sleep_flags);
} else if (state == MPI2_IOC_STATE_RESET) {
/* Wait a bit, IOC might be in transition */
mps_dprint(sc, MPS_INIT|MPS_FAULT,
"IOC in unexpected reset state\n");
} else {
mps_dprint(sc, MPS_INIT|MPS_FAULT,
"IOC in unknown state 0x%x\n", state);
error = EINVAL;
break;
}
/* Wait 50ms for things to settle down. */
DELAY(50000);
}
if (error)
mps_dprint(sc, MPS_INIT|MPS_FAULT,
"Cannot transition IOC to ready\n");
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
return (error);
}
static int
mps_transition_operational(struct mps_softc *sc)
{
uint32_t reg, state;
int error;
MPS_FUNCTRACE(sc);
error = 0;
reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
mps_dprint(sc, MPS_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);
state = reg & MPI2_IOC_STATE_MASK;
if (state != MPI2_IOC_STATE_READY) {
mps_dprint(sc, MPS_INIT, "IOC not ready\n");
if ((error = mps_transition_ready(sc)) != 0) {
mps_dprint(sc, MPS_INIT|MPS_FAULT,
"failed to transition ready, exit\n");
return (error);
}
}
error = mps_send_iocinit(sc);
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
return (error);
}
static void
mps_resize_queues(struct mps_softc *sc)
{
int reqcr, prireqcr;
/*
* Size the queues. Since the reply queues always need one free
* entry, we'll deduct one reply message here. The LSI documents
* suggest adding a count to the request queue instead, but I think
* it's better to deduct from the reply queue.
*/
prireqcr = MAX(1, sc->max_prireqframes);
prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);
reqcr = MAX(2, sc->max_reqframes);
reqcr = MIN(reqcr, sc->facts->RequestCredit);
sc->num_reqs = prireqcr + reqcr;
sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
/*
* Figure out the number of MSIx-based queues. If the firmware or
* user has done something crazy and not allowed enough credit for
* the queues to be useful then don't enable multi-queue.
*/
if (sc->facts->MaxMSIxVectors < 2)
sc->msi_msgs = 1;
if (sc->msi_msgs > 1) {
sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
if (sc->num_reqs / sc->msi_msgs < 2)
sc->msi_msgs = 1;
}
mps_dprint(sc, MPS_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
sc->msi_msgs, sc->num_reqs, sc->num_replies);
}
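To make the clamping above concrete, here is a worked example with hypothetical numbers (the real driver defaults and firmware credits vary): if the driver asks for 128 high-priority and 2048 normal request frames and the firmware grants at least that much credit, while the reply side is limited by a MaxReplyDescriptorPostQueueDepth of 2048, the queues come out as follows.
#include <sys/param.h>  /* MIN()/MAX() */
static void
example_queue_sizing(void)
{
        int prireqcr, reqcr, num_reqs, num_replies;

        prireqcr = MIN(MAX(1, 128), 128);       /* 128 */
        reqcr = MIN(MAX(2, 2048), 2048);        /* 2048 */
        num_reqs = prireqcr + reqcr;            /* 2176 requests */
        /* One reply is deducted so the reply queue never fills completely. */
        num_replies = MIN(2048 + 32, 2048) - 1; /* 2047 replies */
        (void)num_reqs;
        (void)num_replies;
}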
/*
* This is called during attach and when re-initializing due to a Diag Reset.
* IOC Facts is used to allocate many of the structures needed by the driver.
* If called from attach, de-allocation is not required because the driver has
* not allocated any structures yet, but if called from a Diag Reset, previously
* allocated structures based on IOC Facts will need to be freed and re-
* allocated based on the latest IOC Facts.
*/
static int
mps_iocfacts_allocate(struct mps_softc *sc, uint8_t attaching)
{
int error;
Mpi2IOCFactsReply_t saved_facts;
uint8_t saved_mode, reallocating;
mps_dprint(sc, MPS_INIT|MPS_TRACE, "%s entered\n", __func__);
/* Save old IOC Facts and then only reallocate if Facts have changed */
if (!attaching) {
bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
}
/*
* Get IOC Facts. In all cases throughout this function, panic if doing
* a re-initialization and only return the error if attaching so the OS
* can handle it.
*/
if ((error = mps_get_iocfacts(sc, sc->facts)) != 0) {
if (attaching) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to get "
"IOC Facts with error %d, exit\n", error);
return (error);
} else {
panic("%s failed to get IOC Facts with error %d\n",
__func__, error);
}
}
MPS_DPRINT_PAGE(sc, MPS_XINFO, iocfacts, sc->facts);
snprintf(sc->fw_version, sizeof(sc->fw_version),
"%02d.%02d.%02d.%02d",
sc->facts->FWVersion.Struct.Major,
sc->facts->FWVersion.Struct.Minor,
sc->facts->FWVersion.Struct.Unit,
sc->facts->FWVersion.Struct.Dev);
mps_dprint(sc, MPS_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
MPS_DRIVER_VERSION);
mps_dprint(sc, MPS_INFO, "IOCCapabilities: %b\n",
sc->facts->IOCCapabilities,
"\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
"\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
"\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc");
/*
* If the chip doesn't support event replay then a hard reset will be
* required to trigger a full discovery. Do the reset here then
* retransition to Ready. A hard reset might have already been done,
* but it doesn't hurt to do it again. Only do this if attaching, not
* for a Diag Reset.
*/
if (attaching && ((sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
mps_dprint(sc, MPS_INIT, "No event replay, reseting\n");
mps_diag_reset(sc, NO_SLEEP);
if ((error = mps_transition_ready(sc)) != 0) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to "
"transition to ready with error %d, exit\n",
error);
return (error);
}
}
/*
* Set flag if IR Firmware is loaded. If the RAID Capability has
* changed from the previous IOC Facts, log a warning, but only if
* checking this after a Diag Reset and not during attach.
*/
saved_mode = sc->ir_firmware;
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
sc->ir_firmware = 1;
if (!attaching) {
if (sc->ir_firmware != saved_mode) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "new IR/IT mode "
"in IOC Facts does not match previous mode\n");
}
}
/* Only deallocate and reallocate if relevant IOC Facts have changed */
reallocating = FALSE;
sc->mps_flags &= ~MPS_FLAGS_REALLOCATED;
if ((!attaching) &&
((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
(saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
(saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
(saved_facts.RequestCredit != sc->facts->RequestCredit) ||
(saved_facts.ProductID != sc->facts->ProductID) ||
(saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
(saved_facts.IOCRequestFrameSize !=
sc->facts->IOCRequestFrameSize) ||
(saved_facts.MaxTargets != sc->facts->MaxTargets) ||
(saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
(saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
(saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
(saved_facts.MaxReplyDescriptorPostQueueDepth !=
sc->facts->MaxReplyDescriptorPostQueueDepth) ||
(saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
(saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
(saved_facts.MaxPersistentEntries !=
sc->facts->MaxPersistentEntries))) {
reallocating = TRUE;
/* Record that we reallocated everything */
sc->mps_flags |= MPS_FLAGS_REALLOCATED;
}
/*
* Some things should be done if attaching or re-allocating after a Diag
* Reset, but are not needed after a Diag Reset if the FW has not
* changed.
*/
if (attaching || reallocating) {
/*
* Check if controller supports FW diag buffers and set flag to
* enable each type.
*/
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
enabled = TRUE;
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
enabled = TRUE;
if (sc->facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
enabled = TRUE;
/*
* Set flag if EEDP is supported and if TLR is supported.
*/
if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
sc->eedp_enabled = TRUE;
if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
sc->control_TLR = TRUE;
mps_resize_queues(sc);
/*
* Initialize all Tail Queues
*/
TAILQ_INIT(&sc->req_list);
TAILQ_INIT(&sc->high_priority_req_list);
TAILQ_INIT(&sc->chain_list);
TAILQ_INIT(&sc->tm_list);
}
/*
* If doing a Diag Reset and the FW is significantly different
* (reallocating will be set above in IOC Facts comparison), then all
* buffers based on the IOC Facts will need to be freed before they are
* reallocated.
*/
if (reallocating) {
mps_iocfacts_free(sc);
mpssas_realloc_targets(sc, saved_facts.MaxTargets +
saved_facts.MaxVolumes);
}
/*
* Any deallocation has been completed. Now start reallocating
* if needed. Will only need to reallocate if attaching or if the new
* IOC Facts are different from the previous IOC Facts after a Diag
* Reset. Targets have already been allocated above if needed.
*/
error = 0;
while (attaching || reallocating) {
if ((error = mps_alloc_hw_queues(sc)) != 0)
break;
if ((error = mps_alloc_replies(sc)) != 0)
break;
if ((error = mps_alloc_requests(sc)) != 0)
break;
if ((error = mps_alloc_queues(sc)) != 0)
break;
break;
}
if (error) {
mps_dprint(sc, MPS_INIT|MPS_FAULT,
"Failed to alloc queues with error %d\n", error);
mps_free(sc);
return (error);
}
/* Always initialize the queues */
bzero(sc->free_queue, sc->fqdepth * 4);
mps_init_queues(sc);
/*
* Always get the chip out of the reset state, but only panic if not
* attaching. If attaching and there is an error, that is handled by
* the OS.
*/
error = mps_transition_operational(sc);
if (error != 0) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to "
"transition to operational with error %d\n", error);
mps_free(sc);
return (error);
}
/*
* Finish the queue initialization.
* These are set here instead of in mps_init_queues() because the
* IOC resets these values during the state transition in
* mps_transition_operational(). The free index is set to 1
* because the corresponding index in the IOC is set to 0, and the
* IOC treats the queues as full if both are set to the same value.
* Hence the reason that the queue can't hold all of the possible
* replies.
*/
sc->replypostindex = 0;
mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
/*
* Attach the subsystems so they can prepare their event masks.
* XXX Should be dynamic so that IM/IR and user modules can attach
*/
error = 0;
while (attaching) {
mps_dprint(sc, MPS_INIT, "Attaching subsystems\n");
if ((error = mps_attach_log(sc)) != 0)
break;
if ((error = mps_attach_sas(sc)) != 0)
break;
if ((error = mps_attach_user(sc)) != 0)
break;
break;
}
if (error) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to attach all "
"subsystems: error %d\n", error);
mps_free(sc);
return (error);
}
/*
* XXX If the number of MSI-X vectors changes during re-init, this
* won't see it and adjust.
*/
if (attaching && (error = mps_pci_setup_interrupts(sc)) != 0) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to setup "
"interrupts\n");
mps_free(sc);
return (error);
}
/*
* Set flag if this is a WD controller. This shouldn't ever change, but
* reset it after a Diag Reset, just in case.
*/
sc->WD_available = FALSE;
if (pci_get_device(sc->mps_dev) == MPI2_MFGPAGE_DEVID_SSS6200)
sc->WD_available = TRUE;
return (error);
}
/*
* This is called if memory is being free (during detach for example) and when
* buffers need to be reallocated due to a Diag Reset.
*/
static void
mps_iocfacts_free(struct mps_softc *sc)
{
struct mps_command *cm;
int i;
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
if (sc->free_busaddr != 0)
bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
if (sc->free_queue != NULL)
bus_dmamem_free(sc->queues_dmat, sc->free_queue,
sc->queues_map);
if (sc->queues_dmat != NULL)
bus_dma_tag_destroy(sc->queues_dmat);
if (sc->chain_busaddr != 0)
bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
if (sc->chain_frames != NULL)
bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
sc->chain_map);
if (sc->chain_dmat != NULL)
bus_dma_tag_destroy(sc->chain_dmat);
if (sc->sense_busaddr != 0)
bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
if (sc->sense_frames != NULL)
bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
sc->sense_map);
if (sc->sense_dmat != NULL)
bus_dma_tag_destroy(sc->sense_dmat);
if (sc->reply_busaddr != 0)
bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
if (sc->reply_frames != NULL)
bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
sc->reply_map);
if (sc->reply_dmat != NULL)
bus_dma_tag_destroy(sc->reply_dmat);
if (sc->req_busaddr != 0)
bus_dmamap_unload(sc->req_dmat, sc->req_map);
if (sc->req_frames != NULL)
bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
if (sc->req_dmat != NULL)
bus_dma_tag_destroy(sc->req_dmat);
if (sc->chains != NULL)
free(sc->chains, M_MPT2);
if (sc->commands != NULL) {
for (i = 1; i < sc->num_reqs; i++) {
cm = &sc->commands[i];
bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
}
free(sc->commands, M_MPT2);
}
if (sc->buffer_dmat != NULL)
bus_dma_tag_destroy(sc->buffer_dmat);
mps_pci_free_interrupts(sc);
free(sc->queues, M_MPT2);
sc->queues = NULL;
}
/*
* The terms diag reset and hard reset are used interchangeably in the MPI
* docs to mean resetting the controller chip. In this code diag reset
* cleans everything up, and the hard reset function just sends the reset
* sequence to the chip. This should probably be refactored so that every
* subsystem gets a reset notification of some sort, and can clean up
* appropriately.
*/
int
mps_reinit(struct mps_softc *sc)
{
int error;
struct mpssas_softc *sassc;
sassc = sc->sassc;
MPS_FUNCTRACE(sc);
mtx_assert(&sc->mps_mtx, MA_OWNED);
mps_dprint(sc, MPS_INIT|MPS_INFO, "Reinitializing controller\n");
if (sc->mps_flags & MPS_FLAGS_DIAGRESET) {
mps_dprint(sc, MPS_INIT, "Reset already in progress\n");
return 0;
}
/* make sure the completion callbacks can recognize they're getting
* a NULL cm_reply due to a reset.
*/
sc->mps_flags |= MPS_FLAGS_DIAGRESET;
/*
* Mask interrupts here.
*/
mps_dprint(sc, MPS_INIT, "masking interrupts and resetting\n");
mps_mask_intr(sc);
error = mps_diag_reset(sc, CAN_SLEEP);
if (error != 0) {
/* XXXSL No need to panic here */
panic("%s hard reset failed with error %d\n",
__func__, error);
}
/* Restore the PCI state, including the MSI-X registers */
mps_pci_restore(sc);
/* Give the I/O subsystem special priority to get itself prepared */
mpssas_handle_reinit(sc);
/*
* Get IOC Facts and allocate all structures based on this information.
* The attach function will also call mps_iocfacts_allocate at startup.
* If relevant values have changed in IOC Facts, this function will free
* all of the memory based on IOC Facts and reallocate that memory.
*/
if ((error = mps_iocfacts_allocate(sc, FALSE)) != 0) {
panic("%s IOC Facts based allocation failed with error %d\n",
__func__, error);
}
/*
* Mapping structures will be re-allocated after getting IOC Page8, so
* free these structures here.
*/
mps_mapping_exit(sc);
/*
* The static page function currently reads only IOC Page8; others can
* be added in the future. It's possible that the values in IOC Page8 have
* changed after a Diag Reset due to user modification, so always read
* these. Interrupts are masked, so unmask them before getting config
* pages.
*/
mps_unmask_intr(sc);
sc->mps_flags &= ~MPS_FLAGS_DIAGRESET;
mps_base_static_config_pages(sc);
/*
* Some mapping info is based in IOC Page8 data, so re-initialize the
* mapping tables.
*/
mps_mapping_initialize(sc);
/*
* Restart will reload the event masks clobbered by the reset, and
* then enable the port.
*/
mps_reregister_events(sc);
/* the end of discovery will release the simq, so we're done. */
mps_dprint(sc, MPS_INIT|MPS_XINFO, "Finished sc %p post %u free %u\n",
sc, sc->replypostindex, sc->replyfreeindex);
mpssas_release_simq_reinit(sassc);
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
return 0;
}
/* Wait for the chip to ACK a word that we've put into its FIFO.
* Wait for <timeout> seconds; each loop iteration busy-waits for
* 500 microseconds, so the total is [ 0.5 * (2000 * <timeout>) ]
* milliseconds, i.e. <timeout> seconds in the NO_SLEEP case.
*/
static int
mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag)
{
u32 cntdn, count;
u32 int_status;
u32 doorbell;
count = 0;
cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
do {
int_status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
mps_dprint(sc, MPS_TRACE,
"%s: successful count(%d), timeout(%d)\n",
__func__, count, timeout);
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
doorbell = mps_regread(sc, MPI2_DOORBELL_OFFSET);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
mps_dprint(sc, MPS_FAULT,
"fault_state(0x%04x)!\n", doorbell);
return (EFAULT);
}
} else if (int_status == 0xFFFFFFFF)
goto out;
/* If it can sleep, sleep for 1 millisecond, else busy loop for
* 0.5 millisecond */
if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
"mpsdba", hz/1000);
else if (sleep_flag == CAN_SLEEP)
pause("mpsdba", hz/1000);
else
DELAY(500);
count++;
} while (--cntdn);
out:
mps_dprint(sc, MPS_FAULT, "%s: failed due to timeout count(%d), "
"int_status(%x)!\n", __func__, count, int_status);
return (ETIMEDOUT);
}
/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mps_wait_db_int(struct mps_softc *sc)
{
int retry;
for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) {
if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
return (0);
DELAY(2000);
}
return (ETIMEDOUT);
}
/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
int req_sz, int reply_sz, int timeout)
{
uint32_t *data32;
uint16_t *data16;
int i, count, ioc_sz, residual;
int sleep_flags = CAN_SLEEP;
if (curthread->td_no_sleeping != 0)
sleep_flags = NO_SLEEP;
/* Step 1 */
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/* Step 2 */
if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
return (EBUSY);
/* Step 3
* Announce that a message is coming through the doorbell. Messages
* are pushed as 32-bit words, so round up if needed.
*/
count = (req_sz + 3) / 4;
mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
(MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
(count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
/* Step 4 */
if (mps_wait_db_int(sc) ||
(mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n");
return (ENXIO);
}
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) {
mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n");
return (ENXIO);
}
/* Step 5 */
/* Clock out the message data synchronously in 32-bit dwords*/
data32 = (uint32_t *)req;
for (i = 0; i < count; i++) {
mps_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) {
mps_dprint(sc, MPS_FAULT,
"Timeout while writing doorbell\n");
return (ENXIO);
}
}
/* Step 6 */
/* Clock in the reply in 16-bit words. The total length of the
* message is always in the 4th byte, so clock out the first 2 words
* manually, then loop the rest.
*/
data16 = (uint16_t *)reply;
if (mps_wait_db_int(sc) != 0) {
mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n");
return (ENXIO);
}
data16[0] =
mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
if (mps_wait_db_int(sc) != 0) {
mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n");
return (ENXIO);
}
data16[1] =
mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/* Number of 32bit words in the message */
ioc_sz = reply->MsgLength;
/*
* Figure out how many 16bit words to clock in without overrunning.
* The precision loss with dividing reply_sz can safely be
* ignored because the messages can only be multiples of 32bits.
*/
residual = 0;
count = MIN((reply_sz / 4), ioc_sz) * 2;
if (count < ioc_sz * 2) {
residual = ioc_sz * 2 - count;
mps_dprint(sc, MPS_ERROR, "Driver error, throwing away %d "
"residual message words\n", residual);
}
for (i = 2; i < count; i++) {
if (mps_wait_db_int(sc) != 0) {
mps_dprint(sc, MPS_FAULT,
"Timeout reading doorbell %d\n", i);
return (ENXIO);
}
data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) &
MPI2_DOORBELL_DATA_MASK;
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
}
/*
* Pull out residual words that won't fit into the provided buffer.
* This keeps the chip from hanging due to a driver programming
* error.
*/
while (residual--) {
if (mps_wait_db_int(sc) != 0) {
mps_dprint(sc, MPS_FAULT,
"Timeout reading doorbell\n");
return (ENXIO);
}
(void)mps_regread(sc, MPI2_DOORBELL_OFFSET);
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
}
/* Step 7 */
if (mps_wait_db_int(sc) != 0) {
mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n");
return (ENXIO);
}
if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n");
mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
return (0);
}
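Two small unit conversions drive the handshake above: requests are clocked out in 32-bit dwords (step 3 rounds the byte count up), and replies are clocked back in 16-bit halves (step 6 doubles the dword count when sizing the read loop). A trivial sketch of the request-side rounding, under the same convention:
static int
handshake_dwords(int req_sz)
{

        /* Round the request size in bytes up to whole 32-bit words. */
        return ((req_sz + 3) / 4);
}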
static void
mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm)
{
reply_descriptor rd;
MPS_FUNCTRACE(sc);
mps_dprint(sc, MPS_TRACE, "SMID %u cm %p ccb %p\n",
cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE && !(sc->mps_flags & MPS_FLAGS_SHUTDOWN))
mtx_assert(&sc->mps_mtx, MA_OWNED);
if (++sc->io_cmds_active > sc->io_cmds_highwater)
sc->io_cmds_highwater++;
rd.u.low = cm->cm_desc.Words.Low;
rd.u.high = cm->cm_desc.Words.High;
rd.word = htole64(rd.word);
/* TODO-We may need to make below regwrite atomic */
mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
rd.u.low);
mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
rd.u.high);
}
/*
* Just the FACTS, ma'am.
*/
static int
mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
MPI2_DEFAULT_REPLY *reply;
MPI2_IOC_FACTS_REQUEST request;
int error, req_sz, reply_sz;
MPS_FUNCTRACE(sc);
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
reply = (MPI2_DEFAULT_REPLY *)facts;
bzero(&request, req_sz);
request.Function = MPI2_FUNCTION_IOC_FACTS;
error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
return (error);
}
static int
mps_send_iocinit(struct mps_softc *sc)
{
MPI2_IOC_INIT_REQUEST init;
MPI2_DEFAULT_REPLY reply;
int req_sz, reply_sz, error;
struct timeval now;
uint64_t time_in_msec;
MPS_FUNCTRACE(sc);
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
bzero(&init, req_sz);
bzero(&reply, reply_sz);
/*
* Fill in the init block. Note that most addresses are
* deliberately in the lower 32bits of memory. This is a micro-
* optimization for PCI/PCIX, though it's not clear if it helps PCIe.
*/
init.Function = MPI2_FUNCTION_IOC_INIT;
init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
init.MsgVersion = htole16(MPI2_VERSION);
init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize);
init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
init.SenseBufferAddressHigh = 0;
init.SystemReplyAddressHigh = 0;
init.SystemRequestFrameBaseAddress.High = 0;
init.SystemRequestFrameBaseAddress.Low = htole32((uint32_t)sc->req_busaddr);
init.ReplyDescriptorPostQueueAddress.High = 0;
init.ReplyDescriptorPostQueueAddress.Low = htole32((uint32_t)sc->post_busaddr);
init.ReplyFreeQueueAddress.High = 0;
init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
getmicrotime(&now);
time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
error = ENXIO;
mps_dprint(sc, MPS_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
return (error);
}
void
mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
bus_addr_t *addr;
addr = arg;
*addr = segs[0].ds_addr;
}
static int
mps_alloc_queues(struct mps_softc *sc)
{
struct mps_queue *q;
- u_int nq, i;
+ int nq, i;
nq = sc->msi_msgs;
mps_dprint(sc, MPS_INIT|MPS_XINFO, "Allocating %d I/O queues\n", nq);
- sc->queues = mallocarray(nq, sizeof(struct mps_queue), M_MPT2,
+ sc->queues = malloc(sizeof(struct mps_queue) * nq, M_MPT2,
M_NOWAIT|M_ZERO);
if (sc->queues == NULL)
return (ENOMEM);
for (i = 0; i < nq; i++) {
q = &sc->queues[i];
mps_dprint(sc, MPS_INIT, "Configuring queue %d %p\n", i, q);
q->sc = sc;
q->qnum = i;
}
return (0);
}
static int
mps_alloc_hw_queues(struct mps_softc *sc)
{
bus_addr_t queues_busaddr;
uint8_t *queues;
int qsize, fqsize, pqsize;
/*
* The reply free queue contains 4 byte entries in multiples of 16 and
* aligned on a 16 byte boundary. There must always be an unused entry.
* This queue supplies fresh reply frames for the firmware to use.
*
* The reply descriptor post queue contains 8 byte entries in
* multiples of 16 and aligned on a 16 byte boundary. This queue
* contains filled-in reply frames sent from the firmware to the host.
*
* These two queues are allocated together for simplicity.
*/
sc->fqdepth = roundup2(sc->num_replies + 1, 16);
sc->pqdepth = roundup2(sc->num_replies + 1, 16);
fqsize= sc->fqdepth * 4;
pqsize = sc->pqdepth * 8;
qsize = fqsize + pqsize;
if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
qsize, /* maxsize */
1, /* nsegments */
qsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->queues_dmat)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate queues DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
&sc->queues_map)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate queues memory\n");
return (ENOMEM);
}
bzero(queues, qsize);
bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
mps_memaddr_cb, &queues_busaddr, 0);
sc->free_queue = (uint32_t *)queues;
sc->free_busaddr = queues_busaddr;
sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
sc->post_busaddr = queues_busaddr + fqsize;
return (0);
}
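Continuing the hypothetical numbers from the sizing example earlier: with num_replies = 2047, the layout computed above works out as shown below, one 16-byte-aligned allocation holding both queues with the post queue starting fqsize bytes in.
#include <sys/param.h>  /* roundup2() */
static void
example_hw_queue_sizes(void)
{
        int fqdepth, pqdepth, fqsize, pqsize, qsize;

        fqdepth = roundup2(2047 + 1, 16);       /* 2048 free entries */
        pqdepth = roundup2(2047 + 1, 16);       /* 2048 post entries */
        fqsize = fqdepth * 4;                   /* 8192 bytes */
        pqsize = pqdepth * 8;                   /* 16384 bytes */
        qsize = fqsize + pqsize;                /* 24576 bytes total */
        (void)qsize;
}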
static int
mps_alloc_replies(struct mps_softc *sc)
{
int rsize, num_replies;
/*
* sc->num_replies should be one less than sc->fqdepth. We need to
* allocate space for sc->fqdepth replies, but only sc->num_replies
* replies can be used at once.
*/
num_replies = max(sc->fqdepth, sc->num_replies);
rsize = sc->facts->ReplyFrameSize * num_replies * 4;
if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
4, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->reply_dmat)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate replies DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
BUS_DMA_NOWAIT, &sc->reply_map)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate replies memory\n");
return (ENOMEM);
}
bzero(sc->reply_frames, rsize);
bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
mps_memaddr_cb, &sc->reply_busaddr, 0);
return (0);
}
static int
mps_alloc_requests(struct mps_softc *sc)
{
struct mps_command *cm;
struct mps_chain *chain;
int i, rsize, nsegs;
rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->req_dmat)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate request DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
BUS_DMA_NOWAIT, &sc->req_map)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate request memory\n");
return (ENOMEM);
}
bzero(sc->req_frames, rsize);
bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
mps_memaddr_cb, &sc->req_busaddr, 0);
rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4;
if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->chain_dmat)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate chain DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
BUS_DMA_NOWAIT, &sc->chain_map)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate chain memory\n");
return (ENOMEM);
}
bzero(sc->chain_frames, rsize);
bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
mps_memaddr_cb, &sc->chain_busaddr, 0);
rsize = MPS_SENSE_LEN * sc->num_reqs;
if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sense_dmat)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate sense DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
BUS_DMA_NOWAIT, &sc->sense_map)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate sense memory\n");
return (ENOMEM);
}
bzero(sc->sense_frames, rsize);
bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
mps_memaddr_cb, &sc->sense_busaddr, 0);
sc->chains = malloc(sizeof(struct mps_chain) * sc->max_chains, M_MPT2,
M_WAITOK | M_ZERO);
if(!sc->chains) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate chains memory\n");
return (ENOMEM);
}
for (i = 0; i < sc->max_chains; i++) {
chain = &sc->chains[i];
chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
i * sc->facts->IOCRequestFrameSize * 4);
chain->chain_busaddr = sc->chain_busaddr +
i * sc->facts->IOCRequestFrameSize * 4;
mps_free_chain(sc, chain);
sc->chain_free_lowwater++;
}
/* XXX Need to pick a more precise value */
nsegs = (MAXPHYS / PAGE_SIZE) + 1;
if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
nsegs, /* nsegments */
BUS_SPACE_MAXSIZE_24BIT,/* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, /* lockfunc */
&sc->mps_mtx, /* lockarg */
&sc->buffer_dmat)) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate buffer DMA tag\n");
return (ENOMEM);
}
/*
* SMID 0 cannot be used as a free command per the firmware spec.
* Just drop that command instead of risking accounting bugs.
*/
sc->commands = malloc(sizeof(struct mps_command) * sc->num_reqs,
M_MPT2, M_WAITOK | M_ZERO);
if(!sc->commands) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate command memory\n");
return (ENOMEM);
}
for (i = 1; i < sc->num_reqs; i++) {
cm = &sc->commands[i];
cm->cm_req = sc->req_frames +
i * sc->facts->IOCRequestFrameSize * 4;
cm->cm_req_busaddr = sc->req_busaddr +
i * sc->facts->IOCRequestFrameSize * 4;
cm->cm_sense = &sc->sense_frames[i];
cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN;
cm->cm_desc.Default.SMID = i;
cm->cm_sc = sc;
TAILQ_INIT(&cm->cm_chain_list);
callout_init_mtx(&cm->cm_callout, &sc->mps_mtx, 0);
/* XXX Is a failure here a critical problem? */
if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
if (i <= sc->facts->HighPriorityCredit)
mps_free_high_priority_command(sc, cm);
else
mps_free_command(sc, cm);
else {
panic("failed to allocate command %d\n", i);
sc->num_reqs = i;
break;
}
}
return (0);
}
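The per-command addresses assigned in the loop above are plain offsets into the contiguous request and sense allocations; IOCRequestFrameSize is counted in 32-bit words, hence the multiplication by 4. A small sketch of the request-frame offset calculation, with the frame size passed in rather than read from IOC Facts:
#include <stddef.h>
static char *
request_frame(char *req_frames, int frame_size_words, int smid)
{

        /* The command with SMID i starts i frames into the shared buffer. */
        return (req_frames + (size_t)smid * frame_size_words * 4);
}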
static int
mps_init_queues(struct mps_softc *sc)
{
int i;
memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
/*
* According to the spec, we need to use one less reply than we
* have space for on the queue. So sc->num_replies (the number we
* use) should be less than sc->fqdepth (allocated size).
*/
if (sc->num_replies >= sc->fqdepth)
return (EINVAL);
/*
* Initialize all of the free queue entries.
*/
for (i = 0; i < sc->fqdepth; i++)
sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4);
sc->replyfreeindex = sc->num_replies;
return (0);
}
/* Get the driver parameter tunables. Lowest priority are the driver defaults.
* Next are the global settings, if they exist. Highest are the per-unit
* settings, if they exist.
*/
void
mps_get_tunables(struct mps_softc *sc)
{
char tmpstr[80], mps_debug[80];
/* XXX default to some debugging for now */
sc->mps_debug = MPS_INFO|MPS_FAULT;
sc->disable_msix = 0;
sc->disable_msi = 0;
sc->max_msix = MPS_MSIX_MAX;
sc->max_chains = MPS_CHAIN_FRAMES;
sc->max_io_pages = MPS_MAXIO_PAGES;
sc->enable_ssu = MPS_SSU_ENABLE_SSD_DISABLE_HDD;
sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
sc->use_phynum = 1;
sc->max_reqframes = MPS_REQ_FRAMES;
sc->max_prireqframes = MPS_PRI_REQ_FRAMES;
sc->max_replyframes = MPS_REPLY_FRAMES;
sc->max_evtframes = MPS_EVT_REPLY_FRAMES;
/*
* Grab the global variables.
*/
bzero(mps_debug, 80);
if (TUNABLE_STR_FETCH("hw.mps.debug_level", mps_debug, 80) != 0)
mps_parse_debug(sc, mps_debug);
TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix);
TUNABLE_INT_FETCH("hw.mps.disable_msi", &sc->disable_msi);
TUNABLE_INT_FETCH("hw.mps.max_msix", &sc->max_msix);
TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains);
TUNABLE_INT_FETCH("hw.mps.max_io_pages", &sc->max_io_pages);
TUNABLE_INT_FETCH("hw.mps.enable_ssu", &sc->enable_ssu);
TUNABLE_INT_FETCH("hw.mps.spinup_wait_time", &sc->spinup_wait_time);
TUNABLE_INT_FETCH("hw.mps.use_phy_num", &sc->use_phynum);
TUNABLE_INT_FETCH("hw.mps.max_reqframes", &sc->max_reqframes);
TUNABLE_INT_FETCH("hw.mps.max_prireqframes", &sc->max_prireqframes);
TUNABLE_INT_FETCH("hw.mps.max_replyframes", &sc->max_replyframes);
TUNABLE_INT_FETCH("hw.mps.max_evtframes", &sc->max_evtframes);
/* Grab the unit-instance variables */
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level",
device_get_unit(sc->mps_dev));
bzero(mps_debug, 80);
if (TUNABLE_STR_FETCH(tmpstr, mps_debug, 80) != 0)
mps_parse_debug(sc, mps_debug);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msi",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_msix",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_io_pages",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);
bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.exclude_ids",
device_get_unit(sc->mps_dev));
TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.enable_ssu",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.spinup_wait_time",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.use_phy_num",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_reqframes",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_prireqframes",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_replyframes",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);
snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_evtframes",
device_get_unit(sc->mps_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
}
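A note on usage: the tunables fetched above can be set from loader.conf(5) using exactly the names passed to the TUNABLE_INT_FETCH/TUNABLE_STR_FETCH calls, for example hw.mps.max_chains="4096" globally or dev.mps.0.debug_level="info,fault" for unit 0. The values shown are arbitrary illustrations, not recommendations.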
static void
mps_setup_sysctl(struct mps_softc *sc)
{
struct sysctl_ctx_list *sysctl_ctx = NULL;
struct sysctl_oid *sysctl_tree = NULL;
char tmpstr[80], tmpstr2[80];
/*
* Setup the sysctl variable so the user can change the debug level
* on the fly.
*/
snprintf(tmpstr, sizeof(tmpstr), "MPS controller %d",
device_get_unit(sc->mps_dev));
snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev));
sysctl_ctx = device_get_sysctl_ctx(sc->mps_dev);
if (sysctl_ctx != NULL)
sysctl_tree = device_get_sysctl_tree(sc->mps_dev);
if (sysctl_tree == NULL) {
sysctl_ctx_init(&sc->sysctl_ctx);
sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2,
CTLFLAG_RD, 0, tmpstr);
if (sc->sysctl_tree == NULL)
return;
sysctl_ctx = &sc->sysctl_ctx;
sysctl_tree = sc->sysctl_tree;
}
SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW |CTLFLAG_MPSAFE,
sc, 0, mps_debug_sysctl, "A", "mps debug level");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
"Disable the use of MSI-X interrupts");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
"Disable the use of MSI interrupts");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0,
"User-defined maximum number of MSIX queues");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0,
"Negotiated number of MSIX queues");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0,
"Total number of allocated request frames");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0,
"Total number of allocated high priority request frames");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0,
"Total number of allocated reply frames");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0,
"Total number of event frames allocated");
SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version,
strlen(sc->fw_version), "firmware version");
SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "driver_version", CTLFLAG_RW, MPS_DRIVER_VERSION,
strlen(MPS_DRIVER_VERSION), "driver version");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "io_cmds_active", CTLFLAG_RD,
&sc->io_cmds_active, 0, "number of currently active commands");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
&sc->io_cmds_highwater, 0, "maximum active commands seen");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_free", CTLFLAG_RD,
&sc->chain_free, 0, "number of free chain elements");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
&sc->chain_free_lowwater, 0,"lowest number of free chain elements");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_chains", CTLFLAG_RD,
&sc->max_chains, 0,"maximum chain frames that will be allocated");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "max_io_pages", CTLFLAG_RD,
&sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use "
"IOCFacts)");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
"enable SSU to SATA SSD/HDD at shutdown");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
&sc->chain_alloc_fail, "chain allocation failures");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
&sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
"spinup after SATA ID error");
SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "mapping_table_dump", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
mps_mapping_dump, "A", "Mapping Table Dump");
SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "encl_table_dump", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
mps_mapping_encl_dump, "A", "Enclosure Table Dump");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
"Use the phy number for enumeration");
}
static struct mps_debug_string {
char *name;
int flag;
} mps_debug_strings[] = {
{"info", MPS_INFO},
{"fault", MPS_FAULT},
{"event", MPS_EVENT},
{"log", MPS_LOG},
{"recovery", MPS_RECOVERY},
{"error", MPS_ERROR},
{"init", MPS_INIT},
{"xinfo", MPS_XINFO},
{"user", MPS_USER},
{"mapping", MPS_MAPPING},
{"trace", MPS_TRACE}
};
enum mps_debug_level_combiner {
COMB_NONE,
COMB_ADD,
COMB_SUB
};
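/*
* The debug_level sysctl handled below accepts either a numeric mask or a
* list of the names from mps_debug_strings above, separated by ',' or ':'
* and optionally prefixed with '+' (add to the current mask) or '-' (clear
* from it); see mps_parse_debug().  Illustrative examples (the node is
* normally dev.mps.<unit>.debug_level, or hw.mps.<unit> when the fallback
* tree created in mps_setup_sysctl() is used):
*
*	sysctl dev.mps.0.debug_level=info,error
*	sysctl dev.mps.0.debug_level=+trace
*	sysctl dev.mps.0.debug_level=0x31
*/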
static int
mps_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
struct mps_softc *sc;
struct mps_debug_string *string;
struct sbuf *sbuf;
char *buffer;
size_t sz;
int i, len, debug, error;
sc = (struct mps_softc *)arg1;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
debug = sc->mps_debug;
sbuf_printf(sbuf, "%#x", debug);
sz = sizeof(mps_debug_strings) / sizeof(mps_debug_strings[0]);
for (i = 0; i < sz; i++) {
string = &mps_debug_strings[i];
if (debug & string->flag)
sbuf_printf(sbuf, ",%s", string->name);
}
error = sbuf_finish(sbuf);
sbuf_delete(sbuf);
if (error || req->newptr == NULL)
return (error);
len = req->newlen - req->newidx;
if (len == 0)
return (0);
/* Allocate one extra byte so the new string is always NUL-terminated. */
buffer = malloc(len + 1, M_MPT2, M_ZERO|M_WAITOK);
error = SYSCTL_IN(req, buffer, len);
mps_parse_debug(sc, buffer);
free(buffer, M_MPT2);
return (error);
}
static void
mps_parse_debug(struct mps_softc *sc, char *list)
{
struct mps_debug_string *string;
enum mps_debug_level_combiner op;
char *token, *endtoken;
size_t sz;
int flags, i;
if (list == NULL || *list == '\0')
return;
if (*list == '+') {
op = COMB_ADD;
list++;
} else if (*list == '-') {
op = COMB_SUB;
list++;
} else
op = COMB_NONE;
if (*list == '\0')
return;
flags = 0;
sz = sizeof(mps_debug_strings) / sizeof(mps_debug_strings[0]);
while ((token = strsep(&list, ":,")) != NULL) {
/* Handle integer flags */
flags |= strtol(token, &endtoken, 0);
if (token != endtoken)
continue;
/* Handle text flags */
for (i = 0; i < sz; i++) {
string = &mps_debug_strings[i];
if (strcasecmp(token, string->name) == 0) {
flags |= string->flag;
break;
}
}
}
switch (op) {
case COMB_NONE:
sc->mps_debug = flags;
break;
case COMB_ADD:
sc->mps_debug |= flags;
break;
case COMB_SUB:
sc->mps_debug &= (~flags);
break;
}
return;
}
int
mps_attach(struct mps_softc *sc)
{
int error;
MPS_FUNCTRACE(sc);
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF);
callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0);
callout_init_mtx(&sc->device_check_callout, &sc->mps_mtx, 0);
TAILQ_INIT(&sc->event_list);
timevalclear(&sc->lastfail);
if ((error = mps_transition_ready(sc)) != 0) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "failed to transition "
"ready\n");
return (error);
}
sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2,
M_ZERO|M_NOWAIT);
if (!sc->facts) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "Cannot allocate memory, "
"exit\n");
return (ENOMEM);
}
/*
* Get IOC Facts and allocate all structures based on this information.
* A Diag Reset will also call mps_iocfacts_allocate and re-read the IOC
* Facts. If relevant values have changed in IOC Facts, this function
* will free all of the memory based on IOC Facts and reallocate that
* memory. If this fails, any allocated memory should already be freed.
*/
if ((error = mps_iocfacts_allocate(sc, TRUE)) != 0) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC Facts based allocation "
"failed with error %d, exit\n", error);
return (error);
}
/* Start the periodic watchdog check on the IOC Doorbell */
mps_periodic(sc);
/*
* The portenable will kick off discovery events that will drive the
* rest of the initialization process. The CAM/SAS module will
* hold up the boot sequence until discovery is complete.
*/
sc->mps_ich.ich_func = mps_startup;
sc->mps_ich.ich_arg = sc;
if (config_intrhook_establish(&sc->mps_ich) != 0) {
mps_dprint(sc, MPS_INIT|MPS_ERROR,
"Cannot establish MPS config hook\n");
error = EINVAL;
}
/*
* Allow IR to shutdown gracefully when shutdown occurs.
*/
sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
if (sc->shutdown_eh == NULL)
mps_dprint(sc, MPS_INIT|MPS_ERROR,
"shutdown event registration failed\n");
mps_setup_sysctl(sc);
sc->mps_flags |= MPS_FLAGS_ATTACH_DONE;
mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
return (error);
}
/* Run through any late-start handlers. */
static void
mps_startup(void *arg)
{
struct mps_softc *sc;
sc = (struct mps_softc *)arg;
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
mps_lock(sc);
mps_unmask_intr(sc);
/* initialize device mapping tables */
mps_base_static_config_pages(sc);
mps_mapping_initialize(sc);
mpssas_startup(sc);
mps_unlock(sc);
mps_dprint(sc, MPS_INIT, "disestablish config intrhook\n");
config_intrhook_disestablish(&sc->mps_ich);
sc->mps_ich.ich_arg = NULL;
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
}
/* Periodic watchdog. Is called with the driver lock already held. */
static void
mps_periodic(void *arg)
{
struct mps_softc *sc;
uint32_t db;
sc = (struct mps_softc *)arg;
if (sc->mps_flags & MPS_FLAGS_SHUTDOWN)
return;
db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mps_dprint(sc, MPS_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
mps_reinit(sc);
}
callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc);
}
static void
mps_log_evt_handler(struct mps_softc *sc, uintptr_t data,
MPI2_EVENT_NOTIFICATION_REPLY *event)
{
MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
MPS_DPRINT_EVENT(sc, generic, event);
switch (event->Event) {
case MPI2_EVENT_LOG_DATA:
mps_dprint(sc, MPS_EVENT, "MPI2_EVENT_LOG_DATA:\n");
if (sc->mps_debug & MPS_EVENT)
hexdump(event->EventData, event->EventDataLength, NULL, 0);
break;
case MPI2_EVENT_LOG_ENTRY_ADDED:
entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
mps_dprint(sc, MPS_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
"0x%x Sequence %d:\n", entry->LogEntryQualifier,
entry->LogSequence);
break;
default:
break;
}
return;
}
static int
mps_attach_log(struct mps_softc *sc)
{
u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
bzero(events, 16);
setbit(events, MPI2_EVENT_LOG_DATA);
setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
mps_register_events(sc, events, mps_log_evt_handler, NULL,
&sc->mps_log_eh);
return (0);
}
static int
mps_detach_log(struct mps_softc *sc)
{
if (sc->mps_log_eh != NULL)
mps_deregister_events(sc, sc->mps_log_eh);
return (0);
}
/*
* Free all of the driver resources and detach submodules. Should be called
* without the lock held.
*/
int
mps_free(struct mps_softc *sc)
{
int error;
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
/* Turn off the watchdog */
mps_lock(sc);
sc->mps_flags |= MPS_FLAGS_SHUTDOWN;
mps_unlock(sc);
/* Lock must not be held for this */
callout_drain(&sc->periodic);
callout_drain(&sc->device_check_callout);
if (((error = mps_detach_log(sc)) != 0) ||
((error = mps_detach_sas(sc)) != 0)) {
mps_dprint(sc, MPS_INIT|MPS_FAULT, "failed to detach "
"subsystems, exit\n");
return (error);
}
mps_detach_user(sc);
/* Put the IOC back in the READY state. */
mps_lock(sc);
if ((error = mps_transition_ready(sc)) != 0) {
mps_unlock(sc);
return (error);
}
mps_unlock(sc);
if (sc->facts != NULL)
free(sc->facts, M_MPT2);
/*
* Free all buffers that are based on IOC Facts. A Diag Reset may need
* to free these buffers too.
*/
mps_iocfacts_free(sc);
if (sc->sysctl_tree != NULL)
sysctl_ctx_free(&sc->sysctl_ctx);
/* Deregister the shutdown function */
if (sc->shutdown_eh != NULL)
EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
mtx_destroy(&sc->mps_mtx);
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
return (0);
}
static __inline void
mps_complete_command(struct mps_softc *sc, struct mps_command *cm)
{
MPS_FUNCTRACE(sc);
if (cm == NULL) {
mps_dprint(sc, MPS_ERROR, "Completing NULL command\n");
return;
}
if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
if (cm->cm_complete != NULL) {
mps_dprint(sc, MPS_TRACE,
"%s cm %p calling cm_complete %p data %p reply %p\n",
__func__, cm, cm->cm_complete, cm->cm_complete_data,
cm->cm_reply);
cm->cm_complete(sc, cm);
}
if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
mps_dprint(sc, MPS_TRACE, "waking up %p\n", cm);
wakeup(cm);
}
if (cm->cm_sc->io_cmds_active != 0) {
cm->cm_sc->io_cmds_active--;
} else {
mps_dprint(sc, MPS_ERROR, "Warning: io_cmds_active is "
"out of sync - resynching to 0\n");
}
}
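/*
* mps_sas_log_info() below decodes an MPI log info word using the bit
* layout in its local union: bus_type in the top nibble, then originator,
* then an 8-bit code and a 16-bit subcode.  For example, a hypothetical
* log_info of 0x31111000 decodes as bus_type 3 (SAS), originator 1 (PL),
* code 0x11 and subcode 0x1000.
*/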
static void
mps_sas_log_info(struct mps_softc *sc , u32 log_info)
{
union loginfo_type {
u32 loginfo;
struct {
u32 subcode:16;
u32 code:8;
u32 originator:4;
u32 bus_type:4;
} dw;
};
union loginfo_type sas_loginfo;
char *originator_str = NULL;
sas_loginfo.loginfo = log_info;
if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
return;
/* each nexus loss loginfo */
if (log_info == 0x31170000)
return;
/* eat the loginfos associated with task aborts */
if ((log_info == 0x30050000 || log_info ==
0x31140000 || log_info == 0x31130000))
return;
switch (sas_loginfo.dw.originator) {
case 0:
originator_str = "IOP";
break;
case 1:
originator_str = "PL";
break;
case 2:
originator_str = "IR";
break;
}
mps_dprint(sc, MPS_LOG, "log_info(0x%08x): originator(%s), "
"code(0x%02x), sub_code(0x%04x)\n", log_info,
originator_str, sas_loginfo.dw.code,
sas_loginfo.dw.subcode);
}
static void
mps_display_reply_info(struct mps_softc *sc, uint8_t *reply)
{
MPI2DefaultReply_t *mpi_reply;
u16 sc_status;
mpi_reply = (MPI2DefaultReply_t*)reply;
sc_status = le16toh(mpi_reply->IOCStatus);
if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
mps_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
}
void
mps_intr(void *data)
{
struct mps_softc *sc;
uint32_t status;
sc = (struct mps_softc *)data;
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
/*
* Check interrupt status register to flush the bus. This is
* needed for both INTx interrupts and driver-driven polling
*/
status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
return;
mps_lock(sc);
mps_intr_locked(data);
mps_unlock(sc);
return;
}
/*
* In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
* chip. Hopefully this theory is correct.
*/
void
mps_intr_msi(void *data)
{
struct mps_softc *sc;
sc = (struct mps_softc *)data;
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
mps_lock(sc);
mps_intr_locked(data);
mps_unlock(sc);
return;
}
/*
* The locking is overly broad and simplistic, but easy to deal with for now.
*/
void
mps_intr_locked(void *data)
{
MPI2_REPLY_DESCRIPTORS_UNION *desc;
struct mps_softc *sc;
struct mps_command *cm = NULL;
uint8_t flags;
u_int pq;
MPI2_DIAG_RELEASE_REPLY *rel_rep;
mps_fw_diagnostic_buffer_t *pBuffer;
sc = (struct mps_softc *)data;
pq = sc->replypostindex;
mps_dprint(sc, MPS_TRACE,
"%s sc %p starting with replypostindex %u\n",
__func__, sc, sc->replypostindex);
for ( ;; ) {
cm = NULL;
desc = &sc->post_queue[sc->replypostindex];
flags = desc->Default.ReplyFlags &
MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
|| (le32toh(desc->Words.High) == 0xffffffff))
break;
/* increment the replypostindex now, so that event handlers
* and cm completion handlers which decide to do a diag
* reset can zero it without it getting incremented again
* afterwards, and we break out of this loop on the next
* iteration since the reply post queue has been cleared to
* 0xFF and all descriptors look unused (which they are).
*/
if (++sc->replypostindex >= sc->pqdepth)
sc->replypostindex = 0;
switch (flags) {
case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
cm->cm_reply = NULL;
break;
case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
{
uint32_t baddr;
uint8_t *reply;
/*
* Re-compose the reply address from the address
* sent back from the chip. The ReplyFrameAddress
* is the lower 32 bits of the physical address of
* particular reply frame. Convert that address to
* host format, and then use that to provide the
* offset against the virtual address base
* (sc->reply_frames).
*/
baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
reply = sc->reply_frames +
(baddr - ((uint32_t)sc->reply_busaddr));
/*
* Make sure the reply we got back is in a valid
* range. If not, go ahead and panic here, since
* we'll probably panic as soon as we dereference the
* reply pointer anyway.
*/
if ((reply < sc->reply_frames)
|| (reply > (sc->reply_frames +
(sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) {
printf("%s: WARNING: reply %p out of range!\n",
__func__, reply);
printf("%s: reply_frames %p, fqdepth %d, "
"frame size %d\n", __func__,
sc->reply_frames, sc->fqdepth,
sc->facts->ReplyFrameSize * 4);
printf("%s: baddr %#x,\n", __func__, baddr);
/* LSI-TODO: See the Linux code; a graceful exit is needed here. */
panic("Reply address out of range");
}
if (le16toh(desc->AddressReply.SMID) == 0) {
if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
MPI2_FUNCTION_DIAG_BUFFER_POST) {
/*
* If SMID is 0 for Diag Buffer Post,
* this implies that the reply is due to
* a release function with a status that
* the buffer has been released. Set
* the buffer flags accordingly.
*/
rel_rep =
(MPI2_DIAG_RELEASE_REPLY *)reply;
if ((le16toh(rel_rep->IOCStatus) &
MPI2_IOCSTATUS_MASK) ==
MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
{
pBuffer =
&sc->fw_diag_buffer_list[
rel_rep->BufferType];
pBuffer->valid_data = TRUE;
pBuffer->owned_by_firmware =
FALSE;
pBuffer->immediate = FALSE;
}
} else
mps_dispatch_event(sc, baddr,
(MPI2_EVENT_NOTIFICATION_REPLY *)
reply);
} else {
cm = &sc->commands[le16toh(desc->AddressReply.SMID)];
cm->cm_reply = reply;
cm->cm_reply_data =
le32toh(desc->AddressReply.ReplyFrameAddress);
}
break;
}
case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
default:
/* Unhandled */
mps_dprint(sc, MPS_ERROR, "Unhandled reply 0x%x\n",
desc->Default.ReplyFlags);
cm = NULL;
break;
}
if (cm != NULL) {
// Print Error reply frame
if (cm->cm_reply)
mps_display_reply_info(sc,cm->cm_reply);
mps_complete_command(sc, cm);
}
desc->Words.Low = 0xffffffff;
desc->Words.High = 0xffffffff;
}
if (pq != sc->replypostindex) {
mps_dprint(sc, MPS_TRACE,
"%s sc %p writing postindex %d\n",
__func__, sc, sc->replypostindex);
mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex);
}
return;
}
static void
mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
MPI2_EVENT_NOTIFICATION_REPLY *reply)
{
struct mps_event_handle *eh;
int event, handled = 0;
event = le16toh(reply->Event);
TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
if (isset(eh->mask, event)) {
eh->callback(sc, data, reply);
handled++;
}
}
if (handled == 0)
mps_dprint(sc, MPS_EVENT, "Unhandled event 0x%x\n", le16toh(event));
/*
* This is the only place that the event/reply should be freed.
* Anything wanting to hold onto the event data should have
* already copied it into their own storage.
*/
mps_free_reply(sc, data);
}
static void
mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm)
{
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
if (cm->cm_reply)
MPS_DPRINT_EVENT(sc, generic,
(MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
mps_free_command(sc, cm);
/* next, send a port enable */
mpssas_startup(sc);
}
/*
* For both register_events and update_events, the caller supplies a bitmap
* of events that it _wants_. These functions then turn that into a bitmask
* suitable for the controller.
*/
int
mps_register_events(struct mps_softc *sc, u32 *mask,
mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle)
{
struct mps_event_handle *eh;
int error = 0;
eh = malloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO);
if (!eh) {
mps_dprint(sc, MPS_ERROR, "Cannot allocate event memory\n");
return (ENOMEM);
}
eh->callback = cb;
eh->data = data;
TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
if (mask != NULL)
error = mps_update_events(sc, eh, mask);
*handle = eh;
return (error);
}
int
mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle,
u32 *mask)
{
MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL;
struct mps_command *cm;
int error, i;
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
if ((mask != NULL) && (handle != NULL))
bcopy(mask, &handle->mask[0], sizeof(u32) *
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
sc->event_mask[i] = -1;
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
sc->event_mask[i] &= ~handle->mask[i];
if ((cm = mps_alloc_command(sc)) == NULL)
return (EBUSY);
evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
evtreq->MsgFlags = 0;
evtreq->SASBroadcastPrimitiveMasks = 0;
#ifdef MPS_DEBUG_ALL_EVENTS
{
u_char fullmask[16];
memset(fullmask, 0x00, 16);
bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) *
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
}
#else
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
evtreq->EventMasks[i] =
htole32(sc->event_mask[i]);
#endif
cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
cm->cm_data = NULL;
error = mps_wait_command(sc, &cm, 60, 0);
if (cm != NULL)
reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
if ((reply == NULL) ||
(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
error = ENXIO;
if (reply)
MPS_DPRINT_EVENT(sc, generic, reply);
mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error);
if (cm != NULL)
mps_free_command(sc, cm);
return (error);
}
static int
mps_reregister_events(struct mps_softc *sc)
{
MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
struct mps_command *cm;
struct mps_event_handle *eh;
int error, i;
mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
/* first, reregister events */
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
sc->event_mask[i] = -1;
TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
sc->event_mask[i] &= ~eh->mask[i];
}
if ((cm = mps_alloc_command(sc)) == NULL)
return (EBUSY);
evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
evtreq->MsgFlags = 0;
evtreq->SASBroadcastPrimitiveMasks = 0;
#ifdef MPS_DEBUG_ALL_EVENTS
{
u_char fullmask[16];
memset(fullmask, 0x00, 16);
bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) *
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
}
#else
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
evtreq->EventMasks[i] =
htole32(sc->event_mask[i]);
#endif
cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
cm->cm_data = NULL;
cm->cm_complete = mps_reregister_events_complete;
error = mps_map_command(sc, cm);
mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__,
error);
return (error);
}
void
mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle)
{
TAILQ_REMOVE(&sc->event_list, handle, eh_list);
free(handle, M_MPT2);
}
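/*
* A minimal usage sketch for the event API above (the handler, data and
* event choice are illustrative); compare mps_attach_log() and
* mps_detach_log():
*
*	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
*
*	bzero(events, sizeof(events));
*	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
*	error = mps_register_events(sc, events, my_evt_handler, my_data,
*	    &my_eh);
*	...
*	mps_deregister_events(sc, my_eh);
*/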
/*
* Add a chain element as the next SGE for the specified command.
* Reset cm_sge and cm_sgesize to indicate all the available space.
*/
static int
mps_add_chain(struct mps_command *cm)
{
MPI2_SGE_CHAIN32 *sgc;
struct mps_chain *chain;
int space;
if (cm->cm_sglsize < MPS_SGC_SIZE)
panic("MPS: Need SGE Error Code\n");
chain = mps_alloc_chain(cm->cm_sc);
if (chain == NULL)
return (ENOBUFS);
space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
/*
* Note: a double-linked list is used to make it easier to
* walk for debugging.
*/
TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain;
sgc->Length = htole16(space);
sgc->NextChainOffset = 0;
/* TODO: Setting sgc->Flags below looks like a bug; one might expect
* sgc->Flags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
*     MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
* This is fine, however, because a simple element is not being used here.
* In the case of MPI2_SGE_CHAIN32, the Length and Flags fields are separate.
*/
sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT;
sgc->Address = htole32(chain->chain_busaddr);
cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple;
cm->cm_sglsize = space;
return (0);
}
/*
* Add one scatter-gather element (chain, simple, transaction context)
* to the scatter-gather list for a command. Maintain cm_sglsize and
* cm_sge as the remaining size and pointer to the next SGE to fill
* in, respectively.
*/
int
mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft)
{
MPI2_SGE_TRANSACTION_UNION *tc = sgep;
MPI2_SGE_SIMPLE64 *sge = sgep;
int error, type;
uint32_t saved_buf_len, saved_address_low, saved_address_high;
type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK);
#ifdef INVARIANTS
switch (type) {
case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: {
if (len != tc->DetailsLength + 4)
panic("TC %p length %u or %zu?", tc,
tc->DetailsLength + 4, len);
}
break;
case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
/* Driver only uses 32-bit chain elements */
if (len != MPS_SGC_SIZE)
panic("CHAIN %p length %u or %zu?", sgep,
MPS_SGC_SIZE, len);
break;
case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
/* Driver only uses 64-bit SGE simple elements */
if (len != MPS_SGE64_SIZE)
panic("SGE simple %p length %u or %zu?", sge,
MPS_SGE64_SIZE, len);
if (((le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT) &
MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0)
panic("SGE simple %p not marked 64-bit?", sge);
break;
default:
panic("Unexpected SGE %p, flags %02x", tc, tc->Flags);
}
#endif
/*
* case 1: 1 more segment, enough room for it
* case 2: 2 more segments, enough room for both
* case 3: >=2 more segments, only enough room for 1 and a chain
* case 4: >=1 more segment, enough room for only a chain
* case 5: >=1 more segment, no room for anything (error)
*/
/*
* There should be room for at least a chain element, or this
* code is buggy. Case (5).
*/
if (cm->cm_sglsize < MPS_SGC_SIZE)
panic("MPS: Need SGE Error Code\n");
if (segsleft >= 2 &&
cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) {
/*
* There are 2 or more segments left to add, and only
* enough room for 1 and a chain. Case (3).
*
* Mark as last element in this chain if necessary.
*/
if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
sge->FlagsLength |= htole32(
MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT);
}
/*
* Add the item then a chain. Do the chain now,
* rather than on the next iteration, to simplify
* understanding the code.
*/
cm->cm_sglsize -= len;
bcopy(sgep, cm->cm_sge, len);
cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
return (mps_add_chain(cm));
}
if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) {
/*
* 1 or more segment, enough room for only a chain.
* Hope the previous element wasn't a Simple entry
* that needed to be marked with
* MPI2_SGE_FLAGS_LAST_ELEMENT. Case (4).
*/
if ((error = mps_add_chain(cm)) != 0)
return (error);
}
#ifdef INVARIANTS
/* Case 1: 1 more segment, enough room for it. */
if (segsleft == 1 && cm->cm_sglsize < len)
panic("1 seg left and no room? %u versus %zu",
cm->cm_sglsize, len);
/* Case 2: 2 more segments, enough room for both */
if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE)
panic("2 segs left and no room? %u versus %zu",
cm->cm_sglsize, len);
#endif
if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
/*
* If this is a bi-directional request, need to account for that
* here. Save the pre-filled sge values. These will be used
* either for the 2nd SGL or for a single direction SGL. If
* cm_out_len is non-zero, this is a bi-directional request, so
* fill in the OUT SGL first, then the IN SGL, otherwise just
* fill in the IN SGL. Note that at this time, when filling in
* 2 SGL's for a bi-directional request, they both use the same
* DMA buffer (same cm command).
*/
saved_buf_len = le32toh(sge->FlagsLength) & 0x00FFFFFF;
saved_address_low = sge->Address.Low;
saved_address_high = sge->Address.High;
if (cm->cm_out_len) {
sge->FlagsLength = htole32(cm->cm_out_len |
((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_HOST_TO_IOC |
MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
MPI2_SGE_FLAGS_SHIFT));
cm->cm_sglsize -= len;
bcopy(sgep, cm->cm_sge, len);
cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge
+ len);
}
saved_buf_len |=
((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_LAST_ELEMENT |
MPI2_SGE_FLAGS_END_OF_LIST |
MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
MPI2_SGE_FLAGS_SHIFT);
if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
saved_buf_len |=
((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
MPI2_SGE_FLAGS_SHIFT);
} else {
saved_buf_len |=
((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
MPI2_SGE_FLAGS_SHIFT);
}
sge->FlagsLength = htole32(saved_buf_len);
sge->Address.Low = saved_address_low;
sge->Address.High = saved_address_high;
}
cm->cm_sglsize -= len;
bcopy(sgep, cm->cm_sge, len);
cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
return (0);
}
/*
* Add one dma segment to the scatter-gather list for a command.
*/
int
mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags,
int segsleft)
{
MPI2_SGE_SIMPLE64 sge;
/*
* This driver always uses 64-bit address elements for simplicity.
*/
bzero(&sge, sizeof(sge));
flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
sge.FlagsLength = htole32(len | (flags << MPI2_SGE_FLAGS_SHIFT));
mps_from_u64(pa, &sge.Address);
return (mps_push_sge(cm, &sge, sizeof sge, segsleft));
}
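/*
* S/G list construction path: the busdma callback below walks the segments
* returned by the bus_dmamap_load() variants and calls mps_add_dmaseg() for
* each one, which wraps the segment in a 64-bit simple element and hands it
* to mps_push_sge(); when the request frame runs out of room, mps_push_sge()
* links in a chain frame via mps_add_chain() and continues filling SGEs
* there.
*/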
static void
mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct mps_softc *sc;
struct mps_command *cm;
u_int i, dir, sflags;
cm = (struct mps_command *)arg;
sc = cm->cm_sc;
/*
* In this case, just print out a warning and let the chip tell the
* user they did the wrong thing.
*/
if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
mps_dprint(sc, MPS_ERROR,
"%s: warning: busdma returned %d segments, "
"more than the %d allowed\n", __func__, nsegs,
cm->cm_max_segs);
}
/*
* Set up DMA direction flags. Bi-directional requests are also handled
* here. In that case, both direction flags will be set.
*/
sflags = 0;
if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
/*
* We have to add a special case for SMP passthrough, there
* is no easy way to generically handle it. The first
* S/G element is used for the command (therefore the
* direction bit needs to be set). The second one is used
* for the reply. We'll leave it to the caller to make
* sure we only have two buffers.
*/
/*
* Even though the busdma man page says it doesn't make
* sense to have both direction flags, it does in this case.
* We have one s/g element being accessed in each direction.
*/
dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
/*
* Set the direction flag on the first buffer in the SMP
* passthrough request. We'll clear it for the second one.
*/
sflags |= MPI2_SGE_FLAGS_DIRECTION |
MPI2_SGE_FLAGS_END_OF_BUFFER;
} else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
dir = BUS_DMASYNC_PREWRITE;
} else
dir = BUS_DMASYNC_PREREAD;
for (i = 0; i < nsegs; i++) {
if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) {
sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
}
error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
sflags, nsegs - i);
if (error != 0) {
/* Resource shortage, roll back! */
if (ratecheck(&sc->lastfail, &mps_chainfail_interval))
mps_dprint(sc, MPS_INFO, "Out of chain frames, "
"consider increasing hw.mps.max_chains.\n");
cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED;
mps_complete_command(sc, cm);
return;
}
}
bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
mps_enqueue_request(sc, cm);
return;
}
static void
mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
int error)
{
mps_data_cb(arg, segs, nsegs, error);
}
/*
* This is the routine to enqueue commands asynchronously.
* Note that the only error path here is from bus_dmamap_load(), which can
* return EINPROGRESS if it is waiting for resources. Other than this, it's
* assumed that if you have a command in-hand, then you have enough credits
* to use it.
*/
int
mps_map_command(struct mps_softc *sc, struct mps_command *cm)
{
int error = 0;
if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
&cm->cm_uio, mps_data_cb2, cm, 0);
} else if (cm->cm_flags & MPS_CM_FLAGS_USE_CCB) {
error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
cm->cm_data, mps_data_cb, cm, 0);
} else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);
} else {
/* Add a zero-length element as needed */
if (cm->cm_sge != NULL)
mps_add_dmaseg(cm, 0, 0, 0, 1);
mps_enqueue_request(sc, cm);
}
return (error);
}
/*
* This is the routine to enqueue commands synchronously. An error of
* EINPROGRESS from mps_map_command() is ignored since the command will
* be executed and enqueued automatically. Other errors come from msleep().
*/
int
mps_wait_command(struct mps_softc *sc, struct mps_command **cmp, int timeout,
int sleep_flag)
{
int error, rc;
struct timeval cur_time, start_time;
struct mps_command *cm = *cmp;
if (sc->mps_flags & MPS_FLAGS_DIAGRESET)
return EBUSY;
cm->cm_complete = NULL;
cm->cm_flags |= MPS_CM_FLAGS_POLLED;
error = mps_map_command(sc, cm);
if ((error != 0) && (error != EINPROGRESS))
return (error);
/*
* Check for context and wait for 50 mSec at a time until time has
* expired or the command has finished. If msleep can't be used, need
* to poll.
*/
if (curthread->td_no_sleeping != 0)
sleep_flag = NO_SLEEP;
getmicrouptime(&start_time);
if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP) {
cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
error = msleep(cm, &sc->mps_mtx, 0, "mpswait", timeout*hz);
if (error == EWOULDBLOCK) {
/*
* Record the actual elapsed time in the case of a
* timeout for the message below.
*/
getmicrouptime(&cur_time);
timevalsub(&cur_time, &start_time);
}
} else {
while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
mps_intr_locked(sc);
if (sleep_flag == CAN_SLEEP)
pause("mpswait", hz/20);
else
DELAY(50000);
getmicrouptime(&cur_time);
timevalsub(&cur_time, &start_time);
if (cur_time.tv_sec > timeout) {
error = EWOULDBLOCK;
break;
}
}
}
if (error == EWOULDBLOCK) {
mps_dprint(sc, MPS_FAULT, "Calling Reinit from %s, timeout=%d,"
" elapsed=%jd\n", __func__, timeout,
(intmax_t)cur_time.tv_sec);
rc = mps_reinit(sc);
mps_dprint(sc, MPS_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
"failed");
if (sc->mps_flags & MPS_FLAGS_REALLOCATED) {
/*
* Tell the caller that we freed the command in a
* reinit.
*/
*cmp = NULL;
}
error = ETIMEDOUT;
}
return (error);
}
/*
* The MPT driver had a verbose interface for config pages. In this driver,
* the interface is reduced to much simpler terms, similar to the Linux driver.
*/
int
mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params)
{
MPI2_CONFIG_REQUEST *req;
struct mps_command *cm;
int error;
if (sc->mps_flags & MPS_FLAGS_BUSY) {
return (EBUSY);
}
cm = mps_alloc_command(sc);
if (cm == NULL) {
return (EBUSY);
}
req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
req->Function = MPI2_FUNCTION_CONFIG;
req->Action = params->action;
req->SGLFlags = 0;
req->ChainOffset = 0;
req->PageAddress = params->page_address;
if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
hdr = &params->hdr.Ext;
req->ExtPageType = hdr->ExtPageType;
req->ExtPageLength = hdr->ExtPageLength;
req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
req->Header.PageLength = 0; /* Must be set to zero */
req->Header.PageNumber = hdr->PageNumber;
req->Header.PageVersion = hdr->PageVersion;
} else {
MPI2_CONFIG_PAGE_HEADER *hdr;
hdr = &params->hdr.Struct;
req->Header.PageType = hdr->PageType;
req->Header.PageNumber = hdr->PageNumber;
req->Header.PageLength = hdr->PageLength;
req->Header.PageVersion = hdr->PageVersion;
}
cm->cm_data = params->buffer;
cm->cm_length = params->length;
if (cm->cm_data != NULL) {
cm->cm_sge = &req->PageBufferSGE;
cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
} else
cm->cm_sge = NULL;
cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
cm->cm_complete_data = params;
if (params->callback != NULL) {
cm->cm_complete = mps_config_complete;
return (mps_map_command(sc, cm));
} else {
error = mps_wait_command(sc, &cm, 0, CAN_SLEEP);
if (error) {
mps_dprint(sc, MPS_FAULT,
"Error %d reading config page\n", error);
if (cm != NULL)
mps_free_command(sc, cm);
return (error);
}
mps_config_complete(sc, cm);
}
return (0);
}
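/*
* A minimal synchronous usage sketch for mps_read_config_page() above.  The
* page type, page number and destination buffer are illustrative; a real
* caller fills the header with values appropriate for the page being read
* and may supply a callback instead of waiting:
*
*	struct mps_config_params params;
*
*	bzero(&params, sizeof(params));
*	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
*	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
*	params.hdr.Struct.PageNumber = 3;
*	params.page_address = 0;
*	params.buffer = &bios_pg3;
*	params.length = sizeof(bios_pg3);
*	params.callback = NULL;
*	error = mps_read_config_page(sc, &params);
*/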
int
mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params)
{
return (EINVAL);
}
static void
mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
{
MPI2_CONFIG_REPLY *reply;
struct mps_config_params *params;
MPS_FUNCTRACE(sc);
params = cm->cm_complete_data;
if (cm->cm_data != NULL) {
bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
}
/*
* XXX KDM need to do more error recovery? This results in the
* device in question not getting probed.
*/
if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
params->status = MPI2_IOCSTATUS_BUSY;
goto done;
}
reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
if (reply == NULL) {
params->status = MPI2_IOCSTATUS_BUSY;
goto done;
}
params->status = reply->IOCStatus;
if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
params->hdr.Ext.ExtPageType = reply->ExtPageType;
params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
params->hdr.Ext.PageType = reply->Header.PageType;
params->hdr.Ext.PageNumber = reply->Header.PageNumber;
params->hdr.Ext.PageVersion = reply->Header.PageVersion;
} else {
params->hdr.Struct.PageType = reply->Header.PageType;
params->hdr.Struct.PageNumber = reply->Header.PageNumber;
params->hdr.Struct.PageLength = reply->Header.PageLength;
params->hdr.Struct.PageVersion = reply->Header.PageVersion;
}
done:
mps_free_command(sc, cm);
if (params->callback != NULL)
params->callback(sc, params);
return;
}
Index: head/sys/dev/mps/mps_mapping.c
===================================================================
--- head/sys/dev/mps/mps_mapping.c (revision 328217)
+++ head/sys/dev/mps/mps_mapping.c (revision 328218)
@@ -1,2678 +1,2678 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2011-2015 LSI Corp.
* Copyright (c) 2013-2015 Avago Technologies
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/sbuf.h>
#include <sys/eventhandler.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_sas.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
#include <dev/mps/mpi/mpi2_init.h>
#include <dev/mps/mpi/mpi2_tool.h>
#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_mapping.h>
/**
* _mapping_clear_map_entry - Clear a particular mapping entry.
* @map_entry: map table entry
*
* Returns nothing.
*/
static inline void
_mapping_clear_map_entry(struct dev_mapping_table *map_entry)
{
map_entry->physical_id = 0;
map_entry->device_info = 0;
map_entry->phy_bits = 0;
map_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
map_entry->dev_handle = 0;
map_entry->id = -1;
map_entry->missing_count = 0;
map_entry->init_complete = 0;
map_entry->TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
}
/**
* _mapping_clear_enc_entry - Clear a particular enclosure table entry.
* @enc_entry: enclosure table entry
*
* Returns nothing.
*/
static inline void
_mapping_clear_enc_entry(struct enc_mapping_table *enc_entry)
{
enc_entry->enclosure_id = 0;
enc_entry->start_index = MPS_MAPTABLE_BAD_IDX;
enc_entry->phy_bits = 0;
enc_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
enc_entry->enc_handle = 0;
enc_entry->num_slots = 0;
enc_entry->start_slot = 0;
enc_entry->missing_count = 0;
enc_entry->removal_flag = 0;
enc_entry->skip_search = 0;
enc_entry->init_complete = 0;
}
/**
* _mapping_commit_enc_entry - write a particular enc entry in DPM page0.
* @sc: per adapter object
* @enc_entry: enclosure table entry
*
* Returns 0 for success, non-zero for failure.
*/
static int
_mapping_commit_enc_entry(struct mps_softc *sc,
struct enc_mapping_table *et_entry)
{
Mpi2DriverMap0Entry_t *dpm_entry;
struct dev_mapping_table *mt_entry;
Mpi2ConfigReply_t mpi_reply;
Mpi2DriverMappingPage0_t config_page;
if (!sc->is_dpm_enable)
return 0;
memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
memcpy(&config_page.Header, (u8 *) sc->dpm_pg0,
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += et_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low =
( 0xFFFFFFFF & et_entry->enclosure_id);
dpm_entry->PhysicalIdentifier.High =
( et_entry->enclosure_id >> 32);
mt_entry = &sc->mapping_table[et_entry->start_index];
dpm_entry->DeviceIndex = htole16(mt_entry->id);
dpm_entry->MappingInformation = et_entry->num_slots;
dpm_entry->MappingInformation <<= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
dpm_entry->MappingInformation |= et_entry->missing_count;
dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation);
dpm_entry->PhysicalBitsMapping = htole32(et_entry->phy_bits);
dpm_entry->Reserved1 = 0;
mps_dprint(sc, MPS_MAPPING, "%s: Writing DPM entry %d for enclosure.\n",
__func__, et_entry->dpm_entry_num);
memcpy(&config_page.Entry, (u8 *)dpm_entry,
sizeof(Mpi2DriverMap0Entry_t));
if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
et_entry->dpm_entry_num)) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Write of DPM "
"entry %d for enclosure failed.\n", __func__,
et_entry->dpm_entry_num);
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping =
le32toh(dpm_entry->PhysicalBitsMapping);
return -1;
}
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping =
le32toh(dpm_entry->PhysicalBitsMapping);
return 0;
}
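/*
* Note that in both _mapping_commit_enc_entry() above and
* _mapping_commit_map_entry() below, the cached DPM page is kept in host
* byte order: the entry is byte-swapped to little-endian only for the
* duration of the mps_config_set_dpm_pg0() call and converted back
* afterwards on both the success and failure paths.
*/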
/**
* _mapping_commit_map_entry - write a particular map table entry in DPM page0.
* @sc: per adapter object
* @mt_entry: mapping table entry
*
* Returns 0 for success, non-zero for failure.
*/
static int
_mapping_commit_map_entry(struct mps_softc *sc,
struct dev_mapping_table *mt_entry)
{
Mpi2DriverMap0Entry_t *dpm_entry;
Mpi2ConfigReply_t mpi_reply;
Mpi2DriverMappingPage0_t config_page;
if (!sc->is_dpm_enable)
return 0;
/*
* It's possible that this Map Entry points to a BAD DPM index. This
* can happen if the Map Entry is for a missing device and the DPM
* entry that was being used by this device is now being used by some
* new device. So, check for a BAD DPM index and just return if so.
*/
if (mt_entry->dpm_entry_num == MPS_DPM_BAD_IDX) {
mps_dprint(sc, MPS_MAPPING, "%s: DPM entry location for target "
"%d is invalid. DPM will not be written.\n", __func__,
mt_entry->id);
return 0;
}
memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *) sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = dpm_entry + mt_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF &
mt_entry->physical_id);
dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = htole16(mt_entry->id);
dpm_entry->MappingInformation = htole16(mt_entry->missing_count);
dpm_entry->PhysicalBitsMapping = 0;
dpm_entry->Reserved1 = 0;
memcpy(&config_page.Entry, (u8 *)dpm_entry,
sizeof(Mpi2DriverMap0Entry_t));
mps_dprint(sc, MPS_MAPPING, "%s: Writing DPM entry %d for target %d.\n",
__func__, mt_entry->dpm_entry_num, mt_entry->id);
if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
mt_entry->dpm_entry_num)) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Write of DPM "
"entry %d for target %d failed.\n", __func__,
mt_entry->dpm_entry_num, mt_entry->id);
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
return -1;
}
dpm_entry->MappingInformation = le16toh(dpm_entry->MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
return 0;
}
/**
* _mapping_get_ir_maprange - get start and end index for IR map range.
* @sc: per adapter object
* @start_idx: place holder for start index
* @end_idx: place holder for end index
*
* The IR volumes can be mapped either at the start or at the end of the
* mapping table. This function determines where IR volume mapping starts
* and ends in the device mapping table.
*
* Returns nothing.
*/
static void
_mapping_get_ir_maprange(struct mps_softc *sc, u32 *start_idx, u32 *end_idx)
{
u16 volume_mapping_flags;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
*start_idx = 0;
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
*start_idx = 1;
} else
*start_idx = sc->max_devices - sc->max_volumes;
*end_idx = *start_idx + sc->max_volumes - 1;
}
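/*
* For example (sizes are illustrative): with max_devices = 1024 and
* max_volumes = 2, high-order volume mapping places the volumes at map
* table indices 1022 and 1023, while low-order mapping (with no reserved
* target ID 0) places them at indices 0 and 1.
*/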
/**
* _mapping_get_enc_idx_from_id - get enclosure index from enclosure ID
* @sc: per adapter object
* @enc_id: enclosure logical identifier
*
* Returns the index of enclosure entry on success or bad index.
*/
static u8
_mapping_get_enc_idx_from_id(struct mps_softc *sc, u64 enc_id,
u64 phy_bits)
{
struct enc_mapping_table *et_entry;
u8 enc_idx = 0;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
et_entry = &sc->enclosure_table[enc_idx];
if ((et_entry->enclosure_id == le64toh(enc_id)) &&
(!et_entry->phy_bits || (et_entry->phy_bits &
le32toh(phy_bits))))
return enc_idx;
}
return MPS_ENCTABLE_BAD_IDX;
}
/**
* _mapping_get_enc_idx_from_handle - get enclosure index from handle
* @sc: per adapter object
* @enc_id: enclosure handle
*
* Returns the index of enclosure entry on success or bad index.
*/
static u8
_mapping_get_enc_idx_from_handle(struct mps_softc *sc, u16 handle)
{
struct enc_mapping_table *et_entry;
u8 enc_idx = 0;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->missing_count)
continue;
if (et_entry->enc_handle == handle)
return enc_idx;
}
return MPS_ENCTABLE_BAD_IDX;
}
/**
* _mapping_get_high_missing_et_idx - get missing enclosure index
* @sc: per adapter object
*
* Search the enclosure table for the enclosure entry with the highest
* missing count and return its index.
*
* Returns the index of enclosure entry on success or bad index.
*/
static u8
_mapping_get_high_missing_et_idx(struct mps_softc *sc)
{
struct enc_mapping_table *et_entry;
u8 high_missing_count = 0;
u8 enc_idx, high_idx = MPS_ENCTABLE_BAD_IDX;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
et_entry = &sc->enclosure_table[enc_idx];
if ((et_entry->missing_count > high_missing_count) &&
!et_entry->skip_search) {
high_missing_count = et_entry->missing_count;
high_idx = enc_idx;
}
}
return high_idx;
}
/**
* _mapping_get_high_missing_mt_idx - get missing map table index
* @sc: per adapter object
*
* Search the map table for the device entry with the highest missing
* count and return its index.
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_high_missing_mt_idx(struct mps_softc *sc)
{
u32 map_idx, high_idx = MPS_MAPTABLE_BAD_IDX;
u8 high_missing_count = 0;
u32 start_idx, end_idx, start_idx_ir, end_idx_ir;
struct dev_mapping_table *mt_entry;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
start_idx = 0;
start_idx_ir = 0;
end_idx_ir = 0;
end_idx = sc->max_devices;
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
start_idx = 1;
if (sc->ir_firmware) {
_mapping_get_ir_maprange(sc, &start_idx_ir, &end_idx_ir);
if (start_idx == start_idx_ir)
start_idx = end_idx_ir + 1;
else
end_idx = start_idx_ir;
}
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx < end_idx; map_idx++, mt_entry++) {
if (mt_entry->missing_count > high_missing_count) {
high_missing_count = mt_entry->missing_count;
high_idx = map_idx;
}
}
return high_idx;
}
/**
* _mapping_get_ir_mt_idx_from_wwid - get map table index from volume WWID
* @sc: per adapter object
* @wwid: world wide unique ID of the volume
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_ir_mt_idx_from_wwid(struct mps_softc *sc, u64 wwid)
{
u32 start_idx, end_idx, map_idx;
struct dev_mapping_table *mt_entry;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
if (mt_entry->physical_id == wwid)
return map_idx;
return MPS_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_mt_idx_from_id - get map table index from a device ID
* @sc: per adapter object
* @dev_id: device identifier (SAS Address)
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_mt_idx_from_id(struct mps_softc *sc, u64 dev_id)
{
u32 map_idx;
struct dev_mapping_table *mt_entry;
for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->physical_id == dev_id)
return map_idx;
}
return MPS_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_ir_mt_idx_from_handle - get map table index from volume handle
* @sc: per adapter object
* @wwid: volume device handle
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_ir_mt_idx_from_handle(struct mps_softc *sc, u16 volHandle)
{
u32 start_idx, end_idx, map_idx;
struct dev_mapping_table *mt_entry;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
if (mt_entry->dev_handle == volHandle)
return map_idx;
return MPS_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_mt_idx_from_handle - get map table index from handle
* @sc: per adapter object
* @dev_id: device handle
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_mt_idx_from_handle(struct mps_softc *sc, u16 handle)
{
u32 map_idx;
struct dev_mapping_table *mt_entry;
for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->dev_handle == handle)
return map_idx;
}
return MPS_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_free_ir_mt_idx - get first free index for a volume
* @sc: per adapter object
*
* Search the mapping table for a free index for a volume. If no free
* index is found, fall back to the entry with the highest missing count.
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_free_ir_mt_idx(struct mps_softc *sc)
{
u8 high_missing_count = 0;
u32 start_idx, end_idx, map_idx;
u32 high_idx = MPS_MAPTABLE_BAD_IDX;
struct dev_mapping_table *mt_entry;
/*
* The IN_USE flag should be clear if the entry is available to use.
* This flag is cleared on initialization and when a volume is
* deleted. All other times this flag should be set. If, for some
* reason, a free entry cannot be found, look for the entry with the
* highest missing count just in case there is one.
*/
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
if (!(mt_entry->device_info & MPS_MAP_IN_USE))
return map_idx;
if (mt_entry->missing_count > high_missing_count) {
high_missing_count = mt_entry->missing_count;
high_idx = map_idx;
}
}
if (high_idx == MPS_MAPTABLE_BAD_IDX) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Could not find a "
"free entry in the mapping table for a Volume. The mapping "
"table is probably corrupt.\n", __func__);
}
return high_idx;
}
/**
* _mapping_get_free_mt_idx - get first free index for a device
* @sc: per adapter object
* @start_idx: offset in the table to start search
*
* Returns the index of map table entry on success or bad index.
*/
static u32
_mapping_get_free_mt_idx(struct mps_softc *sc, u32 start_idx)
{
u32 map_idx, max_idx = sc->max_devices;
struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx];
u16 volume_mapping_flags;
volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
if (sc->ir_firmware && (volume_mapping_flags ==
MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING))
max_idx -= sc->max_volumes;
for (map_idx = start_idx; map_idx < max_idx; map_idx++, mt_entry++)
if (!(mt_entry->device_info & (MPS_MAP_IN_USE |
MPS_DEV_RESERVED)))
return map_idx;
return MPS_MAPTABLE_BAD_IDX;
}
/**
* _mapping_get_dpm_idx_from_id - get DPM index from ID
* @sc: per adapter object
* @id: volume WWID or enclosure ID or device ID
*
* Returns the index of DPM entry on success or bad index.
*/
static u16
_mapping_get_dpm_idx_from_id(struct mps_softc *sc, u64 id, u32 phy_bits)
{
u16 entry_num;
uint64_t PhysicalIdentifier;
Mpi2DriverMap0Entry_t *dpm_entry;
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
dpm_entry++) {
/* Rebuild the 64-bit ID from each entry before comparing. */
PhysicalIdentifier = dpm_entry->PhysicalIdentifier.High;
PhysicalIdentifier = (PhysicalIdentifier << 32) |
dpm_entry->PhysicalIdentifier.Low;
if ((id == PhysicalIdentifier) &&
(!phy_bits || !dpm_entry->PhysicalBitsMapping ||
(phy_bits & dpm_entry->PhysicalBitsMapping)))
return entry_num;
}
return MPS_DPM_BAD_IDX;
}
/**
* _mapping_get_free_dpm_idx - get first available DPM index
* @sc: per adapter object
*
* Returns the index of DPM entry on success or bad index.
*/
static u32
_mapping_get_free_dpm_idx(struct mps_softc *sc)
{
u16 entry_num;
Mpi2DriverMap0Entry_t *dpm_entry;
u16 current_entry = MPS_DPM_BAD_IDX, missing_cnt, high_missing_cnt = 0;
u64 physical_id;
struct dev_mapping_table *mt_entry;
u32 map_idx;
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += entry_num;
missing_cnt = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
/*
* If entry is used and not missing, then this entry can't be
* used. Look at next one.
*/
if (sc->dpm_entry_used[entry_num] && !missing_cnt)
continue;
/*
* If this entry is not used at all, then the missing count
* doesn't matter. Just use this one. Otherwise, keep looking
* and make sure the entry with the highest missing count is
* used.
*/
if (!sc->dpm_entry_used[entry_num]) {
current_entry = entry_num;
break;
}
if ((current_entry == MPS_DPM_BAD_IDX) ||
(missing_cnt > high_missing_cnt)) {
current_entry = entry_num;
high_missing_cnt = missing_cnt;
}
}
/*
* If an entry has been found to use and it's already marked as used
* it means that some device was already using this entry but it's
* missing, and that means that the connection between the missing
* device's DPM entry and the mapping table needs to be cleared. To do
* this, use the Physical ID of the old device still in the DPM entry
* to find its mapping table entry, then mark its DPM entry as BAD.
*/
if ((current_entry != MPS_DPM_BAD_IDX) &&
sc->dpm_entry_used[current_entry]) {
dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += current_entry;
physical_id = dpm_entry->PhysicalIdentifier.High;
physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
map_idx = _mapping_get_mt_idx_from_id(sc, physical_id);
if (map_idx != MPS_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
mt_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
}
}
return current_entry;
}
/**
* _mapping_update_ir_missing_cnt - Updates missing count for a volume
* @sc: per adapter object
* @map_idx: map table index of the volume
* @element: IR configuration change element
* @wwid: IR volume ID.
*
* Updates the missing count in the map table and in the DPM entry for a volume
*
* Returns nothing.
*/
static void
_mapping_update_ir_missing_cnt(struct mps_softc *sc, u32 map_idx,
Mpi2EventIrConfigElement_t *element, u64 wwid)
{
struct dev_mapping_table *mt_entry;
u8 missing_cnt, reason = element->ReasonCode, update_dpm = 1;
u16 dpm_idx;
Mpi2DriverMap0Entry_t *dpm_entry;
/*
* Depending on the reason code, update the missing count. Always set
* the init_complete flag when here, so just do it first. That flag is
* used for volumes to make sure that the DPM entry has been updated.
* When a volume is deleted, clear the map entry's IN_USE flag so that
* the entry can be used again if another volume is created. Also clear
* its dev_handle entry so that other functions can't find this volume
* by the handle, since it's not defined any longer.
*/
mt_entry = &sc->mapping_table[map_idx];
mt_entry->init_complete = 1;
if ((reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) ||
(reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED)) {
mt_entry->missing_count = 0;
} else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) {
if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT)
mt_entry->missing_count++;
mt_entry->device_info &= ~MPS_MAP_IN_USE;
mt_entry->dev_handle = 0;
}
/*
* If persistent mapping is enabled, update the DPM with the new missing
* count for the volume. If the DPM index is bad, get a free one. If
* it's bad for a volume that's being deleted do nothing because that
* volume doesn't have a DPM entry.
*/
if (!sc->is_dpm_enable)
return;
dpm_idx = mt_entry->dpm_entry_num;
if (dpm_idx == MPS_DPM_BAD_IDX) {
if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED)
{
mps_dprint(sc, MPS_MAPPING, "%s: Volume being deleted "
"is not in DPM so DPM missing count will not be "
"updated.\n", __func__);
return;
}
}
if (dpm_idx == MPS_DPM_BAD_IDX)
dpm_idx = _mapping_get_free_dpm_idx(sc);
/*
* Got the DPM entry for the volume or found a free DPM entry if this is
* a new volume. Check if the current information is outdated.
*/
if (dpm_idx != MPS_DPM_BAD_IDX) {
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += dpm_idx;
missing_cnt = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
if ((mt_entry->physical_id ==
le64toh(((u64)dpm_entry->PhysicalIdentifier.High << 32) |
(u64)dpm_entry->PhysicalIdentifier.Low)) && (missing_cnt ==
mt_entry->missing_count)) {
mps_dprint(sc, MPS_MAPPING, "%s: DPM entry for volume "
"with target ID %d does not require an update.\n",
__func__, mt_entry->id);
update_dpm = 0;
}
}
/*
* Update the volume's persistent info if it's new or the ID or missing
* count has changed. If a good DPM index has not been found by now,
* there is no space left in the DPM table.
*/
if ((dpm_idx != MPS_DPM_BAD_IDX) && update_dpm) {
mps_dprint(sc, MPS_MAPPING, "%s: Update DPM entry for volume "
"with target ID %d.\n", __func__, mt_entry->id);
mt_entry->dpm_entry_num = dpm_idx;
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += dpm_idx;
dpm_entry->PhysicalIdentifier.Low =
(0xFFFFFFFF & mt_entry->physical_id);
dpm_entry->PhysicalIdentifier.High =
(mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = map_idx;
dpm_entry->MappingInformation = mt_entry->missing_count;
dpm_entry->PhysicalBitsMapping = 0;
dpm_entry->Reserved1 = 0;
sc->dpm_flush_entry[dpm_idx] = 1;
sc->dpm_entry_used[dpm_idx] = 1;
} else if (dpm_idx == MPS_DPM_BAD_IDX) {
mps_dprint(sc, MPS_INFO | MPS_MAPPING, "%s: No space to add an "
"entry in the DPM table for volume with target ID %d.\n",
__func__, mt_entry->id);
}
}
/**
* _mapping_add_to_removal_table - add DPM index to the removal table
* @sc: per adapter object
* @dpm_idx: Index of DPM entry to remove
*
* Adds a DPM entry number to the removal table.
*
* Returns nothing.
*/
static void
_mapping_add_to_removal_table(struct mps_softc *sc, u16 dpm_idx)
{
struct map_removal_table *remove_entry;
u32 i;
/*
* This is only used to remove entries from the DPM in the controller.
* If DPM is not enabled, just return.
*/
if (!sc->is_dpm_enable)
return;
/*
* Find the first available removal_table entry and add the new entry
* there.
*/
remove_entry = sc->removal_table;
for (i = 0; i < sc->max_devices; i++, remove_entry++) {
if (remove_entry->dpm_entry_num != MPS_DPM_BAD_IDX)
continue;
mps_dprint(sc, MPS_MAPPING, "%s: Adding DPM entry %d to table "
"for removal.\n", __func__, dpm_idx);
remove_entry->dpm_entry_num = dpm_idx;
break;
}
}
/**
* _mapping_update_missing_count - Update missing count for a device
* @sc: per adapter object
* @topo_change: Topology change event entry
*
* Increment the missing count in the mapping table for a device that is not
 * responding. If Persistent Mapping is used, update the DPM entry as well.
 * Currently, this function only increments the missing count if the device
 * goes missing, i.e., after initialization has completed. This means that the
* missing count can only go from 0 to 1 here. The missing count is incremented
* during initialization as well, so that's where a target's missing count can
* go past 1.
*
* Returns nothing.
*/
static void
_mapping_update_missing_count(struct mps_softc *sc,
struct _map_topology_change *topo_change)
{
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
u8 entry;
struct _map_phy_change *phy_change;
u32 map_idx;
struct dev_mapping_table *mt_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
for (entry = 0; entry < topo_change->num_entries; entry++) {
phy_change = &topo_change->phy_details[entry];
if (!phy_change->dev_handle || (phy_change->reason !=
MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
continue;
map_idx = _mapping_get_mt_idx_from_handle(sc, phy_change->
dev_handle);
phy_change->is_processed = 1;
if (map_idx == MPS_MAPTABLE_BAD_IDX) {
mps_dprint(sc, MPS_INFO | MPS_MAPPING, "%s: device is "
"already removed from mapping table\n", __func__);
continue;
}
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT)
mt_entry->missing_count++;
/*
		 * When using Enc/Slot mapping and a device is removed, its
		 * mapping table information should be cleared. Otherwise, the
* target ID will be incorrect if this same device is re-added
* to a different slot.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
_mapping_clear_map_entry(mt_entry);
}
/*
* When using device mapping, update the missing count in the
* DPM entry, but only if the missing count has changed.
*/
if (((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) &&
sc->is_dpm_enable &&
mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
dpm_entry =
(Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += mt_entry->dpm_entry_num;
if (dpm_entry->MappingInformation !=
mt_entry->missing_count) {
dpm_entry->MappingInformation =
mt_entry->missing_count;
sc->dpm_flush_entry[mt_entry->dpm_entry_num] =
1;
}
}
}
}
/**
 * _mapping_find_enc_map_space - find map table entries for enclosure
* @sc: per adapter object
* @et_entry: enclosure entry
*
 * Search through the mapping table, defragmenting it if needed, to provide
 * contiguous space in the map table for a particular enclosure entry.
*
* Returns start index in map table or bad index.
*/
static u32
_mapping_find_enc_map_space(struct mps_softc *sc,
struct enc_mapping_table *et_entry)
{
u16 vol_mapping_flags;
u32 skip_count, end_of_table, map_idx, enc_idx;
u16 num_found;
u32 start_idx = MPS_MAPTABLE_BAD_IDX;
struct dev_mapping_table *mt_entry;
struct enc_mapping_table *enc_entry;
unsigned char done_flag = 0, found_space;
u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
skip_count = sc->num_rsvd_entries;
num_found = 0;
vol_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
/*
* The end of the mapping table depends on where volumes are kept, if
* IR is enabled.
*/
if (!sc->ir_firmware)
end_of_table = sc->max_devices;
else if (vol_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING)
end_of_table = sc->max_devices;
else
end_of_table = sc->max_devices - sc->max_volumes;
/*
* The skip_count is the number of entries that are reserved at the
* beginning of the mapping table. But, it does not include the number
* of Physical IDs that are reserved for direct attached devices. Look
* through the mapping table after these reserved entries to see if
* the devices for this enclosure are already mapped. The PHY bit check
* is used to make sure that at least one PHY bit is common between the
* enclosure and the device that is already mapped.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Looking for space in the mapping "
"table for added enclosure.\n", __func__);
for (map_idx = (max_num_phy_ids + skip_count);
map_idx < end_of_table; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if ((et_entry->enclosure_id == mt_entry->physical_id) &&
(!mt_entry->phy_bits || (mt_entry->phy_bits &
et_entry->phy_bits))) {
num_found += 1;
if (num_found == et_entry->num_slots) {
start_idx = (map_idx - num_found) + 1;
mps_dprint(sc, MPS_MAPPING, "%s: Found space "
"in the mapping for enclosure at map index "
"%d.\n", __func__, start_idx);
return start_idx;
}
} else
num_found = 0;
}
/*
* If the enclosure's devices are not mapped already, look for
* contiguous entries in the mapping table that are not reserved. If
* enough entries are found, return the starting index for that space.
*/
num_found = 0;
for (map_idx = (max_num_phy_ids + skip_count);
map_idx < end_of_table; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (!(mt_entry->device_info & MPS_DEV_RESERVED)) {
num_found += 1;
if (num_found == et_entry->num_slots) {
start_idx = (map_idx - num_found) + 1;
mps_dprint(sc, MPS_MAPPING, "%s: Found space "
"in the mapping for enclosure at map index "
"%d.\n", __func__, start_idx);
return start_idx;
}
} else
num_found = 0;
}
/*
* If here, it means that not enough space in the mapping table was
* found to support this enclosure, so go through the enclosure table to
* see if any enclosure entries have a missing count. If so, get the
* enclosure with the highest missing count and check it to see if there
* is enough space for the new enclosure.
*/
while (!done_flag) {
enc_idx = _mapping_get_high_missing_et_idx(sc);
if (enc_idx == MPS_ENCTABLE_BAD_IDX) {
mps_dprint(sc, MPS_MAPPING, "%s: Not enough space was "
"found in the mapping for the added enclosure.\n",
__func__);
return MPS_MAPTABLE_BAD_IDX;
}
/*
* Found a missing enclosure. Set the skip_search flag so this
* enclosure is not checked again for a high missing count if
* the loop continues. This way, all missing enclosures can
* have their space added together to find enough space in the
* mapping table for the added enclosure. The space must be
* contiguous.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Space from a missing "
"enclosure was found.\n", __func__);
enc_entry = &sc->enclosure_table[enc_idx];
enc_entry->skip_search = 1;
/*
		 * Unmark all of the missing enclosure's devices' reserved
* space. These will be remarked as reserved if this missing
* enclosure's space is not used.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Clear the reserved flag for "
"all of the map entries for the enclosure.\n", __func__);
mt_entry = &sc->mapping_table[enc_entry->start_index];
for (map_idx = enc_entry->start_index; map_idx <
(enc_entry->start_index + enc_entry->num_slots); map_idx++,
mt_entry++)
mt_entry->device_info &= ~MPS_DEV_RESERVED;
/*
* Now that space has been unreserved, check again to see if
* enough space is available for the new enclosure.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Check if new mapping space is "
"enough for the new enclosure.\n", __func__);
found_space = 0;
num_found = 0;
for (map_idx = (max_num_phy_ids + skip_count);
map_idx < end_of_table; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (!(mt_entry->device_info & MPS_DEV_RESERVED)) {
num_found += 1;
if (num_found == et_entry->num_slots) {
start_idx = (map_idx - num_found) + 1;
found_space = 1;
break;
}
} else
num_found = 0;
}
if (!found_space)
continue;
/*
* If enough space was found, all of the missing enclosures that
* will be used for the new enclosure must be added to the
* removal table. Then all mappings for the enclosure's devices
* and for the enclosure itself need to be cleared. There may be
* more than one enclosure to add to the removal table and
* clear.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Found space in the mapping "
"for enclosure at map index %d.\n", __func__, start_idx);
for (map_idx = start_idx; map_idx < (start_idx + num_found);
map_idx++) {
enc_entry = sc->enclosure_table;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
enc_idx++, enc_entry++) {
if (map_idx < enc_entry->start_index ||
map_idx > (enc_entry->start_index +
enc_entry->num_slots))
continue;
if (!enc_entry->removal_flag) {
mps_dprint(sc, MPS_MAPPING, "%s: "
"Enclosure %d will be removed from "
"the mapping table.\n", __func__,
enc_idx);
enc_entry->removal_flag = 1;
_mapping_add_to_removal_table(sc,
enc_entry->dpm_entry_num);
}
mt_entry = &sc->mapping_table[map_idx];
_mapping_clear_map_entry(mt_entry);
if (map_idx == (enc_entry->start_index +
enc_entry->num_slots - 1))
_mapping_clear_enc_entry(et_entry);
}
}
/*
* During the search for space for this enclosure, some entries
* in the mapping table may have been unreserved. Go back and
* change all of these to reserved again. Only the enclosures
* with the removal_flag set should be left as unreserved. The
* skip_search flag needs to be cleared as well so that the
* enclosure's space will be looked at the next time space is
* needed.
*/
enc_entry = sc->enclosure_table;
for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
enc_idx++, enc_entry++) {
if (!enc_entry->removal_flag) {
mps_dprint(sc, MPS_MAPPING, "%s: Reset the "
"reserved flag for all of the map entries "
"for enclosure %d.\n", __func__, enc_idx);
mt_entry = &sc->mapping_table[enc_entry->
start_index];
for (map_idx = enc_entry->start_index; map_idx <
(enc_entry->start_index +
enc_entry->num_slots); map_idx++,
mt_entry++)
mt_entry->device_info |=
MPS_DEV_RESERVED;
et_entry->skip_search = 0;
}
}
done_flag = 1;
}
return start_idx;
}
/**
 * _mapping_get_dev_info - get information about newly added devices
* @sc: per adapter object
* @topo_change: Topology change event entry
*
 * Search through the topology change event list, issue SAS Device Page 0
 * requests for the newly added devices, and reserve entries in the tables.
*
* Returns nothing
*/
static void
_mapping_get_dev_info(struct mps_softc *sc,
struct _map_topology_change *topo_change)
{
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
u8 entry, enc_idx, phy_idx;
u32 map_idx, index, device_info;
struct _map_phy_change *phy_change, *tmp_phy_change;
uint64_t sas_address;
struct enc_mapping_table *et_entry;
struct dev_mapping_table *mt_entry;
u8 add_code = MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED;
int rc = 1;
for (entry = 0; entry < topo_change->num_entries; entry++) {
phy_change = &topo_change->phy_details[entry];
if (phy_change->is_processed || !phy_change->dev_handle ||
phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED)
continue;
if (mps_config_get_sas_device_pg0(sc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
phy_change->dev_handle)) {
phy_change->is_processed = 1;
continue;
}
/*
* Always get SATA Identify information because this is used
* to determine if Start/Stop Unit should be sent to the drive
* when the system is shutdown.
*/
device_info = le32toh(sas_device_pg0.DeviceInfo);
sas_address = le32toh(sas_device_pg0.SASAddress.High);
sas_address = (sas_address << 32) |
le32toh(sas_device_pg0.SASAddress.Low);
if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) {
rc = mpssas_get_sas_address_for_sata_disk(sc,
&sas_address, phy_change->dev_handle, device_info,
&phy_change->is_SATA_SSD);
if (rc) {
mps_dprint(sc, MPS_ERROR, "%s: failed to get "
"disk type (SSD or HDD) and SAS Address "
"for SATA device with handle 0x%04x\n",
__func__, phy_change->dev_handle);
}
}
phy_change->physical_id = sas_address;
phy_change->slot = le16toh(sas_device_pg0.Slot);
phy_change->device_info = device_info;
/*
* When using Enc/Slot mapping, if this device is an enclosure
* make sure that all of its slots can fit into the mapping
* table.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
/*
* The enclosure should already be in the enclosure
* table due to the Enclosure Add event. If not, just
* continue, nothing can be done.
*/
enc_idx = _mapping_get_enc_idx_from_handle(sc,
topo_change->enc_handle);
if (enc_idx == MPS_ENCTABLE_BAD_IDX) {
phy_change->is_processed = 1;
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because the enclosure is not in "
"the mapping table\n", __func__,
phy_change->dev_handle);
continue;
}
if (!((phy_change->device_info &
MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
(phy_change->device_info &
(MPI2_SAS_DEVICE_INFO_SSP_TARGET |
MPI2_SAS_DEVICE_INFO_STP_TARGET |
MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))) {
phy_change->is_processed = 1;
continue;
}
et_entry = &sc->enclosure_table[enc_idx];
/*
* If the enclosure already has a start_index, it's been
* mapped, so go to the next Topo change.
*/
if (et_entry->start_index != MPS_MAPTABLE_BAD_IDX)
continue;
/*
* If the Expander Handle is 0, the devices are direct
* attached. In that case, the start_index must be just
* after the reserved entries. Otherwise, find space in
* the mapping table for the enclosure's devices.
*/
if (!topo_change->exp_handle) {
map_idx = sc->num_rsvd_entries;
et_entry->start_index = map_idx;
} else {
map_idx = _mapping_find_enc_map_space(sc,
et_entry);
et_entry->start_index = map_idx;
/*
* If space cannot be found to hold all of the
* enclosure's devices in the mapping table,
* there's no need to continue checking the
* other devices in this event. Set all of the
* phy_details for this event (if the change is
* for an add) as already processed because none
* of these devices can be added to the mapping
* table.
*/
if (et_entry->start_index ==
MPS_MAPTABLE_BAD_IDX) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING,
"%s: failed to add the enclosure "
"with ID 0x%016jx because there is "
"no free space available in the "
"mapping table for all of the "
"enclosure's devices.\n", __func__,
(uintmax_t)et_entry->enclosure_id);
phy_change->is_processed = 1;
for (phy_idx = 0; phy_idx <
topo_change->num_entries;
phy_idx++) {
tmp_phy_change =
&topo_change->phy_details
[phy_idx];
if (tmp_phy_change->reason ==
add_code)
tmp_phy_change->
is_processed = 1;
}
break;
}
}
/*
* Found space in the mapping table for this enclosure.
* Initialize each mapping table entry for the
* enclosure.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Initialize %d map "
"entries for the enclosure, starting at map index "
" %d.\n", __func__, et_entry->num_slots, map_idx);
mt_entry = &sc->mapping_table[map_idx];
for (index = map_idx; index < (et_entry->num_slots
+ map_idx); index++, mt_entry++) {
mt_entry->device_info = MPS_DEV_RESERVED;
mt_entry->physical_id = et_entry->enclosure_id;
mt_entry->phy_bits = et_entry->phy_bits;
mt_entry->missing_count = 0;
}
}
}
}
/**
 * _mapping_set_mid_to_eid - set map table data from enclosure table
* @sc: per adapter object
* @et_entry: enclosure entry
*
* Returns nothing
*/
static inline void
_mapping_set_mid_to_eid(struct mps_softc *sc,
struct enc_mapping_table *et_entry)
{
struct dev_mapping_table *mt_entry;
u16 slots = et_entry->num_slots, map_idx;
u32 start_idx = et_entry->start_index;
if (start_idx != MPS_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++)
mt_entry->physical_id = et_entry->enclosure_id;
}
}
/**
* _mapping_clear_removed_entries - mark the entries to be cleared
* @sc: per adapter object
*
 * Search through the removal table and mark the entries which need to be
 * flushed to the DPM, and also update the map table and enclosure table by
 * clearing the corresponding entries.
*
* Returns nothing
*/
static void
_mapping_clear_removed_entries(struct mps_softc *sc)
{
u32 remove_idx;
struct map_removal_table *remove_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
u8 done_flag = 0, num_entries, m, i;
struct enc_mapping_table *et_entry, *from, *to;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
if (sc->is_dpm_enable) {
remove_entry = sc->removal_table;
for (remove_idx = 0; remove_idx < sc->max_devices;
remove_idx++, remove_entry++) {
if (remove_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *) sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += remove_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low = 0;
dpm_entry->PhysicalIdentifier.High = 0;
dpm_entry->DeviceIndex = 0;
dpm_entry->MappingInformation = 0;
dpm_entry->PhysicalBitsMapping = 0;
sc->dpm_flush_entry[remove_entry->
dpm_entry_num] = 1;
sc->dpm_entry_used[remove_entry->dpm_entry_num]
= 0;
remove_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
}
}
}
/*
* When using Enc/Slot mapping, if a new enclosure was added and old
* enclosure space was needed, the enclosure table may now have gaps
* that need to be closed. All enclosure mappings need to be contiguous
* so that space can be reused correctly if available.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
num_entries = sc->num_enc_table_entries;
while (!done_flag) {
done_flag = 1;
et_entry = sc->enclosure_table;
for (i = 0; i < num_entries; i++, et_entry++) {
if (!et_entry->enc_handle && et_entry->
init_complete) {
done_flag = 0;
if (i != (num_entries - 1)) {
from = &sc->enclosure_table
[i+1];
to = &sc->enclosure_table[i];
for (m = i; m < (num_entries -
1); m++, from++, to++) {
_mapping_set_mid_to_eid
(sc, to);
*to = *from;
}
_mapping_clear_enc_entry(to);
sc->num_enc_table_entries--;
num_entries =
sc->num_enc_table_entries;
} else {
_mapping_clear_enc_entry
(et_entry);
sc->num_enc_table_entries--;
num_entries =
sc->num_enc_table_entries;
}
}
}
}
}
}
/**
 * _mapping_add_new_device - Add the new device into the mapping table
* @sc: per adapter object
* @topo_change: Topology change event entry
*
* Search through the topology change event list and update map table,
* enclosure table and DPM pages for the newly added devices.
*
* Returns nothing
*/
static void
_mapping_add_new_device(struct mps_softc *sc,
struct _map_topology_change *topo_change)
{
u8 enc_idx, missing_cnt, is_removed = 0;
u16 dpm_idx;
u32 search_idx, map_idx;
u32 entry;
struct dev_mapping_table *mt_entry;
struct enc_mapping_table *et_entry;
struct _map_phy_change *phy_change;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
Mpi2DriverMap0Entry_t *dpm_entry;
uint64_t temp64_var;
u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER);
u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
for (entry = 0; entry < topo_change->num_entries; entry++) {
phy_change = &topo_change->phy_details[entry];
if (phy_change->is_processed)
continue;
if (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED ||
!phy_change->dev_handle) {
phy_change->is_processed = 1;
continue;
}
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
enc_idx = _mapping_get_enc_idx_from_handle
(sc, topo_change->enc_handle);
if (enc_idx == MPS_ENCTABLE_BAD_IDX) {
phy_change->is_processed = 1;
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because the enclosure is not in "
"the mapping table\n", __func__,
phy_change->dev_handle);
continue;
}
/*
* If the enclosure's start_index is BAD here, it means
* that there is no room in the mapping table to cover
* all of the devices that could be in the enclosure.
* There's no reason to process any of the devices for
* this enclosure since they can't be mapped.
*/
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->start_index == MPS_MAPTABLE_BAD_IDX) {
phy_change->is_processed = 1;
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because there is no free space "
"available in the mapping table\n",
__func__, phy_change->dev_handle);
continue;
}
/*
* Add this device to the mapping table at the correct
* offset where space was found to map the enclosure.
* Then setup the DPM entry information if being used.
*/
map_idx = et_entry->start_index + phy_change->slot -
et_entry->start_slot;
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id = phy_change->physical_id;
mt_entry->id = map_idx;
mt_entry->dev_handle = phy_change->dev_handle;
mt_entry->missing_count = 0;
mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
mt_entry->device_info = phy_change->device_info |
(MPS_DEV_RESERVED | MPS_MAP_IN_USE);
if (sc->is_dpm_enable) {
dpm_idx = et_entry->dpm_entry_num;
if (dpm_idx == MPS_DPM_BAD_IDX)
dpm_idx = _mapping_get_dpm_idx_from_id
(sc, et_entry->enclosure_id,
et_entry->phy_bits);
if (dpm_idx == MPS_DPM_BAD_IDX) {
dpm_idx = _mapping_get_free_dpm_idx(sc);
if (dpm_idx != MPS_DPM_BAD_IDX) {
dpm_entry =
(Mpi2DriverMap0Entry_t *)
((u8 *) sc->dpm_pg0 +
hdr_sz);
dpm_entry += dpm_idx;
dpm_entry->
PhysicalIdentifier.Low =
(0xFFFFFFFF &
et_entry->enclosure_id);
dpm_entry->
PhysicalIdentifier.High =
(et_entry->enclosure_id
>> 32);
dpm_entry->DeviceIndex =
(U16)et_entry->start_index;
dpm_entry->MappingInformation =
et_entry->num_slots;
dpm_entry->MappingInformation
<<= map_shift;
dpm_entry->PhysicalBitsMapping
= et_entry->phy_bits;
et_entry->dpm_entry_num =
dpm_idx;
sc->dpm_entry_used[dpm_idx] = 1;
sc->dpm_flush_entry[dpm_idx] =
1;
phy_change->is_processed = 1;
} else {
phy_change->is_processed = 1;
mps_dprint(sc, MPS_ERROR |
MPS_MAPPING, "%s: failed "
"to add the device with "
"handle 0x%04x to "
"persistent table because "
"there is no free space "
"available\n", __func__,
phy_change->dev_handle);
}
} else {
et_entry->dpm_entry_num = dpm_idx;
mt_entry->dpm_entry_num = dpm_idx;
}
}
et_entry->init_complete = 1;
} else if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
/*
* Get the mapping table index for this device. If it's
* not in the mapping table yet, find a free entry if
* one is available. If there are no free entries, look
* for the entry that has the highest missing count. If
* none of that works to find an entry in the mapping
* table, there is a problem. Log a message and just
* continue on.
*/
map_idx = _mapping_get_mt_idx_from_id
(sc, phy_change->physical_id);
if (map_idx == MPS_MAPTABLE_BAD_IDX) {
search_idx = sc->num_rsvd_entries;
if (topo_change->exp_handle)
search_idx += max_num_phy_ids;
map_idx = _mapping_get_free_mt_idx(sc,
search_idx);
}
/*
* If an entry will be used that has a missing device,
* clear its entry from the DPM in the controller.
*/
if (map_idx == MPS_MAPTABLE_BAD_IDX) {
map_idx = _mapping_get_high_missing_mt_idx(sc);
if (map_idx != MPS_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
_mapping_add_to_removal_table(sc,
mt_entry->dpm_entry_num);
is_removed = 1;
mt_entry->init_complete = 0;
}
}
if (map_idx != MPS_MAPTABLE_BAD_IDX) {
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id = phy_change->physical_id;
mt_entry->id = map_idx;
mt_entry->dev_handle = phy_change->dev_handle;
mt_entry->missing_count = 0;
mt_entry->device_info = phy_change->device_info
| (MPS_DEV_RESERVED | MPS_MAP_IN_USE);
} else {
phy_change->is_processed = 1;
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: "
"failed to add the device with handle "
"0x%04x because there is no free space "
"available in the mapping table\n",
__func__, phy_change->dev_handle);
continue;
}
if (sc->is_dpm_enable) {
if (mt_entry->dpm_entry_num !=
MPS_DPM_BAD_IDX) {
dpm_idx = mt_entry->dpm_entry_num;
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *)sc->dpm_pg0 + hdr_sz);
dpm_entry += dpm_idx;
missing_cnt = dpm_entry->
MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
temp64_var = dpm_entry->
PhysicalIdentifier.High;
temp64_var = (temp64_var << 32) |
dpm_entry->PhysicalIdentifier.Low;
/*
* If the Mapping Table's info is not
* the same as the DPM entry, clear the
* init_complete flag so that it's
* updated.
*/
if ((mt_entry->physical_id ==
temp64_var) && !missing_cnt)
mt_entry->init_complete = 1;
else
mt_entry->init_complete = 0;
} else {
dpm_idx = _mapping_get_free_dpm_idx(sc);
mt_entry->init_complete = 0;
}
if (dpm_idx != MPS_DPM_BAD_IDX &&
!mt_entry->init_complete) {
mt_entry->dpm_entry_num = dpm_idx;
dpm_entry = (Mpi2DriverMap0Entry_t *)
((u8 *)sc->dpm_pg0 + hdr_sz);
dpm_entry += dpm_idx;
dpm_entry->PhysicalIdentifier.Low =
(0xFFFFFFFF &
mt_entry->physical_id);
dpm_entry->PhysicalIdentifier.High =
(mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = (U16) map_idx;
dpm_entry->MappingInformation = 0;
dpm_entry->PhysicalBitsMapping = 0;
sc->dpm_entry_used[dpm_idx] = 1;
sc->dpm_flush_entry[dpm_idx] = 1;
phy_change->is_processed = 1;
} else if (dpm_idx == MPS_DPM_BAD_IDX) {
phy_change->is_processed = 1;
mps_dprint(sc, MPS_ERROR | MPS_MAPPING,
"%s: failed to add the device with "
"handle 0x%04x to persistent table "
"because there is no free space "
"available\n", __func__,
phy_change->dev_handle);
}
}
mt_entry->init_complete = 1;
}
phy_change->is_processed = 1;
}
if (is_removed)
_mapping_clear_removed_entries(sc);
}
/**
 * _mapping_flush_dpm_pages - Flush the DPM pages to NVRAM
* @sc: per adapter object
*
* Returns nothing
*/
static void
_mapping_flush_dpm_pages(struct mps_softc *sc)
{
Mpi2DriverMap0Entry_t *dpm_entry;
Mpi2ConfigReply_t mpi_reply;
Mpi2DriverMappingPage0_t config_page;
u16 entry_num;
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
if (!sc->dpm_flush_entry[entry_num])
continue;
memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
dpm_entry += entry_num;
dpm_entry->MappingInformation = htole16(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = htole16(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping = htole32(dpm_entry->
PhysicalBitsMapping);
memcpy(&config_page.Entry, (u8 *)dpm_entry,
sizeof(Mpi2DriverMap0Entry_t));
/* TODO-How to handle failed writes? */
mps_dprint(sc, MPS_MAPPING, "%s: Flushing DPM entry %d.\n",
__func__, entry_num);
if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
entry_num)) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Flush of "
"DPM entry %d for device failed\n", __func__,
entry_num);
} else
sc->dpm_flush_entry[entry_num] = 0;
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->
PhysicalBitsMapping);
}
}
/**
 * mps_mapping_allocate_memory - allocates the memory required for mapping tables
* @sc: per adapter object
*
* Allocates the memory for all the tables required for host mapping
*
* Return 0 on success or non-zero on failure.
*/
int
mps_mapping_allocate_memory(struct mps_softc *sc)
{
uint32_t dpm_pg0_sz;
- sc->mapping_table = mallocarray(sc->max_devices,
- sizeof(struct dev_mapping_table), M_MPT2, M_ZERO|M_NOWAIT);
+ sc->mapping_table = malloc((sizeof(struct dev_mapping_table) *
+ sc->max_devices), M_MPT2, M_ZERO|M_NOWAIT);
if (!sc->mapping_table)
goto free_resources;
- sc->removal_table = mallocarray(sc->max_devices,
- sizeof(struct map_removal_table), M_MPT2, M_ZERO|M_NOWAIT);
+ sc->removal_table = malloc((sizeof(struct map_removal_table) *
+ sc->max_devices), M_MPT2, M_ZERO|M_NOWAIT);
if (!sc->removal_table)
goto free_resources;
- sc->enclosure_table = mallocarray(sc->max_enclosures,
- sizeof(struct enc_mapping_table), M_MPT2, M_ZERO|M_NOWAIT);
+ sc->enclosure_table = malloc((sizeof(struct enc_mapping_table) *
+ sc->max_enclosures), M_MPT2, M_ZERO|M_NOWAIT);
if (!sc->enclosure_table)
goto free_resources;
- sc->dpm_entry_used = mallocarray(sc->max_dpm_entries, sizeof(u8),
+ sc->dpm_entry_used = malloc((sizeof(u8) * sc->max_dpm_entries),
M_MPT2, M_ZERO|M_NOWAIT);
if (!sc->dpm_entry_used)
goto free_resources;
- sc->dpm_flush_entry = mallocarray(sc->max_dpm_entries, sizeof(u8),
+ sc->dpm_flush_entry = malloc((sizeof(u8) * sc->max_dpm_entries),
M_MPT2, M_ZERO|M_NOWAIT);
if (!sc->dpm_flush_entry)
goto free_resources;
dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
(sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
sc->dpm_pg0 = malloc(dpm_pg0_sz, M_MPT2, M_ZERO|M_NOWAIT);
if (!sc->dpm_pg0) {
printf("%s: memory alloc failed for dpm page; disabling dpm\n",
__func__);
sc->is_dpm_enable = 0;
}
return 0;
free_resources:
free(sc->mapping_table, M_MPT2);
free(sc->removal_table, M_MPT2);
free(sc->enclosure_table, M_MPT2);
free(sc->dpm_entry_used, M_MPT2);
free(sc->dpm_flush_entry, M_MPT2);
free(sc->dpm_pg0, M_MPT2);
printf("%s: device initialization failed due to failure in mapping "
"table memory allocation\n", __func__);
return -1;
}
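/*
 * Sketch of the allocation change in the hunk above, for illustration only:
 * mallocarray(9) is essentially an overflow-checked malloc(9) for arrays.
 * The helper below is an assumption about its general shape, not the
 * kernel's actual implementation, which may report overflow differently.
 */
static __inline void *
_mapping_sketch_alloc_array(size_t nmemb, size_t size,
    struct malloc_type *type, int flags)
{

	if (size != 0 && nmemb > (size_t)-1 / size)
		return (NULL);		/* nmemb * size would overflow */
	return (malloc(nmemb * size, type, flags));
}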
/**
 * mps_mapping_free_memory - frees the memory allocated for mapping tables
* @sc: per adapter object
*
* Returns nothing.
*/
void
mps_mapping_free_memory(struct mps_softc *sc)
{
free(sc->mapping_table, M_MPT2);
free(sc->removal_table, M_MPT2);
free(sc->enclosure_table, M_MPT2);
free(sc->dpm_entry_used, M_MPT2);
free(sc->dpm_flush_entry, M_MPT2);
free(sc->dpm_pg0, M_MPT2);
}
static void
_mapping_process_dpm_pg0(struct mps_softc *sc)
{
u8 missing_cnt, enc_idx;
u16 slot_id, entry_num, num_slots;
u32 map_idx, dev_idx, start_idx, end_idx;
struct dev_mapping_table *mt_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
struct enc_mapping_table *et_entry;
u64 physical_id;
u32 phy_bits = 0;
/*
* start_idx and end_idx are only used for IR.
*/
if (sc->ir_firmware)
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
/*
* Look through all of the DPM entries that were read from the
* controller and copy them over to the driver's internal table if they
* have a non-zero ID. At this point, any ID with a value of 0 would be
* invalid, so don't copy it.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Start copy of %d DPM entries into the "
"mapping table.\n", __func__, sc->max_dpm_entries);
dpm_entry = (Mpi2DriverMap0Entry_t *) ((uint8_t *) sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
dpm_entry++) {
physical_id = dpm_entry->PhysicalIdentifier.High;
physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
if (!physical_id) {
sc->dpm_entry_used[entry_num] = 0;
continue;
}
sc->dpm_entry_used[entry_num] = 1;
dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation);
missing_cnt = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
dev_idx = le16toh(dpm_entry->DeviceIndex);
phy_bits = le32toh(dpm_entry->PhysicalBitsMapping);
/*
* Volumes are at special locations in the mapping table so
* account for that. Volume mapping table entries do not depend
* on the type of mapping, so continue the loop after adding
* volumes to the mapping table.
*/
if (sc->ir_firmware && (dev_idx >= start_idx) &&
(dev_idx <= end_idx)) {
mt_entry = &sc->mapping_table[dev_idx];
mt_entry->physical_id =
dpm_entry->PhysicalIdentifier.High;
mt_entry->physical_id = (mt_entry->physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
mt_entry->id = dev_idx;
mt_entry->missing_count = missing_cnt;
mt_entry->dpm_entry_num = entry_num;
mt_entry->device_info = MPS_DEV_RESERVED;
continue;
}
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
/*
* The dev_idx for an enclosure is the start index. If
* the start index is within the controller's default
* enclosure area, set the number of slots for this
* enclosure to the max allowed. Otherwise, it should be
* a normal enclosure and the number of slots is in the
* DPM entry's Mapping Information.
*/
if (dev_idx < (sc->num_rsvd_entries +
max_num_phy_ids)) {
slot_id = 0;
if (ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1)
slot_id = 1;
num_slots = max_num_phy_ids;
} else {
slot_id = 0;
num_slots = dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_SLOT_MASK;
num_slots >>= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
}
enc_idx = sc->num_enc_table_entries;
if (enc_idx >= sc->max_enclosures) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: "
"Number of enclosure entries in DPM exceed "
"the max allowed of %d.\n", __func__,
sc->max_enclosures);
break;
}
sc->num_enc_table_entries++;
et_entry = &sc->enclosure_table[enc_idx];
physical_id = dpm_entry->PhysicalIdentifier.High;
et_entry->enclosure_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
et_entry->start_index = dev_idx;
et_entry->dpm_entry_num = entry_num;
et_entry->num_slots = num_slots;
et_entry->start_slot = slot_id;
et_entry->missing_count = missing_cnt;
et_entry->phy_bits = phy_bits;
/*
* Initialize all entries for this enclosure in the
* mapping table and mark them as reserved. The actual
* devices have not been processed yet but when they are
* they will use these entries. If an entry is found
* that already has a valid DPM index, the mapping table
* is corrupt. This can happen if the mapping type is
* changed without clearing all of the DPM entries in
* the controller.
*/
mt_entry = &sc->mapping_table[dev_idx];
for (map_idx = dev_idx; map_idx < (dev_idx + num_slots);
map_idx++, mt_entry++) {
if (mt_entry->dpm_entry_num !=
MPS_DPM_BAD_IDX) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING,
"%s: Conflict in mapping table for "
" enclosure %d\n", __func__,
enc_idx);
break;
}
physical_id =
dpm_entry->PhysicalIdentifier.High;
mt_entry->physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
mt_entry->phy_bits = phy_bits;
mt_entry->id = dev_idx;
mt_entry->dpm_entry_num = entry_num;
mt_entry->missing_count = missing_cnt;
mt_entry->device_info = MPS_DEV_RESERVED;
}
} else if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
/*
* Device mapping, so simply copy the DPM entries to the
* mapping table, but check for a corrupt mapping table
* (as described above in Enc/Slot mapping).
*/
map_idx = dev_idx;
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: "
"Conflict in mapping table for device %d\n",
__func__, map_idx);
break;
}
physical_id = dpm_entry->PhysicalIdentifier.High;
mt_entry->physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low;
mt_entry->phy_bits = phy_bits;
mt_entry->id = dev_idx;
mt_entry->missing_count = missing_cnt;
mt_entry->dpm_entry_num = entry_num;
mt_entry->device_info = MPS_DEV_RESERVED;
}
} /*close the loop for DPM table */
}
/*
 * mps_mapping_check_devices - start-of-day check for device availability
* @sc: per adapter object
*
* Returns nothing.
*/
void
mps_mapping_check_devices(void *data)
{
u32 i;
struct dev_mapping_table *mt_entry;
struct mps_softc *sc = (struct mps_softc *)data;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
struct enc_mapping_table *et_entry;
u32 start_idx = 0, end_idx = 0;
u8 stop_device_checks = 0;
MPS_FUNCTRACE(sc);
/*
* Clear this flag so that this function is never called again except
* within this function if the check needs to be done again. The
* purpose is to check for missing devices that are currently in the
* mapping table so do this only at driver init after discovery.
*/
sc->track_mapping_events = 0;
/*
* callout synchronization
* This is used to prevent race conditions for the callout.
*/
mps_dprint(sc, MPS_MAPPING, "%s: Start check for missing devices.\n",
__func__);
mtx_assert(&sc->mps_mtx, MA_OWNED);
if ((callout_pending(&sc->device_check_callout)) ||
(!callout_active(&sc->device_check_callout))) {
mps_dprint(sc, MPS_MAPPING, "%s: Device Check Callout is "
"already pending or not active.\n", __func__);
return;
}
callout_deactivate(&sc->device_check_callout);
/*
* Use callout to check if any devices in the mapping table have been
* processed yet. If ALL devices are marked as not init_complete, no
* devices have been processed and mapped. Until devices are mapped
* there's no reason to mark them as missing. Continue resetting this
* callout until devices have been mapped.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
et_entry = sc->enclosure_table;
for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) {
if (et_entry->init_complete) {
stop_device_checks = 1;
break;
}
}
} else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
mt_entry = sc->mapping_table;
for (i = 0; i < sc->max_devices; i++, mt_entry++) {
if (mt_entry->init_complete) {
stop_device_checks = 1;
break;
}
}
}
/*
* Setup another callout check after a delay. Keep doing this until
* devices are mapped.
*/
if (!stop_device_checks) {
mps_dprint(sc, MPS_MAPPING, "%s: No devices have been mapped. "
"Reset callout to check again after a %d second delay.\n",
__func__, MPS_MISSING_CHECK_DELAY);
callout_reset(&sc->device_check_callout,
MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
sc);
return;
}
mps_dprint(sc, MPS_MAPPING, "%s: Device check complete.\n", __func__);
/*
* Depending on the mapping type, check if devices have been processed
* and update their missing counts if not processed.
*/
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
et_entry = sc->enclosure_table;
for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) {
if (!et_entry->init_complete) {
if (et_entry->missing_count <
MPS_MAX_MISSING_COUNT) {
mps_dprint(sc, MPS_MAPPING, "%s: "
"Enclosure %d is missing from the "
"topology. Update its missing "
"count.\n", __func__, i);
et_entry->missing_count++;
if (et_entry->dpm_entry_num !=
MPS_DPM_BAD_IDX) {
_mapping_commit_enc_entry(sc,
et_entry);
}
}
et_entry->init_complete = 1;
}
}
if (!sc->ir_firmware)
return;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
} else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
start_idx = 0;
end_idx = sc->max_devices - 1;
mt_entry = sc->mapping_table;
}
/*
* The start and end indices have been set above according to the
* mapping type. Go through these mappings and update any entries that
* do not have the init_complete flag set, which means they are missing.
*/
if (end_idx == 0)
return;
for (i = start_idx; i < (end_idx + 1); i++, mt_entry++) {
if (mt_entry->device_info & MPS_DEV_RESERVED
&& !mt_entry->physical_id)
mt_entry->init_complete = 1;
else if (mt_entry->device_info & MPS_DEV_RESERVED) {
if (!mt_entry->init_complete) {
mps_dprint(sc, MPS_MAPPING, "%s: Device in "
"mapping table at index %d is missing from "
"topology. Update its missing count.\n",
__func__, i);
if (mt_entry->missing_count <
MPS_MAX_MISSING_COUNT) {
mt_entry->missing_count++;
if (mt_entry->dpm_entry_num !=
MPS_DPM_BAD_IDX) {
_mapping_commit_map_entry(sc,
mt_entry);
}
}
mt_entry->init_complete = 1;
}
}
}
}
/**
* mps_mapping_initialize - initialize mapping tables
* @sc: per adapter object
*
 * Read controller persistent mapping tables into the internal data area.
*
* Return 0 for success or non-zero for failure.
*/
int
mps_mapping_initialize(struct mps_softc *sc)
{
uint16_t volume_mapping_flags, dpm_pg0_sz;
uint32_t i;
Mpi2ConfigReply_t mpi_reply;
int error;
uint8_t retry_count;
uint16_t ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
/* The additional 1 accounts for the virtual enclosure
* created for the controller
*/
sc->max_enclosures = sc->facts->MaxEnclosures + 1;
sc->max_expanders = sc->facts->MaxSasExpanders;
sc->max_volumes = sc->facts->MaxVolumes;
sc->max_devices = sc->facts->MaxTargets + sc->max_volumes;
sc->pending_map_events = 0;
sc->num_enc_table_entries = 0;
sc->num_rsvd_entries = 0;
sc->max_dpm_entries = sc->ioc_pg8.MaxPersistentEntries;
sc->is_dpm_enable = (sc->max_dpm_entries) ? 1 : 0;
sc->track_mapping_events = 0;
mps_dprint(sc, MPS_MAPPING, "%s: Mapping table has a max of %d entries "
"and DPM has a max of %d entries.\n", __func__, sc->max_devices,
sc->max_dpm_entries);
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING)
sc->is_dpm_enable = 0;
if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
sc->num_rsvd_entries = 1;
volume_mapping_flags = sc->ioc_pg8.IRVolumeMappingFlags &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
if (sc->ir_firmware && (volume_mapping_flags ==
MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING))
sc->num_rsvd_entries += sc->max_volumes;
error = mps_mapping_allocate_memory(sc);
if (error)
return (error);
for (i = 0; i < sc->max_devices; i++)
_mapping_clear_map_entry(sc->mapping_table + i);
for (i = 0; i < sc->max_enclosures; i++)
_mapping_clear_enc_entry(sc->enclosure_table + i);
for (i = 0; i < sc->max_devices; i++) {
sc->removal_table[i].dev_handle = 0;
sc->removal_table[i].dpm_entry_num = MPS_DPM_BAD_IDX;
}
memset(sc->dpm_entry_used, 0, sc->max_dpm_entries);
memset(sc->dpm_flush_entry, 0, sc->max_dpm_entries);
if (sc->is_dpm_enable) {
dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
(sc->max_dpm_entries *
sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
retry_count = 0;
retry_read_dpm:
if (mps_config_get_dpm_pg0(sc, &mpi_reply, sc->dpm_pg0,
dpm_pg0_sz)) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: DPM page "
"read failed.\n", __func__);
if (retry_count < 3) {
retry_count++;
goto retry_read_dpm;
}
sc->is_dpm_enable = 0;
}
}
if (sc->is_dpm_enable)
_mapping_process_dpm_pg0(sc);
else {
mps_dprint(sc, MPS_MAPPING, "%s: DPM processing is disabled. "
"Device mappings will not persist across reboots or "
"resets.\n", __func__);
}
sc->track_mapping_events = 1;
return 0;
}
/**
* mps_mapping_exit - clear mapping table and associated memory
* @sc: per adapter object
*
* Returns nothing.
*/
void
mps_mapping_exit(struct mps_softc *sc)
{
_mapping_flush_dpm_pages(sc);
mps_mapping_free_memory(sc);
}
/**
* mps_mapping_get_tid - return the target id for sas device and handle
* @sc: per adapter object
* @sas_address: sas address of the device
* @handle: device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mps_mapping_get_tid(struct mps_softc *sc, uint64_t sas_address, u16 handle)
{
u32 map_idx;
struct dev_mapping_table *mt_entry;
for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
mt_entry = &sc->mapping_table[map_idx];
if (mt_entry->dev_handle == handle && mt_entry->physical_id ==
sas_address)
return mt_entry->id;
}
return MPS_MAP_BAD_ID;
}
/**
* mps_mapping_get_tid_from_handle - find a target id in mapping table using
* only the dev handle. This is just a wrapper function for the local function
* _mapping_get_mt_idx_from_handle.
* @sc: per adapter object
* @handle: device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mps_mapping_get_tid_from_handle(struct mps_softc *sc, u16 handle)
{
return (_mapping_get_mt_idx_from_handle(sc, handle));
}
/**
* mps_mapping_get_raid_tid - return the target id for raid device
* @sc: per adapter object
* @wwid: world wide identifier for raid volume
* @volHandle: volume device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mps_mapping_get_raid_tid(struct mps_softc *sc, u64 wwid, u16 volHandle)
{
u32 start_idx, end_idx, map_idx;
struct dev_mapping_table *mt_entry;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
mt_entry = &sc->mapping_table[start_idx];
for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
if (mt_entry->dev_handle == volHandle &&
mt_entry->physical_id == wwid)
return mt_entry->id;
}
return MPS_MAP_BAD_ID;
}
/**
* mps_mapping_get_raid_tid_from_handle - find raid device in mapping table
* using only the volume dev handle. This is just a wrapper function for the
* local function _mapping_get_ir_mt_idx_from_handle.
* @sc: per adapter object
* @volHandle: volume device handle
*
* Returns valid target ID on success or BAD_ID.
*/
unsigned int
mps_mapping_get_raid_tid_from_handle(struct mps_softc *sc, u16 volHandle)
{
return (_mapping_get_ir_mt_idx_from_handle(sc, volHandle));
}
/**
* mps_mapping_enclosure_dev_status_change_event - handle enclosure events
* @sc: per adapter object
* @event_data: event data payload
*
* Return nothing.
*/
void
mps_mapping_enclosure_dev_status_change_event(struct mps_softc *sc,
Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
u8 enc_idx, missing_count;
struct enc_mapping_table *et_entry;
Mpi2DriverMap0Entry_t *dpm_entry;
u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
u8 update_phy_bits = 0;
u32 saved_phy_bits;
uint64_t temp64_var;
if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) !=
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING)
goto out;
dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_ADDED) {
if (!event_data->NumSlots) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Enclosure "
"with handle = 0x%x reported 0 slots.\n", __func__,
le16toh(event_data->EnclosureHandle));
goto out;
}
temp64_var = event_data->EnclosureLogicalID.High;
temp64_var = (temp64_var << 32) |
event_data->EnclosureLogicalID.Low;
enc_idx = _mapping_get_enc_idx_from_id(sc, temp64_var,
event_data->PhyBits);
/*
* If the Added enclosure is already in the Enclosure Table,
		 * make sure that all the enclosure info is up to date. If
* the enclosure was missing and has just been added back, or if
* the enclosure's Phy Bits have changed, clear the missing
* count and update the Phy Bits in the mapping table and in the
* DPM, if it's being used.
*/
if (enc_idx != MPS_ENCTABLE_BAD_IDX) {
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->init_complete &&
!et_entry->missing_count) {
mps_dprint(sc, MPS_MAPPING, "%s: Enclosure %d "
"is already present with handle = 0x%x\n",
__func__, enc_idx, et_entry->enc_handle);
goto out;
}
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->start_slot = le16toh(event_data->StartSlot);
saved_phy_bits = et_entry->phy_bits;
et_entry->phy_bits |= le32toh(event_data->PhyBits);
if (saved_phy_bits != et_entry->phy_bits)
update_phy_bits = 1;
if (et_entry->missing_count || update_phy_bits) {
et_entry->missing_count = 0;
if (sc->is_dpm_enable &&
et_entry->dpm_entry_num !=
MPS_DPM_BAD_IDX) {
dpm_entry += et_entry->dpm_entry_num;
missing_count =
(u8)(dpm_entry->MappingInformation &
MPI2_DRVMAP0_MAPINFO_MISSING_MASK);
if (missing_count || update_phy_bits) {
dpm_entry->MappingInformation
= et_entry->num_slots;
dpm_entry->MappingInformation
<<= map_shift;
dpm_entry->PhysicalBitsMapping
= et_entry->phy_bits;
sc->dpm_flush_entry[et_entry->
dpm_entry_num] = 1;
}
}
}
} else {
/*
* This is a new enclosure that is being added.
* Initialize the Enclosure Table entry. It will be
* finalized when a device is added for the enclosure
* and the enclosure has enough space in the Mapping
* Table to map its devices.
*/
enc_idx = sc->num_enc_table_entries;
if (enc_idx >= sc->max_enclosures) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: "
"Enclosure cannot be added to mapping "
"table because it's full.\n", __func__);
goto out;
}
sc->num_enc_table_entries++;
et_entry = &sc->enclosure_table[enc_idx];
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->enclosure_id = le64toh(event_data->
EnclosureLogicalID.High);
et_entry->enclosure_id =
((et_entry->enclosure_id << 32) |
le64toh(event_data->EnclosureLogicalID.Low));
et_entry->start_index = MPS_MAPTABLE_BAD_IDX;
et_entry->dpm_entry_num = MPS_DPM_BAD_IDX;
et_entry->num_slots = le16toh(event_data->NumSlots);
et_entry->start_slot = le16toh(event_data->StartSlot);
et_entry->phy_bits = le32toh(event_data->PhyBits);
}
et_entry->init_complete = 1;
} else if (event_data->ReasonCode ==
MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING) {
/*
* An enclosure was removed. Update its missing count and then
* update the DPM entry with the new missing count for the
* enclosure.
*/
enc_idx = _mapping_get_enc_idx_from_handle(sc,
le16toh(event_data->EnclosureHandle));
if (enc_idx == MPS_ENCTABLE_BAD_IDX) {
mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Cannot "
"unmap enclosure %d because it has already been "
"deleted.\n", __func__, enc_idx);
goto out;
}
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->missing_count < MPS_MAX_MISSING_COUNT)
et_entry->missing_count++;
if (sc->is_dpm_enable &&
et_entry->dpm_entry_num != MPS_DPM_BAD_IDX) {
dpm_entry += et_entry->dpm_entry_num;
dpm_entry->MappingInformation = et_entry->num_slots;
dpm_entry->MappingInformation <<= map_shift;
dpm_entry->MappingInformation |=
et_entry->missing_count;
sc->dpm_flush_entry[et_entry->dpm_entry_num] = 1;
}
et_entry->init_complete = 1;
}
out:
_mapping_flush_dpm_pages(sc);
if (sc->pending_map_events)
sc->pending_map_events--;
}
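/*
 * Illustration of the MappingInformation packing done in the handler above
 * for an enclosure DPM entry: the slot count occupies the SLOT field and the
 * missing count the low MISSING bits.  The helper is a hypothetical sketch,
 * not driver code.
 */
static __inline u16
_mapping_sketch_pack_mapinfo(u16 num_slots, u8 missing_count)
{

	return ((num_slots << MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT) |
	    (missing_count & MPI2_DRVMAP0_MAPINFO_MISSING_MASK));
}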
/**
* mps_mapping_topology_change_event - handle topology change events
* @sc: per adapter object
* @event_data: event data payload
*
* Returns nothing.
*/
void
mps_mapping_topology_change_event(struct mps_softc *sc,
Mpi2EventDataSasTopologyChangeList_t *event_data)
{
struct _map_topology_change topo_change;
struct _map_phy_change *phy_change;
Mpi2EventSasTopoPhyEntry_t *event_phy_change;
u8 i, num_entries;
topo_change.enc_handle = le16toh(event_data->EnclosureHandle);
topo_change.exp_handle = le16toh(event_data->ExpanderDevHandle);
num_entries = event_data->NumEntries;
topo_change.num_entries = num_entries;
topo_change.start_phy_num = event_data->StartPhyNum;
topo_change.num_phys = event_data->NumPhys;
topo_change.exp_status = event_data->ExpStatus;
event_phy_change = event_data->PHY;
topo_change.phy_details = NULL;
if (!num_entries)
goto out;
- phy_change = mallocarray(num_entries, sizeof(struct _map_phy_change),
+ phy_change = malloc(sizeof(struct _map_phy_change) * num_entries,
M_MPT2, M_NOWAIT|M_ZERO);
topo_change.phy_details = phy_change;
if (!phy_change)
goto out;
for (i = 0; i < num_entries; i++, event_phy_change++, phy_change++) {
phy_change->dev_handle = le16toh(event_phy_change->
AttachedDevHandle);
phy_change->reason = event_phy_change->PhyStatus &
MPI2_EVENT_SAS_TOPO_RC_MASK;
}
_mapping_update_missing_count(sc, &topo_change);
_mapping_get_dev_info(sc, &topo_change);
_mapping_clear_removed_entries(sc);
_mapping_add_new_device(sc, &topo_change);
out:
free(topo_change.phy_details, M_MPT2);
_mapping_flush_dpm_pages(sc);
if (sc->pending_map_events)
sc->pending_map_events--;
}
/**
* mps_mapping_ir_config_change_event - handle IR config change list events
* @sc: per adapter object
* @event_data: event data payload
*
* Returns nothing.
*/
void
mps_mapping_ir_config_change_event(struct mps_softc *sc,
Mpi2EventDataIrConfigChangeList_t *event_data)
{
Mpi2EventIrConfigElement_t *element;
int i;
u64 *wwid_table;
u32 map_idx, flags;
struct dev_mapping_table *mt_entry;
u16 element_flags;
- wwid_table = mallocarray(event_data->NumElements, sizeof(u64), M_MPT2,
+ wwid_table = malloc(sizeof(u64) * event_data->NumElements, M_MPT2,
M_NOWAIT | M_ZERO);
if (!wwid_table)
goto out;
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
flags = le32toh(event_data->Flags);
/*
* For volume changes, get the WWID for the volume and put it in a
* table to be used in the processing of the IR change event.
*/
for (i = 0; i < event_data->NumElements; i++, element++) {
element_flags = le16toh(element->ElementFlags);
if ((element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_ADDED) &&
(element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_REMOVED) &&
(element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE)
&& (element->ReasonCode !=
MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED))
continue;
if ((element_flags &
MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) ==
MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) {
mps_config_get_volume_wwid(sc,
le16toh(element->VolDevHandle), &wwid_table[i]);
}
}
/*
* Check the ReasonCode for each element in the IR event and Add/Remove
* Volumes or Physical Disks of Volumes to/from the mapping table. Use
	 * the WWIDs obtained above in wwid_table.
*/
if (flags == MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
goto out;
else {
element = (Mpi2EventIrConfigElement_t *)&event_data->
ConfigElement[0];
for (i = 0; i < event_data->NumElements; i++, element++) {
if (element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_ADDED ||
element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
map_idx = _mapping_get_ir_mt_idx_from_wwid
(sc, wwid_table[i]);
if (map_idx != MPS_MAPTABLE_BAD_IDX) {
/*
* The volume is already in the mapping
					 * table. Just update its info.
*/
mt_entry = &sc->mapping_table[map_idx];
mt_entry->id = map_idx;
mt_entry->dev_handle = le16toh
(element->VolDevHandle);
mt_entry->device_info =
MPS_DEV_RESERVED | MPS_MAP_IN_USE;
_mapping_update_ir_missing_cnt(sc,
map_idx, element, wwid_table[i]);
continue;
}
/*
* Volume is not in mapping table yet. Find a
* free entry in the mapping table at the
* volume mapping locations. If no entries are
* available, this is an error because it means
* there are more volumes than can be mapped
* and that should never happen for volumes.
*/
map_idx = _mapping_get_free_ir_mt_idx(sc);
if (map_idx == MPS_MAPTABLE_BAD_IDX)
{
mps_dprint(sc, MPS_ERROR | MPS_MAPPING,
"%s: failed to add the volume with "
"handle 0x%04x because there is no "
"free space available in the "
"mapping table\n", __func__,
le16toh(element->VolDevHandle));
continue;
}
mt_entry = &sc->mapping_table[map_idx];
mt_entry->physical_id = wwid_table[i];
mt_entry->id = map_idx;
mt_entry->dev_handle = le16toh(element->
VolDevHandle);
mt_entry->device_info = MPS_DEV_RESERVED |
MPS_MAP_IN_USE;
_mapping_update_ir_missing_cnt(sc, map_idx,
element, wwid_table[i]);
} else if (element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
map_idx = _mapping_get_ir_mt_idx_from_wwid(sc,
wwid_table[i]);
if (map_idx == MPS_MAPTABLE_BAD_IDX) {
mps_dprint(sc, MPS_MAPPING,"%s: Failed "
"to remove a volume because it has "
"already been removed.\n",
__func__);
continue;
}
_mapping_update_ir_missing_cnt(sc, map_idx,
element, wwid_table[i]);
} else if (element->ReasonCode ==
MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) {
map_idx = _mapping_get_mt_idx_from_handle(sc,
le16toh(element->VolDevHandle));
if (map_idx == MPS_MAPTABLE_BAD_IDX) {
mps_dprint(sc, MPS_MAPPING,"%s: Failed "
"to remove volume with handle "
"0x%04x because it has already "
"been removed.\n", __func__,
le16toh(element->VolDevHandle));
continue;
}
mt_entry = &sc->mapping_table[map_idx];
_mapping_update_ir_missing_cnt(sc, map_idx,
element, mt_entry->physical_id);
}
}
}
out:
_mapping_flush_dpm_pages(sc);
free(wwid_table, M_MPT2);
if (sc->pending_map_events)
sc->pending_map_events--;
}
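/*
 * Sysctl handler that dumps the populated device mapping table entries
 * (index, physical ID, device handle and mapped target ID).
 */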
int
mps_mapping_dump(SYSCTL_HANDLER_ARGS)
{
struct mps_softc *sc;
struct dev_mapping_table *mt_entry;
struct sbuf sbuf;
int i, error;
sc = (struct mps_softc *)arg1;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
sbuf_printf(&sbuf, "\nindex physical_id handle id\n");
for (i = 0; i < sc->max_devices; i++) {
mt_entry = &sc->mapping_table[i];
if (mt_entry->physical_id == 0)
continue;
sbuf_printf(&sbuf, "%4d %jx %04x %hd\n",
i, mt_entry->physical_id, mt_entry->dev_handle,
mt_entry->id);
}
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
return (error);
}
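/*
 * Sysctl handler that dumps the populated enclosure mapping table entries
 * (index, enclosure ID, enclosure handle and starting map index).
 */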
int
mps_mapping_encl_dump(SYSCTL_HANDLER_ARGS)
{
struct mps_softc *sc;
struct enc_mapping_table *enc_entry;
struct sbuf sbuf;
int i, error;
sc = (struct mps_softc *)arg1;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
sbuf_printf(&sbuf, "\nindex enclosure_id handle map_index\n");
for (i = 0; i < sc->max_enclosures; i++) {
enc_entry = &sc->enclosure_table[i];
if (enc_entry->enclosure_id == 0)
continue;
sbuf_printf(&sbuf, "%4d %jx %04x %d\n",
i, enc_entry->enclosure_id, enc_entry->enc_handle,
enc_entry->start_index);
}
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
return (error);
}
Index: head/sys/dev/mpt/mpt_cam.c
===================================================================
--- head/sys/dev/mpt/mpt_cam.c (revision 328217)
+++ head/sys/dev/mpt/mpt_cam.c (revision 328218)
@@ -1,5369 +1,5369 @@
/*-
* FreeBSD/CAM specific routines for LSI '909 FC adapters.
* FreeBSD Version.
*
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause
*
* Copyright (c) 2000, 2001 by Greg Ansley
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*-
* Copyright (c) 2002, 2006 by Matthew Jacob
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon including
* a substantially similar Disclaimer requirement for further binary
* redistribution.
* 3. Neither the names of the above listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
* OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Support from Chris Ellsworth in order to make SAS adapters work
* is gratefully acknowledged.
*
* Support from LSI-Logic has also gone a great deal toward making this a
* workable subsystem and is gratefully acknowledged.
*/
/*-
* Copyright (c) 2004, Avid Technology, Inc. and its contributors.
* Copyright (c) 2005, WHEEL Sp. z o.o.
* Copyright (c) 2004, 2005 Justin T. Gibbs
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon including
* a substantially similar Disclaimer requirement for further binary
* redistribution.
* 3. Neither the names of the above listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
* OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>
#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"
#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);
static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);
static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
target_id_t, lun_id_t, u_int, int);
static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
uint8_t, uint8_t const *, u_int);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;
static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;
static struct mpt_personality mpt_cam_personality =
{
.name = "mpt_cam",
.probe = mpt_cam_probe,
.attach = mpt_cam_attach,
.enable = mpt_cam_enable,
.ready = mpt_cam_ready,
.event = mpt_cam_event,
.reset = mpt_cam_ioc_reset,
.detach = mpt_cam_detach,
};
DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
static int
mpt_cam_probe(struct mpt_softc *mpt)
{
int role;
/*
* Only attach to nodes that support the initiator or target role
* (or want to) or have RAID physical devices that need CAM pass-thru
* support.
*/
if (mpt->do_cfg_role) {
role = mpt->cfg_role;
} else {
role = mpt->role;
}
if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
(mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
return (0);
}
return (ENODEV);
}
static int
mpt_cam_attach(struct mpt_softc *mpt)
{
struct cam_devq *devq;
mpt_handler_t handler;
int maxq;
int error;
MPT_LOCK(mpt);
TAILQ_INIT(&mpt->request_timeout_list);
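/*
 * The SIM queue depth cannot exceed what the IOC will accept, so clamp
 * it to the lesser of the reported global credits and our request pool.
 */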
maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
handler.reply_handler = mpt_scsi_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&scsi_io_handler_id);
if (error != 0) {
MPT_UNLOCK(mpt);
goto cleanup;
}
handler.reply_handler = mpt_scsi_tmf_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&scsi_tmf_handler_id);
if (error != 0) {
MPT_UNLOCK(mpt);
goto cleanup;
}
/*
* If we're fibre channel and could support target mode, we register
* an ELS reply handler and give it resources.
*/
if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
handler.reply_handler = mpt_fc_els_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&fc_els_handler_id);
if (error != 0) {
MPT_UNLOCK(mpt);
goto cleanup;
}
if (mpt_add_els_buffers(mpt) == FALSE) {
error = ENOMEM;
MPT_UNLOCK(mpt);
goto cleanup;
}
maxq -= mpt->els_cmds_allocated;
}
/*
* If we support target mode, we register a reply handler for it,
* but don't add command resources until we actually enable target
* mode.
*/
if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
handler.reply_handler = mpt_scsi_tgt_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&mpt->scsi_tgt_handler_id);
if (error != 0) {
MPT_UNLOCK(mpt);
goto cleanup;
}
}
if (mpt->is_sas) {
handler.reply_handler = mpt_sata_pass_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&sata_pass_handler_id);
if (error != 0) {
MPT_UNLOCK(mpt);
goto cleanup;
}
}
/*
* We keep one request reserved for timeout TMF requests.
*/
mpt->tmf_req = mpt_get_request(mpt, FALSE);
if (mpt->tmf_req == NULL) {
mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
error = ENOMEM;
MPT_UNLOCK(mpt);
goto cleanup;
}
/*
* Mark the request as free even though not on the free list.
* There is only one TMF request allowed to be outstanding at
* a time and the TMF routines perform their own allocation
* tracking using the standard state flags.
*/
mpt->tmf_req->state = REQ_STATE_FREE;
maxq--;
/*
* The rest of this is CAM foo, for which we need to drop our lock
*/
MPT_UNLOCK(mpt);
if (mpt_spawn_recovery_thread(mpt) != 0) {
mpt_prt(mpt, "Unable to spawn recovery thread!\n");
error = ENOMEM;
goto cleanup;
}
/*
* Create the device queue for our SIM(s).
*/
devq = cam_simq_alloc(maxq);
if (devq == NULL) {
mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
error = ENOMEM;
goto cleanup;
}
/*
* Construct our SIM entry.
*/
mpt->sim =
mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
if (mpt->sim == NULL) {
mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
cam_simq_free(devq);
error = ENOMEM;
goto cleanup;
}
/*
* Register exactly this bus.
*/
MPT_LOCK(mpt);
if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
mpt_prt(mpt, "Bus registration Failed!\n");
error = ENOMEM;
MPT_UNLOCK(mpt);
goto cleanup;
}
if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt, "Unable to allocate Path!\n");
error = ENOMEM;
MPT_UNLOCK(mpt);
goto cleanup;
}
MPT_UNLOCK(mpt);
/*
* Only register a second bus for RAID physical
* devices if the controller supports RAID.
*/
if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
return (0);
}
/*
* Create a "bus" to export all hidden disks to CAM.
*/
mpt->phydisk_sim =
mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
if (mpt->phydisk_sim == NULL) {
mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
error = ENOMEM;
goto cleanup;
}
/*
* Register this bus.
*/
MPT_LOCK(mpt);
if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
CAM_SUCCESS) {
mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
error = ENOMEM;
MPT_UNLOCK(mpt);
goto cleanup;
}
if (xpt_create_path(&mpt->phydisk_path, NULL,
cam_sim_path(mpt->phydisk_sim),
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
error = ENOMEM;
MPT_UNLOCK(mpt);
goto cleanup;
}
MPT_UNLOCK(mpt);
mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
return (0);
cleanup:
mpt_cam_detach(mpt);
return (error);
}
/*
* Read FC configuration information
*/
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
char *topology = NULL;
int rv;
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
if (rv) {
return (-1);
}
mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
mpt->mpt_fcport_page0.Header.PageVersion,
mpt->mpt_fcport_page0.Header.PageLength,
mpt->mpt_fcport_page0.Header.PageNumber,
mpt->mpt_fcport_page0.Header.PageType);
rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
if (rv) {
mpt_prt(mpt, "failed to read FC Port Page 0\n");
return (-1);
}
mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
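/* Record the negotiated link speed in Gb/s (0 if unknown). */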
switch (mpt->mpt_fcport_page0.CurrentSpeed) {
case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
mpt->mpt_fcport_speed = 1;
break;
case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
mpt->mpt_fcport_speed = 2;
break;
case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
mpt->mpt_fcport_speed = 10;
break;
case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
mpt->mpt_fcport_speed = 4;
break;
default:
mpt->mpt_fcport_speed = 0;
break;
}
switch (mpt->mpt_fcport_page0.Flags &
MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
mpt->mpt_fcport_speed = 0;
topology = "<NO LOOP>";
break;
case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
topology = "N-Port";
break;
case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
topology = "NL-Port";
break;
case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
topology = "F-Port";
break;
case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
topology = "FL-Port";
break;
default:
mpt->mpt_fcport_speed = 0;
topology = "?";
break;
}
mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
| mpt->mpt_fcport_page0.WWNN.Low;
mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
| mpt->mpt_fcport_page0.WWPN.Low;
mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;
mpt_lprt(mpt, MPT_PRT_INFO,
"FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx "
"Speed %u-Gbit\n", topology,
(uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn,
mpt->mpt_fcport_speed);
MPT_UNLOCK(mpt);
ctx = device_get_sysctl_ctx(mpt->dev);
tree = device_get_sysctl_tree(mpt->dev);
SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn,
"World Wide Node Name");
SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn,
"World Wide Port Name");
MPT_LOCK(mpt);
return (0);
}
/*
* Set FC configuration information.
*/
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
CONFIG_PAGE_FC_PORT_1 fc;
U32 fl;
int r, doit = 0;
int role;
r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
&fc.Header, FALSE, 5000);
if (r) {
mpt_prt(mpt, "failed to read FC page 1 header\n");
return (mpt_fc_reset_link(mpt, 1));
}
r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
&fc.Header, sizeof (fc), FALSE, 5000);
if (r) {
mpt_prt(mpt, "failed to read FC page 1\n");
return (mpt_fc_reset_link(mpt, 1));
}
mpt2host_config_page_fc_port_1(&fc);
/*
* Check our flags to make sure we support the role we want.
*/
doit = 0;
role = 0;
fl = fc.Flags;
if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
role |= MPT_ROLE_INITIATOR;
}
if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
role |= MPT_ROLE_TARGET;
}
fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
if (mpt->do_cfg_role == 0) {
role = mpt->cfg_role;
} else {
mpt->do_cfg_role = 0;
}
if (role != mpt->cfg_role) {
if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
if ((role & MPT_ROLE_INITIATOR) == 0) {
mpt_prt(mpt, "adding initiator role\n");
fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
doit++;
} else {
mpt_prt(mpt, "keeping initiator role\n");
}
} else if (role & MPT_ROLE_INITIATOR) {
mpt_prt(mpt, "removing initiator role\n");
doit++;
}
if (mpt->cfg_role & MPT_ROLE_TARGET) {
if ((role & MPT_ROLE_TARGET) == 0) {
mpt_prt(mpt, "adding target role\n");
fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
doit++;
} else {
mpt_prt(mpt, "keeping target role\n");
}
} else if (role & MPT_ROLE_TARGET) {
mpt_prt(mpt, "removing target role\n");
doit++;
}
mpt->role = mpt->cfg_role;
}
if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
mpt_prt(mpt, "adding OXID option\n");
fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
doit++;
}
}
if (doit) {
fc.Flags = fl;
host2mpt_config_page_fc_port_1(&fc);
r = mpt_write_cfg_page(mpt,
MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
sizeof(fc), FALSE, 5000);
if (r != 0) {
mpt_prt(mpt, "failed to update NVRAM with changes\n");
return (0);
}
mpt_prt(mpt, "NOTE: NVRAM changes will not take "
"effect until next reboot or IOC reset\n");
}
return (0);
}
static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
ConfigExtendedPageHeader_t hdr;
struct mptsas_phyinfo *phyinfo;
SasIOUnitPage0_t *buffer;
int error, len, i;
error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
&hdr, 0, 10000);
if (error)
goto out;
if (hdr.ExtPageLength == 0) {
error = ENXIO;
goto out;
}
len = hdr.ExtPageLength * 4;
buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
if (buffer == NULL) {
error = ENOMEM;
goto out;
}
error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
0, &hdr, buffer, len, 0, 10000);
if (error) {
free(buffer, M_DEVBUF);
goto out;
}
portinfo->num_phys = buffer->NumPhys;
- portinfo->phy_info = mallocarray(portinfo->num_phys,
- sizeof(*portinfo->phy_info), M_DEVBUF, M_NOWAIT|M_ZERO);
+ portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
+ portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
if (portinfo->phy_info == NULL) {
free(buffer, M_DEVBUF);
error = ENOMEM;
goto out;
}
for (i = 0; i < portinfo->num_phys; i++) {
phyinfo = &portinfo->phy_info[i];
phyinfo->phy_num = i;
phyinfo->port_id = buffer->PhyData[i].Port;
phyinfo->negotiated_link_rate =
buffer->PhyData[i].NegotiatedLinkRate;
phyinfo->handle =
le16toh(buffer->PhyData[i].ControllerDevHandle);
}
free(buffer, M_DEVBUF);
out:
return (error);
}
static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
uint32_t form, uint32_t form_specific)
{
ConfigExtendedPageHeader_t hdr;
SasPhyPage0_t *buffer;
int error;
error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
0, 10000);
if (error)
goto out;
if (hdr.ExtPageLength == 0) {
error = ENXIO;
goto out;
}
buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
if (buffer == NULL) {
error = ENOMEM;
goto out;
}
error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
form + form_specific, &hdr, buffer,
sizeof(SasPhyPage0_t), 0, 10000);
if (error) {
free(buffer, M_DEVBUF);
goto out;
}
phy_info->hw_link_rate = buffer->HwLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
free(buffer, M_DEVBUF);
out:
return (error);
}
static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
uint32_t form, uint32_t form_specific)
{
ConfigExtendedPageHeader_t hdr;
SasDevicePage0_t *buffer;
uint64_t sas_address;
int error = 0;
bzero(device_info, sizeof(*device_info));
error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
&hdr, 0, 10000);
if (error)
goto out;
if (hdr.ExtPageLength == 0) {
error = ENXIO;
goto out;
}
buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
if (buffer == NULL) {
error = ENOMEM;
goto out;
}
error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
form + form_specific, &hdr, buffer,
sizeof(SasDevicePage0_t), 0, 10000);
if (error) {
free(buffer, M_DEVBUF);
goto out;
}
device_info->dev_handle = le16toh(buffer->DevHandle);
device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
device_info->slot = le16toh(buffer->Slot);
device_info->phy_num = buffer->PhyNum;
device_info->physical_port = buffer->PhysicalPort;
device_info->target_id = buffer->TargetID;
device_info->bus = buffer->Bus;
bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
device_info->sas_address = le64toh(sas_address);
device_info->device_info = le32toh(buffer->DeviceInfo);
free(buffer, M_DEVBUF);
out:
return (error);
}
/*
* Read SAS configuration information. Nothing to do yet.
*/
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
struct mptsas_portinfo *portinfo;
struct mptsas_phyinfo *phyinfo;
int error, i;
portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
if (portinfo == NULL)
return (ENOMEM);
error = mptsas_sas_io_unit_pg0(mpt, portinfo);
if (error) {
free(portinfo, M_DEVBUF);
return (0);
}
for (i = 0; i < portinfo->num_phys; i++) {
phyinfo = &portinfo->phy_info[i];
error = mptsas_sas_phy_pg0(mpt, phyinfo,
(MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
if (error)
break;
error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
phyinfo->handle);
if (error)
break;
phyinfo->identify.phy_num = phyinfo->phy_num = i;
if (phyinfo->attached.dev_handle)
error = mptsas_sas_device_pg0(mpt,
&phyinfo->attached,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
phyinfo->attached.dev_handle);
if (error)
break;
}
mpt->sas_portinfo = portinfo;
return (0);
}
static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
int enabled)
{
SataPassthroughRequest_t *pass;
request_t *req;
int error, status;
req = mpt_get_request(mpt, 0);
if (req == NULL)
return;
pass = req->req_vbuf;
bzero(pass, sizeof(SataPassthroughRequest_t));
pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
pass->TargetID = devinfo->target_id;
pass->Bus = devinfo->bus;
pass->PassthroughFlags = 0;
pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
pass->DataLength = 0;
pass->MsgContext = htole32(req->index | sata_pass_handler_id);
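/*
 * Build a Register Host-to-Device FIS for ATA SET FEATURES (0xef):
 * FIS type 0x27 with the command bit (0x80) set, and features
 * subcode 0x02 (enable) or 0x82 (disable) to toggle the drive
 * write cache.
 */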
pass->CommandFIS[0] = 0x27;
pass->CommandFIS[1] = 0x80;
pass->CommandFIS[2] = 0xef;
pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
pass->CommandFIS[7] = 0x40;
pass->CommandFIS[15] = 0x08;
mpt_check_doorbell(mpt);
mpt_send_cmd(mpt, req);
error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
10 * 1000);
if (error) {
mpt_free_request(mpt, req);
printf("error %d sending passthrough\n", error);
return;
}
status = le16toh(req->IOCStatus);
if (status != MPI_IOCSTATUS_SUCCESS) {
mpt_free_request(mpt, req);
printf("IOCSTATUS %d\n", status);
return;
}
mpt_free_request(mpt, req);
}
/*
* Set SAS configuration information. Nothing to do yet.
*/
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
struct mptsas_phyinfo *phyinfo;
int i;
if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
phyinfo = &mpt->sas_portinfo->phy_info[i];
if (phyinfo->attached.dev_handle == 0)
continue;
if ((phyinfo->attached.device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
continue;
if (bootverbose)
device_printf(mpt->dev,
"%sabling SATA WC on phy %d\n",
(mpt_enable_sata_wc) ? "En" : "Dis", i);
mptsas_set_sata_wc(mpt, &phyinfo->attached,
mpt_enable_sata_wc);
}
}
return (0);
}
static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
if (req != NULL) {
if (reply_frame != NULL) {
req->IOCStatus = le16toh(reply_frame->IOCStatus);
}
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
wakeup(req);
} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
/*
* Whew- we can free this request (late completion)
*/
mpt_free_request(mpt, req);
}
}
return (TRUE);
}
/*
* Read SCSI configuration information
*/
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
int rv, i;
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
&mpt->mpt_port_page0.Header, FALSE, 5000);
if (rv) {
return (-1);
}
mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
mpt->mpt_port_page0.Header.PageVersion,
mpt->mpt_port_page0.Header.PageLength,
mpt->mpt_port_page0.Header.PageNumber,
mpt->mpt_port_page0.Header.PageType);
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
&mpt->mpt_port_page1.Header, FALSE, 5000);
if (rv) {
return (-1);
}
mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
mpt->mpt_port_page1.Header.PageVersion,
mpt->mpt_port_page1.Header.PageLength,
mpt->mpt_port_page1.Header.PageNumber,
mpt->mpt_port_page1.Header.PageType);
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
&mpt->mpt_port_page2.Header, FALSE, 5000);
if (rv) {
return (-1);
}
mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
mpt->mpt_port_page2.Header.PageVersion,
mpt->mpt_port_page2.Header.PageLength,
mpt->mpt_port_page2.Header.PageNumber,
mpt->mpt_port_page2.Header.PageType);
for (i = 0; i < 16; i++) {
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
if (rv) {
return (-1);
}
mpt_lprt(mpt, MPT_PRT_DEBUG,
"SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
mpt->mpt_dev_page0[i].Header.PageVersion,
mpt->mpt_dev_page0[i].Header.PageLength,
mpt->mpt_dev_page0[i].Header.PageNumber,
mpt->mpt_dev_page0[i].Header.PageType);
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
if (rv) {
return (-1);
}
mpt_lprt(mpt, MPT_PRT_DEBUG,
"SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
mpt->mpt_dev_page1[i].Header.PageVersion,
mpt->mpt_dev_page1[i].Header.PageLength,
mpt->mpt_dev_page1[i].Header.PageNumber,
mpt->mpt_dev_page1[i].Header.PageType);
}
/*
* At this point, we don't *have* to fail. As long as we have
* valid config header information, we can (barely) lurch
* along.
*/
rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
sizeof(mpt->mpt_port_page0), FALSE, 5000);
if (rv) {
mpt_prt(mpt, "failed to read SPI Port Page 0\n");
} else {
mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
mpt->mpt_port_page0.Capabilities,
mpt->mpt_port_page0.PhysicalInterface);
}
rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
sizeof(mpt->mpt_port_page1), FALSE, 5000);
if (rv) {
mpt_prt(mpt, "failed to read SPI Port Page 1\n");
} else {
mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
mpt->mpt_port_page1.Configuration,
mpt->mpt_port_page1.OnBusTimerValue);
}
rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
sizeof(mpt->mpt_port_page2), FALSE, 5000);
if (rv) {
mpt_prt(mpt, "failed to read SPI Port Page 2\n");
} else {
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"Port Page 2: Flags %x Settings %x\n",
mpt->mpt_port_page2.PortFlags,
mpt->mpt_port_page2.PortSettings);
mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
for (i = 0; i < 16; i++) {
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
" Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
}
}
for (i = 0; i < 16; i++) {
rv = mpt_read_cur_cfg_page(mpt, i,
&mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
FALSE, 5000);
if (rv) {
mpt_prt(mpt,
"cannot read SPI Target %d Device Page 0\n", i);
continue;
}
mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"target %d page 0: Negotiated Params %x Information %x\n",
i, mpt->mpt_dev_page0[i].NegotiatedParameters,
mpt->mpt_dev_page0[i].Information);
rv = mpt_read_cur_cfg_page(mpt, i,
&mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
FALSE, 5000);
if (rv) {
mpt_prt(mpt,
"cannot read SPI Target %d Device Page 1\n", i);
continue;
}
mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"target %d page 1: Requested Params %x Configuration %x\n",
i, mpt->mpt_dev_page1[i].RequestedParameters,
mpt->mpt_dev_page1[i].Configuration);
}
return (0);
}
/*
* Validate SPI configuration information.
*
* In particular, validate SPI Port Page 1.
*/
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
int error, i, pp1val;
mpt->mpt_disc_enable = 0xff;
mpt->mpt_tag_enable = 0;
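/*
 * Compute the expected SPI Port Page 1 Configuration value: our
 * initiator ID in the low bits and the same ID as a one-hot bit in
 * the port response ID mask. If NVRAM disagrees, rewrite it below.
 */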
pp1val = ((1 << mpt->mpt_ini_id) <<
MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
if (mpt->mpt_port_page1.Configuration != pp1val) {
CONFIG_PAGE_SCSI_PORT_1 tmp;
mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
"be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
tmp = mpt->mpt_port_page1;
tmp.Configuration = pp1val;
host2mpt_config_page_scsi_port_1(&tmp);
error = mpt_write_cur_cfg_page(mpt, 0,
&tmp.Header, sizeof(tmp), FALSE, 5000);
if (error) {
return (-1);
}
error = mpt_read_cur_cfg_page(mpt, 0,
&tmp.Header, sizeof(tmp), FALSE, 5000);
if (error) {
return (-1);
}
mpt2host_config_page_scsi_port_1(&tmp);
if (tmp.Configuration != pp1val) {
mpt_prt(mpt,
"failed to reset SPI Port Page 1 Config value\n");
return (-1);
}
mpt->mpt_port_page1 = tmp;
}
/*
* The purpose of this exercise is to get
* all targets back to async/narrow.
*
* We skip this step if the BIOS has already negotiated
* speeds with the targets.
*/
i = mpt->mpt_port_page2.PortSettings &
MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"honoring BIOS transfer negotiations\n");
} else {
for (i = 0; i < 16; i++) {
mpt->mpt_dev_page1[i].RequestedParameters = 0;
mpt->mpt_dev_page1[i].Configuration = 0;
(void) mpt_update_spi_config(mpt, i);
}
}
return (0);
}
static int
mpt_cam_enable(struct mpt_softc *mpt)
{
int error;
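/*
 * Read transport-specific configuration pages and apply initial
 * settings for FC, SAS or parallel SCSI as appropriate.
 */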
MPT_LOCK(mpt);
error = EIO;
if (mpt->is_fc) {
if (mpt_read_config_info_fc(mpt)) {
goto out;
}
if (mpt_set_initial_config_fc(mpt)) {
goto out;
}
} else if (mpt->is_sas) {
if (mpt_read_config_info_sas(mpt)) {
goto out;
}
if (mpt_set_initial_config_sas(mpt)) {
goto out;
}
} else if (mpt->is_spi) {
if (mpt_read_config_info_spi(mpt)) {
goto out;
}
if (mpt_set_initial_config_spi(mpt)) {
goto out;
}
}
error = 0;
out:
MPT_UNLOCK(mpt);
return (error);
}
static void
mpt_cam_ready(struct mpt_softc *mpt)
{
/*
* If we're in target mode, hang out resources now
* so we don't cause the world to hang talking to us.
*/
if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
/*
* Try to add some target command resources
*/
MPT_LOCK(mpt);
if (mpt_add_target_commands(mpt) == FALSE) {
mpt_prt(mpt, "failed to add target commands\n");
}
MPT_UNLOCK(mpt);
}
mpt->ready = 1;
}
static void
mpt_cam_detach(struct mpt_softc *mpt)
{
mpt_handler_t handler;
MPT_LOCK(mpt);
mpt->ready = 0;
mpt_terminate_recovery_thread(mpt);
handler.reply_handler = mpt_scsi_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
scsi_io_handler_id);
handler.reply_handler = mpt_scsi_tmf_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
scsi_tmf_handler_id);
handler.reply_handler = mpt_fc_els_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
fc_els_handler_id);
handler.reply_handler = mpt_scsi_tgt_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
mpt->scsi_tgt_handler_id);
handler.reply_handler = mpt_sata_pass_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
sata_pass_handler_id);
if (mpt->tmf_req != NULL) {
mpt->tmf_req->state = REQ_STATE_ALLOCATED;
mpt_free_request(mpt, mpt->tmf_req);
mpt->tmf_req = NULL;
}
if (mpt->sas_portinfo != NULL) {
free(mpt->sas_portinfo, M_DEVBUF);
mpt->sas_portinfo = NULL;
}
if (mpt->sim != NULL) {
xpt_free_path(mpt->path);
xpt_bus_deregister(cam_sim_path(mpt->sim));
cam_sim_free(mpt->sim, TRUE);
mpt->sim = NULL;
}
if (mpt->phydisk_sim != NULL) {
xpt_free_path(mpt->phydisk_path);
xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
cam_sim_free(mpt->phydisk_sim, TRUE);
mpt->phydisk_sim = NULL;
}
MPT_UNLOCK(mpt);
}
/* This routine is used after a system crash to dump core onto the swap device.
*/
static void
mpt_poll(struct cam_sim *sim)
{
struct mpt_softc *mpt;
mpt = (struct mpt_softc *)cam_sim_softc(sim);
mpt_intr(mpt);
}
/*
* Watchdog timeout routine for SCSI requests.
*/
static void
mpt_timeout(void *arg)
{
union ccb *ccb;
struct mpt_softc *mpt;
request_t *req;
ccb = (union ccb *)arg;
mpt = ccb->ccb_h.ccb_mpt_ptr;
MPT_LOCK_ASSERT(mpt);
req = ccb->ccb_h.ccb_req_ptr;
mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
req->state |= REQ_STATE_TIMEDOUT;
mpt_wakeup_recovery_thread(mpt);
}
}
/*
* Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
* directly.
*
* Takes a list of physical segments and builds the SGL for SCSI IO command
* and forwards the command to the IOC after one last check that CAM has not
* aborted the transaction.
*/
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
request_t *req, *trq;
char *mpt_off;
union ccb *ccb;
struct mpt_softc *mpt;
bus_addr_t chain_list_addr;
int first_lim, seg, this_seg_lim;
uint32_t addr, cur_off, flags, nxt_off, tf;
void *sglp = NULL;
MSG_REQUEST_HEADER *hdrp;
SGE_SIMPLE64 *se;
SGE_CHAIN64 *ce;
int istgt = 0;
req = (request_t *)arg;
ccb = req->ccb;
mpt = ccb->ccb_h.ccb_mpt_ptr;
req = ccb->ccb_h.ccb_req_ptr;
hdrp = req->req_vbuf;
mpt_off = req->req_vbuf;
if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
}
if (error == 0) {
switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
istgt = 0;
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
break;
case MPI_FUNCTION_TARGET_ASSIST:
istgt = 1;
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
break;
default:
mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
hdrp->Function);
error = EINVAL;
break;
}
}
if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
mpt_prt(mpt, "segment count %d too large (max %u)\n",
nseg, mpt->max_seg_cnt);
}
bad:
if (error != 0) {
if (error != EFBIG && error != ENOMEM) {
mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
}
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
cam_status status;
mpt_freeze_ccb(ccb);
if (error == EFBIG) {
status = CAM_REQ_TOO_BIG;
} else if (error == ENOMEM) {
if (mpt->outofbeer == 0) {
mpt->outofbeer = 1;
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"FREEZEQ\n");
}
status = CAM_REQUEUE_REQ;
} else {
status = CAM_REQ_CMP_ERR;
}
mpt_set_ccb_status(ccb, status);
}
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
xpt_done(ccb);
mpt_free_request(mpt, req);
return;
}
/*
* No data to transfer?
* Just make a single simple SGL with zero length.
*/
if (mpt->verbose >= MPT_PRT_DEBUG) {
int tidx = ((char *)sglp) - mpt_off;
memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
}
if (nseg == 0) {
SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
MPI_pSGE_SET_FLAGS(se1,
(MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
se1->FlagsLength = htole32(se1->FlagsLength);
goto out;
}
flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
if (istgt == 0) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
}
} else {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
}
}
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if (istgt == 0) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREREAD;
} else {
op = BUS_DMASYNC_PREWRITE;
}
} else {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREWRITE;
} else {
op = BUS_DMASYNC_PREREAD;
}
}
bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
}
/*
* Okay, fill in what we can at the end of the command frame.
* If we have up to MPT_NSGL_FIRST, we can fit them all into
* the command frame.
*
* Otherwise, we fill up through MPT_NSGL_FIRST less one
* SIMPLE64 pointers and start doing CHAIN64 entries after
* that.
*/
if (nseg < MPT_NSGL_FIRST(mpt)) {
first_lim = nseg;
} else {
/*
* Leave room for CHAIN element
*/
first_lim = MPT_NSGL_FIRST(mpt) - 1;
}
se = (SGE_SIMPLE64 *) sglp;
for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
tf = flags;
memset(se, 0, sizeof (*se));
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
if (sizeof(bus_addr_t) > 4) {
addr = ((uint64_t)dm_segs->ds_addr) >> 32;
/* SAS1078 36GB limitation WAR */
if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
addr |= (1U << 31);
tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
}
se->Address.High = htole32(addr);
}
if (seg == first_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
}
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
MPI_SGE_FLAGS_END_OF_BUFFER;
}
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);
}
if (seg == nseg) {
goto out;
}
/*
* Tell the IOC where to find the first chain element.
*/
hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
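/* ChainOffset is expressed in 32-bit words, hence the shift by two. */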
nxt_off = MPT_RQSL(mpt);
trq = req;
/*
* Make up the rest of the data segments out of a chain element
* (contained in the current request frame) which points to
* SIMPLE64 elements in the next request frame, possibly ending
* with *another* chain element (if there's more).
*/
while (seg < nseg) {
/*
* Point to the chain descriptor. Note that the chain
* descriptor is at the end of the *previous* list (whether
* chain or simple).
*/
ce = (SGE_CHAIN64 *) se;
/*
* Before we change our current pointer, make sure we won't
* overflow the request area with this frame. Note that we
* test against 'greater than' here as it's okay in this case
* to have next offset be just outside the request area.
*/
if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
nxt_off = MPT_REQUEST_AREA;
goto next_chain;
}
/*
* Set our SGE element pointer to the beginning of the chain
* list and update our next chain list offset.
*/
se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
cur_off = nxt_off;
nxt_off += MPT_RQSL(mpt);
/*
* Now initialize the chain descriptor.
*/
memset(ce, 0, sizeof (*ce));
/*
* Get the physical address of the chain list.
*/
chain_list_addr = trq->req_pbuf;
chain_list_addr += cur_off;
if (sizeof (bus_addr_t) > 4) {
ce->Address.High =
htole32(((uint64_t)chain_list_addr) >> 32);
}
ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
MPI_SGE_FLAGS_64_BIT_ADDRESSING;
/*
* If we have more than a frame's worth of segments left,
* set up the chain list to have the last element be another
* chain descriptor.
*/
if ((nseg - seg) > MPT_NSGL(mpt)) {
this_seg_lim = seg + MPT_NSGL(mpt) - 1;
/*
* The length of the chain is the length in bytes of the
* number of segments plus the next chain element.
*
* The next chain descriptor offset is the length,
* in words, of the number of segments.
*/
ce->Length = (this_seg_lim - seg) *
sizeof (SGE_SIMPLE64);
ce->NextChainOffset = ce->Length >> 2;
ce->Length += sizeof (SGE_CHAIN64);
} else {
this_seg_lim = nseg;
ce->Length = (this_seg_lim - seg) *
sizeof (SGE_SIMPLE64);
}
ce->Length = htole16(ce->Length);
/*
* Fill in the chain list SGE elements with our segment data.
*
* If we're the last element in this chain list, set the last
* element flag. If we're the completely last element period,
* set the end of list and end of buffer flags.
*/
while (seg < this_seg_lim) {
tf = flags;
memset(se, 0, sizeof (*se));
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
se->Address.Low = htole32(dm_segs->ds_addr &
0xffffffff);
if (sizeof (bus_addr_t) > 4) {
addr = ((uint64_t)dm_segs->ds_addr) >> 32;
/* SAS1078 36GB limitation WAR */
if (mpt->is_1078 &&
(((uint64_t)dm_segs->ds_addr +
MPI_SGE_LENGTH(se->FlagsLength)) >>
32) == 9) {
addr |= (1U << 31);
tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
}
se->Address.High = htole32(addr);
}
if (seg == this_seg_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
}
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
MPI_SGE_FLAGS_END_OF_BUFFER;
}
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);
se++;
seg++;
dm_segs++;
}
next_chain:
/*
* If we have more segments to do and we've used up all of
* the space in a request area, go allocate another one
* and chain to that.
*/
if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
request_t *nrq;
nrq = mpt_get_request(mpt, FALSE);
if (nrq == NULL) {
error = ENOMEM;
goto bad;
}
/*
* Append the new request area on the tail of our list.
*/
if ((trq = req->chain) == NULL) {
req->chain = nrq;
} else {
while (trq->chain != NULL) {
trq = trq->chain;
}
trq->chain = nrq;
}
trq = nrq;
mpt_off = trq->req_vbuf;
if (mpt->verbose >= MPT_PRT_DEBUG) {
memset(mpt_off, 0xff, MPT_REQUEST_AREA);
}
nxt_off = 0;
}
}
out:
/*
* Last time we need to check if this CCB needs to be aborted.
*/
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
}
mpt_prt(mpt,
"mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
ccb->ccb_h.status & CAM_STATUS_MASK);
if (nseg) {
bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
xpt_done(ccb);
mpt_free_request(mpt, req);
return;
}
ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
mpt_timeout, ccb);
}
if (mpt->verbose > MPT_PRT_DEBUG) {
int nc = 0;
mpt_print_request(req->req_vbuf);
for (trq = req->chain; trq; trq = trq->chain) {
printf(" Additional Chain Area %d\n", nc++);
mpt_dump_sgl(trq->req_vbuf, 0);
}
}
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
} else {
tgt->state = TGT_STATE_MOVING_DATA;
}
#else
tgt->state = TGT_STATE_MOVING_DATA;
#endif
}
mpt_send_cmd(mpt, req);
}
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
request_t *req, *trq;
char *mpt_off;
union ccb *ccb;
struct mpt_softc *mpt;
int seg, first_lim;
uint32_t flags, nxt_off;
void *sglp = NULL;
MSG_REQUEST_HEADER *hdrp;
SGE_SIMPLE32 *se;
SGE_CHAIN32 *ce;
int istgt = 0;
req = (request_t *)arg;
ccb = req->ccb;
mpt = ccb->ccb_h.ccb_mpt_ptr;
req = ccb->ccb_h.ccb_req_ptr;
hdrp = req->req_vbuf;
mpt_off = req->req_vbuf;
if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
}
if (error == 0) {
switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
break;
case MPI_FUNCTION_TARGET_ASSIST:
istgt = 1;
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
break;
default:
mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
hdrp->Function);
error = EINVAL;
break;
}
}
if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
mpt_prt(mpt, "segment count %d too large (max %u)\n",
nseg, mpt->max_seg_cnt);
}
bad:
if (error != 0) {
if (error != EFBIG && error != ENOMEM) {
mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
}
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
cam_status status;
mpt_freeze_ccb(ccb);
if (error == EFBIG) {
status = CAM_REQ_TOO_BIG;
} else if (error == ENOMEM) {
if (mpt->outofbeer == 0) {
mpt->outofbeer = 1;
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"FREEZEQ\n");
}
status = CAM_REQUEUE_REQ;
} else {
status = CAM_REQ_CMP_ERR;
}
mpt_set_ccb_status(ccb, status);
}
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
xpt_done(ccb);
mpt_free_request(mpt, req);
return;
}
/*
* No data to transfer?
* Just make a single simple SGL with zero length.
*/
if (mpt->verbose >= MPT_PRT_DEBUG) {
int tidx = ((char *)sglp) - mpt_off;
memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
}
if (nseg == 0) {
SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
MPI_pSGE_SET_FLAGS(se1,
(MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
se1->FlagsLength = htole32(se1->FlagsLength);
goto out;
}
flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
if (istgt == 0) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
}
} else {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
}
}
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if (istgt) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREREAD;
} else {
op = BUS_DMASYNC_PREWRITE;
}
} else {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREWRITE;
} else {
op = BUS_DMASYNC_PREREAD;
}
}
bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
}
/*
* Okay, fill in what we can at the end of the command frame.
* If we have up to MPT_NSGL_FIRST, we can fit them all into
* the command frame.
*
* Otherwise, we fill up through MPT_NSGL_FIRST less one
* SIMPLE32 pointers and start doing CHAIN32 entries after
* that.
*/
if (nseg < MPT_NSGL_FIRST(mpt)) {
first_lim = nseg;
} else {
/*
* Leave room for CHAIN element
*/
first_lim = MPT_NSGL_FIRST(mpt) - 1;
}
se = (SGE_SIMPLE32 *) sglp;
for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
uint32_t tf;
memset(se, 0,sizeof (*se));
se->Address = htole32(dm_segs->ds_addr);
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
tf = flags;
if (seg == first_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
}
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
MPI_SGE_FLAGS_END_OF_BUFFER;
}
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);
}
if (seg == nseg) {
goto out;
}
/*
* Tell the IOC where to find the first chain element.
*/
hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
nxt_off = MPT_RQSL(mpt);
trq = req;
/*
* Make up the rest of the data segments out of a chain element
* (contained in the current request frame) which points to
* SIMPLE32 elements in the next request frame, possibly ending
* with *another* chain element (if there's more).
*/
while (seg < nseg) {
int this_seg_lim;
uint32_t tf, cur_off;
bus_addr_t chain_list_addr;
/*
* Point to the chain descriptor. Note that the chain
* descriptor is at the end of the *previous* list (whether
* chain or simple).
*/
ce = (SGE_CHAIN32 *) se;
/*
* Before we change our current pointer, make sure we won't
* overflow the request area with this frame. Note that we
* test against 'greater than' here as it's okay in this case
* to have next offset be just outside the request area.
*/
if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
nxt_off = MPT_REQUEST_AREA;
goto next_chain;
}
/*
* Set our SGE element pointer to the beginning of the chain
* list and update our next chain list offset.
*/
se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
cur_off = nxt_off;
nxt_off += MPT_RQSL(mpt);
/*
* Now initialize the chain descriptor.
*/
memset(ce, 0, sizeof (*ce));
/*
* Get the physical address of the chain list.
*/
chain_list_addr = trq->req_pbuf;
chain_list_addr += cur_off;
ce->Address = htole32(chain_list_addr);
ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
/*
* If we have more than a frame's worth of segments left,
* set up the chain list to have the last element be another
* chain descriptor.
*/
if ((nseg - seg) > MPT_NSGL(mpt)) {
this_seg_lim = seg + MPT_NSGL(mpt) - 1;
/*
* The length of the chain is the length in bytes of the
* number of segments plus the next chain element.
*
* The next chain descriptor offset is the length,
* in words, of the number of segments.
*/
ce->Length = (this_seg_lim - seg) *
sizeof (SGE_SIMPLE32);
ce->NextChainOffset = ce->Length >> 2;
ce->Length += sizeof (SGE_CHAIN32);
} else {
this_seg_lim = nseg;
ce->Length = (this_seg_lim - seg) *
sizeof (SGE_SIMPLE32);
}
ce->Length = htole16(ce->Length);
/*
* Fill in the chain list SGE elements with our segment data.
*
* If we're the last element in this chain list, set the last
* element flag. If we're the completely last element period,
* set the end of list and end of buffer flags.
*/
while (seg < this_seg_lim) {
memset(se, 0, sizeof (*se));
se->Address = htole32(dm_segs->ds_addr);
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
tf = flags;
if (seg == this_seg_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
}
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
MPI_SGE_FLAGS_END_OF_BUFFER;
}
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);
se++;
seg++;
dm_segs++;
}
next_chain:
/*
* If we have more segments to do and we've used up all of
* the space in a request area, go allocate another one
* and chain to that.
*/
if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
request_t *nrq;
nrq = mpt_get_request(mpt, FALSE);
if (nrq == NULL) {
error = ENOMEM;
goto bad;
}
/*
* Append the new request area on the tail of our list.
*/
if ((trq = req->chain) == NULL) {
req->chain = nrq;
} else {
while (trq->chain != NULL) {
trq = trq->chain;
}
trq->chain = nrq;
}
trq = nrq;
mpt_off = trq->req_vbuf;
if (mpt->verbose >= MPT_PRT_DEBUG) {
memset(mpt_off, 0xff, MPT_REQUEST_AREA);
}
nxt_off = 0;
}
}
out:
/*
* Last time we need to check if this CCB needs to be aborted.
*/
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
}
mpt_prt(mpt,
"mpt_execute_req: I/O cancelled (status 0x%x)\n",
ccb->ccb_h.status & CAM_STATUS_MASK);
if (nseg) {
bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
xpt_done(ccb);
mpt_free_request(mpt, req);
return;
}
ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
mpt_timeout, ccb);
}
if (mpt->verbose > MPT_PRT_DEBUG) {
int nc = 0;
mpt_print_request(req->req_vbuf);
for (trq = req->chain; trq; trq = trq->chain) {
printf(" Additional Chain Area %d\n", nc++);
mpt_dump_sgl(trq->req_vbuf, 0);
}
}
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
} else {
tgt->state = TGT_STATE_MOVING_DATA;
}
#else
tgt->state = TGT_STATE_MOVING_DATA;
#endif
}
mpt_send_cmd(mpt, req);
}
static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
request_t *req;
struct mpt_softc *mpt;
MSG_SCSI_IO_REQUEST *mpt_req;
struct ccb_scsiio *csio = &ccb->csio;
struct ccb_hdr *ccbh = &ccb->ccb_h;
bus_dmamap_callback_t *cb;
target_id_t tgt;
int raid_passthru;
int error;
/* Get the pointer for the physical adapter */
mpt = ccb->ccb_h.ccb_mpt_ptr;
raid_passthru = (sim == mpt->phydisk_sim);
if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
if (mpt->outofbeer == 0) {
mpt->outofbeer = 1;
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
xpt_done(ccb);
return;
}
#ifdef INVARIANTS
mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
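/* Pick the 64-bit SG list builder when bus addresses can exceed 32 bits. */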
if (sizeof (bus_addr_t) > 4) {
cb = mpt_execute_req_a64;
} else {
cb = mpt_execute_req;
}
/*
* Link the ccb and the request structure so we can find
* the other knowing either the request or the ccb
*/
req->ccb = ccb;
ccb->ccb_h.ccb_req_ptr = req;
/* Now we build the command for the IOC */
mpt_req = req->req_vbuf;
memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
if (raid_passthru) {
mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
xpt_done(ccb);
return;
}
mpt_req->Bus = 0; /* we never set bus here */
} else {
tgt = ccb->ccb_h.target_id;
mpt_req->Bus = 0; /* XXX */
}
mpt_req->SenseBufferLength =
(csio->sense_len < MPT_SENSE_SIZE) ?
csio->sense_len : MPT_SENSE_SIZE;
/*
* We use the message context to find the request structure when we
* get the command completion interrupt from the IOC.
*/
mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
/* Which physical device to do the I/O on */
mpt_req->TargetID = tgt;
be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
/* Set the direction of the transfer */
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
} else {
mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
}
if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
switch(ccb->csio.tag_action) {
case MSG_HEAD_OF_Q_TAG:
mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
break;
case MSG_ACA_TASK:
mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
break;
case MSG_ORDERED_Q_TAG:
mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
break;
case MSG_SIMPLE_Q_TAG:
default:
mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
break;
}
} else {
if (mpt->is_fc || mpt->is_sas) {
mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
} else {
/* XXX No such thing for a target doing packetized. */
mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
}
}
if (mpt->is_spi) {
if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
}
}
mpt_req->Control = htole32(mpt_req->Control);
/* Copy the scsi command block into place */
if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
} else {
bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
}
mpt_req->CDBLength = csio->cdb_len;
mpt_req->DataLength = htole32(csio->dxfer_len);
mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
/*
* Do a *short* print here if we're set to MPT_PRT_DEBUG
*/
if (mpt->verbose == MPT_PRT_DEBUG) {
U32 df;
mpt_prt(mpt, "mpt_start: %s op 0x%x ",
(mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
"SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
mpt_prtc(mpt, "(%s %u byte%s ",
(df == MPI_SCSIIO_CONTROL_READ)?
"read" : "write", csio->dxfer_len,
(csio->dxfer_len == 1)? ")" : "s)");
}
mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
(uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
}
error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
req, 0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the controller queue
* until our mapping is returned.
*/
xpt_freeze_simq(mpt->sim, 1);
ccbh->status |= CAM_RELEASE_SIMQ;
}
}
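/*
* Issue a target or bus reset task management function and wait for
* the IOC to complete it, hard resetting the controller if the TMF
* fails or times out.
*/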
static int
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
int sleep_ok)
{
int error;
uint16_t status;
uint8_t response;
error = mpt_scsi_send_tmf(mpt,
(tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
0, /* XXX How do I get the channel ID? */
tgt != CAM_TARGET_WILDCARD ? tgt : 0,
lun != CAM_LUN_WILDCARD ? lun : 0,
0, sleep_ok);
if (error != 0) {
/*
* mpt_scsi_send_tmf hard resets on failure, so no
* need to do so here.
*/
mpt_prt(mpt,
"mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
return (EIO);
}
/* Wait for bus reset to be processed by the IOC. */
error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
REQ_STATE_DONE, sleep_ok, 5000);
status = le16toh(mpt->tmf_req->IOCStatus);
response = mpt->tmf_req->ResponseCode;
mpt->tmf_req->state = REQ_STATE_FREE;
if (error) {
mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
"Resetting controller.\n");
mpt_reset(mpt, TRUE);
return (ETIMEDOUT);
}
if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
"Resetting controller.\n", status);
mpt_reset(mpt, TRUE);
return (EIO);
}
if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
"Resetting controller.\n", response);
mpt_reset(mpt, TRUE);
return (EIO);
}
return (0);
}
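/*
* Ask the IOC to reset the Fibre Channel link via an FC primitive
* send request, optionally waiting for the request to complete.
*/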
static int
mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
{
int r = 0;
request_t *req;
PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
req = mpt_get_request(mpt, FALSE);
if (req == NULL) {
return (ENOMEM);
}
fc = req->req_vbuf;
memset(fc, 0, sizeof(*fc));
fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
fc->MsgContext = htole32(req->index | fc_els_handler_id);
mpt_send_cmd(mpt, req);
if (dowait) {
r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
REQ_STATE_DONE, FALSE, 60 * 1000);
if (r == 0) {
mpt_free_request(mpt, req);
}
}
return (r);
}
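/*
* Handle asynchronous IOC event notifications of interest to CAM
* (bus resets, rescans, queue-full, SAS device status changes, etc.).
* Returns non-zero if the event was recognized.
*/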
static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
MSG_EVENT_NOTIFY_REPLY *msg)
{
uint32_t data0, data1;
data0 = le32toh(msg->Data[0]);
data1 = le32toh(msg->Data[1]);
switch(msg->Event & 0xFF) {
case MPI_EVENT_UNIT_ATTENTION:
mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
(data0 >> 8) & 0xff, data0 & 0xff);
break;
case MPI_EVENT_IOC_BUS_RESET:
/* We generated a bus reset */
mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
(data0 >> 8) & 0xff);
xpt_async(AC_BUS_RESET, mpt->path, NULL);
break;
case MPI_EVENT_EXT_BUS_RESET:
/* Someone else generated a bus reset */
mpt_prt(mpt, "External Bus Reset Detected\n");
/*
* These replies don't return EventData like the MPI
* spec says they do
*/
xpt_async(AC_BUS_RESET, mpt->path, NULL);
break;
case MPI_EVENT_RESCAN:
{
union ccb *ccb;
uint32_t pathid;
/*
* In general this means a device has been added to the loop.
*/
mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
if (mpt->ready == 0) {
break;
}
if (mpt->phydisk_sim) {
pathid = cam_sim_path(mpt->phydisk_sim);
} else {
pathid = cam_sim_path(mpt->sim);
}
/*
* Allocate a CCB, create a wildcard path for this bus,
* and schedule a rescan.
*/
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
mpt_prt(mpt, "unable to alloc CCB for rescan\n");
break;
}
if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt, "unable to create path for rescan\n");
xpt_free_ccb(ccb);
break;
}
xpt_rescan(ccb);
break;
}
case MPI_EVENT_LINK_STATUS_CHANGE:
mpt_prt(mpt, "Port %d: LinkState: %s\n",
(data1 >> 8) & 0xff,
((data0 & 0xff) == 0)? "Failed" : "Active");
break;
case MPI_EVENT_LOOP_STATE_CHANGE:
switch ((data0 >> 16) & 0xff) {
case 0x01:
mpt_prt(mpt,
"Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
"(Loop Initialization)\n",
(data1 >> 8) & 0xff,
(data0 >> 8) & 0xff,
(data0 ) & 0xff);
switch ((data0 >> 8) & 0xff) {
case 0xF7:
if ((data0 & 0xff) == 0xF7) {
mpt_prt(mpt, "Device needs AL_PA\n");
} else {
mpt_prt(mpt, "Device %02x doesn't like "
"FC performance\n",
data0 & 0xFF);
}
break;
case 0xF8:
if ((data0 & 0xff) == 0xF7) {
mpt_prt(mpt, "Device had loop failure "
"at its receiver prior to acquiring"
" AL_PA\n");
} else {
mpt_prt(mpt, "Device %02x detected loop"
" failure at its receiver\n",
data0 & 0xFF);
}
break;
default:
mpt_prt(mpt, "Device %02x requests that device "
"%02x reset itself\n",
data0 & 0xFF,
(data0 >> 8) & 0xFF);
break;
}
break;
case 0x02:
mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
"LPE(%02x,%02x) (Loop Port Enable)\n",
(data1 >> 8) & 0xff, /* Port */
(data0 >> 8) & 0xff, /* Character 3 */
(data0 ) & 0xff /* Character 4 */);
break;
case 0x03:
mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
"LPB(%02x,%02x) (Loop Port Bypass)\n",
(data1 >> 8) & 0xff, /* Port */
(data0 >> 8) & 0xff, /* Character 3 */
(data0 ) & 0xff /* Character 4 */);
break;
default:
mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
"FC event (%02x %02x %02x)\n",
(data1 >> 8) & 0xff, /* Port */
(data0 >> 16) & 0xff, /* Event */
(data0 >> 8) & 0xff, /* Character 3 */
(data0 ) & 0xff /* Character 4 */);
}
break;
case MPI_EVENT_LOGOUT:
mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
(data1 >> 8) & 0xff, data0);
break;
case MPI_EVENT_QUEUE_FULL:
{
struct cam_sim *sim;
struct cam_path *tmppath;
struct ccb_relsim crs;
PTR_EVENT_DATA_QUEUE_FULL pqf;
lun_id_t lun_id;
pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
if (bootverbose) {
mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
"Depth %d\n",
pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
}
if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
pqf->TargetID) != 0) {
sim = mpt->phydisk_sim;
} else {
sim = mpt->sim;
}
for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
pqf->TargetID, lun_id) != CAM_REQ_CMP) {
mpt_prt(mpt, "unable to create a path to send "
"XPT_REL_SIMQ");
break;
}
xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
crs.ccb_h.func_code = XPT_REL_SIMQ;
crs.ccb_h.flags = CAM_DEV_QFREEZE;
crs.release_flags = RELSIM_ADJUST_OPENINGS;
crs.openings = pqf->CurrentDepth - 1;
xpt_action((union ccb *)&crs);
if (crs.ccb_h.status != CAM_REQ_CMP) {
mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
}
xpt_free_path(tmppath);
}
break;
}
case MPI_EVENT_IR_RESYNC_UPDATE:
mpt_prt(mpt, "IR resync update %d completed\n",
(data0 >> 16) & 0xff);
break;
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
{
union ccb *ccb;
struct cam_sim *sim;
struct cam_path *tmppath;
PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
psdsc->TargetID) != 0)
sim = mpt->phydisk_sim;
else
sim = mpt->sim;
switch(psdsc->ReasonCode) {
case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
mpt_prt(mpt,
"unable to alloc CCB for rescan\n");
break;
}
if (xpt_create_path(&ccb->ccb_h.path, NULL,
cam_sim_path(sim), psdsc->TargetID,
CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt,
"unable to create path for rescan\n");
xpt_free_ccb(ccb);
break;
}
xpt_rescan(ccb);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
psdsc->TargetID, CAM_LUN_WILDCARD) !=
CAM_REQ_CMP) {
mpt_prt(mpt,
"unable to create path for async event");
break;
}
xpt_async(AC_LOST_DEVICE, tmppath, NULL);
xpt_free_path(tmppath);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
break;
default:
mpt_lprt(mpt, MPT_PRT_WARN,
"SAS device status change: Bus: 0x%02x TargetID: "
"0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
psdsc->TargetID, psdsc->ReasonCode);
break;
}
break;
}
case MPI_EVENT_SAS_DISCOVERY_ERROR:
{
PTR_EVENT_DATA_DISCOVERY_ERROR pde;
pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
mpt_lprt(mpt, MPT_PRT_WARN,
"SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
pde->Port, pde->DiscoveryStatus);
break;
}
case MPI_EVENT_EVENT_CHANGE:
case MPI_EVENT_INTEGRATED_RAID:
case MPI_EVENT_IR2:
case MPI_EVENT_LOG_ENTRY_ADDED:
case MPI_EVENT_SAS_DISCOVERY:
case MPI_EVENT_SAS_PHY_LINK_STATUS:
case MPI_EVENT_SAS_SES:
break;
default:
mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
msg->Event & 0xFF);
return (0);
}
return (1);
}
/*
* Reply path for all SCSI I/O requests, called from our
* interrupt handler by extracting our handler index from
* the MsgContext field of the reply from the IOC.
*
* This routine is optimized for the common case of a
* completion without error. All exception handling is
* offloaded to non-inlined helper routines to minimize
* cache footprint.
*/
static int
mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
MSG_SCSI_IO_REQUEST *scsi_req;
union ccb *ccb;
if (req->state == REQ_STATE_FREE) {
mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
return (TRUE);
}
scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
ccb = req->ccb;
if (ccb == NULL) {
mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
req, req->serno);
return (TRUE);
}
mpt_req_untimeout(req, mpt_timeout, ccb);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
op = BUS_DMASYNC_POSTREAD;
else
op = BUS_DMASYNC_POSTWRITE;
bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
}
if (reply_frame == NULL) {
/*
* Context only reply, completion without error status.
*/
ccb->csio.resid = 0;
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
ccb->csio.scsi_status = SCSI_STATUS_OK;
} else {
mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
}
if (mpt->outofbeer) {
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
mpt->outofbeer = 0;
mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
}
if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
struct scsi_inquiry_data *iq =
(struct scsi_inquiry_data *)ccb->csio.data_ptr;
if (scsi_req->Function ==
MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
/*
* Fake out the device type so that only the
* pass-thru device will attach.
*/
iq->device &= ~0x1F;
iq->device |= T_NODEVICE;
}
}
if (mpt->verbose == MPT_PRT_DEBUG) {
mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
req, req->serno);
}
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
xpt_done(ccb);
if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
} else {
mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
req, req->serno);
TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
}
KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
("CCB req needed wakeup"));
#ifdef INVARIANTS
mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
#endif
mpt_free_request(mpt, req);
return (TRUE);
}
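/*
* Reply handler for SCSI task management functions: record the IOC
* status and response code and either wake a waiter or mark the TMF
* request free again.
*/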
static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
#ifdef INVARIANTS
mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
#endif
tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
/* Record IOC Status and Response Code of TMF for any waiters. */
req->IOCStatus = le16toh(tmf_reply->IOCStatus);
req->ResponseCode = tmf_reply->ResponseCode;
mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
req, req->serno, le16toh(tmf_reply->IOCStatus));
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
req->state |= REQ_STATE_DONE;
wakeup(req);
} else {
mpt->tmf_req->state = REQ_STATE_FREE;
}
return (TRUE);
}
/*
* XXX: Move to definitions file
*/
#define ELS 0x22
#define FC4LS 0x32
#define ABTS 0x81
#define BA_ACC 0x84
#define LS_RJT 0x01
#define LS_ACC 0x02
#define PLOGI 0x03
#define LOGO 0x05
#define SRR 0x14
#define PRLI 0x20
#define PRLO 0x21
#define ADISC 0x52
#define RSCN 0x61
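/*
* Build a link service response in the original ELS request area and
* send it to the IOC.
*/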
static void
mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
{
uint32_t fl;
MSG_LINK_SERVICE_RSP_REQUEST tmp;
PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
/*
* We are going to reuse the ELS request to send this response back.
*/
rsp = &tmp;
memset(rsp, 0, sizeof(*rsp));
#ifdef USE_IMMEDIATE_LINK_DATA
/*
* The IMMEDIATE stuff doesn't seem to work.
*/
rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
rsp->RspLength = length;
rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
rsp->MsgContext = htole32(req->index | fc_els_handler_id);
/*
* Copy over information from the original reply frame to
* its correct place in the response.
*/
memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
/*
* And now copy back the temporary area to the original frame.
*/
memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
rsp = req->req_vbuf;
#ifdef USE_IMMEDIATE_LINK_DATA
memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
#else
{
PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
bus_addr_t paddr = req->req_pbuf;
paddr += MPT_RQSL(mpt);
fl =
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_LAST_ELEMENT |
MPI_SGE_FLAGS_END_OF_LIST |
MPI_SGE_FLAGS_END_OF_BUFFER;
fl <<= MPI_SGE_FLAGS_SHIFT;
fl |= (length);
se->FlagsLength = htole32(fl);
se->Address = htole32((uint32_t) paddr);
}
#endif
/*
* Send it on...
*/
mpt_send_cmd(mpt, req);
}
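/*
* Reply handler for FC link service buffer posts and primitive sends.
* Incoming PRLI, PRLO and ABTS frames are answered here; when no
* response is generated the ELS buffer is simply re-posted.
*/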
static int
mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
(PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
U8 rctl;
U8 type;
U8 cmd;
U16 status = le16toh(reply_frame->IOCStatus);
U32 *elsbuf;
int ioindex;
int do_refresh = TRUE;
#ifdef INVARIANTS
KASSERT(mpt_req_on_free_list(mpt, req) == 0,
("fc_els_reply_handler: req %p:%u for function %x on freelist!",
req, req->serno, rp->Function));
if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
} else {
mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
}
#endif
mpt_lprt(mpt, MPT_PRT_DEBUG,
"FC_ELS Complete: req %p:%u, reply %p function %x\n",
req, req->serno, reply_frame, reply_frame->Function);
if (status != MPI_IOCSTATUS_SUCCESS) {
mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
status, reply_frame->Function);
if (status == MPI_IOCSTATUS_INVALID_STATE) {
/*
* XXX: to get around shutdown issue
*/
mpt->disabled = 1;
return (TRUE);
}
return (TRUE);
}
/*
* If the function is a link service response, we recycle the
* response to be a refresh for a new link service request.
*
* The request pointer is bogus in this case and we have to fetch
* it based upon the TransactionContext.
*/
if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
/* Freddie Uncle Charlie Katie */
/* We don't get the IOINDEX as part of the Link Svc Rsp */
for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
if (mpt->els_cmd_ptrs[ioindex] == req) {
break;
}
KASSERT(ioindex < mpt->els_cmds_allocated,
("can't find my mommie!"));
/* remove from active list as we're going to re-post it */
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
mpt_fc_post_els(mpt, req, ioindex);
return (TRUE);
}
if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
/* remove from active list as we're done */
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
if (req->state & REQ_STATE_TIMEDOUT) {
mpt_lprt(mpt, MPT_PRT_DEBUG,
"Sync Primitive Send Completed After Timeout\n");
mpt_free_request(mpt, req);
} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
mpt_lprt(mpt, MPT_PRT_DEBUG,
"Async Primitive Send Complete\n");
mpt_free_request(mpt, req);
} else {
mpt_lprt(mpt, MPT_PRT_DEBUG,
"Sync Primitive Send Complete- Waking Waiter\n");
wakeup(req);
}
return (TRUE);
}
if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
"Length %d Message Flags %x\n", rp->Function, rp->Flags,
rp->MsgLength, rp->MsgFlags);
return (TRUE);
}
if (rp->MsgLength <= 5) {
/*
* This is just an ack of an original ELS buffer post.
*/
mpt_lprt(mpt, MPT_PRT_DEBUG,
"RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
return (TRUE);
}
rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
cmd = be32toh(elsbuf[0]) >> 24;
if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
return (TRUE);
}
ioindex = le32toh(rp->TransactionContext);
req = mpt->els_cmd_ptrs[ioindex];
if (rctl == ELS && type == 1) {
switch (cmd) {
case PRLI:
/*
* Send back a PRLI ACC
*/
mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
le32toh(rp->Wwn.PortNameHigh),
le32toh(rp->Wwn.PortNameLow));
elsbuf[0] = htobe32(0x02100014);
elsbuf[1] |= htobe32(0x00000100);
elsbuf[4] = htobe32(0x00000002);
if (mpt->role & MPT_ROLE_TARGET)
elsbuf[4] |= htobe32(0x00000010);
if (mpt->role & MPT_ROLE_INITIATOR)
elsbuf[4] |= htobe32(0x00000020);
/* remove from active list as we're done */
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
mpt_fc_els_send_response(mpt, req, rp, 20);
do_refresh = FALSE;
break;
case PRLO:
memset(elsbuf, 0, 5 * (sizeof (U32)));
elsbuf[0] = htobe32(0x02100014);
elsbuf[1] = htobe32(0x08000100);
mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
le32toh(rp->Wwn.PortNameHigh),
le32toh(rp->Wwn.PortNameLow));
/* remove from active list as we're done */
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
mpt_fc_els_send_response(mpt, req, rp, 20);
do_refresh = FALSE;
break;
default:
mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
break;
}
} else if (rctl == ABTS && type == 0) {
uint16_t rx_id = le16toh(rp->Rxid);
uint16_t ox_id = le16toh(rp->Oxid);
mpt_tgt_state_t *tgt;
request_t *tgt_req = NULL;
union ccb *ccb;
uint32_t ct_id;
mpt_prt(mpt,
"ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
le32toh(rp->Wwn.PortNameLow));
if (rx_id >= mpt->mpt_max_tgtcmds) {
mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
} else if (mpt->tgt_cmd_ptrs == NULL) {
mpt_prt(mpt, "No TGT CMD PTRS\n");
} else {
tgt_req = mpt->tgt_cmd_ptrs[rx_id];
}
if (tgt_req == NULL) {
mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
goto skip;
}
tgt = MPT_TGT_STATE(mpt, tgt_req);
/* Check to make sure we have the correct command. */
ct_id = GET_IO_INDEX(tgt->reply_desc);
if (ct_id != rx_id) {
mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
"RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id);
goto skip;
}
if (tgt->itag != ox_id) {
mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
"OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag);
goto skip;
}
if ((ccb = tgt->ccb) != NULL) {
mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n",
ccb, (uintmax_t)ccb->ccb_h.target_lun,
ccb->ccb_h.flags, ccb->ccb_h.status);
}
mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
"%x nxfers %x\n", tgt->state, tgt->resid,
tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers);
if (mpt_abort_target_cmd(mpt, tgt_req))
mpt_prt(mpt, "unable to start TargetAbort\n");
skip:
memset(elsbuf, 0, 5 * (sizeof (U32)));
elsbuf[0] = htobe32(0);
elsbuf[1] = htobe32((ox_id << 16) | rx_id);
elsbuf[2] = htobe32(0x000ffff);
/*
* Dork with the reply frame so that the response to it
* will be correct.
*/
rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
/* remove from active list as we're done */
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
mpt_fc_els_send_response(mpt, req, rp, 12);
do_refresh = FALSE;
} else {
mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
}
if (do_refresh == TRUE) {
/* remove from active list as we're done */
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
mpt_fc_post_els(mpt, req, ioindex);
}
return (TRUE);
}
/*
* Clean up all SCSI Initiator personality state in response
* to a controller reset.
*/
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
/*
* The pending list is already run down by
* the generic handler. Perform the same
* operation on the timed out request list.
*/
mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
MPI_IOCSTATUS_INVALID_STATE);
/*
* XXX: Do we need to repost ELS and Target Command Buffers?
*/
/*
* Inform the XPT that a bus reset has occurred.
*/
xpt_async(AC_BUS_RESET, mpt->path, NULL);
}
/*
* Parse additional completion information in the reply
* frame for SCSI I/O requests.
*/
static int
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
MSG_DEFAULT_REPLY *reply_frame)
{
union ccb *ccb;
MSG_SCSI_IO_REPLY *scsi_io_reply;
u_int ioc_status;
u_int sstate;
MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
|| reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
("MPT SCSI I/O Handler called with incorrect reply type"));
KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
("MPT SCSI I/O Handler called with continuation reply"));
scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
ioc_status = le16toh(scsi_io_reply->IOCStatus);
ioc_status &= MPI_IOCSTATUS_MASK;
sstate = scsi_io_reply->SCSIState;
ccb = req->ccb;
ccb->csio.resid =
ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
&& (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
uint32_t sense_returned;
ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
sense_returned = le32toh(scsi_io_reply->SenseCount);
if (sense_returned < ccb->csio.sense_len)
ccb->csio.sense_resid = ccb->csio.sense_len -
sense_returned;
else
ccb->csio.sense_resid = 0;
bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
bcopy(req->sense_vbuf, &ccb->csio.sense_data,
min(ccb->csio.sense_len, sense_returned));
}
if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
/*
* Tag messages rejected, but non-tagged retry
* was successful.
XXXX
mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
*/
}
switch(ioc_status) {
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
/*
* XXX
* Linux driver indicates that a zero
* transfer length with this error code
* indicates a CRC error.
*
* No need to swap the bytes for checking
* against zero.
*/
if (scsi_io_reply->TransferCount == 0) {
mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
break;
}
/* FALLTHROUGH */
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
case MPI_IOCSTATUS_SUCCESS:
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
/*
* Status was never returned for this transaction.
*/
mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
/* XXX Handle SPI-Packet and FCP-2 response info. */
mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
} else
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
break;
case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
break;
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
/*
* Since selection timeouts and "device really not
* there" are grouped into this error code, report
* selection timeout. Selection timeouts are
* typically retried before giving up on the device
* whereas "device not there" errors are considered
* unretryable.
*/
mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
break;
case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
break;
case MPI_IOCSTATUS_SCSI_INVALID_BUS:
mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
break;
case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
mpt_set_ccb_status(ccb, CAM_TID_INVALID);
break;
case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
ccb->ccb_h.status = CAM_UA_TERMIO;
break;
case MPI_IOCSTATUS_INVALID_STATE:
/*
* The IOC has been reset. Emulate a bus reset.
*/
/* FALLTHROUGH */
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
break;
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
/*
* Don't clobber any timeout status that has
* already been set for this transaction. We
* want the SCSI layer to be able to differentiate
* between the command we aborted due to timeout
* and any innocent bystanders.
*/
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
break;
mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
break;
case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
break;
case MPI_IOCSTATUS_BUSY:
mpt_set_ccb_status(ccb, CAM_BUSY);
break;
case MPI_IOCSTATUS_INVALID_FUNCTION:
case MPI_IOCSTATUS_INVALID_SGL:
case MPI_IOCSTATUS_INTERNAL_ERROR:
case MPI_IOCSTATUS_INVALID_FIELD:
default:
/* XXX
* Some of the above may need to kick
* off a recovery action!!!!
*/
ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
break;
}
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
mpt_freeze_ccb(ccb);
}
return (TRUE);
}
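/*
* CAM action routine for both the primary SIM and the RAID
* passthrough SIM.
*/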
static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
struct mpt_softc *mpt;
struct ccb_trans_settings *cts;
target_id_t tgt;
lun_id_t lun;
int raid_passthru;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
mpt = (struct mpt_softc *)cam_sim_softc(sim);
raid_passthru = (sim == mpt->phydisk_sim);
MPT_LOCK_ASSERT(mpt);
tgt = ccb->ccb_h.target_id;
lun = ccb->ccb_h.target_lun;
if (raid_passthru &&
ccb->ccb_h.func_code != XPT_PATH_INQ &&
ccb->ccb_h.func_code != XPT_RESET_BUS &&
ccb->ccb_h.func_code != XPT_RESET_DEV) {
if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
xpt_done(ccb);
return;
}
}
ccb->ccb_h.ccb_mpt_ptr = mpt;
switch (ccb->ccb_h.func_code) {
case XPT_SCSI_IO: /* Execute the requested I/O operation */
/*
* Do a couple of preliminary checks...
*/
if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
break;
}
}
/* Max supported CDB length is 16 bytes */
/* XXX Unless we implement the new 32-byte message type */
if (ccb->csio.cdb_len >
sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
break;
}
#ifdef MPT_TEST_MULTIPATH
if (mpt->failure_id == ccb->ccb_h.target_id) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
break;
}
#endif
ccb->csio.scsi_status = SCSI_STATUS_OK;
mpt_start(sim, ccb);
return;
case XPT_RESET_BUS:
if (raid_passthru) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
case XPT_RESET_DEV:
if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
if (bootverbose) {
xpt_print(ccb->ccb_h.path, "reset bus\n");
}
} else {
xpt_print(ccb->ccb_h.path, "reset device\n");
}
(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
/*
* mpt_bus_reset is always successful in that it
* will fall back to a hard reset should a bus
* reset attempt fail.
*/
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
case XPT_ABORT:
{
union ccb *accb = ccb->cab.abort_ccb;
switch (accb->ccb_h.func_code) {
case XPT_ACCEPT_TARGET_IO:
case XPT_IMMEDIATE_NOTIFY:
ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
break;
case XPT_CONT_TARGET_IO:
mpt_prt(mpt, "cannot abort active CTIOs yet\n");
ccb->ccb_h.status = CAM_UA_ABORT;
break;
case XPT_SCSI_IO:
ccb->ccb_h.status = CAM_UA_ABORT;
break;
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
break;
}
#define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
#define DP_DISC_ENABLE 0x1
#define DP_DISC_DISABL 0x2
#define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
#define DP_TQING_ENABLE 0x4
#define DP_TQING_DISABL 0x8
#define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
#define DP_WIDE 0x10
#define DP_NARROW 0x20
#define DP_WIDTH (DP_WIDE|DP_NARROW)
#define DP_SYNC 0x40
case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
{
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_spi *spi;
uint8_t dval;
u_int period;
u_int offset;
int i, j;
cts = &ccb->cts;
if (mpt->is_fc || mpt->is_sas) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
scsi = &cts->proto_specific.scsi;
spi = &cts->xport_specific.spi;
/*
* We can be called just to validate transport and proto versions.
*/
if (scsi->valid == 0 && spi->valid == 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
/*
* Skip attempting settings on RAID volume disks.
* Other devices on the bus get the normal treatment.
*/
if (mpt->phydisk_sim && raid_passthru == 0 &&
mpt_is_raid_volume(mpt, tgt) != 0) {
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"no transfer settings for RAID vols\n");
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
i = mpt->mpt_port_page2.PortSettings &
MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
j = mpt->mpt_port_page2.PortFlags &
MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
mpt_lprt(mpt, MPT_PRT_ALWAYS,
"honoring BIOS transfer negotiations\n");
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
dval = 0;
period = 0;
offset = 0;
if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
DP_DISC_ENABLE : DP_DISC_DISABL;
}
if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
DP_TQING_ENABLE : DP_TQING_DISABL;
}
if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
DP_WIDE : DP_NARROW;
}
if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
dval |= DP_SYNC;
offset = spi->sync_offset;
} else {
PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
&mpt->mpt_dev_page1[tgt];
offset = ptr->RequestedParameters;
offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
}
if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
dval |= DP_SYNC;
period = spi->sync_period;
} else {
PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
&mpt->mpt_dev_page1[tgt];
period = ptr->RequestedParameters;
period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
}
if (dval & DP_DISC_ENABLE) {
mpt->mpt_disc_enable |= (1 << tgt);
} else if (dval & DP_DISC_DISABL) {
mpt->mpt_disc_enable &= ~(1 << tgt);
}
if (dval & DP_TQING_ENABLE) {
mpt->mpt_tag_enable |= (1 << tgt);
} else if (dval & DP_TQING_DISABL) {
mpt->mpt_tag_enable &= ~(1 << tgt);
}
if (dval & DP_WIDTH) {
mpt_setwidth(mpt, tgt, 1);
}
if (dval & DP_SYNC) {
mpt_setsync(mpt, tgt, period, offset);
}
if (dval == 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"set [%d]: 0x%x period 0x%x offset %d\n",
tgt, dval, period, offset);
if (mpt_update_spi_config(mpt, tgt)) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
} else {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
}
break;
}
case XPT_GET_TRAN_SETTINGS:
{
struct ccb_trans_settings_scsi *scsi;
cts = &ccb->cts;
cts->protocol = PROTO_SCSI;
if (mpt->is_fc) {
struct ccb_trans_settings_fc *fc =
&cts->xport_specific.fc;
cts->protocol_version = SCSI_REV_SPC;
cts->transport = XPORT_FC;
cts->transport_version = 0;
if (mpt->mpt_fcport_speed != 0) {
fc->valid = CTS_FC_VALID_SPEED;
fc->bitrate = 100000 * mpt->mpt_fcport_speed;
}
} else if (mpt->is_sas) {
struct ccb_trans_settings_sas *sas =
&cts->xport_specific.sas;
cts->protocol_version = SCSI_REV_SPC2;
cts->transport = XPORT_SAS;
cts->transport_version = 0;
sas->valid = CTS_SAS_VALID_SPEED;
sas->bitrate = 300000;
} else {
cts->protocol_version = SCSI_REV_2;
cts->transport = XPORT_SPI;
cts->transport_version = 2;
if (mpt_get_spi_settings(mpt, cts) != 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
break;
}
}
scsi = &cts->proto_specific.scsi;
scsi->valid = CTS_SCSI_VALID_TQ;
scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
case XPT_CALC_GEOMETRY:
{
struct ccb_calc_geometry *ccg;
ccg = &ccb->ccg;
if (ccg->block_size == 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
break;
}
cam_calc_geometry(ccg, /* extended */ 1);
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
break;
}
case XPT_GET_SIM_KNOB:
{
struct ccb_sim_knob *kp = &ccb->knob;
if (mpt->is_fc) {
kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
switch (mpt->role) {
case MPT_ROLE_NONE:
kp->xport_specific.fc.role = KNOB_ROLE_NONE;
break;
case MPT_ROLE_INITIATOR:
kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
break;
case MPT_ROLE_TARGET:
kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
break;
case MPT_ROLE_BOTH:
kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
break;
}
kp->xport_specific.fc.valid =
KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
ccb->ccb_h.status = CAM_REQ_CMP;
} else {
ccb->ccb_h.status = CAM_REQ_INVALID;
}
xpt_done(ccb);
break;
}
case XPT_PATH_INQ: /* Path routing inquiry */
{
struct ccb_pathinq *cpi = &ccb->cpi;
cpi->version_num = 1;
cpi->target_sprt = 0;
cpi->hba_eng_cnt = 0;
cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
/*
* FC cards report MAX_DEVICES of 512, but
* the MSG_SCSI_IO_REQUEST target id field
* is only 8 bits. Until we fix the driver
* to support 'channels' for bus overflow,
* just limit it.
*/
if (cpi->max_target > 255) {
cpi->max_target = 255;
}
/*
* VMware ESX reports > 16 devices and then dies when we probe.
*/
if (mpt->is_spi && cpi->max_target > 15) {
cpi->max_target = 15;
}
if (mpt->is_spi)
cpi->max_lun = 7;
else
cpi->max_lun = MPT_MAX_LUNS;
cpi->initiator_id = mpt->mpt_ini_id;
cpi->bus_id = cam_sim_bus(sim);
/*
* The base speed is the speed of the underlying connection.
*/
cpi->protocol = PROTO_SCSI;
if (mpt->is_fc) {
cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
PIM_EXTLUNS;
cpi->base_transfer_speed = 100000;
cpi->hba_inquiry = PI_TAG_ABLE;
cpi->transport = XPORT_FC;
cpi->transport_version = 0;
cpi->protocol_version = SCSI_REV_SPC;
cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
cpi->xport_specific.fc.bitrate =
100000 * mpt->mpt_fcport_speed;
} else if (mpt->is_sas) {
cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
PIM_EXTLUNS;
cpi->base_transfer_speed = 300000;
cpi->hba_inquiry = PI_TAG_ABLE;
cpi->transport = XPORT_SAS;
cpi->transport_version = 0;
cpi->protocol_version = SCSI_REV_SPC2;
} else {
cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED |
PIM_EXTLUNS;
cpi->base_transfer_speed = 3300;
cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->protocol_version = SCSI_REV_2;
}
/*
* We give our fake RAID passthru bus a width that is MaxPhysDisks
* wide and restrict it to one lun.
*/
if (raid_passthru) {
cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
cpi->initiator_id = cpi->max_target + 1;
cpi->max_lun = 0;
}
if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
cpi->hba_misc |= PIM_NOINITIATOR;
}
if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
cpi->target_sprt =
PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
} else {
cpi->target_sprt = 0;
}
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->ccb_h.status = CAM_REQ_CMP;
break;
}
case XPT_EN_LUN: /* Enable LUN as a target */
{
int result;
if (ccb->cel.enable)
result = mpt_enable_lun(mpt,
ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
else
result = mpt_disable_lun(mpt,
ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
if (result == 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
} else {
mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
}
break;
}
case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
{
tgt_resource_t *trtp;
lun_id_t lun = ccb->ccb_h.target_lun;
ccb->ccb_h.sim_priv.entries[0].field = 0;
ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
if (lun == CAM_LUN_WILDCARD) {
if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
break;
}
trtp = &mpt->trt_wildcard;
} else if (lun >= MPT_MAX_LUNS) {
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
break;
} else {
trtp = &mpt->trt[lun];
}
if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
mpt_lprt(mpt, MPT_PRT_DEBUG1,
"Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
sim_links.stqe);
} else {
mpt_lprt(mpt, MPT_PRT_DEBUG1,
"Put FREE INOT lun %jx\n", (uintmax_t)lun);
STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
sim_links.stqe);
}
mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
return;
}
case XPT_NOTIFY_ACKNOWLEDGE: /* Task management request done. */
{
request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id);
mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n");
mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0);
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
case XPT_CONT_TARGET_IO:
mpt_target_start_io(mpt, ccb);
return;
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
xpt_done(ccb);
}
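/*
* Fill in SPI transfer settings for a target: current settings come
* from SCSI device page 0, user settings from the port page 0
* capability maximums.
*/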
static int
mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
{
struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
target_id_t tgt;
uint32_t dval, pval, oval;
int rv;
if (IS_CURRENT_SETTINGS(cts) == 0) {
tgt = cts->ccb_h.target_id;
} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
return (-1);
}
} else {
tgt = cts->ccb_h.target_id;
}
/*
* We aren't looking at Port Page 2 BIOS settings here;
* sometimes these have been known to be bogus XXX.
*
* For user settings, we pick the max from port page 0
*
* For current settings we read the current settings out from
* device page 0 for that target.
*/
if (IS_CURRENT_SETTINGS(cts)) {
CONFIG_PAGE_SCSI_DEVICE_0 tmp;
dval = 0;
tmp = mpt->mpt_dev_page0[tgt];
rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
sizeof(tmp), FALSE, 5000);
if (rv) {
mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
return (rv);
}
mpt2host_config_page_scsi_device_0(&tmp);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
tmp.NegotiatedParameters, tmp.Information);
dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
DP_WIDE : DP_NARROW;
dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
DP_DISC_ENABLE : DP_DISC_DISABL;
dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
DP_TQING_ENABLE : DP_TQING_DISABL;
oval = tmp.NegotiatedParameters;
oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
pval = tmp.NegotiatedParameters;
pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
mpt->mpt_dev_page0[tgt] = tmp;
} else {
dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
oval = mpt->mpt_port_page0.Capabilities;
oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
pval = mpt->mpt_port_page0.Capabilities;
pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
}
spi->valid = 0;
scsi->valid = 0;
spi->flags = 0;
scsi->flags = 0;
spi->sync_offset = oval;
spi->sync_period = pval;
spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
spi->valid |= CTS_SPI_VALID_SYNC_RATE;
spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
if (dval & DP_WIDE) {
spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
} else {
spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
}
if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
scsi->valid = CTS_SCSI_VALID_TQ;
if (dval & DP_TQING_ENABLE) {
scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
}
spi->valid |= CTS_SPI_VALID_DISC;
if (dval & DP_DISC_ENABLE) {
spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
}
}
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
return (0);
}
static void
mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
{
PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
ptr = &mpt->mpt_dev_page1[tgt];
if (onoff) {
ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
} else {
ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
}
}
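/*
* Update the cached SCSI device page 1 for a target with the
* requested sync period and offset; DT is requested for periods
* faster than 0xa and QAS/IU for periods faster than 0x9.
*/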
static void
mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
{
PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
ptr = &mpt->mpt_dev_page1[tgt];
ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
if (period == 0) {
return;
}
ptr->RequestedParameters |=
period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
ptr->RequestedParameters |=
offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
if (period < 0xa) {
ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
}
if (period < 0x9) {
ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
}
}
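/*
* Write the cached SCSI device page 1 for a target out to the IOC.
*/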
static int
mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
{
CONFIG_PAGE_SCSI_DEVICE_1 tmp;
int rv;
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
"mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
tmp = mpt->mpt_dev_page1[tgt];
host2mpt_config_page_scsi_device_1(&tmp);
rv = mpt_write_cur_cfg_page(mpt, tgt,
&tmp.Header, sizeof(tmp), FALSE, 5000);
if (rv) {
mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
return (-1);
}
return (0);
}
/****************************** Timeout Recovery ******************************/
static int
mpt_spawn_recovery_thread(struct mpt_softc *mpt)
{
int error;
error = kproc_create(mpt_recovery_thread, mpt,
&mpt->recovery_thread, /*flags*/0,
/*altstack*/0, "mpt_recovery%d", mpt->unit);
return (error);
}
static void
mpt_terminate_recovery_thread(struct mpt_softc *mpt)
{
if (mpt->recovery_thread == NULL) {
return;
}
mpt->shutdwn_recovery = 1;
wakeup(mpt);
/*
* Sleep on a slightly different location
* for this interlock just for added safety.
*/
mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
}
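/*
* Recovery thread body: sleep until timed out requests appear on the
* timeout list, recover them, and exit when shutdown is requested.
*/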
static void
mpt_recovery_thread(void *arg)
{
struct mpt_softc *mpt;
mpt = (struct mpt_softc *)arg;
MPT_LOCK(mpt);
for (;;) {
if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
if (mpt->shutdwn_recovery == 0) {
mpt_sleep(mpt, mpt, PUSER, "idle", 0);
}
}
if (mpt->shutdwn_recovery != 0) {
break;
}
mpt_recover_commands(mpt);
}
mpt->recovery_thread = NULL;
wakeup(&mpt->recovery_thread);
MPT_UNLOCK(mpt);
kproc_exit(0);
}
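/*
* Issue a SCSI task management function through the dedicated TMF
* request using the handshake interface. Only one TMF may be
* outstanding at a time; failures trigger a hard reset.
*/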
static int
mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx,
int sleep_ok)
{
MSG_SCSI_TASK_MGMT *tmf_req;
int error;
/*
* Wait for any current TMF request to complete.
* We're only allowed to issue one TMF at a time.
*/
error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
sleep_ok, MPT_TMF_MAX_TIMEOUT);
if (error != 0) {
mpt_reset(mpt, TRUE);
return (ETIMEDOUT);
}
mpt_assign_serno(mpt, mpt->tmf_req);
mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
memset(tmf_req, 0, sizeof(*tmf_req));
tmf_req->TargetID = target;
tmf_req->Bus = channel;
tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
tmf_req->TaskType = type;
tmf_req->MsgFlags = flags;
tmf_req->MsgContext =
htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
tmf_req->TaskMsgContext = abort_ctx;
mpt_lprt(mpt, MPT_PRT_DEBUG,
"Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
mpt->tmf_req->serno, tmf_req->MsgContext);
if (mpt->verbose > MPT_PRT_DEBUG) {
mpt_print_request(tmf_req);
}
KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
("mpt_scsi_send_tmf: tmf_req already on pending list"));
TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
if (error != MPT_OK) {
TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
mpt->tmf_req->state = REQ_STATE_FREE;
mpt_reset(mpt, TRUE);
}
return (error);
}
/*
* When a command times out, it is placed on the request_timeout_list
* and we wake our recovery thread. The MPT-Fusion architecture supports
* only a single TMF operation at a time, so we serially abort/bdr, etc,
* the timedout transactions. The next TMF is issued either by the
* completion handler of the current TMF waking our recovery thread,
* or the TMF timeout handler causing a hard reset sequence.
*/
static void
mpt_recover_commands(struct mpt_softc *mpt)
{
request_t *req;
union ccb *ccb;
int error;
if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
/*
* No work to do- leave.
*/
mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
return;
}
/*
* Flush any commands whose completion coincides with their timeout.
*/
mpt_intr(mpt);
if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
/*
* The timedout commands have already
* completed. This typically means
* that either the timeout value was on
* the hairy edge of what the device
* requires or - more likely - interrupts
* are not happening.
*/
mpt_prt(mpt, "Timedout requests already complete. "
"Interrupts may not be functioning.\n");
mpt_enable_ints(mpt);
return;
}
/*
* We have no visibility into the current state of the
* controller, so attempt to abort the commands in the
* order they timed out. For initiator commands, we
* depend on the reply handler pulling requests off
* the timeout list.
*/
while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
uint16_t status;
uint8_t response;
MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
req, req->serno, hdrp->Function);
ccb = req->ccb;
if (ccb == NULL) {
mpt_prt(mpt, "null ccb in timed out request. "
"Resetting Controller.\n");
mpt_reset(mpt, TRUE);
continue;
}
mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
/*
* Check to see if this is not an initiator command and,
* if so, deal with it differently.
*/
switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
break;
default:
/*
* XXX: FIX ME: need to abort target assists...
*/
mpt_prt(mpt, "just putting it back on the pend q\n");
TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
links);
continue;
}
error = mpt_scsi_send_tmf(mpt,
MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
htole32(req->index | scsi_io_handler_id), TRUE);
if (error != 0) {
/*
* mpt_scsi_send_tmf hard resets on failure, so no
* need to do so here. Our queue should be emptied
* by the hard reset.
*/
continue;
}
error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
REQ_STATE_DONE, TRUE, 500);
status = le16toh(mpt->tmf_req->IOCStatus);
response = mpt->tmf_req->ResponseCode;
mpt->tmf_req->state = REQ_STATE_FREE;
if (error != 0) {
/*
* If we've errored out, reset the controller.
*/
mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
"Resetting controller\n");
mpt_reset(mpt, TRUE);
continue;
}
if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
"Resetting controller.\n", status);
mpt_reset(mpt, TRUE);
continue;
}
if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
"Resetting controller.\n", response);
mpt_reset(mpt, TRUE);
continue;
}
mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
}
}
/************************ Target Mode Support ****************************/
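/*
* Post an ELS buffer to the IOC so that incoming extended link
* service frames can be delivered to the driver.
*/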
static void
mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
{
MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
PTR_SGE_TRANSACTION32 tep;
PTR_SGE_SIMPLE32 se;
bus_addr_t paddr;
uint32_t fl;
paddr = req->req_pbuf;
paddr += MPT_RQSL(mpt);
fc = req->req_vbuf;
memset(fc, 0, MPT_REQUEST_AREA);
fc->BufferCount = 1;
fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
fc->MsgContext = htole32(req->index | fc_els_handler_id);
/*
* Okay, set up ELS buffer pointers. ELS buffer pointers
* consist of a TE SGL element (with details length of zero)
* followed by a SIMPLE SGL element which holds the address
* of the buffer.
*/
tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
tep->ContextSize = 4;
tep->Flags = 0;
tep->TransactionContext[0] = htole32(ioindex);
se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
fl =
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_LAST_ELEMENT |
MPI_SGE_FLAGS_END_OF_LIST |
MPI_SGE_FLAGS_END_OF_BUFFER;
fl <<= MPI_SGE_FLAGS_SHIFT;
fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
se->FlagsLength = htole32(fl);
se->Address = htole32((uint32_t) paddr);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"add ELS index %d ioindex %d for %p:%u\n",
req->index, ioindex, req, req->serno);
KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
("mpt_fc_post_els: request not locked"));
mpt_send_cmd(mpt, req);
}
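/*
* Post a target command buffer to the IOC so that incoming SCSI
* commands can be delivered to the driver in target mode.
*/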
static void
mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
{
PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
PTR_CMD_BUFFER_DESCRIPTOR cb;
bus_addr_t paddr;
paddr = req->req_pbuf;
paddr += MPT_RQSL(mpt);
memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
fc = req->req_vbuf;
fc->BufferCount = 1;
fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX);
fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
cb = &fc->Buffer[0];
cb->IoIndex = htole16(ioindex);
cb->u.PhysicalAddress32 = htole32((U32) paddr);
mpt_check_doorbell(mpt);
mpt_send_cmd(mpt, req);
}
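/*
* Allocate and post the pool of ELS buffers (FC controllers only).
*/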
static int
mpt_add_els_buffers(struct mpt_softc *mpt)
{
int i;
if (mpt->is_fc == 0) {
return (TRUE);
}
if (mpt->els_cmds_allocated) {
return (TRUE);
}
mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt->els_cmd_ptrs == NULL) {
return (FALSE);
}
/*
* Feed the chip some ELS buffer resources
*/
for (i = 0; i < MPT_MAX_ELS; i++) {
request_t *req = mpt_get_request(mpt, FALSE);
if (req == NULL) {
break;
}
req->state |= REQ_STATE_LOCKED;
mpt->els_cmd_ptrs[i] = req;
mpt_fc_post_els(mpt, req, i);
}
if (i == 0) {
mpt_prt(mpt, "unable to add ELS buffer resources\n");
free(mpt->els_cmd_ptrs, M_DEVBUF);
mpt->els_cmd_ptrs = NULL;
return (FALSE);
}
if (i != MPT_MAX_ELS) {
mpt_lprt(mpt, MPT_PRT_INFO,
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
}
mpt->els_cmds_allocated = i;
return(TRUE);
}
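/*
* Allocate and post the pool of target command buffers, using up to
* half of the available requests.
*/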
static int
mpt_add_target_commands(struct mpt_softc *mpt)
{
int i, max;
if (mpt->tgt_cmd_ptrs) {
return (TRUE);
}
max = MPT_MAX_REQUESTS(mpt) >> 1;
if (max > mpt->mpt_max_tgtcmds) {
max = mpt->mpt_max_tgtcmds;
}
mpt->tgt_cmd_ptrs =
- mallocarray(max, sizeof(request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
+ malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt->tgt_cmd_ptrs == NULL) {
mpt_prt(mpt,
"mpt_add_target_commands: could not allocate cmd ptrs\n");
return (FALSE);
}
for (i = 0; i < max; i++) {
request_t *req;
req = mpt_get_request(mpt, FALSE);
if (req == NULL) {
break;
}
req->state |= REQ_STATE_LOCKED;
mpt->tgt_cmd_ptrs[i] = req;
mpt_post_target_command(mpt, req, i);
}
if (i == 0) {
mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
free(mpt->tgt_cmd_ptrs, M_DEVBUF);
mpt->tgt_cmd_ptrs = NULL;
return (FALSE);
}
mpt->tgt_cmds_allocated = i;
if (i < max) {
mpt_lprt(mpt, MPT_PRT_INFO,
"added %d of %d target bufs\n", i, max);
}
return (i);
}
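/*
* Enable target mode for a lun (or the wildcard lun); the FC link is
* reset the first time target mode is enabled.
*/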
static int
mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{
if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
mpt->twildcard = 1;
} else if (lun >= MPT_MAX_LUNS) {
return (EINVAL);
} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
return (EINVAL);
}
if (mpt->tenabled == 0) {
if (mpt->is_fc) {
(void) mpt_fc_reset_link(mpt, 0);
}
mpt->tenabled = 1;
}
if (lun == CAM_LUN_WILDCARD) {
mpt->trt_wildcard.enabled = 1;
} else {
mpt->trt[lun].enabled = 1;
}
return (0);
}
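/*
* Disable target mode for a lun; once the last lun is disabled the FC
* link is reset and target mode is turned off.
*/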
static int
mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{
int i;
if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
mpt->twildcard = 0;
} else if (lun >= MPT_MAX_LUNS) {
return (EINVAL);
} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
return (EINVAL);
}
if (lun == CAM_LUN_WILDCARD) {
mpt->trt_wildcard.enabled = 0;
} else {
mpt->trt[lun].enabled = 0;
}
for (i = 0; i < MPT_MAX_LUNS; i++) {
if (mpt->trt[i].enabled) {
break;
}
}
if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
if (mpt->is_fc) {
(void) mpt_fc_reset_link(mpt, 0);
}
mpt->tenabled = 0;
}
return (0);
}
/*
* Called with MPT lock held
*/
static void
mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
{
struct ccb_scsiio *csio = &ccb->csio;
request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
switch (tgt->state) {
case TGT_STATE_IN_CAM:
break;
case TGT_STATE_MOVING_DATA:
mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
xpt_freeze_simq(mpt->sim, 1);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
xpt_done(ccb);
return;
default:
mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
"starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
mpt_tgt_dump_req_state(mpt, cmd_req);
mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
xpt_done(ccb);
return;
}
if (csio->dxfer_len) {
bus_dmamap_callback_t *cb;
PTR_MSG_TARGET_ASSIST_REQUEST ta;
request_t *req;
int error;
KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
("dxfer_len %u but direction is NONE", csio->dxfer_len));
if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
if (mpt->outofbeer == 0) {
mpt->outofbeer = 1;
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
xpt_done(ccb);
return;
}
ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
if (sizeof (bus_addr_t) > 4) {
cb = mpt_execute_req_a64;
} else {
cb = mpt_execute_req;
}
req->ccb = ccb;
ccb->ccb_h.ccb_req_ptr = req;
/*
* Record the currently active ccb and the
* request for it in our target state area.
*/
tgt->ccb = ccb;
tgt->req = req;
memset(req->req_vbuf, 0, MPT_RQSL(mpt));
ta = req->req_vbuf;
if (mpt->is_sas) {
PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
cmd_req->req_vbuf;
ta->QueueTag = ssp->InitiatorTag;
} else if (mpt->is_spi) {
PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
cmd_req->req_vbuf;
ta->QueueTag = sp->Tag;
}
ta->Function = MPI_FUNCTION_TARGET_ASSIST;
ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
ta->ReplyWord = htole32(tgt->reply_desc);
be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun));
ta->RelativeOffset = tgt->bytes_xfered;
ta->DataLength = ccb->csio.dxfer_len;
if (ta->DataLength > tgt->resid) {
ta->DataLength = tgt->resid;
}
/*
* XXX Should be done after data transfer completes?
*/
csio->resid = csio->dxfer_len - ta->DataLength;
tgt->resid -= csio->dxfer_len;
tgt->bytes_xfered += csio->dxfer_len;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
ta->TargetAssistFlags |=
TARGET_ASSIST_FLAGS_DATA_DIRECTION;
}
#ifdef WE_TRUST_AUTO_GOOD_STATUS
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
ta->TargetAssistFlags |=
TARGET_ASSIST_FLAGS_AUTO_STATUS;
}
#endif
tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
mpt_lprt(mpt, MPT_PRT_DEBUG,
"DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
"nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
cb, req, 0);
if (error == EINPROGRESS) {
xpt_freeze_simq(mpt->sim, 1);
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
} else {
/*
* XXX: I don't know why this seems to happen, but
* XXX: completing the CCB seems to make things happy.
* XXX: This seems to happen if the initiator requests
* XXX: enough data that we have to do multiple CTIOs.
*/
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
mpt_lprt(mpt, MPT_PRT_DEBUG,
"Meaningless STATUS CCB (%p): flags %x status %x "
"resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
return;
}
mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status,
(void *)&csio->sense_data,
(ccb->ccb_h.flags & CAM_SEND_SENSE) ?
csio->sense_len : 0);
}
}
static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
lun_id_t lun, int send, uint8_t *data, size_t length)
{
mpt_tgt_state_t *tgt;
PTR_MSG_TARGET_ASSIST_REQUEST ta;
SGE_SIMPLE32 *se;
uint32_t flags;
uint8_t *dptr;
bus_addr_t pptr;
request_t *req;
/*
* We enter with resid set to the data load for the command.
*/
tgt = MPT_TGT_STATE(mpt, cmd_req);
if (length == 0 || tgt->resid == 0) {
tgt->resid = 0;
mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0);
return;
}
if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
mpt_prt(mpt, "out of resources- dropping local response\n");
return;
}
tgt->is_local = 1;
memset(req->req_vbuf, 0, MPT_RQSL(mpt));
ta = req->req_vbuf;
if (mpt->is_sas) {
PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
ta->QueueTag = ssp->InitiatorTag;
} else if (mpt->is_spi) {
PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
ta->QueueTag = sp->Tag;
}
ta->Function = MPI_FUNCTION_TARGET_ASSIST;
ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
ta->ReplyWord = htole32(tgt->reply_desc);
be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
ta->RelativeOffset = 0;
ta->DataLength = length;
dptr = req->req_vbuf;
dptr += MPT_RQSL(mpt);
pptr = req->req_pbuf;
pptr += MPT_RQSL(mpt);
memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
se = (SGE_SIMPLE32 *) &ta->SGL[0];
memset(se, 0, sizeof (*se));
flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
if (send) {
ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
}
se->Address = pptr;
MPI_pSGE_SET_LENGTH(se, length);
flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
MPI_pSGE_SET_FLAGS(se, flags);
tgt->ccb = NULL;
tgt->req = req;
tgt->resid -= length;
tgt->bytes_xfered = length;
#ifdef WE_TRUST_AUTO_GOOD_STATUS
tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
tgt->state = TGT_STATE_MOVING_DATA;
#endif
mpt_send_cmd(mpt, req);
}
/*
* Abort queued up CCBs
*/
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
struct mpt_hdr_stailq *lp;
struct ccb_hdr *srch;
union ccb *accb = ccb->cab.abort_ccb;
tgt_resource_t *trtp;
mpt_tgt_state_t *tgt;
request_t *req;
uint32_t tag;
mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD)
trtp = &mpt->trt_wildcard;
else
trtp = &mpt->trt[ccb->ccb_h.target_lun];
if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
lp = &trtp->atios;
tag = accb->atio.tag_id;
} else {
lp = &trtp->inots;
tag = accb->cin1.tag_id;
}
/* Search the CCB among queued. */
STAILQ_FOREACH(srch, lp, sim_links.stqe) {
if (srch != &accb->ccb_h)
continue;
STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
accb->ccb_h.status = CAM_REQ_ABORTED;
xpt_done(accb);
return (CAM_REQ_CMP);
}
/* Search the CCB among running. */
req = MPT_TAG_2_REQ(mpt, tag);
tgt = MPT_TGT_STATE(mpt, req);
if (tgt->tag_id == tag) {
mpt_abort_target_cmd(mpt, req);
return (CAM_REQ_CMP);
}
return (CAM_UA_ABORT);
}
/*
* Ask the MPT to abort the current target command
*/
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
int error;
request_t *req;
PTR_MSG_TARGET_MODE_ABORT abtp;
req = mpt_get_request(mpt, FALSE);
if (req == NULL) {
return (-1);
}
abtp = req->req_vbuf;
memset(abtp, 0, sizeof (*abtp));
abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
error = 0;
if (mpt->is_fc || mpt->is_sas) {
mpt_send_cmd(mpt, req);
} else {
error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
}
return (error);
}
/*
* WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
* TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
* FC929 to set bogus FC_RSP fields (nonzero residuals
* but w/o RESID fields set). This causes QLogic initiators
* to think maybe that a frame was lost.
*
* WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
* we use allocated requests to do TARGET_ASSIST and we
* need to know when to release them.
*/
static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
uint8_t status, uint8_t const *sense_data, u_int sense_len)
{
uint8_t *cmd_vbuf;
mpt_tgt_state_t *tgt;
PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
request_t *req;
bus_addr_t paddr;
int resplen = 0;
uint32_t fl;
cmd_vbuf = cmd_req->req_vbuf;
cmd_vbuf += MPT_RQSL(mpt);
tgt = MPT_TGT_STATE(mpt, cmd_req);
if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
if (mpt->outofbeer == 0) {
mpt->outofbeer = 1;
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
}
if (ccb) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
xpt_done(ccb);
} else {
mpt_prt(mpt,
"could not allocate status request- dropping\n");
}
return;
}
req->ccb = ccb;
if (ccb) {
ccb->ccb_h.ccb_mpt_ptr = mpt;
ccb->ccb_h.ccb_req_ptr = req;
}
/*
* Record the currently active ccb, if any, and the
* request for it in our target state area.
*/
tgt->ccb = ccb;
tgt->req = req;
tgt->state = TGT_STATE_SENDING_STATUS;
tp = req->req_vbuf;
paddr = req->req_pbuf;
paddr += MPT_RQSL(mpt);
memset(tp, 0, sizeof (*tp));
tp->StatusCode = status;
tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
if (mpt->is_fc) {
PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
(PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
uint8_t *sts_vbuf;
uint32_t *rsp;
sts_vbuf = req->req_vbuf;
sts_vbuf += MPT_RQSL(mpt);
rsp = (uint32_t *) sts_vbuf;
memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
/*
* The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
* It has to be big-endian in memory and is organized
* in 32 bit words, which are much easier to deal with
* as words which are swizzled as needed.
*
* All we're filling here is the FC_RSP payload.
* We may just have the chip synthesize it if
* we have no residual and an OK status.
*
*/
memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
rsp[2] = htobe32(status);
#define MIN_FCP_RESPONSE_SIZE 24
#ifndef WE_TRUST_AUTO_GOOD_STATUS
resplen = MIN_FCP_RESPONSE_SIZE;
#endif
if (tgt->resid < 0) {
rsp[2] |= htobe32(0x400); /* XXXX NEED MNEMONIC!!!! */
rsp[3] = htobe32(-tgt->resid);
resplen = MIN_FCP_RESPONSE_SIZE;
} else if (tgt->resid > 0) {
rsp[2] |= htobe32(0x800); /* XXXX NEED MNEMONIC!!!! */
rsp[3] = htobe32(tgt->resid);
resplen = MIN_FCP_RESPONSE_SIZE;
}
if (sense_len > 0) {
rsp[2] |= htobe32(0x200); /* XXXX NEED MNEMONIC!!!! */
rsp[4] = htobe32(sense_len);
memcpy(&rsp[6], sense_data, sense_len);
resplen = MIN_FCP_RESPONSE_SIZE + sense_len;
}
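/*
* For reference, the constants OR'd into rsp[2] above line up with
* the FCP_RSP layout: byte 10 of the payload is the FCP_FLAGS
* validity byte and byte 11 the SCSI status, so within this
* big-endian word 0x400 is FCP_RESID_OVER (0x04 << 8), 0x800 is
* FCP_RESID_UNDER (0x08 << 8) and 0x200 is FCP_SNS_LEN_VALID
* (0x02 << 8), with rsp[3] carrying FCP_RESID and rsp[4] FCP_SNS_LEN.
*/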
} else if (mpt->is_sas) {
PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
(PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
} else {
PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
(PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
tp->QueueTag = htole16(sp->Tag);
memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
}
tp->ReplyWord = htole32(tgt->reply_desc);
tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
#ifdef WE_CAN_USE_AUTO_REPOST
tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
if (status == SCSI_STATUS_OK && resplen == 0) {
tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
} else {
tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
fl = MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_LAST_ELEMENT |
MPI_SGE_FLAGS_END_OF_LIST |
MPI_SGE_FLAGS_END_OF_BUFFER;
fl <<= MPI_SGE_FLAGS_SHIFT;
fl |= resplen;
tp->StatusDataSGE.FlagsLength = htole32(fl);
}
mpt_lprt(mpt, MPT_PRT_DEBUG,
"STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n",
ccb, sense_len > 0 ? "" : "out", tgt->tag_id,
req, req->serno, tgt->resid);
if (mpt->verbose > MPT_PRT_DEBUG)
mpt_print_request(req->req_vbuf);
if (ccb) {
ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
}
mpt_send_cmd(mpt, req);
}
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
tgt_resource_t *trtp, int init_id)
{
struct ccb_immediate_notify *inot;
mpt_tgt_state_t *tgt;
tgt = MPT_TGT_STATE(mpt, req);
inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
if (inot == NULL) {
mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs - sending back BSY\n");
mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0);
return;
}
STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
mpt_lprt(mpt, MPT_PRT_DEBUG1,
"Get FREE INOT %p lun %jx\n", inot,
(uintmax_t)inot->ccb_h.target_lun);
inot->initiator_id = init_id; /* XXX */
inot->tag_id = tgt->tag_id;
inot->seq_id = 0;
/*
* This is a somewhat grotesque attempt to map from task management
* to old style SCSI messages. God help us all.
*/
switch (fc) {
case MPT_QUERY_TASK_SET:
inot->arg = MSG_QUERY_TASK_SET;
break;
case MPT_ABORT_TASK_SET:
inot->arg = MSG_ABORT_TASK_SET;
break;
case MPT_CLEAR_TASK_SET:
inot->arg = MSG_CLEAR_TASK_SET;
break;
case MPT_QUERY_ASYNC_EVENT:
inot->arg = MSG_QUERY_ASYNC_EVENT;
break;
case MPT_LOGICAL_UNIT_RESET:
inot->arg = MSG_LOGICAL_UNIT_RESET;
break;
case MPT_TARGET_RESET:
inot->arg = MSG_TARGET_RESET;
break;
case MPT_CLEAR_ACA:
inot->arg = MSG_CLEAR_ACA;
break;
default:
inot->arg = MSG_NOOP;
break;
}
tgt->ccb = (union ccb *) inot;
inot->ccb_h.status = CAM_MESSAGE_RECV;
xpt_done((union ccb *)inot);
}
static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
'L', 'S', 'I', '-', 'L', 'O', 'G', 'I',
'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V',
'0', '0', '0', '1'
};
struct ccb_accept_tio *atiop;
lun_id_t lun;
int tag_action = 0;
mpt_tgt_state_t *tgt;
tgt_resource_t *trtp = NULL;
U8 *lunptr;
U8 *vbuf;
U16 ioindex;
mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
uint8_t *cdbp;
/*
* Stash info for the current command where we can get at it later.
*/
vbuf = req->req_vbuf;
vbuf += MPT_RQSL(mpt);
if (mpt->verbose >= MPT_PRT_DEBUG) {
mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
}
/*
* Get our state pointer set up.
*/
tgt = MPT_TGT_STATE(mpt, req);
if (tgt->state != TGT_STATE_LOADED) {
mpt_tgt_dump_req_state(mpt, req);
panic("bad target state in mpt_scsi_tgt_atio");
}
memset(tgt, 0, sizeof (mpt_tgt_state_t));
tgt->state = TGT_STATE_IN_CAM;
tgt->reply_desc = reply_desc;
ioindex = GET_IO_INDEX(reply_desc);
/*
* The tag we construct here allows us to find the
* original request that the command came in with.
*
* This way we don't have to depend on anything but the
* tag to find things when CCBs show back up from CAM.
*/
tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
if (mpt->is_fc) {
PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
if (fc->FcpCntl[2]) {
/*
* Task Management Request
*/
switch (fc->FcpCntl[2]) {
case 0x1:
fct = MPT_QUERY_TASK_SET;
break;
case 0x2:
fct = MPT_ABORT_TASK_SET;
break;
case 0x4:
fct = MPT_CLEAR_TASK_SET;
break;
case 0x8:
fct = MPT_QUERY_ASYNC_EVENT;
break;
case 0x10:
fct = MPT_LOGICAL_UNIT_RESET;
break;
case 0x20:
fct = MPT_TARGET_RESET;
break;
case 0x40:
fct = MPT_CLEAR_ACA;
break;
default:
mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
fc->FcpCntl[2]);
mpt_scsi_tgt_status(mpt, NULL, req,
SCSI_STATUS_OK, NULL, 0);
return;
}
} else {
switch (fc->FcpCntl[1]) {
case 0:
tag_action = MSG_SIMPLE_Q_TAG;
break;
case 1:
tag_action = MSG_HEAD_OF_Q_TAG;
break;
case 2:
tag_action = MSG_ORDERED_Q_TAG;
break;
default:
/*
* Bah. Ignore Untagged Queuing and ACA
*/
tag_action = MSG_SIMPLE_Q_TAG;
break;
}
}
tgt->resid = be32toh(fc->FcpDl);
cdbp = fc->FcpCdb;
lunptr = fc->FcpLun;
tgt->itag = fc->OptionalOxid;
} else if (mpt->is_sas) {
PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
cdbp = ssp->CDB;
lunptr = ssp->LogicalUnitNumber;
tgt->itag = ssp->InitiatorTag;
} else {
PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
cdbp = sp->CDB;
lunptr = sp->LogicalUnitNumber;
tgt->itag = sp->Tag;
}
lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr));
/*
* Deal with non-enabled or bad luns here.
*/
if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
mpt->trt[lun].enabled == 0) {
if (mpt->twildcard) {
trtp = &mpt->trt_wildcard;
} else if (fct == MPT_NIL_TMT_VALUE) {
/*
* In this case, we haven't got an upstream listener
* for either a specific lun or wildcard luns. We
* have to make some sensible response. For regular
* inquiry, just return some NOT HERE inquiry data.
* For VPD inquiry, report illegal field in cdb.
* For REQUEST SENSE, just return NO SENSE data.
* REPORT LUNS gets illegal command.
* All other commands get 'no such device'.
*/
uint8_t sense[MPT_SENSE_SIZE];
size_t len;
memset(sense, 0, sizeof(sense));
sense[0] = 0xf0;
sense[2] = 0x5;
sense[7] = 0x8;
switch (cdbp[0]) {
case INQUIRY:
{
if (cdbp[1] != 0) {
sense[12] = 0x26;
sense[13] = 0x01;
break;
}
len = min(tgt->resid, cdbp[4]);
len = min(len, sizeof (null_iqd));
mpt_lprt(mpt, MPT_PRT_DEBUG,
"local inquiry %ld bytes\n", (long) len);
mpt_scsi_tgt_local(mpt, req, lun, 1,
null_iqd, len);
return;
}
case REQUEST_SENSE:
{
sense[2] = 0x0;
len = min(tgt->resid, cdbp[4]);
len = min(len, sizeof (sense));
mpt_lprt(mpt, MPT_PRT_DEBUG,
"local reqsense %ld bytes\n", (long) len);
mpt_scsi_tgt_local(mpt, req, lun, 1,
sense, len);
return;
}
case REPORT_LUNS:
mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
sense[12] = 0x26;
return;
default:
mpt_lprt(mpt, MPT_PRT_DEBUG,
"CMD 0x%x to unmanaged lun %jx\n",
cdbp[0], (uintmax_t)lun);
sense[12] = 0x25;
break;
}
mpt_scsi_tgt_status(mpt, NULL, req,
SCSI_STATUS_CHECK_COND, sense, sizeof(sense));
return;
}
/* otherwise, leave trtp NULL */
} else {
trtp = &mpt->trt[lun];
}
/*
* Deal with any task management
*/
if (fct != MPT_NIL_TMT_VALUE) {
if (trtp == NULL) {
mpt_prt(mpt, "task mgmt function %x but no listener\n",
fct);
mpt_scsi_tgt_status(mpt, NULL, req,
SCSI_STATUS_OK, NULL, 0);
} else {
mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
GET_INITIATOR_INDEX(reply_desc));
}
return;
}
atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
if (atiop == NULL) {
mpt_lprt(mpt, MPT_PRT_WARN,
"no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun,
mpt->tenabled? "QUEUE FULL" : "BUSY");
mpt_scsi_tgt_status(mpt, NULL, req,
mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
NULL, 0);
return;
}
STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
mpt_lprt(mpt, MPT_PRT_DEBUG1,
"Get FREE ATIO %p lun %jx\n", atiop,
(uintmax_t)atiop->ccb_h.target_lun);
atiop->ccb_h.ccb_mpt_ptr = mpt;
atiop->ccb_h.status = CAM_CDB_RECVD;
atiop->ccb_h.target_lun = lun;
atiop->sense_len = 0;
atiop->tag_id = tgt->tag_id;
atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
atiop->cdb_len = 16;
memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
if (tag_action) {
atiop->tag_action = tag_action;
atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
}
if (mpt->verbose >= MPT_PRT_DEBUG) {
int i;
mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
(uintmax_t)atiop->ccb_h.target_lun);
for (i = 0; i < atiop->cdb_len; i++) {
mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
(i == (atiop->cdb_len - 1))? '>' : ' ');
}
mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid);
}
xpt_done((union ccb *)atiop);
}
static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
"nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno,
tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb,
tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state);
}
static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{
mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
req->index, req->index, req->state);
mpt_tgt_dump_tgt_state(mpt, req);
}
static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
int dbg;
union ccb *ccb;
U16 status;
if (reply_frame == NULL) {
/*
* Figure out what the state of the command is.
*/
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
#ifdef INVARIANTS
mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
if (tgt->req) {
mpt_req_not_spcl(mpt, tgt->req,
"turbo scsi_tgt_reply associated req", __LINE__);
}
#endif
switch(tgt->state) {
case TGT_STATE_LOADED:
/*
* This is a new command starting.
*/
mpt_scsi_tgt_atio(mpt, req, reply_desc);
break;
case TGT_STATE_MOVING_DATA:
{
ccb = tgt->ccb;
if (tgt->req == NULL) {
panic("mpt: turbo target reply with null "
"associated request moving data");
/* NOTREACHED */
}
if (ccb == NULL) {
if (tgt->is_local == 0) {
panic("mpt: turbo target reply with "
"null associated ccb moving data");
/* NOTREACHED */
}
mpt_lprt(mpt, MPT_PRT_DEBUG,
"TARGET_ASSIST local done\n");
TAILQ_REMOVE(&mpt->request_pending_list,
tgt->req, links);
mpt_free_request(mpt, tgt->req);
tgt->req = NULL;
mpt_scsi_tgt_status(mpt, NULL, req,
0, NULL, 0);
return (TRUE);
}
tgt->ccb = NULL;
tgt->nxfers++;
mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
/*
* Free the Target Assist Request
*/
KASSERT(tgt->req->ccb == ccb,
("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
tgt->req->serno, tgt->req->ccb));
TAILQ_REMOVE(&mpt->request_pending_list,
tgt->req, links);
mpt_free_request(mpt, tgt->req);
tgt->req = NULL;
/*
* Do we need to send status now? That is, are
* we done with all our data transfers?
*/
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status,
("zero ccb sts at %d", __LINE__));
tgt->state = TGT_STATE_IN_CAM;
if (mpt->outofbeer) {
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
mpt->outofbeer = 0;
mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
}
xpt_done(ccb);
break;
}
/*
* Otherwise, send status (and sense)
*/
mpt_scsi_tgt_status(mpt, ccb, req,
ccb->csio.scsi_status,
(void *)&ccb->csio.sense_data,
(ccb->ccb_h.flags & CAM_SEND_SENSE) ?
ccb->csio.sense_len : 0);
break;
}
case TGT_STATE_SENDING_STATUS:
case TGT_STATE_MOVING_DATA_AND_STATUS:
{
int ioindex;
ccb = tgt->ccb;
if (tgt->req == NULL) {
panic("mpt: turbo target reply with null "
"associated request sending status");
/* NOTREACHED */
}
if (ccb) {
tgt->ccb = NULL;
if (tgt->state ==
TGT_STATE_MOVING_DATA_AND_STATUS) {
tgt->nxfers++;
}
mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
ccb->ccb_h.status |= CAM_SENT_SENSE;
}
mpt_lprt(mpt, MPT_PRT_DEBUG,
"TARGET_STATUS tag %x sts %x flgs %x req "
"%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
ccb->ccb_h.flags, tgt->req);
/*
* Free the Target Send Status Request
*/
KASSERT(tgt->req->ccb == ccb,
("tgt->req %p:%u tgt->req->ccb %p",
tgt->req, tgt->req->serno, tgt->req->ccb));
/*
* Notify CAM that we're done
*/
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status,
("ZERO ccb sts at %d", __LINE__));
tgt->ccb = NULL;
} else {
mpt_lprt(mpt, MPT_PRT_DEBUG,
"TARGET_STATUS non-CAM for req %p:%u\n",
tgt->req, tgt->req->serno);
}
TAILQ_REMOVE(&mpt->request_pending_list,
tgt->req, links);
mpt_free_request(mpt, tgt->req);
tgt->req = NULL;
/*
* And re-post the Command Buffer.
* This will reset the state.
*/
ioindex = GET_IO_INDEX(reply_desc);
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
tgt->is_local = 0;
mpt_post_target_command(mpt, req, ioindex);
/*
* And post a done for anyone who cares
*/
if (ccb) {
if (mpt->outofbeer) {
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
mpt->outofbeer = 0;
mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
}
xpt_done(ccb);
}
break;
}
case TGT_STATE_NIL: /* XXX This Never Happens XXX */
tgt->state = TGT_STATE_LOADED;
break;
default:
mpt_prt(mpt, "Unknown Target State 0x%x in Context "
"Reply Function\n", tgt->state);
}
return (TRUE);
}
status = le16toh(reply_frame->IOCStatus);
if (status != MPI_IOCSTATUS_SUCCESS) {
dbg = MPT_PRT_ERROR;
} else {
dbg = MPT_PRT_DEBUG1;
}
mpt_lprt(mpt, dbg,
"SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
req, req->serno, reply_frame, reply_frame->Function, status);
switch (reply_frame->Function) {
case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
{
mpt_tgt_state_t *tgt;
#ifdef INVARIANTS
mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
if (status != MPI_IOCSTATUS_SUCCESS) {
/*
* XXX What to do?
*/
break;
}
tgt = MPT_TGT_STATE(mpt, req);
KASSERT(tgt->state == TGT_STATE_LOADING,
("bad state 0x%x on reply to buffer post", tgt->state));
mpt_assign_serno(mpt, req);
tgt->state = TGT_STATE_LOADED;
break;
}
case MPI_FUNCTION_TARGET_ASSIST:
#ifdef INVARIANTS
mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
mpt_prt(mpt, "target assist completion\n");
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
mpt_free_request(mpt, req);
break;
case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef INVARIANTS
mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
mpt_prt(mpt, "status send completion\n");
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
mpt_free_request(mpt, req);
break;
case MPI_FUNCTION_TARGET_MODE_ABORT:
{
PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
(PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
PTR_MSG_TARGET_MODE_ABORT abtp =
(PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef INVARIANTS
mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
mpt_free_request(mpt, req);
break;
}
default:
mpt_prt(mpt, "Unknown Target Address Reply Function code: "
"0x%x\n", reply_frame->Function);
break;
}
return (TRUE);
}
Index: head/sys/dev/mrsas/mrsas.c
===================================================================
--- head/sys/dev/mrsas/mrsas.c (revision 328217)
+++ head/sys/dev/mrsas/mrsas.c (revision 328218)
@@ -1,4602 +1,4601 @@
/*
* Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
* Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
* Support: freebsdraid@avagotech.com
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer. 2. Redistributions
* in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution. 3. Neither the name of the
* <ORGANIZATION> nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing
* official policies,either expressed or implied, of the FreeBSD Project.
*
* Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
* Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/sysent.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
/*
* Function prototypes
*/
static d_open_t mrsas_open;
static d_close_t mrsas_close;
static d_read_t mrsas_read;
static d_write_t mrsas_write;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;
static void mrsas_ich_startup(void *arg);
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
static int mrsas_allocate_msix(struct mrsas_softc *sc);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
static void mrsas_ocr_thread(void *arg);
static int mrsas_get_map_info(struct mrsas_softc *sc);
static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
static int mrsas_sync_map_info(struct mrsas_softc *sc);
static int mrsas_get_pd_list(struct mrsas_softc *sc);
static int mrsas_get_ld_list(struct mrsas_softc *sc);
static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static void megasas_setup_jbod_map(struct mrsas_softc *sc);
static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd_to_abort);
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev,
u_long cmd, caddr_t arg);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *mfi_cmd);
void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
int mrsas_init_adapter(struct mrsas_softc *sc);
int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
int mrsas_ioc_init(struct mrsas_softc *sc);
int mrsas_bus_scan(struct mrsas_softc *sc);
int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
int mrsas_reset_targets(struct mrsas_softc *sc);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd);
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
int size);
void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_disable_intr(struct mrsas_softc *sc);
void mrsas_enable_intr(struct mrsas_softc *sc);
void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void mrsas_free_mem(struct mrsas_softc *sc);
void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
void mrsas_isr(void *arg);
void mrsas_teardown_intr(struct mrsas_softc *sc);
void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
void mrsas_kill_hba(struct mrsas_softc *sc);
void mrsas_aen_handler(struct mrsas_softc *sc);
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
u_int32_t value);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
u_int32_t req_desc_hi);
void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd, u_int8_t status);
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
u_int8_t extStatus);
struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc,
u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
/*
* PCI device struct and table
*
*/
typedef struct mrsas_ident {
uint16_t vendor;
uint16_t device;
uint16_t subvendor;
uint16_t subdevice;
const char *desc;
} MRSAS_CTLR_ID;
MRSAS_CTLR_ID device_table[] = {
{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
{0, 0, 0, 0, NULL}
};
/*
* Character device entry points
*
*/
static struct cdevsw mrsas_cdevsw = {
.d_version = D_VERSION,
.d_open = mrsas_open,
.d_close = mrsas_close,
.d_read = mrsas_read,
.d_write = mrsas_write,
.d_ioctl = mrsas_ioctl,
.d_poll = mrsas_poll,
.d_name = "mrsas",
};
MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
/*
* In the cdevsw routines, we find our softc by using the si_drv1 member of
* struct cdev. We set this variable to point to our softc in our attach
* routine when we create the /dev entry.
*/
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
struct mrsas_softc *sc;
sc = dev->si_drv1;
return (0);
}
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
struct mrsas_softc *sc;
sc = dev->si_drv1;
return (0);
}
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
struct mrsas_softc *sc;
sc = dev->si_drv1;
return (0);
}
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
struct mrsas_softc *sc;
sc = dev->si_drv1;
return (0);
}
/*
* Register Read/Write Functions
*
*/
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
u_int32_t value)
{
bus_space_tag_t bus_tag = sc->bus_tag;
bus_space_handle_t bus_handle = sc->bus_handle;
bus_space_write_4(bus_tag, bus_handle, offset, value);
}
u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
bus_space_tag_t bus_tag = sc->bus_tag;
bus_space_handle_t bus_handle = sc->bus_handle;
return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}
/*
* Interrupt Disable/Enable/Clear Functions
*
*/
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
u_int32_t mask = 0xFFFFFFFF;
u_int32_t status;
sc->mask_interrupts = 1;
mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
/* Dummy read to force pci flush */
status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
u_int32_t status;
sc->mask_interrupts = 0;
mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
u_int32_t status;
/* Read received interrupt */
status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
/* Not our interrupt, so just return */
if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
return (0);
/* We got a reply interrupt */
return (1);
}
/*
* PCI Support Functions
*
*/
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
struct mrsas_ident *pci_device;
for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
if ((pci_device->vendor == pci_get_vendor(dev)) &&
(pci_device->device == pci_get_device(dev)) &&
((pci_device->subvendor == pci_get_subvendor(dev)) ||
(pci_device->subvendor == 0xffff)) &&
((pci_device->subdevice == pci_get_subdevice(dev)) ||
(pci_device->subdevice == 0xffff)))
return (pci_device);
}
return (NULL);
}
static int
mrsas_probe(device_t dev)
{
static u_int8_t first_ctrl = 1;
struct mrsas_ident *id;
if ((id = mrsas_find_ident(dev)) != NULL) {
if (first_ctrl) {
printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
MRSAS_VERSION);
first_ctrl = 0;
}
device_set_desc(dev, id->desc);
/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
return (-30);
}
return (ENXIO);
}
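/*
* For reference: in <sys/bus.h>, BUS_PROBE_DEFAULT is -20 and
* BUS_PROBE_LOW_PRIORITY is -40, so the -30 returned above does sit
* between the two as the comment in mrsas_probe() says.
*/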
/*
* mrsas_setup_sysctl: setup sysctl values for mrsas
* input: Adapter instance soft state
*
* Setup sysctl entries for mrsas driver.
*/
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
struct sysctl_ctx_list *sysctl_ctx = NULL;
struct sysctl_oid *sysctl_tree = NULL;
char tmpstr[80], tmpstr2[80];
/*
* Setup the sysctl variable so the user can change the debug level
* on the fly.
*/
snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
device_get_unit(sc->mrsas_dev));
snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
if (sysctl_ctx != NULL)
sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
if (sysctl_tree == NULL) {
sysctl_ctx_init(&sc->sysctl_ctx);
sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
CTLFLAG_RD, 0, tmpstr);
if (sc->sysctl_tree == NULL)
return;
sysctl_ctx = &sc->sysctl_ctx;
sysctl_tree = sc->sysctl_tree;
}
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
"Disable the use of OCR");
SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
strlen(MRSAS_VERSION), "driver version");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "reset_count", CTLFLAG_RD,
&sc->reset_count, 0, "number of ocr from start of the day");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "fw_outstanding", CTLFLAG_RD,
&sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
&sc->io_cmds_highwater, 0, "Max FW outstanding commands");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
"Driver debug level");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
0, "Driver IO timeout value in mili-second.");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
&sc->mrsas_fw_fault_check_delay,
0, "FW fault check thread delay in seconds. <default is 1 sec>");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "reset_in_progress", CTLFLAG_RD,
&sc->reset_in_progress, 0, "ocr in progress status");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "block_sync_cache", CTLFLAG_RW,
&sc->block_sync_cache, 0,
"Block SYNC CACHE at driver. <default: 0, send it to FW>");
}
/*
* mrsas_get_tunables: get tunable parameters.
* input: Adapter instance soft state
*
* Get tunable parameters. This will help to debug the driver at boot time.
*/
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
char tmpstr[80];
/* XXX default to some debugging for now */
sc->mrsas_debug = MRSAS_FAULT;
sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
sc->mrsas_fw_fault_check_delay = 1;
sc->reset_count = 0;
sc->reset_in_progress = 0;
sc->block_sync_cache = 0;
/*
* Grab the global variables.
*/
TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
/*
* Grab the global variables.
*/
TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
/* Grab the unit-instance variables */
snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
device_get_unit(sc->mrsas_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
/*
* mrsas_alloc_evt_log_info_cmd: Allocates memory to get event log information.
* Used to get sequence number at driver load time.
* input: Adapter soft state
*
* Allocates DMAable memory for the event log info internal command.
*/
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
int el_info_size;
/* Allocate get event log info command */
el_info_size = sizeof(struct mrsas_evt_log_info);
if (bus_dma_tag_create(sc->mrsas_parent_tag,
1, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
el_info_size,
1,
el_info_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->el_info_tag)) {
device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
return (ENOMEM);
}
if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
sc->el_info_mem, el_info_size, mrsas_addr_cb,
&sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
return (ENOMEM);
}
memset(sc->el_info_mem, 0, el_info_size);
return (0);
}
/*
* mrsas_free_evt_log_info_cmd: Free memory for the event log info command
* input: Adapter soft state
*
* Deallocates memory for the event log info internal command.
*/
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
if (sc->el_info_phys_addr)
bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
if (sc->el_info_mem != NULL)
bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
if (sc->el_info_tag != NULL)
bus_dma_tag_destroy(sc->el_info_tag);
}
/*
* mrsas_get_seq_num: Get latest event sequence number
* @sc: Adapter soft state
* @eli: Firmware event log sequence number information.
*
* Firmware maintains a log of all events in a non-volatile area.
* Driver gets the sequence number using DCMD
* "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
*/
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
struct mrsas_evt_log_info *eli)
{
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
u_int8_t do_ocr = 1, retcode = 0;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
return -ENOMEM;
}
dcmd = &cmd->frame->dcmd;
if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
mrsas_release_mfi_cmd(cmd);
return -ENOMEM;
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
retcode = mrsas_issue_blocked_cmd(sc, cmd);
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
do_ocr = 0;
/*
* Copy the data back into the caller's buffer
*/
memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
mrsas_free_evt_log_info_cmd(sc);
dcmd_timeout:
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
else
mrsas_release_mfi_cmd(cmd);
return retcode;
}
/*
* mrsas_register_aen: Register for asynchronous event notification
* @sc: Adapter soft state
* @seq_num: Starting sequence number
* @class_locale: Class of the event
*
* This function subscribes for events beyond the @seq_num
* and type @class_locale.
*
*/
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
u_int32_t class_locale_word)
{
int ret_val;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
union mrsas_evt_class_locale curr_aen;
union mrsas_evt_class_locale prev_aen;
/*
* If there is an AEN pending already (aen_cmd), check if the
* class_locale of that pending AEN is inclusive of the new AEN
* request we currently have. If it is, then we don't have to do
* anything. In other words, whichever events the current AEN request
* is subscribing to, have already been subscribed to. If the old_cmd
* is _not_ inclusive, then we have to abort that command, form a
* class_locale that is a superset of both old and current and re-issue
* to the FW
*/
curr_aen.word = class_locale_word;
if (sc->aen_cmd) {
prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously
* registered, then new registration requests for higher
* classes need not be sent to FW. They are automatically
* included. Locale numbers don't have such hierarchy. They
* are bitmap values
*/
if ((prev_aen.members.class <= curr_aen.members.class) &&
!((prev_aen.members.locale & curr_aen.members.locale) ^
curr_aen.members.locale)) {
/*
* Previously issued event registration includes
* current request. Nothing to do.
*/
return 0;
} else {
curr_aen.members.locale |= prev_aen.members.locale;
if (prev_aen.members.class < curr_aen.members.class)
curr_aen.members.class = prev_aen.members.class;
sc->aen_cmd->abort_aen = 1;
ret_val = mrsas_issue_blocked_abort_cmd(sc,
sc->aen_cmd);
if (ret_val) {
printf("mrsas: Failed to abort previous AEN command\n");
return ret_val;
} else
sc->aen_cmd = NULL;
}
}
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd)
return ENOMEM;
dcmd = &cmd->frame->dcmd;
memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
/*
* Prepare DCMD for aen registration
*/
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
dcmd->mbox.w[0] = seq_num;
sc->last_seq_num = seq_num;
dcmd->mbox.w[1] = curr_aen.word;
dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
if (sc->aen_cmd != NULL) {
mrsas_release_mfi_cmd(cmd);
return 0;
}
/*
* Store reference to the cmd used to register for AEN. When an
* application wants us to register for AEN, we have to abort this
* cmd and re-register with a new EVENT LOCALE supplied by that app
*/
sc->aen_cmd = cmd;
/*
* Issue the aen registration frame
*/
if (mrsas_issue_dcmd(sc, cmd)) {
device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
return (1);
}
return 0;
}
/*
* mrsas_start_aen: Subscribes to AEN during driver load time
* @instance: Adapter soft state
*/
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
struct mrsas_evt_log_info eli;
union mrsas_evt_class_locale class_locale;
/* Get the latest sequence number from FW */
memset(&eli, 0, sizeof(eli));
if (mrsas_get_seq_num(sc, &eli))
return -1;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
return mrsas_register_aen(sc, eli.newest_seq_num + 1,
class_locale.word);
}
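/*
* MR_EVT_CLASS_DEBUG is the lowest-valued event class, so together with
* MR_EVT_LOCALE_ALL the registration above subscribes to every event the
* firmware can report (a smaller class value is inclusive of all higher
* ones, as described in mrsas_register_aen()).
*/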
/*
* mrsas_setup_msix: Allocate MSI-x vectors
* @sc: adapter soft state
*/
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
int i;
for (i = 0; i < sc->msix_vectors; i++) {
sc->irq_context[i].sc = sc;
sc->irq_context[i].MSIxIndex = i;
sc->irq_id[i] = i + 1;
sc->mrsas_irq[i] = bus_alloc_resource_any
(sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
,RF_ACTIVE);
if (sc->mrsas_irq[i] == NULL) {
device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
goto irq_alloc_failed;
}
if (bus_setup_intr(sc->mrsas_dev,
sc->mrsas_irq[i],
INTR_MPSAFE | INTR_TYPE_CAM,
NULL, mrsas_isr, &sc->irq_context[i],
&sc->intr_handle[i])) {
device_printf(sc->mrsas_dev,
"Cannot set up MSI-x interrupt handler\n");
goto irq_alloc_failed;
}
}
return SUCCESS;
irq_alloc_failed:
mrsas_teardown_intr(sc);
return (FAIL);
}
/*
* mrsas_allocate_msix: Setup MSI-x vectors
* @sc: adapter soft state
*/
static int
mrsas_allocate_msix(struct mrsas_softc *sc)
{
if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
" of vectors\n", sc->msix_vectors);
} else {
device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
goto irq_alloc_failed;
}
return SUCCESS;
irq_alloc_failed:
mrsas_teardown_intr(sc);
return (FAIL);
}
/*
* mrsas_attach: PCI entry point
* input: pointer to device struct
*
* Performs setup of PCI and registers, initializes mutexes and linked lists,
* registers interrupts and CAM, and initializes the adapter/controller to
* its proper state.
*/
static int
mrsas_attach(device_t dev)
{
struct mrsas_softc *sc = device_get_softc(dev);
uint32_t cmd, bar, error;
memset(sc, 0, sizeof(struct mrsas_softc));
/* Look up our softc and initialize its fields. */
sc->mrsas_dev = dev;
sc->device_id = pci_get_device(dev);
if ((sc->device_id == MRSAS_INVADER) ||
(sc->device_id == MRSAS_FURY) ||
(sc->device_id == MRSAS_INTRUDER) ||
(sc->device_id == MRSAS_INTRUDER_24) ||
(sc->device_id == MRSAS_CUTLASS_52) ||
(sc->device_id == MRSAS_CUTLASS_53)) {
sc->mrsas_gen3_ctrl = 1;
}
mrsas_get_tunables(sc);
/*
* Set up PCI and registers
*/
cmd = pci_read_config(dev, PCIR_COMMAND, 2);
if ((cmd & PCIM_CMD_PORTEN) == 0) {
return (ENXIO);
}
/* Force the busmaster enable bit on. */
cmd |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, cmd, 2);
bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&(sc->reg_res_id), RF_ACTIVE))
== NULL) {
device_printf(dev, "Cannot allocate PCI registers\n");
goto attach_fail;
}
sc->bus_tag = rman_get_bustag(sc->reg_res);
sc->bus_handle = rman_get_bushandle(sc->reg_res);
/* Initialize mutexes */
mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
/* Initialize linked lists */
TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
mrsas_atomic_set(&sc->fw_outstanding, 0);
mrsas_atomic_set(&sc->target_reset_outstanding, 0);
sc->io_cmds_highwater = 0;
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
sc->UnevenSpanSupport = 0;
sc->msix_enable = 0;
/* Initialize Firmware */
if (mrsas_init_fw(sc) != SUCCESS) {
goto attach_fail_fw;
}
/* Register mrsas to CAM layer */
if ((mrsas_cam_attach(sc) != SUCCESS)) {
goto attach_fail_cam;
}
/* Register IRQs */
if (mrsas_setup_irq(sc) != SUCCESS) {
goto attach_fail_irq;
}
error = mrsas_kproc_create(mrsas_ocr_thread, sc,
&sc->ocr_thread, 0, 0, "mrsas_ocr%d",
device_get_unit(sc->mrsas_dev));
if (error) {
device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
goto attach_fail_ocr_thread;
}
/*
* After FW initialization and OCR thread creation, we defer cdev
* creation and AEN setup to the ICH (config_intrhook) callback.
*/
sc->mrsas_ich.ich_func = mrsas_ich_startup;
sc->mrsas_ich.ich_arg = sc;
if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
device_printf(sc->mrsas_dev, "Config hook is already established\n");
}
mrsas_setup_sysctl(sc);
return SUCCESS;
attach_fail_ocr_thread:
if (sc->ocr_thread_active)
wakeup(&sc->ocr_chan);
attach_fail_irq:
mrsas_teardown_intr(sc);
attach_fail_cam:
mrsas_cam_detach(sc);
attach_fail_fw:
/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
if (sc->msix_enable == 1)
pci_release_msi(sc->mrsas_dev);
mrsas_free_mem(sc);
mtx_destroy(&sc->sim_lock);
mtx_destroy(&sc->aen_lock);
mtx_destroy(&sc->pci_lock);
mtx_destroy(&sc->io_lock);
mtx_destroy(&sc->ioctl_lock);
mtx_destroy(&sc->mpt_cmd_pool_lock);
mtx_destroy(&sc->mfi_cmd_pool_lock);
mtx_destroy(&sc->raidmap_lock);
attach_fail:
if (sc->reg_res) {
bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
sc->reg_res_id, sc->reg_res);
}
return (ENXIO);
}
/*
* Interrupt config hook
*/
static void
mrsas_ich_startup(void *arg)
{
struct mrsas_softc *sc = (struct mrsas_softc *)arg;
/*
* Initialize a counting semaphore to limit the number of concurrent IOCTLs
*/
sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
IOCTL_SEMA_DESCRIPTION);
/* Create a /dev entry for mrsas controller. */
sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
device_get_unit(sc->mrsas_dev));
if (device_get_unit(sc->mrsas_dev) == 0) {
make_dev_alias_p(MAKEDEV_CHECKNAME,
&sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
"megaraid_sas_ioctl_node");
}
if (sc->mrsas_cdev)
sc->mrsas_cdev->si_drv1 = sc;
/*
* Add this controller to mrsas_mgmt_info structure so that it can be
* exported to management applications
*/
if (device_get_unit(sc->mrsas_dev) == 0)
memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
mrsas_mgmt_info.count++;
mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
mrsas_mgmt_info.max_index++;
/* Enable Interrupts */
mrsas_enable_intr(sc);
/* Initiate AEN (Asynchronous Event Notification) */
if (mrsas_start_aen(sc)) {
device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
"Further events from the controller will not be communicated.\n"
"Either there is some problem in the controller"
"or the controller does not support AEN.\n"
"Please contact to the SUPPORT TEAM if the problem persists\n");
}
if (sc->mrsas_ich.ich_arg != NULL) {
device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
config_intrhook_disestablish(&sc->mrsas_ich);
sc->mrsas_ich.ich_arg = NULL;
}
}
/*
* mrsas_detach: De-allocates and teardown resources
* input: pointer to device struct
*
* This function is the entry point for device disconnect and detach.
* It performs memory de-allocations, shutdown of the controller and various
* teardown and destroy resource functions.
*/
static int
mrsas_detach(device_t dev)
{
struct mrsas_softc *sc;
int i = 0;
sc = device_get_softc(dev);
sc->remove_in_progress = 1;
/* Destroy the character device so no other IOCTL will be handled */
if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
destroy_dev(sc->mrsas_linux_emulator_cdev);
destroy_dev(sc->mrsas_cdev);
/*
* Take the instance off the instance array. Note that we will not
* decrement the max_index. We let this array be sparse array
*/
for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
if (mrsas_mgmt_info.sc_ptr[i] == sc) {
mrsas_mgmt_info.count--;
mrsas_mgmt_info.sc_ptr[i] = NULL;
break;
}
}
if (sc->ocr_thread_active)
wakeup(&sc->ocr_chan);
while (sc->reset_in_progress) {
i++;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_INFO,
"[%2d]waiting for OCR to be finished from %s\n", i, __func__);
}
pause("mr_shutdown", hz);
}
i = 0;
while (sc->ocr_thread_active) {
i++;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_INFO,
"[%2d]waiting for "
"mrsas_ocr thread to quit ocr %d\n", i,
sc->ocr_thread_active);
}
pause("mr_shutdown", hz);
}
mrsas_flush_cache(sc);
mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
mrsas_disable_intr(sc);
mrsas_cam_detach(sc);
mrsas_teardown_intr(sc);
mrsas_free_mem(sc);
mtx_destroy(&sc->sim_lock);
mtx_destroy(&sc->aen_lock);
mtx_destroy(&sc->pci_lock);
mtx_destroy(&sc->io_lock);
mtx_destroy(&sc->ioctl_lock);
mtx_destroy(&sc->mpt_cmd_pool_lock);
mtx_destroy(&sc->mfi_cmd_pool_lock);
mtx_destroy(&sc->raidmap_lock);
/* Wait for all the semaphores to be released */
while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
pause("mr_shutdown", hz);
/* Destroy the counting semaphore created for Ioctl */
sema_destroy(&sc->ioctl_count_sema);
if (sc->reg_res) {
bus_release_resource(sc->mrsas_dev,
SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
}
if (sc->sysctl_tree != NULL)
sysctl_ctx_free(&sc->sysctl_ctx);
return (0);
}
/*
* mrsas_free_mem: Frees allocated memory
* input: Adapter instance soft state
*
* This function is called from mrsas_detach() to free previously allocated
* memory.
*/
void
mrsas_free_mem(struct mrsas_softc *sc)
{
int i;
u_int32_t max_cmd;
struct mrsas_mfi_cmd *mfi_cmd;
struct mrsas_mpt_cmd *mpt_cmd;
/*
* Free RAID map memory
*/
for (i = 0; i < 2; i++) {
if (sc->raidmap_phys_addr[i])
bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
if (sc->raidmap_mem[i] != NULL)
bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
if (sc->raidmap_tag[i] != NULL)
bus_dma_tag_destroy(sc->raidmap_tag[i]);
if (sc->ld_drv_map[i] != NULL)
free(sc->ld_drv_map[i], M_MRSAS);
}
for (i = 0; i < 2; i++) {
if (sc->jbodmap_phys_addr[i])
bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
if (sc->jbodmap_mem[i] != NULL)
bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
if (sc->jbodmap_tag[i] != NULL)
bus_dma_tag_destroy(sc->jbodmap_tag[i]);
}
/*
* Free version buffer memory
*/
if (sc->verbuf_phys_addr)
bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
if (sc->verbuf_mem != NULL)
bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
if (sc->verbuf_tag != NULL)
bus_dma_tag_destroy(sc->verbuf_tag);
/*
* Free sense buffer memory
*/
if (sc->sense_phys_addr)
bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
if (sc->sense_mem != NULL)
bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
if (sc->sense_tag != NULL)
bus_dma_tag_destroy(sc->sense_tag);
/*
* Free chain frame memory
*/
if (sc->chain_frame_phys_addr)
bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
if (sc->chain_frame_mem != NULL)
bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
if (sc->chain_frame_tag != NULL)
bus_dma_tag_destroy(sc->chain_frame_tag);
/*
* Free IO Request memory
*/
if (sc->io_request_phys_addr)
bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
if (sc->io_request_mem != NULL)
bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
if (sc->io_request_tag != NULL)
bus_dma_tag_destroy(sc->io_request_tag);
/*
* Free Reply Descriptor memory
*/
if (sc->reply_desc_phys_addr)
bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
if (sc->reply_desc_mem != NULL)
bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
if (sc->reply_desc_tag != NULL)
bus_dma_tag_destroy(sc->reply_desc_tag);
/*
* Free event detail memory
*/
if (sc->evt_detail_phys_addr)
bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
if (sc->evt_detail_mem != NULL)
bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
if (sc->evt_detail_tag != NULL)
bus_dma_tag_destroy(sc->evt_detail_tag);
/*
* Free MFI frames
*/
if (sc->mfi_cmd_list) {
for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
mfi_cmd = sc->mfi_cmd_list[i];
mrsas_free_frame(sc, mfi_cmd);
}
}
if (sc->mficmd_frame_tag != NULL)
bus_dma_tag_destroy(sc->mficmd_frame_tag);
/*
* Free MPT internal command list
*/
max_cmd = sc->max_fw_cmds;
if (sc->mpt_cmd_list) {
for (i = 0; i < max_cmd; i++) {
mpt_cmd = sc->mpt_cmd_list[i];
bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
free(sc->mpt_cmd_list[i], M_MRSAS);
}
free(sc->mpt_cmd_list, M_MRSAS);
sc->mpt_cmd_list = NULL;
}
/*
* Free MFI internal command list
*/
if (sc->mfi_cmd_list) {
for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
free(sc->mfi_cmd_list[i], M_MRSAS);
}
free(sc->mfi_cmd_list, M_MRSAS);
sc->mfi_cmd_list = NULL;
}
/*
* Free request descriptor memory
*/
free(sc->req_desc, M_MRSAS);
sc->req_desc = NULL;
/*
* Destroy parent tag
*/
if (sc->mrsas_parent_tag != NULL)
bus_dma_tag_destroy(sc->mrsas_parent_tag);
/*
* Free ctrl_info memory
*/
if (sc->ctrl_info != NULL)
free(sc->ctrl_info, M_MRSAS);
}
/*
* mrsas_teardown_intr: Teardown interrupt
* input: Adapter instance soft state
*
* This function is called from mrsas_detach() to tear down and release the
* bus interrupt resource.
*/
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
int i;
if (!sc->msix_enable) {
if (sc->intr_handle[0])
bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
if (sc->mrsas_irq[0] != NULL)
bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
sc->irq_id[0], sc->mrsas_irq[0]);
sc->intr_handle[0] = NULL;
} else {
for (i = 0; i < sc->msix_vectors; i++) {
if (sc->intr_handle[i])
bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
sc->intr_handle[i]);
if (sc->mrsas_irq[i] != NULL)
bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
sc->irq_id[i], sc->mrsas_irq[i]);
sc->intr_handle[i] = NULL;
}
pci_release_msi(sc->mrsas_dev);
}
}
/*
* mrsas_suspend: Suspend entry point
* input: Device struct pointer
*
* This function is the entry point for system suspend from the OS.
*/
static int
mrsas_suspend(device_t dev)
{
/* This will be filled in when the driver has hibernation support */
return (0);
}
/*
* mrsas_resume: Resume entry point
* input: Device struct pointer
*
* This function is the entry point for system resume from the OS.
*/
static int
mrsas_resume(device_t dev)
{
/* This will be filled in when the driver has hibernation support */
return (0);
}
/**
* mrsas_get_softc_instance: Find softc instance based on cmd type
*
* This function returns the softc instance based on the cmd type.
* In some cases the application fires the ioctl on the required management
* instance without providing host_no. Use cdev->si_drv1 to get the softc
* instance in those cases; otherwise get the softc instance from the
* host_no provided by the application in the user data.
*/
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
{
struct mrsas_softc *sc = NULL;
struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
if (cmd == MRSAS_IOC_GET_PCI_INFO) {
sc = dev->si_drv1;
} else {
/*
* get the Host number & the softc from data sent by the
* Application
*/
sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
if (sc == NULL)
printf("There is no Controller number %d\n",
user_ioc->host_no);
else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
mrsas_dprint(sc, MRSAS_FAULT,
"Invalid Controller number %d\n", user_ioc->host_no);
}
return sc;
}
/*
* mrsas_ioctl: IOCtl commands entry point.
*
* This function is the entry point for IOCtls from the OS. It calls the
* appropriate function for processing depending on the command received.
*/
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
struct thread *td)
{
struct mrsas_softc *sc;
int ret = 0, i = 0;
MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
sc = mrsas_get_softc_instance(dev, cmd, arg);
if (!sc)
return ENOENT;
if (sc->remove_in_progress ||
(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
mrsas_dprint(sc, MRSAS_INFO,
"Either driver remove or shutdown called or "
"HW is in unrecoverable critical error state.\n");
return ENOENT;
}
mtx_lock_spin(&sc->ioctl_lock);
if (!sc->reset_in_progress) {
mtx_unlock_spin(&sc->ioctl_lock);
goto do_ioctl;
}
mtx_unlock_spin(&sc->ioctl_lock);
while (sc->reset_in_progress) {
i++;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_INFO,
"[%2d]waiting for OCR to be finished from %s\n", i, __func__);
}
pause("mr_ioctl", hz);
}
do_ioctl:
switch (cmd) {
case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
/*
* Decrement the Ioctl counting Semaphore before getting an
* mfi command
*/
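/*
* Sizing note (inferred from the sema_value() check in mrsas_detach(), which
* waits for the count to return to MRSAS_MAX_IOCTL_CMDS): at most that many
* firmware pass-through ioctls can hold MFI commands at the same time.
*/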
sema_wait(&sc->ioctl_count_sema);
ret = mrsas_passthru(sc, (void *)arg, cmd);
/* Increment the Ioctl counting semaphore value */
sema_post(&sc->ioctl_count_sema);
break;
case MRSAS_IOC_SCAN_BUS:
ret = mrsas_bus_scan(sc);
break;
case MRSAS_IOC_GET_PCI_INFO:
pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
"pci device no: %d, pci function no: %d,"
"pci domain ID: %d\n",
pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
pciDrvInfo->functionNumber, pciDrvInfo->domainID);
ret = 0;
break;
default:
mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
ret = ENOENT;
}
return (ret);
}
/*
* mrsas_poll: poll entry point for mrsas driver fd
*
* This function is the entry point for poll from the OS. It waits for AEN
* events to be triggered by the controller and notifies the caller back.
*/
static int
mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
{
struct mrsas_softc *sc;
int revents = 0;
sc = dev->si_drv1;
if (poll_events & (POLLIN | POLLRDNORM)) {
if (sc->mrsas_aen_triggered) {
revents |= poll_events & (POLLIN | POLLRDNORM);
}
}
if (revents == 0) {
if (poll_events & (POLLIN | POLLRDNORM)) {
mtx_lock(&sc->aen_lock);
sc->mrsas_poll_waiting = 1;
selrecord(td, &sc->mrsas_select);
mtx_unlock(&sc->aen_lock);
}
}
return revents;
}
/*
* mrsas_setup_irq: Set up interrupt
* input: Adapter instance soft state
*
* This function sets up the interrupt as a bus resource, with flags
* indicating that the resource permits contemporaneous sharing and is
* activated atomically.
*/
static int
mrsas_setup_irq(struct mrsas_softc *sc)
{
if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
else {
device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
sc->irq_context[0].sc = sc;
sc->irq_context[0].MSIxIndex = 0;
sc->irq_id[0] = 0;
sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
if (sc->mrsas_irq[0] == NULL) {
device_printf(sc->mrsas_dev, "Cannot allocate legcay"
"interrupt\n");
return (FAIL);
}
if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
&sc->irq_context[0], &sc->intr_handle[0])) {
device_printf(sc->mrsas_dev, "Cannot set up legacy"
"interrupt\n");
return (FAIL);
}
}
return (0);
}
/*
* mrsas_isr: ISR entry point
* input: argument pointer
*
* This function is the interrupt service routine entry point. There are two
* types of interrupts, state change interrupt and response interrupt. If an
* interrupt is not ours, we just return.
*/
void
mrsas_isr(void *arg)
{
struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
struct mrsas_softc *sc = irq_context->sc;
int status = 0;
if (sc->mask_interrupts)
return;
if (!sc->msix_vectors) {
status = mrsas_clear_intr(sc);
if (!status)
return;
}
/* If we are resetting, bail */
if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
printf(" Entered into ISR when OCR is going active. \n");
mrsas_clear_intr(sc);
return;
}
/* Process for reply request and clear response interrupt */
if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
mrsas_clear_intr(sc);
return;
}
/*
* mrsas_complete_cmd: Process reply request
* input: Adapter instance soft state
*
* This function is called from mrsas_isr() to process reply request and clear
* response interrupt. Processing of the reply request entails walking
* through the reply descriptor array for the command request pended from
* Firmware. We look at the Function field to determine the command type and
* perform the appropriate action. Before we return, we clear the response
* interrupt.
*/
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
Mpi2ReplyDescriptorsUnion_t *desc;
MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
struct mrsas_mpt_cmd *cmd_mpt;
struct mrsas_mfi_cmd *cmd_mfi;
u_int8_t reply_descript_type;
u_int16_t smid, num_completed;
u_int8_t status, extStatus;
union desc_value desc_val;
PLD_LOAD_BALANCE_INFO lbinfo;
u_int32_t device_id;
int threshold_reply_count = 0;
#if TM_DEBUG
MR_TASK_MANAGE_REQUEST *mr_tm_req;
MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif
/* If we have a hardware error, there is no need to continue */
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
return (DONE);
desc = sc->reply_desc_mem;
desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
+ sc->last_reply_idx[MSIxIndex];
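/*
* Layout note: reply_desc_mem holds one contiguous ring per MSI-X vector,
* each reply_q_depth descriptors long (reply_alloc_sz bytes). The pointer
* arithmetic above selects ring MSIxIndex and starts at that ring's read
* index, last_reply_idx[MSIxIndex].
*/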
reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
desc_val.word = desc->Words;
num_completed = 0;
reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
/* Find our reply descriptor for the command and process */
while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
smid = reply_desc->SMID;
cmd_mpt = sc->mpt_cmd_list[smid - 1];
scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
status = scsi_io_req->RaidContext.status;
extStatus = scsi_io_req->RaidContext.exStatus;
switch (scsi_io_req->Function) {
case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
&mr_tm_req->TmRequest;
device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
"TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
wakeup_one((void *)&sc->ocr_chan);
break;
case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
lbinfo = &sc->load_balance_info[device_id];
if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
}
/* Fall thru and complete IO */
case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
mrsas_cmd_done(sc, cmd_mpt);
scsi_io_req->RaidContext.status = 0;
scsi_io_req->RaidContext.exStatus = 0;
mrsas_atomic_dec(&sc->fw_outstanding);
break;
case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
/*
* Make sure NOT to release the MFI command from the called
* function's context if it was fired with an issue_polled call.
* Also make sure that the issue_polled call is only used when
* the INTERRUPT IS DISABLED.
*/
if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
mrsas_release_mfi_cmd(cmd_mfi);
else
mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
break;
}
sc->last_reply_idx[MSIxIndex]++;
if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
sc->last_reply_idx[MSIxIndex] = 0;
desc->Words = ~((uint64_t)0x00); /* set it back to all
* 0xFFFFFFFFs */
num_completed++;
threshold_reply_count++;
/* Get the next reply descriptor */
if (!sc->last_reply_idx[MSIxIndex]) {
desc = sc->reply_desc_mem;
desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
} else
desc++;
reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
desc_val.word = desc->Words;
reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
break;
/*
* Write to reply post index after completing threshold reply
* count and still there are more replies in reply queue
* pending to be completed.
*/
if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
if (sc->msix_enable) {
if (sc->mrsas_gen3_ctrl)
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
else
mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
sc->last_reply_idx[MSIxIndex]);
} else
mrsas_write_reg(sc, offsetof(mrsas_reg_set,
reply_post_host_index), sc->last_reply_idx[0]);
threshold_reply_count = 0;
}
}
/* No match, just return */
if (num_completed == 0)
return (DONE);
/* Clear response interrupt */
if (sc->msix_enable) {
if (sc->mrsas_gen3_ctrl) {
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
} else
mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
sc->last_reply_idx[MSIxIndex]);
} else
mrsas_write_reg(sc, offsetof(mrsas_reg_set,
reply_post_host_index), sc->last_reply_idx[0]);
return (0);
}
/*
* mrsas_map_mpt_cmd_status: Map FW command status to CAM CCB status.
* input: Adapter instance soft state
*
* This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
* It checks the command status and maps the appropriate CAM status for the
* CCB.
*/
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
struct mrsas_softc *sc = cmd->sc;
u_int8_t *sense_data;
switch (status) {
case MFI_STAT_OK:
cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
break;
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_SCSI_DONE_WITH_ERROR:
cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
if (sense_data) {
/* For now just copy 18 bytes back */
memcpy(sense_data, cmd->sense, 18);
cmd->ccb_ptr->csio.sense_len = 18;
cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
}
break;
case MFI_STAT_LD_OFFLINE:
case MFI_STAT_DEVICE_NOT_FOUND:
if (cmd->ccb_ptr->ccb_h.target_lun)
cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
else
cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
break;
case MFI_STAT_CONFIG_SEQ_MISMATCH:
cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
break;
default:
device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
cmd->ccb_ptr->csio.scsi_status = status;
}
return;
}
/*
* mrsas_alloc_mem: Allocate DMAable memory
* input: Adapter instance soft state
*
* This function creates the parent DMA tag and allocates DMAable memory. The
* DMA tag describes the constraints of the DMA mapping. The allocated memory
* is mapped into kernel virtual address space; the callback argument receives
* the physical memory address.
*/
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
chain_frame_size, evt_detail_size, count;
/*
* Allocate parent DMA tag
*/
if (bus_dma_tag_create(NULL, /* parent */
1, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MAXPHYS, /* maxsize */
sc->max_num_sge, /* nsegments */
MAXPHYS, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->mrsas_parent_tag /* tag */
)) {
device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
return (ENOMEM);
}
/*
* Allocate for version buffer
*/
verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
if (bus_dma_tag_create(sc->mrsas_parent_tag,
1, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
verbuf_size,
1,
verbuf_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->verbuf_tag)) {
device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
return (ENOMEM);
}
bzero(sc->verbuf_mem, verbuf_size);
if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
return (ENOMEM);
}
/*
* Allocate IO Request Frames
*/
io_req_size = sc->io_frames_alloc_sz;
if (bus_dma_tag_create(sc->mrsas_parent_tag,
16, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
io_req_size,
1,
io_req_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->io_request_tag)) {
device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
return (ENOMEM);
}
bzero(sc->io_request_mem, io_req_size);
if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
sc->io_request_mem, io_req_size, mrsas_addr_cb,
&sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
return (ENOMEM);
}
/*
* Allocate Chain Frames
*/
chain_frame_size = sc->chain_frames_alloc_sz;
if (bus_dma_tag_create(sc->mrsas_parent_tag,
4, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
chain_frame_size,
1,
chain_frame_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->chain_frame_tag)) {
device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
return (ENOMEM);
}
bzero(sc->chain_frame_mem, chain_frame_size);
if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
&sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
return (ENOMEM);
}
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/*
* Allocate Reply Descriptor Array
*/
reply_desc_size = sc->reply_alloc_sz * count;
if (bus_dma_tag_create(sc->mrsas_parent_tag,
16, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
reply_desc_size,
1,
reply_desc_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->reply_desc_tag)) {
device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
return (ENOMEM);
}
if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
&sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
return (ENOMEM);
}
/*
* Allocate Sense Buffer Array. Keep in lower 4GB
*/
sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
if (bus_dma_tag_create(sc->mrsas_parent_tag,
64, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
sense_size,
1,
sense_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->sense_tag)) {
device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
return (ENOMEM);
}
if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
return (ENOMEM);
}
/*
* Allocate for Event detail structure
*/
evt_detail_size = sizeof(struct mrsas_evt_detail);
if (bus_dma_tag_create(sc->mrsas_parent_tag,
1, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
evt_detail_size,
1,
evt_detail_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->evt_detail_tag)) {
device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
return (ENOMEM);
}
bzero(sc->evt_detail_mem, evt_detail_size);
if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
&sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
return (ENOMEM);
}
/*
* Create a dma tag for data buffers; size will be the maximum
* possible I/O size (280kB).
*/
if (bus_dma_tag_create(sc->mrsas_parent_tag,
1,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
MAXPHYS,
sc->max_num_sge, /* nsegments */
MAXPHYS,
BUS_DMA_ALLOCNOW,
busdma_lock_mutex,
&sc->io_lock,
&sc->data_tag)) {
device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
return (ENOMEM);
}
return (0);
}
/*
* mrsas_addr_cb: Callback function of bus_dmamap_load()
* input: callback argument, machine dependent type
* that describes DMA segments, number of segments, error code
*
* This callback lets the driver receive the mapping information resulting
* from bus_dmamap_load(). Only the address of the first DMA segment is
* used; it is saved for the caller.
*/
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
bus_addr_t *addr;
addr = arg;
*addr = segs[0].ds_addr;
}
/*
* mrsas_setup_raidmap: Set up RAID map.
* input: Adapter instance soft state
*
* Allocate DMA memory for the RAID maps and perform setup.
*/
static int
mrsas_setup_raidmap(struct mrsas_softc *sc)
{
int i;
for (i = 0; i < 2; i++) {
sc->ld_drv_map[i] =
(void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
/* Do Error handling */
if (!sc->ld_drv_map[i]) {
device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
if (i == 1)
free(sc->ld_drv_map[0], M_MRSAS);
/* ABORT driver initialization */
goto ABORT;
}
}
for (i = 0; i < 2; i++) {
if (bus_dma_tag_create(sc->mrsas_parent_tag,
4, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
sc->max_map_sz,
1,
sc->max_map_sz,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->raidmap_tag[i])) {
device_printf(sc->mrsas_dev,
"Cannot allocate raid map tag.\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->raidmap_tag[i],
(void **)&sc->raidmap_mem[i],
BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
device_printf(sc->mrsas_dev,
"Cannot allocate raidmap memory.\n");
return (ENOMEM);
}
bzero(sc->raidmap_mem[i], sc->max_map_sz);
if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
sc->raidmap_mem[i], sc->max_map_sz,
mrsas_addr_cb, &sc->raidmap_phys_addr[i],
BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
return (ENOMEM);
}
if (!sc->raidmap_mem[i]) {
device_printf(sc->mrsas_dev,
"Cannot allocate memory for raid map.\n");
return (ENOMEM);
}
}
if (!mrsas_get_map_info(sc))
mrsas_sync_map_info(sc);
return (0);
ABORT:
return (1);
}
/**
* megasas_setup_jbod_map - setup jbod map for FP seq_number.
* @sc: Adapter soft state
*
* Sets sc->use_seqnum_jbod_fp based on whether the JBOD map sync succeeds.
*/
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
int i;
uint32_t pd_seq_map_sz;
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
sc->use_seqnum_jbod_fp = 0;
return;
}
if (sc->jbodmap_mem[0])
goto skip_alloc;
for (i = 0; i < 2; i++) {
if (bus_dma_tag_create(sc->mrsas_parent_tag,
4, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
pd_seq_map_sz,
1,
pd_seq_map_sz,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->jbodmap_tag[i])) {
device_printf(sc->mrsas_dev,
"Cannot allocate jbod map tag.\n");
return;
}
if (bus_dmamem_alloc(sc->jbodmap_tag[i],
(void **)&sc->jbodmap_mem[i],
BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
device_printf(sc->mrsas_dev,
"Cannot allocate jbod map memory.\n");
return;
}
bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
sc->jbodmap_mem[i], pd_seq_map_sz,
mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
return;
}
if (!sc->jbodmap_mem[i]) {
device_printf(sc->mrsas_dev,
"Cannot allocate memory for jbod map.\n");
sc->use_seqnum_jbod_fp = 0;
return;
}
}
skip_alloc:
if (!megasas_sync_pd_seq_num(sc, false) &&
!megasas_sync_pd_seq_num(sc, true))
sc->use_seqnum_jbod_fp = 1;
else
sc->use_seqnum_jbod_fp = 0;
device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
/*
* mrsas_init_fw: Initialize Firmware
* input: Adapter soft state
*
* Calls transition_to_ready() to make sure Firmware is in operational state
* and calls mrsas_init_adapter() to send the IOC_INIT command to Firmware.
* It issues internal commands to get the controller info after the IOC_INIT
* command response is received from Firmware. Note: code relating to
* get_pdlist, get_ld_list and max_sectors is currently not being used; it
* is left here as a placeholder.
*/
static int
mrsas_init_fw(struct mrsas_softc *sc)
{
int ret, loop, ocr = 0;
u_int32_t max_sectors_1;
u_int32_t max_sectors_2;
u_int32_t tmp_sectors;
u_int32_t scratch_pad_2;
int msix_enable = 0;
int fw_msix_count = 0;
/* Make sure Firmware is ready */
ret = mrsas_transition_to_ready(sc, ocr);
if (ret != SUCCESS) {
return (ret);
}
/* MSI-x index 0- reply post host index register */
sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
/* Check if MSI-X is supported while in ready state */
msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
if (msix_enable) {
scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad_2));
/* Check max MSI-X vectors */
if (sc->device_id == MRSAS_TBOLT) {
sc->msix_vectors = (scratch_pad_2
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = sc->msix_vectors;
} else {
/* Invader/Fury supports 96 MSI-X vectors */
sc->msix_vectors = ((scratch_pad_2
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
fw_msix_count = sc->msix_vectors;
for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
loop++) {
sc->msix_reg_offset[loop] =
MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(loop * 0x10);
}
}
/* Don't bother allocating more MSI-X vectors than cpus */
sc->msix_vectors = min(sc->msix_vectors,
mp_ncpus);
/* Allocate MSI-x vectors */
if (mrsas_allocate_msix(sc) == SUCCESS)
sc->msix_enable = 1;
else
sc->msix_enable = 0;
device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
"Online CPU %d Current MSIX <%d>\n",
fw_msix_count, mp_ncpus, sc->msix_vectors);
}
if (mrsas_init_adapter(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
return (1);
}
/* Allocate internal commands for pass-thru */
if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
return (1);
}
sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
if (!sc->ctrl_info) {
device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
return (1);
}
/*
* Get the controller info from FW, so that the MAX VD support
* availability can be decided.
*/
if (mrsas_get_ctrl_info(sc)) {
device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
return (1);
}
sc->secure_jbod_support =
(u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
if (sc->secure_jbod_support)
device_printf(sc->mrsas_dev, "FW supports SED \n");
if (sc->use_seqnum_jbod_fp)
device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
if (mrsas_setup_raidmap(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
"There seems to be some problem in the controller\n"
"Please contact to the SUPPORT TEAM if the problem persists\n");
}
megasas_setup_jbod_map(sc);
/* For pass-thru, get PD/LD list and controller info */
memset(sc->pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
if (mrsas_get_pd_list(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Get PD list failed.\n");
return (1);
}
memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
if (mrsas_get_ld_list(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
return (1);
}
/*
* Compute the max allowed sectors per IO: the controller info has
* two limits on max sectors, and the driver should use the minimum of
* the two.
*
* 1 << stripe_sz_ops.min = max sectors per strip
*
* Note that older firmware (earlier than FW version 30) didn't report the
* information needed to calculate max_sectors_1, so that number always
* ended up as zero.
*/
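/*
* Worked example with illustrative values (not taken from any particular
* controller): stripe_sz_ops.min == 7 gives a strip of 1 << 7 = 128 sectors;
* with max_strips_per_io == 42, max_sectors_1 = 128 * 42 = 5376, which is
* then clamped against max_request_size and the SGE-derived limit below.
*/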
tmp_sectors = 0;
max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
sc->ctrl_info->max_strips_per_io;
max_sectors_2 = sc->ctrl_info->max_request_size;
tmp_sectors = min(max_sectors_1, max_sectors_2);
sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
sc->max_sectors_per_req = tmp_sectors;
sc->disableOnlineCtrlReset =
sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
sc->UnevenSpanSupport =
sc->ctrl_info->adapterOperations2.supportUnevenSpans;
if (sc->UnevenSpanSupport) {
device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
sc->UnevenSpanSupport);
if (MR_ValidateMapInfo(sc))
sc->fast_path_io = 1;
else
sc->fast_path_io = 0;
}
return (0);
}
/*
* mrsas_init_adapter: Initializes the adapter/controller
* input: Adapter soft state
*
* Prepares for issuing the IOC Init cmd to FW to initialize the
* ROC/controller. The FW register is read to determine the number of
* commands that are supported. All memory allocations for IO are based on
* max_cmd. Appropriate calculations are performed in this function.
*/
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
uint32_t status;
u_int32_t max_cmd, scratch_pad_2;
int ret;
int i = 0;
/* Read FW status register */
status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
/* Get operational params from status register */
sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
/* Decrement the max supported by 1, to correlate with FW */
sc->max_fw_cmds = sc->max_fw_cmds - 1;
max_cmd = sc->max_fw_cmds;
/* Determine allocation size of command frames */
sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
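/*
* Sizing example with an assumed command count (illustration only): for
* max_cmd = 927, reply_q_depth = ((927 + 1 + 15) / 16 * 16) * 2 = 1856,
* i.e. the count is rounded up to a multiple of 16 and then doubled so the
* reply rings have headroom over the outstanding command depth.
*/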
scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad_2));
/*
* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the
* Firmware supports an extended IO chain frame which is 4 times larger
* than the legacy one. Legacy Firmware: frame size is (8 * 128) = 1K.
* 1M-IO Firmware: frame size is (8 * 128 * 4) = 4K.
*/
if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
sc->max_chain_frame_sz =
((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
* MEGASAS_1MB_IO;
else
sc->max_chain_frame_sz =
((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
* MEGASAS_256K_IO;
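/*
* Example consistent with the comment above (unit sizes are assumptions for
* illustration): if the units field extracted from scratch_pad_2 is 8 and an
* extended unit is 512 bytes, max_chain_frame_sz = 8 * 512 = 4K; with
* 128-byte legacy units the same field would give 8 * 128 = 1K.
*/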
sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
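/*
* Illustration (the 256-byte IO frame size and 128-byte SGL offset are
* assumed values, not read from the headers): max_sge_in_main_msg =
* (256 - 128) / 16 = 8; with a 4K chain frame, max_sge_in_chain =
* 4096 / 16 = 256; max_num_sge = 8 + 256 - 2 = 262. The "- 2" appears to
* reserve main-frame SGE slots (for example for the chain element itself);
* that reading is an assumption.
*/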
mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
sc->max_num_sge, sc->max_chain_frame_sz);
/* Used for pass thru MFI frame (DCMD) */
sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
sizeof(MPI2_SGE_IO_UNION)) / 16;
int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
for (i = 0; i < count; i++)
sc->last_reply_idx[i] = 0;
ret = mrsas_alloc_mem(sc);
if (ret != SUCCESS)
return (ret);
ret = mrsas_alloc_mpt_cmds(sc);
if (ret != SUCCESS)
return (ret);
ret = mrsas_ioc_init(sc);
if (ret != SUCCESS)
return (ret);
return (0);
}
/*
* mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
* input: Adapter soft state
*
* Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
*/
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
int ioc_init_size;
/* Allocate IOC INIT command */
ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
if (bus_dma_tag_create(sc->mrsas_parent_tag,
1, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
ioc_init_size,
1,
ioc_init_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->ioc_init_tag)) {
device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
return (ENOMEM);
}
bzero(sc->ioc_init_mem, ioc_init_size);
if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
&sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
return (ENOMEM);
}
return (0);
}
/*
* mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
* input: Adapter soft state
*
* Deallocates memory of the IOC Init cmd.
*/
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
if (sc->ioc_init_phys_mem)
bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
if (sc->ioc_init_mem != NULL)
bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
if (sc->ioc_init_tag != NULL)
bus_dma_tag_destroy(sc->ioc_init_tag);
}
/*
* mrsas_ioc_init: Sends IOC Init command to FW
* input: Adapter soft state
*
* Issues the IOC Init cmd to FW to initialize the ROC/controller.
*/
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
struct mrsas_init_frame *init_frame;
pMpi2IOCInitRequest_t IOCInitMsg;
MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
bus_addr_t phys_addr;
int i, retcode = 0;
u_int32_t scratch_pad_2;
/* Allocate memory for the IOC INIT command */
if (mrsas_alloc_ioc_cmd(sc)) {
device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
return (1);
}
if (!sc->block_sync_cache) {
scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad_2));
sc->fw_sync_cache_support = (scratch_pad_2 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
}
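/*
* Buffer layout note: the MPI2 IOC INIT request is placed 1K past the start
* of ioc_init_mem, while the MFI init frame built below lives at the start
* of the same buffer and points at the MPI2 request through
* queue_info_new_phys_addr_lo (ioc_init_phys_mem + 1024).
*/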
IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
IOCInitMsg->MsgVersion = MPI2_VERSION;
IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
/* driver supports Extended MSI-X */
if (sc->mrsas_gen3_ctrl) {
init_frame->driver_operations.
mfi_capabilities.support_additional_msix = 1;
}
if (sc->verbuf_mem) {
snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
MRSAS_VERSION);
init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
init_frame->driver_ver_hi = 0;
}
init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
init_frame->queue_info_new_phys_addr_lo = phys_addr;
init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
req_desc.MFAIo.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
mrsas_disable_intr(sc);
mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
/*
* Poll for the Firmware response. While this loop with the DELAY call
* could block the CPU, the time interval for each check is only 1
* millisecond.
*/
if (init_frame->cmd_status == 0xFF) {
for (i = 0; i < (max_wait * 1000); i++) {
if (init_frame->cmd_status == 0xFF)
DELAY(1000);
else
break;
}
}
if (init_frame->cmd_status == 0)
mrsas_dprint(sc, MRSAS_OCR,
"IOC INIT response received from FW.\n");
else {
if (init_frame->cmd_status == 0xFF)
device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
else
device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
retcode = 1;
}
mrsas_free_ioc_cmd(sc);
return (retcode);
}
/*
* mrsas_alloc_mpt_cmds: Allocates the command packets
* input: Adapter instance soft state
*
* This function allocates the internal commands for IOs. Each command that is
* issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
* array is allocated with mrsas_mpt_cmd context. The free commands are
* maintained in a linked list (cmd pool). SMID value range is from 1 to
* max_fw_cmds.
*/
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
int i, j;
u_int32_t max_cmd, count;
struct mrsas_mpt_cmd *cmd;
pMpi2ReplyDescriptorsUnion_t reply_desc;
u_int32_t offset, chain_offset, sense_offset;
bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
u_int8_t *io_req_base, *chain_frame_base, *sense_base;
max_cmd = sc->max_fw_cmds;
sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
if (!sc->req_desc) {
device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
return (ENOMEM);
}
memset(sc->req_desc, 0, sc->request_alloc_sz);
/*
* sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
* Allocate the dynamic array first and then allocate individual
* commands.
*/
- sc->mpt_cmd_list = mallocarray(max_cmd, sizeof(struct mrsas_mpt_cmd *),
- M_MRSAS, M_NOWAIT);
+ sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
if (!sc->mpt_cmd_list) {
device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
return (ENOMEM);
}
memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
for (i = 0; i < max_cmd; i++) {
sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
M_MRSAS, M_NOWAIT);
if (!sc->mpt_cmd_list[i]) {
for (j = 0; j < i; j++)
free(sc->mpt_cmd_list[j], M_MRSAS);
free(sc->mpt_cmd_list, M_MRSAS);
sc->mpt_cmd_list = NULL;
return (ENOMEM);
}
}
io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
sense_base = (u_int8_t *)sc->sense_mem;
sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
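/*
* Carving note: command i below takes slice i of the chain-frame and sense
* regions, and IO frame i + 1 of the IO request region; io_req_base already
* skips the first frame, which lines up with SMIDs starting at 1 as noted
* in the function header.
*/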
for (i = 0; i < max_cmd; i++) {
cmd = sc->mpt_cmd_list[i];
offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
chain_offset = sc->max_chain_frame_sz * i;
sense_offset = MRSAS_SENSE_LEN * i;
memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
cmd->index = i + 1;
cmd->ccb_ptr = NULL;
callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
cmd->sc = sc;
cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
cmd->io_request_phys_addr = io_req_base_phys + offset;
cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
cmd->sense = sense_base + sense_offset;
cmd->sense_phys_addr = sense_base_phys + sense_offset;
if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
return (FAIL);
}
TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
}
/* Initialize reply descriptor array to 0xFFFFFFFF */
reply_desc = sc->reply_desc_mem;
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
reply_desc->Words = MRSAS_ULONG_MAX;
}
return (0);
}
/*
* mrsas_fire_cmd: Sends command to FW
* input: Adapter softstate
* request descriptor address low
* request descriptor address high
*
* This functions fires the command to Firmware by writing to the
* inbound_low_queue_port and inbound_high_queue_port.
*/
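/*
* The pci_lock around the two writes presumably keeps the low/high register
* pair from being interleaved with another CPU's submission; that rationale
* is an inference, not stated in the original comment.
*/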
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
u_int32_t req_desc_hi)
{
mtx_lock(&sc->pci_lock);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
req_desc_lo);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
req_desc_hi);
mtx_unlock(&sc->pci_lock);
}
/*
* mrsas_transition_to_ready: Move FW to Ready state input:
* Adapter instance soft state
*
* During initialization, the FW can potentially be in any one of several
* possible states. If the FW is in the operational or waiting-for-handshake
* states, the driver must take steps to bring it to the ready state.
* Otherwise, it has to wait for the ready state.
*/
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
int i;
u_int8_t max_wait;
u_int32_t val, fw_state;
u_int32_t cur_state;
u_int32_t abs_state, curr_abs_state;
val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
fw_state = val & MFI_STATE_MASK;
max_wait = MRSAS_RESET_WAIT_TIME;
if (fw_state != MFI_STATE_READY)
device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
while (fw_state != MFI_STATE_READY) {
abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
switch (fw_state) {
case MFI_STATE_FAULT:
device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
if (ocr) {
cur_state = MFI_STATE_FAULT;
break;
} else
return -ENODEV;
case MFI_STATE_WAIT_HANDSHAKE:
/* Set the CLR bit in inbound doorbell */
mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
cur_state = MFI_STATE_WAIT_HANDSHAKE;
break;
case MFI_STATE_BOOT_MESSAGE_PENDING:
mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
MFI_INIT_HOTPLUG);
cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
break;
case MFI_STATE_OPERATIONAL:
/*
* Bring it to READY state; assuming max wait 10
* secs
*/
mrsas_disable_intr(sc);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
for (i = 0; i < max_wait * 1000; i++) {
if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
DELAY(1000);
else
break;
}
cur_state = MFI_STATE_OPERATIONAL;
break;
case MFI_STATE_UNDEFINED:
/*
* This state should not last for more than 2
* seconds
*/
cur_state = MFI_STATE_UNDEFINED;
break;
case MFI_STATE_BB_INIT:
cur_state = MFI_STATE_BB_INIT;
break;
case MFI_STATE_FW_INIT:
cur_state = MFI_STATE_FW_INIT;
break;
case MFI_STATE_FW_INIT_2:
cur_state = MFI_STATE_FW_INIT_2;
break;
case MFI_STATE_DEVICE_SCAN:
cur_state = MFI_STATE_DEVICE_SCAN;
break;
case MFI_STATE_FLUSH_CACHE:
cur_state = MFI_STATE_FLUSH_CACHE;
break;
default:
device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
return -ENODEV;
}
/*
* The cur_state should not last for more than max_wait secs
*/
for (i = 0; i < (max_wait * 1000); i++) {
fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad)) & MFI_STATE_MASK);
curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad));
if (abs_state == curr_abs_state)
DELAY(1000);
else
break;
}
/*
* Return error if fw_state hasn't changed after max_wait
*/
if (curr_abs_state == abs_state) {
device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
"in %d secs\n", fw_state, max_wait);
return -ENODEV;
}
}
mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
return 0;
}
/*
* mrsas_get_mfi_cmd: Get a cmd from free command pool
* input: Adapter soft state
*
* This function removes an MFI command from the command list.
*/
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
struct mrsas_mfi_cmd *cmd = NULL;
mtx_lock(&sc->mfi_cmd_pool_lock);
if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
}
mtx_unlock(&sc->mfi_cmd_pool_lock);
return cmd;
}
/*
* mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
* input: Adapter Context.
*
* This function checks the FW status register and the do_timedout_reset
* flag. It will do OCR/Kill Adapter if the FW is in fault state or an IO
* timeout has triggered a reset.
*/
static void
mrsas_ocr_thread(void *arg)
{
struct mrsas_softc *sc;
u_int32_t fw_status, fw_state;
u_int8_t tm_target_reset_failed = 0;
sc = (struct mrsas_softc *)arg;
mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
sc->ocr_thread_active = 1;
mtx_lock(&sc->sim_lock);
for (;;) {
/* Sleep for 1 second and check the queue status */
msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
"mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
if (sc->remove_in_progress ||
sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
mrsas_dprint(sc, MRSAS_OCR,
"Exit due to %s from %s\n",
sc->remove_in_progress ? "Shutdown" :
"Hardware critical error", __func__);
break;
}
fw_status = mrsas_read_reg(sc,
offsetof(mrsas_reg_set, outbound_scratch_pad));
fw_state = fw_status & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
mrsas_atomic_read(&sc->target_reset_outstanding)) {
/* First, freeze further IOs to come to the SIM */
mrsas_xpt_freeze(sc);
/* If this is an IO timeout then go for target reset */
if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
device_printf(sc->mrsas_dev, "Initiating Target RESET "
"because of SCSI IO timeout!\n");
/* Let the remaining IOs complete */
msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
"mrsas_reset_targets", 5 * hz);
/* Try to reset the target device */
if (mrsas_reset_targets(sc) == FAIL)
tm_target_reset_failed = 1;
}
/* If this is a DCMD timeout or FW fault,
* then go for controller reset
*/
if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
(sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
if (tm_target_reset_failed)
device_printf(sc->mrsas_dev, "Initiating OCR because of "
"TM FAILURE!\n");
else
device_printf(sc->mrsas_dev, "Initiating OCR "
"because of %s!\n", sc->do_timedout_reset ?
"DCMD IO Timeout" : "FW fault");
mtx_lock_spin(&sc->ioctl_lock);
sc->reset_in_progress = 1;
mtx_unlock_spin(&sc->ioctl_lock);
sc->reset_count++;
/*
* Wait for the AEN task to be completed if it is running.
*/
mtx_unlock(&sc->sim_lock);
taskqueue_drain(sc->ev_tq, &sc->ev_task);
mtx_lock(&sc->sim_lock);
taskqueue_block(sc->ev_tq);
/* Try to reset the controller */
mrsas_reset_ctrl(sc, sc->do_timedout_reset);
sc->do_timedout_reset = 0;
sc->reset_in_progress = 0;
tm_target_reset_failed = 0;
mrsas_atomic_set(&sc->target_reset_outstanding, 0);
memset(sc->target_reset_pool, 0,
sizeof(sc->target_reset_pool));
taskqueue_unblock(sc->ev_tq);
}
/* Now allow IOs to come to the SIM */
mrsas_xpt_release(sc);
}
}
mtx_unlock(&sc->sim_lock);
sc->ocr_thread_active = 0;
mrsas_kproc_exit(0);
}
/*
* mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
* input: Adapter Context.
*
* This function clears the reply descriptors so that, after OCR, the driver
* and FW discard the old history.
*/
void
mrsas_reset_reply_desc(struct mrsas_softc *sc)
{
int i, count;
pMpi2ReplyDescriptorsUnion_t reply_desc;
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
for (i = 0; i < count; i++)
sc->last_reply_idx[i] = 0;
reply_desc = sc->reply_desc_mem;
for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
reply_desc->Words = MRSAS_ULONG_MAX;
}
}
/*
* mrsas_reset_ctrl: Core function to OCR/Kill adapter.
* input: Adapter Context.
*
* This function runs from thread context so that it can sleep.
* 1. Do not handle OCR if the FW is in a HW critical error state.
* 2. Wait up to 180 seconds for outstanding commands to complete.
* 3. If #2 does not find any outstanding command, the controller is in a
* working state, so skip OCR; otherwise, do OCR/Kill Adapter based on the
* disableOnlineCtrlReset flag.
* 4. At the start of the OCR, return all SCSI commands that have a ccb_ptr
* back to the CAM layer.
* 5. After OCR, re-fire the management commands and move the controller to
* the operational state.
*/
int
mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
int retval = SUCCESS, i, j, retry = 0;
u_int32_t host_diag, abs_state, status_reg, reset_adapter;
union ccb *ccb;
struct mrsas_mfi_cmd *mfi_cmd;
struct mrsas_mpt_cmd *mpt_cmd;
union mrsas_evt_class_locale class_locale;
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
device_printf(sc->mrsas_dev,
"mrsas: Hardware critical error, returning FAIL.\n");
return FAIL;
}
mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
mrsas_disable_intr(sc);
msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
sc->mrsas_fw_fault_check_delay * hz);
/* First try waiting for commands to complete */
if (mrsas_wait_for_outstanding(sc, reset_reason)) {
mrsas_dprint(sc, MRSAS_OCR,
"resetting adapter from %s.\n",
__func__);
/* Now return commands back to the CAM layer */
mtx_unlock(&sc->sim_lock);
for (i = 0; i < sc->max_fw_cmds; i++) {
mpt_cmd = sc->mpt_cmd_list[i];
if (mpt_cmd->ccb_ptr) {
ccb = (union ccb *)(mpt_cmd->ccb_ptr);
ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
mrsas_cmd_done(sc, mpt_cmd);
mrsas_atomic_dec(&sc->fw_outstanding);
}
}
mtx_lock(&sc->sim_lock);
status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad));
abs_state = status_reg & MFI_STATE_MASK;
reset_adapter = status_reg & MFI_RESET_ADAPTER;
if (sc->disableOnlineCtrlReset ||
(abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
mrsas_kill_hba(sc);
retval = FAIL;
goto out;
}
/* Now try to reset the chip */
for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
MPI2_WRSEQ_FLUSH_KEY_VALUE);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
MPI2_WRSEQ_1ST_KEY_VALUE);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
MPI2_WRSEQ_2ND_KEY_VALUE);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
MPI2_WRSEQ_3RD_KEY_VALUE);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
MPI2_WRSEQ_4TH_KEY_VALUE);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
MPI2_WRSEQ_5TH_KEY_VALUE);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
MPI2_WRSEQ_6TH_KEY_VALUE);
/* Check that the diag write enable (DRWE) bit is on */
host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
fusion_host_diag));
retry = 0;
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
DELAY(100 * 1000);
host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
fusion_host_diag));
if (retry++ == 100) {
mrsas_dprint(sc, MRSAS_OCR,
"Host diag unlock failed!\n");
break;
}
}
if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
continue;
/* Send chip reset command */
mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
host_diag | HOST_DIAG_RESET_ADAPTER);
DELAY(3000 * 1000);
/* Make sure reset adapter bit is cleared */
host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
fusion_host_diag));
retry = 0;
while (host_diag & HOST_DIAG_RESET_ADAPTER) {
DELAY(100 * 1000);
host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
fusion_host_diag));
if (retry++ == 1000) {
mrsas_dprint(sc, MRSAS_OCR,
"Diag reset adapter never cleared!\n");
break;
}
}
if (host_diag & HOST_DIAG_RESET_ADAPTER)
continue;
abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad)) & MFI_STATE_MASK;
retry = 0;
while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
DELAY(100 * 1000);
abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad)) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
" state = 0x%x\n", abs_state);
continue;
}
/* Wait for FW to become ready */
if (mrsas_transition_to_ready(sc, 1)) {
mrsas_dprint(sc, MRSAS_OCR,
"mrsas: Failed to transition controller to ready.\n");
continue;
}
mrsas_reset_reply_desc(sc);
if (mrsas_ioc_init(sc)) {
mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
continue;
}
for (j = 0; j < sc->max_fw_cmds; j++) {
mpt_cmd = sc->mpt_cmd_list[j];
if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
/* If not an IOCTL then release the command else re-fire */
if (!mfi_cmd->sync_cmd) {
mrsas_release_mfi_cmd(mfi_cmd);
} else {
req_desc = mrsas_get_request_desc(sc,
mfi_cmd->cmd_id.context.smid - 1);
mrsas_dprint(sc, MRSAS_OCR,
"Re-fire command DCMD opcode 0x%x index %d\n ",
mfi_cmd->frame->dcmd.opcode, j);
if (!req_desc)
device_printf(sc->mrsas_dev,
"Cannot build MPT cmd.\n");
else
mrsas_fire_cmd(sc, req_desc->addr.u.low,
req_desc->addr.u.high);
}
}
}
/* Reset load balance info */
memset(sc->load_balance_info, 0,
sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
if (mrsas_get_ctrl_info(sc)) {
mrsas_kill_hba(sc);
retval = FAIL;
goto out;
}
if (!mrsas_get_map_info(sc))
mrsas_sync_map_info(sc);
megasas_setup_jbod_map(sc);
mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
mrsas_enable_intr(sc);
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
/* Register AEN with FW for last sequence number */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
mtx_unlock(&sc->sim_lock);
if (mrsas_register_aen(sc, sc->last_seq_num,
class_locale.word)) {
device_printf(sc->mrsas_dev,
"ERROR: AEN registration FAILED from OCR !!! "
"Further events from the controller cannot be notified."
"Either there is some problem in the controller"
"or the controller does not support AEN.\n"
"Please contact to the SUPPORT TEAM if the problem persists\n");
}
mtx_lock(&sc->sim_lock);
/* Adapter reset completed successfully */
device_printf(sc->mrsas_dev, "Reset successful\n");
retval = SUCCESS;
goto out;
}
/* Reset failed, kill the adapter */
device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
mrsas_kill_hba(sc);
retval = FAIL;
} else {
mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
mrsas_enable_intr(sc);
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
}
out:
mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
mrsas_dprint(sc, MRSAS_OCR,
"Reset Exit with %d.\n", retval);
return retval;
}
/*
* mrsas_kill_hba: Kill HBA when OCR is not supported
* input: Adapter Context.
*
* This function will kill HBA when OCR is not supported.
*/
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
DELAY(1000 * 1000);
mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
MFI_STOP_ADP);
/* Flush */
mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
mrsas_complete_outstanding_ioctls(sc);
}
/**
* mrsas_complete_outstanding_ioctls: Completes pending IOCTLs after kill_hba
* input: Controller softc
*
* Returns void
*/
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
int i;
struct mrsas_mpt_cmd *cmd_mpt;
struct mrsas_mfi_cmd *cmd_mfi;
u_int32_t count, MSIxIndex;
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
for (i = 0; i < sc->max_fw_cmds; i++) {
cmd_mpt = sc->mpt_cmd_list[i];
if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
cmd_mpt->io_request->RaidContext.status);
}
}
}
}
/*
* mrsas_wait_for_outstanding: Wait for outstanding commands
* input: Adapter Context.
*
* This function will wait for 180 seconds for outstanding commands to be
* completed.
*/
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
int i, outstanding, retval = 0;
u_int32_t fw_state, count, MSIxIndex;
for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
if (sc->remove_in_progress) {
mrsas_dprint(sc, MRSAS_OCR,
"Driver remove or shutdown called.\n");
retval = 1;
goto out;
}
/* Check if firmware is in fault state */
fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad)) & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
mrsas_dprint(sc, MRSAS_OCR,
"Found FW in FAULT state, will reset adapter.\n");
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
mtx_unlock(&sc->sim_lock);
for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
mrsas_complete_cmd(sc, MSIxIndex);
mtx_lock(&sc->sim_lock);
retval = 1;
goto out;
}
if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
mrsas_dprint(sc, MRSAS_OCR,
"DCMD IO TIMEOUT detected, will reset adapter.\n");
retval = 1;
goto out;
}
outstanding = mrsas_atomic_read(&sc->fw_outstanding);
if (!outstanding)
goto out;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
"commands to complete\n", i, outstanding);
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
mtx_unlock(&sc->sim_lock);
for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
mrsas_complete_cmd(sc, MSIxIndex);
mtx_lock(&sc->sim_lock);
}
DELAY(1000 * 1000);
}
if (mrsas_atomic_read(&sc->fw_outstanding)) {
mrsas_dprint(sc, MRSAS_OCR,
" pending commands remain after waiting,"
" will reset adapter.\n");
retval = 1;
}
out:
return retval;
}
/*
* mrsas_release_mfi_cmd: Return a cmd to free command pool
* input: Command packet for return to free cmd pool
*
* This function returns the MFI & MPT command to the command list.
*/
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
struct mrsas_softc *sc = cmd_mfi->sc;
struct mrsas_mpt_cmd *cmd_mpt;
mtx_lock(&sc->mfi_cmd_pool_lock);
/*
* Release the MPT command (if one was allocated)
* associated with this MFI command.
*/
if (cmd_mfi->cmd_id.context.smid) {
mtx_lock(&sc->mpt_cmd_pool_lock);
/* Get the mpt cmd from mfi cmd frame's smid value */
cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
cmd_mpt->flags = 0;
cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
mtx_unlock(&sc->mpt_cmd_pool_lock);
}
/* Release the mfi command */
cmd_mfi->ccb_ptr = NULL;
cmd_mfi->cmd_id.frame_count = 0;
TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
mtx_unlock(&sc->mfi_cmd_pool_lock);
return;
}
/*
* mrsas_get_ctrl_info: Returns FW's controller structure
* input: Adapter soft state
* Controller information structure
*
* Issues an internal command (DCMD) to get the FW's controller structure. This
* information is mainly used to find out the maximum IO transfer per command
* supported by the FW.
*/
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
int retcode = 0;
u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
return -ENOMEM;
}
dcmd = &cmd->frame->dcmd;
if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
mrsas_release_mfi_cmd(cmd);
return -ENOMEM;
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
if (!sc->mask_interrupts)
retcode = mrsas_issue_blocked_cmd(sc, cmd);
else
retcode = mrsas_issue_polled(sc, cmd);
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
else
memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
do_ocr = 0;
mrsas_update_ext_vd_details(sc);
sc->use_seqnum_jbod_fp =
sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
sc->disableOnlineCtrlReset =
sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
dcmd_timeout:
mrsas_free_ctlr_info_cmd(sc);
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
if (!sc->mask_interrupts)
mrsas_release_mfi_cmd(cmd);
return (retcode);
}
/*
* mrsas_update_ext_vd_details : Update details w.r.t Extended VD
* input:
* sc - Controller's softc
*/
static void
mrsas_update_ext_vd_details(struct mrsas_softc *sc)
{
sc->max256vdSupport =
sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
/* Below is additional check to address future FW enhancement */
if (sc->ctrl_info->max_lds > 64)
sc->max256vdSupport = 1;
sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
* MRSAS_MAX_DEV_PER_CHANNEL;
sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
* MRSAS_MAX_DEV_PER_CHANNEL;
if (sc->max256vdSupport) {
sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
} else {
sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
}
sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
(sizeof(MR_LD_SPAN_MAP) *
(sc->fw_supported_vd_count - 1));
sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
(sizeof(MR_LD_SPAN_MAP) *
(sc->drv_supported_vd_count - 1));
sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
if (sc->max256vdSupport)
sc->current_map_sz = sc->new_map_sz;
else
sc->current_map_sz = sc->old_map_sz;
}
/*
* mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
* input: Adapter soft state
*
* Allocates DMAable memory for the controller info internal command.
*/
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
int ctlr_info_size;
/* Allocate get controller info command */
ctlr_info_size = sizeof(struct mrsas_ctrl_info);
if (bus_dma_tag_create(sc->mrsas_parent_tag,
1, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
ctlr_info_size,
1,
ctlr_info_size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->ctlr_info_tag)) {
device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
return (ENOMEM);
}
if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
&sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
return (ENOMEM);
}
memset(sc->ctlr_info_mem, 0, ctlr_info_size);
return (0);
}
/*
* mrsas_free_ctlr_info_cmd: Free memory for controller info command
* input: Adapter soft state
*
* Deallocates memory of the get controller info cmd.
*/
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
if (sc->ctlr_info_phys_addr)
bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
if (sc->ctlr_info_mem != NULL)
bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
if (sc->ctlr_info_tag != NULL)
bus_dma_tag_destroy(sc->ctlr_info_tag);
}
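The two helpers above follow the standard busdma lifecycle: describe the buffer with a tag, allocate DMA-safe memory, load the map to learn the bus address, and later tear everything down in reverse order. A minimal sketch of that call order follows; my_dma_buf/my_dma_alloc/my_dma_free are hypothetical names, the callback reuses this driver's mrsas_addr_cb, and error unwinding on partial failure is omitted just as in the helpers above.

struct my_dma_buf {			/* hypothetical container */
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	void		*vaddr;
	bus_addr_t	paddr;
};

static int
my_dma_alloc(struct mrsas_softc *sc, struct my_dma_buf *b, bus_size_t size)
{
	/* 1. Describe the buffer: byte aligned, 32-bit addressable, 1 segment. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, &b->tag))
		return (ENOMEM);
	/* 2. Allocate DMA-safe memory together with its map. */
	if (bus_dmamem_alloc(b->tag, &b->vaddr, BUS_DMA_NOWAIT, &b->map))
		return (ENOMEM);
	/* 3. Load the map; the callback records the bus address for the SGE. */
	if (bus_dmamap_load(b->tag, b->map, b->vaddr, size,
	    mrsas_addr_cb, &b->paddr, BUS_DMA_NOWAIT))
		return (ENOMEM);
	memset(b->vaddr, 0, size);
	return (0);
}

static void
my_dma_free(struct my_dma_buf *b)
{
	/* Tear down in reverse order: unload, free memory, destroy tag. */
	if (b->paddr != 0)
		bus_dmamap_unload(b->tag, b->map);
	if (b->vaddr != NULL)
		bus_dmamem_free(b->tag, b->vaddr, b->map);
	if (b->tag != NULL)
		bus_dma_tag_destroy(b->tag);
}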
/*
* mrsas_issue_polled: Issues a polling command
* inputs: Adapter soft state
* Command packet to be issued
*
* This function is used to post internal commands to the Firmware. MFI requires
* the cmd_status to be set to 0xFF before posting. The maximum wait time of
* the poll response timer is 180 seconds.
*/
int
mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
struct mrsas_header *frame_hdr = &cmd->frame->hdr;
u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
int i, retcode = SUCCESS;
frame_hdr->cmd_status = 0xFF;
frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
/* Issue the frame using inbound queue port */
if (mrsas_issue_dcmd(sc, cmd)) {
device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
return (1);
}
/*
* Poll for the Firmware response. Although the DELAY() call below
* busy-waits on the CPU, each polling interval is only 1
* millisecond.
*/
if (frame_hdr->cmd_status == 0xFF) {
for (i = 0; i < (max_wait * 1000); i++) {
if (frame_hdr->cmd_status == 0xFF)
DELAY(1000);
else
break;
}
}
if (frame_hdr->cmd_status == 0xFF) {
device_printf(sc->mrsas_dev, "DCMD timed out after %d "
"seconds from %s\n", max_wait, __func__);
device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
cmd->frame->dcmd.opcode);
retcode = ETIMEDOUT;
}
return (retcode);
}
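The wait loop above is nothing more than a bounded poll on a status byte that the firmware overwrites once the frame completes. A self-contained userland analogy of the same pattern, with usleep() standing in for DELAY() and a plain volatile byte standing in for the MFI frame header (all names here are illustrative, not driver code):

#include <stdio.h>
#include <unistd.h>

#define CMD_PENDING	0xFF		/* sentinel written before posting */
#define MAX_WAIT_SEC	180

static volatile unsigned char cmd_status = CMD_PENDING;

/* Poll once per millisecond until the status changes or the wait expires. */
static int
poll_for_completion(void)
{
	int i;

	for (i = 0; i < MAX_WAIT_SEC * 1000; i++) {
		if (cmd_status != CMD_PENDING)
			return (0);		/* "firmware" responded */
		usleep(1000);			/* 1 ms, like DELAY(1000) */
	}
	return (-1);				/* timed out */
}

int
main(void)
{
	cmd_status = 0;		/* pretend the device completed the command */
	printf("poll result: %d\n", poll_for_completion());
	return (0);
}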
/*
* mrsas_issue_dcmd: Issues an MFI passthru cmd
* input: Adapter soft state, MFI cmd pointer
*
* This function is called by mrsas_issue_blocked_cmd() and
* mrsas_issue_polled() to build the MPT command and then fire the command
* to Firmware.
*/
int
mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
req_desc = mrsas_build_mpt_cmd(sc, cmd);
if (!req_desc) {
device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
return (1);
}
mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
return (0);
}
/*
* mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
* input: Adapter soft state, MFI cmd to build
*
* This function is called by mrsas_issue_dcmd() to build the MPT-MFI passthru
* command and prepare the MPT command to send to Firmware.
*/
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u_int16_t index;
if (mrsas_build_mptmfi_passthru(sc, cmd)) {
device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
return NULL;
}
index = cmd->cmd_id.context.smid;
req_desc = mrsas_get_request_desc(sc, index - 1);
if (!req_desc)
return NULL;
req_desc->addr.Words = 0;
req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
req_desc->SCSIIO.SMID = index;
return (req_desc);
}
/*
* mrsas_build_mptmfi_passthru: Builds an MPT-MFI passthru command
* input: Adapter soft state, MFI cmd pointer
*
* The MPT command and the io_request are setup as a passthru command. The SGE
* chain address is set to frame_phys_addr of the MFI command.
*/
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
struct mrsas_mpt_cmd *mpt_cmd;
struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
mpt_cmd = mrsas_get_mpt_cmd(sc);
if (!mpt_cmd)
return (1);
/* Save the smid. To be used for returning the cmd */
mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
mpt_cmd->sync_cmd_idx = mfi_cmd->index;
/*
* For cmds where the flag is set, store the flag and check on
* completion. For cmds with this flag, don't call
* mrsas_complete_cmd.
*/
if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
io_req = mpt_cmd->io_request;
if (sc->mrsas_gen3_ctrl) {
pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
sgl_ptr_end += sc->max_sge_in_main_msg - 1;
sgl_ptr_end->Flags = 0;
}
mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
io_req->ChainOffset = sc->chain_offset_mfi_pthru;
mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
return (0);
}
/*
* mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
* input: Adapter soft state Command to be issued
*
* This function waits on an event for the command to be returned from the ISR.
* Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
* internal and ioctl commands.
*/
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
unsigned long total_time = 0;
int retcode = SUCCESS;
/* Initialize cmd_status */
cmd->cmd_status = 0xFF;
/* Build MPT-MFI command for issue to FW */
if (mrsas_issue_dcmd(sc, cmd)) {
device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
return (1);
}
sc->chan = (void *)&cmd;
while (1) {
if (cmd->cmd_status == 0xFF) {
tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
} else
break;
if (!cmd->sync_cmd) {	/* cmd->sync_cmd will be set for an IOCTL
* command */
total_time++;
if (total_time >= max_wait) {
device_printf(sc->mrsas_dev,
"Internal command timed out after %d seconds.\n", max_wait);
retcode = 1;
break;
}
}
}
if (cmd->cmd_status == 0xFF) {
device_printf(sc->mrsas_dev, "DCMD timed out after %d "
"seconds from %s\n", max_wait, __func__);
device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
cmd->frame->dcmd.opcode);
retcode = ETIMEDOUT;
}
return (retcode);
}
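mrsas_issue_blocked_cmd() pairs with mrsas_wakeup() further below: the issuing thread sleeps on the adapter's wait channel with tsleep(), and the completion path calls wakeup_one() on the same channel once cmd_status has been updated. The same handshake can be sketched in portable userland C with a pthread mutex/condvar pair standing in for tsleep()/wakeup_one(); every name below is illustrative and not part of the driver (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct fake_cmd {
	pthread_mutex_t	lock;
	pthread_cond_t	done;
	unsigned char	status;		/* 0xFF == still pending */
};

/* Completion path: update the status, then wake the sleeping issuer. */
static void *
completer(void *arg)
{
	struct fake_cmd *c = arg;

	usleep(100 * 1000);		/* pretend the firmware worked on it */
	pthread_mutex_lock(&c->lock);
	c->status = 0;			/* "command completed OK" */
	pthread_cond_signal(&c->done);	/* wakeup_one() analogue */
	pthread_mutex_unlock(&c->lock);
	return (NULL);
}

int
main(void)
{
	struct fake_cmd c;
	pthread_t t;

	pthread_mutex_init(&c.lock, NULL);
	pthread_cond_init(&c.done, NULL);
	c.status = 0xFF;
	pthread_create(&t, NULL, completer, &c);
	/* Issuer side: sleep until the status changes (tsleep() analogue). */
	pthread_mutex_lock(&c.lock);
	while (c.status == 0xFF)
		pthread_cond_wait(&c.done, &c.lock);
	pthread_mutex_unlock(&c.lock);
	pthread_join(t, NULL);
	printf("command completed with status 0x%x\n", c.status);
	return (0);
}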
/*
* mrsas_complete_mptmfi_passthru: Completes a command
* input: @sc: Adapter soft state
* @cmd: Command to be completed
* @status: cmd completion status
*
* This function is called from mrsas_complete_cmd() after an interrupt is
* received from Firmware, and io_request->Function is
* MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
*/
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
u_int8_t status)
{
struct mrsas_header *hdr = &cmd->frame->hdr;
u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
/* Reset the retry counter for future re-tries */
cmd->retry_for_fw_reset = 0;
if (cmd->ccb_ptr)
cmd->ccb_ptr = NULL;
switch (hdr->cmd) {
case MFI_CMD_INVALID:
device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
break;
case MFI_CMD_PD_SCSI_IO:
case MFI_CMD_LD_SCSI_IO:
/*
* MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
* issued either through an IO path or an IOCTL path. If it
* was via IOCTL, we will send it to internal completion.
*/
if (cmd->sync_cmd) {
cmd->sync_cmd = 0;
mrsas_wakeup(sc, cmd);
break;
}
case MFI_CMD_SMP:
case MFI_CMD_STP:
case MFI_CMD_DCMD:
/* Check for LD map update */
if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
(cmd->frame->dcmd.mbox.b[1] == 1)) {
sc->fast_path_io = 0;
mtx_lock(&sc->raidmap_lock);
sc->map_update_cmd = NULL;
if (cmd_status != 0) {
if (cmd_status != MFI_STAT_NOT_FOUND)
device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
else {
mrsas_release_mfi_cmd(cmd);
mtx_unlock(&sc->raidmap_lock);
break;
}
} else
sc->map_id++;
mrsas_release_mfi_cmd(cmd);
if (MR_ValidateMapInfo(sc))
sc->fast_path_io = 0;
else
sc->fast_path_io = 1;
mrsas_sync_map_info(sc);
mtx_unlock(&sc->raidmap_lock);
break;
}
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
sc->mrsas_aen_triggered = 0;
}
/* FW has an updated PD sequence */
if ((cmd->frame->dcmd.opcode ==
MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
(cmd->frame->dcmd.mbox.b[0] == 1)) {
mtx_lock(&sc->raidmap_lock);
sc->jbod_seq_cmd = NULL;
mrsas_release_mfi_cmd(cmd);
if (cmd_status == MFI_STAT_OK) {
sc->pd_seq_map_id++;
/* Re-register a pd sync seq num cmd */
if (megasas_sync_pd_seq_num(sc, true))
sc->use_seqnum_jbod_fp = 0;
} else {
sc->use_seqnum_jbod_fp = 0;
device_printf(sc->mrsas_dev,
"Jbod map sync failed, status=%x\n", cmd_status);
}
mtx_unlock(&sc->raidmap_lock);
break;
}
/* See if we got an event notification */
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
mrsas_complete_aen(sc, cmd);
else
mrsas_wakeup(sc, cmd);
break;
case MFI_CMD_ABORT:
/* A command issued to abort another cmd has returned */
mrsas_complete_abort(sc, cmd);
break;
default:
device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
break;
}
}
/*
* mrsas_wakeup: Completes an internal command
* input: Adapter soft state
* Command to be completed
*
* In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
* timer is started. This function is called from
* mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
* from the command wait.
*/
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
cmd->cmd_status = cmd->frame->io.cmd_status;
if (cmd->cmd_status == 0xFF)
cmd->cmd_status = 0;
sc->chan = (void *)&cmd;
wakeup_one((void *)&sc->chan);
return;
}
/*
* mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
* input: Adapter soft state, Shutdown/Hibernate opcode
*
* This function issues a DCMD internal command to Firmware to initiate shutdown
* of the controller.
*/
static void
mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
return;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
return;
}
if (sc->aen_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
if (sc->map_update_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
if (sc->jbod_seq_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
dcmd = &cmd->frame->dcmd;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
dcmd->opcode = opcode;
device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
mrsas_issue_blocked_cmd(sc, cmd);
mrsas_release_mfi_cmd(cmd);
return;
}
/*
* mrsas_flush_cache: Requests FW to flush all its caches
* input: Adapter soft state
*
* This function issues a DCMD internal command to Firmware to initiate
* flushing of all caches.
*/
static void
mrsas_flush_cache(struct mrsas_softc *sc)
{
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
return;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
return;
}
dcmd = &cmd->frame->dcmd;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
mrsas_issue_blocked_cmd(sc, cmd);
mrsas_release_mfi_cmd(cmd);
return;
}
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
int retcode = 0;
u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
uint32_t pd_seq_map_sz;
struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
bus_addr_t pd_seq_h;
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) *
(MAX_PHYSICAL_DEVICES - 1));
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev,
"Cannot alloc for ld map info cmd.\n");
return 1;
}
dcmd = &cmd->frame->dcmd;
pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
if (!pd_sync) {
device_printf(sc->mrsas_dev,
"Failed to alloc mem for jbod map info.\n");
mrsas_release_mfi_cmd(cmd);
return (ENOMEM);
}
memset(pd_sync, 0, pd_seq_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = (pd_seq_map_sz);
dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
if (pend) {
dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
dcmd->flags = (MFI_FRAME_DIR_WRITE);
sc->jbod_seq_cmd = cmd;
if (mrsas_issue_dcmd(sc, cmd)) {
device_printf(sc->mrsas_dev,
"Fail to send sync map info command.\n");
return 1;
} else
return 0;
} else
dcmd->flags = MFI_FRAME_DIR_READ;
retcode = mrsas_issue_polled(sc, cmd);
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
device_printf(sc->mrsas_dev,
"driver supports max %d JBOD, but FW reports %d\n",
MAX_PHYSICAL_DEVICES, pd_sync->count);
retcode = -EINVAL;
}
if (!retcode)
sc->pd_seq_map_id++;
do_ocr = 0;
dcmd_timeout:
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
return (retcode);
}
/*
* mrsas_get_map_info: Load and validate RAID map
* input: Adapter instance soft state
*
* This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
* and validate the RAID map. It returns 0 if successful, 1 otherwise.
*/
static int
mrsas_get_map_info(struct mrsas_softc *sc)
{
uint8_t retcode = 0;
sc->fast_path_io = 0;
if (!mrsas_get_ld_map_info(sc)) {
retcode = MR_ValidateMapInfo(sc);
if (retcode == 0) {
sc->fast_path_io = 1;
return 0;
}
}
return 1;
}
/*
* mrsas_get_ld_map_info: Get FW's ld_map structure
* input: Adapter instance soft state
*
* Issues an internal command (DCMD) to get the FW's LD map
* structure.
*/
static int
mrsas_get_ld_map_info(struct mrsas_softc *sc)
{
int retcode = 0;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
void *map;
bus_addr_t map_phys_addr = 0;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev,
"Cannot alloc for ld map info cmd.\n");
return 1;
}
dcmd = &cmd->frame->dcmd;
map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
if (!map) {
device_printf(sc->mrsas_dev,
"Failed to alloc mem for ld map info.\n");
mrsas_release_mfi_cmd(cmd);
return (ENOMEM);
}
memset(map, 0, sc->max_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sc->current_map_sz;
dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
dcmd->sgl.sge32[0].length = sc->current_map_sz;
retcode = mrsas_issue_polled(sc, cmd);
if (retcode == ETIMEDOUT)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
return (retcode);
}
/*
* mrsas_sync_map_info: Sync the LD map info with FW
* input: Adapter instance soft state
*
* Issues an internal command (DCMD) to sync the LD map information with
* the FW.
*/
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
int retcode = 0, i;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
uint32_t size_sync_info, num_lds;
MR_LD_TARGET_SYNC *target_map = NULL;
MR_DRV_RAID_MAP_ALL *map;
MR_LD_RAID *raid;
MR_LD_TARGET_SYNC *ld_sync;
bus_addr_t map_phys_addr = 0;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
return ENOMEM;
}
map = sc->ld_drv_map[sc->map_id & 1];
num_lds = map->raidMap.ldCount;
dcmd = &cmd->frame->dcmd;
size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
memset(target_map, 0, sc->max_map_sz);
map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
ld_sync = (MR_LD_TARGET_SYNC *) target_map;
for (i = 0; i < num_lds; i++, ld_sync++) {
raid = MR_LdRaidGet(i, map);
ld_sync->targetId = MR_GetLDTgtId(i, map);
ld_sync->seqNum = raid->seqNum;
}
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sc->current_map_sz;
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
dcmd->sgl.sge32[0].length = sc->current_map_sz;
sc->map_update_cmd = cmd;
if (mrsas_issue_dcmd(sc, cmd)) {
device_printf(sc->mrsas_dev,
"Fail to send sync map info command.\n");
return (1);
}
return (retcode);
}
/*
* mrsas_get_pd_list: Returns FW's PD list structure
* input: Adapter soft state
*
* Issues an internal command (DCMD) to get the FW's controller PD list
* structure. This information is mainly used to find out which system
* physical drives are exposed by the Firmware.
*/
static int
mrsas_get_pd_list(struct mrsas_softc *sc)
{
int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
struct MR_PD_LIST *pd_list_mem;
struct MR_PD_ADDRESS *pd_addr;
bus_addr_t pd_list_phys_addr = 0;
struct mrsas_tmp_dcmd *tcmd;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev,
"Cannot alloc for get PD list cmd\n");
return 1;
}
dcmd = &cmd->frame->dcmd;
tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
device_printf(sc->mrsas_dev,
"Cannot alloc dmamap for get PD list cmd\n");
mrsas_release_mfi_cmd(cmd);
mrsas_free_tmp_dcmd(tcmd);
free(tcmd, M_MRSAS);
return (ENOMEM);
} else {
pd_list_mem = tcmd->tmp_dcmd_mem;
pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
dcmd->mbox.b[1] = 0;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
if (!sc->mask_interrupts)
retcode = mrsas_issue_blocked_cmd(sc, cmd);
else
retcode = mrsas_issue_polled(sc, cmd);
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
/* Get the instance PD list */
pd_count = MRSAS_MAX_PD;
pd_addr = pd_list_mem->addr;
if (pd_list_mem->count < pd_count) {
memset(sc->local_pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
sc->local_pd_list[pd_addr->deviceId].driveType =
pd_addr->scsiDevType;
sc->local_pd_list[pd_addr->deviceId].driveState =
MR_PD_STATE_SYSTEM;
pd_addr++;
}
/*
* Use a mutex/spinlock if the pd_list component size grows beyond
* 32 bits.
*/
memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
do_ocr = 0;
}
dcmd_timeout:
mrsas_free_tmp_dcmd(tcmd);
free(tcmd, M_MRSAS);
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
if (!sc->mask_interrupts)
mrsas_release_mfi_cmd(cmd);
return (retcode);
}
/*
* mrsas_get_ld_list: Returns FW's LD list structure
* input: Adapter soft state
*
* Issues an internal command (DCMD) to get the FW's controller LD list
* structure. This information is mainly used to find out the LDs supported
* by the FW.
*/
static int
mrsas_get_ld_list(struct mrsas_softc *sc)
{
int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
struct MR_LD_LIST *ld_list_mem;
bus_addr_t ld_list_phys_addr = 0;
struct mrsas_tmp_dcmd *tcmd;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev,
"Cannot alloc for get LD list cmd\n");
return 1;
}
dcmd = &cmd->frame->dcmd;
tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
ld_list_size = sizeof(struct MR_LD_LIST);
if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
device_printf(sc->mrsas_dev,
"Cannot alloc dmamap for get LD list cmd\n");
mrsas_release_mfi_cmd(cmd);
mrsas_free_tmp_dcmd(tcmd);
free(tcmd, M_MRSAS);
return (ENOMEM);
} else {
ld_list_mem = tcmd->tmp_dcmd_mem;
ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
if (sc->max256vdSupport)
dcmd->mbox.b[0] = 1;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
dcmd->opcode = MR_DCMD_LD_GET_LIST;
dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
dcmd->pad_0 = 0;
if (!sc->mask_interrupts)
retcode = mrsas_issue_blocked_cmd(sc, cmd);
else
retcode = mrsas_issue_polled(sc, cmd);
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
#if VD_EXT_DEBUG
printf("Number of LDs %d\n", ld_list_mem->ldCount);
#endif
/* Get the instance LD list */
if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
sc->CurLdCount = ld_list_mem->ldCount;
memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
if (ld_list_mem->ldList[ld_index].state != 0) {
ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
}
}
do_ocr = 0;
}
dcmd_timeout:
mrsas_free_tmp_dcmd(tcmd);
free(tcmd, M_MRSAS);
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
if (!sc->mask_interrupts)
mrsas_release_mfi_cmd(cmd);
return (retcode);
}
/*
* mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
* input: Adapter soft state, temp command, size of allocation
*
* Allocates DMAable memory for a temporary internal command. The allocated
* memory is initialized to all zeros upon successful loading of the dma
* mapped memory.
*/
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
struct mrsas_tmp_dcmd *tcmd, int size)
{
if (bus_dma_tag_create(sc->mrsas_parent_tag,
1, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
size,
1,
size,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&tcmd->tmp_dcmd_tag)) {
device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
return (ENOMEM);
}
if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
return (ENOMEM);
}
if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
&tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
return (ENOMEM);
}
memset(tcmd->tmp_dcmd_mem, 0, size);
return (0);
}
/*
* mrsas_free_tmp_dcmd: Free memory for temporary command
* input: temporary dcmd pointer
*
* Deallocates memory of the temporary command for use in the construction of
* the internal DCMD.
*/
void
mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
{
if (tmp->tmp_dcmd_phys_addr)
bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
if (tmp->tmp_dcmd_mem != NULL)
bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
if (tmp->tmp_dcmd_tag != NULL)
bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
}
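Callers such as mrsas_get_pd_list() and mrsas_get_ld_list() earlier in this file use the two helpers above as a bracket around a single DCMD: allocate the DMA-backed scratch buffer, point the frame's one SGE at it, issue the command, then unconditionally tear the scratch buffer down. A condensed sketch of that lifecycle follows; my_query_fw is a hypothetical wrapper and the result-copy step is left as a comment.

static int
my_query_fw(struct mrsas_softc *sc, u_int32_t opcode, int xfer_len)
{
	struct mrsas_tmp_dcmd *tcmd;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	int ret;

	cmd = mrsas_get_mfi_cmd(sc);
	if (cmd == NULL)
		return (ENOMEM);
	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT | M_ZERO);
	if (tcmd == NULL) {
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	if (mrsas_alloc_tmp_dcmd(sc, tcmd, xfer_len) != SUCCESS) {
		mrsas_release_mfi_cmd(cmd);
		mrsas_free_tmp_dcmd(tcmd);
		free(tcmd, M_MRSAS);
		return (ENOMEM);
	}
	/* Point the DCMD's single SGE at the freshly loaded scratch buffer. */
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = xfer_len;
	dcmd->opcode = opcode;
	dcmd->sgl.sge32[0].phys_addr = tcmd->tmp_dcmd_phys_addr;
	dcmd->sgl.sge32[0].length = xfer_len;

	ret = mrsas_issue_polled(sc, cmd);	/* or mrsas_issue_blocked_cmd() */
	/* On success the data now sits in tcmd->tmp_dcmd_mem; copy it out here. */

	mrsas_free_tmp_dcmd(tcmd);
	free(tcmd, M_MRSAS);
	mrsas_release_mfi_cmd(cmd);
	return (ret);
}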
/*
* mrsas_issue_blocked_abort_cmd: Aborts a previously issued cmd
* input: Adapter soft state, previously issued cmd to be aborted
*
* This function is used to abort previously issued commands, such as AEN and
* RAID map sync map commands. The abort command is sent as a DCMD internal
* command and subsequently the driver will wait for a return status. The
* max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
*/
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd_to_abort)
{
struct mrsas_mfi_cmd *cmd;
struct mrsas_abort_frame *abort_fr;
u_int8_t retcode = 0;
unsigned long total_time = 0;
u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
return (1);
}
abort_fr = &cmd->frame->abort;
/* Prepare and issue the abort frame */
abort_fr->cmd = MFI_CMD_ABORT;
abort_fr->cmd_status = 0xFF;
abort_fr->flags = 0;
abort_fr->abort_context = cmd_to_abort->index;
abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
abort_fr->abort_mfi_phys_addr_hi = 0;
cmd->sync_cmd = 1;
cmd->cmd_status = 0xFF;
if (mrsas_issue_dcmd(sc, cmd)) {
device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
return (1);
}
/* Wait for this cmd to complete */
sc->chan = (void *)&cmd;
while (1) {
if (cmd->cmd_status == 0xFF) {
tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
} else
break;
total_time++;
if (total_time >= max_wait) {
device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
retcode = 1;
break;
}
}
cmd->sync_cmd = 0;
mrsas_release_mfi_cmd(cmd);
return (retcode);
}
/*
* mrsas_complete_abort: Completes aborting a command
* input: Adapter soft state, cmd that was issued to abort another cmd
*
* The mrsas_issue_blocked_abort_cmd() function waits for the command status to
* change after sending the command. This function is called from
* mrsas_complete_mptmfi_passthru() to wake up the associated sleeping thread.
*/
void
mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
if (cmd->sync_cmd) {
cmd->sync_cmd = 0;
cmd->cmd_status = 0;
sc->chan = (void *)&cmd;
wakeup_one((void *)&sc->chan);
}
return;
}
/*
* mrsas_aen_handler: AEN processing callback function from thread context
* input: Adapter soft state
*
* Asynchronous event handler
*/
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
union mrsas_evt_class_locale class_locale;
int doscan = 0;
u_int32_t seq_num;
int error, fail_aen = 0;
if (sc == NULL) {
printf("invalid instance!\n");
return;
}
if (sc->remove_in_progress || sc->reset_in_progress) {
device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
__func__, __LINE__);
return;
}
if (sc->evt_detail_mem) {
switch (sc->evt_detail_mem->code) {
case MR_EVT_PD_INSERTED:
fail_aen = mrsas_get_pd_list(sc);
if (!fail_aen)
mrsas_bus_scan_sim(sc, sc->sim_1);
else
goto skip_register_aen;
break;
case MR_EVT_PD_REMOVED:
fail_aen = mrsas_get_pd_list(sc);
if (!fail_aen)
mrsas_bus_scan_sim(sc, sc->sim_1);
else
goto skip_register_aen;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
mrsas_bus_scan_sim(sc, sc->sim_0);
break;
case MR_EVT_LD_CREATED:
fail_aen = mrsas_get_ld_list(sc);
if (!fail_aen)
mrsas_bus_scan_sim(sc, sc->sim_0);
else
goto skip_register_aen;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
doscan = 1;
break;
case MR_EVT_CTRL_PROP_CHANGED:
fail_aen = mrsas_get_ctrl_info(sc);
if (fail_aen)
goto skip_register_aen;
break;
default:
break;
}
} else {
device_printf(sc->mrsas_dev, "invalid evt_detail\n");
return;
}
if (doscan) {
fail_aen = mrsas_get_pd_list(sc);
if (!fail_aen) {
mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
mrsas_bus_scan_sim(sc, sc->sim_1);
} else
goto skip_register_aen;
fail_aen = mrsas_get_ld_list(sc);
if (!fail_aen) {
mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
mrsas_bus_scan_sim(sc, sc->sim_0);
} else
goto skip_register_aen;
}
seq_num = sc->evt_detail_mem->seq_num + 1;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
if (sc->aen_cmd != NULL)
return;
mtx_lock(&sc->aen_lock);
error = mrsas_register_aen(sc, seq_num,
class_locale.word);
mtx_unlock(&sc->aen_lock);
if (error)
device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
skip_register_aen:
return;
}
/*
* mrsas_complete_aen: Completes AEN command
* input: Adapter soft state
* AEN cmd that was completed (or aborted)
*
* This function will be called from ISR and will continue event processing from
* thread context by enqueuing task in ev_tq (callback function
* "mrsas_aen_handler").
*/
void
mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
/*
* Don't signal app if it is just an aborted previously registered
* aen
*/
if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
sc->mrsas_aen_triggered = 1;
mtx_lock(&sc->aen_lock);
if (sc->mrsas_poll_waiting) {
sc->mrsas_poll_waiting = 0;
selwakeup(&sc->mrsas_select);
}
mtx_unlock(&sc->aen_lock);
} else
cmd->abort_aen = 0;
sc->aen_cmd = NULL;
mrsas_release_mfi_cmd(cmd);
taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
return;
}
static device_method_t mrsas_methods[] = {
DEVMETHOD(device_probe, mrsas_probe),
DEVMETHOD(device_attach, mrsas_attach),
DEVMETHOD(device_detach, mrsas_detach),
DEVMETHOD(device_suspend, mrsas_suspend),
DEVMETHOD(device_resume, mrsas_resume),
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
{0, 0}
};
static driver_t mrsas_driver = {
"mrsas",
mrsas_methods,
sizeof(struct mrsas_softc)
};
static devclass_t mrsas_devclass;
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
Index: head/sys/dev/mxge/if_mxge.c
===================================================================
--- head/sys/dev/mxge/if_mxge.c (revision 328217)
+++ head/sys/dev/mxge/if_mxge.c (revision 328218)
@@ -1,5025 +1,5026 @@
/******************************************************************************
SPDX-License-Identifier: BSD-2-Clause-FreeBSD
Copyright (c) 2006-2013, Myricom Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Myricom Inc, nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/zlib.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet6/ip6_var.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h> /* XXX for pci_cfg_restore */
#include <vm/vm.h> /* for pmap_mapdev() */
#include <vm/pmap.h>
#if defined(__i386) || defined(__amd64)
#include <machine/specialreg.h>
#endif
#include <dev/mxge/mxge_mcp.h>
#include <dev/mxge/mcp_gen_header.h>
/*#define MXGE_FAKE_IFP*/
#include <dev/mxge/if_mxge_var.h>
#ifdef IFNET_BUF_RING
#include <sys/buf_ring.h>
#endif
#include "opt_inet.h"
#include "opt_inet6.h"
/* tunable params */
static int mxge_nvidia_ecrc_enable = 1;
static int mxge_force_firmware = 0;
static int mxge_intr_coal_delay = 30;
static int mxge_deassert_wait = 1;
static int mxge_flow_control = 1;
static int mxge_verbose = 0;
static int mxge_ticks;
static int mxge_max_slices = 1;
static int mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
static int mxge_always_promisc = 0;
static int mxge_initial_mtu = ETHERMTU_JUMBO;
static int mxge_throttle = 0;
static char *mxge_fw_unaligned = "mxge_ethp_z8e";
static char *mxge_fw_aligned = "mxge_eth_z8e";
static char *mxge_fw_rss_aligned = "mxge_rss_eth_z8e";
static char *mxge_fw_rss_unaligned = "mxge_rss_ethp_z8e";
static int mxge_probe(device_t dev);
static int mxge_attach(device_t dev);
static int mxge_detach(device_t dev);
static int mxge_shutdown(device_t dev);
static void mxge_intr(void *arg);
static device_method_t mxge_methods[] =
{
/* Device interface */
DEVMETHOD(device_probe, mxge_probe),
DEVMETHOD(device_attach, mxge_attach),
DEVMETHOD(device_detach, mxge_detach),
DEVMETHOD(device_shutdown, mxge_shutdown),
DEVMETHOD_END
};
static driver_t mxge_driver =
{
"mxge",
mxge_methods,
sizeof(mxge_softc_t),
};
static devclass_t mxge_devclass;
/* Declare ourselves to be a child of the PCI bus.*/
DRIVER_MODULE(mxge, pci, mxge_driver, mxge_devclass, 0, 0);
MODULE_DEPEND(mxge, firmware, 1, 1, 1);
MODULE_DEPEND(mxge, zlib, 1, 1, 1);
static int mxge_load_firmware(mxge_softc_t *sc, int adopt);
static int mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data);
static int mxge_close(mxge_softc_t *sc, int down);
static int mxge_open(mxge_softc_t *sc);
static void mxge_tick(void *arg);
static int
mxge_probe(device_t dev)
{
int rev;
if ((pci_get_vendor(dev) == MXGE_PCI_VENDOR_MYRICOM) &&
((pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E) ||
(pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E_9))) {
rev = pci_get_revid(dev);
switch (rev) {
case MXGE_PCI_REV_Z8E:
device_set_desc(dev, "Myri10G-PCIE-8A");
break;
case MXGE_PCI_REV_Z8ES:
device_set_desc(dev, "Myri10G-PCIE-8B");
break;
default:
device_set_desc(dev, "Myri10G-PCIE-8??");
device_printf(dev, "Unrecognized rev %d NIC\n",
rev);
break;
}
return 0;
}
return ENXIO;
}
static void
mxge_enable_wc(mxge_softc_t *sc)
{
#if defined(__i386) || defined(__amd64)
vm_offset_t len;
int err;
sc->wc = 1;
len = rman_get_size(sc->mem_res);
err = pmap_change_attr((vm_offset_t) sc->sram,
len, PAT_WRITE_COMBINING);
if (err != 0) {
device_printf(sc->dev, "pmap_change_attr failed, %d\n",
err);
sc->wc = 0;
}
#endif
}
/* callback to get our DMA address */
static void
mxge_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
int error)
{
if (error == 0) {
*(bus_addr_t *) arg = segs->ds_addr;
}
}
static int
mxge_dma_alloc(mxge_softc_t *sc, mxge_dma_t *dma, size_t bytes,
bus_size_t alignment)
{
int err;
device_t dev = sc->dev;
bus_size_t boundary, maxsegsize;
if (bytes > 4096 && alignment == 4096) {
boundary = 0;
maxsegsize = bytes;
} else {
boundary = 4096;
maxsegsize = 4096;
}
/* allocate DMAable memory tags */
err = bus_dma_tag_create(sc->parent_dmat, /* parent */
alignment, /* alignment */
boundary, /* boundary */
BUS_SPACE_MAXADDR, /* low */
BUS_SPACE_MAXADDR, /* high */
NULL, NULL, /* filter */
bytes, /* maxsize */
1, /* num segs */
maxsegsize, /* maxsegsize */
BUS_DMA_COHERENT, /* flags */
NULL, NULL, /* lock */
&dma->dmat); /* tag */
if (err != 0) {
device_printf(dev, "couldn't alloc tag (err = %d)\n", err);
return err;
}
/* allocate DMAable memory & map */
err = bus_dmamem_alloc(dma->dmat, &dma->addr,
(BUS_DMA_WAITOK | BUS_DMA_COHERENT
| BUS_DMA_ZERO), &dma->map);
if (err != 0) {
device_printf(dev, "couldn't alloc mem (err = %d)\n", err);
goto abort_with_dmat;
}
/* load the memory */
err = bus_dmamap_load(dma->dmat, dma->map, dma->addr, bytes,
mxge_dmamap_callback,
(void *)&dma->bus_addr, 0);
if (err != 0) {
device_printf(dev, "couldn't load map (err = %d)\n", err);
goto abort_with_mem;
}
return 0;
abort_with_mem:
bus_dmamem_free(dma->dmat, dma->addr, dma->map);
abort_with_dmat:
(void)bus_dma_tag_destroy(dma->dmat);
return err;
}
static void
mxge_dma_free(mxge_dma_t *dma)
{
bus_dmamap_unload(dma->dmat, dma->map);
bus_dmamem_free(dma->dmat, dma->addr, dma->map);
(void)bus_dma_tag_destroy(dma->dmat);
}
/*
* The eeprom strings on the lanaiX have the format
* SN=x\0
* MAC=x:x:x:x:x:x\0
* PC=text\0
*/
static int
mxge_parse_strings(mxge_softc_t *sc)
{
char *ptr;
int i, found_mac, found_sn2;
char *endptr;
ptr = sc->eeprom_strings;
found_mac = 0;
found_sn2 = 0;
while (*ptr != '\0') {
if (strncmp(ptr, "MAC=", 4) == 0) {
ptr += 4;
for (i = 0;;) {
sc->mac_addr[i] = strtoul(ptr, &endptr, 16);
if (endptr - ptr != 2)
goto abort;
ptr = endptr;
if (++i == 6)
break;
if (*ptr++ != ':')
goto abort;
}
found_mac = 1;
} else if (strncmp(ptr, "PC=", 3) == 0) {
ptr += 3;
strlcpy(sc->product_code_string, ptr,
sizeof(sc->product_code_string));
} else if (!found_sn2 && (strncmp(ptr, "SN=", 3) == 0)) {
ptr += 3;
strlcpy(sc->serial_number_string, ptr,
sizeof(sc->serial_number_string));
} else if (strncmp(ptr, "SN2=", 4) == 0) {
/* SN2 takes precedence over SN */
ptr += 4;
found_sn2 = 1;
strlcpy(sc->serial_number_string, ptr,
sizeof(sc->serial_number_string));
}
while (*ptr++ != '\0') {}
}
if (found_mac)
return 0;
abort:
device_printf(sc->dev, "failed to parse eeprom_strings\n");
return ENXIO;
}
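The EEPROM image is thus a sequence of NUL-terminated KEY=value records followed by an empty string, which is why the parser above advances with while (*ptr++ != '\0') after handling each record. A self-contained userland illustration of walking such a buffer (the sample data and output are made up for the example):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Packed "KEY=value\0" records; the array ends with an extra NUL. */
	const char eeprom[] = "SN=12345\0MAC=00:60:dd:43:a1:b2\0PC=M3F-PCIXE-2\0";
	const char *ptr = eeprom;

	while (*ptr != '\0') {
		if (strncmp(ptr, "MAC=", 4) == 0)
			printf("mac address:   %s\n", ptr + 4);
		else if (strncmp(ptr, "SN=", 3) == 0)
			printf("serial number: %s\n", ptr + 3);
		else if (strncmp(ptr, "PC=", 3) == 0)
			printf("product code:  %s\n", ptr + 3);
		/* Skip past this record's terminating NUL. */
		ptr += strlen(ptr) + 1;
	}
	return (0);
}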
#if defined __i386 || defined i386 || defined __i386__ || defined __x86_64__
static void
mxge_enable_nvidia_ecrc(mxge_softc_t *sc)
{
uint32_t val;
unsigned long base, off;
char *va, *cfgptr;
device_t pdev, mcp55;
uint16_t vendor_id, device_id, word;
uintptr_t bus, slot, func, ivend, idev;
uint32_t *ptr32;
if (!mxge_nvidia_ecrc_enable)
return;
pdev = device_get_parent(device_get_parent(sc->dev));
if (pdev == NULL) {
device_printf(sc->dev, "could not find parent?\n");
return;
}
vendor_id = pci_read_config(pdev, PCIR_VENDOR, 2);
device_id = pci_read_config(pdev, PCIR_DEVICE, 2);
if (vendor_id != 0x10de)
return;
base = 0;
if (device_id == 0x005d) {
/* ck804, base address is magic */
base = 0xe0000000UL;
} else if (device_id >= 0x0374 && device_id <= 0x378) {
/* mcp55, base address stored in chipset */
mcp55 = pci_find_bsf(0, 0, 0);
if (mcp55 &&
0x10de == pci_read_config(mcp55, PCIR_VENDOR, 2) &&
0x0369 == pci_read_config(mcp55, PCIR_DEVICE, 2)) {
word = pci_read_config(mcp55, 0x90, 2);
base = ((unsigned long)word & 0x7ffeU) << 25;
}
}
if (!base)
return;
/* XXXX
Test below is commented because it is believed that doing
config read/write beyond 0xff will access the config space
for the next larger function. Uncomment this and remove
the hacky pmap_mapdev() way of accessing config space when
FreeBSD grows support for extended pcie config space access
*/
#if 0
/* See if we can, by some miracle, access the extended
config space */
val = pci_read_config(pdev, 0x178, 4);
if (val != 0xffffffff) {
val |= 0x40;
pci_write_config(pdev, 0x178, val, 4);
return;
}
#endif
/* Rather than using normal pci config space writes, we must
* map the Nvidia config space ourselves. This is because on
* opteron/nvidia class machines the 0xe0000000 mapping is
* handled by the nvidia chipset, which means the internal PCI
* device (the on-chip northbridge), or the amd-8131 bridge
* and things behind them are not visible by this method.
*/
BUS_READ_IVAR(device_get_parent(pdev), pdev,
PCI_IVAR_BUS, &bus);
BUS_READ_IVAR(device_get_parent(pdev), pdev,
PCI_IVAR_SLOT, &slot);
BUS_READ_IVAR(device_get_parent(pdev), pdev,
PCI_IVAR_FUNCTION, &func);
BUS_READ_IVAR(device_get_parent(pdev), pdev,
PCI_IVAR_VENDOR, &ivend);
BUS_READ_IVAR(device_get_parent(pdev), pdev,
PCI_IVAR_DEVICE, &idev);
off = base
+ 0x00100000UL * (unsigned long)bus
+ 0x00001000UL * (unsigned long)(func
+ 8 * slot);
/* map it into the kernel */
va = pmap_mapdev(trunc_page((vm_paddr_t)off), PAGE_SIZE);
if (va == NULL) {
device_printf(sc->dev, "pmap_kenter_temporary didn't\n");
return;
}
/* get a pointer to the config space mapped into the kernel */
cfgptr = va + (off & PAGE_MASK);
/* make sure that we can really access it */
vendor_id = *(uint16_t *)(cfgptr + PCIR_VENDOR);
device_id = *(uint16_t *)(cfgptr + PCIR_DEVICE);
if (! (vendor_id == ivend && device_id == idev)) {
device_printf(sc->dev, "mapping failed: 0x%x:0x%x\n",
vendor_id, device_id);
pmap_unmapdev((vm_offset_t)va, PAGE_SIZE);
return;
}
ptr32 = (uint32_t*)(cfgptr + 0x178);
val = *ptr32;
if (val == 0xffffffff) {
device_printf(sc->dev, "extended mapping failed\n");
pmap_unmapdev((vm_offset_t)va, PAGE_SIZE);
return;
}
*ptr32 = val | 0x40;
pmap_unmapdev((vm_offset_t)va, PAGE_SIZE);
if (mxge_verbose)
device_printf(sc->dev,
"Enabled ECRC on upstream Nvidia bridge "
"at %d:%d:%d\n",
(int)bus, (int)slot, (int)func);
return;
}
#else
static void
mxge_enable_nvidia_ecrc(mxge_softc_t *sc)
{
device_printf(sc->dev,
"Nforce 4 chipset on non-x86/amd64!?!?!\n");
return;
}
#endif
static int
mxge_dma_test(mxge_softc_t *sc, int test_type)
{
mxge_cmd_t cmd;
bus_addr_t dmatest_bus = sc->dmabench_dma.bus_addr;
int status;
uint32_t len;
char *test = " ";
/* Run a small DMA test.
* The magic multipliers to the length tell the firmware
* to do DMA read, write, or read+write tests. The
* results are returned in cmd.data0. The upper 16
* bits of the return is the number of transfers completed.
* The lower 16 bits is the time in 0.5us ticks that the
* transfers took to complete.
*/
len = sc->tx_boundary;
cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x10000;
status = mxge_send_cmd(sc, test_type, &cmd);
if (status != 0) {
test = "read";
goto abort;
}
sc->read_dma = ((cmd.data0>>16) * len * 2) /
(cmd.data0 & 0xffff);
cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x1;
status = mxge_send_cmd(sc, test_type, &cmd);
if (status != 0) {
test = "write";
goto abort;
}
sc->write_dma = ((cmd.data0>>16) * len * 2) /
(cmd.data0 & 0xffff);
cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x10001;
status = mxge_send_cmd(sc, test_type, &cmd);
if (status != 0) {
test = "read/write";
goto abort;
}
sc->read_write_dma = ((cmd.data0>>16) * len * 2 * 2) /
(cmd.data0 & 0xffff);
abort:
if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
device_printf(sc->dev, "DMA %s benchmark failed: %d\n",
test, status);
return status;
}
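The bandwidth figures above fall straight out of the packed cmd.data0 result: the upper 16 bits count completed transfers and the lower 16 bits give the elapsed time in 0.5 us ticks, so bytes per microsecond (reported as MB/s) is transfers * len * 2 / ticks, with an extra factor of two in the read/write case for data moving in both directions. A tiny standalone check of that arithmetic, using a made-up result word:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t data0 = (1000u << 16) | 5000u;	/* hypothetical firmware result */
	uint32_t len = 4096;			/* tx_boundary used for the test */
	uint32_t transfers = data0 >> 16;	/* completed DMA transfers */
	uint32_t ticks = data0 & 0xffff;	/* elapsed time in 0.5 us units */

	/* Bytes per microsecond == MB/s; the *2 converts 0.5 us ticks to us. */
	uint32_t read_mbs = (transfers * len * 2) / ticks;

	printf("read DMA: %u MB/s\n", read_mbs);	/* prints 1638 */
	return (0);
}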
/*
* The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
* when the PCI-E Completion packets are aligned on an 8-byte
* boundary. Some PCI-E chip sets always align Completion packets; on
* the ones that do not, the alignment can be enforced by enabling
* ECRC generation (if supported).
*
* When PCI-E Completion packets are not aligned, it is actually more
* efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
*
* If the driver can neither enable ECRC nor verify that it has
* already been enabled, then it must use a firmware image which works
* around unaligned completion packets (ethp_z8e.dat), and it should
* also ensure that it never gives the device a Read-DMA which is
* larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
* enabled, then the driver should use the aligned (eth_z8e.dat)
* firmware image, and set tx_boundary to 4KB.
*/
static int
mxge_firmware_probe(mxge_softc_t *sc)
{
device_t dev = sc->dev;
int reg, status;
uint16_t pectl;
sc->tx_boundary = 4096;
/*
* Verify the max read request size was set to 4KB
* before trying the test with 4KB.
*/
if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
pectl = pci_read_config(dev, reg + 0x8, 2);
if ((pectl & (5 << 12)) != (5 << 12)) {
device_printf(dev, "Max Read Req. size != 4k (0x%x\n",
pectl);
sc->tx_boundary = 2048;
}
}
/*
* load the optimized firmware (which assumes aligned PCIe
* completions) in order to see if it works on this host.
*/
sc->fw_name = mxge_fw_aligned;
status = mxge_load_firmware(sc, 1);
if (status != 0) {
return status;
}
/*
* Enable ECRC if possible
*/
mxge_enable_nvidia_ecrc(sc);
/*
* Run a DMA test which watches for unaligned completions and
* aborts on the first one seen. Not required on Z8ES or newer.
*/
if (pci_get_revid(sc->dev) >= MXGE_PCI_REV_Z8ES)
return 0;
status = mxge_dma_test(sc, MXGEFW_CMD_UNALIGNED_TEST);
if (status == 0)
return 0; /* keep the aligned firmware */
if (status != E2BIG)
device_printf(dev, "DMA test failed: %d\n", status);
if (status == ENOSYS)
device_printf(dev, "Falling back to ethp! "
"Please install up to date fw\n");
return status;
}
static int
mxge_select_firmware(mxge_softc_t *sc)
{
int aligned = 0;
int force_firmware = mxge_force_firmware;
if (sc->throttle)
force_firmware = sc->throttle;
if (force_firmware != 0) {
if (force_firmware == 1)
aligned = 1;
else
aligned = 0;
if (mxge_verbose)
device_printf(sc->dev,
"Assuming %s completions (forced)\n",
aligned ? "aligned" : "unaligned");
goto abort;
}
/* if the PCIe link width is 4 or less, we can use the aligned
firmware and skip any checks */
if (sc->link_width != 0 && sc->link_width <= 4) {
device_printf(sc->dev,
"PCIe x%d Link, expect reduced performance\n",
sc->link_width);
aligned = 1;
goto abort;
}
if (0 == mxge_firmware_probe(sc))
return 0;
abort:
if (aligned) {
sc->fw_name = mxge_fw_aligned;
sc->tx_boundary = 4096;
} else {
sc->fw_name = mxge_fw_unaligned;
sc->tx_boundary = 2048;
}
return (mxge_load_firmware(sc, 0));
}
static int
mxge_validate_firmware(mxge_softc_t *sc, const mcp_gen_header_t *hdr)
{
if (be32toh(hdr->mcp_type) != MCP_TYPE_ETH) {
device_printf(sc->dev, "Bad firmware type: 0x%x\n",
be32toh(hdr->mcp_type));
return EIO;
}
/* save firmware version for sysctl */
strlcpy(sc->fw_version, hdr->version, sizeof(sc->fw_version));
if (mxge_verbose)
device_printf(sc->dev, "firmware id: %s\n", hdr->version);
sscanf(sc->fw_version, "%d.%d.%d", &sc->fw_ver_major,
&sc->fw_ver_minor, &sc->fw_ver_tiny);
if (!(sc->fw_ver_major == MXGEFW_VERSION_MAJOR
&& sc->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
device_printf(sc->dev, "Found firmware version %s\n",
sc->fw_version);
device_printf(sc->dev, "Driver needs %d.%d\n",
MXGEFW_VERSION_MAJOR, MXGEFW_VERSION_MINOR);
return EINVAL;
}
return 0;
}
static void *
z_alloc(void *nil, u_int items, u_int size)
{
void *ptr;
- ptr = mallocarray(items, size, M_TEMP, M_NOWAIT);
+ ptr = malloc(items * size, M_TEMP, M_NOWAIT);
return ptr;
}
static void
z_free(void *nil, void *ptr)
{
free(ptr, M_TEMP);
}
static int
mxge_load_firmware_helper(mxge_softc_t *sc, uint32_t *limit)
{
z_stream zs;
char *inflate_buffer;
const struct firmware *fw;
const mcp_gen_header_t *hdr;
unsigned hdr_offset;
int status;
unsigned int i;
char dummy;
size_t fw_len;
fw = firmware_get(sc->fw_name);
if (fw == NULL) {
device_printf(sc->dev, "Could not find firmware image %s\n",
sc->fw_name);
return ENOENT;
}
/* setup zlib and decompress f/w */
bzero(&zs, sizeof (zs));
zs.zalloc = z_alloc;
zs.zfree = z_free;
status = inflateInit(&zs);
if (status != Z_OK) {
status = EIO;
goto abort_with_fw;
}
/* the uncompressed size is stored as the firmware version,
which would otherwise go unused */
fw_len = (size_t) fw->version;
inflate_buffer = malloc(fw_len, M_TEMP, M_NOWAIT);
if (inflate_buffer == NULL)
goto abort_with_zs;
zs.avail_in = fw->datasize;
zs.next_in = __DECONST(char *, fw->data);
zs.avail_out = fw_len;
zs.next_out = inflate_buffer;
status = inflate(&zs, Z_FINISH);
if (status != Z_STREAM_END) {
device_printf(sc->dev, "zlib %d\n", status);
status = EIO;
goto abort_with_buffer;
}
/* check id */
hdr_offset = htobe32(*(const uint32_t *)
(inflate_buffer + MCP_HEADER_PTR_OFFSET));
if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw_len) {
device_printf(sc->dev, "Bad firmware file");
status = EIO;
goto abort_with_buffer;
}
hdr = (const void*)(inflate_buffer + hdr_offset);
status = mxge_validate_firmware(sc, hdr);
if (status != 0)
goto abort_with_buffer;
/* Copy the inflated firmware to NIC SRAM. */
for (i = 0; i < fw_len; i += 256) {
mxge_pio_copy(sc->sram + MXGE_FW_OFFSET + i,
inflate_buffer + i,
min(256U, (unsigned)(fw_len - i)));
wmb();
dummy = *sc->sram;
wmb();
}
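/*
 * The read-back of *sc->sram above likely serves as a flush: PCIe reads
 * are not allowed to pass posted writes, so by the time the dummy read
 * completes, the preceding 256-byte PIO copy has landed in NIC SRAM
 * before the next chunk is written.  (Hedged reading of the loop above,
 * not a statement from the original author.)
 */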
*limit = fw_len;
status = 0;
abort_with_buffer:
free(inflate_buffer, M_TEMP);
abort_with_zs:
inflateEnd(&zs);
abort_with_fw:
firmware_put(fw, FIRMWARE_UNLOAD);
return status;
}
/*
* Enable or disable periodic RDMAs from the host to make certain
* chipsets resend dropped PCIe messages
*/
static void
mxge_dummy_rdma(mxge_softc_t *sc, int enable)
{
char buf_bytes[72];
volatile uint32_t *confirm;
volatile char *submit;
uint32_t *buf, dma_low, dma_high;
int i;
buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL);
/* clear confirmation addr */
confirm = (volatile uint32_t *)sc->cmd;
*confirm = 0;
wmb();
/* send an rdma command to the PCIe engine, and wait for the
response in the confirmation address. The firmware should
write a -1 there to indicate it is alive and well
*/
dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr);
dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr);
buf[0] = htobe32(dma_high); /* confirm addr MSW */
buf[1] = htobe32(dma_low); /* confirm addr LSW */
buf[2] = htobe32(0xffffffff); /* confirm data */
dma_low = MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr);
dma_high = MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr);
buf[3] = htobe32(dma_high); /* dummy addr MSW */
buf[4] = htobe32(dma_low); /* dummy addr LSW */
buf[5] = htobe32(enable); /* enable? */
submit = (volatile char *)(sc->sram + MXGEFW_BOOT_DUMMY_RDMA);
mxge_pio_copy(submit, buf, 64);
wmb();
DELAY(1000);
wmb();
i = 0;
while (*confirm != 0xffffffff && i < 20) {
DELAY(1000);
i++;
}
if (*confirm != 0xffffffff) {
device_printf(sc->dev, "dummy rdma %s failed (%p = 0x%x)",
(enable ? "enable" : "disable"), confirm,
*confirm);
}
return;
}
static int
mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data)
{
mcp_cmd_t *buf;
char buf_bytes[sizeof(*buf) + 8];
volatile mcp_cmd_response_t *response = sc->cmd;
volatile char *cmd_addr = sc->sram + MXGEFW_ETH_CMD;
uint32_t dma_low, dma_high;
int err, sleep_total = 0;
/* ensure buf is aligned to 8 bytes */
buf = (mcp_cmd_t *)((unsigned long)(buf_bytes + 7) & ~7UL);
buf->data0 = htobe32(data->data0);
buf->data1 = htobe32(data->data1);
buf->data2 = htobe32(data->data2);
buf->cmd = htobe32(cmd);
dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr);
dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr);
buf->response_addr.low = htobe32(dma_low);
buf->response_addr.high = htobe32(dma_high);
mtx_lock(&sc->cmd_mtx);
response->result = 0xffffffff;
wmb();
mxge_pio_copy((volatile void *)cmd_addr, buf, sizeof (*buf));
/* wait up to 20ms */
err = EAGAIN;
for (sleep_total = 0; sleep_total < 20; sleep_total++) {
bus_dmamap_sync(sc->cmd_dma.dmat,
sc->cmd_dma.map, BUS_DMASYNC_POSTREAD);
wmb();
switch (be32toh(response->result)) {
case 0:
data->data0 = be32toh(response->data);
err = 0;
break;
case 0xffffffff:
DELAY(1000);
break;
case MXGEFW_CMD_UNKNOWN:
err = ENOSYS;
break;
case MXGEFW_CMD_ERROR_UNALIGNED:
err = E2BIG;
break;
case MXGEFW_CMD_ERROR_BUSY:
err = EBUSY;
break;
case MXGEFW_CMD_ERROR_I2C_ABSENT:
err = ENXIO;
break;
default:
device_printf(sc->dev,
"mxge: command %d "
"failed, result = %d\n",
cmd, be32toh(response->result));
err = ENXIO;
break;
}
if (err != EAGAIN)
break;
}
if (err == EAGAIN)
device_printf(sc->dev, "mxge: command %d timed out"
"result = %d\n",
cmd, be32toh(response->result));
mtx_unlock(&sc->cmd_mtx);
return err;
}
static int
mxge_adopt_running_firmware(mxge_softc_t *sc)
{
struct mcp_gen_header *hdr;
const size_t bytes = sizeof (struct mcp_gen_header);
size_t hdr_offset;
int status;
/* find running firmware header */
hdr_offset = htobe32(*(volatile uint32_t *)
(sc->sram + MCP_HEADER_PTR_OFFSET));
if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > sc->sram_size) {
device_printf(sc->dev,
"Running firmware has bad header offset (%d)\n",
(int)hdr_offset);
return EIO;
}
/* copy header of running firmware from SRAM to host memory to
* validate firmware */
hdr = malloc(bytes, M_DEVBUF, M_NOWAIT);
if (hdr == NULL) {
device_printf(sc->dev, "could not malloc firmware hdr\n");
return ENOMEM;
}
bus_space_read_region_1(rman_get_bustag(sc->mem_res),
rman_get_bushandle(sc->mem_res),
hdr_offset, (char *)hdr, bytes);
status = mxge_validate_firmware(sc, hdr);
free(hdr, M_DEVBUF);
/*
* check to see if adopted firmware has bug where adopting
* it will cause broadcasts to be filtered unless the NIC
* is kept in ALLMULTI mode
*/
if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 &&
sc->fw_ver_tiny >= 4 && sc->fw_ver_tiny <= 11) {
sc->adopted_rx_filter_bug = 1;
device_printf(sc->dev, "Adopting fw %d.%d.%d: "
"working around rx filter bug\n",
sc->fw_ver_major, sc->fw_ver_minor,
sc->fw_ver_tiny);
}
return status;
}
static int
mxge_load_firmware(mxge_softc_t *sc, int adopt)
{
volatile uint32_t *confirm;
volatile char *submit;
char buf_bytes[72];
uint32_t *buf, size, dma_low, dma_high;
int status, i;
buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL);
size = sc->sram_size;
status = mxge_load_firmware_helper(sc, &size);
if (status) {
if (!adopt)
return status;
/* Try to use the currently running firmware, if
it is new enough */
status = mxge_adopt_running_firmware(sc);
if (status) {
device_printf(sc->dev,
"failed to adopt running firmware\n");
return status;
}
device_printf(sc->dev,
"Successfully adopted running firmware\n");
if (sc->tx_boundary == 4096) {
device_printf(sc->dev,
"Using firmware currently running on NIC"
". For optimal\n");
device_printf(sc->dev,
"performance consider loading optimized "
"firmware\n");
}
sc->fw_name = mxge_fw_unaligned;
sc->tx_boundary = 2048;
return 0;
}
/* clear confirmation addr */
confirm = (volatile uint32_t *)sc->cmd;
*confirm = 0;
wmb();
/* send a reload command to the bootstrap MCP, and wait for the
response in the confirmation address. The firmware should
write a -1 there to indicate it is alive and well
*/
dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr);
dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr);
buf[0] = htobe32(dma_high); /* confirm addr MSW */
buf[1] = htobe32(dma_low); /* confirm addr LSW */
buf[2] = htobe32(0xffffffff); /* confirm data */
/* FIX: All newest firmware should un-protect the bottom of
the sram before handoff. However, the very first interfaces
do not. Therefore the handoff copy must skip the first 8 bytes
*/
/* where the code starts*/
buf[3] = htobe32(MXGE_FW_OFFSET + 8);
buf[4] = htobe32(size - 8); /* length of code */
buf[5] = htobe32(8); /* where to copy to */
buf[6] = htobe32(0); /* where to jump to */
submit = (volatile char *)(sc->sram + MXGEFW_BOOT_HANDOFF);
mxge_pio_copy(submit, buf, 64);
wmb();
DELAY(1000);
wmb();
i = 0;
while (*confirm != 0xffffffff && i < 20) {
DELAY(1000*10);
i++;
bus_dmamap_sync(sc->cmd_dma.dmat,
sc->cmd_dma.map, BUS_DMASYNC_POSTREAD);
}
if (*confirm != 0xffffffff) {
device_printf(sc->dev,"handoff failed (%p = 0x%x)",
confirm, *confirm);
return ENXIO;
}
return 0;
}
static int
mxge_update_mac_address(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
uint8_t *addr = sc->mac_addr;
int status;
cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
| (addr[2] << 8) | addr[3]);
cmd.data1 = ((addr[4] << 8) | (addr[5]));
status = mxge_send_cmd(sc, MXGEFW_SET_MAC_ADDRESS, &cmd);
return status;
}
static int
mxge_change_pause(mxge_softc_t *sc, int pause)
{
mxge_cmd_t cmd;
int status;
if (pause)
status = mxge_send_cmd(sc, MXGEFW_ENABLE_FLOW_CONTROL,
&cmd);
else
status = mxge_send_cmd(sc, MXGEFW_DISABLE_FLOW_CONTROL,
&cmd);
if (status) {
device_printf(sc->dev, "Failed to set flow control mode\n");
return ENXIO;
}
sc->pause = pause;
return 0;
}
static void
mxge_change_promisc(mxge_softc_t *sc, int promisc)
{
mxge_cmd_t cmd;
int status;
if (mxge_always_promisc)
promisc = 1;
if (promisc)
status = mxge_send_cmd(sc, MXGEFW_ENABLE_PROMISC,
&cmd);
else
status = mxge_send_cmd(sc, MXGEFW_DISABLE_PROMISC,
&cmd);
if (status) {
device_printf(sc->dev, "Failed to set promisc mode\n");
}
}
static void
mxge_set_multicast_list(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
struct ifmultiaddr *ifma;
struct ifnet *ifp = sc->ifp;
int err;
/* This firmware is known to not support multicast */
if (!sc->fw_multicast_support)
return;
/* Disable multicast filtering while we play with the lists*/
err = mxge_send_cmd(sc, MXGEFW_ENABLE_ALLMULTI, &cmd);
if (err != 0) {
device_printf(sc->dev, "Failed MXGEFW_ENABLE_ALLMULTI,"
" error status: %d\n", err);
return;
}
if (sc->adopted_rx_filter_bug)
return;
if (ifp->if_flags & IFF_ALLMULTI)
/* request to disable multicast filtering, so quit here */
return;
/* Flush all the filters */
err = mxge_send_cmd(sc, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, &cmd);
if (err != 0) {
device_printf(sc->dev,
"Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
", error status: %d\n", err);
return;
}
/* Walk the multicast list, and add each address */
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
&cmd.data0, 4);
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr) + 4,
&cmd.data1, 2);
cmd.data0 = htonl(cmd.data0);
cmd.data1 = htonl(cmd.data1);
err = mxge_send_cmd(sc, MXGEFW_JOIN_MULTICAST_GROUP, &cmd);
if (err != 0) {
device_printf(sc->dev, "Failed "
"MXGEFW_JOIN_MULTICAST_GROUP, error status:"
"%d\t", err);
/* abort, leaving multicast filtering off */
if_maddr_runlock(ifp);
return;
}
}
if_maddr_runlock(ifp);
/* Enable multicast filtering */
err = mxge_send_cmd(sc, MXGEFW_DISABLE_ALLMULTI, &cmd);
if (err != 0) {
device_printf(sc->dev, "Failed MXGEFW_DISABLE_ALLMULTI"
", error status: %d\n", err);
}
}
static int
mxge_max_mtu(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
int status;
if (MJUMPAGESIZE - MXGEFW_PAD > MXGEFW_MAX_MTU)
return MXGEFW_MAX_MTU - MXGEFW_PAD;
/* try to set nbufs to see if we can
use virtually contiguous jumbos */
cmd.data0 = 0;
status = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS,
&cmd);
if (status == 0)
return MXGEFW_MAX_MTU - MXGEFW_PAD;
/* otherwise, we're limited to MJUMPAGESIZE */
return MJUMPAGESIZE - MXGEFW_PAD;
}
static int
mxge_reset(mxge_softc_t *sc, int interrupts_setup)
{
struct mxge_slice_state *ss;
mxge_rx_done_t *rx_done;
volatile uint32_t *irq_claim;
mxge_cmd_t cmd;
int slice, status;
/* try to send a reset command to the card to see if it
is alive */
memset(&cmd, 0, sizeof (cmd));
status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd);
if (status != 0) {
device_printf(sc->dev, "failed reset\n");
return ENXIO;
}
mxge_dummy_rdma(sc, 1);
/* set the intrq size */
cmd.data0 = sc->rx_ring_size;
status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd);
/*
* Even though we already know how many slices are supported
* via mxge_slice_probe(), MXGEFW_CMD_GET_MAX_RSS_QUEUES
* has magic side effects, and must be called after a reset.
* It must be called prior to calling any RSS related cmds,
* including assigning an interrupt queue for anything but
* slice 0. It must also be called *after*
* MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
* the firmware to compute offsets.
*/
if (sc->num_slices > 1) {
/* ask the maximum number of slices it supports */
status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
&cmd);
if (status != 0) {
device_printf(sc->dev,
"failed to get number of slices\n");
return status;
}
/*
* MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
* to setting up the interrupt queue DMA
*/
cmd.data0 = sc->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
#ifdef IFNET_BUF_RING
cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
#endif
status = mxge_send_cmd(sc, MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd);
if (status != 0) {
device_printf(sc->dev,
"failed to set number of slices\n");
return status;
}
}
if (interrupts_setup) {
/* Now exchange information about interrupts */
for (slice = 0; slice < sc->num_slices; slice++) {
rx_done = &sc->ss[slice].rx_done;
memset(rx_done->entry, 0, sc->rx_ring_size);
cmd.data0 = MXGE_LOWPART_TO_U32(rx_done->dma.bus_addr);
cmd.data1 = MXGE_HIGHPART_TO_U32(rx_done->dma.bus_addr);
cmd.data2 = slice;
status |= mxge_send_cmd(sc,
MXGEFW_CMD_SET_INTRQ_DMA,
&cmd);
}
}
status |= mxge_send_cmd(sc,
MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd);
sc->intr_coal_delay_ptr = (volatile uint32_t *)(sc->sram + cmd.data0);
status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd);
irq_claim = (volatile uint32_t *)(sc->sram + cmd.data0);
status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
&cmd);
sc->irq_deassert = (volatile uint32_t *)(sc->sram + cmd.data0);
if (status != 0) {
device_printf(sc->dev, "failed set interrupt parameters\n");
return status;
}
*sc->intr_coal_delay_ptr = htobe32(sc->intr_coal_delay);
/* run a DMA benchmark */
(void) mxge_dma_test(sc, MXGEFW_DMA_TEST);
for (slice = 0; slice < sc->num_slices; slice++) {
ss = &sc->ss[slice];
ss->irq_claim = irq_claim + (2 * slice);
/* reset mcp/driver shared state back to 0 */
ss->rx_done.idx = 0;
ss->rx_done.cnt = 0;
ss->tx.req = 0;
ss->tx.done = 0;
ss->tx.pkt_done = 0;
ss->tx.queue_active = 0;
ss->tx.activate = 0;
ss->tx.deactivate = 0;
ss->tx.wake = 0;
ss->tx.defrag = 0;
ss->tx.stall = 0;
ss->rx_big.cnt = 0;
ss->rx_small.cnt = 0;
ss->lc.lro_bad_csum = 0;
ss->lc.lro_queued = 0;
ss->lc.lro_flushed = 0;
if (ss->fw_stats != NULL) {
bzero(ss->fw_stats, sizeof *ss->fw_stats);
}
}
sc->rdma_tags_available = 15;
status = mxge_update_mac_address(sc);
mxge_change_promisc(sc, sc->ifp->if_flags & IFF_PROMISC);
mxge_change_pause(sc, sc->pause);
mxge_set_multicast_list(sc);
if (sc->throttle) {
cmd.data0 = sc->throttle;
if (mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR,
&cmd)) {
device_printf(sc->dev,
"can't enable throttle\n");
}
}
return status;
}
static int
mxge_change_throttle(SYSCTL_HANDLER_ARGS)
{
mxge_cmd_t cmd;
mxge_softc_t *sc;
int err;
unsigned int throttle;
sc = arg1;
throttle = sc->throttle;
err = sysctl_handle_int(oidp, &throttle, arg2, req);
if (err != 0) {
return err;
}
if (throttle == sc->throttle)
return 0;
if (throttle < MXGE_MIN_THROTTLE || throttle > MXGE_MAX_THROTTLE)
return EINVAL;
mtx_lock(&sc->driver_mtx);
cmd.data0 = throttle;
err = mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd);
if (err == 0)
sc->throttle = throttle;
mtx_unlock(&sc->driver_mtx);
return err;
}
static int
mxge_change_intr_coal(SYSCTL_HANDLER_ARGS)
{
mxge_softc_t *sc;
unsigned int intr_coal_delay;
int err;
sc = arg1;
intr_coal_delay = sc->intr_coal_delay;
err = sysctl_handle_int(oidp, &intr_coal_delay, arg2, req);
if (err != 0) {
return err;
}
if (intr_coal_delay == sc->intr_coal_delay)
return 0;
if (intr_coal_delay == 0 || intr_coal_delay > 1000*1000)
return EINVAL;
mtx_lock(&sc->driver_mtx);
*sc->intr_coal_delay_ptr = htobe32(intr_coal_delay);
sc->intr_coal_delay = intr_coal_delay;
mtx_unlock(&sc->driver_mtx);
return err;
}
static int
mxge_change_flow_control(SYSCTL_HANDLER_ARGS)
{
mxge_softc_t *sc;
unsigned int enabled;
int err;
sc = arg1;
enabled = sc->pause;
err = sysctl_handle_int(oidp, &enabled, arg2, req);
if (err != 0) {
return err;
}
if (enabled == sc->pause)
return 0;
mtx_lock(&sc->driver_mtx);
err = mxge_change_pause(sc, enabled);
mtx_unlock(&sc->driver_mtx);
return err;
}
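/*
 * Sysctl handler for the read-only firmware counters registered below:
 * arg1 points at a big-endian 32-bit value in the DMA'd stats block.
 * The handler byte-swaps it into arg2 and clears arg1, so
 * sysctl_handle_int() reports the swapped value rather than exporting
 * the raw big-endian word.
 */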
static int
mxge_handle_be32(SYSCTL_HANDLER_ARGS)
{
int err;
if (arg1 == NULL)
return EFAULT;
arg2 = be32toh(*(int *)arg1);
arg1 = NULL;
err = sysctl_handle_int(oidp, arg1, arg2, req);
return err;
}
static void
mxge_rem_sysctls(mxge_softc_t *sc)
{
struct mxge_slice_state *ss;
int slice;
if (sc->slice_sysctl_tree == NULL)
return;
for (slice = 0; slice < sc->num_slices; slice++) {
ss = &sc->ss[slice];
if (ss == NULL || ss->sysctl_tree == NULL)
continue;
sysctl_ctx_free(&ss->sysctl_ctx);
ss->sysctl_tree = NULL;
}
sysctl_ctx_free(&sc->slice_sysctl_ctx);
sc->slice_sysctl_tree = NULL;
}
static void
mxge_add_sysctls(mxge_softc_t *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *children;
mcp_irq_data_t *fw;
struct mxge_slice_state *ss;
int slice;
char slice_num[8];
ctx = device_get_sysctl_ctx(sc->dev);
children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
fw = sc->ss[0].fw_stats;
/* random information */
SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
"firmware_version",
CTLFLAG_RD, sc->fw_version,
0, "firmware version");
SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
"serial_number",
CTLFLAG_RD, sc->serial_number_string,
0, "serial number");
SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
"product_code",
CTLFLAG_RD, sc->product_code_string,
0, "product_code");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"pcie_link_width",
CTLFLAG_RD, &sc->link_width,
0, "tx_boundary");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_boundary",
CTLFLAG_RD, &sc->tx_boundary,
0, "tx_boundary");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"write_combine",
CTLFLAG_RD, &sc->wc,
0, "write combining PIO?");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"read_dma_MBs",
CTLFLAG_RD, &sc->read_dma,
0, "DMA Read speed in MB/s");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"write_dma_MBs",
CTLFLAG_RD, &sc->write_dma,
0, "DMA Write speed in MB/s");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"read_write_dma_MBs",
CTLFLAG_RD, &sc->read_write_dma,
0, "DMA concurrent Read/Write speed in MB/s");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"watchdog_resets",
CTLFLAG_RD, &sc->watchdog_resets,
0, "Number of times NIC was reset");
/* performance related tunables */
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"intr_coal_delay",
CTLTYPE_INT|CTLFLAG_RW, sc,
0, mxge_change_intr_coal,
"I", "interrupt coalescing delay in usecs");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"throttle",
CTLTYPE_INT|CTLFLAG_RW, sc,
0, mxge_change_throttle,
"I", "transmit throttling");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"flow_control_enabled",
CTLTYPE_INT|CTLFLAG_RW, sc,
0, mxge_change_flow_control,
"I", "interrupt coalescing delay in usecs");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"deassert_wait",
CTLFLAG_RW, &mxge_deassert_wait,
0, "Wait for IRQ line to go low in ihandler");
/* stats block from firmware is in network byte order.
Need to swap it */
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"link_up",
CTLTYPE_INT|CTLFLAG_RD, &fw->link_up,
0, mxge_handle_be32,
"I", "link up");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"rdma_tags_available",
CTLTYPE_INT|CTLFLAG_RD, &fw->rdma_tags_available,
0, mxge_handle_be32,
"I", "rdma_tags_available");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_bad_crc32",
CTLTYPE_INT|CTLFLAG_RD,
&fw->dropped_bad_crc32,
0, mxge_handle_be32,
"I", "dropped_bad_crc32");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_bad_phy",
CTLTYPE_INT|CTLFLAG_RD,
&fw->dropped_bad_phy,
0, mxge_handle_be32,
"I", "dropped_bad_phy");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_link_error_or_filtered",
CTLTYPE_INT|CTLFLAG_RD,
&fw->dropped_link_error_or_filtered,
0, mxge_handle_be32,
"I", "dropped_link_error_or_filtered");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_link_overflow",
CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_link_overflow,
0, mxge_handle_be32,
"I", "dropped_link_overflow");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_multicast_filtered",
CTLTYPE_INT|CTLFLAG_RD,
&fw->dropped_multicast_filtered,
0, mxge_handle_be32,
"I", "dropped_multicast_filtered");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_no_big_buffer",
CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_no_big_buffer,
0, mxge_handle_be32,
"I", "dropped_no_big_buffer");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_no_small_buffer",
CTLTYPE_INT|CTLFLAG_RD,
&fw->dropped_no_small_buffer,
0, mxge_handle_be32,
"I", "dropped_no_small_buffer");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_overrun",
CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_overrun,
0, mxge_handle_be32,
"I", "dropped_overrun");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_pause",
CTLTYPE_INT|CTLFLAG_RD,
&fw->dropped_pause,
0, mxge_handle_be32,
"I", "dropped_pause");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_runt",
CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_runt,
0, mxge_handle_be32,
"I", "dropped_runt");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dropped_unicast_filtered",
CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_unicast_filtered,
0, mxge_handle_be32,
"I", "dropped_unicast_filtered");
/* verbose printing? */
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"verbose",
CTLFLAG_RW, &mxge_verbose,
0, "verbose printing");
/* add counters exported for debugging from all slices */
sysctl_ctx_init(&sc->slice_sysctl_ctx);
sc->slice_sysctl_tree =
SYSCTL_ADD_NODE(&sc->slice_sysctl_ctx, children, OID_AUTO,
"slice", CTLFLAG_RD, 0, "");
for (slice = 0; slice < sc->num_slices; slice++) {
ss = &sc->ss[slice];
sysctl_ctx_init(&ss->sysctl_ctx);
ctx = &ss->sysctl_ctx;
children = SYSCTL_CHILDREN(sc->slice_sysctl_tree);
sprintf(slice_num, "%d", slice);
ss->sysctl_tree =
SYSCTL_ADD_NODE(ctx, children, OID_AUTO, slice_num,
CTLFLAG_RD, 0, "");
children = SYSCTL_CHILDREN(ss->sysctl_tree);
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"rx_small_cnt",
CTLFLAG_RD, &ss->rx_small.cnt,
0, "rx_small_cnt");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"rx_big_cnt",
CTLFLAG_RD, &ss->rx_big.cnt,
0, "rx_small_cnt");
SYSCTL_ADD_U64(ctx, children, OID_AUTO,
"lro_flushed", CTLFLAG_RD, &ss->lc.lro_flushed,
0, "number of lro merge queues flushed");
SYSCTL_ADD_U64(ctx, children, OID_AUTO,
"lro_bad_csum", CTLFLAG_RD, &ss->lc.lro_bad_csum,
0, "number of bad csums preventing LRO");
SYSCTL_ADD_U64(ctx, children, OID_AUTO,
"lro_queued", CTLFLAG_RD, &ss->lc.lro_queued,
0, "number of frames appended to lro merge"
"queues");
#ifndef IFNET_BUF_RING
/* only transmit from slice 0 for now */
if (slice > 0)
continue;
#endif
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_req",
CTLFLAG_RD, &ss->tx.req,
0, "tx_req");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_done",
CTLFLAG_RD, &ss->tx.done,
0, "tx_done");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_pkt_done",
CTLFLAG_RD, &ss->tx.pkt_done,
0, "tx_done");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_stall",
CTLFLAG_RD, &ss->tx.stall,
0, "tx_stall");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_wake",
CTLFLAG_RD, &ss->tx.wake,
0, "tx_wake");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_defrag",
CTLFLAG_RD, &ss->tx.defrag,
0, "tx_defrag");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_queue_active",
CTLFLAG_RD, &ss->tx.queue_active,
0, "tx_queue_active");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_activate",
CTLFLAG_RD, &ss->tx.activate,
0, "tx_activate");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_deactivate",
CTLFLAG_RD, &ss->tx.deactivate,
0, "tx_deactivate");
}
}
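/*
 * Illustrative usage (assumed paths, not taken from this file): the nodes
 * added above hang off the device's sysctl tree, so on a typical system
 * they would be read or tuned with something like
 *	sysctl dev.mxge.0.intr_coal_delay=50
 *	sysctl dev.mxge.0.slice.0.rx_small_cnt
 * where "0" stands in for the unit and slice numbers.
 */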
/* copy an array of mcp_kreq_ether_send_t's to the mcp. Copy
backwards one at a time and handle ring wraps */
static inline void
mxge_submit_req_backwards(mxge_tx_ring_t *tx,
mcp_kreq_ether_send_t *src, int cnt)
{
int idx, starting_slot;
starting_slot = tx->req;
while (cnt > 1) {
cnt--;
idx = (starting_slot + cnt) & tx->mask;
mxge_pio_copy(&tx->lanai[idx],
&src[cnt], sizeof(*src));
wmb();
}
}
/*
* copy an array of mcp_kreq_ether_send_t's to the mcp. Copy
* at most 32 bytes at a time, so as to avoid involving the software
* pio handler in the nic. We re-write the first segment's flags
* to mark them valid only after writing the entire chain
*/
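/*
 * Rough sketch of the ordering trick (hedged reading of the code below):
 * the first descriptor's flags byte is zeroed before the chain is
 * PIO-copied, so the NIC treats the slot as not-yet-valid; once every
 * descriptor is in place, the saved flags are restored by re-copying only
 * the last 32-bit word of the first 16-byte descriptor, which appears to
 * be where the flags field lives.
 */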
static inline void
mxge_submit_req(mxge_tx_ring_t *tx, mcp_kreq_ether_send_t *src,
int cnt)
{
int idx, i;
uint32_t *src_ints;
volatile uint32_t *dst_ints;
mcp_kreq_ether_send_t *srcp;
volatile mcp_kreq_ether_send_t *dstp, *dst;
uint8_t last_flags;
idx = tx->req & tx->mask;
last_flags = src->flags;
src->flags = 0;
wmb();
dst = dstp = &tx->lanai[idx];
srcp = src;
if ((idx + cnt) < tx->mask) {
for (i = 0; i < (cnt - 1); i += 2) {
mxge_pio_copy(dstp, srcp, 2 * sizeof(*src));
wmb(); /* force write every 32 bytes */
srcp += 2;
dstp += 2;
}
} else {
/* submit all but the first request, and ensure
that it is submitted below */
mxge_submit_req_backwards(tx, src, cnt);
i = 0;
}
if (i < cnt) {
/* submit the first request */
mxge_pio_copy(dstp, srcp, sizeof(*src));
wmb(); /* barrier before setting valid flag */
}
/* re-write the last 32-bits with the valid flags */
src->flags = last_flags;
src_ints = (uint32_t *)src;
src_ints+=3;
dst_ints = (volatile uint32_t *)dst;
dst_ints+=3;
*dst_ints = *src_ints;
tx->req += cnt;
wmb();
}
static int
mxge_parse_tx(struct mxge_slice_state *ss, struct mbuf *m,
struct mxge_pkt_info *pi)
{
struct ether_vlan_header *eh;
uint16_t etype;
int tso = m->m_pkthdr.csum_flags & (CSUM_TSO);
#if IFCAP_TSO6 && defined(INET6)
int nxt;
#endif
eh = mtod(m, struct ether_vlan_header *);
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
etype = ntohs(eh->evl_proto);
pi->ip_off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
} else {
etype = ntohs(eh->evl_encap_proto);
pi->ip_off = ETHER_HDR_LEN;
}
switch (etype) {
case ETHERTYPE_IP:
/*
* ensure ip header is in first mbuf, copy it to a
* scratch buffer if not
*/
pi->ip = (struct ip *)(m->m_data + pi->ip_off);
pi->ip6 = NULL;
if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip))) {
m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip),
ss->scratch);
pi->ip = (struct ip *)(ss->scratch + pi->ip_off);
}
pi->ip_hlen = pi->ip->ip_hl << 2;
if (!tso)
return 0;
if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen +
sizeof(struct tcphdr))) {
m_copydata(m, 0, pi->ip_off + pi->ip_hlen +
sizeof(struct tcphdr), ss->scratch);
pi->ip = (struct ip *)(ss->scratch + pi->ip_off);
}
pi->tcp = (struct tcphdr *)((char *)pi->ip + pi->ip_hlen);
break;
#if IFCAP_TSO6 && defined(INET6)
case ETHERTYPE_IPV6:
pi->ip6 = (struct ip6_hdr *)(m->m_data + pi->ip_off);
if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip6))) {
m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip6),
ss->scratch);
pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off);
}
nxt = 0;
pi->ip_hlen = ip6_lasthdr(m, pi->ip_off, IPPROTO_IPV6, &nxt);
pi->ip_hlen -= pi->ip_off;
if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP)
return EINVAL;
if (!tso)
return 0;
if (pi->ip_off + pi->ip_hlen > ss->sc->max_tso6_hlen)
return EINVAL;
if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen +
sizeof(struct tcphdr))) {
m_copydata(m, 0, pi->ip_off + pi->ip_hlen +
sizeof(struct tcphdr), ss->scratch);
pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off);
}
pi->tcp = (struct tcphdr *)((char *)pi->ip6 + pi->ip_hlen);
break;
#endif
default:
return EINVAL;
}
return 0;
}
#if IFCAP_TSO4
static void
mxge_encap_tso(struct mxge_slice_state *ss, struct mbuf *m,
int busdma_seg_cnt, struct mxge_pkt_info *pi)
{
mxge_tx_ring_t *tx;
mcp_kreq_ether_send_t *req;
bus_dma_segment_t *seg;
uint32_t low, high_swapped;
int len, seglen, cum_len, cum_len_next;
int next_is_first, chop, cnt, rdma_count, small;
uint16_t pseudo_hdr_offset, cksum_offset, mss, sum;
uint8_t flags, flags_next;
static int once;
mss = m->m_pkthdr.tso_segsz;
/* negative cum_len signifies to the
* send loop that we are still in the
* header portion of the TSO packet.
*/
cksum_offset = pi->ip_off + pi->ip_hlen;
cum_len = -(cksum_offset + (pi->tcp->th_off << 2));
/* TSO implies checksum offload on this hardware */
if (__predict_false((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) == 0)) {
/*
* If packet has full TCP csum, replace it with pseudo hdr
* sum that the NIC expects, otherwise the NIC will emit
* packets with bad TCP checksums.
*/
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
if (pi->ip6) {
#if (CSUM_TCP_IPV6 != 0) && defined(INET6)
m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6;
sum = in6_cksum_pseudo(pi->ip6,
m->m_pkthdr.len - cksum_offset,
IPPROTO_TCP, 0);
#endif
} else {
#ifdef INET
m->m_pkthdr.csum_flags |= CSUM_TCP;
sum = in_pseudo(pi->ip->ip_src.s_addr,
pi->ip->ip_dst.s_addr,
htons(IPPROTO_TCP + (m->m_pkthdr.len -
cksum_offset)));
#endif
}
m_copyback(m, offsetof(struct tcphdr, th_sum) +
cksum_offset, sizeof(sum), (caddr_t)&sum);
}
flags = MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST;
/* for TSO, pseudo_hdr_offset holds mss.
* The firmware figures out where to put
* the checksum by parsing the header. */
pseudo_hdr_offset = htobe16(mss);
if (pi->ip6) {
/*
* for IPv6 TSO, the "checksum offset" is re-purposed
* to store the TCP header len
*/
cksum_offset = (pi->tcp->th_off << 2);
}
tx = &ss->tx;
req = tx->req_list;
seg = tx->seg_list;
cnt = 0;
rdma_count = 0;
/* "rdma_count" is the number of RDMAs belonging to the
* current packet BEFORE the current send request. For
* non-TSO packets, this is equal to "count".
* For TSO packets, rdma_count needs to be reset
* to 0 after a segment cut.
*
* The rdma_count field of the send request is
* the number of RDMAs of the packet starting at
* that request. For TSO send requests with one or more cuts
* in the middle, this is the number of RDMAs starting
* after the last cut in the request. All previous
* segments before the last cut implicitly have 1 RDMA.
*
* Since the number of RDMAs is not known beforehand,
* it must be filled-in retroactively - after each
* segmentation cut or at the end of the entire packet.
*/
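/*
 * A rough way to read the loop below (hedged interpretation): rdma_count
 * is bumped once per emitted descriptor, and the back-patch
 * (req - rdma_count)->rdma_count = rdma_count + 1 at the top of the inner
 * loop always lands on the descriptor that opened the current RDMA group.
 * The -(chop | next_is_first) trick resets the counter whenever a segment
 * cut or a new "first" descriptor starts a new group, and the final patch
 * after the loop covers the trailing group.
 */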
while (busdma_seg_cnt) {
/* Break the busdma segment up into pieces*/
low = MXGE_LOWPART_TO_U32(seg->ds_addr);
high_swapped = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr));
len = seg->ds_len;
while (len) {
flags_next = flags & ~MXGEFW_FLAGS_FIRST;
seglen = len;
cum_len_next = cum_len + seglen;
(req-rdma_count)->rdma_count = rdma_count + 1;
if (__predict_true(cum_len >= 0)) {
/* payload */
chop = (cum_len_next > mss);
cum_len_next = cum_len_next % mss;
next_is_first = (cum_len_next == 0);
flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
flags_next |= next_is_first *
MXGEFW_FLAGS_FIRST;
rdma_count |= -(chop | next_is_first);
rdma_count += chop & !next_is_first;
} else if (cum_len_next >= 0) {
/* header ends */
rdma_count = -1;
cum_len_next = 0;
seglen = -cum_len;
small = (mss <= MXGEFW_SEND_SMALL_SIZE);
flags_next = MXGEFW_FLAGS_TSO_PLD |
MXGEFW_FLAGS_FIRST |
(small * MXGEFW_FLAGS_SMALL);
}
req->addr_high = high_swapped;
req->addr_low = htobe32(low);
req->pseudo_hdr_offset = pseudo_hdr_offset;
req->pad = 0;
req->rdma_count = 1;
req->length = htobe16(seglen);
req->cksum_offset = cksum_offset;
req->flags = flags | ((cum_len & 1) *
MXGEFW_FLAGS_ALIGN_ODD);
low += seglen;
len -= seglen;
cum_len = cum_len_next;
flags = flags_next;
req++;
cnt++;
rdma_count++;
if (cksum_offset != 0 && !pi->ip6) {
if (__predict_false(cksum_offset > seglen))
cksum_offset -= seglen;
else
cksum_offset = 0;
}
if (__predict_false(cnt > tx->max_desc))
goto drop;
}
busdma_seg_cnt--;
seg++;
}
(req-rdma_count)->rdma_count = rdma_count;
do {
req--;
req->flags |= MXGEFW_FLAGS_TSO_LAST;
} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | MXGEFW_FLAGS_FIRST)));
tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1;
mxge_submit_req(tx, tx->req_list, cnt);
#ifdef IFNET_BUF_RING
if ((ss->sc->num_slices > 1) && tx->queue_active == 0) {
/* tell the NIC to start polling this slice */
*tx->send_go = 1;
tx->queue_active = 1;
tx->activate++;
wmb();
}
#endif
return;
drop:
bus_dmamap_unload(tx->dmat, tx->info[tx->req & tx->mask].map);
m_freem(m);
ss->oerrors++;
if (!once) {
printf("tx->max_desc exceeded via TSO!\n");
printf("mss = %d, %ld, %d!\n", mss,
(long)seg - (long)tx->seg_list, tx->max_desc);
once = 1;
}
return;
}
#endif /* IFCAP_TSO4 */
#ifdef MXGE_NEW_VLAN_API
/*
* We reproduce the software vlan tag insertion from
* net/if_vlan.c:vlan_start() here so that we can advertise "hardware"
* vlan tag insertion. We need to advertise this in order to have the
* vlan interface respect our csum offload flags.
*/
static struct mbuf *
mxge_vlan_tag_insert(struct mbuf *m)
{
struct ether_vlan_header *evl;
M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
if (__predict_false(m == NULL))
return NULL;
if (m->m_len < sizeof(*evl)) {
m = m_pullup(m, sizeof(*evl));
if (__predict_false(m == NULL))
return NULL;
}
/*
* Transform the Ethernet header into an Ethernet header
* with 802.1Q encapsulation.
*/
evl = mtod(m, struct ether_vlan_header *);
bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
(char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
evl->evl_tag = htons(m->m_pkthdr.ether_vtag);
m->m_flags &= ~M_VLANTAG;
return m;
}
#endif /* MXGE_NEW_VLAN_API */
static void
mxge_encap(struct mxge_slice_state *ss, struct mbuf *m)
{
struct mxge_pkt_info pi = {0,0,0,0};
mxge_softc_t *sc;
mcp_kreq_ether_send_t *req;
bus_dma_segment_t *seg;
struct mbuf *m_tmp;
struct ifnet *ifp;
mxge_tx_ring_t *tx;
int cnt, cum_len, err, i, idx, odd_flag;
uint16_t pseudo_hdr_offset;
uint8_t flags, cksum_offset;
sc = ss->sc;
ifp = sc->ifp;
tx = &ss->tx;
#ifdef MXGE_NEW_VLAN_API
if (m->m_flags & M_VLANTAG) {
m = mxge_vlan_tag_insert(m);
if (__predict_false(m == NULL))
goto drop_without_m;
}
#endif
if (m->m_pkthdr.csum_flags &
(CSUM_TSO | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) {
if (mxge_parse_tx(ss, m, &pi))
goto drop;
}
/* (try to) map the frame for DMA */
idx = tx->req & tx->mask;
err = bus_dmamap_load_mbuf_sg(tx->dmat, tx->info[idx].map,
m, tx->seg_list, &cnt,
BUS_DMA_NOWAIT);
if (__predict_false(err == EFBIG)) {
/* Too many segments in the chain. Try
to defrag */
m_tmp = m_defrag(m, M_NOWAIT);
if (m_tmp == NULL) {
goto drop;
}
ss->tx.defrag++;
m = m_tmp;
err = bus_dmamap_load_mbuf_sg(tx->dmat,
tx->info[idx].map,
m, tx->seg_list, &cnt,
BUS_DMA_NOWAIT);
}
if (__predict_false(err != 0)) {
device_printf(sc->dev, "bus_dmamap_load_mbuf_sg returned %d"
" packet len = %d\n", err, m->m_pkthdr.len);
goto drop;
}
bus_dmamap_sync(tx->dmat, tx->info[idx].map,
BUS_DMASYNC_PREWRITE);
tx->info[idx].m = m;
#if IFCAP_TSO4
/* TSO is different enough, we handle it in another routine */
if (m->m_pkthdr.csum_flags & (CSUM_TSO)) {
mxge_encap_tso(ss, m, cnt, &pi);
return;
}
#endif
req = tx->req_list;
cksum_offset = 0;
pseudo_hdr_offset = 0;
flags = MXGEFW_FLAGS_NO_TSO;
/* checksum offloading? */
if (m->m_pkthdr.csum_flags &
(CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) {
/* ensure ip header is in first mbuf, copy
it to a scratch buffer if not */
cksum_offset = pi.ip_off + pi.ip_hlen;
pseudo_hdr_offset = cksum_offset + m->m_pkthdr.csum_data;
pseudo_hdr_offset = htobe16(pseudo_hdr_offset);
req->cksum_offset = cksum_offset;
flags |= MXGEFW_FLAGS_CKSUM;
odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
} else {
odd_flag = 0;
}
if (m->m_pkthdr.len < MXGEFW_SEND_SMALL_SIZE)
flags |= MXGEFW_FLAGS_SMALL;
/* convert segments into a request list */
cum_len = 0;
seg = tx->seg_list;
req->flags = MXGEFW_FLAGS_FIRST;
for (i = 0; i < cnt; i++) {
req->addr_low =
htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr));
req->addr_high =
htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr));
req->length = htobe16(seg->ds_len);
req->cksum_offset = cksum_offset;
if (cksum_offset > seg->ds_len)
cksum_offset -= seg->ds_len;
else
cksum_offset = 0;
req->pseudo_hdr_offset = pseudo_hdr_offset;
req->pad = 0; /* complete solid 16-byte block */
req->rdma_count = 1;
req->flags |= flags | ((cum_len & 1) * odd_flag);
cum_len += seg->ds_len;
seg++;
req++;
req->flags = 0;
}
req--;
/* pad runts to 60 bytes */
if (cum_len < 60) {
req++;
req->addr_low =
htobe32(MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr));
req->addr_high =
htobe32(MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr));
req->length = htobe16(60 - cum_len);
req->cksum_offset = 0;
req->pseudo_hdr_offset = pseudo_hdr_offset;
req->pad = 0; /* complete solid 16-byte block */
req->rdma_count = 1;
req->flags |= flags | ((cum_len & 1) * odd_flag);
cnt++;
}
tx->req_list[0].rdma_count = cnt;
#if 0
/* print what the firmware will see */
for (i = 0; i < cnt; i++) {
printf("%d: addr: 0x%x 0x%x len:%d pso%d,"
"cso:%d, flags:0x%x, rdma:%d\n",
i, (int)ntohl(tx->req_list[i].addr_high),
(int)ntohl(tx->req_list[i].addr_low),
(int)ntohs(tx->req_list[i].length),
(int)ntohs(tx->req_list[i].pseudo_hdr_offset),
tx->req_list[i].cksum_offset, tx->req_list[i].flags,
tx->req_list[i].rdma_count);
}
printf("--------------\n");
#endif
tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1;
mxge_submit_req(tx, tx->req_list, cnt);
#ifdef IFNET_BUF_RING
if ((ss->sc->num_slices > 1) && tx->queue_active == 0) {
/* tell the NIC to start polling this slice */
*tx->send_go = 1;
tx->queue_active = 1;
tx->activate++;
wmb();
}
#endif
return;
drop:
m_freem(m);
drop_without_m:
ss->oerrors++;
return;
}
#ifdef IFNET_BUF_RING
static void
mxge_qflush(struct ifnet *ifp)
{
mxge_softc_t *sc = ifp->if_softc;
mxge_tx_ring_t *tx;
struct mbuf *m;
int slice;
for (slice = 0; slice < sc->num_slices; slice++) {
tx = &sc->ss[slice].tx;
mtx_lock(&tx->mtx);
while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
m_freem(m);
mtx_unlock(&tx->mtx);
}
if_qflush(ifp);
}
static inline void
mxge_start_locked(struct mxge_slice_state *ss)
{
mxge_softc_t *sc;
struct mbuf *m;
struct ifnet *ifp;
mxge_tx_ring_t *tx;
sc = ss->sc;
ifp = sc->ifp;
tx = &ss->tx;
while ((tx->mask - (tx->req - tx->done)) > tx->max_desc) {
m = drbr_dequeue(ifp, tx->br);
if (m == NULL) {
return;
}
/* let BPF see it */
BPF_MTAP(ifp, m);
/* give it to the nic */
mxge_encap(ss, m);
}
/* ran out of transmit slots */
if (((ss->if_drv_flags & IFF_DRV_OACTIVE) == 0)
&& (!drbr_empty(ifp, tx->br))) {
ss->if_drv_flags |= IFF_DRV_OACTIVE;
tx->stall++;
}
}
static int
mxge_transmit_locked(struct mxge_slice_state *ss, struct mbuf *m)
{
mxge_softc_t *sc;
struct ifnet *ifp;
mxge_tx_ring_t *tx;
int err;
sc = ss->sc;
ifp = sc->ifp;
tx = &ss->tx;
if ((ss->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING) {
err = drbr_enqueue(ifp, tx->br, m);
return (err);
}
if (!drbr_needs_enqueue(ifp, tx->br) &&
((tx->mask - (tx->req - tx->done)) > tx->max_desc)) {
/* let BPF see it */
BPF_MTAP(ifp, m);
/* give it to the nic */
mxge_encap(ss, m);
} else if ((err = drbr_enqueue(ifp, tx->br, m)) != 0) {
return (err);
}
if (!drbr_empty(ifp, tx->br))
mxge_start_locked(ss);
return (0);
}
static int
mxge_transmit(struct ifnet *ifp, struct mbuf *m)
{
mxge_softc_t *sc = ifp->if_softc;
struct mxge_slice_state *ss;
mxge_tx_ring_t *tx;
int err = 0;
int slice;
slice = m->m_pkthdr.flowid;
slice &= (sc->num_slices - 1); /* num_slices always power of 2 */
ss = &sc->ss[slice];
tx = &ss->tx;
if (mtx_trylock(&tx->mtx)) {
err = mxge_transmit_locked(ss, m);
mtx_unlock(&tx->mtx);
} else {
err = drbr_enqueue(ifp, tx->br, m);
}
return (err);
}
#else
static inline void
mxge_start_locked(struct mxge_slice_state *ss)
{
mxge_softc_t *sc;
struct mbuf *m;
struct ifnet *ifp;
mxge_tx_ring_t *tx;
sc = ss->sc;
ifp = sc->ifp;
tx = &ss->tx;
while ((tx->mask - (tx->req - tx->done)) > tx->max_desc) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
if (m == NULL) {
return;
}
/* let BPF see it */
BPF_MTAP(ifp, m);
/* give it to the nic */
mxge_encap(ss, m);
}
/* ran out of transmit slots */
if ((sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE;
tx->stall++;
}
}
#endif
static void
mxge_start(struct ifnet *ifp)
{
mxge_softc_t *sc = ifp->if_softc;
struct mxge_slice_state *ss;
/* only use the first slice for now */
ss = &sc->ss[0];
mtx_lock(&ss->tx.mtx);
mxge_start_locked(ss);
mtx_unlock(&ss->tx.mtx);
}
/*
* copy an array of mcp_kreq_ether_recv_t's to the mcp. Copy
* at most 32 bytes at a time, so as to avoid involving the software
* pio handler in the nic. We re-write the first segment's low
* DMA address to mark it valid only after we write the entire chunk
* in a burst
*/
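/*
 * Concretely (hedged reading of the routine below): addr_low of the first
 * receive descriptor is temporarily set to 0xffffffff so the NIC ignores
 * the batch, the eight descriptors are copied in two 32-byte bursts, and
 * only then is the real low address written to the NIC, making the whole
 * group visible at once.
 */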
static inline void
mxge_submit_8rx(volatile mcp_kreq_ether_recv_t *dst,
mcp_kreq_ether_recv_t *src)
{
uint32_t low;
low = src->addr_low;
src->addr_low = 0xffffffff;
mxge_pio_copy(dst, src, 4 * sizeof (*src));
wmb();
mxge_pio_copy(dst + 4, src + 4, 4 * sizeof (*src));
wmb();
src->addr_low = low;
dst->addr_low = low;
wmb();
}
static int
mxge_get_buf_small(struct mxge_slice_state *ss, bus_dmamap_t map, int idx)
{
bus_dma_segment_t seg;
struct mbuf *m;
mxge_rx_ring_t *rx = &ss->rx_small;
int cnt, err;
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
rx->alloc_fail++;
err = ENOBUFS;
goto done;
}
m->m_len = MHLEN;
err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m,
&seg, &cnt, BUS_DMA_NOWAIT);
if (err != 0) {
m_free(m);
goto done;
}
rx->info[idx].m = m;
rx->shadow[idx].addr_low =
htobe32(MXGE_LOWPART_TO_U32(seg.ds_addr));
rx->shadow[idx].addr_high =
htobe32(MXGE_HIGHPART_TO_U32(seg.ds_addr));
done:
if ((idx & 7) == 7)
mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]);
return err;
}
static int
mxge_get_buf_big(struct mxge_slice_state *ss, bus_dmamap_t map, int idx)
{
bus_dma_segment_t seg[3];
struct mbuf *m;
mxge_rx_ring_t *rx = &ss->rx_big;
int cnt, err, i;
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx->cl_size);
if (m == NULL) {
rx->alloc_fail++;
err = ENOBUFS;
goto done;
}
m->m_len = rx->mlen;
err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m,
seg, &cnt, BUS_DMA_NOWAIT);
if (err != 0) {
m_free(m);
goto done;
}
rx->info[idx].m = m;
rx->shadow[idx].addr_low =
htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr));
rx->shadow[idx].addr_high =
htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr));
#if MXGE_VIRT_JUMBOS
for (i = 1; i < cnt; i++) {
rx->shadow[idx + i].addr_low =
htobe32(MXGE_LOWPART_TO_U32(seg[i].ds_addr));
rx->shadow[idx + i].addr_high =
htobe32(MXGE_HIGHPART_TO_U32(seg[i].ds_addr));
}
#endif
done:
for (i = 0; i < rx->nbufs; i++) {
if ((idx & 7) == 7) {
mxge_submit_8rx(&rx->lanai[idx - 7],
&rx->shadow[idx - 7]);
}
idx++;
}
return err;
}
#ifdef INET6
static uint16_t
mxge_csum_generic(uint16_t *raw, int len)
{
uint32_t csum;
csum = 0;
while (len > 0) {
csum += *raw;
raw++;
len -= 2;
}
csum = (csum >> 16) + (csum & 0xffff);
csum = (csum >> 16) + (csum & 0xffff);
return (uint16_t)csum;
}
static inline uint16_t
mxge_rx_csum6(void *p, struct mbuf *m, uint32_t csum)
{
uint32_t partial;
int nxt, cksum_offset;
struct ip6_hdr *ip6 = p;
uint16_t c;
nxt = ip6->ip6_nxt;
cksum_offset = sizeof (*ip6) + ETHER_HDR_LEN;
if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) {
cksum_offset = ip6_lasthdr(m, ETHER_HDR_LEN,
IPPROTO_IPV6, &nxt);
if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP)
return (1);
}
/*
* IPv6 headers do not contain a checksum, and hence
* do not checksum to zero, so they don't "fall out"
* of the partial checksum calculation like IPv4
* headers do. We need to fix the partial checksum by
* subtracting the checksum of the IPv6 header.
*/
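/*
 * The "csum += ~partial" sequence below is a 1's-complement subtraction:
 * adding the complement of a value subtracts it, the (csum < ~partial)
 * term re-adds the end-around carry, and the two fold lines reduce the
 * 32-bit accumulator back down to 16 bits.
 */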
partial = mxge_csum_generic((uint16_t *)ip6, cksum_offset -
ETHER_HDR_LEN);
csum += ~partial;
csum += (csum < ~partial);
csum = (csum >> 16) + (csum & 0xFFFF);
csum = (csum >> 16) + (csum & 0xFFFF);
c = in6_cksum_pseudo(ip6, m->m_pkthdr.len - cksum_offset, nxt,
csum);
c ^= 0xffff;
return (c);
}
#endif /* INET6 */
/*
* Myri10GE hardware checksums are not valid if the sender
* padded the frame with non-zero padding. This is because
* the firmware just does a simple 16-bit 1s complement
* checksum across the entire frame, excluding the first 14
* bytes. It is best to simply check the checksum and
* tell the stack about it only if the checksum is good
*/
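/*
 * Hedged summary of the check performed below: for IPv4, a correct IP
 * header is checksum-neutral (it sums to the 1's complement of zero), so
 * the firmware's partial sum over the frame plus the TCP/UDP pseudo-header
 * computed here should come out to 0xffff when the payload checksum is
 * good; after the final c ^= 0xffff, a return value of 0 means "valid".
 */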
static inline uint16_t
mxge_rx_csum(struct mbuf *m, int csum)
{
struct ether_header *eh;
#ifdef INET
struct ip *ip;
#endif
#if defined(INET) || defined(INET6)
int cap = m->m_pkthdr.rcvif->if_capenable;
#endif
uint16_t c, etype;
eh = mtod(m, struct ether_header *);
etype = ntohs(eh->ether_type);
switch (etype) {
#ifdef INET
case ETHERTYPE_IP:
if ((cap & IFCAP_RXCSUM) == 0)
return (1);
ip = (struct ip *)(eh + 1);
if (ip->ip_p != IPPROTO_TCP && ip->ip_p != IPPROTO_UDP)
return (1);
c = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
htonl(ntohs(csum) + ntohs(ip->ip_len) -
(ip->ip_hl << 2) + ip->ip_p));
c ^= 0xffff;
break;
#endif
#ifdef INET6
case ETHERTYPE_IPV6:
if ((cap & IFCAP_RXCSUM_IPV6) == 0)
return (1);
c = mxge_rx_csum6((eh + 1), m, csum);
break;
#endif
default:
c = 1;
}
return (c);
}
static void
mxge_vlan_tag_remove(struct mbuf *m, uint32_t *csum)
{
struct ether_vlan_header *evl;
struct ether_header *eh;
uint32_t partial;
evl = mtod(m, struct ether_vlan_header *);
eh = mtod(m, struct ether_header *);
/*
* fix checksum by subtracting ETHER_VLAN_ENCAP_LEN bytes
* after what the firmware thought was the end of the ethernet
* header.
*/
/* put checksum into host byte order */
*csum = ntohs(*csum);
partial = ntohl(*(uint32_t *)(mtod(m, char *) + ETHER_HDR_LEN));
(*csum) += ~partial;
(*csum) += ((*csum) < ~partial);
(*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF);
(*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF);
/* restore checksum to network byte order;
later consumers expect this */
*csum = htons(*csum);
/* save the tag */
#ifdef MXGE_NEW_VLAN_API
m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
#else
{
struct m_tag *mtag;
mtag = m_tag_alloc(MTAG_VLAN, MTAG_VLAN_TAG, sizeof(u_int),
M_NOWAIT);
if (mtag == NULL)
return;
VLAN_TAG_VALUE(mtag) = ntohs(evl->evl_tag);
m_tag_prepend(m, mtag);
}
#endif
m->m_flags |= M_VLANTAG;
/*
* Remove the 802.1q header by copying the Ethernet
* addresses over it and adjusting the beginning of
* the data in the mbuf. The encapsulated Ethernet
* type field is already in place.
*/
bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
ETHER_HDR_LEN - ETHER_TYPE_LEN);
m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
static inline void
mxge_rx_done_big(struct mxge_slice_state *ss, uint32_t len,
uint32_t csum, int lro)
{
mxge_softc_t *sc;
struct ifnet *ifp;
struct mbuf *m;
struct ether_header *eh;
mxge_rx_ring_t *rx;
bus_dmamap_t old_map;
int idx;
sc = ss->sc;
ifp = sc->ifp;
rx = &ss->rx_big;
idx = rx->cnt & rx->mask;
rx->cnt += rx->nbufs;
/* save a pointer to the received mbuf */
m = rx->info[idx].m;
/* try to replace the received mbuf */
if (mxge_get_buf_big(ss, rx->extra_map, idx)) {
/* drop the frame -- the old mbuf is re-cycled */
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
/* unmap the received buffer */
old_map = rx->info[idx].map;
bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rx->dmat, old_map);
/* swap the bus_dmamap_t's */
rx->info[idx].map = rx->extra_map;
rx->extra_map = old_map;
/* mcp implicitly skips 1st 2 bytes so that packet is properly
* aligned */
m->m_data += MXGEFW_PAD;
m->m_pkthdr.rcvif = ifp;
m->m_len = m->m_pkthdr.len = len;
ss->ipackets++;
eh = mtod(m, struct ether_header *);
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
mxge_vlan_tag_remove(m, &csum);
}
/* flowid only valid if RSS hashing is enabled */
if (sc->num_slices > 1) {
m->m_pkthdr.flowid = (ss - sc->ss);
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
}
/* if the checksum is valid, mark it in the mbuf header */
if ((ifp->if_capenable & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) &&
(0 == mxge_rx_csum(m, csum))) {
/* Tell the stack that the checksum is good */
m->m_pkthdr.csum_data = 0xffff;
m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR |
CSUM_DATA_VALID;
#if defined(INET) || defined (INET6)
if (lro && (0 == tcp_lro_rx(&ss->lc, m, 0)))
return;
#endif
}
/* pass the frame up the stack */
(*ifp->if_input)(ifp, m);
}
static inline void
mxge_rx_done_small(struct mxge_slice_state *ss, uint32_t len,
uint32_t csum, int lro)
{
mxge_softc_t *sc;
struct ifnet *ifp;
struct ether_header *eh;
struct mbuf *m;
mxge_rx_ring_t *rx;
bus_dmamap_t old_map;
int idx;
sc = ss->sc;
ifp = sc->ifp;
rx = &ss->rx_small;
idx = rx->cnt & rx->mask;
rx->cnt++;
/* save a pointer to the received mbuf */
m = rx->info[idx].m;
/* try to replace the received mbuf */
if (mxge_get_buf_small(ss, rx->extra_map, idx)) {
/* drop the frame -- the old mbuf is re-cycled */
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
/* unmap the received buffer */
old_map = rx->info[idx].map;
bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rx->dmat, old_map);
/* swap the bus_dmamap_t's */
rx->info[idx].map = rx->extra_map;
rx->extra_map = old_map;
/* mcp implicitly skips 1st 2 bytes so that packet is properly
* aligned */
m->m_data += MXGEFW_PAD;
m->m_pkthdr.rcvif = ifp;
m->m_len = m->m_pkthdr.len = len;
ss->ipackets++;
eh = mtod(m, struct ether_header *);
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
mxge_vlan_tag_remove(m, &csum);
}
/* flowid only valid if RSS hashing is enabled */
if (sc->num_slices > 1) {
m->m_pkthdr.flowid = (ss - sc->ss);
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
}
/* if the checksum is valid, mark it in the mbuf header */
if ((ifp->if_capenable & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) &&
(0 == mxge_rx_csum(m, csum))) {
/* Tell the stack that the checksum is good */
m->m_pkthdr.csum_data = 0xffff;
m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR |
CSUM_DATA_VALID;
#if defined(INET) || defined (INET6)
if (lro && (0 == tcp_lro_rx(&ss->lc, m, csum)))
return;
#endif
}
/* pass the frame up the stack */
(*ifp->if_input)(ifp, m);
}
static inline void
mxge_clean_rx_done(struct mxge_slice_state *ss)
{
mxge_rx_done_t *rx_done = &ss->rx_done;
int limit = 0;
uint16_t length;
uint16_t checksum;
int lro;
lro = ss->sc->ifp->if_capenable & IFCAP_LRO;
while (rx_done->entry[rx_done->idx].length != 0) {
length = ntohs(rx_done->entry[rx_done->idx].length);
rx_done->entry[rx_done->idx].length = 0;
checksum = rx_done->entry[rx_done->idx].checksum;
if (length <= (MHLEN - MXGEFW_PAD))
mxge_rx_done_small(ss, length, checksum, lro);
else
mxge_rx_done_big(ss, length, checksum, lro);
rx_done->cnt++;
rx_done->idx = rx_done->cnt & rx_done->mask;
/* limit potential for livelock */
if (__predict_false(++limit > rx_done->mask / 2))
break;
}
#if defined(INET) || defined (INET6)
tcp_lro_flush_all(&ss->lc);
#endif
}
static inline void
mxge_tx_done(struct mxge_slice_state *ss, uint32_t mcp_idx)
{
struct ifnet *ifp;
mxge_tx_ring_t *tx;
struct mbuf *m;
bus_dmamap_t map;
int idx;
int *flags;
tx = &ss->tx;
ifp = ss->sc->ifp;
while (tx->pkt_done != mcp_idx) {
idx = tx->done & tx->mask;
tx->done++;
m = tx->info[idx].m;
/* mbuf and DMA map only attached to the first
segment per-mbuf */
if (m != NULL) {
ss->obytes += m->m_pkthdr.len;
if (m->m_flags & M_MCAST)
ss->omcasts++;
ss->opackets++;
tx->info[idx].m = NULL;
map = tx->info[idx].map;
bus_dmamap_unload(tx->dmat, map);
m_freem(m);
}
if (tx->info[idx].flag) {
tx->info[idx].flag = 0;
tx->pkt_done++;
}
}
/* If we have space, clear IFF_OACTIVE to tell the stack that
it's OK to send packets */
#ifdef IFNET_BUF_RING
flags = &ss->if_drv_flags;
#else
flags = &ifp->if_drv_flags;
#endif
mtx_lock(&ss->tx.mtx);
if ((*flags) & IFF_DRV_OACTIVE &&
tx->req - tx->done < (tx->mask + 1)/4) {
*(flags) &= ~IFF_DRV_OACTIVE;
ss->tx.wake++;
mxge_start_locked(ss);
}
#ifdef IFNET_BUF_RING
if ((ss->sc->num_slices > 1) && (tx->req == tx->done)) {
/* let the NIC stop polling this queue, since there
* are no more transmits pending */
if (tx->req == tx->done) {
*tx->send_stop = 1;
tx->queue_active = 0;
tx->deactivate++;
wmb();
}
}
#endif
mtx_unlock(&ss->tx.mtx);
}
static struct mxge_media_type mxge_xfp_media_types[] =
{
{IFM_10G_CX4, 0x7f, "10GBASE-CX4 (module)"},
{IFM_10G_SR, (1 << 7), "10GBASE-SR"},
{IFM_10G_LR, (1 << 6), "10GBASE-LR"},
{0, (1 << 5), "10GBASE-ER"},
{IFM_10G_LRM, (1 << 4), "10GBASE-LRM"},
{0, (1 << 3), "10GBASE-SW"},
{0, (1 << 2), "10GBASE-LW"},
{0, (1 << 1), "10GBASE-EW"},
{0, (1 << 0), "Reserved"}
};
static struct mxge_media_type mxge_sfp_media_types[] =
{
{IFM_10G_TWINAX, 0, "10GBASE-Twinax"},
{0, (1 << 7), "Reserved"},
{IFM_10G_LRM, (1 << 6), "10GBASE-LRM"},
{IFM_10G_LR, (1 << 5), "10GBASE-LR"},
{IFM_10G_SR, (1 << 4), "10GBASE-SR"},
{IFM_10G_TWINAX,(1 << 0), "10GBASE-Twinax"}
};
static void
mxge_media_set(mxge_softc_t *sc, int media_type)
{
ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | media_type,
0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | media_type);
sc->current_media = media_type;
sc->media.ifm_media = sc->media.ifm_cur->ifm_media;
}
static void
mxge_media_init(mxge_softc_t *sc)
{
char *ptr;
int i;
ifmedia_removeall(&sc->media);
mxge_media_set(sc, IFM_AUTO);
/*
* parse the product code to determine the interface type
* (CX4, XFP, Quad Ribbon Fiber) by looking at the character
* after the 3rd dash in the driver's cached copy of the
* EEPROM's product code string.
*/
ptr = sc->product_code_string;
if (ptr == NULL) {
device_printf(sc->dev, "Missing product code\n");
return;
}
for (i = 0; i < 3; i++, ptr++) {
ptr = strchr(ptr, '-');
if (ptr == NULL) {
device_printf(sc->dev,
"only %d dashes in PC?!?\n", i);
return;
}
}
if (*ptr == 'C' || *(ptr +1) == 'C') {
/* -C is CX4 */
sc->connector = MXGE_CX4;
mxge_media_set(sc, IFM_10G_CX4);
} else if (*ptr == 'Q') {
/* -Q is Quad Ribbon Fiber */
sc->connector = MXGE_QRF;
device_printf(sc->dev, "Quad Ribbon Fiber Media\n");
/* FreeBSD has no media type for Quad ribbon fiber */
} else if (*ptr == 'R') {
/* -R is XFP */
sc->connector = MXGE_XFP;
} else if (*ptr == 'S' || *(ptr +1) == 'S') {
/* -S or -2S is SFP+ */
sc->connector = MXGE_SFP;
} else {
device_printf(sc->dev, "Unknown media type: %c\n", *ptr);
}
}
/*
* Determine the media type for a NIC. Some XFPs will identify
* themselves only when their link is up, so this is initiated via a
* link up interrupt. However, this can potentially take up to
* several milliseconds, so it is run via the watchdog routine, rather
* than in the interrupt handler itself.
*/
static void
mxge_media_probe(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
char *cage_type;
struct mxge_media_type *mxge_media_types = NULL;
int i, err, ms, mxge_media_type_entries;
uint32_t byte;
sc->need_media_probe = 0;
if (sc->connector == MXGE_XFP) {
/* -R is XFP */
mxge_media_types = mxge_xfp_media_types;
mxge_media_type_entries =
nitems(mxge_xfp_media_types);
byte = MXGE_XFP_COMPLIANCE_BYTE;
cage_type = "XFP";
} else if (sc->connector == MXGE_SFP) {
/* -S or -2S is SFP+ */
mxge_media_types = mxge_sfp_media_types;
mxge_media_type_entries =
nitems(mxge_sfp_media_types);
cage_type = "SFP+";
byte = 3;
} else {
/* nothing to do; media type cannot change */
return;
}
/*
* At this point we know the NIC has an XFP or SFP+ cage, so now we
* try to determine what is in the cage by using the
* firmware's XFP I2C commands to read the XFP 10GbE compliance
* register. We read just one byte, which may take over
* a millisecond
*/
cmd.data0 = 0; /* just fetch 1 byte, not all 256 */
cmd.data1 = byte;
err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd);
if (err == MXGEFW_CMD_ERROR_I2C_FAILURE) {
device_printf(sc->dev, "failed to read XFP\n");
}
if (err == MXGEFW_CMD_ERROR_I2C_ABSENT) {
device_printf(sc->dev, "Type R/S with no XFP!?!?\n");
}
if (err != MXGEFW_CMD_OK) {
return;
}
/* now we wait for the data to be cached */
cmd.data0 = byte;
err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd);
for (ms = 0; (err == EBUSY) && (ms < 50); ms++) {
DELAY(1000);
cmd.data0 = byte;
err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd);
}
if (err != MXGEFW_CMD_OK) {
device_printf(sc->dev, "failed to read %s (%d, %dms)\n",
cage_type, err, ms);
return;
}
if (cmd.data0 == mxge_media_types[0].bitmask) {
if (mxge_verbose)
device_printf(sc->dev, "%s:%s\n", cage_type,
mxge_media_types[0].name);
if (sc->current_media != mxge_media_types[0].flag) {
mxge_media_init(sc);
mxge_media_set(sc, mxge_media_types[0].flag);
}
return;
}
for (i = 1; i < mxge_media_type_entries; i++) {
if (cmd.data0 & mxge_media_types[i].bitmask) {
if (mxge_verbose)
device_printf(sc->dev, "%s:%s\n",
cage_type,
mxge_media_types[i].name);
if (sc->current_media != mxge_media_types[i].flag) {
mxge_media_init(sc);
mxge_media_set(sc, mxge_media_types[i].flag);
}
return;
}
}
if (mxge_verbose)
device_printf(sc->dev, "%s media 0x%x unknown\n",
cage_type, cmd.data0);
return;
}
static void
mxge_intr(void *arg)
{
struct mxge_slice_state *ss = arg;
mxge_softc_t *sc = ss->sc;
mcp_irq_data_t *stats = ss->fw_stats;
mxge_tx_ring_t *tx = &ss->tx;
mxge_rx_done_t *rx_done = &ss->rx_done;
uint32_t send_done_count;
uint8_t valid;
#ifndef IFNET_BUF_RING
/* an interrupt on a non-zero slice is implicitly valid
since MSI-X irqs are not shared */
if (ss != sc->ss) {
mxge_clean_rx_done(ss);
*ss->irq_claim = be32toh(3);
return;
}
#endif
/* make sure the DMA has finished */
if (!stats->valid) {
return;
}
valid = stats->valid;
if (sc->legacy_irq) {
/* lower legacy IRQ */
*sc->irq_deassert = 0;
if (!mxge_deassert_wait)
/* don't wait for conf. that irq is low */
stats->valid = 0;
} else {
stats->valid = 0;
}
/* loop while waiting for legacy irq deassertion */
do {
/* check for transmit completes and receives */
send_done_count = be32toh(stats->send_done_count);
while ((send_done_count != tx->pkt_done) ||
(rx_done->entry[rx_done->idx].length != 0)) {
if (send_done_count != tx->pkt_done)
mxge_tx_done(ss, (int)send_done_count);
mxge_clean_rx_done(ss);
send_done_count = be32toh(stats->send_done_count);
}
if (sc->legacy_irq && mxge_deassert_wait)
wmb();
} while (*((volatile uint8_t *) &stats->valid));
/* fw link & error stats meaningful only on the first slice */
if (__predict_false((ss == sc->ss) && stats->stats_updated)) {
if (sc->link_state != stats->link_up) {
sc->link_state = stats->link_up;
if (sc->link_state) {
if_link_state_change(sc->ifp, LINK_STATE_UP);
if (mxge_verbose)
device_printf(sc->dev, "link up\n");
} else {
if_link_state_change(sc->ifp, LINK_STATE_DOWN);
if (mxge_verbose)
device_printf(sc->dev, "link down\n");
}
sc->need_media_probe = 1;
}
if (sc->rdma_tags_available !=
be32toh(stats->rdma_tags_available)) {
sc->rdma_tags_available =
be32toh(stats->rdma_tags_available);
device_printf(sc->dev, "RDMA timed out! %d tags "
"left\n", sc->rdma_tags_available);
}
if (stats->link_down) {
sc->down_cnt += stats->link_down;
sc->link_state = 0;
if_link_state_change(sc->ifp, LINK_STATE_DOWN);
}
}
/* check to see if we have rx token to pass back */
if (valid & 0x1)
*ss->irq_claim = be32toh(3);
*(ss->irq_claim + 1) = be32toh(3);
}
static void
mxge_init(void *arg)
{
mxge_softc_t *sc = arg;
struct ifnet *ifp = sc->ifp;
mtx_lock(&sc->driver_mtx);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
(void) mxge_open(sc);
mtx_unlock(&sc->driver_mtx);
}
static void
mxge_free_slice_mbufs(struct mxge_slice_state *ss)
{
int i;
#if defined(INET) || defined(INET6)
tcp_lro_free(&ss->lc);
#endif
for (i = 0; i <= ss->rx_big.mask; i++) {
if (ss->rx_big.info[i].m == NULL)
continue;
bus_dmamap_unload(ss->rx_big.dmat,
ss->rx_big.info[i].map);
m_freem(ss->rx_big.info[i].m);
ss->rx_big.info[i].m = NULL;
}
for (i = 0; i <= ss->rx_small.mask; i++) {
if (ss->rx_small.info[i].m == NULL)
continue;
bus_dmamap_unload(ss->rx_small.dmat,
ss->rx_small.info[i].map);
m_freem(ss->rx_small.info[i].m);
ss->rx_small.info[i].m = NULL;
}
/* transmit ring used only on the first slice */
if (ss->tx.info == NULL)
return;
for (i = 0; i <= ss->tx.mask; i++) {
ss->tx.info[i].flag = 0;
if (ss->tx.info[i].m == NULL)
continue;
bus_dmamap_unload(ss->tx.dmat,
ss->tx.info[i].map);
m_freem(ss->tx.info[i].m);
ss->tx.info[i].m = NULL;
}
}
static void
mxge_free_mbufs(mxge_softc_t *sc)
{
int slice;
for (slice = 0; slice < sc->num_slices; slice++)
mxge_free_slice_mbufs(&sc->ss[slice]);
}
static void
mxge_free_slice_rings(struct mxge_slice_state *ss)
{
int i;
if (ss->rx_done.entry != NULL)
mxge_dma_free(&ss->rx_done.dma);
ss->rx_done.entry = NULL;
if (ss->tx.req_bytes != NULL)
free(ss->tx.req_bytes, M_DEVBUF);
ss->tx.req_bytes = NULL;
if (ss->tx.seg_list != NULL)
free(ss->tx.seg_list, M_DEVBUF);
ss->tx.seg_list = NULL;
if (ss->rx_small.shadow != NULL)
free(ss->rx_small.shadow, M_DEVBUF);
ss->rx_small.shadow = NULL;
if (ss->rx_big.shadow != NULL)
free(ss->rx_big.shadow, M_DEVBUF);
ss->rx_big.shadow = NULL;
if (ss->tx.info != NULL) {
if (ss->tx.dmat != NULL) {
for (i = 0; i <= ss->tx.mask; i++) {
bus_dmamap_destroy(ss->tx.dmat,
ss->tx.info[i].map);
}
bus_dma_tag_destroy(ss->tx.dmat);
}
free(ss->tx.info, M_DEVBUF);
}
ss->tx.info = NULL;
if (ss->rx_small.info != NULL) {
if (ss->rx_small.dmat != NULL) {
for (i = 0; i <= ss->rx_small.mask; i++) {
bus_dmamap_destroy(ss->rx_small.dmat,
ss->rx_small.info[i].map);
}
bus_dmamap_destroy(ss->rx_small.dmat,
ss->rx_small.extra_map);
bus_dma_tag_destroy(ss->rx_small.dmat);
}
free(ss->rx_small.info, M_DEVBUF);
}
ss->rx_small.info = NULL;
if (ss->rx_big.info != NULL) {
if (ss->rx_big.dmat != NULL) {
for (i = 0; i <= ss->rx_big.mask; i++) {
bus_dmamap_destroy(ss->rx_big.dmat,
ss->rx_big.info[i].map);
}
bus_dmamap_destroy(ss->rx_big.dmat,
ss->rx_big.extra_map);
bus_dma_tag_destroy(ss->rx_big.dmat);
}
free(ss->rx_big.info, M_DEVBUF);
}
ss->rx_big.info = NULL;
}
static void
mxge_free_rings(mxge_softc_t *sc)
{
int slice;
for (slice = 0; slice < sc->num_slices; slice++)
mxge_free_slice_rings(&sc->ss[slice]);
}
static int
mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
int tx_ring_entries)
{
mxge_softc_t *sc = ss->sc;
size_t bytes;
int err, i;
/* allocate per-slice receive resources */
ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
ss->rx_done.mask = (2 * rx_ring_entries) - 1;
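/* the rx completion (rx_done) ring needs room for one entry per slot
   in both the small and big receive rings, hence twice the entries */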
/* allocate the rx shadow rings */
bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow);
ss->rx_small.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow);
ss->rx_big.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
/* allocate the rx host info rings */
bytes = rx_ring_entries * sizeof (*ss->rx_small.info);
ss->rx_small.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
bytes = rx_ring_entries * sizeof (*ss->rx_big.info);
ss->rx_big.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
/* allocate the rx busdma resources */
err = bus_dma_tag_create(sc->parent_dmat, /* parent */
1, /* alignment */
4096, /* boundary */
BUS_SPACE_MAXADDR, /* low */
BUS_SPACE_MAXADDR, /* high */
NULL, NULL, /* filter */
MHLEN, /* maxsize */
1, /* num segs */
MHLEN, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, NULL, /* lock */
&ss->rx_small.dmat); /* tag */
if (err != 0) {
device_printf(sc->dev, "Err %d allocating rx_small dmat\n",
err);
return err;
}
err = bus_dma_tag_create(sc->parent_dmat, /* parent */
1, /* alignment */
#if MXGE_VIRT_JUMBOS
4096, /* boundary */
#else
0, /* boundary */
#endif
BUS_SPACE_MAXADDR, /* low */
BUS_SPACE_MAXADDR, /* high */
NULL, NULL, /* filter */
3*4096, /* maxsize */
#if MXGE_VIRT_JUMBOS
3, /* num segs */
4096, /* maxsegsize*/
#else
1, /* num segs */
MJUM9BYTES, /* maxsegsize*/
#endif
BUS_DMA_ALLOCNOW, /* flags */
NULL, NULL, /* lock */
&ss->rx_big.dmat); /* tag */
if (err != 0) {
device_printf(sc->dev, "Err %d allocating rx_big dmat\n",
err);
return err;
}
for (i = 0; i <= ss->rx_small.mask; i++) {
err = bus_dmamap_create(ss->rx_small.dmat, 0,
&ss->rx_small.info[i].map);
if (err != 0) {
device_printf(sc->dev, "Err %d rx_small dmamap\n",
err);
return err;
}
}
err = bus_dmamap_create(ss->rx_small.dmat, 0,
&ss->rx_small.extra_map);
if (err != 0) {
device_printf(sc->dev, "Err %d extra rx_small dmamap\n",
err);
return err;
}
for (i = 0; i <= ss->rx_big.mask; i++) {
err = bus_dmamap_create(ss->rx_big.dmat, 0,
&ss->rx_big.info[i].map);
if (err != 0) {
device_printf(sc->dev, "Err %d rx_big dmamap\n",
err);
return err;
}
}
err = bus_dmamap_create(ss->rx_big.dmat, 0,
&ss->rx_big.extra_map);
if (err != 0) {
device_printf(sc->dev, "Err %d extra rx_big dmamap\n",
err);
return err;
}
/* now allocate TX resources */
#ifndef IFNET_BUF_RING
/* only use a single TX ring for now */
if (ss != ss->sc->ss)
return 0;
#endif
ss->tx.mask = tx_ring_entries - 1;
ss->tx.max_desc = MIN(MXGE_MAX_SEND_DESC, tx_ring_entries / 4);
/* allocate the tx request copy block */
bytes = 8 +
sizeof (*ss->tx.req_list) * (ss->tx.max_desc + 4);
ss->tx.req_bytes = malloc(bytes, M_DEVBUF, M_WAITOK);
/* ensure req_list entries are aligned to 8 bytes */
ss->tx.req_list = (mcp_kreq_ether_send_t *)
((unsigned long)(ss->tx.req_bytes + 7) & ~7UL);
/* allocate the tx busdma segment list */
bytes = sizeof (*ss->tx.seg_list) * ss->tx.max_desc;
ss->tx.seg_list = (bus_dma_segment_t *)
malloc(bytes, M_DEVBUF, M_WAITOK);
/* allocate the tx host info ring */
bytes = tx_ring_entries * sizeof (*ss->tx.info);
ss->tx.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
/* allocate the tx busdma resources */
err = bus_dma_tag_create(sc->parent_dmat, /* parent */
1, /* alignment */
sc->tx_boundary, /* boundary */
BUS_SPACE_MAXADDR, /* low */
BUS_SPACE_MAXADDR, /* high */
NULL, NULL, /* filter */
65536 + 256, /* maxsize */
ss->tx.max_desc - 2, /* num segs */
sc->tx_boundary, /* maxsegsz */
BUS_DMA_ALLOCNOW, /* flags */
NULL, NULL, /* lock */
&ss->tx.dmat); /* tag */
if (err != 0) {
device_printf(sc->dev, "Err %d allocating tx dmat\n",
err);
return err;
}
/* now use these tags to set up dmamaps for each slot
in the ring */
for (i = 0; i <= ss->tx.mask; i++) {
err = bus_dmamap_create(ss->tx.dmat, 0,
&ss->tx.info[i].map);
if (err != 0) {
device_printf(sc->dev, "Err %d tx dmamap\n",
err);
return err;
}
}
return 0;
}
static int
mxge_alloc_rings(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
int tx_ring_size;
int tx_ring_entries, rx_ring_entries;
int err, slice;
/* get ring sizes */
err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd);
tx_ring_size = cmd.data0;
if (err != 0) {
device_printf(sc->dev, "Cannot determine tx ring sizes\n");
goto abort;
}
tx_ring_entries = tx_ring_size / sizeof (mcp_kreq_ether_send_t);
rx_ring_entries = sc->rx_ring_size / sizeof (mcp_dma_addr_t);
IFQ_SET_MAXLEN(&sc->ifp->if_snd, tx_ring_entries - 1);
sc->ifp->if_snd.ifq_drv_maxlen = sc->ifp->if_snd.ifq_maxlen;
IFQ_SET_READY(&sc->ifp->if_snd);
for (slice = 0; slice < sc->num_slices; slice++) {
err = mxge_alloc_slice_rings(&sc->ss[slice],
rx_ring_entries,
tx_ring_entries);
if (err != 0)
goto abort;
}
return 0;
abort:
mxge_free_rings(sc);
return err;
}
static void
mxge_choose_params(int mtu, int *big_buf_size, int *cl_size, int *nbufs)
{
int bufsize = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD;
if (bufsize < MCLBYTES) {
/* easy, everything fits in a single buffer */
*big_buf_size = MCLBYTES;
*cl_size = MCLBYTES;
*nbufs = 1;
return;
}
if (bufsize < MJUMPAGESIZE) {
/* still easy, everything still fits in a single buffer */
*big_buf_size = MJUMPAGESIZE;
*cl_size = MJUMPAGESIZE;
*nbufs = 1;
return;
}
#if MXGE_VIRT_JUMBOS
/* now we need to use virtually contiguous buffers */
*cl_size = MJUM9BYTES;
*big_buf_size = 4096;
*nbufs = mtu / 4096 + 1;
/* needs to be a power of two, so round up */
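/* for the jumbo MTUs this driver accepts, mtu / 4096 + 1 is at most 4,
   so 3 is the only value that needs rounding */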
if (*nbufs == 3)
*nbufs = 4;
#else
*cl_size = MJUM9BYTES;
*big_buf_size = MJUM9BYTES;
*nbufs = 1;
#endif
}
static int
mxge_slice_open(struct mxge_slice_state *ss, int nbufs, int cl_size)
{
mxge_softc_t *sc;
mxge_cmd_t cmd;
bus_dmamap_t map;
int err, i, slice;
sc = ss->sc;
slice = ss - sc->ss;
#if defined(INET) || defined(INET6)
(void)tcp_lro_init(&ss->lc);
#endif
ss->lc.ifp = sc->ifp;
/* get the lanai pointers to the send and receive rings */
err = 0;
#ifndef IFNET_BUF_RING
/* We currently only send from the first slice */
if (slice == 0) {
#endif
cmd.data0 = slice;
err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_OFFSET, &cmd);
ss->tx.lanai =
(volatile mcp_kreq_ether_send_t *)(sc->sram + cmd.data0);
ss->tx.send_go = (volatile uint32_t *)
(sc->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
ss->tx.send_stop = (volatile uint32_t *)
(sc->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
#ifndef IFNET_BUF_RING
}
#endif
cmd.data0 = slice;
err |= mxge_send_cmd(sc,
MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd);
ss->rx_small.lanai =
(volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0);
cmd.data0 = slice;
err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd);
ss->rx_big.lanai =
(volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0);
if (err != 0) {
device_printf(sc->dev,
"failed to get ring sizes or locations\n");
return EIO;
}
/* stock receive rings */
for (i = 0; i <= ss->rx_small.mask; i++) {
map = ss->rx_small.info[i].map;
err = mxge_get_buf_small(ss, map, i);
if (err) {
device_printf(sc->dev, "alloced %d/%d smalls\n",
i, ss->rx_small.mask + 1);
return ENOMEM;
}
}
for (i = 0; i <= ss->rx_big.mask; i++) {
ss->rx_big.shadow[i].addr_low = 0xffffffff;
ss->rx_big.shadow[i].addr_high = 0xffffffff;
}
ss->rx_big.nbufs = nbufs;
ss->rx_big.cl_size = cl_size;
ss->rx_big.mlen = ss->sc->ifp->if_mtu + ETHER_HDR_LEN +
ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD;
for (i = 0; i <= ss->rx_big.mask; i += ss->rx_big.nbufs) {
map = ss->rx_big.info[i].map;
err = mxge_get_buf_big(ss, map, i);
if (err) {
device_printf(sc->dev, "alloced %d/%d bigs\n",
i, ss->rx_big.mask + 1);
return ENOMEM;
}
}
return 0;
}
static int
mxge_open(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
int err, big_bytes, nbufs, slice, cl_size, i;
bus_addr_t bus;
volatile uint8_t *itable;
struct mxge_slice_state *ss;
/* Copy the MAC address in case it was overridden */
bcopy(IF_LLADDR(sc->ifp), sc->mac_addr, ETHER_ADDR_LEN);
err = mxge_reset(sc, 1);
if (err != 0) {
device_printf(sc->dev, "failed to reset\n");
return EIO;
}
if (sc->num_slices > 1) {
/* setup the indirection table */
cmd.data0 = sc->num_slices;
err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
&cmd);
err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
&cmd);
if (err != 0) {
device_printf(sc->dev,
"failed to setup rss tables\n");
return err;
}
/* just enable an identity mapping */
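/* table entry i points at slice i, so RSS hash buckets map one-to-one
   onto slices */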
itable = sc->sram + cmd.data0;
for (i = 0; i < sc->num_slices; i++)
itable[i] = (uint8_t)i;
cmd.data0 = 1;
cmd.data1 = mxge_rss_hash_type;
err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_ENABLE, &cmd);
if (err != 0) {
device_printf(sc->dev, "failed to enable slices\n");
return err;
}
}
mxge_choose_params(sc->ifp->if_mtu, &big_bytes, &cl_size, &nbufs);
cmd.data0 = nbufs;
err = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS,
&cmd);
/* error is only meaningful if we're trying to set
MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS > 1 */
if (err && nbufs > 1) {
device_printf(sc->dev,
"Failed to set alway-use-n to %d\n",
nbufs);
return EIO;
}
/* Give the firmware the mtu and the big and small buffer
sizes. The firmware wants the big buf size to be a power
of two. Luckily, FreeBSD's clusters are powers of two */
cmd.data0 = sc->ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
err = mxge_send_cmd(sc, MXGEFW_CMD_SET_MTU, &cmd);
cmd.data0 = MHLEN - MXGEFW_PAD;
err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE,
&cmd);
cmd.data0 = big_bytes;
err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd);
if (err != 0) {
device_printf(sc->dev, "failed to setup params\n");
goto abort;
}
/* Now give the firmware the pointer to the stats block */
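/* with IFNET_BUF_RING each slice has its own stats block; otherwise
   only slice 0 carries firmware stats */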
for (slice = 0;
#ifdef IFNET_BUF_RING
slice < sc->num_slices;
#else
slice < 1;
#endif
slice++) {
ss = &sc->ss[slice];
cmd.data0 =
MXGE_LOWPART_TO_U32(ss->fw_stats_dma.bus_addr);
cmd.data1 =
MXGE_HIGHPART_TO_U32(ss->fw_stats_dma.bus_addr);
cmd.data2 = sizeof(struct mcp_irq_data);
cmd.data2 |= (slice << 16);
err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd);
}
if (err != 0) {
bus = sc->ss->fw_stats_dma.bus_addr;
bus += offsetof(struct mcp_irq_data, send_done_count);
cmd.data0 = MXGE_LOWPART_TO_U32(bus);
cmd.data1 = MXGE_HIGHPART_TO_U32(bus);
err = mxge_send_cmd(sc,
MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
&cmd);
/* Firmware cannot support multicast without STATS_DMA_V2 */
sc->fw_multicast_support = 0;
} else {
sc->fw_multicast_support = 1;
}
if (err != 0) {
device_printf(sc->dev, "failed to setup params\n");
goto abort;
}
for (slice = 0; slice < sc->num_slices; slice++) {
err = mxge_slice_open(&sc->ss[slice], nbufs, cl_size);
if (err != 0) {
device_printf(sc->dev, "couldn't open slice %d\n",
slice);
goto abort;
}
}
/* Finally, start the firmware running */
err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_UP, &cmd);
if (err) {
device_printf(sc->dev, "Couldn't bring up link\n");
goto abort;
}
#ifdef IFNET_BUF_RING
for (slice = 0; slice < sc->num_slices; slice++) {
ss = &sc->ss[slice];
ss->if_drv_flags |= IFF_DRV_RUNNING;
ss->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
#endif
sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
return 0;
abort:
mxge_free_mbufs(sc);
return err;
}
static int
mxge_close(mxge_softc_t *sc, int down)
{
mxge_cmd_t cmd;
int err, old_down_cnt;
#ifdef IFNET_BUF_RING
struct mxge_slice_state *ss;
int slice;
#endif
#ifdef IFNET_BUF_RING
for (slice = 0; slice < sc->num_slices; slice++) {
ss = &sc->ss[slice];
ss->if_drv_flags &= ~IFF_DRV_RUNNING;
}
#endif
sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
if (!down) {
old_down_cnt = sc->down_cnt;
wmb();
err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_DOWN, &cmd);
if (err) {
device_printf(sc->dev,
"Couldn't bring down link\n");
}
if (old_down_cnt == sc->down_cnt) {
/* wait for down irq */
DELAY(10 * sc->intr_coal_delay);
}
wmb();
if (old_down_cnt == sc->down_cnt) {
device_printf(sc->dev, "never got down irq\n");
}
}
mxge_free_mbufs(sc);
return 0;
}
static void
mxge_setup_cfg_space(mxge_softc_t *sc)
{
device_t dev = sc->dev;
int reg;
uint16_t lnk, pectl;
/* find the PCIe link width and set max read request to 4KB */
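/* in the PCIe capability, Link Status (cap + 0x12) holds the negotiated
   link width in bits 9:4, and Device Control (cap + 0x8) holds the Max
   Read Request Size in bits 14:12, where the encoding 5 means 4096 bytes */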
if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
lnk = pci_read_config(dev, reg + 0x12, 2);
sc->link_width = (lnk >> 4) & 0x3f;
if (sc->pectl == 0) {
pectl = pci_read_config(dev, reg + 0x8, 2);
pectl = (pectl & ~0x7000) | (5 << 12);
pci_write_config(dev, reg + 0x8, pectl, 2);
sc->pectl = pectl;
} else {
/* restore saved pectl after watchdog reset */
pci_write_config(dev, reg + 0x8, sc->pectl, 2);
}
}
/* Enable DMA and Memory space access */
pci_enable_busmaster(dev);
}
static uint32_t
mxge_read_reboot(mxge_softc_t *sc)
{
device_t dev = sc->dev;
uint32_t vs;
/* find the vendor specific offset */
if (pci_find_cap(dev, PCIY_VENDOR, &vs) != 0) {
device_printf(sc->dev,
"could not find vendor specific offset\n");
return (uint32_t)-1;
}
/* enable read32 mode */
pci_write_config(dev, vs + 0x10, 0x3, 1);
/* tell NIC which register to read */
pci_write_config(dev, vs + 0x18, 0xfffffff0, 4);
return (pci_read_config(dev, vs + 0x14, 4));
}
static void
mxge_watchdog_reset(mxge_softc_t *sc)
{
struct pci_devinfo *dinfo;
struct mxge_slice_state *ss;
int err, running, s, num_tx_slices = 1;
uint32_t reboot;
uint16_t cmd;
err = ENXIO;
device_printf(sc->dev, "Watchdog reset!\n");
/*
* check to see if the NIC rebooted. If it did, then all of
* PCI config space has been reset, and things like the
* busmaster bit will be zero. If this is the case, then we
* must restore PCI config space before the NIC can be used
* again
*/
cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2);
if (cmd == 0xffff) {
/*
* maybe the watchdog caught the NIC rebooting; wait
* up to 100ms for it to finish. If it does not come
* back, then give up
*/
DELAY(1000*100);
cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2);
if (cmd == 0xffff) {
device_printf(sc->dev, "NIC disappeared!\n");
}
}
if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) {
/* print the reboot status */
reboot = mxge_read_reboot(sc);
device_printf(sc->dev, "NIC rebooted, status = 0x%x\n",
reboot);
running = sc->ifp->if_drv_flags & IFF_DRV_RUNNING;
if (running) {
/*
* quiesce NIC so that TX routines will not try to
* xmit after restoration of BAR
*/
/* Mark the link as down */
if (sc->link_state) {
sc->link_state = 0;
if_link_state_change(sc->ifp,
LINK_STATE_DOWN);
}
#ifdef IFNET_BUF_RING
num_tx_slices = sc->num_slices;
#endif
/* grab all TX locks to ensure no tx */
for (s = 0; s < num_tx_slices; s++) {
ss = &sc->ss[s];
mtx_lock(&ss->tx.mtx);
}
mxge_close(sc, 1);
}
/* restore PCI configuration space */
dinfo = device_get_ivars(sc->dev);
pci_cfg_restore(sc->dev, dinfo);
/* and redo any changes we made to our config space */
mxge_setup_cfg_space(sc);
/* reload f/w */
err = mxge_load_firmware(sc, 0);
if (err) {
device_printf(sc->dev,
"Unable to re-load f/w\n");
}
if (running) {
if (!err)
err = mxge_open(sc);
/* release all TX locks */
for (s = 0; s < num_tx_slices; s++) {
ss = &sc->ss[s];
#ifdef IFNET_BUF_RING
mxge_start_locked(ss);
#endif
mtx_unlock(&ss->tx.mtx);
}
}
sc->watchdog_resets++;
} else {
device_printf(sc->dev,
"NIC did not reboot, not resetting\n");
err = 0;
}
if (err) {
device_printf(sc->dev, "watchdog reset failed\n");
} else {
if (sc->dying == 2)
sc->dying = 0;
callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc);
}
}
static void
mxge_watchdog_task(void *arg, int pending)
{
mxge_softc_t *sc = arg;
mtx_lock(&sc->driver_mtx);
mxge_watchdog_reset(sc);
mtx_unlock(&sc->driver_mtx);
}
static void
mxge_warn_stuck(mxge_softc_t *sc, mxge_tx_ring_t *tx, int slice)
{
tx = &sc->ss[slice].tx;
device_printf(sc->dev, "slice %d struck? ring state:\n", slice);
device_printf(sc->dev,
"tx.req=%d tx.done=%d, tx.queue_active=%d\n",
tx->req, tx->done, tx->queue_active);
device_printf(sc->dev, "tx.activate=%d tx.deactivate=%d\n",
tx->activate, tx->deactivate);
device_printf(sc->dev, "pkt_done=%d fw=%d\n",
tx->pkt_done,
be32toh(sc->ss->fw_stats->send_done_count));
}
static int
mxge_watchdog(mxge_softc_t *sc)
{
mxge_tx_ring_t *tx;
uint32_t rx_pause = be32toh(sc->ss->fw_stats->dropped_pause);
int i, err = 0;
/* see if we have outstanding transmits, which
have been pending for more than mxge_ticks */
for (i = 0;
#ifdef IFNET_BUF_RING
(i < sc->num_slices) && (err == 0);
#else
(i < 1) && (err == 0);
#endif
i++) {
tx = &sc->ss[i].tx;
if (tx->req != tx->done &&
tx->watchdog_req != tx->watchdog_done &&
tx->done == tx->watchdog_done) {
/* check for pause blocking before resetting */
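/* if the pause-frame counter has not moved since the last tick, flow
   control is not what is stalling the ring, so assume the NIC is hung
   and schedule a watchdog reset */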
if (tx->watchdog_rx_pause == rx_pause) {
mxge_warn_stuck(sc, tx, i);
taskqueue_enqueue(sc->tq, &sc->watchdog_task);
return (ENXIO);
}
else
device_printf(sc->dev, "Flow control blocking "
"xmits, check link partner\n");
}
tx->watchdog_req = tx->req;
tx->watchdog_done = tx->done;
tx->watchdog_rx_pause = rx_pause;
}
if (sc->need_media_probe)
mxge_media_probe(sc);
return (err);
}
static uint64_t
mxge_get_counter(struct ifnet *ifp, ift_counter cnt)
{
struct mxge_softc *sc;
uint64_t rv;
sc = if_getsoftc(ifp);
rv = 0;
switch (cnt) {
case IFCOUNTER_IPACKETS:
for (int s = 0; s < sc->num_slices; s++)
rv += sc->ss[s].ipackets;
return (rv);
case IFCOUNTER_OPACKETS:
for (int s = 0; s < sc->num_slices; s++)
rv += sc->ss[s].opackets;
return (rv);
case IFCOUNTER_OERRORS:
for (int s = 0; s < sc->num_slices; s++)
rv += sc->ss[s].oerrors;
return (rv);
#ifdef IFNET_BUF_RING
case IFCOUNTER_OBYTES:
for (int s = 0; s < sc->num_slices; s++)
rv += sc->ss[s].obytes;
return (rv);
case IFCOUNTER_OMCASTS:
for (int s = 0; s < sc->num_slices; s++)
rv += sc->ss[s].omcasts;
return (rv);
case IFCOUNTER_OQDROPS:
for (int s = 0; s < sc->num_slices; s++)
rv += sc->ss[s].tx.br->br_drops;
return (rv);
#endif
default:
return (if_get_counter_default(ifp, cnt));
}
}
static void
mxge_tick(void *arg)
{
mxge_softc_t *sc = arg;
u_long pkts = 0;
int err = 0;
int running, ticks;
uint16_t cmd;
ticks = mxge_ticks;
running = sc->ifp->if_drv_flags & IFF_DRV_RUNNING;
if (running) {
if (!sc->watchdog_countdown) {
err = mxge_watchdog(sc);
sc->watchdog_countdown = 4;
}
sc->watchdog_countdown--;
}
if (pkts == 0) {
/* ensure NIC did not suffer h/w fault while idle */
cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2);
if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) {
sc->dying = 2;
taskqueue_enqueue(sc->tq, &sc->watchdog_task);
err = ENXIO;
}
/* look less often if NIC is idle */
ticks *= 4;
}
if (err == 0)
callout_reset(&sc->co_hdl, ticks, mxge_tick, sc);
}
static int
mxge_media_change(struct ifnet *ifp)
{
return EINVAL;
}
static int
mxge_change_mtu(mxge_softc_t *sc, int mtu)
{
struct ifnet *ifp = sc->ifp;
int real_mtu, old_mtu;
int err = 0;
real_mtu = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
if ((real_mtu > sc->max_mtu) || real_mtu < 60)
return EINVAL;
mtx_lock(&sc->driver_mtx);
old_mtu = ifp->if_mtu;
ifp->if_mtu = mtu;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
mxge_close(sc, 0);
err = mxge_open(sc);
if (err != 0) {
ifp->if_mtu = old_mtu;
mxge_close(sc, 0);
(void) mxge_open(sc);
}
}
mtx_unlock(&sc->driver_mtx);
return err;
}
static void
mxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
mxge_softc_t *sc = ifp->if_softc;
if (sc == NULL)
return;
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER | IFM_FDX;
ifmr->ifm_status |= sc->link_state ? IFM_ACTIVE : 0;
ifmr->ifm_active |= sc->current_media;
}
static int
mxge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
mxge_softc_t *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *)data;
int err, mask;
err = 0;
switch (command) {
case SIOCSIFADDR:
case SIOCGIFADDR:
err = ether_ioctl(ifp, command, data);
break;
case SIOCSIFMTU:
err = mxge_change_mtu(sc, ifr->ifr_mtu);
break;
case SIOCSIFFLAGS:
mtx_lock(&sc->driver_mtx);
if (sc->dying) {
mtx_unlock(&sc->driver_mtx);
return EINVAL;
}
if (ifp->if_flags & IFF_UP) {
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
err = mxge_open(sc);
} else {
/* take care of promisc and allmulti
flag changes */
mxge_change_promisc(sc,
ifp->if_flags & IFF_PROMISC);
mxge_set_multicast_list(sc);
}
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
mxge_close(sc, 0);
}
}
mtx_unlock(&sc->driver_mtx);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
mtx_lock(&sc->driver_mtx);
mxge_set_multicast_list(sc);
mtx_unlock(&sc->driver_mtx);
break;
case SIOCSIFCAP:
mtx_lock(&sc->driver_mtx);
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
if (mask & IFCAP_TXCSUM) {
if (IFCAP_TXCSUM & ifp->if_capenable) {
ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP);
} else {
ifp->if_capenable |= IFCAP_TXCSUM;
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
}
} else if (mask & IFCAP_RXCSUM) {
if (IFCAP_RXCSUM & ifp->if_capenable) {
ifp->if_capenable &= ~IFCAP_RXCSUM;
} else {
ifp->if_capenable |= IFCAP_RXCSUM;
}
}
if (mask & IFCAP_TSO4) {
if (IFCAP_TSO4 & ifp->if_capenable) {
ifp->if_capenable &= ~IFCAP_TSO4;
} else if (IFCAP_TXCSUM & ifp->if_capenable) {
ifp->if_capenable |= IFCAP_TSO4;
ifp->if_hwassist |= CSUM_TSO;
} else {
printf("mxge requires tx checksum offload"
" be enabled to use TSO\n");
err = EINVAL;
}
}
#if IFCAP_TSO6
if (mask & IFCAP_TXCSUM_IPV6) {
if (IFCAP_TXCSUM_IPV6 & ifp->if_capenable) {
ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6
| IFCAP_TSO6);
ifp->if_hwassist &= ~(CSUM_TCP_IPV6
| CSUM_UDP);
} else {
ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
ifp->if_hwassist |= (CSUM_TCP_IPV6
| CSUM_UDP_IPV6);
}
} else if (mask & IFCAP_RXCSUM_IPV6) {
if (IFCAP_RXCSUM_IPV6 & ifp->if_capenable) {
ifp->if_capenable &= ~IFCAP_RXCSUM_IPV6;
} else {
ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
}
}
if (mask & IFCAP_TSO6) {
if (IFCAP_TSO6 & ifp->if_capenable) {
ifp->if_capenable &= ~IFCAP_TSO6;
} else if (IFCAP_TXCSUM_IPV6 & ifp->if_capenable) {
ifp->if_capenable |= IFCAP_TSO6;
ifp->if_hwassist |= CSUM_TSO;
} else {
printf("mxge requires tx checksum offload"
" be enabled to use TSO\n");
err = EINVAL;
}
}
#endif /*IFCAP_TSO6 */
if (mask & IFCAP_LRO)
ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (!(ifp->if_capabilities & IFCAP_VLAN_HWTSO) ||
!(ifp->if_capenable & IFCAP_VLAN_HWTAGGING))
ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
mtx_unlock(&sc->driver_mtx);
VLAN_CAPABILITIES(ifp);
break;
case SIOCGIFMEDIA:
mtx_lock(&sc->driver_mtx);
mxge_media_probe(sc);
mtx_unlock(&sc->driver_mtx);
err = ifmedia_ioctl(ifp, (struct ifreq *)data,
&sc->media, command);
break;
default:
err = ENOTTY;
}
return err;
}
static void
mxge_fetch_tunables(mxge_softc_t *sc)
{
TUNABLE_INT_FETCH("hw.mxge.max_slices", &mxge_max_slices);
TUNABLE_INT_FETCH("hw.mxge.flow_control_enabled",
&mxge_flow_control);
TUNABLE_INT_FETCH("hw.mxge.intr_coal_delay",
&mxge_intr_coal_delay);
TUNABLE_INT_FETCH("hw.mxge.nvidia_ecrc_enable",
&mxge_nvidia_ecrc_enable);
TUNABLE_INT_FETCH("hw.mxge.force_firmware",
&mxge_force_firmware);
TUNABLE_INT_FETCH("hw.mxge.deassert_wait",
&mxge_deassert_wait);
TUNABLE_INT_FETCH("hw.mxge.verbose",
&mxge_verbose);
TUNABLE_INT_FETCH("hw.mxge.ticks", &mxge_ticks);
TUNABLE_INT_FETCH("hw.mxge.always_promisc", &mxge_always_promisc);
TUNABLE_INT_FETCH("hw.mxge.rss_hash_type", &mxge_rss_hash_type);
TUNABLE_INT_FETCH("hw.mxge.rss_hashtype", &mxge_rss_hash_type);
TUNABLE_INT_FETCH("hw.mxge.initial_mtu", &mxge_initial_mtu);
TUNABLE_INT_FETCH("hw.mxge.throttle", &mxge_throttle);
if (bootverbose)
mxge_verbose = 1;
if (mxge_intr_coal_delay < 0 || mxge_intr_coal_delay > 10*1000)
mxge_intr_coal_delay = 30;
if (mxge_ticks == 0)
mxge_ticks = hz / 2;
sc->pause = mxge_flow_control;
if (mxge_rss_hash_type < MXGEFW_RSS_HASH_TYPE_IPV4
|| mxge_rss_hash_type > MXGEFW_RSS_HASH_TYPE_MAX) {
mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
}
if (mxge_initial_mtu > ETHERMTU_JUMBO ||
mxge_initial_mtu < ETHER_MIN_LEN)
mxge_initial_mtu = ETHERMTU_JUMBO;
if (mxge_throttle && mxge_throttle > MXGE_MAX_THROTTLE)
mxge_throttle = MXGE_MAX_THROTTLE;
if (mxge_throttle && mxge_throttle < MXGE_MIN_THROTTLE)
mxge_throttle = MXGE_MIN_THROTTLE;
sc->throttle = mxge_throttle;
}
static void
mxge_free_slices(mxge_softc_t *sc)
{
struct mxge_slice_state *ss;
int i;
if (sc->ss == NULL)
return;
for (i = 0; i < sc->num_slices; i++) {
ss = &sc->ss[i];
if (ss->fw_stats != NULL) {
mxge_dma_free(&ss->fw_stats_dma);
ss->fw_stats = NULL;
#ifdef IFNET_BUF_RING
if (ss->tx.br != NULL) {
drbr_free(ss->tx.br, M_DEVBUF);
ss->tx.br = NULL;
}
#endif
mtx_destroy(&ss->tx.mtx);
}
if (ss->rx_done.entry != NULL) {
mxge_dma_free(&ss->rx_done.dma);
ss->rx_done.entry = NULL;
}
}
free(sc->ss, M_DEVBUF);
sc->ss = NULL;
}
static int
mxge_alloc_slices(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
struct mxge_slice_state *ss;
size_t bytes;
int err, i, max_intr_slots;
err = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd);
if (err != 0) {
device_printf(sc->dev, "Cannot determine rx ring size\n");
return err;
}
sc->rx_ring_size = cmd.data0;
max_intr_slots = 2 * (sc->rx_ring_size / sizeof (mcp_dma_addr_t));
- sc->ss = mallocarray(sc->num_slices, sizeof(*sc->ss), M_DEVBUF,
- M_NOWAIT | M_ZERO);
+ bytes = sizeof (*sc->ss) * sc->num_slices;
+ sc->ss = malloc(bytes, M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->ss == NULL)
return (ENOMEM);
for (i = 0; i < sc->num_slices; i++) {
ss = &sc->ss[i];
ss->sc = sc;
/* allocate per-slice rx interrupt queues */
bytes = max_intr_slots * sizeof (*ss->rx_done.entry);
err = mxge_dma_alloc(sc, &ss->rx_done.dma, bytes, 4096);
if (err != 0)
goto abort;
ss->rx_done.entry = ss->rx_done.dma.addr;
bzero(ss->rx_done.entry, bytes);
/*
* allocate the per-slice firmware stats; stats
* (including tx) are used only on the first
* slice for now
*/
#ifndef IFNET_BUF_RING
if (i > 0)
continue;
#endif
bytes = sizeof (*ss->fw_stats);
err = mxge_dma_alloc(sc, &ss->fw_stats_dma,
sizeof (*ss->fw_stats), 64);
if (err != 0)
goto abort;
ss->fw_stats = (mcp_irq_data_t *)ss->fw_stats_dma.addr;
snprintf(ss->tx.mtx_name, sizeof(ss->tx.mtx_name),
"%s:tx(%d)", device_get_nameunit(sc->dev), i);
mtx_init(&ss->tx.mtx, ss->tx.mtx_name, NULL, MTX_DEF);
#ifdef IFNET_BUF_RING
ss->tx.br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK,
&ss->tx.mtx);
#endif
}
return (0);
abort:
mxge_free_slices(sc);
return (ENOMEM);
}
static void
mxge_slice_probe(mxge_softc_t *sc)
{
mxge_cmd_t cmd;
char *old_fw;
int msix_cnt, status, max_intr_slots;
sc->num_slices = 1;
/*
* don't enable multiple slices if the tunable disables them,
* or if this is not an SMP system
*/
if (mxge_max_slices == 0 || mxge_max_slices == 1 || mp_ncpus < 2)
return;
/* see how many MSI-X interrupts are available */
msix_cnt = pci_msix_count(sc->dev);
if (msix_cnt < 2)
return;
/* now load the slice-aware firmware to see what it supports */
old_fw = sc->fw_name;
if (old_fw == mxge_fw_aligned)
sc->fw_name = mxge_fw_rss_aligned;
else
sc->fw_name = mxge_fw_rss_unaligned;
status = mxge_load_firmware(sc, 0);
if (status != 0) {
device_printf(sc->dev, "Falling back to a single slice\n");
return;
}
/* try to send a reset command to the card to see if it
is alive */
memset(&cmd, 0, sizeof (cmd));
status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd);
if (status != 0) {
device_printf(sc->dev, "failed reset\n");
goto abort_with_fw;
}
/* get rx ring size */
status = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd);
if (status != 0) {
device_printf(sc->dev, "Cannot determine rx ring size\n");
goto abort_with_fw;
}
max_intr_slots = 2 * (cmd.data0 / sizeof (mcp_dma_addr_t));
/* tell it the size of the interrupt queues */
cmd.data0 = max_intr_slots * sizeof (struct mcp_slot);
status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd);
if (status != 0) {
device_printf(sc->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
goto abort_with_fw;
}
/* ask the maximum number of slices it supports */
status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd);
if (status != 0) {
device_printf(sc->dev,
"failed MXGEFW_CMD_GET_MAX_RSS_QUEUES\n");
goto abort_with_fw;
}
sc->num_slices = cmd.data0;
if (sc->num_slices > msix_cnt)
sc->num_slices = msix_cnt;
if (mxge_max_slices == -1) {
/* cap to number of CPUs in system */
if (sc->num_slices > mp_ncpus)
sc->num_slices = mp_ncpus;
} else {
if (sc->num_slices > mxge_max_slices)
sc->num_slices = mxge_max_slices;
}
/* make sure it is a power of two */
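/* n & (n - 1) is non-zero unless n is a power of two, so this rounds
   num_slices down to the nearest power of two */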
while (sc->num_slices & (sc->num_slices - 1))
sc->num_slices--;
if (mxge_verbose)
device_printf(sc->dev, "using %d slices\n",
sc->num_slices);
return;
abort_with_fw:
sc->fw_name = old_fw;
(void) mxge_load_firmware(sc, 0);
}
static int
mxge_add_msix_irqs(mxge_softc_t *sc)
{
+ size_t bytes;
int count, err, i, rid;
rid = PCIR_BAR(2);
sc->msix_table_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (sc->msix_table_res == NULL) {
device_printf(sc->dev, "couldn't alloc MSIX table res\n");
return ENXIO;
}
count = sc->num_slices;
err = pci_alloc_msix(sc->dev, &count);
if (err != 0) {
device_printf(sc->dev, "pci_alloc_msix: failed, wanted %d"
"err = %d \n", sc->num_slices, err);
goto abort_with_msix_table;
}
if (count < sc->num_slices) {
device_printf(sc->dev, "pci_alloc_msix: need %d, got %d\n",
sc->num_slices, count);
device_printf(sc->dev,
"Try setting hw.mxge.max_slices to %d\n",
count);
err = ENOSPC;
goto abort_with_msix;
}
- sc->msix_irq_res = mallocarray(sc->num_slices,
- sizeof(*sc->msix_irq_res), M_DEVBUF, M_NOWAIT|M_ZERO);
+ bytes = sizeof (*sc->msix_irq_res) * sc->num_slices;
+ sc->msix_irq_res = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO);
if (sc->msix_irq_res == NULL) {
err = ENOMEM;
goto abort_with_msix;
}
for (i = 0; i < sc->num_slices; i++) {
rid = i + 1;
sc->msix_irq_res[i] = bus_alloc_resource_any(sc->dev,
SYS_RES_IRQ,
&rid, RF_ACTIVE);
if (sc->msix_irq_res[i] == NULL) {
device_printf(sc->dev, "couldn't allocate IRQ res"
" for message %d\n", i);
err = ENXIO;
goto abort_with_res;
}
}
- sc->msix_ih = mallocarray(sc->num_slices, sizeof(*sc->msix_ih),
- M_DEVBUF, M_NOWAIT|M_ZERO);
+ bytes = sizeof (*sc->msix_ih) * sc->num_slices;
+ sc->msix_ih = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO);
for (i = 0; i < sc->num_slices; i++) {
err = bus_setup_intr(sc->dev, sc->msix_irq_res[i],
INTR_TYPE_NET | INTR_MPSAFE,
#if __FreeBSD_version > 700030
NULL,
#endif
mxge_intr, &sc->ss[i], &sc->msix_ih[i]);
if (err != 0) {
device_printf(sc->dev, "couldn't setup intr for "
"message %d\n", i);
goto abort_with_intr;
}
bus_describe_intr(sc->dev, sc->msix_irq_res[i],
sc->msix_ih[i], "s%d", i);
}
if (mxge_verbose) {
device_printf(sc->dev, "using %d msix IRQs:",
sc->num_slices);
for (i = 0; i < sc->num_slices; i++)
printf(" %jd", rman_get_start(sc->msix_irq_res[i]));
printf("\n");
}
return (0);
abort_with_intr:
for (i = 0; i < sc->num_slices; i++) {
if (sc->msix_ih[i] != NULL) {
bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
sc->msix_ih[i]);
sc->msix_ih[i] = NULL;
}
}
free(sc->msix_ih, M_DEVBUF);
abort_with_res:
for (i = 0; i < sc->num_slices; i++) {
rid = i + 1;
if (sc->msix_irq_res[i] != NULL)
bus_release_resource(sc->dev, SYS_RES_IRQ, rid,
sc->msix_irq_res[i]);
sc->msix_irq_res[i] = NULL;
}
free(sc->msix_irq_res, M_DEVBUF);
abort_with_msix:
pci_release_msi(sc->dev);
abort_with_msix_table:
bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2),
sc->msix_table_res);
return err;
}
static int
mxge_add_single_irq(mxge_softc_t *sc)
{
int count, err, rid;
count = pci_msi_count(sc->dev);
if (count == 1 && pci_alloc_msi(sc->dev, &count) == 0) {
rid = 1;
} else {
rid = 0;
sc->legacy_irq = 1;
}
sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (sc->irq_res == NULL) {
device_printf(sc->dev, "could not alloc interrupt\n");
return ENXIO;
}
if (mxge_verbose)
device_printf(sc->dev, "using %s irq %jd\n",
sc->legacy_irq ? "INTx" : "MSI",
rman_get_start(sc->irq_res));
err = bus_setup_intr(sc->dev, sc->irq_res,
INTR_TYPE_NET | INTR_MPSAFE,
#if __FreeBSD_version > 700030
NULL,
#endif
mxge_intr, &sc->ss[0], &sc->ih);
if (err != 0) {
bus_release_resource(sc->dev, SYS_RES_IRQ,
sc->legacy_irq ? 0 : 1, sc->irq_res);
if (!sc->legacy_irq)
pci_release_msi(sc->dev);
}
return err;
}
static void
mxge_rem_msix_irqs(mxge_softc_t *sc)
{
int i, rid;
for (i = 0; i < sc->num_slices; i++) {
if (sc->msix_ih[i] != NULL) {
bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
sc->msix_ih[i]);
sc->msix_ih[i] = NULL;
}
}
free(sc->msix_ih, M_DEVBUF);
for (i = 0; i < sc->num_slices; i++) {
rid = i + 1;
if (sc->msix_irq_res[i] != NULL)
bus_release_resource(sc->dev, SYS_RES_IRQ, rid,
sc->msix_irq_res[i]);
sc->msix_irq_res[i] = NULL;
}
free(sc->msix_irq_res, M_DEVBUF);
bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2),
sc->msix_table_res);
pci_release_msi(sc->dev);
return;
}
static void
mxge_rem_single_irq(mxge_softc_t *sc)
{
bus_teardown_intr(sc->dev, sc->irq_res, sc->ih);
bus_release_resource(sc->dev, SYS_RES_IRQ,
sc->legacy_irq ? 0 : 1, sc->irq_res);
if (!sc->legacy_irq)
pci_release_msi(sc->dev);
}
static void
mxge_rem_irq(mxge_softc_t *sc)
{
if (sc->num_slices > 1)
mxge_rem_msix_irqs(sc);
else
mxge_rem_single_irq(sc);
}
static int
mxge_add_irq(mxge_softc_t *sc)
{
int err;
if (sc->num_slices > 1)
err = mxge_add_msix_irqs(sc);
else
err = mxge_add_single_irq(sc);
if (0 && err == 0 && sc->num_slices > 1) {
mxge_rem_msix_irqs(sc);
err = mxge_add_msix_irqs(sc);
}
return err;
}
static int
mxge_attach(device_t dev)
{
mxge_cmd_t cmd;
mxge_softc_t *sc = device_get_softc(dev);
struct ifnet *ifp;
int err, rid;
sc->dev = dev;
mxge_fetch_tunables(sc);
TASK_INIT(&sc->watchdog_task, 1, mxge_watchdog_task, sc);
sc->tq = taskqueue_create("mxge_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->tq);
if (sc->tq == NULL) {
err = ENOMEM;
goto abort_with_nothing;
}
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR, /* low */
BUS_SPACE_MAXADDR, /* high */
NULL, NULL, /* filter */
65536 + 256, /* maxsize */
MXGE_MAX_SEND_DESC, /* num segs */
65536, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lock */
&sc->parent_dmat); /* tag */
if (err != 0) {
device_printf(sc->dev, "Err %d allocating parent dmat\n",
err);
goto abort_with_tq;
}
ifp = sc->ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "can not if_alloc()\n");
err = ENOSPC;
goto abort_with_parent_dmat;
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
snprintf(sc->cmd_mtx_name, sizeof(sc->cmd_mtx_name), "%s:cmd",
device_get_nameunit(dev));
mtx_init(&sc->cmd_mtx, sc->cmd_mtx_name, NULL, MTX_DEF);
snprintf(sc->driver_mtx_name, sizeof(sc->driver_mtx_name),
"%s:drv", device_get_nameunit(dev));
mtx_init(&sc->driver_mtx, sc->driver_mtx_name,
MTX_NETWORK_LOCK, MTX_DEF);
callout_init_mtx(&sc->co_hdl, &sc->driver_mtx, 0);
mxge_setup_cfg_space(sc);
/* Map the board into the kernel */
rid = PCIR_BARS;
sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->mem_res == NULL) {
device_printf(dev, "could not map memory\n");
err = ENXIO;
goto abort_with_lock;
}
sc->sram = rman_get_virtual(sc->mem_res);
sc->sram_size = 2*1024*1024 - (2*(48*1024)+(32*1024)) - 0x100;
if (sc->sram_size > rman_get_size(sc->mem_res)) {
device_printf(dev, "impossible memory region size %jd\n",
rman_get_size(sc->mem_res));
err = ENXIO;
goto abort_with_mem_res;
}
/* make a NULL-terminated copy of the EEPROM strings section of
lanai SRAM */
bzero(sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE);
bus_space_read_region_1(rman_get_bustag(sc->mem_res),
rman_get_bushandle(sc->mem_res),
sc->sram_size - MXGE_EEPROM_STRINGS_SIZE,
sc->eeprom_strings,
MXGE_EEPROM_STRINGS_SIZE - 2);
err = mxge_parse_strings(sc);
if (err != 0)
goto abort_with_mem_res;
/* Enable write combining for efficient use of PCIe bus */
mxge_enable_wc(sc);
/* Allocate the out of band dma memory */
err = mxge_dma_alloc(sc, &sc->cmd_dma,
sizeof (mxge_cmd_t), 64);
if (err != 0)
goto abort_with_mem_res;
sc->cmd = (mcp_cmd_response_t *) sc->cmd_dma.addr;
err = mxge_dma_alloc(sc, &sc->zeropad_dma, 64, 64);
if (err != 0)
goto abort_with_cmd_dma;
err = mxge_dma_alloc(sc, &sc->dmabench_dma, 4096, 4096);
if (err != 0)
goto abort_with_zeropad_dma;
/* select & load the firmware */
err = mxge_select_firmware(sc);
if (err != 0)
goto abort_with_dmabench;
sc->intr_coal_delay = mxge_intr_coal_delay;
mxge_slice_probe(sc);
err = mxge_alloc_slices(sc);
if (err != 0)
goto abort_with_dmabench;
err = mxge_reset(sc, 0);
if (err != 0)
goto abort_with_slices;
err = mxge_alloc_rings(sc);
if (err != 0) {
device_printf(sc->dev, "failed to allocate rings\n");
goto abort_with_slices;
}
err = mxge_add_irq(sc);
if (err != 0) {
device_printf(sc->dev, "failed to add irq\n");
goto abort_with_rings;
}
ifp->if_baudrate = IF_Gbps(10);
ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4 |
IFCAP_VLAN_MTU | IFCAP_LINKSTATE | IFCAP_TXCSUM_IPV6 |
IFCAP_RXCSUM_IPV6;
#if defined(INET) || defined(INET6)
ifp->if_capabilities |= IFCAP_LRO;
#endif
#ifdef MXGE_NEW_VLAN_API
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
/* Only FW 1.4.32 and newer can do TSO over vlans */
if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 &&
sc->fw_ver_tiny >= 32)
ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
sc->max_mtu = mxge_max_mtu(sc);
if (sc->max_mtu >= 9000)
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
else
device_printf(dev, "MTU limited to %d. Install "
"latest firmware for 9000 byte jumbo support\n",
sc->max_mtu - ETHER_HDR_LEN);
ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
/* check to see if f/w supports TSO for IPv6 */
if (!mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, &cmd)) {
if (CSUM_TCP_IPV6)
ifp->if_capabilities |= IFCAP_TSO6;
sc->max_tso6_hlen = min(cmd.data0,
sizeof (sc->ss[0].scratch));
}
ifp->if_capenable = ifp->if_capabilities;
if (sc->lro_cnt == 0)
ifp->if_capenable &= ~IFCAP_LRO;
ifp->if_init = mxge_init;
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = mxge_ioctl;
ifp->if_start = mxge_start;
ifp->if_get_counter = mxge_get_counter;
/* Initialise the ifmedia structure */
ifmedia_init(&sc->media, 0, mxge_media_change,
mxge_media_status);
mxge_media_init(sc);
mxge_media_probe(sc);
sc->dying = 0;
ether_ifattach(ifp, sc->mac_addr);
/* ether_ifattach sets mtu to ETHERMTU */
if (mxge_initial_mtu != ETHERMTU)
mxge_change_mtu(sc, mxge_initial_mtu);
mxge_add_sysctls(sc);
#ifdef IFNET_BUF_RING
ifp->if_transmit = mxge_transmit;
ifp->if_qflush = mxge_qflush;
#endif
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->dev));
callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc);
return 0;
abort_with_rings:
mxge_free_rings(sc);
abort_with_slices:
mxge_free_slices(sc);
abort_with_dmabench:
mxge_dma_free(&sc->dmabench_dma);
abort_with_zeropad_dma:
mxge_dma_free(&sc->zeropad_dma);
abort_with_cmd_dma:
mxge_dma_free(&sc->cmd_dma);
abort_with_mem_res:
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res);
abort_with_lock:
pci_disable_busmaster(dev);
mtx_destroy(&sc->cmd_mtx);
mtx_destroy(&sc->driver_mtx);
if_free(ifp);
abort_with_parent_dmat:
bus_dma_tag_destroy(sc->parent_dmat);
abort_with_tq:
if (sc->tq != NULL) {
taskqueue_drain(sc->tq, &sc->watchdog_task);
taskqueue_free(sc->tq);
sc->tq = NULL;
}
abort_with_nothing:
return err;
}
static int
mxge_detach(device_t dev)
{
mxge_softc_t *sc = device_get_softc(dev);
if (mxge_vlans_active(sc)) {
device_printf(sc->dev,
"Detach vlans before removing module\n");
return EBUSY;
}
mtx_lock(&sc->driver_mtx);
sc->dying = 1;
if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
mxge_close(sc, 0);
mtx_unlock(&sc->driver_mtx);
ether_ifdetach(sc->ifp);
if (sc->tq != NULL) {
taskqueue_drain(sc->tq, &sc->watchdog_task);
taskqueue_free(sc->tq);
sc->tq = NULL;
}
callout_drain(&sc->co_hdl);
ifmedia_removeall(&sc->media);
mxge_dummy_rdma(sc, 0);
mxge_rem_sysctls(sc);
mxge_rem_irq(sc);
mxge_free_rings(sc);
mxge_free_slices(sc);
mxge_dma_free(&sc->dmabench_dma);
mxge_dma_free(&sc->zeropad_dma);
mxge_dma_free(&sc->cmd_dma);
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res);
pci_disable_busmaster(dev);
mtx_destroy(&sc->cmd_mtx);
mtx_destroy(&sc->driver_mtx);
if_free(sc->ifp);
bus_dma_tag_destroy(sc->parent_dmat);
return 0;
}
static int
mxge_shutdown(device_t dev)
{
return 0;
}
/*
This file uses Myri10GE driver indentation.
Local Variables:
c-file-style:"linux"
tab-width:8
End:
*/
Index: head/sys/dev/netmap/if_ptnet.c
===================================================================
--- head/sys/dev/netmap/if_ptnet.c (revision 328217)
+++ head/sys/dev/netmap/if_ptnet.c (revision 328218)
@@ -1,2276 +1,2276 @@
/*-
* Copyright (c) 2016, Vincenzo Maffione
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/* Driver for ptnet paravirtualized network device. */
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>
#ifndef PTNET_CSB_ALLOC
#error "No support for on-device CSB"
#endif
#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif
#if __FreeBSD_version >= 1100000
static uint64_t ptnet_get_counter(if_t, ift_counter);
#else
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp) (_ifp)->if_softc
#endif
//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else /* !DEBUG */
#define DBG(x)
#endif /* !DEBUG */
extern int ptnet_vnet_hdr; /* Tunable parameter */
struct ptnet_softc;
struct ptnet_queue_stats {
uint64_t packets; /* if_[io]packets */
uint64_t bytes; /* if_[io]bytes */
uint64_t errors; /* if_[io]errors */
uint64_t iqdrops; /* if_iqdrops */
uint64_t mcasts; /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
uint64_t intrs;
uint64_t kicks;
#endif /* PTNETMAP_STATS */
};
struct ptnet_queue {
struct ptnet_softc *sc;
struct resource *irq;
void *cookie;
int kring_id;
struct ptnet_ring *ptring;
unsigned int kick;
struct mtx lock;
struct buf_ring *bufring; /* for TX queues */
struct ptnet_queue_stats stats;
#ifdef PTNETMAP_STATS
struct ptnet_queue_stats last_stats;
#endif /* PTNETMAP_STATS */
struct taskqueue *taskq;
struct task task;
char lock_name[16];
};
#define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock)
struct ptnet_softc {
device_t dev;
if_t ifp;
struct ifmedia media;
struct mtx lock;
char lock_name[16];
char hwaddr[ETHER_ADDR_LEN];
/* Mirror of PTFEAT register. */
uint32_t ptfeatures;
unsigned int vnet_hdr_len;
/* PCI BARs support. */
struct resource *iomem;
struct resource *msix_mem;
unsigned int num_rings;
unsigned int num_tx_rings;
struct ptnet_queue *queues;
struct ptnet_queue *rxqueues;
struct ptnet_csb *csb;
unsigned int min_tx_space;
struct netmap_pt_guest_adapter *ptna;
struct callout tick;
#ifdef PTNETMAP_STATS
struct timeval last_ts;
#endif /* PTNETMAP_STATS */
};
#define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock)
static int ptnet_probe(device_t);
static int ptnet_attach(device_t);
static int ptnet_detach(device_t);
static int ptnet_suspend(device_t);
static int ptnet_resume(device_t);
static int ptnet_shutdown(device_t);
static void ptnet_init(void *opaque);
static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int ptnet_init_locked(struct ptnet_softc *sc);
static int ptnet_stop(struct ptnet_softc *sc);
static int ptnet_transmit(if_t ifp, struct mbuf *m);
static int ptnet_drain_transmit_queue(struct ptnet_queue *pq,
unsigned int budget,
bool may_resched);
static void ptnet_qflush(if_t ifp);
static void ptnet_tx_task(void *context, int pending);
static int ptnet_media_change(if_t ifp);
static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void ptnet_tick(void *opaque);
#endif
static int ptnet_irqs_init(struct ptnet_softc *sc);
static void ptnet_irqs_fini(struct ptnet_softc *sc);
static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd);
static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr,
unsigned *txd, unsigned *rxr, unsigned *rxd);
static void ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void ptnet_tx_intr(void *opaque);
static void ptnet_rx_intr(void *opaque);
static unsigned ptnet_rx_discard(struct netmap_kring *kring,
unsigned int head);
static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
bool may_resched);
static void ptnet_rx_task(void *context, int pending);
#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif
static device_method_t ptnet_methods[] = {
DEVMETHOD(device_probe, ptnet_probe),
DEVMETHOD(device_attach, ptnet_attach),
DEVMETHOD(device_detach, ptnet_detach),
DEVMETHOD(device_suspend, ptnet_suspend),
DEVMETHOD(device_resume, ptnet_resume),
DEVMETHOD(device_shutdown, ptnet_shutdown),
DEVMETHOD_END
};
static driver_t ptnet_driver = {
"ptnet",
ptnet_methods,
sizeof(struct ptnet_softc)
};
/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
NULL, NULL, SI_ORDER_MIDDLE + 2);
static int
ptnet_probe(device_t dev)
{
if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
return (ENXIO);
}
device_set_desc(dev, "ptnet network adapter");
return (BUS_PROBE_DEFAULT);
}
static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
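/* The write to the per-queue kick register is intercepted by the
 * hypervisor, which then processes the corresponding ring on the
 * host side. */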
bus_write_4(pq->sc->iomem, pq->kick, 0);
}
#define PTNET_BUF_RING_SIZE 4096
#define PTNET_RX_BUDGET 512
#define PTNET_RX_BATCH 1
#define PTNET_TX_BUDGET 512
#define PTNET_TX_BATCH 64
#define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE 65536
#define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
CSUM_SCTP_IPV6)
#define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
PTNET_CSUM_OFFLOAD_IPV6)
static int
ptnet_attach(device_t dev)
{
uint32_t ptfeatures = 0;
unsigned int num_rx_rings, num_tx_rings;
struct netmap_adapter na_arg;
unsigned int nifp_offset;
struct ptnet_softc *sc;
if_t ifp;
uint32_t macreg;
int err, rid;
int i;
sc = device_get_softc(dev);
sc->dev = dev;
/* Setup PCI resources. */
pci_enable_busmaster(dev);
rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
RF_ACTIVE);
if (sc->iomem == NULL) {
device_printf(dev, "Failed to map I/O BAR\n");
return (ENXIO);
}
/* Negotiate features with the hypervisor. */
if (ptnet_vnet_hdr) {
ptfeatures |= PTNETMAP_F_VNET_HDR;
}
bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
sc->ptfeatures = ptfeatures;
/* Allocate CSB and carry out CSB allocation protocol (CSBBAH first,
* then CSBBAL). */
sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (sc->csb == NULL) {
device_printf(dev, "Failed to allocate CSB\n");
err = ENOMEM;
goto err_path;
}
{
/*
* We use uint64_t rather than vm_paddr_t since we
* need 64 bit addresses even on 32 bit platforms.
*/
uint64_t paddr = vtophys(sc->csb);
bus_write_4(sc->iomem, PTNET_IO_CSBBAH,
(paddr >> 32) & 0xffffffff);
bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff);
}
num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
sc->num_rings = num_tx_rings + num_rx_rings;
sc->num_tx_rings = num_tx_rings;
/* Allocate and initialize per-queue data structures. */
- sc->queues = mallocarray(sc->num_rings, sizeof(struct ptnet_queue),
+ sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->queues == NULL) {
err = ENOMEM;
goto err_path;
}
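/* The first num_tx_rings entries of sc->queues are TX queues; the RX
 * queues follow and are also reachable through sc->rxqueues. */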
sc->rxqueues = sc->queues + num_tx_rings;
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
pq->sc = sc;
pq->kring_id = i;
pq->kick = PTNET_IO_KICK_BASE + 4 * i;
pq->ptring = sc->csb->rings + i;
snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
device_get_nameunit(dev), i);
mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
if (i >= num_tx_rings) {
/* RX queue: fix kring_id. */
pq->kring_id -= num_tx_rings;
} else {
/* TX queue: allocate buf_ring. */
pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
M_DEVBUF, M_NOWAIT, &pq->lock);
if (pq->bufring == NULL) {
err = ENOMEM;
goto err_path;
}
}
}
sc->min_tx_space = 64; /* Safe initial value. */
err = ptnet_irqs_init(sc);
if (err) {
goto err_path;
}
/* Setup Ethernet interface. */
sc->ifp = ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "Failed to allocate ifnet\n");
err = ENOMEM;
goto err_path;
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_baudrate = IF_Gbps(10);
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
ifp->if_init = ptnet_init;
ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
ifp->if_get_counter = ptnet_get_counter;
#endif
ifp->if_transmit = ptnet_transmit;
ifp->if_qflush = ptnet_qflush;
ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
ptnet_media_status);
ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);
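/* The MAC address is exposed through two registers: the two most
 * significant bytes in PTNET_IO_MAC_HI and the remaining four in
 * PTNET_IO_MAC_LO. */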
macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
sc->hwaddr[0] = (macreg >> 8) & 0xff;
sc->hwaddr[1] = macreg & 0xff;
macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
sc->hwaddr[2] = (macreg >> 24) & 0xff;
sc->hwaddr[3] = (macreg >> 16) & 0xff;
sc->hwaddr[4] = (macreg >> 8) & 0xff;
sc->hwaddr[5] = macreg & 0xff;
ether_ifattach(ifp, sc->hwaddr);
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
/* Similarly to what the vtnet driver does, we can emulate
* VLAN offloadings by inserting and removing the 802.1Q
* header during transmit and receive. We are then able
* to do checksum offloading of VLAN frames. */
ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
| IFCAP_VLAN_HWCSUM
| IFCAP_TSO | IFCAP_LRO
| IFCAP_VLAN_HWTSO
| IFCAP_VLAN_HWTAGGING;
}
ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
/* Don't enable polling by default. */
ifp->if_capabilities |= IFCAP_POLLING;
#endif
snprintf(sc->lock_name, sizeof(sc->lock_name),
"%s", device_get_nameunit(dev));
mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
callout_init_mtx(&sc->tick, &sc->lock, 0);
/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
memset(&na_arg, 0, sizeof(na_arg));
na_arg.ifp = ifp;
na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
na_arg.num_tx_rings = num_tx_rings;
na_arg.num_rx_rings = num_rx_rings;
na_arg.nm_config = ptnet_nm_config;
na_arg.nm_krings_create = ptnet_nm_krings_create;
na_arg.nm_krings_delete = ptnet_nm_krings_delete;
na_arg.nm_dtor = ptnet_nm_dtor;
na_arg.nm_register = ptnet_nm_register;
na_arg.nm_txsync = ptnet_nm_txsync;
na_arg.nm_rxsync = ptnet_nm_rxsync;
netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset,
bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));
/* Now a netmap adapter for this ifp has been allocated, and it
* can be accessed through NA(ifp). We also have to initialize the CSB
* pointer. */
sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);
/* If virtio-net header was negotiated, set the virt_hdr_len field in
* the netmap adapter, to inform users that this netmap adapter requires
* the application to deal with the headers. */
ptnet_update_vnet_hdr(sc);
device_printf(dev, "%s() completed\n", __func__);
return (0);
err_path:
ptnet_detach(dev);
return err;
}
static int
ptnet_detach(device_t dev)
{
struct ptnet_softc *sc = device_get_softc(dev);
int i;
#ifdef DEVICE_POLLING
if (sc->ifp->if_capenable & IFCAP_POLLING) {
ether_poll_deregister(sc->ifp);
}
#endif
callout_drain(&sc->tick);
if (sc->queues) {
/* Drain taskqueues before calling if_detach. */
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
if (pq->taskq) {
taskqueue_drain(pq->taskq, &pq->task);
}
}
}
if (sc->ifp) {
ether_ifdetach(sc->ifp);
/* Uninitialize netmap adapters for this device. */
netmap_detach(sc->ifp);
ifmedia_removeall(&sc->media);
if_free(sc->ifp);
sc->ifp = NULL;
}
ptnet_irqs_fini(sc);
if (sc->csb) {
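/* Clear the CSB base address registers in the device before
 * freeing the CSB memory (an assumption about the intent of the
 * two writes below). */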
bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0);
bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0);
free(sc->csb, M_DEVBUF);
sc->csb = NULL;
}
if (sc->queues) {
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
if (mtx_initialized(&pq->lock)) {
mtx_destroy(&pq->lock);
}
if (pq->bufring != NULL) {
buf_ring_free(pq->bufring, M_DEVBUF);
}
}
free(sc->queues, M_DEVBUF);
sc->queues = NULL;
}
if (sc->iomem) {
bus_release_resource(dev, SYS_RES_IOPORT,
PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
sc->iomem = NULL;
}
mtx_destroy(&sc->lock);
device_printf(dev, "%s() completed\n", __func__);
return (0);
}
static int
ptnet_suspend(device_t dev)
{
struct ptnet_softc *sc;
sc = device_get_softc(dev);
(void)sc;
return (0);
}
static int
ptnet_resume(device_t dev)
{
struct ptnet_softc *sc;
sc = device_get_softc(dev);
(void)sc;
return (0);
}
static int
ptnet_shutdown(device_t dev)
{
/*
* Suspend already does all of what we need to
* do here; we just never expect to be resumed.
*/
return (ptnet_suspend(dev));
}
static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
int nvecs = sc->num_rings;
device_t dev = sc->dev;
int err = ENOSPC;
int cpu_cur;
int i;
if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
device_printf(dev, "Could not find MSI-X capability\n");
return (ENXIO);
}
sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (sc->msix_mem == NULL) {
device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
return (ENXIO);
}
if (pci_msix_count(dev) < nvecs) {
device_printf(dev, "Not enough MSI-X vectors\n");
goto err_path;
}
err = pci_alloc_msix(dev, &nvecs);
if (err) {
device_printf(dev, "Failed to allocate MSI-X vectors\n");
goto err_path;
}
for (i = 0; i < nvecs; i++) {
struct ptnet_queue *pq = sc->queues + i;
rid = i + 1;
pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
if (pq->irq == NULL) {
device_printf(dev, "Failed to allocate interrupt "
"for queue #%d\n", i);
err = ENOSPC;
goto err_path;
}
}
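/* Register an interrupt handler for each queue: TX handlers for the
* first num_tx_rings vectors, RX handlers for the remaining ones. */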
cpu_cur = CPU_FIRST();
for (i = 0; i < nvecs; i++) {
struct ptnet_queue *pq = sc->queues + i;
void (*handler)(void *) = ptnet_tx_intr;
if (i >= sc->num_tx_rings) {
handler = ptnet_rx_intr;
}
err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL /* intr_filter */, handler,
pq, &pq->cookie);
if (err) {
device_printf(dev, "Failed to register intr handler "
"for queue #%d\n", i);
goto err_path;
}
bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
cpu_cur = CPU_NEXT(cpu_cur);
}
device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);
cpu_cur = CPU_FIRST();
for (i = 0; i < nvecs; i++) {
struct ptnet_queue *pq = sc->queues + i;
void (*handler)(void *context, int pending);
handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;
TASK_INIT(&pq->task, 0, handler, pq);
pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
taskqueue_thread_enqueue, &pq->taskq);
taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
device_get_nameunit(sc->dev), cpu_cur);
cpu_cur = CPU_NEXT(cpu_cur);
}
return 0;
err_path:
ptnet_irqs_fini(sc);
return err;
}
static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
device_t dev = sc->dev;
int i;
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
if (pq->taskq) {
taskqueue_free(pq->taskq);
pq->taskq = NULL;
}
if (pq->cookie) {
bus_teardown_intr(dev, pq->irq, pq->cookie);
pq->cookie = NULL;
}
if (pq->irq) {
bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
pq->irq = NULL;
}
}
if (sc->msix_mem) {
pci_release_msi(dev);
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
sc->msix_mem);
sc->msix_mem = NULL;
}
}
static void
ptnet_init(void *opaque)
{
struct ptnet_softc *sc = opaque;
PTNET_CORE_LOCK(sc);
ptnet_init_locked(sc);
PTNET_CORE_UNLOCK(sc);
}
static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
struct ptnet_softc *sc = if_getsoftc(ifp);
device_t dev = sc->dev;
struct ifreq *ifr = (struct ifreq *)data;
int mask, err = 0;
switch (cmd) {
case SIOCSIFFLAGS:
device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
PTNET_CORE_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
/* Network stack wants the iff to be up. */
err = ptnet_init_locked(sc);
} else {
/* Network stack wants the iff to be down. */
err = ptnet_stop(sc);
}
/* We don't need to do anything to support IFF_PROMISC,
* since that is managed by the backend port. */
PTNET_CORE_UNLOCK(sc);
break;
case SIOCSIFCAP:
device_printf(dev, "SIOCSIFCAP %x %x\n",
ifr->ifr_reqcap, ifp->if_capenable);
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
struct ptnet_queue *pq;
int i;
if (ifr->ifr_reqcap & IFCAP_POLLING) {
err = ether_poll_register(ptnet_poll, ifp);
if (err) {
break;
}
/* Stop queues and sync with taskqueues. */
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
/* Make sure the worker sees the
* IFF_DRV_RUNNING down. */
PTNET_Q_LOCK(pq);
pq->ptring->guest_need_kick = 0;
PTNET_Q_UNLOCK(pq);
/* Wait for rescheduling to finish. */
if (pq->taskq) {
taskqueue_drain(pq->taskq,
&pq->task);
}
}
ifp->if_drv_flags |= IFF_DRV_RUNNING;
} else {
err = ether_poll_deregister(ifp);
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
PTNET_Q_LOCK(pq);
pq->ptring->guest_need_kick = 1;
PTNET_Q_UNLOCK(pq);
}
}
}
#endif /* DEVICE_POLLING */
ifp->if_capenable = ifr->ifr_reqcap;
break;
case SIOCSIFMTU:
/* We support any reasonable MTU. */
if (ifr->ifr_mtu < ETHERMIN ||
ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
err = EINVAL;
} else {
PTNET_CORE_LOCK(sc);
ifp->if_mtu = ifr->ifr_mtu;
PTNET_CORE_UNLOCK(sc);
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
break;
default:
err = ether_ioctl(ifp, cmd, data);
break;
}
return err;
}
static int
ptnet_init_locked(struct ptnet_softc *sc)
{
if_t ifp = sc->ifp;
struct netmap_adapter *na_dr = &sc->ptna->dr.up;
struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
unsigned int nm_buf_size;
int ret;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
return 0; /* nothing to do */
}
device_printf(sc->dev, "%s\n", __func__);
/* Translate offload capabilities according to if_capenable. */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
if (ifp->if_capenable & IFCAP_TSO4)
ifp->if_hwassist |= CSUM_IP_TSO;
if (ifp->if_capenable & IFCAP_TSO6)
ifp->if_hwassist |= CSUM_IP6_TSO;
/*
* Prepare the interface for netmap mode access.
*/
netmap_update_config(na_dr);
ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
if (ret) {
device_printf(sc->dev, "netmap_mem_finalize() failed\n");
return ret;
}
if (sc->ptna->backend_regifs == 0) {
ret = ptnet_nm_krings_create(na_nm);
if (ret) {
device_printf(sc->dev, "ptnet_nm_krings_create() "
"failed\n");
goto err_mem_finalize;
}
ret = netmap_mem_rings_create(na_dr);
if (ret) {
device_printf(sc->dev, "netmap_mem_rings_create() "
"failed\n");
goto err_rings_create;
}
ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
if (ret) {
device_printf(sc->dev, "netmap_mem_get_lut() "
"failed\n");
goto err_get_lut;
}
}
ret = ptnet_nm_register(na_dr, 1 /* on */);
if (ret) {
goto err_register;
}
nm_buf_size = NETMAP_BUF_SIZE(na_dr);
KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
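/* Conservative estimate of the TX slots needed by a maximum-sized
* packet: enough netmap buffers to hold PTNET_MAX_PKT_SIZE bytes,
* with some slack (presumably for the virtio-net header and rounding). */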
sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
sc->min_tx_space);
#ifdef PTNETMAP_STATS
callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif
ifp->if_drv_flags |= IFF_DRV_RUNNING;
return 0;
err_register:
memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
netmap_mem_rings_delete(na_dr);
err_rings_create:
ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
netmap_mem_deref(na_dr->nm_mem, na_dr);
return ret;
}
/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
if_t ifp = sc->ifp;
struct netmap_adapter *na_dr = &sc->ptna->dr.up;
struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
int i;
device_printf(sc->dev, "%s\n", __func__);
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
return 0; /* nothing to do */
}
/* Clear the driver-ready flag, and synchronize with all the queues,
* so that after this loop we are sure nobody is working anymore with
* the device. This scheme is taken from the vtnet driver. */
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
callout_stop(&sc->tick);
for (i = 0; i < sc->num_rings; i++) {
PTNET_Q_LOCK(sc->queues + i);
PTNET_Q_UNLOCK(sc->queues + i);
}
ptnet_nm_register(na_dr, 0 /* off */);
if (sc->ptna->backend_regifs == 0) {
netmap_mem_rings_delete(na_dr);
ptnet_nm_krings_delete(na_nm);
}
netmap_mem_deref(na_dr->nm_mem, na_dr);
return 0;
}
static void
ptnet_qflush(if_t ifp)
{
struct ptnet_softc *sc = if_getsoftc(ifp);
int i;
/* Flush all the bufrings and do the interface flush. */
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
struct mbuf *m;
PTNET_Q_LOCK(pq);
if (pq->bufring) {
while ((m = buf_ring_dequeue_sc(pq->bufring))) {
m_freem(m);
}
}
PTNET_Q_UNLOCK(pq);
}
if_qflush(ifp);
}
static int
ptnet_media_change(if_t ifp)
{
struct ptnet_softc *sc = if_getsoftc(ifp);
struct ifmedia *ifm = &sc->media;
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
return EINVAL;
}
return 0;
}
#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
struct ptnet_softc *sc = if_getsoftc(ifp);
struct ptnet_queue_stats stats[2];
int i;
/* Accumulate statistics over the queues. */
memset(stats, 0, sizeof(stats));
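/* stats[0] aggregates the TX queues, stats[1] the RX queues. */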
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
int idx = (i < sc->num_tx_rings) ? 0 : 1;
stats[idx].packets += pq->stats.packets;
stats[idx].bytes += pq->stats.bytes;
stats[idx].errors += pq->stats.errors;
stats[idx].iqdrops += pq->stats.iqdrops;
stats[idx].mcasts += pq->stats.mcasts;
}
switch (cnt) {
case IFCOUNTER_IPACKETS:
return (stats[1].packets);
case IFCOUNTER_IQDROPS:
return (stats[1].iqdrops);
case IFCOUNTER_IERRORS:
return (stats[1].errors);
case IFCOUNTER_OPACKETS:
return (stats[0].packets);
case IFCOUNTER_OBYTES:
return (stats[0].bytes);
case IFCOUNTER_OMCASTS:
return (stats[0].mcasts);
default:
return (if_get_counter_default(ifp, cnt));
}
}
#endif
#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
struct ptnet_softc *sc = opaque;
int i;
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
struct ptnet_queue_stats cur = pq->stats;
struct timeval now;
unsigned int delta;
microtime(&now);
delta = now.tv_usec - sc->last_ts.tv_usec +
(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
delta /= 1000; /* in milliseconds */
if (delta == 0)
continue;
device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
"intr %lu\n", i, delta,
(cur.packets - pq->last_stats.packets),
(cur.kicks - pq->last_stats.kicks),
(cur.intrs - pq->last_stats.intrs));
pq->last_stats = cur;
}
microtime(&sc->last_ts);
callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */
static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
/* We are always active, as the backend netmap port is
* always open in netmap mode. */
ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}
static uint32_t
ptnet_nm_ptctl(if_t ifp, uint32_t cmd)
{
struct ptnet_softc *sc = if_getsoftc(ifp);
/*
* Write a command and read back error status,
* with zero meaning success.
*/
bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}
static int
ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd,
unsigned *rxr, unsigned *rxd)
{
struct ptnet_softc *sc = if_getsoftc(na->ifp);
*txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
*rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
*txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
*rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n",
*txr, *rxr, *txd, *rxd);
return 0;
}
static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
int i;
/* Sync krings from the host, reading from the CSB. */
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_ring *ptring = sc->queues[i].ptring;
struct netmap_kring *kring;
if (i < na->num_tx_rings) {
kring = na->tx_rings + i;
} else {
kring = na->rx_rings + i - na->num_tx_rings;
}
kring->rhead = kring->ring->head = ptring->head;
kring->rcur = kring->ring->cur = ptring->cur;
kring->nr_hwcur = ptring->hwcur;
kring->nr_hwtail = kring->rtail =
kring->ring->tail = ptring->hwtail;
ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
ptring->hwcur, ptring->head, ptring->cur,
ptring->hwtail);
ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
kring->ring->head, kring->ring->cur, kring->nr_hwtail,
kring->rtail, kring->ring->tail);
}
}
static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;
bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
/* device-specific */
if_t ifp = na->ifp;
struct ptnet_softc *sc = if_getsoftc(ifp);
int native = (na == &sc->ptna->hwup.up);
struct ptnet_queue *pq;
enum txrx t;
int ret = 0;
int i;
if (!onoff) {
sc->ptna->backend_regifs--;
}
/* If this is the last netmap client, the guest interrupt enable flags
* may be in an arbitrary state. Since these flags are also going to be
* used by the netdevice driver, we have to make sure to start with
* notifications enabled. Pending packets in the RX rings also need to
* be flushed, since we will not receive further interrupts until they
* are processed. */
if (native && !onoff && na->active_fds == 0) {
D("Exit netmap mode, re-enable interrupts");
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
pq->ptring->guest_need_kick = 1;
}
}
if (onoff) {
if (sc->ptna->backend_regifs == 0) {
/* Initialize notification enable fields in the CSB. */
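/* Host notifications (kicks) start enabled on every queue; guest
* notifications are enabled only on RX queues, and only if polling
* is disabled. */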
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
pq->ptring->host_need_kick = 1;
pq->ptring->guest_need_kick =
(!(ifp->if_capenable & IFCAP_POLLING)
&& i >= sc->num_tx_rings);
}
/* Set the virtio-net header length. */
ptnet_update_vnet_hdr(sc);
/* Make sure the host adapter passed through is ready
* for txsync/rxsync. */
ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE);
if (ret) {
return ret;
}
}
/* Sync from CSB must be done after REGIF PTCTL. Skip this
* step only if this is a netmap client and it is not the
* first one. */
if ((!native && sc->ptna->backend_regifs == 0) ||
(native && na->active_fds == 0)) {
ptnet_sync_from_csb(sc, na);
}
/* If not native, don't call nm_set_native_flags, since we don't want
* to replace if_transmit method, nor set NAF_NETMAP_ON */
if (native) {
for_rx_tx(t) {
for (i = 0; i <= nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = &NMR(na, t)[i];
if (nm_kring_pending_on(kring)) {
kring->nr_mode = NKR_NETMAP_ON;
}
}
}
nm_set_native_flags(na);
}
} else {
if (native) {
nm_clear_native_flags(na);
for_rx_tx(t) {
for (i = 0; i <= nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = &NMR(na, t)[i];
if (nm_kring_pending_off(kring)) {
kring->nr_mode = NKR_NETMAP_OFF;
}
}
}
}
/* Sync from CSB must be done before UNREGIF PTCTL, on the last
* netmap client. */
if (native && na->active_fds == 0) {
ptnet_sync_from_csb(sc, na);
}
if (sc->ptna->backend_regifs == 0) {
ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE);
}
}
if (onoff) {
sc->ptna->backend_regifs++;
}
return ret;
}
static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
struct ptnet_queue *pq = sc->queues + kring->ring_id;
bool notify;
notify = netmap_pt_guest_txsync(pq->ptring, kring, flags);
if (notify) {
ptnet_kick(pq);
}
return 0;
}
static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
bool notify;
notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags);
if (notify) {
ptnet_kick(pq);
}
return 0;
}
static void
ptnet_tx_intr(void *opaque)
{
struct ptnet_queue *pq = opaque;
struct ptnet_softc *sc = pq->sc;
DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */
if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
return;
}
/* Schedule the taskqueue to process pending transmission requests.
* However, vtnet, if_em and if_igb just call ptnet_transmit() here,
* at least when using MSI-X interrupts. The if_em driver, instead,
* schedules the taskqueue when using legacy interrupts. */
taskqueue_enqueue(pq->taskq, &pq->task);
}
static void
ptnet_rx_intr(void *opaque)
{
struct ptnet_queue *pq = opaque;
struct ptnet_softc *sc = pq->sc;
unsigned int unused;
DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */
if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
return;
}
/* Like the vtnet, if_igb and if_em drivers do when using MSI-X
* interrupts, we execute receive-side processing directly in the
* interrupt service routine. Alternatively, we could schedule the
* taskqueue. */
ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
/* The following offload-related functions are taken from the vtnet
* driver, but the same functionality is required for the ptnet driver.
* As a temporary solution, I copied this code from vtnet and started
* to generalize it (taking away driver-specific statistics accounting),
* making as few modifications as possible.
* In the future we need to share these functions between vtnet and ptnet.
*/
static int
ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
{
struct ether_vlan_header *evh;
int offset;
evh = mtod(m, struct ether_vlan_header *);
if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
/* BMV: We should handle nested VLAN tags too. */
*etype = ntohs(evh->evl_proto);
offset = sizeof(struct ether_vlan_header);
} else {
*etype = ntohs(evh->evl_encap_proto);
offset = sizeof(struct ether_header);
}
switch (*etype) {
#if defined(INET)
case ETHERTYPE_IP: {
struct ip *ip, iphdr;
if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
m_copydata(m, offset, sizeof(struct ip),
(caddr_t) &iphdr);
ip = &iphdr;
} else
ip = (struct ip *)(m->m_data + offset);
*proto = ip->ip_p;
*start = offset + (ip->ip_hl << 2);
break;
}
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
*proto = -1;
*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
/* Assert the network stack sent us a valid packet. */
KASSERT(*start > offset,
("%s: mbuf %p start %d offset %d proto %d", __func__, m,
*start, offset, *proto));
break;
#endif
default:
/* Here we should increment the tx_csum_bad_ethtype counter. */
return (EINVAL);
}
return (0);
}
static int
ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
int offset, bool allow_ecn, struct virtio_net_hdr *hdr)
{
static struct timeval lastecn;
static int curecn;
struct tcphdr *tcp, tcphdr;
if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
tcp = &tcphdr;
} else
tcp = (struct tcphdr *)(m->m_data + offset);
hdr->hdr_len = offset + (tcp->th_off << 2);
hdr->gso_size = m->m_pkthdr.tso_segsz;
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
VIRTIO_NET_HDR_GSO_TCPV6;
if (tcp->th_flags & TH_CWR) {
/*
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
* ECN support is not on a per-interface basis, but globally via
* the net.inet.tcp.ecn.enable sysctl knob. The default is off.
*/
if (!allow_ecn) {
if (ppsratecheck(&lastecn, &curecn, 1))
if_printf(ifp,
"TSO with ECN not negotiated with host\n");
return (ENOTSUP);
}
hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
}
/* Here we should increment tx_tso counter. */
return (0);
}
static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
struct virtio_net_hdr *hdr)
{
int flags, etype, csum_start, proto, error;
flags = m->m_pkthdr.csum_flags;
error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);
if (error)
goto drop;
if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
(etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
/*
* We could compare the IP protocol vs the CSUM_ flag too,
* but that really should not be necessary.
*/
hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
hdr->csum_start = csum_start;
hdr->csum_offset = m->m_pkthdr.csum_data;
/* Here we should increment the tx_csum counter. */
}
if (flags & CSUM_TSO) {
if (__predict_false(proto != IPPROTO_TCP)) {
/* Likely failed to correctly parse the mbuf.
* Here we should increment the tx_tso_not_tcp
* counter. */
goto drop;
}
KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
("%s: mbuf %p TSO without checksum offload %#x",
__func__, m, flags));
error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,
allow_ecn, hdr);
if (error)
goto drop;
}
return (m);
drop:
m_freem(m);
return (NULL);
}
static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
struct ether_vlan_header *evh;
evh = mtod(m, struct ether_vlan_header *);
m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
m->m_flags |= M_VLANTAG;
/* Strip the 802.1Q header. */
bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
ETHER_HDR_LEN - ETHER_TYPE_LEN);
m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
/*
* Use the checksum offset in the VirtIO header to set the
* correct CSUM_* flags.
*/
static int
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
struct virtio_net_hdr *hdr)
{
#if defined(INET) || defined(INET6)
int offset = hdr->csum_start + hdr->csum_offset;
#endif
/* Only do a basic sanity check on the offset. */
switch (eth_type) {
#if defined(INET)
case ETHERTYPE_IP:
if (__predict_false(offset < ip_start + sizeof(struct ip)))
return (1);
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
return (1);
break;
#endif
default:
/* Here we should increment the rx_csum_bad_ethtype counter. */
return (1);
}
/*
* Use the offset to determine the appropriate CSUM_* flags. This is
* a bit dirty, but we can get by with it since the checksum offsets
* happen to be different. We assume the host does not do IPv4
* header checksum offloading.
*/
switch (hdr->csum_offset) {
case offsetof(struct udphdr, uh_sum):
case offsetof(struct tcphdr, th_sum):
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
case offsetof(struct sctphdr, checksum):
m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
break;
default:
/* Here we should increment the rx_csum_bad_offset counter. */
return (1);
}
return (0);
}
static int
ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start,
struct virtio_net_hdr *hdr)
{
int offset, proto;
switch (eth_type) {
#if defined(INET)
case ETHERTYPE_IP: {
struct ip *ip;
if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
return (1);
ip = (struct ip *)(m->m_data + ip_start);
proto = ip->ip_p;
offset = ip_start + (ip->ip_hl << 2);
break;
}
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len < ip_start +
sizeof(struct ip6_hdr)))
return (1);
offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
if (__predict_false(offset < 0))
return (1);
break;
#endif
default:
/* Here we should increment the rx_csum_bad_ethtype counter. */
return (1);
}
switch (proto) {
case IPPROTO_TCP:
if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
return (1);
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
case IPPROTO_UDP:
if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
return (1);
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
case IPPROTO_SCTP:
if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
return (1);
m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
break;
default:
/*
* For the remaining protocols, FreeBSD does not support
* checksum offloading, so the checksum will be recomputed.
*/
#if 0
if_printf(ifp, "cksum offload of unsupported "
"protocol eth_type=%#x proto=%d csum_start=%d "
"csum_offset=%d\n", __func__, eth_type, proto,
hdr->csum_start, hdr->csum_offset);
#endif
break;
}
return (0);
}
/*
* Set the appropriate CSUM_* flags. Unfortunately, the information
* provided is not directly useful to us. The VirtIO header gives the
* offset of the checksum, which is all Linux needs, but this is not
* how FreeBSD does things. We are forced to peek inside the packet
* a bit.
*
* It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
* could accept the offsets and let the stack figure it out.
*/
static int
ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr)
{
struct ether_header *eh;
struct ether_vlan_header *evh;
uint16_t eth_type;
int offset, error;
eh = mtod(m, struct ether_header *);
eth_type = ntohs(eh->ether_type);
if (eth_type == ETHERTYPE_VLAN) {
/* BMV: We should handle nested VLAN tags too. */
evh = mtod(m, struct ether_vlan_header *);
eth_type = ntohs(evh->evl_proto);
offset = sizeof(struct ether_vlan_header);
} else
offset = sizeof(struct ether_header);
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
else
error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);
return (error);
}
/* End of offloading-related functions to be shared with vtnet. */
static inline void
ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring)
{
struct netmap_ring *ring = kring->ring;
/* Update hwcur and hwtail as known by the host. */
ptnetmap_guest_read_kring_csb(ptring, kring);
/* nm_sync_finalize */
ring->tail = kring->rtail = kring->nr_hwtail;
}
static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
unsigned int head, unsigned int sync_flags)
{
struct netmap_ring *ring = kring->ring;
struct ptnet_ring *ptring = pq->ptring;
/* Some packets have been pushed to the netmap ring. We have
* to tell the host to process the new packets, updating cur
* and head in the CSB. */
ring->head = ring->cur = head;
/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
kring->rcur = kring->rhead = head;
ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);
/* Kick the host if needed. */
if (NM_ACCESS_ONCE(ptring->host_need_kick)) {
ptring->sync_flags = sync_flags;
ptnet_kick(pq);
}
}
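/* Evaluate to true if the TX kring has fewer than _min free slots
* between the candidate head _h and the last known tail (rtail),
* taking the ring wrap-around into account. */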
#define PTNET_TX_NOSPACE(_h, _k, _min) \
((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
(_k)->rtail - (_h)) < (_min)
/* This function may be called by the network stack, or by
* the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
bool may_resched)
{
struct ptnet_softc *sc = pq->sc;
bool have_vnet_hdr = sc->vnet_hdr_len;
struct netmap_adapter *na = &sc->ptna->dr.up;
if_t ifp = sc->ifp;
unsigned int batch_count = 0;
struct ptnet_ring *ptring;
struct netmap_kring *kring;
struct netmap_ring *ring;
struct netmap_slot *slot;
unsigned int count = 0;
unsigned int minspace;
unsigned int head;
unsigned int lim;
struct mbuf *mhead;
struct mbuf *mf;
int nmbuf_bytes;
uint8_t *nmbuf;
if (!PTNET_Q_TRYLOCK(pq)) {
/* We failed to acquire the lock, schedule the taskqueue. */
RD(1, "Deferring TX work");
if (may_resched) {
taskqueue_enqueue(pq->taskq, &pq->task);
}
return 0;
}
if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
PTNET_Q_UNLOCK(pq);
RD(1, "Interface is down");
return ENETDOWN;
}
ptring = pq->ptring;
kring = na->tx_rings + pq->kring_id;
ring = kring->ring;
lim = kring->nkr_num_slots - 1;
head = ring->head;
minspace = sc->min_tx_space;
while (count < budget) {
if (PTNET_TX_NOSPACE(head, kring, minspace)) {
/* We ran out of slots; let's see if the host has
* freed some up, by reading hwcur and hwtail from
* the CSB. */
ptnet_sync_tail(ptring, kring);
if (PTNET_TX_NOSPACE(head, kring, minspace)) {
/* Still no slots available. Reactivate the
* interrupts so that we can be notified
* when some free slots are made available by
* the host. */
ptring->guest_need_kick = 1;
/* Double-check. */
ptnet_sync_tail(ptring, kring);
if (likely(PTNET_TX_NOSPACE(head, kring,
minspace))) {
break;
}
RD(1, "Found more slots by doublecheck");
/* More slots were freed before reactivating
* the interrupts. */
ptring->guest_need_kick = 0;
}
}
mhead = drbr_peek(ifp, pq->bufring);
if (!mhead) {
break;
}
/* Initialize transmission state variables. */
slot = ring->slot + head;
nmbuf = NMB(na, slot);
nmbuf_bytes = 0;
/* If needed, prepare the virtio-net header at the beginning
* of the first slot. */
if (have_vnet_hdr) {
struct virtio_net_hdr *vh =
(struct virtio_net_hdr *)nmbuf;
/* For performance, we could replace this memset() with
* two 8-bytes-wide writes. */
memset(nmbuf, 0, PTNET_HDR_SIZE);
if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
mhead = ptnet_tx_offload(ifp, mhead, false,
vh);
if (unlikely(!mhead)) {
/* Packet dropped because errors
* occurred while preparing the vnet
* header. Let's go ahead with the next
* packet. */
pq->stats.errors ++;
drbr_advance(ifp, pq->bufring);
continue;
}
}
ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
"csum_start %u csum_ofs %u hdr_len = %u "
"gso_size %u gso_type %x", __func__,
mhead->m_pkthdr.csum_flags, vh->flags,
vh->csum_start, vh->csum_offset, vh->hdr_len,
vh->gso_size, vh->gso_type);
nmbuf += PTNET_HDR_SIZE;
nmbuf_bytes += PTNET_HDR_SIZE;
}
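/* Copy the mbuf chain into one or more consecutive netmap slots,
* marking every slot but the last one with NS_MOREFRAG. */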
for (mf = mhead; mf; mf = mf->m_next) {
uint8_t *mdata = mf->m_data;
int mlen = mf->m_len;
for (;;) {
int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;
if (mlen < copy) {
copy = mlen;
}
memcpy(nmbuf, mdata, copy);
mdata += copy;
mlen -= copy;
nmbuf += copy;
nmbuf_bytes += copy;
if (!mlen) {
break;
}
slot->len = nmbuf_bytes;
slot->flags = NS_MOREFRAG;
head = nm_next(head, lim);
KASSERT(head != ring->tail,
("Unexpectedly run out of TX space"));
slot = ring->slot + head;
nmbuf = NMB(na, slot);
nmbuf_bytes = 0;
}
}
/* Complete last slot and update head. */
slot->len = nmbuf_bytes;
slot->flags = 0;
head = nm_next(head, lim);
/* Consume the packet just processed. */
drbr_advance(ifp, pq->bufring);
/* Copy the packet to listeners. */
ETHER_BPF_MTAP(ifp, mhead);
pq->stats.packets ++;
pq->stats.bytes += mhead->m_pkthdr.len;
if (mhead->m_flags & M_MCAST) {
pq->stats.mcasts ++;
}
m_freem(mhead);
count ++;
if (++batch_count == PTNET_TX_BATCH) {
ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
batch_count = 0;
}
}
if (batch_count) {
ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
}
if (count >= budget && may_resched) {
DBG(RD(1, "out of budget: resched, %d mbufs pending\n",
drbr_inuse(ifp, pq->bufring)));
taskqueue_enqueue(pq->taskq, &pq->task);
}
PTNET_Q_UNLOCK(pq);
return count;
}
static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
struct ptnet_softc *sc = if_getsoftc(ifp);
struct ptnet_queue *pq;
unsigned int queue_idx;
int err;
DBG(device_printf(sc->dev, "transmit %p\n", m));
/* Insert 802.1Q header if needed. */
if (m->m_flags & M_VLANTAG) {
m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
if (m == NULL) {
return ENOBUFS;
}
m->m_flags &= ~M_VLANTAG;
}
/* Get the flow-id if available. */
queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
m->m_pkthdr.flowid : curcpu;
if (unlikely(queue_idx >= sc->num_tx_rings)) {
queue_idx %= sc->num_tx_rings;
}
pq = sc->queues + queue_idx;
err = drbr_enqueue(ifp, pq->bufring, m);
if (err) {
/* ENOBUFS when the bufring is full */
RD(1, "%s: drbr_enqueue() failed %d\n",
__func__, err);
pq->stats.errors ++;
return err;
}
if (ifp->if_capenable & IFCAP_POLLING) {
/* If polling is on, the transmit queues will be
* drained by the poller. */
return 0;
}
err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
return (err < 0) ? err : 0;
}
static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
struct netmap_ring *ring = kring->ring;
struct netmap_slot *slot = ring->slot + head;
for (;;) {
head = nm_next(head, kring->nkr_num_slots - 1);
if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
break;
}
slot = ring->slot + head;
}
return head;
}
static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;
do {
unsigned int copy;
if (mtail->m_len == MCLBYTES) {
struct mbuf *mf;
mf = m_getcl(M_NOWAIT, MT_DATA, 0);
if (unlikely(!mf)) {
return NULL;
}
mtail->m_next = mf;
mtail = mf;
mdata = mtod(mtail, uint8_t *);
mtail->m_len = 0;
}
copy = MCLBYTES - mtail->m_len;
if (nmbuf_len < copy) {
copy = nmbuf_len;
}
memcpy(mdata, nmbuf, copy);
nmbuf += copy;
nmbuf_len -= copy;
mdata += copy;
mtail->m_len += copy;
} while (nmbuf_len);
return mtail;
}
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
struct ptnet_softc *sc = pq->sc;
bool have_vnet_hdr = sc->vnet_hdr_len;
struct ptnet_ring *ptring = pq->ptring;
struct netmap_adapter *na = &sc->ptna->dr.up;
struct netmap_kring *kring = na->rx_rings + pq->kring_id;
struct netmap_ring *ring = kring->ring;
unsigned int const lim = kring->nkr_num_slots - 1;
unsigned int head = ring->head;
unsigned int batch_count = 0;
if_t ifp = sc->ifp;
unsigned int count = 0;
PTNET_Q_LOCK(pq);
if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
goto unlock;
}
kring->nr_kflags &= ~NKR_PENDINTR;
while (count < budget) {
unsigned int prev_head = head;
struct mbuf *mhead, *mtail;
struct virtio_net_hdr *vh;
struct netmap_slot *slot;
unsigned int nmbuf_len;
uint8_t *nmbuf;
host_sync:
if (head == ring->tail) {
/* We ran out of slots; let's see if the host has
* added some, by reading hwcur and hwtail from
* the CSB. */
ptnet_sync_tail(ptring, kring);
if (head == ring->tail) {
/* Still no slots available. Reactivate
* interrupts as they were disabled by the
* host thread right before issuing the
* last interrupt. */
ptring->guest_need_kick = 1;
/* Double-check. */
ptnet_sync_tail(ptring, kring);
if (likely(head == ring->tail)) {
break;
}
ptring->guest_need_kick = 0;
}
}
/* Initialize ring state variables, possibly grabbing the
* virtio-net header. */
slot = ring->slot + head;
nmbuf = NMB(na, slot);
nmbuf_len = slot->len;
vh = (struct virtio_net_hdr *)nmbuf;
if (have_vnet_hdr) {
if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
/* There is no good reason why the host should
* split the header across multiple netmap slots.
* If this happens, discard the packet. */
RD(1, "Fragmented vnet-hdr: dropping");
head = ptnet_rx_discard(kring, head);
pq->stats.iqdrops ++;
goto skip;
}
ND(1, "%s: vnet hdr: flags %x csum_start %u "
"csum_ofs %u hdr_len = %u gso_size %u "
"gso_type %x", __func__, vh->flags,
vh->csum_start, vh->csum_offset, vh->hdr_len,
vh->gso_size, vh->gso_type);
nmbuf += PTNET_HDR_SIZE;
nmbuf_len -= PTNET_HDR_SIZE;
}
/* Allocate the head of a new mbuf chain.
* We use m_getcl() to allocate an mbuf with standard cluster
* size (MCLBYTES). In the future we could use m_getjcl()
* to choose different sizes. */
mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (unlikely(mhead == NULL)) {
device_printf(sc->dev, "%s: failed to allocate mbuf "
"head\n", __func__);
pq->stats.errors ++;
break;
}
/* Initialize the mbuf state variables. */
mhead->m_pkthdr.len = nmbuf_len;
mtail->m_len = 0;
/* Scan all the netmap slots containing the current packet. */
for (;;) {
DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
"len %u, flags %u\n", __func__,
head, ring->tail, slot->len,
slot->flags));
mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
if (unlikely(!mtail)) {
/* Ouch. We ran out of memory while processing
* a packet. We have to restore the previous
* head position, free the mbuf chain, and
* schedule the taskqueue to give the packet
* another chance. */
device_printf(sc->dev, "%s: failed to allocate"
" mbuf frag, reset head %u --> %u\n",
__func__, head, prev_head);
head = prev_head;
m_freem(mhead);
pq->stats.errors ++;
if (may_resched) {
taskqueue_enqueue(pq->taskq,
&pq->task);
}
goto escape;
}
/* We have to increment head irrespective of the
* NS_MOREFRAG being set or not. */
head = nm_next(head, lim);
if (!(slot->flags & NS_MOREFRAG)) {
break;
}
if (unlikely(head == ring->tail)) {
/* The very last slot prepared by the host has
* the NS_MOREFRAG set. Drop it and continue
* the outer cycle (to do the double-check). */
RD(1, "Incomplete packet: dropping");
m_freem(mhead);
pq->stats.iqdrops ++;
goto host_sync;
}
slot = ring->slot + head;
nmbuf = NMB(na, slot);
nmbuf_len = slot->len;
mhead->m_pkthdr.len += nmbuf_len;
}
mhead->m_pkthdr.rcvif = ifp;
mhead->m_pkthdr.csum_flags = 0;
/* Store the queue idx in the packet header. */
mhead->m_pkthdr.flowid = pq->kring_id;
M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
struct ether_header *eh;
eh = mtod(mhead, struct ether_header *);
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
ptnet_vlan_tag_remove(mhead);
/*
* With the 802.1Q header removed, update the
* checksum starting location accordingly.
*/
if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
}
}
if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
| VIRTIO_NET_HDR_F_DATA_VALID))) {
if (unlikely(ptnet_rx_csum(mhead, vh))) {
m_freem(mhead);
RD(1, "Csum offload error: dropping");
pq->stats.iqdrops ++;
goto skip;
}
}
pq->stats.packets ++;
pq->stats.bytes += mhead->m_pkthdr.len;
PTNET_Q_UNLOCK(pq);
(*ifp->if_input)(ifp, mhead);
PTNET_Q_LOCK(pq);
if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
/* The interface has gone down while we didn't
* have the lock. Stop any processing and exit. */
goto unlock;
}
skip:
count ++;
if (++batch_count == PTNET_RX_BATCH) {
/* Some packets have been pushed to the network stack.
* We need to update the CSB to tell the host about the new
* ring->cur and ring->head (RX buffer refill). */
ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
batch_count = 0;
}
}
escape:
if (batch_count) {
ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
}
if (count >= budget && may_resched) {
/* If we ran out of budget or the double-check found new
* slots to process, schedule the taskqueue. */
DBG(RD(1, "out of budget: resched h %u t %u\n",
head, ring->tail));
taskqueue_enqueue(pq->taskq, &pq->task);
}
unlock:
PTNET_Q_UNLOCK(pq);
return count;
}
static void
ptnet_rx_task(void *context, int pending)
{
struct ptnet_queue *pq = context;
DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
static void
ptnet_tx_task(void *context, int pending)
{
struct ptnet_queue *pq = context;
DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
* differently, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
struct ptnet_softc *sc = if_getsoftc(ifp);
unsigned int queue_budget;
unsigned int count = 0;
bool borrow = false;
int i;
KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
queue_budget = MAX(budget / sc->num_rings, 1);
RD(1, "Per-queue budget is %d", queue_budget);
while (budget) {
unsigned int rcnt = 0;
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_queue *pq = sc->queues + i;
if (borrow) {
queue_budget = MIN(queue_budget, budget);
if (queue_budget == 0) {
break;
}
}
if (i < sc->num_tx_rings) {
rcnt += ptnet_drain_transmit_queue(pq,
queue_budget, false);
} else {
rcnt += ptnet_rx_eof(pq, queue_budget,
false);
}
}
if (!rcnt) {
/* A scan of the queues gave no result, we can
* stop here. */
break;
}
if (rcnt > budget) {
/* This may happen when the initial budget is less than
* sc->num_rings, since a budget of at least one packet is
* given to each queue anyway. Just pretend we didn't eat
* "so much". */
rcnt = budget;
}
count += rcnt;
budget -= rcnt;
borrow = true;
}
return count;
}
#endif /* DEVICE_POLLING */
Index: head/sys/dev/nvme/nvme_ns.c
===================================================================
--- head/sys/dev/nvme/nvme_ns.c (revision 328217)
+++ head/sys/dev/nvme/nvme_ns.c (revision 328218)
@@ -1,585 +1,584 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (C) 2012-2013 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <dev/pci/pcivar.h>
#include <geom/geom.h>
#include "nvme_private.h"
static void nvme_bio_child_inbed(struct bio *parent, int bio_error);
static void nvme_bio_child_done(void *arg,
const struct nvme_completion *cpl);
static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size,
uint32_t alignment);
static void nvme_free_child_bios(int num_bios,
struct bio **child_bios);
static struct bio ** nvme_allocate_child_bios(int num_bios);
static struct bio ** nvme_construct_child_bios(struct bio *bp,
uint32_t alignment,
int *num_bios);
static int nvme_ns_split_bio(struct nvme_namespace *ns,
struct bio *bp,
uint32_t alignment);
static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
struct thread *td)
{
struct nvme_namespace *ns;
struct nvme_controller *ctrlr;
struct nvme_pt_command *pt;
ns = cdev->si_drv1;
ctrlr = ns->ctrlr;
switch (cmd) {
case NVME_IO_TEST:
case NVME_BIO_TEST:
nvme_ns_test(ns, cmd, arg);
break;
case NVME_PASSTHROUGH_CMD:
pt = (struct nvme_pt_command *)arg;
return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
1 /* is_user_buffer */, 0 /* is_admin_cmd */));
case DIOCGMEDIASIZE:
*(off_t *)arg = (off_t)nvme_ns_get_size(ns);
break;
case DIOCGSECTORSIZE:
*(u_int *)arg = nvme_ns_get_sector_size(ns);
break;
default:
return (ENOTTY);
}
return (0);
}
static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
int error = 0;
if (flags & FWRITE)
error = securelevel_gt(td->td_ucred, 0);
return (error);
}
static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
return (0);
}
static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
struct bio *bp = arg;
/*
* TODO: add more extensive translation of NVMe status codes
* to different bio error codes (i.e. EIO, EINVAL, etc.)
*/
if (nvme_completion_is_error(cpl)) {
bp->bio_error = EIO;
bp->bio_flags |= BIO_ERROR;
bp->bio_resid = bp->bio_bcount;
} else
bp->bio_resid = 0;
biodone(bp);
}
static void
nvme_ns_strategy(struct bio *bp)
{
struct nvme_namespace *ns;
int err;
ns = bp->bio_dev->si_drv1;
err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);
if (err) {
bp->bio_error = err;
bp->bio_flags |= BIO_ERROR;
bp->bio_resid = bp->bio_bcount;
biodone(bp);
}
}
static struct cdevsw nvme_ns_cdevsw = {
.d_version = D_VERSION,
.d_flags = D_DISK,
.d_read = physread,
.d_write = physwrite,
.d_open = nvme_ns_open,
.d_close = nvme_ns_close,
.d_strategy = nvme_ns_strategy,
.d_ioctl = nvme_ns_ioctl
};
uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
return ns->ctrlr->max_xfer_size;
}
uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
return (1 << ns->data.lbaf[ns->data.flbas.format].lbads);
}
uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
return (ns->data.nsze);
}
uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}
uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
return (ns->flags);
}
const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
return ((const char *)ns->ctrlr->cdata.sn);
}
const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
return ((const char *)ns->ctrlr->cdata.mn);
}
const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{
return (&ns->data);
}
uint32_t
nvme_ns_get_stripesize(struct nvme_namespace *ns)
{
return (ns->stripesize);
}
static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
struct bio *bp = arg;
nvme_cb_fn_t bp_cb_fn;
bp_cb_fn = bp->bio_driver1;
if (bp->bio_driver2)
free(bp->bio_driver2, M_NVME);
if (nvme_completion_is_error(status)) {
bp->bio_flags |= BIO_ERROR;
if (bp->bio_error == 0)
bp->bio_error = EIO;
}
if ((bp->bio_flags & BIO_ERROR) == 0)
bp->bio_resid = 0;
else
bp->bio_resid = bp->bio_bcount;
bp_cb_fn(bp, status);
}
static void
nvme_bio_child_inbed(struct bio *parent, int bio_error)
{
struct nvme_completion parent_cpl;
int children, inbed;
if (bio_error != 0) {
parent->bio_flags |= BIO_ERROR;
parent->bio_error = bio_error;
}
/*
* atomic_fetchadd will return the value before adding 1, so we still
* must add 1 to get the updated inbed number. Save bio_children
* before incrementing to guard against race conditions when
* two child bios complete on different queues.
*/
children = atomic_load_acq_int(&parent->bio_children);
inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
if (inbed == children) {
bzero(&parent_cpl, sizeof(parent_cpl));
if (parent->bio_flags & BIO_ERROR)
parent_cpl.status.sc = NVME_SC_DATA_TRANSFER_ERROR;
nvme_ns_bio_done(parent, &parent_cpl);
}
}
static void
nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
{
struct bio *child = arg;
struct bio *parent;
int bio_error;
parent = child->bio_parent;
g_destroy_bio(child);
bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
nvme_bio_child_inbed(parent, bio_error);
}
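/*
* Return the number of 'align'-sized units spanned by a transfer of
* 'size' bytes starting at 'addr'.  For example, with align = 0x1000,
* a transfer of 0x3000 bytes at address 0x2800 crosses the boundaries
* at 0x3000, 0x4000 and 0x5000, and therefore spans 4 segments.
*/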
static uint32_t
nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align)
{
uint32_t num_segs, offset, remainder;
if (align == 0)
return (1);
KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n"));
num_segs = size / align;
remainder = size & (align - 1);
offset = addr & (align - 1);
if (remainder > 0 || offset > 0)
num_segs += 1 + (remainder + offset - 1) / align;
return (num_segs);
}
static void
nvme_free_child_bios(int num_bios, struct bio **child_bios)
{
int i;
for (i = 0; i < num_bios; i++) {
if (child_bios[i] != NULL)
g_destroy_bio(child_bios[i]);
}
free(child_bios, M_NVME);
}
static struct bio **
nvme_allocate_child_bios(int num_bios)
{
struct bio **child_bios;
int err = 0, i;
- child_bios = mallocarray(num_bios, sizeof(struct bio *), M_NVME,
- M_NOWAIT);
+ child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT);
if (child_bios == NULL)
return (NULL);
for (i = 0; i < num_bios; i++) {
child_bios[i] = g_new_bio();
if (child_bios[i] == NULL)
err = ENOMEM;
}
if (err == ENOMEM) {
nvme_free_child_bios(num_bios, child_bios);
return (NULL);
}
return (child_bios);
}
static struct bio **
nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios)
{
struct bio **child_bios;
struct bio *child;
uint64_t cur_offset;
caddr_t data;
uint32_t rem_bcount;
int i;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
struct vm_page **ma;
uint32_t ma_offset;
#endif
*num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount,
alignment);
child_bios = nvme_allocate_child_bios(*num_bios);
if (child_bios == NULL)
return (NULL);
bp->bio_children = *num_bios;
bp->bio_inbed = 0;
cur_offset = bp->bio_offset;
rem_bcount = bp->bio_bcount;
data = bp->bio_data;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
ma_offset = bp->bio_ma_offset;
ma = bp->bio_ma;
#endif
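/*
* Carve the parent bio into children, each ending at the next
* 'alignment' boundary.  For unmapped bios the vm_page array is
* advanced in lockstep; when a child ends in the middle of a page,
* back up one entry so that the next child starts on the same page.
*/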
for (i = 0; i < *num_bios; i++) {
child = child_bios[i];
child->bio_parent = bp;
child->bio_cmd = bp->bio_cmd;
child->bio_offset = cur_offset;
child->bio_bcount = min(rem_bcount,
alignment - (cur_offset & (alignment - 1)));
child->bio_flags = bp->bio_flags;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
if (bp->bio_flags & BIO_UNMAPPED) {
child->bio_ma_offset = ma_offset;
child->bio_ma = ma;
child->bio_ma_n =
nvme_get_num_segments(child->bio_ma_offset,
child->bio_bcount, PAGE_SIZE);
ma_offset = (ma_offset + child->bio_bcount) &
PAGE_MASK;
ma += child->bio_ma_n;
if (ma_offset != 0)
ma -= 1;
} else
#endif
{
child->bio_data = data;
data += child->bio_bcount;
}
cur_offset += child->bio_bcount;
rem_bcount -= child->bio_bcount;
}
return (child_bios);
}
static int
nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
uint32_t alignment)
{
struct bio *child;
struct bio **child_bios;
int err, i, num_bios;
child_bios = nvme_construct_child_bios(bp, alignment, &num_bios);
if (child_bios == NULL)
return (ENOMEM);
for (i = 0; i < num_bios; i++) {
child = child_bios[i];
err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
if (err != 0) {
nvme_bio_child_inbed(bp, err);
g_destroy_bio(child);
}
}
free(child_bios, M_NVME);
return (0);
}
int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
nvme_cb_fn_t cb_fn)
{
struct nvme_dsm_range *dsm_range;
uint32_t num_bios;
int err;
bp->bio_driver1 = cb_fn;
if (ns->stripesize > 0 &&
(bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
num_bios = nvme_get_num_segments(bp->bio_offset,
bp->bio_bcount, ns->stripesize);
if (num_bios > 1)
return (nvme_ns_split_bio(ns, bp, ns->stripesize));
}
switch (bp->bio_cmd) {
case BIO_READ:
err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
break;
case BIO_WRITE:
err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
break;
case BIO_FLUSH:
err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
break;
case BIO_DELETE:
dsm_range =
malloc(sizeof(struct nvme_dsm_range), M_NVME,
M_ZERO | M_WAITOK);
dsm_range->length =
bp->bio_bcount/nvme_ns_get_sector_size(ns);
dsm_range->starting_lba =
bp->bio_offset/nvme_ns_get_sector_size(ns);
bp->bio_driver2 = dsm_range;
err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
nvme_ns_bio_done, bp);
if (err != 0)
free(dsm_range, M_NVME);
break;
default:
err = EIO;
break;
}
return (err);
}
int
nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
struct nvme_controller *ctrlr)
{
struct nvme_completion_poll_status status;
int unit;
ns->ctrlr = ctrlr;
ns->id = id;
ns->stripesize = 0;
if (pci_get_devid(ctrlr->dev) == 0x09538086 && ctrlr->cdata.vs[3] != 0)
ns->stripesize =
(1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size;
/*
* Namespaces are reconstructed after a controller reset, so check
* to make sure we only call mtx_init once on each mtx.
*
* TODO: Move this somewhere where it gets called at controller
* construction time, which is not invoked as part of each
* controller reset.
*/
if (!mtx_initialized(&ns->lock))
mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);
status.done = FALSE;
nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
nvme_completion_poll_cb, &status);
while (status.done == FALSE)
DELAY(5);
if (nvme_completion_is_error(&status.cpl)) {
nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
return (ENXIO);
}
/*
* If the size is zero, chances are this isn't a valid
* namespace (e.g. one that has not been configured yet). The
* standard says the entire id will be zeros, so this is a
* cheap way to test for that.
*/
if (ns->data.nsze == 0)
return (ENXIO);
/*
* Note: format is a 0-based value, so > is appropriate here,
* not >=.
*/
if (ns->data.flbas.format > ns->data.nlbaf) {
printf("lba format %d exceeds number supported (%d)\n",
ns->data.flbas.format, ns->data.nlbaf+1);
return (ENXIO);
}
if (ctrlr->cdata.oncs.dsm)
ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;
if (ctrlr->cdata.vwc.present)
ns->flags |= NVME_NS_FLUSH_SUPPORTED;
/*
* cdev may have already been created, if we are reconstructing the
* namespace after a controller-level reset.
*/
if (ns->cdev != NULL)
return (0);
/*
* Namespace IDs start at 1, so we need to subtract 1 to create a
* correct unit number.
*/
unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1;
/*
* MAKEDEV_ETERNAL was added in r210923, for cdevs that will never
* be destroyed. This avoids refcounting on the cdev object.
* That should be OK here, as long as we don't support PCIe
* surprise removal or namespace deletion.
*/
#ifdef MAKEDEV_ETERNAL_KLD
ns->cdev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &nvme_ns_cdevsw, unit,
NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
device_get_unit(ctrlr->dev), ns->id);
#else
ns->cdev = make_dev_credf(0, &nvme_ns_cdevsw, unit,
NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
device_get_unit(ctrlr->dev), ns->id);
#endif
#ifdef NVME_UNMAPPED_BIO_SUPPORT
ns->cdev->si_flags |= SI_UNMAPPED;
#endif
if (ns->cdev != NULL)
ns->cdev->si_drv1 = ns;
return (0);
}
void nvme_ns_destruct(struct nvme_namespace *ns)
{
if (ns->cdev != NULL)
destroy_dev(ns->cdev);
}
Index: head/sys/dev/pst/pst-iop.c
===================================================================
--- head/sys/dev/pst/pst-iop.c (revision 328217)
+++ head/sys/dev/pst/pst-iop.c (revision 328218)
@@ -1,504 +1,504 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2001,2002,2003 Søren Schmidt <sos@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include "dev/pst/pst-iop.h"
struct iop_request {
struct i2o_single_reply *reply;
u_int32_t mfa;
};
/* local vars */
MALLOC_DEFINE(M_PSTIOP, "PSTIOP", "Promise SuperTrak IOP driver");
int
iop_init(struct iop_softc *sc)
{
int mfa, timeout = 10000;
while ((mfa = sc->reg->iqueue) == 0xffffffff && --timeout)
DELAY(1000);
if (!timeout) {
printf("pstiop: no free mfa\n");
return 0;
}
iop_free_mfa(sc, mfa);
sc->reg->oqueue_intr_mask = 0xffffffff;
if (!iop_reset(sc)) {
printf("pstiop: no reset response\n");
return 0;
}
if (!iop_init_outqueue(sc)) {
printf("pstiop: init outbound queue failed\n");
return 0;
}
/* register iop_attach to be run when interrupts are enabled */
if (!(sc->iop_delayed_attach = (struct intr_config_hook *)
malloc(sizeof(struct intr_config_hook),
M_PSTIOP, M_NOWAIT | M_ZERO))) {
printf("pstiop: malloc of delayed attach hook failed\n");
return 0;
}
sc->iop_delayed_attach->ich_func = iop_attach;
sc->iop_delayed_attach->ich_arg = sc;
if (config_intrhook_establish(sc->iop_delayed_attach)) {
printf("pstiop: config_intrhook_establish failed\n");
free(sc->iop_delayed_attach, M_PSTIOP);
}
return 1;
}
void
iop_attach(void *arg)
{
struct iop_softc *sc;
int i;
sc = arg;
if (sc->iop_delayed_attach) {
config_intrhook_disestablish(sc->iop_delayed_attach);
free(sc->iop_delayed_attach, M_PSTIOP);
sc->iop_delayed_attach = NULL;
}
if (!iop_get_lct(sc)) {
printf("pstiop: get LCT failed\n");
return;
}
/* figure out what devices are here and config as needed */
for (i = 0; sc->lct[i].entry_size == I2O_LCT_ENTRYSIZE; i++) {
#ifdef PSTDEBUG
struct i2o_get_param_reply *reply;
printf("pstiop: LCT entry %d ", i);
printf("class=%04x ", sc->lct[i].class);
printf("sub=%04x ", sc->lct[i].sub_class);
printf("localtid=%04x ", sc->lct[i].local_tid);
printf("usertid=%04x ", sc->lct[i].user_tid);
printf("parentid=%04x\n", sc->lct[i].parent_tid);
if ((reply = iop_get_util_params(sc, sc->lct[i].local_tid,
I2O_PARAMS_OPERATION_FIELD_GET,
I2O_UTIL_DEVICE_IDENTITY_GROUP_NO))) {
struct i2o_device_identity *ident =
(struct i2o_device_identity *)reply->result;
printf("pstiop: vendor=<%.16s> product=<%.16s>\n",
ident->vendor, ident->product);
printf("pstiop: description=<%.16s> revision=<%.8s>\n",
ident->description, ident->revision);
contigfree(reply, PAGE_SIZE, M_PSTIOP);
}
#endif
if (sc->lct[i].user_tid != I2O_TID_NONE &&
sc->lct[i].user_tid != I2O_TID_HOST)
continue;
switch (sc->lct[i].class) {
case I2O_CLASS_DDM:
if (sc->lct[i].sub_class == I2O_SUBCLASS_ISM)
sc->ism = sc->lct[i].local_tid;
break;
case I2O_CLASS_RANDOM_BLOCK_STORAGE:
pst_add_raid(sc, &sc->lct[i]);
break;
}
}
/* setup and enable interrupts */
bus_setup_intr(sc->dev, sc->r_irq, INTR_TYPE_BIO|INTR_ENTROPY|INTR_MPSAFE,
NULL, iop_intr, sc, &sc->handle);
sc->reg->oqueue_intr_mask = 0x0;
}
void
iop_intr(void *data)
{
struct iop_softc *sc = (struct iop_softc *)data;
struct i2o_single_reply *reply;
u_int32_t mfa;
/* we might get more than one finished request per interrupt */
mtx_lock(&sc->mtx);
while (1) {
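/* an empty queue reads as 0xffffffff; only trust that on a second read */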
if ((mfa = sc->reg->oqueue) == 0xffffffff)
if ((mfa = sc->reg->oqueue) == 0xffffffff)
break;
reply = (struct i2o_single_reply *)(sc->obase + (mfa - sc->phys_obase));
/* if this is an event register reply, shout! */
if (reply->function == I2O_UTIL_EVENT_REGISTER) {
struct i2o_util_event_reply_message *event =
(struct i2o_util_event_reply_message *)reply;
printf("pstiop: EVENT!! idx=%08x data=%08x\n",
event->event_mask, event->event_data[0]);
break;
}
/* if the reply is a failure notice we need to free the original mfa */
if (reply->message_flags & I2O_MESSAGE_FLAGS_FAIL)
iop_free_mfa(sc,((struct i2o_fault_reply *)(reply))->preserved_mfa);
/* reply->initiator_context points to the service routine */
((void (*)(struct iop_softc *, u_int32_t, struct i2o_single_reply *))
(reply->initiator_context))(sc, mfa, reply);
}
mtx_unlock(&sc->mtx);
}
int
iop_reset(struct iop_softc *sc)
{
struct i2o_exec_iop_reset_message *msg;
int mfa, timeout = 5000;
volatile u_int32_t reply = 0;
mfa = iop_get_mfa(sc);
msg = (struct i2o_exec_iop_reset_message *)(sc->ibase + mfa);
bzero(msg, sizeof(struct i2o_exec_iop_reset_message));
msg->version_offset = 0x1;
msg->message_flags = 0x0;
msg->message_size = sizeof(struct i2o_exec_iop_reset_message) >> 2;
msg->target_address = I2O_TID_IOP;
msg->initiator_address = I2O_TID_HOST;
msg->function = I2O_EXEC_IOP_RESET;
msg->status_word_low_addr = vtophys(&reply);
msg->status_word_high_addr = 0;
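/*
* The IOP reports reset completion by writing a status word into 'reply'
* (its physical address was passed above), hence the volatile stack
* variable polled below.
*/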
sc->reg->iqueue = mfa;
while (--timeout && !reply)
DELAY(1000);
/* wait for iqueue ready */
timeout = 10000;
while ((mfa = sc->reg->iqueue) == 0xffffffff && --timeout)
DELAY(1000);
iop_free_mfa(sc, mfa);
return reply;
}
int
iop_init_outqueue(struct iop_softc *sc)
{
struct i2o_exec_init_outqueue_message *msg;
int i, mfa, timeout = 5000;
volatile u_int32_t reply = 0;
if (!(sc->obase = contigmalloc(I2O_IOP_OUTBOUND_FRAME_COUNT *
I2O_IOP_OUTBOUND_FRAME_SIZE,
M_PSTIOP, M_NOWAIT,
0x00010000, 0xFFFFFFFF,
PAGE_SIZE, 0))) {
printf("pstiop: contigmalloc of outqueue buffers failed!\n");
return 0;
}
sc->phys_obase = vtophys(sc->obase);
mfa = iop_get_mfa(sc);
msg = (struct i2o_exec_init_outqueue_message *)(sc->ibase + mfa);
bzero(msg, sizeof(struct i2o_exec_init_outqueue_message));
msg->version_offset = 0x61;
msg->message_flags = 0x0;
msg->message_size = sizeof(struct i2o_exec_init_outqueue_message) >> 2;
msg->target_address = I2O_TID_IOP;
msg->initiator_address = I2O_TID_HOST;
msg->function = I2O_EXEC_OUTBOUND_INIT;
msg->host_pagesize = PAGE_SIZE;
msg->init_code = 0x00; /* SOS XXX should be 0x80 == OS */
msg->queue_framesize = I2O_IOP_OUTBOUND_FRAME_SIZE / sizeof(u_int32_t);
msg->sgl[0].flags = I2O_SGL_SIMPLE | I2O_SGL_END | I2O_SGL_EOB;
msg->sgl[0].count = sizeof(reply);
msg->sgl[0].phys_addr[0] = vtophys(&reply);
msg->sgl[1].flags = I2O_SGL_END | I2O_SGL_EOB;
msg->sgl[1].count = 1;
msg->sgl[1].phys_addr[0] = 0;
sc->reg->iqueue = mfa;
/* wait for init to complete */
while (--timeout && reply != I2O_EXEC_OUTBOUND_INIT_COMPLETE)
DELAY(1000);
if (!timeout) {
printf("pstiop: timeout waiting for init-complete response\n");
iop_free_mfa(sc, mfa);
return 0;
}
/* now init our oqueue bufs */
for (i = 0; i < I2O_IOP_OUTBOUND_FRAME_COUNT; i++) {
sc->reg->oqueue = sc->phys_obase + (i * I2O_IOP_OUTBOUND_FRAME_SIZE);
DELAY(1000);
}
return 1;
}
int
iop_get_lct(struct iop_softc *sc)
{
struct i2o_exec_get_lct_message *msg;
struct i2o_get_lct_reply *reply;
int mfa;
#define ALLOCSIZE (PAGE_SIZE + (256 * sizeof(struct i2o_lct_entry)))
if (!(reply = contigmalloc(ALLOCSIZE, M_PSTIOP, M_NOWAIT | M_ZERO,
0x00010000, 0xFFFFFFFF, PAGE_SIZE, 0)))
return 0;
mfa = iop_get_mfa(sc);
msg = (struct i2o_exec_get_lct_message *)(sc->ibase + mfa);
bzero(msg, sizeof(struct i2o_exec_get_lct_message));
msg->version_offset = 0x61;
msg->message_flags = 0x0;
msg->message_size = sizeof(struct i2o_exec_get_lct_message) >> 2;
msg->target_address = I2O_TID_IOP;
msg->initiator_address = I2O_TID_HOST;
msg->function = I2O_EXEC_LCT_NOTIFY;
msg->class = I2O_CLASS_MATCH_ANYCLASS;
msg->last_change_id = 0;
msg->sgl.flags = I2O_SGL_SIMPLE | I2O_SGL_END | I2O_SGL_EOB;
msg->sgl.count = ALLOCSIZE;
msg->sgl.phys_addr[0] = vtophys(reply);
if (iop_queue_wait_msg(sc, mfa, (struct i2o_basic_message *)msg)) {
contigfree(reply, ALLOCSIZE, M_PSTIOP);
return 0;
}
- if (!(sc->lct = mallocarray(reply->table_size, sizeof(struct i2o_lct_entry),
+ if (!(sc->lct = malloc(reply->table_size * sizeof(struct i2o_lct_entry),
M_PSTIOP, M_NOWAIT | M_ZERO))) {
contigfree(reply, ALLOCSIZE, M_PSTIOP);
return 0;
}
bcopy(&reply->entry[0], sc->lct,
reply->table_size * sizeof(struct i2o_lct_entry));
sc->lct_count = reply->table_size;
contigfree(reply, ALLOCSIZE, M_PSTIOP);
return 1;
}
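The hunk above in iop_get_lct() replaces an overflow-checked mallocarray(9)
call with a plain malloc(9) of reply->table_size * sizeof(struct
i2o_lct_entry); the rt2560 hunks further down make the same substitution.
As a minimal sketch of what the checked variant guards against, written
against userland malloc(3) so it stands alone (the helper name is
illustrative, not the kernel implementation):

#include <stdint.h>
#include <stdlib.h>

/*
 * Illustrative only: an overflow-checked array allocation in the spirit of
 * mallocarray(9).  The in-kernel routine may differ in how it reports an
 * overflowing request.
 */
static void *
alloc_array_checked(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);		/* nmemb * size would wrap around */
	return (malloc(nmemb * size));
}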
struct i2o_get_param_reply *
iop_get_util_params(struct iop_softc *sc, int target, int operation, int group)
{
struct i2o_util_get_param_message *msg;
struct i2o_get_param_operation *param;
struct i2o_get_param_reply *reply;
int mfa;
if (!(param = contigmalloc(PAGE_SIZE, M_PSTIOP, M_NOWAIT | M_ZERO,
0x00010000, 0xFFFFFFFF, PAGE_SIZE, 0)))
return NULL;
if (!(reply = contigmalloc(PAGE_SIZE, M_PSTIOP, M_NOWAIT | M_ZERO,
0x00010000, 0xFFFFFFFF, PAGE_SIZE, 0)))
return NULL;
mfa = iop_get_mfa(sc);
msg = (struct i2o_util_get_param_message *)(sc->ibase + mfa);
bzero(msg, sizeof(struct i2o_util_get_param_message));
msg->version_offset = 0x51;
msg->message_flags = 0x0;
msg->message_size = sizeof(struct i2o_util_get_param_message) >> 2;
msg->target_address = target;
msg->initiator_address = I2O_TID_HOST;
msg->function = I2O_UTIL_PARAMS_GET;
msg->operation_flags = 0;
param->operation_count = 1;
param->operation[0].operation = operation;
param->operation[0].group = group;
param->operation[0].field_count = 0xffff;
msg->sgl[0].flags = I2O_SGL_SIMPLE | I2O_SGL_DIR | I2O_SGL_EOB;
msg->sgl[0].count = sizeof(struct i2o_get_param_operation);
msg->sgl[0].phys_addr[0] = vtophys(param);
msg->sgl[1].flags = I2O_SGL_SIMPLE | I2O_SGL_END | I2O_SGL_EOB;
msg->sgl[1].count = PAGE_SIZE;
msg->sgl[1].phys_addr[0] = vtophys(reply);
if (iop_queue_wait_msg(sc, mfa, (struct i2o_basic_message *)msg) ||
reply->error_info_size) {
contigfree(reply, PAGE_SIZE, M_PSTIOP);
reply = NULL;
}
contigfree(param, PAGE_SIZE, M_PSTIOP);
return reply;
}
u_int32_t
iop_get_mfa(struct iop_softc *sc)
{
u_int32_t mfa;
int timeout = 10000;
while ((mfa = sc->reg->iqueue) == 0xffffffff && timeout) {
DELAY(1000);
timeout--;
}
if (!timeout)
printf("pstiop: no free mfa\n");
return mfa;
}
void
iop_free_mfa(struct iop_softc *sc, int mfa)
{
struct i2o_basic_message *msg = (struct i2o_basic_message *)(sc->ibase+mfa);
bzero(msg, sizeof(struct i2o_basic_message));
msg->version = 0x01;
msg->message_flags = 0x0;
msg->message_size = sizeof(struct i2o_basic_message) >> 2;
msg->target_address = I2O_TID_IOP;
msg->initiator_address = I2O_TID_HOST;
msg->function = I2O_UTIL_NOP;
sc->reg->iqueue = mfa;
}
static void
iop_done(struct iop_softc *sc, u_int32_t mfa, struct i2o_single_reply *reply)
{
struct iop_request *request =
(struct iop_request *)reply->transaction_context;
request->reply = reply;
request->mfa = mfa;
wakeup(request);
}
int
iop_queue_wait_msg(struct iop_softc *sc, int mfa, struct i2o_basic_message *msg)
{
struct i2o_single_reply *reply;
struct iop_request request;
u_int32_t out_mfa;
int status, timeout = 10000;
mtx_lock(&sc->mtx);
if (!(sc->reg->oqueue_intr_mask & 0x08)) {
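/*
* Outbound interrupt is not masked: post the message and sleep until
* iop_done(), invoked from iop_intr(), wakes us up.
*/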
msg->transaction_context = (u_int32_t)&request;
msg->initiator_context = (u_int32_t)iop_done;
sc->reg->iqueue = mfa;
if (msleep(&request, &sc->mtx, PRIBIO, "pstwt", 10 * hz)) {
printf("pstiop: timeout waiting for message response\n");
iop_free_mfa(sc, mfa);
mtx_unlock(&sc->mtx);
return -1;
}
status = request.reply->status;
sc->reg->oqueue = request.mfa;
}
else {
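/* outbound interrupt masked: busy-poll the outbound queue for the reply */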
sc->reg->iqueue = mfa;
while (--timeout && ((out_mfa = sc->reg->oqueue) == 0xffffffff))
DELAY(1000);
if (!timeout) {
printf("pstiop: timeout waiting for message response\n");
iop_free_mfa(sc, mfa);
mtx_unlock(&sc->mtx);
return -1;
}
reply = (struct i2o_single_reply *)(sc->obase+(out_mfa-sc->phys_obase));
status = reply->status;
sc->reg->oqueue = out_mfa;
}
mtx_unlock(&sc->mtx);
return status;
}
int
iop_create_sgl(struct i2o_basic_message *msg, caddr_t data, int count, int dir)
{
struct i2o_sgl *sgl = (struct i2o_sgl *)((int32_t *)msg + msg->offset);
u_int32_t sgl_count, sgl_phys;
int i = 0;
if (((uintptr_t)data & 3) || (count & 3)) {
printf("pstiop: non aligned DMA transfer attempted\n");
return 0;
}
if (!count) {
printf("pstiop: zero length DMA transfer attempted\n");
return 0;
}
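/*
* The first segment runs from 'data' to the end of its page; each
* following segment covers one whole page (or whatever remains).
*/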
sgl_count = min(count, (PAGE_SIZE - ((uintptr_t)data & PAGE_MASK)));
sgl_phys = vtophys(data);
sgl->flags = dir | I2O_SGL_PAGELIST | I2O_SGL_EOB | I2O_SGL_END;
sgl->count = count;
data += sgl_count;
count -= sgl_count;
while (count) {
sgl->phys_addr[i] = sgl_phys;
sgl_phys = vtophys(data);
data += min(count, PAGE_SIZE);
count -= min(count, PAGE_SIZE);
if (++i >= I2O_SGL_MAX_SEGS) {
printf("pstiop: too many segments in SGL\n");
return 0;
}
}
sgl->phys_addr[i] = sgl_phys;
msg->message_size += i;
return 1;
}
Index: head/sys/dev/ral/rt2560.c
===================================================================
--- head/sys/dev/ral/rt2560.c (revision 328217)
+++ head/sys/dev/ral/rt2560.c (revision 328218)
@@ -1,2767 +1,2767 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005, 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2560 chipset driver
* http://www.ralinktech.com/
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/ral/rt2560reg.h>
#include <dev/ral/rt2560var.h>
#define RT2560_RSSI(sc, rssi) \
((rssi) > (RT2560_NOISE_FLOOR + (sc)->rssi_corr) ? \
((rssi) - RT2560_NOISE_FLOOR - (sc)->rssi_corr) : 0)
#define RAL_DEBUG
#ifdef RAL_DEBUG
#define DPRINTF(sc, fmt, ...) do { \
if (sc->sc_debug > 0) \
printf(fmt, __VA_ARGS__); \
} while (0)
#define DPRINTFN(sc, n, fmt, ...) do { \
if (sc->sc_debug >= (n)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, n, fmt, ...)
#endif
static struct ieee80211vap *rt2560_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void rt2560_vap_delete(struct ieee80211vap *);
static void rt2560_dma_map_addr(void *, bus_dma_segment_t *, int,
int);
static int rt2560_alloc_tx_ring(struct rt2560_softc *,
struct rt2560_tx_ring *, int);
static void rt2560_reset_tx_ring(struct rt2560_softc *,
struct rt2560_tx_ring *);
static void rt2560_free_tx_ring(struct rt2560_softc *,
struct rt2560_tx_ring *);
static int rt2560_alloc_rx_ring(struct rt2560_softc *,
struct rt2560_rx_ring *, int);
static void rt2560_reset_rx_ring(struct rt2560_softc *,
struct rt2560_rx_ring *);
static void rt2560_free_rx_ring(struct rt2560_softc *,
struct rt2560_rx_ring *);
static int rt2560_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static uint16_t rt2560_eeprom_read(struct rt2560_softc *, uint8_t);
static void rt2560_encryption_intr(struct rt2560_softc *);
static void rt2560_tx_intr(struct rt2560_softc *);
static void rt2560_prio_intr(struct rt2560_softc *);
static void rt2560_decryption_intr(struct rt2560_softc *);
static void rt2560_rx_intr(struct rt2560_softc *);
static void rt2560_beacon_update(struct ieee80211vap *, int item);
static void rt2560_beacon_expire(struct rt2560_softc *);
static void rt2560_wakeup_expire(struct rt2560_softc *);
static void rt2560_scan_start(struct ieee80211com *);
static void rt2560_scan_end(struct ieee80211com *);
static void rt2560_getradiocaps(struct ieee80211com *, int, int *,
struct ieee80211_channel[]);
static void rt2560_set_channel(struct ieee80211com *);
static void rt2560_setup_tx_desc(struct rt2560_softc *,
struct rt2560_tx_desc *, uint32_t, int, int, int,
bus_addr_t);
static int rt2560_tx_bcn(struct rt2560_softc *, struct mbuf *,
struct ieee80211_node *);
static int rt2560_tx_mgt(struct rt2560_softc *, struct mbuf *,
struct ieee80211_node *);
static int rt2560_tx_data(struct rt2560_softc *, struct mbuf *,
struct ieee80211_node *);
static int rt2560_transmit(struct ieee80211com *, struct mbuf *);
static void rt2560_start(struct rt2560_softc *);
static void rt2560_watchdog(void *);
static void rt2560_parent(struct ieee80211com *);
static void rt2560_bbp_write(struct rt2560_softc *, uint8_t,
uint8_t);
static uint8_t rt2560_bbp_read(struct rt2560_softc *, uint8_t);
static void rt2560_rf_write(struct rt2560_softc *, uint8_t,
uint32_t);
static void rt2560_set_chan(struct rt2560_softc *,
struct ieee80211_channel *);
#if 0
static void rt2560_disable_rf_tune(struct rt2560_softc *);
#endif
static void rt2560_enable_tsf_sync(struct rt2560_softc *);
static void rt2560_enable_tsf(struct rt2560_softc *);
static void rt2560_update_plcp(struct rt2560_softc *);
static void rt2560_update_slot(struct ieee80211com *);
static void rt2560_set_basicrates(struct rt2560_softc *,
const struct ieee80211_rateset *);
static void rt2560_update_led(struct rt2560_softc *, int, int);
static void rt2560_set_bssid(struct rt2560_softc *, const uint8_t *);
static void rt2560_set_macaddr(struct rt2560_softc *,
const uint8_t *);
static void rt2560_get_macaddr(struct rt2560_softc *, uint8_t *);
static void rt2560_update_promisc(struct ieee80211com *);
static const char *rt2560_get_rf(int);
static void rt2560_read_config(struct rt2560_softc *);
static int rt2560_bbp_init(struct rt2560_softc *);
static void rt2560_set_txantenna(struct rt2560_softc *, int);
static void rt2560_set_rxantenna(struct rt2560_softc *, int);
static void rt2560_init_locked(struct rt2560_softc *);
static void rt2560_init(void *);
static void rt2560_stop_locked(struct rt2560_softc *);
static int rt2560_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static const struct {
uint32_t reg;
uint32_t val;
} rt2560_def_mac[] = {
RT2560_DEF_MAC
};
static const struct {
uint8_t reg;
uint8_t val;
} rt2560_def_bbp[] = {
RT2560_DEF_BBP
};
static const uint32_t rt2560_rf2522_r2[] = RT2560_RF2522_R2;
static const uint32_t rt2560_rf2523_r2[] = RT2560_RF2523_R2;
static const uint32_t rt2560_rf2524_r2[] = RT2560_RF2524_R2;
static const uint32_t rt2560_rf2525_r2[] = RT2560_RF2525_R2;
static const uint32_t rt2560_rf2525_hi_r2[] = RT2560_RF2525_HI_R2;
static const uint32_t rt2560_rf2525e_r2[] = RT2560_RF2525E_R2;
static const uint32_t rt2560_rf2526_r2[] = RT2560_RF2526_R2;
static const uint32_t rt2560_rf2526_hi_r2[] = RT2560_RF2526_HI_R2;
static const uint8_t rt2560_chan_2ghz[] =
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };
static const uint8_t rt2560_chan_5ghz[] =
{ 36, 40, 44, 48, 52, 56, 60, 64,
100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140,
149, 153, 157, 161 };
static const struct {
uint8_t chan;
uint32_t r1, r2, r4;
} rt2560_rf5222[] = {
RT2560_RF5222
};
int
rt2560_attach(device_t dev, int id)
{
struct rt2560_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
int error;
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF | MTX_RECURSE);
callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0);
mbufq_init(&sc->sc_snd, ifqmaxlen);
/* retrieve RT2560 rev. no */
sc->asic_rev = RAL_READ(sc, RT2560_CSR0);
/* retrieve RF rev. no and various other things from EEPROM */
rt2560_read_config(sc);
device_printf(dev, "MAC/BBP RT2560 (rev 0x%02x), RF %s\n",
sc->asic_rev, rt2560_get_rf(sc->rf_rev));
/*
* Allocate Tx and Rx rings.
*/
error = rt2560_alloc_tx_ring(sc, &sc->txq, RT2560_TX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Tx ring\n");
goto fail1;
}
error = rt2560_alloc_tx_ring(sc, &sc->atimq, RT2560_ATIM_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate ATIM ring\n");
goto fail2;
}
error = rt2560_alloc_tx_ring(sc, &sc->prioq, RT2560_PRIO_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Prio ring\n");
goto fail3;
}
error = rt2560_alloc_tx_ring(sc, &sc->bcnq, RT2560_BEACON_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Beacon ring\n");
goto fail4;
}
error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Rx ring\n");
goto fail5;
}
/* retrieve MAC address */
rt2560_get_macaddr(sc, ic->ic_macaddr);
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
| IEEE80211_C_HOSTAP /* hostap mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_WDS /* 4-address traffic works */
| IEEE80211_C_MBSS /* mesh point link mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#ifdef notyet
| IEEE80211_C_TXFRAG /* handle tx frags */
#endif
;
rt2560_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
ieee80211_ifattach(ic);
ic->ic_raw_xmit = rt2560_raw_xmit;
ic->ic_updateslot = rt2560_update_slot;
ic->ic_update_promisc = rt2560_update_promisc;
ic->ic_scan_start = rt2560_scan_start;
ic->ic_scan_end = rt2560_scan_end;
ic->ic_getradiocaps = rt2560_getradiocaps;
ic->ic_set_channel = rt2560_set_channel;
ic->ic_vap_create = rt2560_vap_create;
ic->ic_vap_delete = rt2560_vap_delete;
ic->ic_parent = rt2560_parent;
ic->ic_transmit = rt2560_transmit;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RT2560_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RT2560_RX_RADIOTAP_PRESENT);
/*
* Add a few sysctl knobs.
*/
#ifdef RAL_DEBUG
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs");
#endif
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"txantenna", CTLFLAG_RW, &sc->tx_ant, 0, "tx antenna (0=auto)");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rxantenna", CTLFLAG_RW, &sc->rx_ant, 0, "rx antenna (0=auto)");
if (bootverbose)
ieee80211_announce(ic);
return 0;
fail5: rt2560_free_tx_ring(sc, &sc->bcnq);
fail4: rt2560_free_tx_ring(sc, &sc->prioq);
fail3: rt2560_free_tx_ring(sc, &sc->atimq);
fail2: rt2560_free_tx_ring(sc, &sc->txq);
fail1: mtx_destroy(&sc->sc_mtx);
return ENXIO;
}
int
rt2560_detach(void *xsc)
{
struct rt2560_softc *sc = xsc;
struct ieee80211com *ic = &sc->sc_ic;
rt2560_stop(sc);
ieee80211_ifdetach(ic);
mbufq_drain(&sc->sc_snd);
rt2560_free_tx_ring(sc, &sc->txq);
rt2560_free_tx_ring(sc, &sc->atimq);
rt2560_free_tx_ring(sc, &sc->prioq);
rt2560_free_tx_ring(sc, &sc->bcnq);
rt2560_free_rx_ring(sc, &sc->rxq);
mtx_destroy(&sc->sc_mtx);
return 0;
}
static struct ieee80211vap *
rt2560_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct rt2560_softc *sc = ic->ic_softc;
struct rt2560_vap *rvp;
struct ieee80211vap *vap;
switch (opmode) {
case IEEE80211_M_STA:
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
case IEEE80211_M_MONITOR:
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
/* XXXRP: TBD */
if (!TAILQ_EMPTY(&ic->ic_vaps)) {
device_printf(sc->sc_dev, "only 1 vap supported\n");
return NULL;
}
if (opmode == IEEE80211_M_STA)
flags |= IEEE80211_CLONE_NOBEACONS;
break;
case IEEE80211_M_WDS:
if (TAILQ_EMPTY(&ic->ic_vaps) ||
ic->ic_opmode != IEEE80211_M_HOSTAP) {
device_printf(sc->sc_dev,
"wds only supported in ap mode\n");
return NULL;
}
/*
* Silently remove any request for a unique
* bssid; WDS vaps always share the local
* MAC address.
*/
flags &= ~IEEE80211_CLONE_BSSID;
break;
default:
device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return NULL;
}
rvp = malloc(sizeof(struct rt2560_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &rvp->ral_vap;
ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override state transition machine */
rvp->ral_newstate = vap->iv_newstate;
vap->iv_newstate = rt2560_newstate;
vap->iv_update_beacon = rt2560_beacon_update;
ieee80211_ratectl_init(vap);
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
ieee80211_media_status, mac);
if (TAILQ_FIRST(&ic->ic_vaps) == vap)
ic->ic_opmode = opmode;
return vap;
}
static void
rt2560_vap_delete(struct ieee80211vap *vap)
{
struct rt2560_vap *rvp = RT2560_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(rvp, M_80211_VAP);
}
void
rt2560_resume(void *xsc)
{
struct rt2560_softc *sc = xsc;
if (sc->sc_ic.ic_nrunning > 0)
rt2560_init(sc);
}
static void
rt2560_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring,
int count)
{
int i, error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = 0;
ring->cur_encrypt = ring->next_encrypt = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2560_TX_DESC_SIZE, 1, count * RT2560_TX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2560_TX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
- ring->data = mallocarray(count, sizeof(struct rt2560_tx_data), M_DEVBUF,
+ ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
MCLBYTES, RT2560_MAX_SCATTER, MCLBYTES, 0, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
error = bus_dmamap_create(ring->data_dmat, 0,
&ring->data[i].map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
}
return 0;
fail: rt2560_free_tx_ring(sc, ring);
return error;
}
static void
rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring)
{
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
int i;
for (i = 0; i < ring->count; i++) {
desc = &ring->desc[i];
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
desc->flags = 0;
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->queued = 0;
ring->cur = ring->next = 0;
ring->cur_encrypt = ring->next_encrypt = 0;
}
static void
rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring)
{
struct rt2560_tx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->ni != NULL)
ieee80211_free_node(data->ni);
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring,
int count)
{
struct rt2560_rx_desc *desc;
struct rt2560_rx_data *data;
bus_addr_t physaddr;
int i, error;
ring->count = count;
ring->cur = ring->next = 0;
ring->cur_decrypt = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2560_RX_DESC_SIZE, 1, count * RT2560_RX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2560_RX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
- ring->data = mallocarray(count, sizeof (struct rt2560_rx_data),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
/*
* Pre-allocate Rx buffers and populate Rx ring.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
desc = &sc->rxq.desc[i];
data = &sc->rxq.data[i];
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr,
&physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load rx buf DMA map");
goto fail;
}
desc->flags = htole32(RT2560_RX_BUSY);
desc->physaddr = htole32(physaddr);
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
return 0;
fail: rt2560_free_rx_ring(sc, ring);
return error;
}
static void
rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring)
{
int i;
for (i = 0; i < ring->count; i++) {
ring->desc[i].flags = htole32(RT2560_RX_BUSY);
ring->data[i].drop = 0;
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->cur = ring->next = 0;
ring->cur_decrypt = 0;
}
static void
rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring)
{
struct rt2560_rx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2560_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct rt2560_vap *rvp = RT2560_VAP(vap);
struct rt2560_softc *sc = vap->iv_ic->ic_softc;
int error;
if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) {
/* abort TSF synchronization */
RAL_WRITE(sc, RT2560_CSR14, 0);
/* turn association led off */
rt2560_update_led(sc, 0, 0);
}
error = rvp->ral_newstate(vap, nstate, arg);
if (error == 0 && nstate == IEEE80211_S_RUN) {
struct ieee80211_node *ni = vap->iv_bss;
struct mbuf *m;
if (vap->iv_opmode != IEEE80211_M_MONITOR) {
rt2560_update_plcp(sc);
rt2560_set_basicrates(sc, &ni->ni_rates);
rt2560_set_bssid(sc, ni->ni_bssid);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_MBSS) {
m = ieee80211_beacon_alloc(ni);
if (m == NULL) {
device_printf(sc->sc_dev,
"could not allocate beacon\n");
return ENOBUFS;
}
ieee80211_ref_node(ni);
error = rt2560_tx_bcn(sc, m, ni);
if (error != 0)
return error;
}
/* turn association led on */
rt2560_update_led(sc, 1, 0);
if (vap->iv_opmode != IEEE80211_M_MONITOR)
rt2560_enable_tsf_sync(sc);
else
rt2560_enable_tsf(sc);
}
return error;
}
/*
* Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or
* 93C66).
*/
static uint16_t
rt2560_eeprom_read(struct rt2560_softc *sc, uint8_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
/* clock C once before the first command */
RT2560_EEPROM_CTL(sc, 0);
RT2560_EEPROM_CTL(sc, RT2560_S);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C);
RT2560_EEPROM_CTL(sc, RT2560_S);
/* write start bit (1) */
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C);
/* write READ opcode (10) */
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C);
RT2560_EEPROM_CTL(sc, RT2560_S);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C);
/* write address (A5-A0 or A7-A0) */
n = (RAL_READ(sc, RT2560_CSR21) & RT2560_93C46) ? 5 : 7;
for (; n >= 0; n--) {
RT2560_EEPROM_CTL(sc, RT2560_S |
(((addr >> n) & 1) << RT2560_SHIFT_D));
RT2560_EEPROM_CTL(sc, RT2560_S |
(((addr >> n) & 1) << RT2560_SHIFT_D) | RT2560_C);
}
RT2560_EEPROM_CTL(sc, RT2560_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C);
tmp = RAL_READ(sc, RT2560_CSR21);
val |= ((tmp & RT2560_Q) >> RT2560_SHIFT_Q) << n;
RT2560_EEPROM_CTL(sc, RT2560_S);
}
RT2560_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
RT2560_EEPROM_CTL(sc, RT2560_S);
RT2560_EEPROM_CTL(sc, 0);
RT2560_EEPROM_CTL(sc, RT2560_C);
return val;
}
/*
* Some frames were processed by the hardware cipher engine and are ready for
* transmission.
*/
static void
rt2560_encryption_intr(struct rt2560_softc *sc)
{
struct rt2560_tx_desc *desc;
int hw;
/* retrieve last descriptor index processed by cipher engine */
hw = RAL_READ(sc, RT2560_SECCSR1) - sc->txq.physaddr;
hw /= RT2560_TX_DESC_SIZE;
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_POSTREAD);
while (sc->txq.next_encrypt != hw) {
if (sc->txq.next_encrypt == sc->txq.cur_encrypt) {
printf("hw encrypt %d, cur_encrypt %d\n", hw,
sc->txq.cur_encrypt);
break;
}
desc = &sc->txq.desc[sc->txq.next_encrypt];
if ((le32toh(desc->flags) & RT2560_TX_BUSY) ||
(le32toh(desc->flags) & RT2560_TX_CIPHER_BUSY))
break;
/* for TKIP, swap eiv field to fix a bug in ASIC */
if ((le32toh(desc->flags) & RT2560_TX_CIPHER_MASK) ==
RT2560_TX_CIPHER_TKIP)
desc->eiv = bswap32(desc->eiv);
/* mark the frame ready for transmission */
desc->flags |= htole32(RT2560_TX_VALID);
desc->flags |= htole32(RT2560_TX_BUSY);
DPRINTFN(sc, 15, "encryption done idx=%u\n",
sc->txq.next_encrypt);
sc->txq.next_encrypt =
(sc->txq.next_encrypt + 1) % RT2560_TX_RING_COUNT;
}
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_PREWRITE);
/* kick Tx */
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_TX);
}
static void
rt2560_tx_intr(struct rt2560_softc *sc)
{
struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct mbuf *m;
struct ieee80211_node *ni;
uint32_t flags;
int status;
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_POSTREAD);
txs->flags = IEEE80211_RATECTL_STATUS_LONG_RETRY;
for (;;) {
desc = &sc->txq.desc[sc->txq.next];
data = &sc->txq.data[sc->txq.next];
flags = le32toh(desc->flags);
if ((flags & RT2560_TX_BUSY) ||
(flags & RT2560_TX_CIPHER_BUSY) ||
!(flags & RT2560_TX_VALID))
break;
m = data->m;
ni = data->ni;
switch (flags & RT2560_TX_RESULT_MASK) {
case RT2560_TX_SUCCESS:
txs->status = IEEE80211_RATECTL_TX_SUCCESS;
txs->long_retries = 0;
DPRINTFN(sc, 10, "%s\n", "data frame sent successfully");
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(ni, txs);
status = 0;
break;
case RT2560_TX_SUCCESS_RETRY:
txs->status = IEEE80211_RATECTL_TX_SUCCESS;
txs->long_retries = RT2560_TX_RETRYCNT(flags);
DPRINTFN(sc, 9, "data frame sent after %u retries\n",
txs->long_retries);
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(ni, txs);
status = 0;
break;
case RT2560_TX_FAIL_RETRY:
txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
txs->long_retries = RT2560_TX_RETRYCNT(flags);
DPRINTFN(sc, 9, "data frame failed after %d retries\n",
txs->long_retries);
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(ni, txs);
status = 1;
break;
case RT2560_TX_FAIL_INVALID:
case RT2560_TX_FAIL_OTHER:
default:
device_printf(sc->sc_dev, "sending data frame failed "
"0x%08x\n", flags);
status = 1;
}
bus_dmamap_sync(sc->txq.data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->txq.data_dmat, data->map);
ieee80211_tx_complete(ni, m, status);
data->ni = NULL;
data->m = NULL;
/* descriptor is no longer valid */
desc->flags &= ~htole32(RT2560_TX_VALID);
DPRINTFN(sc, 15, "tx done idx=%u\n", sc->txq.next);
sc->txq.queued--;
sc->txq.next = (sc->txq.next + 1) % RT2560_TX_RING_COUNT;
}
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_PREWRITE);
if (sc->prioq.queued == 0 && sc->txq.queued == 0)
sc->sc_tx_timer = 0;
if (sc->txq.queued < RT2560_TX_RING_COUNT - 1)
rt2560_start(sc);
}
static void
rt2560_prio_intr(struct rt2560_softc *sc)
{
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct ieee80211_node *ni;
struct mbuf *m;
int flags;
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_POSTREAD);
for (;;) {
desc = &sc->prioq.desc[sc->prioq.next];
data = &sc->prioq.data[sc->prioq.next];
flags = le32toh(desc->flags);
if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_VALID) == 0)
break;
switch (flags & RT2560_TX_RESULT_MASK) {
case RT2560_TX_SUCCESS:
DPRINTFN(sc, 10, "%s\n", "mgt frame sent successfully");
break;
case RT2560_TX_SUCCESS_RETRY:
DPRINTFN(sc, 9, "mgt frame sent after %u retries\n",
(flags >> 5) & 0x7);
break;
case RT2560_TX_FAIL_RETRY:
DPRINTFN(sc, 9, "%s\n",
"sending mgt frame failed (too many retries)");
break;
case RT2560_TX_FAIL_INVALID:
case RT2560_TX_FAIL_OTHER:
default:
device_printf(sc->sc_dev, "sending mgt frame failed "
"0x%08x\n", flags);
break;
}
bus_dmamap_sync(sc->prioq.data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->prioq.data_dmat, data->map);
m = data->m;
data->m = NULL;
ni = data->ni;
data->ni = NULL;
/* descriptor is no longer valid */
desc->flags &= ~htole32(RT2560_TX_VALID);
DPRINTFN(sc, 15, "prio done idx=%u\n", sc->prioq.next);
sc->prioq.queued--;
sc->prioq.next = (sc->prioq.next + 1) % RT2560_PRIO_RING_COUNT;
if (m->m_flags & M_TXCB)
ieee80211_process_callback(ni, m,
(flags & RT2560_TX_RESULT_MASK) &~
(RT2560_TX_SUCCESS | RT2560_TX_SUCCESS_RETRY));
m_freem(m);
ieee80211_free_node(ni);
}
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_PREWRITE);
if (sc->prioq.queued == 0 && sc->txq.queued == 0)
sc->sc_tx_timer = 0;
if (sc->prioq.queued < RT2560_PRIO_RING_COUNT)
rt2560_start(sc);
}
/*
* Some frames were processed by the hardware cipher engine and are ready for
* handoff to the IEEE802.11 layer.
*/
static void
rt2560_decryption_intr(struct rt2560_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct rt2560_rx_desc *desc;
struct rt2560_rx_data *data;
bus_addr_t physaddr;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *mnew, *m;
int hw, error;
int8_t rssi, nf;
/* retrieve last descriptor index processed by cipher engine */
hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr;
hw /= RT2560_RX_DESC_SIZE;
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_POSTREAD);
for (; sc->rxq.cur_decrypt != hw;) {
desc = &sc->rxq.desc[sc->rxq.cur_decrypt];
data = &sc->rxq.data[sc->rxq.cur_decrypt];
if ((le32toh(desc->flags) & RT2560_RX_BUSY) ||
(le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY))
break;
if (data->drop) {
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
if ((le32toh(desc->flags) & RT2560_RX_CIPHER_MASK) != 0 &&
(le32toh(desc->flags) & RT2560_RX_ICV_ERROR)) {
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* Try to allocate a new mbuf for this ring element and load it
* before processing the current mbuf. If the ring element
* cannot be loaded, drop the received packet and reuse the old
* mbuf. In the unlikely case that the old mbuf can't be
* reloaded either, explicitly panic.
*/
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(mnew, void *), MCLBYTES, rt2560_dma_map_addr,
&physaddr, 0);
if (error != 0) {
m_freem(mnew);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(data->m, void *), MCLBYTES,
rt2560_dma_map_addr, &physaddr, 0);
if (error != 0) {
/* very unlikely that it will fail... */
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = data->m;
data->m = mnew;
desc->physaddr = htole32(physaddr);
/* finalize mbuf */
m->m_pkthdr.len = m->m_len =
(le32toh(desc->flags) >> 16) & 0xfff;
rssi = RT2560_RSSI(sc, desc->rssi);
nf = RT2560_NOISE_FLOOR;
if (ieee80211_radiotap_active(ic)) {
struct rt2560_rx_radiotap_header *tap = &sc->sc_rxtap;
uint32_t tsf_lo, tsf_hi;
/* get timestamp (low and high 32 bits) */
tsf_hi = RAL_READ(sc, RT2560_CSR17);
tsf_lo = RAL_READ(sc, RT2560_CSR16);
tap->wr_tsf =
htole64(((uint64_t)tsf_hi << 32) | tsf_lo);
tap->wr_flags = 0;
tap->wr_rate = ieee80211_plcp2rate(desc->rate,
(desc->flags & htole32(RT2560_RX_OFDM)) ?
IEEE80211_T_OFDM : IEEE80211_T_CCK);
tap->wr_antenna = sc->rx_ant;
tap->wr_antsignal = nf + rssi;
tap->wr_antnoise = nf;
}
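/*
* net80211 input can call back into the driver, so mark input as in
* progress and drop the lock around the handoff.
*/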
sc->sc_flags |= RT2560_F_INPUT_RUNNING;
RAL_UNLOCK(sc);
wh = mtod(m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
RAL_LOCK(sc);
sc->sc_flags &= ~RT2560_F_INPUT_RUNNING;
skip: desc->flags = htole32(RT2560_RX_BUSY);
DPRINTFN(sc, 15, "decryption done idx=%u\n", sc->rxq.cur_decrypt);
sc->rxq.cur_decrypt =
(sc->rxq.cur_decrypt + 1) % RT2560_RX_RING_COUNT;
}
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_PREWRITE);
}
/*
* Some frames were received. Pass them to the hardware cipher engine before
* sending them to the 802.11 layer.
*/
static void
rt2560_rx_intr(struct rt2560_softc *sc)
{
struct rt2560_rx_desc *desc;
struct rt2560_rx_data *data;
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_POSTREAD);
for (;;) {
desc = &sc->rxq.desc[sc->rxq.cur];
data = &sc->rxq.data[sc->rxq.cur];
if ((le32toh(desc->flags) & RT2560_RX_BUSY) ||
(le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY))
break;
data->drop = 0;
if ((le32toh(desc->flags) & RT2560_RX_PHY_ERROR) ||
(le32toh(desc->flags) & RT2560_RX_CRC_ERROR)) {
/*
* This should not happen since we did not request
* to receive those frames when we filled RXCSR0.
*/
DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n",
le32toh(desc->flags));
data->drop = 1;
}
if (((le32toh(desc->flags) >> 16) & 0xfff) > MCLBYTES) {
DPRINTFN(sc, 5, "%s\n", "bad length");
data->drop = 1;
}
/* mark the frame for decryption */
desc->flags |= htole32(RT2560_RX_CIPHER_BUSY);
DPRINTFN(sc, 15, "rx done idx=%u\n", sc->rxq.cur);
sc->rxq.cur = (sc->rxq.cur + 1) % RT2560_RX_RING_COUNT;
}
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_PREWRITE);
/* kick decrypt */
RAL_WRITE(sc, RT2560_SECCSR0, RT2560_KICK_DECRYPT);
}
static void
rt2560_beacon_update(struct ieee80211vap *vap, int item)
{
struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
setbit(bo->bo_flags, item);
}
/*
* This function is called periodically in IBSS mode when a new beacon must be
* sent out.
*/
static void
rt2560_beacon_expire(struct rt2560_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct rt2560_tx_data *data;
if (ic->ic_opmode != IEEE80211_M_IBSS &&
ic->ic_opmode != IEEE80211_M_HOSTAP &&
ic->ic_opmode != IEEE80211_M_MBSS)
return;
data = &sc->bcnq.data[sc->bcnq.next];
/*
* Don't send beacon if bsschan isn't set
*/
if (data->ni == NULL)
return;
bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->bcnq.data_dmat, data->map);
/* XXX passing 1 flags mcast frames pending, so all PS stations will wake up! */
ieee80211_beacon_update(data->ni, data->m, 1);
rt2560_tx_bcn(sc, data->m, data->ni);
DPRINTFN(sc, 15, "%s", "beacon expired\n");
sc->bcnq.next = (sc->bcnq.next + 1) % RT2560_BEACON_RING_COUNT;
}
/* ARGSUSED */
static void
rt2560_wakeup_expire(struct rt2560_softc *sc)
{
DPRINTFN(sc, 2, "%s", "wakeup expired\n");
}
void
rt2560_intr(void *arg)
{
struct rt2560_softc *sc = arg;
uint32_t r;
RAL_LOCK(sc);
/* disable interrupts */
RAL_WRITE(sc, RT2560_CSR8, 0xffffffff);
/* don't re-enable interrupts if we're shutting down */
if (!(sc->sc_flags & RT2560_F_RUNNING)) {
RAL_UNLOCK(sc);
return;
}
r = RAL_READ(sc, RT2560_CSR7);
RAL_WRITE(sc, RT2560_CSR7, r);
if (r & RT2560_BEACON_EXPIRE)
rt2560_beacon_expire(sc);
if (r & RT2560_WAKEUP_EXPIRE)
rt2560_wakeup_expire(sc);
if (r & RT2560_ENCRYPTION_DONE)
rt2560_encryption_intr(sc);
if (r & RT2560_TX_DONE)
rt2560_tx_intr(sc);
if (r & RT2560_PRIO_DONE)
rt2560_prio_intr(sc);
if (r & RT2560_DECRYPTION_DONE)
rt2560_decryption_intr(sc);
if (r & RT2560_RX_DONE) {
rt2560_rx_intr(sc);
rt2560_encryption_intr(sc);
}
/* re-enable interrupts */
RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK);
RAL_UNLOCK(sc);
}
#define RAL_SIFS 10 /* us */
#define RT2560_TXRX_TURNAROUND 10 /* us */
static uint8_t
rt2560_plcp_signal(int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12: return 0xb;
case 18: return 0xf;
case 24: return 0xa;
case 36: return 0xe;
case 48: return 0x9;
case 72: return 0xd;
case 96: return 0x8;
case 108: return 0xc;
/* CCK rates (NB: not IEEE std, device-specific) */
case 2: return 0x0;
case 4: return 0x1;
case 11: return 0x2;
case 22: return 0x3;
}
return 0xff; /* XXX unsupported/unknown rate */
}
static void
rt2560_setup_tx_desc(struct rt2560_softc *sc, struct rt2560_tx_desc *desc,
uint32_t flags, int len, int rate, int encrypt, bus_addr_t physaddr)
{
struct ieee80211com *ic = &sc->sc_ic;
uint16_t plcp_length;
int remainder;
desc->flags = htole32(flags);
desc->flags |= htole32(len << 16);
desc->physaddr = htole32(physaddr);
desc->wme = htole16(
RT2560_AIFSN(2) |
RT2560_LOGCWMIN(3) |
RT2560_LOGCWMAX(8));
/* setup PLCP fields */
desc->plcp_signal = rt2560_plcp_signal(rate);
desc->plcp_service = 4;
len += IEEE80211_CRC_LEN;
if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) {
desc->flags |= htole32(RT2560_TX_OFDM);
plcp_length = len & 0xfff;
desc->plcp_length_hi = plcp_length >> 6;
desc->plcp_length_lo = plcp_length & 0x3f;
} else {
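/*
* For CCK the PLCP LENGTH field is the frame's airtime in microseconds:
* howmany(16 * len, rate) with rate in 500 kb/s units.  E.g. a frame of
* 1000 bytes (FCS included) at 11 Mb/s (rate 22) gives howmany(16000, 22)
* = 728 us, and the remainder 6 is below 7, so the length-extension bit
* is set.
*/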
plcp_length = howmany(16 * len, rate);
if (rate == 22) {
remainder = (16 * len) % 22;
if (remainder != 0 && remainder < 7)
desc->plcp_service |= RT2560_PLCP_LENGEXT;
}
desc->plcp_length_hi = plcp_length >> 8;
desc->plcp_length_lo = plcp_length & 0xff;
if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
desc->plcp_signal |= 0x08;
}
if (!encrypt)
desc->flags |= htole32(RT2560_TX_VALID);
desc->flags |= encrypt ? htole32(RT2560_TX_CIPHER_BUSY)
: htole32(RT2560_TX_BUSY);
}
static int
rt2560_tx_bcn(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
int nsegs, rate, error;
desc = &sc->bcnq.desc[sc->bcnq.cur];
data = &sc->bcnq.data[sc->bcnq.cur];
/* XXX maybe a separate beacon rate? */
rate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].mgmtrate;
error = bus_dmamap_load_mbuf_sg(sc->bcnq.data_dmat, data->map, m0,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
rt2560_setup_tx_desc(sc, desc, RT2560_TX_IFS_NEWBACKOFF |
RT2560_TX_TIMESTAMP, m0->m_pkthdr.len, rate, 0, segs->ds_addr);
DPRINTFN(sc, 10, "sending beacon frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->bcnq.cur, rate);
bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->bcnq.desc_dmat, sc->bcnq.desc_map,
BUS_DMASYNC_PREWRITE);
sc->bcnq.cur = (sc->bcnq.cur + 1) % RT2560_BEACON_RING_COUNT;
return 0;
}
static int
rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct ieee80211_frame *wh;
struct ieee80211_key *k;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
uint16_t dur;
uint32_t flags = 0;
int nsegs, rate, error;
desc = &sc->prioq.desc[sc->prioq.cur];
data = &sc->prioq.data[sc->prioq.cur];
rate = ni->ni_txparms->mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
}
error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* management frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
wh = mtod(m0, struct ieee80211_frame *);
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2560_TX_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
/* tell hardware to add timestamp for probe responses */
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
IEEE80211_FC0_TYPE_MGT &&
(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
IEEE80211_FC0_SUBTYPE_PROBE_RESP)
flags |= RT2560_TX_TIMESTAMP;
}
rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 0,
segs->ds_addr);
bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->prioq.cur, rate);
/* kick prio */
sc->prioq.queued++;
sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT;
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO);
return 0;
}
static int
rt2560_sendprot(struct rt2560_softc *sc,
const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate)
{
struct ieee80211com *ic = ni->ni_ic;
const struct ieee80211_frame *wh;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct mbuf *mprot;
int protrate, ackrate, pktlen, flags, isshort, error;
uint16_t dur;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
int nsegs;
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY,
("protection %d", prot));
wh = mtod(m, const struct ieee80211_frame *);
pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN;
protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
ackrate = ieee80211_ack_rate(ic->ic_rt, rate);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort)
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags = RT2560_TX_MORE_FRAG;
if (prot == IEEE80211_PROT_RTSCTS) {
/* NB: CTS is the same size as an ACK */
dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags |= RT2560_TX_ACK;
mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur);
} else {
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur);
}
if (mprot == NULL) {
/* XXX stat + msg */
return ENOBUFS;
}
desc = &sc->txq.desc[sc->txq.cur_encrypt];
data = &sc->txq.data[sc->txq.cur_encrypt];
error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map,
mprot, segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(mprot);
return error;
}
data->m = mprot;
data->ni = ieee80211_ref_node(ni);
/* ctl frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
rt2560_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate, 1,
segs->ds_addr);
bus_dmamap_sync(sc->txq.data_dmat, data->map,
BUS_DMASYNC_PREWRITE);
sc->txq.queued++;
sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT;
return 0;
}
static int
rt2560_tx_raw(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
uint32_t flags;
int nsegs, rate, error;
desc = &sc->prioq.desc[sc->prioq.cur];
data = &sc->prioq.data[sc->prioq.cur];
rate = params->ibp_rate0;
if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
/* XXX fall back to mcast/mgmt rate? */
m_freem(m0);
return EINVAL;
}
flags = 0;
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
flags |= RT2560_TX_ACK;
if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) {
error = rt2560_sendprot(sc, m0, ni,
params->ibp_flags & IEEE80211_BPF_RTS ?
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY,
rate);
if (error) {
m_freem(m0);
return error;
}
flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS;
}
error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(ni->ni_vap, m0);
}
data->m = m0;
data->ni = ni;
/* XXX need to set up the descriptor ourselves */
rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len,
rate, (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0,
segs->ds_addr);
bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending raw frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->prioq.cur, rate);
/* kick prio */
sc->prioq.queued++;
sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT;
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO);
return 0;
}
static int
rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp = ni->ni_txparms;
struct ieee80211_key *k;
struct mbuf *mnew;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
uint16_t dur;
uint32_t flags;
int nsegs, rate, error;
wh = mtod(m0, struct ieee80211_frame *);
if (m0->m_flags & M_EAPOL) {
rate = tp->mgmtrate;
} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
rate = tp->mcastrate;
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
flags = 0;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
int prot = IEEE80211_PROT_NONE;
if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
prot = IEEE80211_PROT_RTSCTS;
else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM)
prot = ic->ic_protmode;
if (prot != IEEE80211_PROT_NONE) {
error = rt2560_sendprot(sc, m0, ni, prot, rate);
if (error) {
m_freem(m0);
return error;
}
flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS;
}
}
data = &sc->txq.data[sc->txq.cur_encrypt];
desc = &sc->txq.desc[sc->txq.cur_encrypt];
error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m0);
return ENOBUFS;
}
m0 = mnew;
error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map,
m0, segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(m0);
return error;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* remember link conditions for rate adaptation algorithm */
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) {
data->rix = ni->ni_txrate;
/* XXX probably need last rssi value and not avg */
data->rssi = ic->ic_node_getrssi(ni);
} else
data->rix = IEEE80211_FIXED_RATE_NONE;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2560_TX_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
}
rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 1,
segs->ds_addr);
bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->txq.cur_encrypt, rate);
/* kick encrypt */
sc->txq.queued++;
sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT;
RAL_WRITE(sc, RT2560_SECCSR1, RT2560_KICK_ENCRYPT);
return 0;
}
static int
rt2560_transmit(struct ieee80211com *ic, struct mbuf *m)
{
struct rt2560_softc *sc = ic->ic_softc;
int error;
RAL_LOCK(sc);
if ((sc->sc_flags & RT2560_F_RUNNING) == 0) {
RAL_UNLOCK(sc);
return (ENXIO);
}
error = mbufq_enqueue(&sc->sc_snd, m);
if (error) {
RAL_UNLOCK(sc);
return (error);
}
rt2560_start(sc);
RAL_UNLOCK(sc);
return (0);
}
static void
rt2560_start(struct rt2560_softc *sc)
{
struct ieee80211_node *ni;
struct mbuf *m;
RAL_LOCK_ASSERT(sc);
while (sc->txq.queued < RT2560_TX_RING_COUNT - 1 &&
(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (rt2560_tx_data(sc, m, ni) != 0) {
if_inc_counter(ni->ni_vap->iv_ifp,
IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
break;
}
sc->sc_tx_timer = 5;
}
}
static void
rt2560_watchdog(void *arg)
{
struct rt2560_softc *sc = arg;
RAL_LOCK_ASSERT(sc);
KASSERT(sc->sc_flags & RT2560_F_RUNNING, ("not running"));
if (sc->sc_invalid) /* card ejected */
return;
rt2560_encryption_intr(sc);
rt2560_tx_intr(sc);
if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
rt2560_init_locked(sc);
counter_u64_add(sc->sc_ic.ic_oerrors, 1);
/* NB: callout is reset in rt2560_init() */
return;
}
callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc);
}
static void
rt2560_parent(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
int startall = 0;
RAL_LOCK(sc);
if (ic->ic_nrunning > 0) {
if ((sc->sc_flags & RT2560_F_RUNNING) == 0) {
rt2560_init_locked(sc);
startall = 1;
} else
rt2560_update_promisc(ic);
} else if (sc->sc_flags & RT2560_F_RUNNING)
rt2560_stop_locked(sc);
RAL_UNLOCK(sc);
if (startall)
ieee80211_start_all(ic);
}
static void
rt2560_bbp_write(struct rt2560_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to BBP\n");
return;
}
tmp = RT2560_BBP_WRITE | RT2560_BBP_BUSY | reg << 8 | val;
RAL_WRITE(sc, RT2560_BBPCSR, tmp);
DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val);
}
static uint8_t
rt2560_bbp_read(struct rt2560_softc *sc, uint8_t reg)
{
uint32_t val;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
val = RT2560_BBP_BUSY | reg << 8;
RAL_WRITE(sc, RT2560_BBPCSR, val);
for (ntries = 0; ntries < 100; ntries++) {
val = RAL_READ(sc, RT2560_BBPCSR);
if (!(val & RT2560_BBP_BUSY))
return val & 0xff;
DELAY(1);
}
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
static void
rt2560_rf_write(struct rt2560_softc *sc, uint8_t reg, uint32_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2560_RFCSR) & RT2560_RF_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
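/*
 * RFCSR serial word layout used below: the 20-bit register value goes
 * in bits 21:2 and the 2-bit RF register index in bits 1:0, with the
 * BUSY and 20BIT flags set to start the transfer.
 */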
tmp = RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 |
(reg & 0x3);
RAL_WRITE(sc, RT2560_RFCSR, tmp);
/* remember last written value in sc */
sc->rf_regs[reg] = val;
DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff);
}
static void
rt2560_set_chan(struct rt2560_softc *sc, struct ieee80211_channel *c)
{
struct ieee80211com *ic = &sc->sc_ic;
uint8_t power, tmp;
u_int i, chan;
chan = ieee80211_chan2ieee(ic, c);
KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan));
if (IEEE80211_IS_CHAN_2GHZ(c))
power = min(sc->txpow[chan - 1], 31);
else
power = 31;
/* adjust txpower using ifconfig settings */
power -= (100 - ic->ic_txpowlimit) / 8;
DPRINTFN(sc, 2, "setting channel to %u, txpower to %u\n", chan, power);
switch (sc->rf_rev) {
case RT2560_RF_2522:
rt2560_rf_write(sc, RAL_RF1, 0x00814);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2522_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
break;
case RT2560_RF_2523:
rt2560_rf_write(sc, RAL_RF1, 0x08804);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2523_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RT2560_RF_2524:
rt2560_rf_write(sc, RAL_RF1, 0x0c808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2524_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RT2560_RF_2525:
rt2560_rf_write(sc, RAL_RF1, 0x08808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_hi_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
rt2560_rf_write(sc, RAL_RF1, 0x08808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RT2560_RF_2525E:
rt2560_rf_write(sc, RAL_RF1, 0x08808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525e_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
break;
case RT2560_RF_2526:
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_hi_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
rt2560_rf_write(sc, RAL_RF1, 0x08804);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
break;
/* dual-band RF */
case RT2560_RF_5222:
for (i = 0; rt2560_rf5222[i].chan != chan; i++);
rt2560_rf_write(sc, RAL_RF1, rt2560_rf5222[i].r1);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf5222[i].r2);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
rt2560_rf_write(sc, RAL_RF4, rt2560_rf5222[i].r4);
break;
default:
printf("unknown ral rev=%d\n", sc->rf_rev);
}
/* XXX */
if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
/* set Japan filter bit for channel 14 */
tmp = rt2560_bbp_read(sc, 70);
tmp &= ~RT2560_JAPAN_FILTER;
if (chan == 14)
tmp |= RT2560_JAPAN_FILTER;
rt2560_bbp_write(sc, 70, tmp);
/* clear CRC errors */
RAL_READ(sc, RT2560_CNT0);
}
}
static void
rt2560_getradiocaps(struct ieee80211com *ic,
int maxchans, int *nchans, struct ieee80211_channel chans[])
{
struct rt2560_softc *sc = ic->ic_softc;
uint8_t bands[IEEE80211_MODE_BYTES];
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
ieee80211_add_channel_list_2ghz(chans, maxchans, nchans,
rt2560_chan_2ghz, nitems(rt2560_chan_2ghz), bands, 0);
if (sc->rf_rev == RT2560_RF_5222) {
setbit(bands, IEEE80211_MODE_11A);
ieee80211_add_channel_list_5ghz(chans, maxchans, nchans,
rt2560_chan_5ghz, nitems(rt2560_chan_5ghz), bands, 0);
}
}
static void
rt2560_set_channel(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
rt2560_set_chan(sc, ic->ic_curchan);
RAL_UNLOCK(sc);
}
#if 0
/*
* Disable RF auto-tuning.
*/
static void
rt2560_disable_rf_tune(struct rt2560_softc *sc)
{
uint32_t tmp;
if (sc->rf_rev != RT2560_RF_2523) {
tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
rt2560_rf_write(sc, RAL_RF1, tmp);
}
tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
rt2560_rf_write(sc, RAL_RF3, tmp);
DPRINTFN(sc, 2, "%s", "disabling RF autotune\n");
}
#endif
/*
* Refer to IEEE Std 802.11-1999 p. 123 for more information on TSF
* synchronization.
*/
static void
rt2560_enable_tsf_sync(struct rt2560_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint16_t logcwmin, preload;
uint32_t tmp;
/* first, disable TSF synchronization */
RAL_WRITE(sc, RT2560_CSR14, 0);
tmp = 16 * vap->iv_bss->ni_intval;
RAL_WRITE(sc, RT2560_CSR12, tmp);
RAL_WRITE(sc, RT2560_CSR13, 0);
logcwmin = 5;
preload = (vap->iv_opmode == IEEE80211_M_STA) ? 384 : 1024;
tmp = logcwmin << 16 | preload;
RAL_WRITE(sc, RT2560_BCNOCSR, tmp);
/* finally, enable TSF synchronization */
tmp = RT2560_ENABLE_TSF | RT2560_ENABLE_TBCN;
if (ic->ic_opmode == IEEE80211_M_STA)
tmp |= RT2560_ENABLE_TSF_SYNC(1);
else
tmp |= RT2560_ENABLE_TSF_SYNC(2) |
RT2560_ENABLE_BEACON_GENERATOR;
RAL_WRITE(sc, RT2560_CSR14, tmp);
DPRINTF(sc, "%s", "enabling TSF synchronization\n");
}
static void
rt2560_enable_tsf(struct rt2560_softc *sc)
{
RAL_WRITE(sc, RT2560_CSR14, 0);
RAL_WRITE(sc, RT2560_CSR14,
RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_TSF);
}
static void
rt2560_update_plcp(struct rt2560_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
/* no short preamble for 1Mbps */
RAL_WRITE(sc, RT2560_PLCP1MCSR, 0x00700400);
if (!(ic->ic_flags & IEEE80211_F_SHPREAMBLE)) {
/* values taken from the reference driver */
RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380401);
RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x00150402);
RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b8403);
} else {
/* same values as above or'ed 0x8 */
RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380409);
RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x0015040a);
RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b840b);
}
DPRINTF(sc, "updating PLCP for %s preamble\n",
(ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? "short" : "long");
}
/*
* This function can be called by ieee80211_set_shortslottime(). Refer to
* IEEE Std 802.11-1999 p. 85 for how these values are computed.
*/
static void
rt2560_update_slot(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
uint8_t slottime;
uint16_t tx_sifs, tx_pifs, tx_difs, eifs;
uint32_t tmp;
#ifndef FORCE_SLOTTIME
slottime = IEEE80211_GET_SLOTTIME(ic);
#else
/*
* Setting the slot time according to the "short slot time" capability
* advertised in beacons/probe responses seems to cause problems
* acknowledging certain APs' data frames transmitted at CCK/DS rates:
* the problematic AP keeps retransmitting data frames, probably
* because MAC-level ACKs are not received by the hardware.
* So we cheat a little here by claiming we are capable of
* "short slot time" while setting the hardware slot time to the normal
* slot time.  ral(4) does not seem to have trouble receiving
* frames transmitted using short slot time even when the hardware
* slot time is set to the normal slot time.  If we didn't use this
* trick, we would have to claim that short slot time is not
* supported; this would give relatively poor RX performance
* (1-2 Mb/s lower) and the _whole_ BSS would stop using short
* slot time.
*/
slottime = IEEE80211_DUR_SLOT;
#endif
/* update the MAC slot boundaries */
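/*
 * Standard 802.11 timing: PIFS = SIFS + slot and DIFS = SIFS + 2 * slot
 * (the latter is what IEEE80211_DUR_DIFS() computes).  The programmed
 * SIFS is shortened by RT2560_TXRX_TURNAROUND, presumably because the
 * hardware adds its own RX/TX turnaround delay.
 */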
tx_sifs = RAL_SIFS - RT2560_TXRX_TURNAROUND;
tx_pifs = tx_sifs + slottime;
tx_difs = IEEE80211_DUR_DIFS(tx_sifs, slottime);
eifs = (ic->ic_curmode == IEEE80211_MODE_11B) ? 364 : 60;
tmp = RAL_READ(sc, RT2560_CSR11);
tmp = (tmp & ~0x1f00) | slottime << 8;
RAL_WRITE(sc, RT2560_CSR11, tmp);
tmp = tx_pifs << 16 | tx_sifs;
RAL_WRITE(sc, RT2560_CSR18, tmp);
tmp = eifs << 16 | tx_difs;
RAL_WRITE(sc, RT2560_CSR19, tmp);
DPRINTF(sc, "setting slottime to %uus\n", slottime);
}
static void
rt2560_set_basicrates(struct rt2560_softc *sc,
const struct ieee80211_rateset *rs)
{
struct ieee80211com *ic = &sc->sc_ic;
uint32_t mask = 0;
uint8_t rate;
int i;
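/*
 * Each bit N of the mask enables the rate at index N of the current
 * rate table (as returned by ieee80211_legacy_rate_lookup()); only
 * rates flagged IEEE80211_RATE_BASIC are included.
 */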
for (i = 0; i < rs->rs_nrates; i++) {
rate = rs->rs_rates[i];
if (!(rate & IEEE80211_RATE_BASIC))
continue;
mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt,
IEEE80211_RV(rate));
}
RAL_WRITE(sc, RT2560_ARSP_PLCP_1, mask);
DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask);
}
static void
rt2560_update_led(struct rt2560_softc *sc, int led1, int led2)
{
uint32_t tmp;
/* set ON period to 70ms and OFF period to 30ms */
tmp = led1 << 16 | led2 << 17 | 70 << 8 | 30;
RAL_WRITE(sc, RT2560_LEDCSR, tmp);
}
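/*
 * The BSSID and MAC address are programmed least-significant byte
 * first across a pair of CSRs: four bytes in the first register,
 * the remaining two in the second.
 */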
static void
rt2560_set_bssid(struct rt2560_softc *sc, const uint8_t *bssid)
{
uint32_t tmp;
tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24;
RAL_WRITE(sc, RT2560_CSR5, tmp);
tmp = bssid[4] | bssid[5] << 8;
RAL_WRITE(sc, RT2560_CSR6, tmp);
DPRINTF(sc, "setting BSSID to %6D\n", bssid, ":");
}
static void
rt2560_set_macaddr(struct rt2560_softc *sc, const uint8_t *addr)
{
uint32_t tmp;
tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24;
RAL_WRITE(sc, RT2560_CSR3, tmp);
tmp = addr[4] | addr[5] << 8;
RAL_WRITE(sc, RT2560_CSR4, tmp);
DPRINTF(sc, "setting MAC address to %6D\n", addr, ":");
}
static void
rt2560_get_macaddr(struct rt2560_softc *sc, uint8_t *addr)
{
uint32_t tmp;
tmp = RAL_READ(sc, RT2560_CSR3);
addr[0] = tmp & 0xff;
addr[1] = (tmp >> 8) & 0xff;
addr[2] = (tmp >> 16) & 0xff;
addr[3] = (tmp >> 24);
tmp = RAL_READ(sc, RT2560_CSR4);
addr[4] = tmp & 0xff;
addr[5] = (tmp >> 8) & 0xff;
}
static void
rt2560_update_promisc(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RAL_READ(sc, RT2560_RXCSR0);
tmp &= ~RT2560_DROP_NOT_TO_ME;
if (ic->ic_promisc == 0)
tmp |= RT2560_DROP_NOT_TO_ME;
RAL_WRITE(sc, RT2560_RXCSR0, tmp);
DPRINTF(sc, "%s promiscuous mode\n",
(ic->ic_promisc > 0) ? "entering" : "leaving");
}
static const char *
rt2560_get_rf(int rev)
{
switch (rev) {
case RT2560_RF_2522: return "RT2522";
case RT2560_RF_2523: return "RT2523";
case RT2560_RF_2524: return "RT2524";
case RT2560_RF_2525: return "RT2525";
case RT2560_RF_2525E: return "RT2525e";
case RT2560_RF_2526: return "RT2526";
case RT2560_RF_5222: return "RT5222";
default: return "unknown";
}
}
static void
rt2560_read_config(struct rt2560_softc *sc)
{
uint16_t val;
int i;
val = rt2560_eeprom_read(sc, RT2560_EEPROM_CONFIG0);
sc->rf_rev = (val >> 11) & 0x7;
sc->hw_radio = (val >> 10) & 0x1;
sc->led_mode = (val >> 6) & 0x7;
sc->rx_ant = (val >> 4) & 0x3;
sc->tx_ant = (val >> 2) & 0x3;
sc->nb_ant = val & 0x3;
/* read default values for BBP registers */
for (i = 0; i < 16; i++) {
val = rt2560_eeprom_read(sc, RT2560_EEPROM_BBP_BASE + i);
if (val == 0 || val == 0xffff)
continue;
sc->bbp_prom[i].reg = val >> 8;
sc->bbp_prom[i].val = val & 0xff;
}
/* read Tx power for all b/g channels */
for (i = 0; i < 14 / 2; i++) {
val = rt2560_eeprom_read(sc, RT2560_EEPROM_TXPOWER + i);
sc->txpow[i * 2] = val & 0xff;
sc->txpow[i * 2 + 1] = val >> 8;
}
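/*
 * Clamp to the range the RF programming accepts (rt2560_set_chan()
 * limits TX power to 31); out-of-range EEPROM entries are replaced
 * with a default of 24.
 */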
for (i = 0; i < 14; ++i) {
if (sc->txpow[i] > 31)
sc->txpow[i] = 24;
}
val = rt2560_eeprom_read(sc, RT2560_EEPROM_CALIBRATE);
if ((val & 0xff) == 0xff)
sc->rssi_corr = RT2560_DEFAULT_RSSI_CORR;
else
sc->rssi_corr = val & 0xff;
DPRINTF(sc, "rssi correction %d, calibrate 0x%02x\n",
sc->rssi_corr, val);
}
static void
rt2560_scan_start(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
/* abort TSF synchronization */
RAL_WRITE(sc, RT2560_CSR14, 0);
rt2560_set_bssid(sc, ieee80211broadcastaddr);
}
static void
rt2560_scan_end(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = ic->ic_scan->ss_vap;
rt2560_enable_tsf_sync(sc);
/* XXX keep local copy */
rt2560_set_bssid(sc, vap->iv_bss->ni_bssid);
}
static int
rt2560_bbp_init(struct rt2560_softc *sc)
{
int i, ntries;
/* wait for BBP to be ready */
for (ntries = 0; ntries < 100; ntries++) {
if (rt2560_bbp_read(sc, RT2560_BBP_VERSION) != 0)
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for BBP\n");
return EIO;
}
/* initialize BBP registers to default values */
for (i = 0; i < nitems(rt2560_def_bbp); i++) {
rt2560_bbp_write(sc, rt2560_def_bbp[i].reg,
rt2560_def_bbp[i].val);
}
/* initialize BBP registers to values stored in EEPROM */
for (i = 0; i < 16; i++) {
if (sc->bbp_prom[i].reg == 0 && sc->bbp_prom[i].val == 0)
break;
rt2560_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
}
rt2560_bbp_write(sc, 17, 0x48); /* XXX restore bbp17 */
return 0;
}
static void
rt2560_set_txantenna(struct rt2560_softc *sc, int antenna)
{
uint32_t tmp;
uint8_t tx;
tx = rt2560_bbp_read(sc, RT2560_BBP_TX) & ~RT2560_BBP_ANTMASK;
if (antenna == 1)
tx |= RT2560_BBP_ANTA;
else if (antenna == 2)
tx |= RT2560_BBP_ANTB;
else
tx |= RT2560_BBP_DIVERSITY;
/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526 ||
sc->rf_rev == RT2560_RF_5222)
tx |= RT2560_BBP_FLIPIQ;
rt2560_bbp_write(sc, RT2560_BBP_TX, tx);
/* update values for CCK and OFDM in BBPCSR1 */
tmp = RAL_READ(sc, RT2560_BBPCSR1) & ~0x00070007;
tmp |= (tx & 0x7) << 16 | (tx & 0x7);
RAL_WRITE(sc, RT2560_BBPCSR1, tmp);
}
static void
rt2560_set_rxantenna(struct rt2560_softc *sc, int antenna)
{
uint8_t rx;
rx = rt2560_bbp_read(sc, RT2560_BBP_RX) & ~RT2560_BBP_ANTMASK;
if (antenna == 1)
rx |= RT2560_BBP_ANTA;
else if (antenna == 2)
rx |= RT2560_BBP_ANTB;
else
rx |= RT2560_BBP_DIVERSITY;
/* need to force no I/Q flip for RF 2525e and 2526 */
if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526)
rx &= ~RT2560_BBP_FLIPIQ;
rt2560_bbp_write(sc, RT2560_BBP_RX, rx);
}
static void
rt2560_init_locked(struct rt2560_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
int i;
RAL_LOCK_ASSERT(sc);
rt2560_stop_locked(sc);
/* setup tx rings */
tmp = RT2560_PRIO_RING_COUNT << 24 |
RT2560_ATIM_RING_COUNT << 16 |
RT2560_TX_RING_COUNT << 8 |
RT2560_TX_DESC_SIZE;
/* rings must be initialized in this exact order */
RAL_WRITE(sc, RT2560_TXCSR2, tmp);
RAL_WRITE(sc, RT2560_TXCSR3, sc->txq.physaddr);
RAL_WRITE(sc, RT2560_TXCSR5, sc->prioq.physaddr);
RAL_WRITE(sc, RT2560_TXCSR4, sc->atimq.physaddr);
RAL_WRITE(sc, RT2560_TXCSR6, sc->bcnq.physaddr);
/* setup rx ring */
tmp = RT2560_RX_RING_COUNT << 8 | RT2560_RX_DESC_SIZE;
RAL_WRITE(sc, RT2560_RXCSR1, tmp);
RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr);
/* initialize MAC registers to default values */
for (i = 0; i < nitems(rt2560_def_mac); i++)
RAL_WRITE(sc, rt2560_def_mac[i].reg, rt2560_def_mac[i].val);
rt2560_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
/* set basic rate set (will be updated later) */
RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x153);
rt2560_update_slot(ic);
rt2560_update_plcp(sc);
rt2560_update_led(sc, 0, 0);
RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC);
RAL_WRITE(sc, RT2560_CSR1, RT2560_HOST_READY);
if (rt2560_bbp_init(sc) != 0) {
rt2560_stop_locked(sc);
return;
}
rt2560_set_txantenna(sc, sc->tx_ant);
rt2560_set_rxantenna(sc, sc->rx_ant);
/* set default BSS channel */
rt2560_set_chan(sc, ic->ic_curchan);
/* kick Rx */
tmp = RT2560_DROP_PHY_ERROR | RT2560_DROP_CRC_ERROR;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RT2560_DROP_CTL | RT2560_DROP_VERSION_ERROR;
if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
ic->ic_opmode != IEEE80211_M_MBSS)
tmp |= RT2560_DROP_TODS;
if (ic->ic_promisc == 0)
tmp |= RT2560_DROP_NOT_TO_ME;
}
RAL_WRITE(sc, RT2560_RXCSR0, tmp);
/* clear old FCS and Rx FIFO errors */
RAL_READ(sc, RT2560_CNT0);
RAL_READ(sc, RT2560_CNT4);
/* clear any pending interrupts */
RAL_WRITE(sc, RT2560_CSR7, 0xffffffff);
/* enable interrupts */
RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK);
sc->sc_flags |= RT2560_F_RUNNING;
callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc);
}
static void
rt2560_init(void *priv)
{
struct rt2560_softc *sc = priv;
struct ieee80211com *ic = &sc->sc_ic;
RAL_LOCK(sc);
rt2560_init_locked(sc);
RAL_UNLOCK(sc);
if (sc->sc_flags & RT2560_F_RUNNING)
ieee80211_start_all(ic); /* start all vaps */
}
static void
rt2560_stop_locked(struct rt2560_softc *sc)
{
volatile int *flags = &sc->sc_flags;
RAL_LOCK_ASSERT(sc);
while (*flags & RT2560_F_INPUT_RUNNING)
msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10);
callout_stop(&sc->watchdog_ch);
sc->sc_tx_timer = 0;
if (sc->sc_flags & RT2560_F_RUNNING) {
sc->sc_flags &= ~RT2560_F_RUNNING;
/* abort Tx */
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_ABORT_TX);
/* disable Rx */
RAL_WRITE(sc, RT2560_RXCSR0, RT2560_DISABLE_RX);
/* reset ASIC (imply reset BBP) */
RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC);
RAL_WRITE(sc, RT2560_CSR1, 0);
/* disable interrupts */
RAL_WRITE(sc, RT2560_CSR8, 0xffffffff);
/* reset Tx and Rx rings */
rt2560_reset_tx_ring(sc, &sc->txq);
rt2560_reset_tx_ring(sc, &sc->atimq);
rt2560_reset_tx_ring(sc, &sc->prioq);
rt2560_reset_tx_ring(sc, &sc->bcnq);
rt2560_reset_rx_ring(sc, &sc->rxq);
}
}
void
rt2560_stop(void *arg)
{
struct rt2560_softc *sc = arg;
RAL_LOCK(sc);
rt2560_stop_locked(sc);
RAL_UNLOCK(sc);
}
static int
rt2560_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
struct rt2560_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
if (!(sc->sc_flags & RT2560_F_RUNNING)) {
RAL_UNLOCK(sc);
m_freem(m);
return ENETDOWN;
}
if (sc->prioq.queued >= RT2560_PRIO_RING_COUNT) {
RAL_UNLOCK(sc);
m_freem(m);
return ENOBUFS; /* XXX */
}
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
if (rt2560_tx_mgt(sc, m, ni) != 0)
goto bad;
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
if (rt2560_tx_raw(sc, m, ni, params))
goto bad;
}
sc->sc_tx_timer = 5;
RAL_UNLOCK(sc);
return 0;
bad:
RAL_UNLOCK(sc);
return EIO; /* XXX */
}
Index: head/sys/dev/ral/rt2661.c
===================================================================
--- head/sys/dev/ral/rt2661.c (revision 328217)
+++ head/sys/dev/ral/rt2661.c (revision 328218)
@@ -1,2794 +1,2794 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2561, RT2561S and RT2661 chipset driver
* http://www.ralinktech.com/
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/ral/rt2661reg.h>
#include <dev/ral/rt2661var.h>
#define RAL_DEBUG
#ifdef RAL_DEBUG
#define DPRINTF(sc, fmt, ...) do { \
if (sc->sc_debug > 0) \
printf(fmt, __VA_ARGS__); \
} while (0)
#define DPRINTFN(sc, n, fmt, ...) do { \
if (sc->sc_debug >= (n)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, n, fmt, ...)
#endif
static struct ieee80211vap *rt2661_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void rt2661_vap_delete(struct ieee80211vap *);
static void rt2661_dma_map_addr(void *, bus_dma_segment_t *, int,
int);
static int rt2661_alloc_tx_ring(struct rt2661_softc *,
struct rt2661_tx_ring *, int);
static void rt2661_reset_tx_ring(struct rt2661_softc *,
struct rt2661_tx_ring *);
static void rt2661_free_tx_ring(struct rt2661_softc *,
struct rt2661_tx_ring *);
static int rt2661_alloc_rx_ring(struct rt2661_softc *,
struct rt2661_rx_ring *, int);
static void rt2661_reset_rx_ring(struct rt2661_softc *,
struct rt2661_rx_ring *);
static void rt2661_free_rx_ring(struct rt2661_softc *,
struct rt2661_rx_ring *);
static int rt2661_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static uint16_t rt2661_eeprom_read(struct rt2661_softc *, uint8_t);
static void rt2661_rx_intr(struct rt2661_softc *);
static void rt2661_tx_intr(struct rt2661_softc *);
static void rt2661_tx_dma_intr(struct rt2661_softc *,
struct rt2661_tx_ring *);
static void rt2661_mcu_beacon_expire(struct rt2661_softc *);
static void rt2661_mcu_wakeup(struct rt2661_softc *);
static void rt2661_mcu_cmd_intr(struct rt2661_softc *);
static void rt2661_scan_start(struct ieee80211com *);
static void rt2661_scan_end(struct ieee80211com *);
static void rt2661_getradiocaps(struct ieee80211com *, int, int *,
struct ieee80211_channel[]);
static void rt2661_set_channel(struct ieee80211com *);
static void rt2661_setup_tx_desc(struct rt2661_softc *,
struct rt2661_tx_desc *, uint32_t, uint16_t, int,
int, const bus_dma_segment_t *, int, int);
static int rt2661_tx_data(struct rt2661_softc *, struct mbuf *,
struct ieee80211_node *, int);
static int rt2661_tx_mgt(struct rt2661_softc *, struct mbuf *,
struct ieee80211_node *);
static int rt2661_transmit(struct ieee80211com *, struct mbuf *);
static void rt2661_start(struct rt2661_softc *);
static int rt2661_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void rt2661_watchdog(void *);
static void rt2661_parent(struct ieee80211com *);
static void rt2661_bbp_write(struct rt2661_softc *, uint8_t,
uint8_t);
static uint8_t rt2661_bbp_read(struct rt2661_softc *, uint8_t);
static void rt2661_rf_write(struct rt2661_softc *, uint8_t,
uint32_t);
static int rt2661_tx_cmd(struct rt2661_softc *, uint8_t,
uint16_t);
static void rt2661_select_antenna(struct rt2661_softc *);
static void rt2661_enable_mrr(struct rt2661_softc *);
static void rt2661_set_txpreamble(struct rt2661_softc *);
static void rt2661_set_basicrates(struct rt2661_softc *,
const struct ieee80211_rateset *);
static void rt2661_select_band(struct rt2661_softc *,
struct ieee80211_channel *);
static void rt2661_set_chan(struct rt2661_softc *,
struct ieee80211_channel *);
static void rt2661_set_bssid(struct rt2661_softc *,
const uint8_t *);
static void rt2661_set_macaddr(struct rt2661_softc *,
const uint8_t *);
static void rt2661_update_promisc(struct ieee80211com *);
static int rt2661_wme_update(struct ieee80211com *) __unused;
static void rt2661_update_slot(struct ieee80211com *);
static const char *rt2661_get_rf(int);
static void rt2661_read_eeprom(struct rt2661_softc *,
uint8_t macaddr[IEEE80211_ADDR_LEN]);
static int rt2661_bbp_init(struct rt2661_softc *);
static void rt2661_init_locked(struct rt2661_softc *);
static void rt2661_init(void *);
static void rt2661_stop_locked(struct rt2661_softc *);
static void rt2661_stop(void *);
static int rt2661_load_microcode(struct rt2661_softc *);
#ifdef notyet
static void rt2661_rx_tune(struct rt2661_softc *);
static void rt2661_radar_start(struct rt2661_softc *);
static int rt2661_radar_stop(struct rt2661_softc *);
#endif
static int rt2661_prepare_beacon(struct rt2661_softc *,
struct ieee80211vap *);
static void rt2661_enable_tsf_sync(struct rt2661_softc *);
static void rt2661_enable_tsf(struct rt2661_softc *);
static int rt2661_get_rssi(struct rt2661_softc *, uint8_t);
static const struct {
uint32_t reg;
uint32_t val;
} rt2661_def_mac[] = {
RT2661_DEF_MAC
};
static const struct {
uint8_t reg;
uint8_t val;
} rt2661_def_bbp[] = {
RT2661_DEF_BBP
};
static const struct rfprog {
uint8_t chan;
uint32_t r1, r2, r3, r4;
} rt2661_rf5225_1[] = {
RT2661_RF5225_1
}, rt2661_rf5225_2[] = {
RT2661_RF5225_2
};
static const uint8_t rt2661_chan_2ghz[] =
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };
static const uint8_t rt2661_chan_5ghz[] =
{ 36, 40, 44, 48, 52, 56, 60, 64,
100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140,
149, 153, 157, 161, 165 };
int
rt2661_attach(device_t dev, int id)
{
struct rt2661_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
uint32_t val;
int error, ac, ntries;
sc->sc_id = id;
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF | MTX_RECURSE);
callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0);
mbufq_init(&sc->sc_snd, ifqmaxlen);
/* wait for NIC to initialize */
for (ntries = 0; ntries < 1000; ntries++) {
if ((val = RAL_READ(sc, RT2661_MAC_CSR0)) != 0)
break;
DELAY(1000);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for NIC to initialize\n");
error = EIO;
goto fail1;
}
/* retrieve RF rev. no and various other things from EEPROM */
rt2661_read_eeprom(sc, ic->ic_macaddr);
device_printf(dev, "MAC/BBP RT%X, RF %s\n", val,
rt2661_get_rf(sc->rf_rev));
/*
* Allocate Tx and Rx rings.
*/
for (ac = 0; ac < 4; ac++) {
error = rt2661_alloc_tx_ring(sc, &sc->txq[ac],
RT2661_TX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate Tx ring %d\n", ac);
goto fail2;
}
}
error = rt2661_alloc_tx_ring(sc, &sc->mgtq, RT2661_MGT_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Mgt ring\n");
goto fail2;
}
error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Rx ring\n");
goto fail3;
}
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
| IEEE80211_C_HOSTAP /* hostap mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_WDS /* 4-address traffic works */
| IEEE80211_C_MBSS /* mesh point link mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#ifdef notyet
| IEEE80211_C_TXFRAG /* handle tx frags */
| IEEE80211_C_WME /* 802.11e */
#endif
;
rt2661_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
ieee80211_ifattach(ic);
#if 0
ic->ic_wme.wme_update = rt2661_wme_update;
#endif
ic->ic_scan_start = rt2661_scan_start;
ic->ic_scan_end = rt2661_scan_end;
ic->ic_getradiocaps = rt2661_getradiocaps;
ic->ic_set_channel = rt2661_set_channel;
ic->ic_updateslot = rt2661_update_slot;
ic->ic_update_promisc = rt2661_update_promisc;
ic->ic_raw_xmit = rt2661_raw_xmit;
ic->ic_transmit = rt2661_transmit;
ic->ic_parent = rt2661_parent;
ic->ic_vap_create = rt2661_vap_create;
ic->ic_vap_delete = rt2661_vap_delete;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RT2661_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RT2661_RX_RADIOTAP_PRESENT);
#ifdef RAL_DEBUG
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs");
#endif
if (bootverbose)
ieee80211_announce(ic);
return 0;
fail3: rt2661_free_tx_ring(sc, &sc->mgtq);
fail2: while (--ac >= 0)
rt2661_free_tx_ring(sc, &sc->txq[ac]);
fail1: mtx_destroy(&sc->sc_mtx);
return error;
}
int
rt2661_detach(void *xsc)
{
struct rt2661_softc *sc = xsc;
struct ieee80211com *ic = &sc->sc_ic;
RAL_LOCK(sc);
rt2661_stop_locked(sc);
RAL_UNLOCK(sc);
ieee80211_ifdetach(ic);
mbufq_drain(&sc->sc_snd);
rt2661_free_tx_ring(sc, &sc->txq[0]);
rt2661_free_tx_ring(sc, &sc->txq[1]);
rt2661_free_tx_ring(sc, &sc->txq[2]);
rt2661_free_tx_ring(sc, &sc->txq[3]);
rt2661_free_tx_ring(sc, &sc->mgtq);
rt2661_free_rx_ring(sc, &sc->rxq);
mtx_destroy(&sc->sc_mtx);
return 0;
}
static struct ieee80211vap *
rt2661_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct rt2661_softc *sc = ic->ic_softc;
struct rt2661_vap *rvp;
struct ieee80211vap *vap;
switch (opmode) {
case IEEE80211_M_STA:
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
case IEEE80211_M_MONITOR:
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
/* XXXRP: TBD */
if (!TAILQ_EMPTY(&ic->ic_vaps)) {
device_printf(sc->sc_dev, "only 1 vap supported\n");
return NULL;
}
if (opmode == IEEE80211_M_STA)
flags |= IEEE80211_CLONE_NOBEACONS;
break;
case IEEE80211_M_WDS:
if (TAILQ_EMPTY(&ic->ic_vaps) ||
ic->ic_opmode != IEEE80211_M_HOSTAP) {
device_printf(sc->sc_dev,
"wds only supported in ap mode\n");
return NULL;
}
/*
* Silently remove any request for a unique
* bssid; WDS vaps always share the local
* mac address.
*/
flags &= ~IEEE80211_CLONE_BSSID;
break;
default:
device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return NULL;
}
rvp = malloc(sizeof(struct rt2661_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &rvp->ral_vap;
ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override state transition machine */
rvp->ral_newstate = vap->iv_newstate;
vap->iv_newstate = rt2661_newstate;
#if 0
vap->iv_update_beacon = rt2661_beacon_update;
#endif
ieee80211_ratectl_init(vap);
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
ieee80211_media_status, mac);
if (TAILQ_FIRST(&ic->ic_vaps) == vap)
ic->ic_opmode = opmode;
return vap;
}
static void
rt2661_vap_delete(struct ieee80211vap *vap)
{
struct rt2661_vap *rvp = RT2661_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(rvp, M_80211_VAP);
}
void
rt2661_shutdown(void *xsc)
{
struct rt2661_softc *sc = xsc;
rt2661_stop(sc);
}
void
rt2661_suspend(void *xsc)
{
struct rt2661_softc *sc = xsc;
rt2661_stop(sc);
}
void
rt2661_resume(void *xsc)
{
struct rt2661_softc *sc = xsc;
if (sc->sc_ic.ic_nrunning > 0)
rt2661_init(sc);
}
static void
rt2661_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
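/*
 * Ring allocation follows the usual busdma pattern: create a tag,
 * allocate descriptor memory, then load the map with
 * rt2661_dma_map_addr() as the callback that records the single
 * physical segment address in ring->physaddr.
 */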
static int
rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring,
int count)
{
int i, error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = ring->stat = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2661_TX_DESC_SIZE, 1, count * RT2661_TX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2661_TX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
- ring->data = mallocarray(count, sizeof(struct rt2661_tx_data), M_DEVBUF,
+ ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
RT2661_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
error = bus_dmamap_create(ring->data_dmat, 0,
&ring->data[i].map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
}
return 0;
fail: rt2661_free_tx_ring(sc, ring);
return error;
}
static void
rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring)
{
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
int i;
for (i = 0; i < ring->count; i++) {
desc = &ring->desc[i];
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
desc->flags = 0;
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->queued = 0;
ring->cur = ring->next = ring->stat = 0;
}
static void
rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring)
{
struct rt2661_tx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->ni != NULL)
ieee80211_free_node(data->ni);
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring,
int count)
{
struct rt2661_rx_desc *desc;
struct rt2661_rx_data *data;
bus_addr_t physaddr;
int i, error;
ring->count = count;
ring->cur = ring->next = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2661_RX_DESC_SIZE, 1, count * RT2661_RX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2661_RX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
- ring->data = mallocarray(count, sizeof(struct rt2661_rx_data), M_DEVBUF,
+ ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
/*
* Pre-allocate Rx buffers and populate Rx ring.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
desc = &sc->rxq.desc[i];
data = &sc->rxq.data[i];
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr,
&physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load rx buf DMA map");
goto fail;
}
desc->flags = htole32(RT2661_RX_BUSY);
desc->physaddr = htole32(physaddr);
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
return 0;
fail: rt2661_free_rx_ring(sc, ring);
return error;
}
static void
rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring)
{
int i;
for (i = 0; i < ring->count; i++)
ring->desc[i].flags = htole32(RT2661_RX_BUSY);
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->cur = ring->next = 0;
}
static void
rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring)
{
struct rt2661_rx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2661_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct rt2661_vap *rvp = RT2661_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct rt2661_softc *sc = ic->ic_softc;
int error;
if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) {
uint32_t tmp;
/* abort TSF synchronization */
tmp = RAL_READ(sc, RT2661_TXRX_CSR9);
RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0x00ffffff);
}
error = rvp->ral_newstate(vap, nstate, arg);
if (error == 0 && nstate == IEEE80211_S_RUN) {
struct ieee80211_node *ni = vap->iv_bss;
if (vap->iv_opmode != IEEE80211_M_MONITOR) {
rt2661_enable_mrr(sc);
rt2661_set_txpreamble(sc);
rt2661_set_basicrates(sc, &ni->ni_rates);
rt2661_set_bssid(sc, ni->ni_bssid);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_MBSS) {
error = rt2661_prepare_beacon(sc, vap);
if (error != 0)
return error;
}
if (vap->iv_opmode != IEEE80211_M_MONITOR)
rt2661_enable_tsf_sync(sc);
else
rt2661_enable_tsf(sc);
}
return error;
}
/*
* Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or
* 93C66).
*/
static uint16_t
rt2661_eeprom_read(struct rt2661_softc *sc, uint8_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
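/*
 * Bit-bang the Microwire protocol: select the chip, clock out a start
 * bit, the READ opcode (10) and the word address (6 bits for a 93C46,
 * 8 bits for a 93C66), then clock the 16 data bits back in MSB first.
 */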
/* clock C once before the first command */
RT2661_EEPROM_CTL(sc, 0);
RT2661_EEPROM_CTL(sc, RT2661_S);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
RT2661_EEPROM_CTL(sc, RT2661_S);
/* write start bit (1) */
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C);
/* write READ opcode (10) */
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C);
RT2661_EEPROM_CTL(sc, RT2661_S);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
/* write address (A5-A0 or A7-A0) */
n = (RAL_READ(sc, RT2661_E2PROM_CSR) & RT2661_93C46) ? 5 : 7;
for (; n >= 0; n--) {
RT2661_EEPROM_CTL(sc, RT2661_S |
(((addr >> n) & 1) << RT2661_SHIFT_D));
RT2661_EEPROM_CTL(sc, RT2661_S |
(((addr >> n) & 1) << RT2661_SHIFT_D) | RT2661_C);
}
RT2661_EEPROM_CTL(sc, RT2661_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
tmp = RAL_READ(sc, RT2661_E2PROM_CSR);
val |= ((tmp & RT2661_Q) >> RT2661_SHIFT_Q) << n;
RT2661_EEPROM_CTL(sc, RT2661_S);
}
RT2661_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
RT2661_EEPROM_CTL(sc, RT2661_S);
RT2661_EEPROM_CTL(sc, 0);
RT2661_EEPROM_CTL(sc, RT2661_C);
return val;
}
static void
rt2661_tx_intr(struct rt2661_softc *sc)
{
struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
struct rt2661_tx_ring *txq;
struct rt2661_tx_data *data;
uint32_t val;
int error, qid;
txs->flags = IEEE80211_RATECTL_TX_FAIL_LONG;
for (;;) {
struct ieee80211_node *ni;
struct mbuf *m;
val = RAL_READ(sc, RT2661_STA_CSR4);
if (!(val & RT2661_TX_STAT_VALID))
break;
/* retrieve the queue in which this frame was sent */
qid = RT2661_TX_QID(val);
txq = (qid <= 3) ? &sc->txq[qid] : &sc->mgtq;
/* retrieve rate control algorithm context */
data = &txq->data[txq->stat];
m = data->m;
data->m = NULL;
ni = data->ni;
data->ni = NULL;
/* if no frame has been sent, ignore */
if (ni == NULL)
continue;
switch (RT2661_TX_RESULT(val)) {
case RT2661_TX_SUCCESS:
txs->status = IEEE80211_RATECTL_TX_SUCCESS;
txs->long_retries = RT2661_TX_RETRYCNT(val);
DPRINTFN(sc, 10, "data frame sent successfully after "
"%d retries\n", txs->long_retries);
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(ni, txs);
error = 0;
break;
case RT2661_TX_RETRY_FAIL:
txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
txs->long_retries = RT2661_TX_RETRYCNT(val);
DPRINTFN(sc, 9, "%s\n",
"sending data frame failed (too much retries)");
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(ni, txs);
error = 1;
break;
default:
/* other failure */
device_printf(sc->sc_dev,
"sending data frame failed 0x%08x\n", val);
error = 1;
}
DPRINTFN(sc, 15, "tx done q=%d idx=%u\n", qid, txq->stat);
txq->queued--;
if (++txq->stat >= txq->count) /* faster than % count */
txq->stat = 0;
ieee80211_tx_complete(ni, m, error);
}
sc->sc_tx_timer = 0;
rt2661_start(sc);
}
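/*
 * rt2661_tx_intr() above consumes per-frame TX status words from
 * STA_CSR4 and feeds them to rate control; rt2661_tx_dma_intr() below
 * only reclaims descriptors whose DMA transfer has completed.
 */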
static void
rt2661_tx_dma_intr(struct rt2661_softc *sc, struct rt2661_tx_ring *txq)
{
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_POSTREAD);
for (;;) {
desc = &txq->desc[txq->next];
data = &txq->data[txq->next];
if ((le32toh(desc->flags) & RT2661_TX_BUSY) ||
!(le32toh(desc->flags) & RT2661_TX_VALID))
break;
bus_dmamap_sync(txq->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->data_dmat, data->map);
/* descriptor is no longer valid */
desc->flags &= ~htole32(RT2661_TX_VALID);
DPRINTFN(sc, 15, "tx dma done q=%p idx=%u\n", txq, txq->next);
if (++txq->next >= txq->count) /* faster than % count */
txq->next = 0;
}
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
}
static void
rt2661_rx_intr(struct rt2661_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct rt2661_rx_desc *desc;
struct rt2661_rx_data *data;
bus_addr_t physaddr;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *mnew, *m;
int error;
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_POSTREAD);
for (;;) {
int8_t rssi, nf;
desc = &sc->rxq.desc[sc->rxq.cur];
data = &sc->rxq.data[sc->rxq.cur];
if (le32toh(desc->flags) & RT2661_RX_BUSY)
break;
if ((le32toh(desc->flags) & RT2661_RX_PHY_ERROR) ||
(le32toh(desc->flags) & RT2661_RX_CRC_ERROR)) {
/*
* This should not happen since we did not request
* to receive those frames when we filled TXRX_CSR0.
*/
DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n",
le32toh(desc->flags));
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
if ((le32toh(desc->flags) & RT2661_RX_CIPHER_MASK) != 0) {
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* Try to allocate a new mbuf for this ring element and load it
* before processing the current mbuf. If the ring element
* cannot be loaded, drop the received packet and reuse the old
* mbuf. In the unlikely case that the old mbuf can't be
* reloaded either, explicitly panic.
*/
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(mnew, void *), MCLBYTES, rt2661_dma_map_addr,
&physaddr, 0);
if (error != 0) {
m_freem(mnew);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(data->m, void *), MCLBYTES,
rt2661_dma_map_addr, &physaddr, 0);
if (error != 0) {
/* very unlikely that it will fail... */
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = data->m;
data->m = mnew;
desc->physaddr = htole32(physaddr);
/* finalize mbuf */
m->m_pkthdr.len = m->m_len =
(le32toh(desc->flags) >> 16) & 0xfff;
rssi = rt2661_get_rssi(sc, desc->rssi);
/* Error happened during RSSI conversion. */
if (rssi < 0)
rssi = -30; /* XXX ignored by net80211 */
nf = RT2661_NOISE_FLOOR;
if (ieee80211_radiotap_active(ic)) {
struct rt2661_rx_radiotap_header *tap = &sc->sc_rxtap;
uint32_t tsf_lo, tsf_hi;
/* get timestamp (low and high 32 bits) */
tsf_hi = RAL_READ(sc, RT2661_TXRX_CSR13);
tsf_lo = RAL_READ(sc, RT2661_TXRX_CSR12);
tap->wr_tsf =
htole64(((uint64_t)tsf_hi << 32) | tsf_lo);
tap->wr_flags = 0;
tap->wr_rate = ieee80211_plcp2rate(desc->rate,
(desc->flags & htole32(RT2661_RX_OFDM)) ?
IEEE80211_T_OFDM : IEEE80211_T_CCK);
tap->wr_antsignal = nf + rssi;
tap->wr_antnoise = nf;
}
sc->sc_flags |= RAL_INPUT_RUNNING;
RAL_UNLOCK(sc);
wh = mtod(m, struct ieee80211_frame *);
/* send the frame to the 802.11 layer */
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
RAL_LOCK(sc);
sc->sc_flags &= ~RAL_INPUT_RUNNING;
skip: desc->flags |= htole32(RT2661_RX_BUSY);
DPRINTFN(sc, 15, "rx intr idx=%u\n", sc->rxq.cur);
sc->rxq.cur = (sc->rxq.cur + 1) % RT2661_RX_RING_COUNT;
}
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_PREWRITE);
}
/* ARGSUSED */
static void
rt2661_mcu_beacon_expire(struct rt2661_softc *sc)
{
/* do nothing */
}
static void
rt2661_mcu_wakeup(struct rt2661_softc *sc)
{
RAL_WRITE(sc, RT2661_MAC_CSR11, 5 << 16);
RAL_WRITE(sc, RT2661_SOFT_RESET_CSR, 0x7);
RAL_WRITE(sc, RT2661_IO_CNTL_CSR, 0x18);
RAL_WRITE(sc, RT2661_PCI_USEC_CSR, 0x20);
/* send wakeup command to MCU */
rt2661_tx_cmd(sc, RT2661_MCU_CMD_WAKEUP, 0);
}
static void
rt2661_mcu_cmd_intr(struct rt2661_softc *sc)
{
RAL_READ(sc, RT2661_M2H_CMD_DONE_CSR);
RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff);
}
void
rt2661_intr(void *arg)
{
struct rt2661_softc *sc = arg;
uint32_t r1, r2;
RAL_LOCK(sc);
/* disable MAC and MCU interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffff7f);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff);
/* don't re-enable interrupts if we're shutting down */
if (!(sc->sc_flags & RAL_RUNNING)) {
RAL_UNLOCK(sc);
return;
}
r1 = RAL_READ(sc, RT2661_INT_SOURCE_CSR);
RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, r1);
r2 = RAL_READ(sc, RT2661_MCU_INT_SOURCE_CSR);
RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, r2);
if (r1 & RT2661_MGT_DONE)
rt2661_tx_dma_intr(sc, &sc->mgtq);
if (r1 & RT2661_RX_DONE)
rt2661_rx_intr(sc);
if (r1 & RT2661_TX0_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[0]);
if (r1 & RT2661_TX1_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[1]);
if (r1 & RT2661_TX2_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[2]);
if (r1 & RT2661_TX3_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[3]);
if (r1 & RT2661_TX_DONE)
rt2661_tx_intr(sc);
if (r2 & RT2661_MCU_CMD_DONE)
rt2661_mcu_cmd_intr(sc);
if (r2 & RT2661_MCU_BEACON_EXPIRE)
rt2661_mcu_beacon_expire(sc);
if (r2 & RT2661_MCU_WAKEUP)
rt2661_mcu_wakeup(sc);
/* re-enable MAC and MCU interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0);
RAL_UNLOCK(sc);
}
static uint8_t
rt2661_plcp_signal(int rate)
{
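/* Rates are in 500 kb/s units: 2 = 1 Mb/s, 22 = 11 Mb/s, 108 = 54 Mb/s. */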
switch (rate) {
/* OFDM rates (cf. IEEE Std 802.11a-1999, p. 14, Table 80) */
case 12: return 0xb;
case 18: return 0xf;
case 24: return 0xa;
case 36: return 0xe;
case 48: return 0x9;
case 72: return 0xd;
case 96: return 0x8;
case 108: return 0xc;
/* CCK rates (NB: not IEEE std, device-specific) */
case 2: return 0x0;
case 4: return 0x1;
case 11: return 0x2;
case 22: return 0x3;
}
return 0xff; /* XXX unsupported/unknown rate */
}
static void
rt2661_setup_tx_desc(struct rt2661_softc *sc, struct rt2661_tx_desc *desc,
uint32_t flags, uint16_t xflags, int len, int rate,
const bus_dma_segment_t *segs, int nsegs, int ac)
{
struct ieee80211com *ic = &sc->sc_ic;
uint16_t plcp_length;
int i, remainder;
desc->flags = htole32(flags);
desc->flags |= htole32(len << 16);
desc->flags |= htole32(RT2661_TX_BUSY | RT2661_TX_VALID);
desc->xflags = htole16(xflags);
desc->xflags |= htole16(nsegs << 13);
desc->wme = htole16(
RT2661_QID(ac) |
RT2661_AIFSN(2) |
RT2661_LOGCWMIN(4) |
RT2661_LOGCWMAX(10));
/*
* Remember in which queue this frame was sent. This field is driver
* private data only. It will be made available by the NIC in STA_CSR4
* on Tx interrupts.
*/
desc->qid = ac;
/* setup PLCP fields */
desc->plcp_signal = rt2661_plcp_signal(rate);
desc->plcp_service = 4;
len += IEEE80211_CRC_LEN;
if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) {
desc->flags |= htole32(RT2661_TX_OFDM);
plcp_length = len & 0xfff;
desc->plcp_length_hi = plcp_length >> 6;
desc->plcp_length_lo = plcp_length & 0x3f;
} else {
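/*
 * For CCK, PLCP LENGTH is the frame airtime in microseconds:
 * 8 * len bits at (rate / 2) Mb/s, i.e. 16 * len / rate rounded up
 * (e.g. 1500 bytes at 11 Mb/s: howmany(24000, 22) = 1091 us).  At
 * 11 Mb/s a length-extension bit disambiguates the rounded value.
 */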
plcp_length = howmany(16 * len, rate);
if (rate == 22) {
remainder = (16 * len) % 22;
if (remainder != 0 && remainder < 7)
desc->plcp_service |= RT2661_PLCP_LENGEXT;
}
desc->plcp_length_hi = plcp_length >> 8;
desc->plcp_length_lo = plcp_length & 0xff;
if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
desc->plcp_signal |= 0x08;
}
/* RT2x61 supports scatter with up to 5 segments */
for (i = 0; i < nsegs; i++) {
desc->addr[i] = htole32(segs[i].ds_addr);
desc->len [i] = htole16(segs[i].ds_len);
}
}
static int
rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
struct ieee80211_frame *wh;
struct ieee80211_key *k;
bus_dma_segment_t segs[RT2661_MAX_SCATTER];
uint16_t dur;
uint32_t flags = 0; /* XXX HWSEQ */
int nsegs, rate, error;
desc = &sc->mgtq.desc[sc->mgtq.cur];
data = &sc->mgtq.data[sc->mgtq.cur];
rate = ni->ni_txparms->mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
}
error = bus_dmamap_load_mbuf_sg(sc->mgtq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* management frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
wh = mtod(m0, struct ieee80211_frame *);
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2661_TX_NEED_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
/* tell hardware to add timestamp in probe responses */
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
flags |= RT2661_TX_TIMESTAMP;
}
rt2661_setup_tx_desc(sc, desc, flags, 0 /* XXX HWSEQ */,
m0->m_pkthdr.len, rate, segs, nsegs, RT2661_QID_MGT);
bus_dmamap_sync(sc->mgtq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->mgtq.desc_dmat, sc->mgtq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->mgtq.cur, rate);
/* kick mgt */
sc->mgtq.queued++;
sc->mgtq.cur = (sc->mgtq.cur + 1) % RT2661_MGT_RING_COUNT;
RAL_WRITE(sc, RT2661_TX_CNTL_CSR, RT2661_KICK_MGT);
return 0;
}
static int
rt2661_sendprot(struct rt2661_softc *sc, int ac,
const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate)
{
struct ieee80211com *ic = ni->ni_ic;
struct rt2661_tx_ring *txq = &sc->txq[ac];
const struct ieee80211_frame *wh;
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
struct mbuf *mprot;
int protrate, ackrate, pktlen, flags, isshort, error;
uint16_t dur;
bus_dma_segment_t segs[RT2661_MAX_SCATTER];
int nsegs;
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY,
("protection %d", prot));
wh = mtod(m, const struct ieee80211_frame *);
pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN;
protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
ackrate = ieee80211_ack_rate(ic->ic_rt, rate);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort)
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags = RT2661_TX_MORE_FRAG;
if (prot == IEEE80211_PROT_RTSCTS) {
/* NB: CTS is the same size as an ACK */
dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags |= RT2661_TX_NEED_ACK;
mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur);
} else {
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur);
}
if (mprot == NULL) {
/* XXX stat + msg */
return ENOBUFS;
}
data = &txq->data[txq->cur];
desc = &txq->desc[txq->cur];
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, mprot, segs,
&nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(mprot);
return error;
}
data->m = mprot;
data->ni = ieee80211_ref_node(ni);
/* ctl frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
rt2661_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len,
protrate, segs, 1, ac);
bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
txq->queued++;
txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT;
return 0;
}
static int
rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni, int ac)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = &sc->sc_ic;
struct rt2661_tx_ring *txq = &sc->txq[ac];
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp = ni->ni_txparms;
struct ieee80211_key *k;
struct mbuf *mnew;
bus_dma_segment_t segs[RT2661_MAX_SCATTER];
uint16_t dur;
uint32_t flags;
int error, nsegs, rate, noack = 0;
wh = mtod(m0, struct ieee80211_frame *);
if (m0->m_flags & M_EAPOL) {
rate = tp->mgmtrate;
} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
rate = tp->mcastrate;
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
rate &= IEEE80211_RATE_VAL;
if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
noack = !! ieee80211_wme_vap_ac_is_noack(vap, ac);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
flags = 0;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
int prot = IEEE80211_PROT_NONE;
if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
prot = IEEE80211_PROT_RTSCTS;
else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM)
prot = ic->ic_protmode;
if (prot != IEEE80211_PROT_NONE) {
error = rt2661_sendprot(sc, ac, m0, ni, prot, rate);
if (error) {
m_freem(m0);
return error;
}
flags |= RT2661_TX_LONG_RETRY | RT2661_TX_IFS;
}
}
data = &txq->data[txq->cur];
desc = &txq->desc[txq->cur];
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs,
&nsegs, 0);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m0);
return ENOBUFS;
}
m0 = mnew;
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(m0);
return error;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* remember link conditions for rate adaptation algorithm */
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) {
data->rix = ni->ni_txrate;
/* XXX probably need last rssi value and not avg */
data->rssi = ic->ic_node_getrssi(ni);
} else
data->rix = IEEE80211_FIXED_RATE_NONE;
if (!noack && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2661_TX_NEED_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
}
rt2661_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate, segs,
nsegs, ac);
bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, txq->cur, rate);
/* kick Tx */
txq->queued++;
txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT;
RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 1 << ac);
return 0;
}
static int
rt2661_transmit(struct ieee80211com *ic, struct mbuf *m)
{
struct rt2661_softc *sc = ic->ic_softc;
int error;
RAL_LOCK(sc);
if ((sc->sc_flags & RAL_RUNNING) == 0) {
RAL_UNLOCK(sc);
return (ENXIO);
}
error = mbufq_enqueue(&sc->sc_snd, m);
if (error) {
RAL_UNLOCK(sc);
return (error);
}
rt2661_start(sc);
RAL_UNLOCK(sc);
return (0);
}
static void
rt2661_start(struct rt2661_softc *sc)
{
struct mbuf *m;
struct ieee80211_node *ni;
int ac;
RAL_LOCK_ASSERT(sc);
/* prevent management frames from being sent if we're not ready */
if (!(sc->sc_flags & RAL_RUNNING) || sc->sc_invalid)
return;
while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ac = M_WME_GETAC(m);
if (sc->txq[ac].queued >= RT2661_TX_RING_COUNT - 1) {
/* there is no place left in this ring */
mbufq_prepend(&sc->sc_snd, m);
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (rt2661_tx_data(sc, m, ni, ac) != 0) {
if_inc_counter(ni->ni_vap->iv_ifp,
IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
break;
}
sc->sc_tx_timer = 5;
}
}
static int
rt2661_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
struct rt2661_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
if (!(sc->sc_flags & RAL_RUNNING)) {
RAL_UNLOCK(sc);
m_freem(m);
return ENETDOWN;
}
if (sc->mgtq.queued >= RT2661_MGT_RING_COUNT) {
RAL_UNLOCK(sc);
m_freem(m);
return ENOBUFS; /* XXX */
}
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
* XXX raw path
*/
if (rt2661_tx_mgt(sc, m, ni) != 0)
goto bad;
sc->sc_tx_timer = 5;
RAL_UNLOCK(sc);
return 0;
bad:
RAL_UNLOCK(sc);
return EIO; /* XXX */
}
static void
rt2661_watchdog(void *arg)
{
struct rt2661_softc *sc = (struct rt2661_softc *)arg;
RAL_LOCK_ASSERT(sc);
KASSERT(sc->sc_flags & RAL_RUNNING, ("not running"));
if (sc->sc_invalid) /* card ejected */
return;
if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
rt2661_init_locked(sc);
counter_u64_add(sc->sc_ic.ic_oerrors, 1);
/* NB: callout is reset in rt2661_init() */
return;
}
callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc);
}
static void
rt2661_parent(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
int startall = 0;
RAL_LOCK(sc);
if (ic->ic_nrunning > 0) {
if ((sc->sc_flags & RAL_RUNNING) == 0) {
rt2661_init_locked(sc);
startall = 1;
} else
rt2661_update_promisc(ic);
} else if (sc->sc_flags & RAL_RUNNING)
rt2661_stop_locked(sc);
RAL_UNLOCK(sc);
if (startall)
ieee80211_start_all(ic);
}
static void
rt2661_bbp_write(struct rt2661_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to BBP\n");
return;
}
tmp = RT2661_BBP_BUSY | (reg & 0x7f) << 8 | val;
RAL_WRITE(sc, RT2661_PHY_CSR3, tmp);
DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val);
}
static uint8_t
rt2661_bbp_read(struct rt2661_softc *sc, uint8_t reg)
{
uint32_t val;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
val = RT2661_BBP_BUSY | RT2661_BBP_READ | reg << 8;
RAL_WRITE(sc, RT2661_PHY_CSR3, val);
for (ntries = 0; ntries < 100; ntries++) {
val = RAL_READ(sc, RT2661_PHY_CSR3);
if (!(val & RT2661_BBP_BUSY))
return val & 0xff;
DELAY(1);
}
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
static void
rt2661_rf_write(struct rt2661_softc *sc, uint8_t reg, uint32_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2661_PHY_CSR4) & RT2661_RF_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
tmp = RT2661_RF_BUSY | RT2661_RF_21BIT | (val & 0x1fffff) << 2 |
(reg & 3);
RAL_WRITE(sc, RT2661_PHY_CSR4, tmp);
/* remember last written value in sc */
sc->rf_regs[reg] = val;
DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0x1fffff);
}
static int
rt2661_tx_cmd(struct rt2661_softc *sc, uint8_t cmd, uint16_t arg)
{
if (RAL_READ(sc, RT2661_H2M_MAILBOX_CSR) & RT2661_H2M_BUSY)
return EIO; /* there is already a command pending */
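/*
 * Post the 16-bit argument together with a "no interrupt on completion"
 * token in the host-to-MCU mailbox, then kick the command register so the
 * on-board 8051 picks the command up.
 */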
RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR,
RT2661_H2M_BUSY | RT2661_TOKEN_NO_INTR << 16 | arg);
RAL_WRITE(sc, RT2661_HOST_CMD_CSR, RT2661_KICK_CMD | cmd);
return 0;
}
static void
rt2661_select_antenna(struct rt2661_softc *sc)
{
uint8_t bbp4, bbp77;
uint32_t tmp;
bbp4 = rt2661_bbp_read(sc, 4);
bbp77 = rt2661_bbp_read(sc, 77);
/* TBD */
/* make sure Rx is disabled before switching antenna */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX);
rt2661_bbp_write(sc, 4, bbp4);
rt2661_bbp_write(sc, 77, bbp77);
/* restore Rx filter */
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
}
/*
* Enable multi-rate retries for frames sent at OFDM rates.
* In 802.11b/g mode, allow fallback to CCK rates.
*/
static void
rt2661_enable_mrr(struct rt2661_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = RAL_READ(sc, RT2661_TXRX_CSR4);
tmp &= ~RT2661_MRR_CCK_FALLBACK;
if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
tmp |= RT2661_MRR_CCK_FALLBACK;
tmp |= RT2661_MRR_ENABLED;
RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp);
}
static void
rt2661_set_txpreamble(struct rt2661_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = RAL_READ(sc, RT2661_TXRX_CSR4);
tmp &= ~RT2661_SHORT_PREAMBLE;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
tmp |= RT2661_SHORT_PREAMBLE;
RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp);
}
static void
rt2661_set_basicrates(struct rt2661_softc *sc,
const struct ieee80211_rateset *rs)
{
struct ieee80211com *ic = &sc->sc_ic;
uint32_t mask = 0;
uint8_t rate;
int i;
for (i = 0; i < rs->rs_nrates; i++) {
rate = rs->rs_rates[i];
if (!(rate & IEEE80211_RATE_BASIC))
continue;
mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt,
IEEE80211_RV(rate));
}
RAL_WRITE(sc, RT2661_TXRX_CSR5, mask);
DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask);
}
/*
* Reprogram MAC/BBP to switch to a new band. Values taken from the reference
* driver.
*/
static void
rt2661_select_band(struct rt2661_softc *sc, struct ieee80211_channel *c)
{
uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104;
uint32_t tmp;
/* update all BBP registers that depend on the band */
bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c;
bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48;
if (IEEE80211_IS_CHAN_5GHZ(c)) {
bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c;
bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10;
}
if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) {
bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10;
}
rt2661_bbp_write(sc, 17, bbp17);
rt2661_bbp_write(sc, 96, bbp96);
rt2661_bbp_write(sc, 104, bbp104);
if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) {
rt2661_bbp_write(sc, 75, 0x80);
rt2661_bbp_write(sc, 86, 0x80);
rt2661_bbp_write(sc, 88, 0x80);
}
rt2661_bbp_write(sc, 35, bbp35);
rt2661_bbp_write(sc, 97, bbp97);
rt2661_bbp_write(sc, 98, bbp98);
tmp = RAL_READ(sc, RT2661_PHY_CSR0);
tmp &= ~(RT2661_PA_PE_2GHZ | RT2661_PA_PE_5GHZ);
if (IEEE80211_IS_CHAN_2GHZ(c))
tmp |= RT2661_PA_PE_2GHZ;
else
tmp |= RT2661_PA_PE_5GHZ;
RAL_WRITE(sc, RT2661_PHY_CSR0, tmp);
}
static void
rt2661_set_chan(struct rt2661_softc *sc, struct ieee80211_channel *c)
{
struct ieee80211com *ic = &sc->sc_ic;
const struct rfprog *rfprog;
uint8_t bbp3, bbp94 = RT2661_BBPR94_DEFAULT;
int8_t power;
u_int i, chan;
chan = ieee80211_chan2ieee(ic, c);
KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan));
/* select the appropriate RF settings based on what EEPROM says */
rfprog = (sc->rfprog == 0) ? rt2661_rf5225_1 : rt2661_rf5225_2;
/* find the settings for this channel (we know it exists) */
for (i = 0; rfprog[i].chan != chan; i++);
power = sc->txpow[i];
if (power < 0) {
bbp94 += power;
power = 0;
} else if (power > 31) {
bbp94 += power - 31;
power = 31;
}
/*
* If we are switching from the 2GHz band to the 5GHz band or
* vice-versa, BBP registers need to be reprogrammed.
*/
if (c->ic_flags != sc->sc_curchan->ic_flags) {
rt2661_select_band(sc, c);
rt2661_select_antenna(sc);
}
sc->sc_curchan = c;
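/*
 * Program the synthesizer registers three times, toggling the low tune bit
 * of RF3 on the middle pass, with 200us settling delays in between.
 */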
rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1);
rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2);
rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7);
rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10);
DELAY(200);
rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1);
rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2);
rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7 | 1);
rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10);
DELAY(200);
rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1);
rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2);
rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7);
rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10);
/* enable smart mode for MIMO-capable RFs */
bbp3 = rt2661_bbp_read(sc, 3);
bbp3 &= ~RT2661_SMART_MODE;
if (sc->rf_rev == RT2661_RF_5325 || sc->rf_rev == RT2661_RF_2529)
bbp3 |= RT2661_SMART_MODE;
rt2661_bbp_write(sc, 3, bbp3);
if (bbp94 != RT2661_BBPR94_DEFAULT)
rt2661_bbp_write(sc, 94, bbp94);
/* 5GHz radio needs a 1ms delay here */
if (IEEE80211_IS_CHAN_5GHZ(c))
DELAY(1000);
}
static void
rt2661_set_bssid(struct rt2661_softc *sc, const uint8_t *bssid)
{
uint32_t tmp;
tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24;
RAL_WRITE(sc, RT2661_MAC_CSR4, tmp);
tmp = bssid[4] | bssid[5] << 8 | RT2661_ONE_BSSID << 16;
RAL_WRITE(sc, RT2661_MAC_CSR5, tmp);
}
static void
rt2661_set_macaddr(struct rt2661_softc *sc, const uint8_t *addr)
{
uint32_t tmp;
tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24;
RAL_WRITE(sc, RT2661_MAC_CSR2, tmp);
tmp = addr[4] | addr[5] << 8;
RAL_WRITE(sc, RT2661_MAC_CSR3, tmp);
}
static void
rt2661_update_promisc(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
tmp &= ~RT2661_DROP_NOT_TO_ME;
if (ic->ic_promisc == 0)
tmp |= RT2661_DROP_NOT_TO_ME;
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
DPRINTF(sc, "%s promiscuous mode\n",
(ic->ic_promisc > 0) ? "entering" : "leaving");
}
/*
* Update QoS (802.11e) settings for each h/w Tx ring.
*/
static int
rt2661_wme_update(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
struct chanAccParams chp;
const struct wmeParams *wmep;
ieee80211_wme_ic_getparams(ic, &chp);
wmep = chp.cap_wmeParams;
/* XXX: not sure about shifts. */
/* XXX: the reference driver plays with AC_VI settings too. */
/* update TxOp */
RAL_WRITE(sc, RT2661_AC_TXOP_CSR0,
wmep[WME_AC_BE].wmep_txopLimit << 16 |
wmep[WME_AC_BK].wmep_txopLimit);
RAL_WRITE(sc, RT2661_AC_TXOP_CSR1,
wmep[WME_AC_VI].wmep_txopLimit << 16 |
wmep[WME_AC_VO].wmep_txopLimit);
/* update CWmin */
RAL_WRITE(sc, RT2661_CWMIN_CSR,
wmep[WME_AC_BE].wmep_logcwmin << 12 |
wmep[WME_AC_BK].wmep_logcwmin << 8 |
wmep[WME_AC_VI].wmep_logcwmin << 4 |
wmep[WME_AC_VO].wmep_logcwmin);
/* update CWmax */
RAL_WRITE(sc, RT2661_CWMAX_CSR,
wmep[WME_AC_BE].wmep_logcwmax << 12 |
wmep[WME_AC_BK].wmep_logcwmax << 8 |
wmep[WME_AC_VI].wmep_logcwmax << 4 |
wmep[WME_AC_VO].wmep_logcwmax);
/* update Aifsn */
RAL_WRITE(sc, RT2661_AIFSN_CSR,
wmep[WME_AC_BE].wmep_aifsn << 12 |
wmep[WME_AC_BK].wmep_aifsn << 8 |
wmep[WME_AC_VI].wmep_aifsn << 4 |
wmep[WME_AC_VO].wmep_aifsn);
return 0;
}
static void
rt2661_update_slot(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
uint8_t slottime;
uint32_t tmp;
slottime = IEEE80211_GET_SLOTTIME(ic);
tmp = RAL_READ(sc, RT2661_MAC_CSR9);
tmp = (tmp & ~0xff) | slottime;
RAL_WRITE(sc, RT2661_MAC_CSR9, tmp);
}
static const char *
rt2661_get_rf(int rev)
{
switch (rev) {
case RT2661_RF_5225: return "RT5225";
case RT2661_RF_5325: return "RT5325 (MIMO XR)";
case RT2661_RF_2527: return "RT2527";
case RT2661_RF_2529: return "RT2529 (MIMO XR)";
default: return "unknown";
}
}
static void
rt2661_read_eeprom(struct rt2661_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
uint16_t val;
int i;
/* read MAC address */
val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC01);
macaddr[0] = val & 0xff;
macaddr[1] = val >> 8;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC23);
macaddr[2] = val & 0xff;
macaddr[3] = val >> 8;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC45);
macaddr[4] = val & 0xff;
macaddr[5] = val >> 8;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_ANTENNA);
/* XXX: test if different from 0xffff? */
sc->rf_rev = (val >> 11) & 0x1f;
sc->hw_radio = (val >> 10) & 0x1;
sc->rx_ant = (val >> 4) & 0x3;
sc->tx_ant = (val >> 2) & 0x3;
sc->nb_ant = val & 0x3;
DPRINTF(sc, "RF revision=%d\n", sc->rf_rev);
val = rt2661_eeprom_read(sc, RT2661_EEPROM_CONFIG2);
sc->ext_5ghz_lna = (val >> 6) & 0x1;
sc->ext_2ghz_lna = (val >> 4) & 0x1;
DPRINTF(sc, "External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n",
sc->ext_2ghz_lna, sc->ext_5ghz_lna);
val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_2GHZ_OFFSET);
if ((val & 0xff) != 0xff)
sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */
/* Only [-10, 10] is valid */
if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10)
sc->rssi_2ghz_corr = 0;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_5GHZ_OFFSET);
if ((val & 0xff) != 0xff)
sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */
/* Only [-10, 10] is valid */
if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10)
sc->rssi_5ghz_corr = 0;
/* adjust RSSI correction for external low-noise amplifier */
if (sc->ext_2ghz_lna)
sc->rssi_2ghz_corr -= 14;
if (sc->ext_5ghz_lna)
sc->rssi_5ghz_corr -= 14;
DPRINTF(sc, "RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n",
sc->rssi_2ghz_corr, sc->rssi_5ghz_corr);
val = rt2661_eeprom_read(sc, RT2661_EEPROM_FREQ_OFFSET);
if ((val >> 8) != 0xff)
sc->rfprog = (val >> 8) & 0x3;
if ((val & 0xff) != 0xff)
sc->rffreq = val & 0xff;
DPRINTF(sc, "RF prog=%d\nRF freq=%d\n", sc->rfprog, sc->rffreq);
/* read Tx power for all a/b/g channels */
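/* each 16-bit EEPROM word packs two signed per-channel values, high byte first */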
for (i = 0; i < 19; i++) {
val = rt2661_eeprom_read(sc, RT2661_EEPROM_TXPOWER + i);
sc->txpow[i * 2] = (int8_t)(val >> 8); /* signed */
DPRINTF(sc, "Channel=%d Tx power=%d\n",
rt2661_rf5225_1[i * 2].chan, sc->txpow[i * 2]);
sc->txpow[i * 2 + 1] = (int8_t)(val & 0xff); /* signed */
DPRINTF(sc, "Channel=%d Tx power=%d\n",
rt2661_rf5225_1[i * 2 + 1].chan, sc->txpow[i * 2 + 1]);
}
/* read vendor-specific BBP values */
for (i = 0; i < 16; i++) {
val = rt2661_eeprom_read(sc, RT2661_EEPROM_BBP_BASE + i);
if (val == 0 || val == 0xffff)
continue; /* skip invalid entries */
sc->bbp_prom[i].reg = val >> 8;
sc->bbp_prom[i].val = val & 0xff;
DPRINTF(sc, "BBP R%d=%02x\n", sc->bbp_prom[i].reg,
sc->bbp_prom[i].val);
}
}
static int
rt2661_bbp_init(struct rt2661_softc *sc)
{
int i, ntries;
uint8_t val;
/* wait for BBP to be ready */
for (ntries = 0; ntries < 100; ntries++) {
val = rt2661_bbp_read(sc, 0);
if (val != 0 && val != 0xff)
break;
DELAY(100);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for BBP\n");
return EIO;
}
/* initialize BBP registers to default values */
for (i = 0; i < nitems(rt2661_def_bbp); i++) {
rt2661_bbp_write(sc, rt2661_def_bbp[i].reg,
rt2661_def_bbp[i].val);
}
/* write vendor-specific BBP values (from EEPROM) */
for (i = 0; i < 16; i++) {
if (sc->bbp_prom[i].reg == 0)
continue;
rt2661_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
}
return 0;
}
static void
rt2661_init_locked(struct rt2661_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp, sta[3];
int i, error, ntries;
RAL_LOCK_ASSERT(sc);
if ((sc->sc_flags & RAL_FW_LOADED) == 0) {
error = rt2661_load_microcode(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not load 8051 microcode, error %d\n",
__func__, error);
return;
}
sc->sc_flags |= RAL_FW_LOADED;
}
rt2661_stop_locked(sc);
/* initialize Tx rings */
RAL_WRITE(sc, RT2661_AC1_BASE_CSR, sc->txq[1].physaddr);
RAL_WRITE(sc, RT2661_AC0_BASE_CSR, sc->txq[0].physaddr);
RAL_WRITE(sc, RT2661_AC2_BASE_CSR, sc->txq[2].physaddr);
RAL_WRITE(sc, RT2661_AC3_BASE_CSR, sc->txq[3].physaddr);
/* initialize Mgt ring */
RAL_WRITE(sc, RT2661_MGT_BASE_CSR, sc->mgtq.physaddr);
/* initialize Rx ring */
RAL_WRITE(sc, RT2661_RX_BASE_CSR, sc->rxq.physaddr);
/* initialize Tx rings sizes */
RAL_WRITE(sc, RT2661_TX_RING_CSR0,
RT2661_TX_RING_COUNT << 24 |
RT2661_TX_RING_COUNT << 16 |
RT2661_TX_RING_COUNT << 8 |
RT2661_TX_RING_COUNT);
RAL_WRITE(sc, RT2661_TX_RING_CSR1,
RT2661_TX_DESC_WSIZE << 16 |
RT2661_TX_RING_COUNT << 8 | /* XXX: HCCA ring unused */
RT2661_MGT_RING_COUNT);
/* initialize Rx rings */
RAL_WRITE(sc, RT2661_RX_RING_CSR,
RT2661_RX_DESC_BACK << 16 |
RT2661_RX_DESC_WSIZE << 8 |
RT2661_RX_RING_COUNT);
/* XXX: some magic here */
RAL_WRITE(sc, RT2661_TX_DMA_DST_CSR, 0xaa);
/* load base addresses of all 5 Tx rings (4 data + 1 mgt) */
RAL_WRITE(sc, RT2661_LOAD_TX_RING_CSR, 0x1f);
/* load base address of Rx ring */
RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 2);
/* initialize MAC registers to default values */
for (i = 0; i < nitems(rt2661_def_mac); i++)
RAL_WRITE(sc, rt2661_def_mac[i].reg, rt2661_def_mac[i].val);
rt2661_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
/* set host ready */
RAL_WRITE(sc, RT2661_MAC_CSR1, 3);
RAL_WRITE(sc, RT2661_MAC_CSR1, 0);
/* wait for BBP/RF to wakeup */
for (ntries = 0; ntries < 1000; ntries++) {
if (RAL_READ(sc, RT2661_MAC_CSR12) & 8)
break;
DELAY(1000);
}
if (ntries == 1000) {
printf("timeout waiting for BBP/RF to wakeup\n");
rt2661_stop_locked(sc);
return;
}
if (rt2661_bbp_init(sc) != 0) {
rt2661_stop_locked(sc);
return;
}
/* select default channel */
sc->sc_curchan = ic->ic_curchan;
rt2661_select_band(sc, sc->sc_curchan);
rt2661_select_antenna(sc);
rt2661_set_chan(sc, sc->sc_curchan);
/* update Rx filter */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0) & 0xffff;
tmp |= RT2661_DROP_PHY_ERROR | RT2661_DROP_CRC_ERROR;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RT2661_DROP_CTL | RT2661_DROP_VER_ERROR |
RT2661_DROP_ACKCTS;
if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
ic->ic_opmode != IEEE80211_M_MBSS)
tmp |= RT2661_DROP_TODS;
if (ic->ic_promisc == 0)
tmp |= RT2661_DROP_NOT_TO_ME;
}
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
/* clear STA registers */
RAL_READ_REGION_4(sc, RT2661_STA_CSR0, sta, nitems(sta));
/* initialize ASIC */
RAL_WRITE(sc, RT2661_MAC_CSR1, 4);
/* clear any pending interrupt */
RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff);
/* enable interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0);
/* kick Rx */
RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 1);
sc->sc_flags |= RAL_RUNNING;
callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc);
}
static void
rt2661_init(void *priv)
{
struct rt2661_softc *sc = priv;
struct ieee80211com *ic = &sc->sc_ic;
RAL_LOCK(sc);
rt2661_init_locked(sc);
RAL_UNLOCK(sc);
if (sc->sc_flags & RAL_RUNNING)
ieee80211_start_all(ic); /* start all vap's */
}
void
rt2661_stop_locked(struct rt2661_softc *sc)
{
volatile int *flags = &sc->sc_flags;
uint32_t tmp;
while (*flags & RAL_INPUT_RUNNING)
msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10);
callout_stop(&sc->watchdog_ch);
sc->sc_tx_timer = 0;
if (sc->sc_flags & RAL_RUNNING) {
sc->sc_flags &= ~RAL_RUNNING;
/* abort Tx (for all 5 Tx rings) */
RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 0x1f << 16);
/* disable Rx (value remains after reset!) */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX);
/* reset ASIC */
RAL_WRITE(sc, RT2661_MAC_CSR1, 3);
RAL_WRITE(sc, RT2661_MAC_CSR1, 0);
/* disable interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffffff);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff);
/* clear any pending interrupt */
RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff);
RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, 0xffffffff);
/* reset Tx and Rx rings */
rt2661_reset_tx_ring(sc, &sc->txq[0]);
rt2661_reset_tx_ring(sc, &sc->txq[1]);
rt2661_reset_tx_ring(sc, &sc->txq[2]);
rt2661_reset_tx_ring(sc, &sc->txq[3]);
rt2661_reset_tx_ring(sc, &sc->mgtq);
rt2661_reset_rx_ring(sc, &sc->rxq);
}
}
void
rt2661_stop(void *priv)
{
struct rt2661_softc *sc = priv;
RAL_LOCK(sc);
rt2661_stop_locked(sc);
RAL_UNLOCK(sc);
}
static int
rt2661_load_microcode(struct rt2661_softc *sc)
{
const struct firmware *fp;
const char *imagename;
int ntries, error;
RAL_LOCK_ASSERT(sc);
switch (sc->sc_id) {
case 0x0301: imagename = "rt2561sfw"; break;
case 0x0302: imagename = "rt2561fw"; break;
case 0x0401: imagename = "rt2661fw"; break;
default:
device_printf(sc->sc_dev, "%s: unexpected pci device id 0x%x, "
"don't know how to retrieve firmware\n",
__func__, sc->sc_id);
return EINVAL;
}
RAL_UNLOCK(sc);
fp = firmware_get(imagename);
RAL_LOCK(sc);
if (fp == NULL) {
device_printf(sc->sc_dev,
"%s: unable to retrieve firmware image %s\n",
__func__, imagename);
return EINVAL;
}
/*
* Load 8051 microcode into NIC.
*/
/* reset 8051 */
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET);
/* cancel any pending Host to MCU command */
RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, 0);
RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff);
RAL_WRITE(sc, RT2661_HOST_CMD_CSR, 0);
/* write 8051's microcode */
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET | RT2661_MCU_SEL);
RAL_WRITE_REGION_1(sc, RT2661_MCU_CODE_BASE, fp->data, fp->datasize);
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET);
/* kick 8051's ass */
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, 0);
/* wait for 8051 to initialize */
for (ntries = 0; ntries < 500; ntries++) {
if (RAL_READ(sc, RT2661_MCU_CNTL_CSR) & RT2661_MCU_READY)
break;
DELAY(100);
}
if (ntries == 500) {
device_printf(sc->sc_dev,
"%s: timeout waiting for MCU to initialize\n", __func__);
error = EIO;
} else
error = 0;
firmware_put(fp, FIRMWARE_UNLOAD);
return error;
}
#ifdef notyet
/*
* Dynamically tune Rx sensitivity (BBP register 17) based on average RSSI and
* false CCA count. This function is called periodically (once per second) when
* in the RUN state. Values taken from the reference driver.
*/
static void
rt2661_rx_tune(struct rt2661_softc *sc)
{
uint8_t bbp17;
uint16_t cca;
int lo, hi, dbm;
/*
* Tuning range depends on operating band and on the presence of an
* external low-noise amplifier.
*/
lo = 0x20;
if (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan))
lo += 0x08;
if ((IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan) && sc->ext_5ghz_lna))
lo += 0x10;
hi = lo + 0x20;
/* retrieve false CCA count since last call (clear on read) */
cca = RAL_READ(sc, RT2661_STA_CSR1) & 0xffff;
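/*
 * NB: nothing in this disabled block derives dbm from the running RSSI
 * average before it is tested below; that still needs to be filled in
 * before the "notyet" guard can be removed.
 */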
if (dbm >= -35) {
bbp17 = 0x60;
} else if (dbm >= -58) {
bbp17 = hi;
} else if (dbm >= -66) {
bbp17 = lo + 0x10;
} else if (dbm >= -74) {
bbp17 = lo + 0x08;
} else {
/* RSSI < -74dBm, tune using false CCA count */
bbp17 = sc->bbp17; /* current value */
hi -= 2 * (-74 - dbm);
if (hi < lo)
hi = lo;
if (bbp17 > hi) {
bbp17 = hi;
} else if (cca > 512) {
if (++bbp17 > hi)
bbp17 = hi;
} else if (cca < 100) {
if (--bbp17 < lo)
bbp17 = lo;
}
}
if (bbp17 != sc->bbp17) {
rt2661_bbp_write(sc, 17, bbp17);
sc->bbp17 = bbp17;
}
}
/*
* Enter/Leave radar detection mode.
* This is for 802.11h additional regulatory domains.
*/
static void
rt2661_radar_start(struct rt2661_softc *sc)
{
uint32_t tmp;
/* disable Rx */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX);
rt2661_bbp_write(sc, 82, 0x20);
rt2661_bbp_write(sc, 83, 0x00);
rt2661_bbp_write(sc, 84, 0x40);
/* save current BBP registers values */
sc->bbp18 = rt2661_bbp_read(sc, 18);
sc->bbp21 = rt2661_bbp_read(sc, 21);
sc->bbp22 = rt2661_bbp_read(sc, 22);
sc->bbp16 = rt2661_bbp_read(sc, 16);
sc->bbp17 = rt2661_bbp_read(sc, 17);
sc->bbp64 = rt2661_bbp_read(sc, 64);
rt2661_bbp_write(sc, 18, 0xff);
rt2661_bbp_write(sc, 21, 0x3f);
rt2661_bbp_write(sc, 22, 0x3f);
rt2661_bbp_write(sc, 16, 0xbd);
rt2661_bbp_write(sc, 17, sc->ext_5ghz_lna ? 0x44 : 0x34);
rt2661_bbp_write(sc, 64, 0x21);
/* restore Rx filter */
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
}
static int
rt2661_radar_stop(struct rt2661_softc *sc)
{
uint8_t bbp66;
/* read radar detection result */
bbp66 = rt2661_bbp_read(sc, 66);
/* restore BBP registers values */
rt2661_bbp_write(sc, 16, sc->bbp16);
rt2661_bbp_write(sc, 17, sc->bbp17);
rt2661_bbp_write(sc, 18, sc->bbp18);
rt2661_bbp_write(sc, 21, sc->bbp21);
rt2661_bbp_write(sc, 22, sc->bbp22);
rt2661_bbp_write(sc, 64, sc->bbp64);
return bbp66 == 1;
}
#endif
static int
rt2661_prepare_beacon(struct rt2661_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct rt2661_tx_desc desc;
struct mbuf *m0;
int rate;
if ((m0 = ieee80211_beacon_alloc(vap->iv_bss)) == NULL) {
device_printf(sc->sc_dev, "could not allocate beacon frame\n");
return ENOBUFS;
}
/* send beacons at the lowest available rate */
rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 12 : 2;
rt2661_setup_tx_desc(sc, &desc, RT2661_TX_TIMESTAMP, RT2661_TX_HWSEQ,
m0->m_pkthdr.len, rate, NULL, 0, RT2661_QID_MGT);
/* copy the first 24 bytes of Tx descriptor into NIC memory */
RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0, (uint8_t *)&desc, 24);
/* copy beacon header and payload into NIC memory */
RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0 + 24,
mtod(m0, uint8_t *), m0->m_pkthdr.len);
m_freem(m0);
return 0;
}
/*
* Enable TSF synchronization and tell h/w to start sending beacons for IBSS
* and HostAP operating modes.
*/
static void
rt2661_enable_tsf_sync(struct rt2661_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
if (vap->iv_opmode != IEEE80211_M_STA) {
/*
* Change default 16ms TBTT adjustment to 8ms.
* Must be done before enabling beacon generation.
*/
RAL_WRITE(sc, RT2661_TXRX_CSR10, 1 << 12 | 8);
}
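/*
 * Preserve the top byte of TXRX_CSR9; the low 24 bits hold the beacon
 * interval, TSF mode and beacon-generation flags programmed below.
 */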
tmp = RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000;
/* set beacon interval (in 1/16ms unit) */
tmp |= vap->iv_bss->ni_intval * 16;
tmp |= RT2661_TSF_TICKING | RT2661_ENABLE_TBTT;
if (vap->iv_opmode == IEEE80211_M_STA)
tmp |= RT2661_TSF_MODE(1);
else
tmp |= RT2661_TSF_MODE(2) | RT2661_GENERATE_BEACON;
RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp);
}
static void
rt2661_enable_tsf(struct rt2661_softc *sc)
{
RAL_WRITE(sc, RT2661_TXRX_CSR9,
(RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000)
| RT2661_TSF_TICKING | RT2661_TSF_MODE(2));
}
/*
* Retrieve the "Received Signal Strength Indicator" from the raw values
* contained in Rx descriptors. The computation depends on the band in which the
* frame was received. Correction values are taken from the reference driver.
*/
static int
rt2661_get_rssi(struct rt2661_softc *sc, uint8_t raw)
{
int lna, agc, rssi;
lna = (raw >> 5) & 0x3;
agc = raw & 0x1f;
if (lna == 0) {
/*
* No mapping available.
*
* NB: Since RSSI is relative to noise floor, -1 is
* adequate for caller to know error happened.
*/
return -1;
}
rssi = (2 * agc) - RT2661_NOISE_FLOOR;
if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan)) {
rssi += sc->rssi_2ghz_corr;
if (lna == 1)
rssi -= 64;
else if (lna == 2)
rssi -= 74;
else if (lna == 3)
rssi -= 90;
} else {
rssi += sc->rssi_5ghz_corr;
if (lna == 1)
rssi -= 64;
else if (lna == 2)
rssi -= 86;
else if (lna == 3)
rssi -= 100;
}
return rssi;
}
static void
rt2661_scan_start(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
uint32_t tmp;
/* abort TSF synchronization */
tmp = RAL_READ(sc, RT2661_TXRX_CSR9);
RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0xffffff);
rt2661_set_bssid(sc, ieee80211broadcastaddr);
}
static void
rt2661_scan_end(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
rt2661_enable_tsf_sync(sc);
/* XXX keep local copy */
rt2661_set_bssid(sc, vap->iv_bss->ni_bssid);
}
static void
rt2661_getradiocaps(struct ieee80211com *ic,
int maxchans, int *nchans, struct ieee80211_channel chans[])
{
struct rt2661_softc *sc = ic->ic_softc;
uint8_t bands[IEEE80211_MODE_BYTES];
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
ieee80211_add_channel_list_2ghz(chans, maxchans, nchans,
rt2661_chan_2ghz, nitems(rt2661_chan_2ghz), bands, 0);
if (sc->rf_rev == RT2661_RF_5225 || sc->rf_rev == RT2661_RF_5325) {
setbit(bands, IEEE80211_MODE_11A);
ieee80211_add_channel_list_5ghz(chans, maxchans, nchans,
rt2661_chan_5ghz, nitems(rt2661_chan_5ghz), bands, 0);
}
}
static void
rt2661_set_channel(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
rt2661_set_chan(sc, ic->ic_curchan);
RAL_UNLOCK(sc);
}
Index: head/sys/dev/rp/rp.c
===================================================================
--- head/sys/dev/rp/rp.c (revision 328217)
+++ head/sys/dev/rp/rp.c (revision 328218)
@@ -1,1116 +1,1115 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) Comtrol Corporation <support@comtrol.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met.
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Comtrol Corporation.
* 4. The name of Comtrol Corporation may not be used to endorse or
* promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* rp.c - for RocketPort FreeBSD
*/
#include "opt_compat.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/serial.h>
#include <sys/tty.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/bus.h>
#include <sys/rman.h>
#define ROCKET_C
#include <dev/rp/rpreg.h>
#include <dev/rp/rpvar.h>
static const char RocketPortVersion[] = "3.02";
static Byte_t RData[RDATASIZE] =
{
0x00, 0x09, 0xf6, 0x82,
0x02, 0x09, 0x86, 0xfb,
0x04, 0x09, 0x00, 0x0a,
0x06, 0x09, 0x01, 0x0a,
0x08, 0x09, 0x8a, 0x13,
0x0a, 0x09, 0xc5, 0x11,
0x0c, 0x09, 0x86, 0x85,
0x0e, 0x09, 0x20, 0x0a,
0x10, 0x09, 0x21, 0x0a,
0x12, 0x09, 0x41, 0xff,
0x14, 0x09, 0x82, 0x00,
0x16, 0x09, 0x82, 0x7b,
0x18, 0x09, 0x8a, 0x7d,
0x1a, 0x09, 0x88, 0x81,
0x1c, 0x09, 0x86, 0x7a,
0x1e, 0x09, 0x84, 0x81,
0x20, 0x09, 0x82, 0x7c,
0x22, 0x09, 0x0a, 0x0a
};
static Byte_t RRegData[RREGDATASIZE]=
{
0x00, 0x09, 0xf6, 0x82, /* 00: Stop Rx processor */
0x08, 0x09, 0x8a, 0x13, /* 04: Tx software flow control */
0x0a, 0x09, 0xc5, 0x11, /* 08: XON char */
0x0c, 0x09, 0x86, 0x85, /* 0c: XANY */
0x12, 0x09, 0x41, 0xff, /* 10: Rx mask char */
0x14, 0x09, 0x82, 0x00, /* 14: Compare/Ignore #0 */
0x16, 0x09, 0x82, 0x7b, /* 18: Compare #1 */
0x18, 0x09, 0x8a, 0x7d, /* 1c: Compare #2 */
0x1a, 0x09, 0x88, 0x81, /* 20: Interrupt #1 */
0x1c, 0x09, 0x86, 0x7a, /* 24: Ignore/Replace #1 */
0x1e, 0x09, 0x84, 0x81, /* 28: Interrupt #2 */
0x20, 0x09, 0x82, 0x7c, /* 2c: Ignore/Replace #2 */
0x22, 0x09, 0x0a, 0x0a /* 30: Rx FIFO Enable */
};
#if 0
/* IRQ number to MUDBAC register 2 mapping */
Byte_t sIRQMap[16] =
{
0,0,0,0x10,0x20,0x30,0,0,0,0x40,0x50,0x60,0x70,0,0,0x80
};
#endif
Byte_t rp_sBitMapClrTbl[8] =
{
0xfe,0xfd,0xfb,0xf7,0xef,0xdf,0xbf,0x7f
};
Byte_t rp_sBitMapSetTbl[8] =
{
0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
};
static void rpfree(void *);
/***************************************************************************
Function: sReadAiopID
Purpose: Read the AIOP identification number directly from an AIOP.
Call: sReadAiopID(CtlP, aiop)
CONTROLLER_T *CtlP; Ptr to controller structure
int aiop: AIOP index
Return: int: Flag AIOPID_XXXX if a valid AIOP is found, where X
is replaced by an identifying number.
Flag AIOPID_NULL if no valid AIOP is found
Warnings: No context switches are allowed while executing this function.
*/
int sReadAiopID(CONTROLLER_T *CtlP, int aiop)
{
Byte_t AiopID; /* ID byte from AIOP */
rp_writeaiop1(CtlP, aiop, _CMD_REG, RESET_ALL); /* reset AIOP */
rp_writeaiop1(CtlP, aiop, _CMD_REG, 0x0);
AiopID = rp_readaiop1(CtlP, aiop, _CHN_STAT0) & 0x07;
if(AiopID == 0x06)
return(1);
else /* AIOP does not exist */
return(-1);
}
/***************************************************************************
Function: sReadAiopNumChan
Purpose: Read the number of channels available in an AIOP directly from
an AIOP.
Call: sReadAiopNumChan(CtlP, aiop)
CONTROLLER_T *CtlP; Ptr to controller structure
int aiop: AIOP index
Return: int: The number of channels available
Comments: The number of channels is determined by write/reads from identical
offsets within the SRAM address spaces for channels 0 and 4.
If the channel 4 space is mirrored to channel 0 it is a 4 channel
AIOP, otherwise it is an 8 channel AIOP.
Warnings: No context switches are allowed while executing this function.
*/
int sReadAiopNumChan(CONTROLLER_T *CtlP, int aiop)
{
Word_t x, y;
rp_writeaiop4(CtlP, aiop, _INDX_ADDR,0x12340000L); /* write to chan 0 SRAM */
rp_writeaiop2(CtlP, aiop, _INDX_ADDR,0); /* read from SRAM, chan 0 */
x = rp_readaiop2(CtlP, aiop, _INDX_DATA);
rp_writeaiop2(CtlP, aiop, _INDX_ADDR,0x4000); /* read from SRAM, chan 4 */
y = rp_readaiop2(CtlP, aiop, _INDX_DATA);
if(x != y) /* if different must be 8 chan */
return(8);
else
return(4);
}
/***************************************************************************
Function: sInitChan
Purpose: Initialization of a channel and channel structure
Call: sInitChan(CtlP,ChP,AiopNum,ChanNum)
CONTROLLER_T *CtlP; Ptr to controller structure
CHANNEL_T *ChP; Ptr to channel structure
int AiopNum; AIOP number within controller
int ChanNum; Channel number within AIOP
Return: int: TRUE if initialization succeeded, FALSE if it failed because the channel
number exceeds the number of channels available in the AIOP.
Comments: This function must be called before a channel can be used.
Warnings: No range checking on any of the parameters is done.
No context switches are allowed while executing this function.
*/
int sInitChan( CONTROLLER_T *CtlP,
CHANNEL_T *ChP,
int AiopNum,
int ChanNum)
{
int i, ChOff;
Byte_t *ChR;
static Byte_t R[4];
if(ChanNum >= CtlP->AiopNumChan[AiopNum])
return(FALSE); /* exceeds num chans in AIOP */
/* Channel, AIOP, and controller identifiers */
ChP->CtlP = CtlP;
ChP->ChanID = CtlP->AiopID[AiopNum];
ChP->AiopNum = AiopNum;
ChP->ChanNum = ChanNum;
/* Initialize the channel from the RData array */
for(i=0; i < RDATASIZE; i+=4)
{
R[0] = RData[i];
R[1] = RData[i+1] + 0x10 * ChanNum;
R[2] = RData[i+2];
R[3] = RData[i+3];
rp_writech4(ChP,_INDX_ADDR,le32dec(R));
}
ChR = ChP->R;
for(i=0; i < RREGDATASIZE; i+=4)
{
ChR[i] = RRegData[i];
ChR[i+1] = RRegData[i+1] + 0x10 * ChanNum;
ChR[i+2] = RRegData[i+2];
ChR[i+3] = RRegData[i+3];
}
/* Indexed registers */
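/*
 * Each indexed-register image below is a 4-byte little-endian packet:
 * bytes 0-1 hold the register address within this channel's 0x1000
 * window, bytes 2-3 hold the value pushed out through _INDX_ADDR.
 */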
ChOff = (Word_t)ChanNum * 0x1000;
ChP->BaudDiv[0] = (Byte_t)(ChOff + _BAUD);
ChP->BaudDiv[1] = (Byte_t)((ChOff + _BAUD) >> 8);
ChP->BaudDiv[2] = (Byte_t)BRD9600;
ChP->BaudDiv[3] = (Byte_t)(BRD9600 >> 8);
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->BaudDiv));
ChP->TxControl[0] = (Byte_t)(ChOff + _TX_CTRL);
ChP->TxControl[1] = (Byte_t)((ChOff + _TX_CTRL) >> 8);
ChP->TxControl[2] = 0;
ChP->TxControl[3] = 0;
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxControl));
ChP->RxControl[0] = (Byte_t)(ChOff + _RX_CTRL);
ChP->RxControl[1] = (Byte_t)((ChOff + _RX_CTRL) >> 8);
ChP->RxControl[2] = 0;
ChP->RxControl[3] = 0;
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->RxControl));
ChP->TxEnables[0] = (Byte_t)(ChOff + _TX_ENBLS);
ChP->TxEnables[1] = (Byte_t)((ChOff + _TX_ENBLS) >> 8);
ChP->TxEnables[2] = 0;
ChP->TxEnables[3] = 0;
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxEnables));
ChP->TxCompare[0] = (Byte_t)(ChOff + _TXCMP1);
ChP->TxCompare[1] = (Byte_t)((ChOff + _TXCMP1) >> 8);
ChP->TxCompare[2] = 0;
ChP->TxCompare[3] = 0;
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxCompare));
ChP->TxReplace1[0] = (Byte_t)(ChOff + _TXREP1B1);
ChP->TxReplace1[1] = (Byte_t)((ChOff + _TXREP1B1) >> 8);
ChP->TxReplace1[2] = 0;
ChP->TxReplace1[3] = 0;
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxReplace1));
ChP->TxReplace2[0] = (Byte_t)(ChOff + _TXREP2);
ChP->TxReplace2[1] = (Byte_t)((ChOff + _TXREP2) >> 8);
ChP->TxReplace2[2] = 0;
ChP->TxReplace2[3] = 0;
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxReplace2));
ChP->TxFIFOPtrs = ChOff + _TXF_OUTP;
ChP->TxFIFO = ChOff + _TX_FIFO;
rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum | RESTXFCNT); /* apply reset Tx FIFO count */
rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum); /* remove reset Tx FIFO count */
rp_writech2(ChP,_INDX_ADDR,ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */
rp_writech2(ChP,_INDX_DATA,0);
ChP->RxFIFOPtrs = ChOff + _RXF_OUTP;
ChP->RxFIFO = ChOff + _RX_FIFO;
rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum | RESRXFCNT); /* apply reset Rx FIFO count */
rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum); /* remove reset Rx FIFO count */
rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs); /* clear Rx out ptr */
rp_writech2(ChP,_INDX_DATA,0);
rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
rp_writech2(ChP,_INDX_DATA,0);
ChP->TxPrioCnt = ChOff + _TXP_CNT;
rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioCnt);
rp_writech1(ChP,_INDX_DATA,0);
ChP->TxPrioPtr = ChOff + _TXP_PNTR;
rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioPtr);
rp_writech1(ChP,_INDX_DATA,0);
ChP->TxPrioBuf = ChOff + _TXP_BUF;
sEnRxProcessor(ChP); /* start the Rx processor */
return(TRUE);
}
/***************************************************************************
Function: sStopRxProcessor
Purpose: Stop the receive processor from processing a channel.
Call: sStopRxProcessor(ChP)
CHANNEL_T *ChP; Ptr to channel structure
Comments: The receive processor can be started again with sStartRxProcessor().
This function causes the receive processor to skip over the
stopped channel. It does not stop it from processing other channels.
Warnings: No context switches are allowed while executing this function.
Do not leave the receive processor stopped for more than one
character time.
After calling this function a delay of 4 uS is required to ensure
that the receive processor is no longer processing this channel.
*/
void sStopRxProcessor(CHANNEL_T *ChP)
{
Byte_t R[4];
R[0] = ChP->R[0];
R[1] = ChP->R[1];
R[2] = 0x0a;
R[3] = ChP->R[3];
rp_writech4(ChP,_INDX_ADDR,le32dec(R));
}
/***************************************************************************
Function: sFlushRxFIFO
Purpose: Flush the Rx FIFO
Call: sFlushRxFIFO(ChP)
CHANNEL_T *ChP; Ptr to channel structure
Return: void
Comments: To prevent data from being enqueued or dequeued in the Rx FIFO
while it is being flushed, the Rx FIFO is temporarily disabled and any
pending Rx errors are cleared. A short delay is done before clearing
the pointers to allow the receive processor to notice the disable.
These items are handled inside this function.
Warnings: No context switches are allowed while executing this function.
*/
void sFlushRxFIFO(CHANNEL_T *ChP)
{
int i;
Byte_t Ch; /* channel number within AIOP */
int RxFIFOEnabled; /* TRUE if Rx FIFO enabled */
if(sGetRxCnt(ChP) == 0) /* Rx FIFO empty */
return; /* don't need to flush */
RxFIFOEnabled = FALSE;
if(ChP->R[0x32] == 0x08) /* Rx FIFO is enabled */
{
RxFIFOEnabled = TRUE;
sDisRxFIFO(ChP); /* disable it */
for(i=0; i < 2000/200; i++) /* delay 2 uS to allow proc to disable FIFO */
rp_readch1(ChP,_INT_CHAN); /* depends on bus i/o timing */
}
sGetChanStatus(ChP); /* clear any pending Rx errors in chan stat */
Ch = (Byte_t)sGetChanNum(ChP);
rp_writech1(ChP,_CMD_REG,Ch | RESRXFCNT); /* apply reset Rx FIFO count */
rp_writech1(ChP,_CMD_REG,Ch); /* remove reset Rx FIFO count */
rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs); /* clear Rx out ptr */
rp_writech2(ChP,_INDX_DATA,0);
rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
rp_writech2(ChP,_INDX_DATA,0);
if(RxFIFOEnabled)
sEnRxFIFO(ChP); /* enable Rx FIFO */
}
/***************************************************************************
Function: sFlushTxFIFO
Purpose: Flush the Tx FIFO
Call: sFlushTxFIFO(ChP)
CHANNEL_T *ChP; Ptr to channel structure
Return: void
Comments: To prevent data from being enqueued or dequeued in the Tx FIFO
while it is being flushed the receive processor is stopped
and the transmitter is disabled. After these operations a
4 uS delay is done before clearing the pointers to allow
the receive processor to stop. These items are handled inside
this function.
Warnings: No context switches are allowed while executing this function.
*/
void sFlushTxFIFO(CHANNEL_T *ChP)
{
int i;
Byte_t Ch; /* channel number within AIOP */
int TxEnabled; /* TRUE if transmitter enabled */
if(sGetTxCnt(ChP) == 0) /* Tx FIFO empty */
return; /* don't need to flush */
TxEnabled = FALSE;
if(ChP->TxControl[3] & TX_ENABLE)
{
TxEnabled = TRUE;
sDisTransmit(ChP); /* disable transmitter */
}
sStopRxProcessor(ChP); /* stop Rx processor */
for(i = 0; i < 4000/200; i++) /* delay 4 uS to allow proc to stop */
rp_readch1(ChP,_INT_CHAN); /* depends on bus i/o timing */
Ch = (Byte_t)sGetChanNum(ChP);
rp_writech1(ChP,_CMD_REG,Ch | RESTXFCNT); /* apply reset Tx FIFO count */
rp_writech1(ChP,_CMD_REG,Ch); /* remove reset Tx FIFO count */
rp_writech2(ChP,_INDX_ADDR,ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */
rp_writech2(ChP,_INDX_DATA,0);
if(TxEnabled)
sEnTransmit(ChP); /* enable transmitter */
sStartRxProcessor(ChP); /* restart Rx processor */
}
/***************************************************************************
Function: sWriteTxPrioByte
Purpose: Write a byte of priority transmit data to a channel
Call: sWriteTxPrioByte(ChP,Data)
CHANNEL_T *ChP; Ptr to channel structure
Byte_t Data; The transmit data byte
Return: int: 1 if the byte is successfully written, otherwise 0.
Comments: The priority byte is transmitted before any data in the Tx FIFO.
Warnings: No context switches are allowed while executing this function.
*/
int sWriteTxPrioByte(CHANNEL_T *ChP, Byte_t Data)
{
Byte_t DWBuf[4]; /* buffer for double word writes */
if(sGetTxCnt(ChP) > 1) /* write it to Tx priority buffer */
{
rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioCnt); /* get priority buffer status */
if(rp_readch1(ChP,_INDX_DATA) & PRI_PEND) /* priority buffer busy */
return(0); /* nothing sent */
le16enc(DWBuf,ChP->TxPrioBuf); /* data byte address */
DWBuf[2] = Data; /* data byte value */
DWBuf[3] = 0; /* priority buffer pointer */
rp_writech4(ChP,_INDX_ADDR,le32dec(DWBuf)); /* write it out */
le16enc(DWBuf,ChP->TxPrioCnt); /* Tx priority count address */
DWBuf[2] = PRI_PEND + 1; /* indicate 1 byte pending */
DWBuf[3] = 0; /* priority buffer pointer */
rp_writech4(ChP,_INDX_ADDR,le32dec(DWBuf)); /* write it out */
}
else /* write it to Tx FIFO */
{
sWriteTxByte(ChP,sGetTxRxDataIO(ChP),Data);
}
return(1); /* 1 byte sent */
}
/***************************************************************************
Function: sEnInterrupts
Purpose: Enable one or more interrupts for a channel
Call: sEnInterrupts(ChP,Flags)
CHANNEL_T *ChP; Ptr to channel structure
Word_t Flags: Interrupt enable flags, can be any combination
of the following flags:
TXINT_EN: Interrupt on Tx FIFO empty
RXINT_EN: Interrupt on Rx FIFO at trigger level (see
sSetRxTrigger())
SRCINT_EN: Interrupt on SRC (Special Rx Condition)
MCINT_EN: Interrupt on modem input change
CHANINT_EN: Allow channel interrupt signal to the AIOP's
Interrupt Channel Register.
Return: void
Comments: If an interrupt enable flag is set in Flags, that interrupt will be
enabled. If an interrupt enable flag is not set in Flags, that
interrupt will not be changed. Interrupts can be disabled with
function sDisInterrupts().
This function sets the appropriate bit for the channel in the AIOP's
Interrupt Mask Register if the CHANINT_EN flag is set. This allows
this channel's bit to be set in the AIOP's Interrupt Channel Register.
Interrupts must also be globally enabled before channel interrupts
will be passed on to the host. This is done with function
sEnGlobalInt().
In some cases it may be desirable to disable interrupts globally but
enable channel interrupts. This would allow the global interrupt
status register to be used to determine which AIOPs need service.
*/
void sEnInterrupts(CHANNEL_T *ChP,Word_t Flags)
{
Byte_t Mask; /* Interrupt Mask Register */
ChP->RxControl[2] |=
((Byte_t)Flags & (RXINT_EN | SRCINT_EN | MCINT_EN));
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->RxControl));
ChP->TxControl[2] |= ((Byte_t)Flags & TXINT_EN);
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxControl));
if(Flags & CHANINT_EN)
{
Mask = rp_readch1(ChP,_INT_MASK) | rp_sBitMapSetTbl[ChP->ChanNum];
rp_writech1(ChP,_INT_MASK,Mask);
}
}
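/*
 * Typical usage (a sketch, mirroring what rpopen() does further down):
 *
 *	sEnInterrupts(ChP, TXINT_EN|RXINT_EN|SRCINT_EN|MCINT_EN|CHANINT_EN);
 *
 * followed by sEnGlobalInt() if the host is to be interrupted rather than
 * polled through the AIOP's interrupt status registers.
 */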
/***************************************************************************
Function: sDisInterrupts
Purpose: Disable one or more interrupts for a channel
Call: sDisInterrupts(ChP,Flags)
CHANNEL_T *ChP; Ptr to channel structure
Word_t Flags: Interrupt flags, can be any combination
of the following flags:
TXINT_EN: Interrupt on Tx FIFO empty
RXINT_EN: Interrupt on Rx FIFO at trigger level (see
sSetRxTrigger())
SRCINT_EN: Interrupt on SRC (Special Rx Condition)
MCINT_EN: Interrupt on modem input change
CHANINT_EN: Disable channel interrupt signal to the
AIOP's Interrupt Channel Register.
Return: void
Comments: If an interrupt flag is set in Flags, that interrupt will be
disabled. If an interrupt flag is not set in Flags, that
interrupt will not be changed. Interrupts can be enabled with
function sEnInterrupts().
This function clears the appropriate bit for the channel in the AIOP's
Interrupt Mask Register if the CHANINT_EN flag is set. This blocks
this channel's bit from being set in the AIOP's Interrupt Channel
Register.
*/
void sDisInterrupts(CHANNEL_T *ChP,Word_t Flags)
{
Byte_t Mask; /* Interrupt Mask Register */
ChP->RxControl[2] &=
~((Byte_t)Flags & (RXINT_EN | SRCINT_EN | MCINT_EN));
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->RxControl));
ChP->TxControl[2] &= ~((Byte_t)Flags & TXINT_EN);
rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxControl));
if(Flags & CHANINT_EN)
{
Mask = rp_readch1(ChP,_INT_MASK) & rp_sBitMapClrTbl[ChP->ChanNum];
rp_writech1(ChP,_INT_MASK,Mask);
}
}
/*********************************************************************
Begin FreeBSD-specific driver code
**********************************************************************/
#define POLL_INTERVAL (hz / 100)
#define RP_ISMULTIPORT(dev) ((dev)->id_flags & 0x1)
#define RP_MPMASTER(dev) (((dev)->id_flags >> 8) & 0xff)
#define RP_NOTAST4(dev) ((dev)->id_flags & 0x04)
/*
* The top-level routines begin here
*/
static void rpclose(struct tty *tp);
static void rphardclose(struct tty *tp);
static int rpmodem(struct tty *, int, int);
static int rpparam(struct tty *, struct termios *);
static void rpstart(struct tty *);
static int rpioctl(struct tty *, u_long, caddr_t, struct thread *);
static int rpopen(struct tty *);
static void rp_do_receive(struct rp_port *rp, struct tty *tp,
CHANNEL_t *cp, unsigned int ChanStatus)
{
unsigned int CharNStat;
int ToRecv, ch, err = 0;
ToRecv = sGetRxCnt(cp);
if(ToRecv == 0)
return;
/* If status indicates there are errored characters in the
FIFO, then enter status mode (a word in FIFO holds
characters and status)
*/
if(ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) {
if(!(ChanStatus & STATMODE)) {
ChanStatus |= STATMODE;
sEnRxStatusMode(cp);
}
}
/*
if we previously entered status mode then read down the
FIFO one word at a time, pulling apart the character and
the status. Update error counters depending on status.
*/
tty_lock(tp);
if(ChanStatus & STATMODE) {
while(ToRecv) {
CharNStat = rp_readch2(cp,sGetTxRxDataIO(cp));
ch = CharNStat & 0xff;
if((CharNStat & STMBREAK) || (CharNStat & STMFRAMEH))
err |= TRE_FRAMING;
else if (CharNStat & STMPARITYH)
err |= TRE_PARITY;
else if (CharNStat & STMRCVROVRH) {
rp->rp_overflows++;
err |= TRE_OVERRUN;
}
ttydisc_rint(tp, ch, err);
ToRecv--;
}
/*
After emptying the FIFO in status mode, turn off status mode
*/
if(sGetRxCnt(cp) == 0) {
sDisRxStatusMode(cp);
}
} else {
ToRecv = sGetRxCnt(cp);
while (ToRecv) {
ch = rp_readch1(cp,sGetTxRxDataIO(cp));
ttydisc_rint(tp, ch & 0xff, err);
ToRecv--;
}
}
ttydisc_rint_done(tp);
tty_unlock(tp);
}
static void rp_handle_port(struct rp_port *rp)
{
CHANNEL_t *cp;
struct tty *tp;
unsigned int IntMask, ChanStatus;
if(!rp)
return;
cp = &rp->rp_channel;
tp = rp->rp_tty;
IntMask = sGetChanIntID(cp);
IntMask = IntMask & rp->rp_intmask;
ChanStatus = sGetChanStatus(cp);
if(IntMask & RXF_TRIG)
rp_do_receive(rp, tp, cp, ChanStatus);
if(IntMask & DELTA_CD) {
if(ChanStatus & CD_ACT) {
(void)ttydisc_modem(tp, 1);
} else {
(void)ttydisc_modem(tp, 0);
}
}
/* oldcts = rp->rp_cts;
rp->rp_cts = ((ChanStatus & CTS_ACT) != 0);
if(oldcts != rp->rp_cts) {
printf("CTS change (now %s)... on port %d\n", rp->rp_cts ? "on" : "off", rp->rp_port);
}
*/
}
static void rp_do_poll(void *arg)
{
CONTROLLER_t *ctl;
struct rp_port *rp;
struct tty *tp;
int count;
unsigned char CtlMask, AiopMask;
rp = arg;
tp = rp->rp_tty;
tty_lock_assert(tp, MA_OWNED);
ctl = rp->rp_ctlp;
CtlMask = ctl->ctlmask(ctl);
if (CtlMask & (1 << rp->rp_aiop)) {
AiopMask = sGetAiopIntStatus(ctl, rp->rp_aiop);
if (AiopMask & (1 << rp->rp_chan)) {
rp_handle_port(rp);
}
}
count = sGetTxCnt(&rp->rp_channel);
if (count >= 0 && (count <= rp->rp_restart)) {
rpstart(tp);
}
callout_schedule(&rp->rp_timer, POLL_INTERVAL);
}
static struct ttydevsw rp_tty_class = {
.tsw_flags = TF_INITLOCK|TF_CALLOUT,
.tsw_open = rpopen,
.tsw_close = rpclose,
.tsw_outwakeup = rpstart,
.tsw_ioctl = rpioctl,
.tsw_param = rpparam,
.tsw_modem = rpmodem,
.tsw_free = rpfree,
};
static void
rpfree(void *softc)
{
struct rp_port *rp = softc;
CONTROLLER_t *ctlp = rp->rp_ctlp;
atomic_subtract_32(&ctlp->free, 1);
}
int
rp_attachcommon(CONTROLLER_T *ctlp, int num_aiops, int num_ports)
{
int unit;
int num_chan;
int aiop, chan, port;
int ChanStatus;
int retval;
struct rp_port *rp;
struct tty *tp;
unit = device_get_unit(ctlp->dev);
printf("RocketPort%d (Version %s) %d ports.\n", unit,
RocketPortVersion, num_ports);
ctlp->num_ports = num_ports;
ctlp->rp = rp = (struct rp_port *)
- mallocarray(num_ports, sizeof(struct rp_port), M_DEVBUF,
- M_NOWAIT | M_ZERO);
+ malloc(sizeof(struct rp_port) * num_ports, M_DEVBUF, M_NOWAIT | M_ZERO);
if (rp == NULL) {
device_printf(ctlp->dev, "rp_attachcommon: Could not malloc rp_ports structures.\n");
retval = ENOMEM;
goto nogo;
}
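/*
 * The hunk above replaces mallocarray(9) with a plain malloc(9) whose size is
 * computed by multiplication. What mallocarray adds is a guard against that
 * multiplication overflowing. A minimal userland sketch of such a guard
 * (illustrative only; this is not the kernel implementation):
 */
#include <stdint.h>
#include <stdlib.h>

/* Return NULL instead of allocating a wrapped-around size. */
static void *
alloc_array(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);		/* nmemb * size would overflow */
	return (malloc(nmemb * size));
}

int
main(void)
{
	void *p = alloc_array(16, sizeof(int));	/* small request succeeds */
	void *q = alloc_array(SIZE_MAX, 2);	/* overflow -> NULL */

	free(p);
	return (q == NULL ? 0 : 1);
}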
port = 0;
for(aiop=0; aiop < num_aiops; aiop++) {
num_chan = sGetAiopNumChan(ctlp, aiop);
for(chan=0; chan < num_chan; chan++, port++, rp++) {
rp->rp_tty = tp = tty_alloc(&rp_tty_class, rp);
callout_init_mtx(&rp->rp_timer, tty_getlock(tp), 0);
rp->rp_port = port;
rp->rp_ctlp = ctlp;
rp->rp_unit = unit;
rp->rp_chan = chan;
rp->rp_aiop = aiop;
rp->rp_intmask = RXF_TRIG | TXFIFO_MT | SRC_INT |
DELTA_CD | DELTA_CTS | DELTA_DSR;
#ifdef notdef
ChanStatus = sGetChanStatus(&rp->rp_channel);
#endif /* notdef */
if(sInitChan(ctlp, &rp->rp_channel, aiop, chan) == 0) {
device_printf(ctlp->dev, "RocketPort sInitChan(%d, %d, %d) failed.\n",
unit, aiop, chan);
retval = ENXIO;
goto nogo;
}
ChanStatus = sGetChanStatus(&rp->rp_channel);
rp->rp_cts = (ChanStatus & CTS_ACT) != 0;
tty_makedev(tp, NULL, "R%r%r", unit, port);
}
}
mtx_init(&ctlp->hwmtx, "rp_hwmtx", NULL, MTX_DEF);
ctlp->hwmtx_init = 1;
return (0);
nogo:
rp_releaseresource(ctlp);
return (retval);
}
void
rp_releaseresource(CONTROLLER_t *ctlp)
{
struct rp_port *rp;
int i;
if (ctlp->rp != NULL) {
for (i = 0; i < ctlp->num_ports; i++) {
rp = ctlp->rp + i;
atomic_add_32(&ctlp->free, 1);
tty_lock(rp->rp_tty);
tty_rel_gone(rp->rp_tty);
}
free(ctlp->rp, M_DEVBUF);
ctlp->rp = NULL;
}
while (ctlp->free != 0) {
pause("rpwt", hz / 10);
}
if (ctlp->hwmtx_init)
mtx_destroy(&ctlp->hwmtx);
}
static int
rpopen(struct tty *tp)
{
struct rp_port *rp;
int flags;
unsigned int IntMask, ChanStatus;
rp = tty_softc(tp);
flags = 0;
flags |= SET_RTS;
flags |= SET_DTR;
rp->rp_channel.TxControl[3] =
((rp->rp_channel.TxControl[3]
& ~(SET_RTS | SET_DTR)) | flags);
rp_writech4(&rp->rp_channel,_INDX_ADDR,
le32dec(rp->rp_channel.TxControl));
sSetRxTrigger(&rp->rp_channel, TRIG_1);
sDisRxStatusMode(&rp->rp_channel);
sFlushRxFIFO(&rp->rp_channel);
sFlushTxFIFO(&rp->rp_channel);
sEnInterrupts(&rp->rp_channel,
(TXINT_EN|MCINT_EN|RXINT_EN|SRCINT_EN|CHANINT_EN));
sSetRxTrigger(&rp->rp_channel, TRIG_1);
sDisRxStatusMode(&rp->rp_channel);
sClrTxXOFF(&rp->rp_channel);
/* sDisRTSFlowCtl(&rp->rp_channel);
sDisCTSFlowCtl(&rp->rp_channel);
*/
sDisTxSoftFlowCtl(&rp->rp_channel);
sStartRxProcessor(&rp->rp_channel);
sEnRxFIFO(&rp->rp_channel);
sEnTransmit(&rp->rp_channel);
/* sSetDTR(&rp->rp_channel);
sSetRTS(&rp->rp_channel);
*/
IntMask = sGetChanIntID(&rp->rp_channel);
IntMask = IntMask & rp->rp_intmask;
ChanStatus = sGetChanStatus(&rp->rp_channel);
callout_reset(&rp->rp_timer, POLL_INTERVAL, rp_do_poll, rp);
device_busy(rp->rp_ctlp->dev);
return(0);
}
static void
rpclose(struct tty *tp)
{
struct rp_port *rp;
rp = tty_softc(tp);
callout_stop(&rp->rp_timer);
rphardclose(tp);
device_unbusy(rp->rp_ctlp->dev);
}
static void
rphardclose(struct tty *tp)
{
struct rp_port *rp;
CHANNEL_t *cp;
rp = tty_softc(tp);
cp = &rp->rp_channel;
sFlushRxFIFO(cp);
sFlushTxFIFO(cp);
sDisTransmit(cp);
sDisInterrupts(cp, TXINT_EN|MCINT_EN|RXINT_EN|SRCINT_EN|CHANINT_EN);
sDisRTSFlowCtl(cp);
sDisCTSFlowCtl(cp);
sDisTxSoftFlowCtl(cp);
sClrTxXOFF(cp);
#ifdef DJA
if(tp->t_cflag&HUPCL || !(tp->t_state&TS_ISOPEN) || !tp->t_actout) {
sClrDTR(cp);
}
if(ISCALLOUT(tp->t_dev)) {
sClrDTR(cp);
}
tp->t_actout = FALSE;
wakeup(&tp->t_actout);
wakeup(TSA_CARR_ON(tp));
#endif /* DJA */
}
static int
rpioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td)
{
struct rp_port *rp;
rp = tty_softc(tp);
switch (cmd) {
case TIOCSBRK:
sSendBreak(&rp->rp_channel);
return (0);
case TIOCCBRK:
sClrBreak(&rp->rp_channel);
return (0);
default:
return ENOIOCTL;
}
}
static int
rpmodem(struct tty *tp, int sigon, int sigoff)
{
struct rp_port *rp;
int i, j, k;
rp = tty_softc(tp);
if (sigon != 0 || sigoff != 0) {
i = j = 0;
if (sigon & SER_DTR)
i = SET_DTR;
if (sigoff & SER_DTR)
j = SET_DTR;
if (sigon & SER_RTS)
i = SET_RTS;
if (sigoff & SER_RTS)
j = SET_RTS;
rp->rp_channel.TxControl[3] &= ~i;
rp->rp_channel.TxControl[3] |= j;
rp_writech4(&rp->rp_channel,_INDX_ADDR,
le32dec(rp->rp_channel.TxControl));
} else {
i = sGetChanStatusLo(&rp->rp_channel);
j = rp->rp_channel.TxControl[3];
k = 0;
if (j & SET_DTR)
k |= SER_DTR;
if (j & SET_RTS)
k |= SER_RTS;
if (i & CD_ACT)
k |= SER_DCD;
if (i & DSR_ACT)
k |= SER_DSR;
if (i & CTS_ACT)
k |= SER_CTS;
return(k);
}
return (0);
}
static struct
{
int baud;
int conversion;
} baud_table[] = {
{B0, 0}, {B50, BRD50}, {B75, BRD75},
{B110, BRD110}, {B134, BRD134}, {B150, BRD150},
{B200, BRD200}, {B300, BRD300}, {B600, BRD600},
{B1200, BRD1200}, {B1800, BRD1800}, {B2400, BRD2400},
{B4800, BRD4800}, {B9600, BRD9600}, {B19200, BRD19200},
{B38400, BRD38400}, {B7200, BRD7200}, {B14400, BRD14400},
{B57600, BRD57600}, {B76800, BRD76800},
{B115200, BRD115200}, {B230400, BRD230400},
{-1, -1}
};
static int rp_convert_baud(int baud) {
int i;
for (i = 0; baud_table[i].baud >= 0; i++) {
if (baud_table[i].baud == baud)
break;
}
return baud_table[i].conversion;
}
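/*
 * rp_convert_baud() above relies on the {-1, -1} sentinel entry to terminate
 * the linear scan, so any unknown rate falls through to the sentinel and maps
 * to -1. A self-contained sketch of the same pattern with hypothetical table
 * values (not the BRD* divisors used by the driver):
 */
#include <stdio.h>

struct rate {
	int baud;
	int divisor;
};

static const struct rate rate_table[] = {
	{ 9600, 24 }, { 19200, 12 }, { 38400, 6 },
	{ -1, -1 }	/* sentinel: unknown rates map to -1 */
};

static int
rate_lookup(int baud)
{
	int i;

	for (i = 0; rate_table[i].baud >= 0; i++)
		if (rate_table[i].baud == baud)
			break;
	return (rate_table[i].divisor);	/* -1 when only the sentinel matched */
}

int
main(void)
{
	printf("%d %d\n", rate_lookup(19200), rate_lookup(300));	/* 12 -1 */
	return (0);
}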
static int
rpparam(tp, t)
struct tty *tp;
struct termios *t;
{
struct rp_port *rp;
CHANNEL_t *cp;
int cflag, iflag, oflag, lflag;
int ospeed;
#ifdef RPCLOCAL
int devshift;
#endif
rp = tty_softc(tp);
cp = &rp->rp_channel;
cflag = t->c_cflag;
#ifdef RPCLOCAL
devshift = umynor / 32;
devshift = 1 << devshift;
if ( devshift & RPCLOCAL ) {
cflag |= CLOCAL;
}
#endif
iflag = t->c_iflag;
oflag = t->c_oflag;
lflag = t->c_lflag;
ospeed = rp_convert_baud(t->c_ispeed);
if(ospeed < 0 || t->c_ispeed != t->c_ospeed)
return(EINVAL);
if(t->c_ospeed == 0) {
sClrDTR(cp);
return(0);
}
rp->rp_fifo_lw = ((t->c_ospeed*2) / 1000) +1;
/* Set baud rate ----- we only pay attention to ispeed */
sSetDTR(cp);
sSetRTS(cp);
sSetBaud(cp, ospeed);
if(cflag & CSTOPB) {
sSetStop2(cp);
} else {
sSetStop1(cp);
}
if(cflag & PARENB) {
sEnParity(cp);
if(cflag & PARODD) {
sSetOddParity(cp);
} else {
sSetEvenParity(cp);
}
}
else {
sDisParity(cp);
}
if((cflag & CSIZE) == CS8) {
sSetData8(cp);
rp->rp_imask = 0xFF;
} else {
sSetData7(cp);
rp->rp_imask = 0x7F;
}
if(iflag & ISTRIP) {
rp->rp_imask &= 0x7F;
}
if(cflag & CLOCAL) {
rp->rp_intmask &= ~DELTA_CD;
} else {
rp->rp_intmask |= DELTA_CD;
}
/* Put flow control stuff here */
if(cflag & CCTS_OFLOW) {
sEnCTSFlowCtl(cp);
} else {
sDisCTSFlowCtl(cp);
}
if(cflag & CRTS_IFLOW) {
rp->rp_rts_iflow = 1;
} else {
rp->rp_rts_iflow = 0;
}
if(cflag & CRTS_IFLOW) {
sEnRTSFlowCtl(cp);
} else {
sDisRTSFlowCtl(cp);
}
return(0);
}
static void
rpstart(struct tty *tp)
{
struct rp_port *rp;
CHANNEL_t *cp;
char flags;
int xmit_fifo_room;
int i, count, wcount;
rp = tty_softc(tp);
cp = &rp->rp_channel;
flags = rp->rp_channel.TxControl[3];
if(rp->rp_xmit_stopped) {
sEnTransmit(cp);
rp->rp_xmit_stopped = 0;
}
xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp);
count = ttydisc_getc(tp, &rp->TxBuf, xmit_fifo_room);
if(xmit_fifo_room > 0) {
for( i = 0, wcount = count >> 1; wcount > 0; i += 2, wcount-- ) {
rp_writech2(cp, sGetTxRxDataIO(cp), le16dec(&rp->TxBuf[i]));
}
if ( count & 1 ) {
rp_writech1(cp, sGetTxRxDataIO(cp), rp->TxBuf[(count-1)]);
}
}
}
Index: head/sys/dev/rp/rp_isa.c
===================================================================
--- head/sys/dev/rp/rp_isa.c (revision 328217)
+++ head/sys/dev/rp/rp_isa.c (revision 328218)
@@ -1,509 +1,507 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) Comtrol Corporation <support@comtrol.com>
* All rights reserved.
*
* ISA-specific part separated from:
* sys/i386/isa/rp.c,v 1.33 1999/09/28 11:45:27 phk Exp
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met.
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Comtrol Corporation.
* 4. The name of Comtrol Corporation may not be used to endorse or
* promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/tty.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/bus.h>
#include <sys/rman.h>
#define ROCKET_C
#include <dev/rp/rpreg.h>
#include <dev/rp/rpvar.h>
#include <isa/isavar.h>
/* ISA-specific part of CONTROLLER_t */
struct ISACONTROLLER_T {
int MBaseIO; /* rid of the Mudbac controller for this controller */
int MReg0IO; /* offset0 of the Mudbac controller for this controller */
int MReg1IO; /* offset1 of the Mudbac controller for this controller */
int MReg2IO; /* offset2 of the Mudbac controller for this controller */
int MReg3IO; /* offset3 of the Mudbac controller for this controller */
Byte_t MReg2;
Byte_t MReg3;
};
typedef struct ISACONTROLLER_T ISACONTROLLER_t;
#define ISACTL(ctlp) ((ISACONTROLLER_t *)((ctlp)->bus_ctlp))
/***************************************************************************
Function: sControllerEOI
Purpose: Strobe the MUDBAC's End Of Interrupt bit.
Call: sControllerEOI(MudbacCtlP,CtlP)
CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure
CONTROLLER_T *CtlP; Ptr to controller structure
*/
#define sControllerEOI(MudbacCtlP,CtlP) \
rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg2IO,ISACTL(CtlP)->MReg2 | INT_STROB)
/***************************************************************************
Function: sDisAiop
Purpose: Disable I/O access to an AIOP
Call: sDisAiop(MudbacCtlP,CtlP)
CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure
CONTROLLER_T *CtlP; Ptr to controller structure
int AiopNum; Number of AIOP on controller
*/
#define sDisAiop(MudbacCtlP,CtlP,AIOPNUM) \
{ \
ISACTL(CtlP)->MReg3 &= rp_sBitMapClrTbl[AIOPNUM]; \
rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); \
}
/***************************************************************************
Function: sEnAiop
Purpose: Enable I/O access to an AIOP
Call: sEnAiop(MudbacCtlP,CtlP)
CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure
CONTROLLER_T *CtlP; Ptr to controller structure
int AiopNum; Number of AIOP on controller
*/
#define sEnAiop(MudbacCtlP,CtlP,AIOPNUM) \
{ \
ISACTL(CtlP)->MReg3 |= rp_sBitMapSetTbl[AIOPNUM]; \
rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); \
}
/***************************************************************************
Function: sGetControllerIntStatus
Purpose: Get the controller interrupt status
Call: sGetControllerIntStatus(MudbacCtlP,CtlP)
CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure
CONTROLLER_T *CtlP; Ptr to controller structure
Return: Byte_t: The controller interrupt status in the lower 4
bits. Bits 0 through 3 represent AIOPs 0
through 3 respectively. If a bit is set, that
AIOP is interrupting. Bits 4 through 7 will
always be cleared.
*/
#define sGetControllerIntStatus(MudbacCtlP,CtlP) \
(rp_readio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg1IO) & 0x0f)
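/*
 * As the comment above notes, only the low four bits of the returned status
 * are meaningful: bit n set means AIOP n is interrupting. An illustrative
 * userland sketch of decoding such a mask (the driver itself does this with
 * sGetAiopIntStatus() and the rp_sBitMap tables):
 */
#include <stdio.h>

static void
report_aiops(unsigned char status)
{
	int aiop;

	for (aiop = 0; aiop < 4; aiop++)
		if (status & (1 << aiop))
			printf("AIOP %d is interrupting\n", aiop);
}

int
main(void)
{
	report_aiops(0x05);	/* bits 0 and 2 set: AIOPs 0 and 2 */
	return (0);
}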
static devclass_t rp_devclass;
static CONTROLLER_t *rp_controller;
static int rp_nisadevs;
static int rp_probe(device_t dev);
static int rp_attach(device_t dev);
static void rp_isareleaseresource(CONTROLLER_t *ctlp);
static int sInitController(CONTROLLER_T *CtlP,
CONTROLLER_T *MudbacCtlP,
int AiopNum,
int IRQNum,
Byte_t Frequency,
int PeriodicOnly);
static rp_aiop2rid_t rp_isa_aiop2rid;
static rp_aiop2off_t rp_isa_aiop2off;
static rp_ctlmask_t rp_isa_ctlmask;
static int
rp_probe(device_t dev)
{
int unit;
CONTROLLER_t *controller;
int num_aiops;
CONTROLLER_t *ctlp;
int retval;
/*
* We have no PnP RocketPort cards.
* (At least according to LINT)
*/
if (isa_get_logicalid(dev) != 0)
return (ENXIO);
/* We need IO port resource to configure an ISA device. */
if (bus_get_resource_count(dev, SYS_RES_IOPORT, 0) == 0)
return (ENXIO);
unit = device_get_unit(dev);
if (unit >= 4) {
device_printf(dev, "rpprobe: unit number %d invalid.\n", unit);
return (ENXIO);
}
device_printf(dev, "probing for RocketPort(ISA) unit %d.\n", unit);
ctlp = device_get_softc(dev);
bzero(ctlp, sizeof(*ctlp));
ctlp->dev = dev;
ctlp->aiop2rid = rp_isa_aiop2rid;
ctlp->aiop2off = rp_isa_aiop2off;
ctlp->ctlmask = rp_isa_ctlmask;
/* The IO ports of AIOPs for an ISA controller are discrete. */
ctlp->io_num = 1;
- ctlp->io_rid = mallocarray(MAX_AIOPS_PER_BOARD, sizeof(*(ctlp->io_rid)),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- ctlp->io = mallocarray(MAX_AIOPS_PER_BOARD, sizeof(*(ctlp->io)),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ ctlp->io_rid = malloc(sizeof(*(ctlp->io_rid)) * MAX_AIOPS_PER_BOARD, M_DEVBUF, M_NOWAIT | M_ZERO);
+ ctlp->io = malloc(sizeof(*(ctlp->io)) * MAX_AIOPS_PER_BOARD, M_DEVBUF, M_NOWAIT | M_ZERO);
if (ctlp->io_rid == NULL || ctlp->io == NULL) {
device_printf(dev, "rp_attach: Out of memory.\n");
retval = ENOMEM;
goto nogo;
}
ctlp->bus_ctlp = malloc(sizeof(ISACONTROLLER_t) * 1, M_DEVBUF, M_NOWAIT | M_ZERO);
if (ctlp->bus_ctlp == NULL) {
device_printf(dev, "rp_attach: Out of memory.\n");
retval = ENOMEM;
goto nogo;
}
ctlp->io_rid[0] = 0;
if (rp_controller != NULL) {
controller = rp_controller;
ctlp->io[0] = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], 0x40, RF_ACTIVE);
} else {
controller = rp_controller = ctlp;
ctlp->io[0] = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], 0x44, RF_ACTIVE);
}
if (ctlp->io[0] == NULL) {
device_printf(dev, "rp_attach: Resource not available.\n");
retval = ENXIO;
goto nogo;
}
num_aiops = sInitController(ctlp,
controller,
MAX_AIOPS_PER_BOARD, 0,
FREQ_DIS, 0);
if (num_aiops <= 0) {
device_printf(dev, "board%d init failed.\n", unit);
retval = ENXIO;
goto nogo;
}
if (rp_controller == NULL)
rp_controller = controller;
rp_nisadevs++;
device_set_desc(dev, "RocketPort ISA");
return (0);
nogo:
rp_isareleaseresource(ctlp);
return (retval);
}
static int
rp_attach(device_t dev)
{
int unit;
int num_ports, num_aiops;
int aiop;
CONTROLLER_t *ctlp;
int retval;
unit = device_get_unit(dev);
ctlp = device_get_softc(dev);
#ifdef notdef
num_aiops = sInitController(ctlp,
rp_controller,
MAX_AIOPS_PER_BOARD, 0,
FREQ_DIS, 0);
#else
num_aiops = ctlp->NumAiop;
#endif /* notdef */
num_ports = 0;
for(aiop=0; aiop < num_aiops; aiop++) {
sResetAiopByNum(ctlp, aiop);
sEnAiop(rp_controller, ctlp, aiop);
num_ports += sGetAiopNumChan(ctlp, aiop);
}
retval = rp_attachcommon(ctlp, num_aiops, num_ports);
if (retval != 0)
goto nogo;
return (0);
nogo:
rp_isareleaseresource(ctlp);
return (retval);
}
static void
rp_isareleaseresource(CONTROLLER_t *ctlp)
{
int i;
rp_releaseresource(ctlp);
if (ctlp == rp_controller)
rp_controller = NULL;
if (ctlp->io != NULL) {
for (i = 0 ; i < MAX_AIOPS_PER_BOARD ; i++)
if (ctlp->io[i] != NULL)
bus_release_resource(ctlp->dev, SYS_RES_IOPORT, ctlp->io_rid[i], ctlp->io[i]);
free(ctlp->io, M_DEVBUF);
}
if (ctlp->io_rid != NULL)
free(ctlp->io_rid, M_DEVBUF);
if (rp_controller != NULL && rp_controller->io[ISACTL(ctlp)->MBaseIO] != NULL) {
bus_release_resource(rp_controller->dev, SYS_RES_IOPORT, rp_controller->io_rid[ISACTL(ctlp)->MBaseIO], rp_controller->io[ISACTL(ctlp)->MBaseIO]);
rp_controller->io[ISACTL(ctlp)->MBaseIO] = NULL;
rp_controller->io_rid[ISACTL(ctlp)->MBaseIO] = 0;
}
if (ctlp->bus_ctlp != NULL)
free(ctlp->bus_ctlp, M_DEVBUF);
}
/***************************************************************************
Function: sInitController
Purpose: Initialization of controller global registers and controller
structure.
Call: sInitController(CtlP,MudbacCtlP,AiopNum,
IRQNum,Frequency,PeriodicOnly)
CONTROLLER_T *CtlP; Ptr to controller structure
CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure
int AiopNum; Number of Aiops
int IRQNum; Interrupt Request number. Can be any of the following:
0: Disable global interrupts
3: IRQ 3
4: IRQ 4
5: IRQ 5
9: IRQ 9
10: IRQ 10
11: IRQ 11
12: IRQ 12
15: IRQ 15
Byte_t Frequency: A flag identifying the frequency
of the periodic interrupt, can be any one of the following:
FREQ_DIS - periodic interrupt disabled
FREQ_137HZ - 137 Hertz
FREQ_69HZ - 69 Hertz
FREQ_34HZ - 34 Hertz
FREQ_17HZ - 17 Hertz
FREQ_9HZ - 9 Hertz
FREQ_4HZ - 4 Hertz
If IRQNum is set to 0 the Frequency parameter is
overridden; it is forced to a value of FREQ_DIS.
int PeriodicOnly: TRUE if all interrupts except the periodic
interrupt are to be blocked.
FALSE if both the periodic interrupt and
other channel interrupts are allowed.
If IRQNum is set to 0 the PeriodicOnly parameter is
overridden; it is forced to a value of FALSE.
Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller
initialization failed.
Comments:
If periodic interrupts are to be disabled but AIOP interrupts
are allowed, set Frequency to FREQ_DIS and PeriodicOnly to FALSE.
If interrupts are to be completely disabled set IRQNum to 0.
Setting Frequency to FREQ_DIS and PeriodicOnly to TRUE is an
invalid combination.
This function performs initialization of global interrupt modes,
but it does not actually enable global interrupts. To enable
and disable global interrupts use functions sEnGlobalInt() and
sDisGlobalInt(). Enabling of global interrupts is normally not
done until all other initializations are complete.
Even if interrupts are globally enabled, they must also be
individually enabled for each channel that is to generate
interrupts.
Warnings: No range checking on any of the parameters is done.
No context switches are allowed while executing this function.
After this function all AIOPs on the controller are disabled,
they can be enabled with sEnAiop().
*/
static int
sInitController( CONTROLLER_T *CtlP,
CONTROLLER_T *MudbacCtlP,
int AiopNum,
int IRQNum,
Byte_t Frequency,
int PeriodicOnly)
{
int i;
int ctl_base, aiop_base, aiop_size;
CtlP->CtlID = CTLID_0001; /* controller release 1 */
ISACTL(CtlP)->MBaseIO = rp_nisadevs;
if (MudbacCtlP->io[ISACTL(CtlP)->MBaseIO] != NULL) {
ISACTL(CtlP)->MReg0IO = 0x40 + 0;
ISACTL(CtlP)->MReg1IO = 0x40 + 1;
ISACTL(CtlP)->MReg2IO = 0x40 + 2;
ISACTL(CtlP)->MReg3IO = 0x40 + 3;
} else {
MudbacCtlP->io_rid[ISACTL(CtlP)->MBaseIO] = ISACTL(CtlP)->MBaseIO;
ctl_base = rman_get_start(MudbacCtlP->io[0]) + 0x40 + 0x400 * rp_nisadevs;
MudbacCtlP->io[ISACTL(CtlP)->MBaseIO] = bus_alloc_resource(MudbacCtlP->dev, SYS_RES_IOPORT, &CtlP->io_rid[ISACTL(CtlP)->MBaseIO], ctl_base, ctl_base + 3, 4, RF_ACTIVE);
ISACTL(CtlP)->MReg0IO = 0;
ISACTL(CtlP)->MReg1IO = 1;
ISACTL(CtlP)->MReg2IO = 2;
ISACTL(CtlP)->MReg3IO = 3;
}
#if 1
ISACTL(CtlP)->MReg2 = 0; /* interrupt disable */
ISACTL(CtlP)->MReg3 = 0; /* no periodic interrupts */
#else
if(sIRQMap[IRQNum] == 0) /* interrupts globally disabled */
{
ISACTL(CtlP)->MReg2 = 0; /* interrupt disable */
ISACTL(CtlP)->MReg3 = 0; /* no periodic interrupts */
}
else
{
ISACTL(CtlP)->MReg2 = sIRQMap[IRQNum]; /* set IRQ number */
ISACTL(CtlP)->MReg3 = Frequency; /* set frequency */
if(PeriodicOnly) /* periodic interrupt only */
{
ISACTL(CtlP)->MReg3 |= PERIODIC_ONLY;
}
}
#endif
rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg2IO,ISACTL(CtlP)->MReg2);
rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3);
sControllerEOI(MudbacCtlP,CtlP); /* clear EOI if warm init */
/* Init AIOPs */
CtlP->NumAiop = 0;
for(i=0; i < AiopNum; i++)
{
if (CtlP->io[i] == NULL) {
CtlP->io_rid[i] = i;
aiop_base = rman_get_start(CtlP->io[0]) + 0x400 * i;
if (rp_nisadevs == 0)
aiop_size = 0x44;
else
aiop_size = 0x40;
CtlP->io[i] = bus_alloc_resource(CtlP->dev, SYS_RES_IOPORT, &CtlP->io_rid[i], aiop_base, aiop_base + aiop_size - 1, aiop_size, RF_ACTIVE);
} else
aiop_base = rman_get_start(CtlP->io[i]);
rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,
ISACTL(CtlP)->MReg2IO,
ISACTL(CtlP)->MReg2 | (i & 0x03)); /* AIOP index */
rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,
ISACTL(CtlP)->MReg0IO,
(Byte_t)(aiop_base >> 6)); /* set up AIOP I/O in MUDBAC */
sEnAiop(MudbacCtlP,CtlP,i); /* enable the AIOP */
CtlP->AiopID[i] = sReadAiopID(CtlP, i); /* read AIOP ID */
if(CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
{
sDisAiop(MudbacCtlP,CtlP,i); /* disable AIOP */
bus_release_resource(CtlP->dev, SYS_RES_IOPORT, CtlP->io_rid[i], CtlP->io[i]);
CtlP->io[i] = NULL;
break; /* done looking for AIOPs */
}
CtlP->AiopNumChan[i] = sReadAiopNumChan(CtlP, i); /* num channels in AIOP */
rp_writeaiop2(CtlP,i,_INDX_ADDR,_CLK_PRE); /* clock prescaler */
rp_writeaiop1(CtlP,i,_INDX_DATA,CLOCK_PRESC);
CtlP->NumAiop++; /* bump count of AIOPs */
sDisAiop(MudbacCtlP,CtlP,i); /* disable AIOP */
}
if(CtlP->NumAiop == 0)
return(-1);
else
return(CtlP->NumAiop);
}
/*
* ARGSUSED
* Maps (aiop, offset) to rid.
*/
static int
rp_isa_aiop2rid(int aiop, int offset)
{
/* rid equals to aiop for an ISA controller. */
return aiop;
}
/*
* ARGSUSED
* Maps (aiop, offset) to the offset of resource.
*/
static int
rp_isa_aiop2off(int aiop, int offset)
{
/* Each aiop has its own resource. */
return offset;
}
/* Read the int status for an ISA controller. */
static unsigned char
rp_isa_ctlmask(CONTROLLER_t *ctlp)
{
return sGetControllerIntStatus(rp_controller,ctlp);
}
static device_method_t rp_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, rp_probe),
DEVMETHOD(device_attach, rp_attach),
{ 0, 0 }
};
static driver_t rp_driver = {
"rp",
rp_methods,
sizeof(CONTROLLER_t),
};
/*
* rp can be attached to an isa bus.
*/
DRIVER_MODULE(rp, isa, rp_driver, rp_devclass, 0, 0);
Index: head/sys/dev/rp/rp_pci.c
===================================================================
--- head/sys/dev/rp/rp_pci.c (revision 328217)
+++ head/sys/dev/rp/rp_pci.c (revision 328218)
@@ -1,369 +1,367 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) Comtrol Corporation <support@comtrol.com>
* All rights reserved.
*
* PCI-specific part separated from:
* sys/i386/isa/rp.c,v 1.33 1999/09/28 11:45:27 phk Exp
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met.
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Comtrol Corporation.
* 4. The name of Comtrol Corporation may not be used to endorse or
* promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/tty.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/bus.h>
#include <sys/rman.h>
#define ROCKET_C
#include <dev/rp/rpreg.h>
#include <dev/rp/rpvar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
/* PCI IDs */
#define RP_VENDOR_ID 0x11FE
#define RP_DEVICE_ID_32I 0x0001
#define RP_DEVICE_ID_8I 0x0002
#define RP_DEVICE_ID_16I 0x0003
#define RP_DEVICE_ID_4Q 0x0004
#define RP_DEVICE_ID_8O 0x0005
#define RP_DEVICE_ID_8J 0x0006
#define RP_DEVICE_ID_4J 0x0007
#define RP_DEVICE_ID_6M 0x000C
#define RP_DEVICE_ID_4M 0x000D
#define RP_DEVICE_ID_UPCI_32 0x0801
#define RP_DEVICE_ID_UPCI_16 0x0803
#define RP_DEVICE_ID_UPCI_8O 0x0805
/**************************************************************************
MUDBAC remapped for PCI
**************************************************************************/
#define _CFG_INT_PCI 0x40
#define _PCI_INT_FUNC 0x3A
#define PCI_STROB 0x2000
#define INTR_EN_PCI 0x0010
/***************************************************************************
Function: sPCIControllerEOI
Purpose: Strobe the MUDBAC's End Of Interrupt bit.
Call: sPCIControllerEOI(CtlP)
CONTROLLER_T *CtlP; Ptr to controller structure
*/
#define sPCIControllerEOI(CtlP) rp_writeio2(CtlP, 0, _PCI_INT_FUNC, PCI_STROB)
/***************************************************************************
Function: sPCIGetControllerIntStatus
Purpose: Get the controller interrupt status
Call: sPCIGetControllerIntStatus(CtlP)
CONTROLLER_T *CtlP; Ptr to controller structure
Return: Byte_t: The controller interrupt status in the lower 4
bits. Bits 0 through 3 represent AIOPs 0
through 3 respectively. If a bit is set, that
AIOP is interrupting. Bits 4 through 7 will
always be cleared.
*/
#define sPCIGetControllerIntStatus(CTLP) ((rp_readio2(CTLP, 0, _PCI_INT_FUNC) >> 8) & 0x1f)
static devclass_t rp_devclass;
static int rp_pciprobe(device_t dev);
static int rp_pciattach(device_t dev);
#ifdef notdef
static int rp_pcidetach(device_t dev);
static int rp_pcishutdown(device_t dev);
#endif /* notdef */
static void rp_pcireleaseresource(CONTROLLER_t *ctlp);
static int sPCIInitController( CONTROLLER_t *CtlP,
int AiopNum,
int IRQNum,
Byte_t Frequency,
int PeriodicOnly,
int VendorDevice);
static rp_aiop2rid_t rp_pci_aiop2rid;
static rp_aiop2off_t rp_pci_aiop2off;
static rp_ctlmask_t rp_pci_ctlmask;
/*
* The following functions are the pci-specific part
* of rp driver.
*/
static int
rp_pciprobe(device_t dev)
{
char *s;
s = NULL;
if (pci_get_vendor(dev) == RP_VENDOR_ID)
s = "RocketPort PCI";
if (s != NULL) {
device_set_desc(dev, s);
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
static int
rp_pciattach(device_t dev)
{
int num_ports, num_aiops;
int aiop;
CONTROLLER_t *ctlp;
int unit;
int retval;
ctlp = device_get_softc(dev);
bzero(ctlp, sizeof(*ctlp));
ctlp->dev = dev;
unit = device_get_unit(dev);
ctlp->aiop2rid = rp_pci_aiop2rid;
ctlp->aiop2off = rp_pci_aiop2off;
ctlp->ctlmask = rp_pci_ctlmask;
/* The IO ports of AIOPs for a PCI controller are contiguous. */
ctlp->io_num = 1;
- ctlp->io_rid = mallocarray(ctlp->io_num, sizeof(*(ctlp->io_rid)),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- ctlp->io = mallocarray(ctlp->io_num, sizeof(*(ctlp->io)), M_DEVBUF,
- M_NOWAIT | M_ZERO);
+ ctlp->io_rid = malloc(sizeof(*(ctlp->io_rid)) * ctlp->io_num, M_DEVBUF, M_NOWAIT | M_ZERO);
+ ctlp->io = malloc(sizeof(*(ctlp->io)) * ctlp->io_num, M_DEVBUF, M_NOWAIT | M_ZERO);
if (ctlp->io_rid == NULL || ctlp->io == NULL) {
device_printf(dev, "rp_pciattach: Out of memory.\n");
retval = ENOMEM;
goto nogo;
}
ctlp->bus_ctlp = NULL;
switch (pci_get_device(dev)) {
case RP_DEVICE_ID_UPCI_16:
case RP_DEVICE_ID_UPCI_32:
case RP_DEVICE_ID_UPCI_8O:
ctlp->io_rid[0] = PCIR_BAR(2);
break;
default:
ctlp->io_rid[0] = PCIR_BAR(0);
break;
}
ctlp->io[0] = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
&ctlp->io_rid[0], RF_ACTIVE);
if(ctlp->io[0] == NULL) {
device_printf(dev, "ioaddr mapping failed for RocketPort(PCI).\n");
retval = ENXIO;
goto nogo;
}
num_aiops = sPCIInitController(ctlp,
MAX_AIOPS_PER_BOARD, 0,
FREQ_DIS, 0, pci_get_device(dev));
num_ports = 0;
for(aiop=0; aiop < num_aiops; aiop++) {
sResetAiopByNum(ctlp, aiop);
num_ports += sGetAiopNumChan(ctlp, aiop);
}
retval = rp_attachcommon(ctlp, num_aiops, num_ports);
if (retval != 0)
goto nogo;
return (0);
nogo:
rp_pcireleaseresource(ctlp);
return (retval);
}
static int
rp_pcidetach(device_t dev)
{
CONTROLLER_t *ctlp;
ctlp = device_get_softc(dev);
rp_pcireleaseresource(ctlp);
return (0);
}
static int
rp_pcishutdown(device_t dev)
{
CONTROLLER_t *ctlp;
ctlp = device_get_softc(dev);
rp_pcireleaseresource(ctlp);
return (0);
}
static void
rp_pcireleaseresource(CONTROLLER_t *ctlp)
{
rp_releaseresource(ctlp);
if (ctlp->io != NULL) {
if (ctlp->io[0] != NULL)
bus_release_resource(ctlp->dev, SYS_RES_IOPORT, ctlp->io_rid[0], ctlp->io[0]);
free(ctlp->io, M_DEVBUF);
ctlp->io = NULL;
}
if (ctlp->io_rid != NULL) {
free(ctlp->io_rid, M_DEVBUF);
ctlp->io = NULL;
}
}
static int
sPCIInitController( CONTROLLER_t *CtlP,
int AiopNum,
int IRQNum,
Byte_t Frequency,
int PeriodicOnly,
int VendorDevice)
{
int i;
CtlP->CtlID = CTLID_0001; /* controller release 1 */
sPCIControllerEOI(CtlP);
/* Init AIOPs */
CtlP->NumAiop = 0;
for(i=0; i < AiopNum; i++)
{
/*device_printf(CtlP->dev, "aiop %d.\n", i);*/
CtlP->AiopID[i] = sReadAiopID(CtlP, i); /* read AIOP ID */
/*device_printf(CtlP->dev, "ID = %d.\n", CtlP->AiopID[i]);*/
if(CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
{
break; /* done looking for AIOPs */
}
switch( VendorDevice ) {
case RP_DEVICE_ID_4Q:
case RP_DEVICE_ID_4J:
case RP_DEVICE_ID_4M:
CtlP->AiopNumChan[i] = 4;
break;
case RP_DEVICE_ID_6M:
CtlP->AiopNumChan[i] = 6;
break;
case RP_DEVICE_ID_8O:
case RP_DEVICE_ID_8J:
case RP_DEVICE_ID_8I:
case RP_DEVICE_ID_16I:
case RP_DEVICE_ID_32I:
CtlP->AiopNumChan[i] = 8;
break;
default:
#ifdef notdef
CtlP->AiopNumChan[i] = 8;
#else
CtlP->AiopNumChan[i] = sReadAiopNumChan(CtlP, i);
#endif /* notdef */
break;
}
/*device_printf(CtlP->dev, "%d channels.\n", CtlP->AiopNumChan[i]);*/
rp_writeaiop2(CtlP, i, _INDX_ADDR,_CLK_PRE); /* clock prescaler */
/*device_printf(CtlP->dev, "configuring clock prescaler.\n");*/
rp_writeaiop1(CtlP, i, _INDX_DATA,CLOCK_PRESC);
/*device_printf(CtlP->dev, "configured clock prescaler.\n");*/
CtlP->NumAiop++; /* bump count of AIOPs */
}
if(CtlP->NumAiop == 0)
return(-1);
else
return(CtlP->NumAiop);
}
/*
* ARGSUSED
* Maps (aiop, offset) to rid.
*/
static int
rp_pci_aiop2rid(int aiop, int offset)
{
/* Always return zero for a PCI controller. */
return 0;
}
/*
* ARGSUSED
* Maps (aiop, offset) to the offset of resource.
*/
static int
rp_pci_aiop2off(int aiop, int offset)
{
/* Each AIOP reserves 0x40 bytes. */
return aiop * 0x40 + offset;
}
/* Read the int status for a PCI controller. */
static unsigned char
rp_pci_ctlmask(CONTROLLER_t *ctlp)
{
return sPCIGetControllerIntStatus(ctlp);
}
static device_method_t rp_pcimethods[] = {
/* Device interface */
DEVMETHOD(device_probe, rp_pciprobe),
DEVMETHOD(device_attach, rp_pciattach),
DEVMETHOD(device_detach, rp_pcidetach),
DEVMETHOD(device_shutdown, rp_pcishutdown),
{ 0, 0 }
};
static driver_t rp_pcidriver = {
"rp",
rp_pcimethods,
sizeof(CONTROLLER_t),
};
/*
* rp can be attached to a pci bus.
*/
DRIVER_MODULE(rp, pci, rp_pcidriver, rp_devclass, 0, 0);
Index: head/sys/dev/sound/midi/midi.c
===================================================================
--- head/sys/dev/sound/midi/midi.c (revision 328217)
+++ head/sys/dev/sound/midi/midi.c (revision 328218)
@@ -1,1543 +1,1542 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-NetBSD
*
* Copyright (c) 2003 Mathew Kanner
* Copyright (c) 1998 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Lennart Augustsson (augustss@netbsd.org).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Parts of this file started out as NetBSD: midi.c 1.31
* They are mostly gone. Still the most obvious will be the state
* machine midi_in
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/conf.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/sbuf.h>
#include <sys/kobj.h>
#include <sys/module.h>
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif
#include <dev/sound/midi/midi.h>
#include "mpu_if.h"
#include <dev/sound/midi/midiq.h>
#include "synth_if.h"
MALLOC_DEFINE(M_MIDI, "midi buffers", "Midi data allocation area");
#ifndef KOBJMETHOD_END
#define KOBJMETHOD_END { NULL, NULL }
#endif
#define PCMMKMINOR(u, d, c) ((((c) & 0xff) << 16) | (((u) & 0x0f) << 4) | ((d) & 0x0f))
#define MIDIMKMINOR(u, d, c) PCMMKMINOR(u, d, c)
#define MIDI_DEV_RAW 2
#define MIDI_DEV_MIDICTL 12
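/*
 * PCMMKMINOR/MIDIMKMINOR above pack the channel into bits 16-23, the unit into
 * bits 4-7 and the device class into bits 0-3 of the minor number. A small
 * standalone sketch of the same packing, for illustration only:
 */
#include <stdio.h>

static unsigned int
mkminor(unsigned int u, unsigned int d, unsigned int c)
{
	return (((c & 0xff) << 16) | ((u & 0x0f) << 4) | (d & 0x0f));
}

int
main(void)
{
	/* midi0.1 raw device: unit 0, device class MIDI_DEV_RAW (2), channel 1 */
	printf("0x%06x\n", mkminor(0, 2, 1));	/* prints 0x010002 */
	return (0);
}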
enum midi_states {
MIDI_IN_START, MIDI_IN_SYSEX, MIDI_IN_DATA
};
/*
* The MPU interface currently has init(), uninit(), inqsize(), outqsize() and
* callback(): fiddle with the tx|rx status.
*/
#include "mpu_if.h"
/*
* /dev/rmidi Structure definitions
*/
#define MIDI_NAMELEN 16
struct snd_midi {
KOBJ_FIELDS;
struct mtx lock; /* Protects all but queues */
void *cookie;
int unit; /* Should only be used in midistat */
int channel; /* Should only be used in midistat */
int busy;
int flags; /* File flags */
char name[MIDI_NAMELEN];
struct mtx qlock; /* Protects inq, outq and flags */
MIDIQ_HEAD(, char) inq, outq;
int rchan, wchan;
struct selinfo rsel, wsel;
int hiwat; /* QLEN(outq)>High-water -> disable
* writes from userland */
enum midi_states inq_state;
int inq_status, inq_left; /* Variables for the state machine in
* midi_in; this ensures that signals
* only get issued for complete
* command packets. */
struct proc *async;
struct cdev *dev;
struct synth_midi *synth;
int synth_flags;
TAILQ_ENTRY(snd_midi) link;
};
struct synth_midi {
KOBJ_FIELDS;
struct snd_midi *m;
};
static synth_open_t midisynth_open;
static synth_close_t midisynth_close;
static synth_writeraw_t midisynth_writeraw;
static synth_killnote_t midisynth_killnote;
static synth_startnote_t midisynth_startnote;
static synth_setinstr_t midisynth_setinstr;
static synth_alloc_t midisynth_alloc;
static synth_controller_t midisynth_controller;
static synth_bender_t midisynth_bender;
static kobj_method_t midisynth_methods[] = {
KOBJMETHOD(synth_open, midisynth_open),
KOBJMETHOD(synth_close, midisynth_close),
KOBJMETHOD(synth_writeraw, midisynth_writeraw),
KOBJMETHOD(synth_setinstr, midisynth_setinstr),
KOBJMETHOD(synth_startnote, midisynth_startnote),
KOBJMETHOD(synth_killnote, midisynth_killnote),
KOBJMETHOD(synth_alloc, midisynth_alloc),
KOBJMETHOD(synth_controller, midisynth_controller),
KOBJMETHOD(synth_bender, midisynth_bender),
KOBJMETHOD_END
};
DEFINE_CLASS(midisynth, midisynth_methods, 0);
/*
* Module Exports & Interface
*
* struct midi_chan *midi_init(MPU_CLASS cls, int unit, int chan,
* void *cookie)
* int midi_uninit(struct snd_midi *)
*
* 0 == no error
* EBUSY or other error
*
* int midi_in(struct snd_midi *, char *buf, int count)
* int midi_out(struct snd_midi *, char *buf, int count)
*
* midi_{in,out} return the actual size transferred
*
*/
/*
* midi_devs tailq, holder of all rmidi instances protected by midistat_lock
*/
TAILQ_HEAD(, snd_midi) midi_devs;
/*
* /dev/midistat variables and declarations, protected by midistat_lock
*/
static struct mtx midistat_lock;
static int midistat_isopen = 0;
static struct sbuf midistat_sbuf;
static int midistat_bufptr;
static struct cdev *midistat_dev;
/*
* /dev/midistat dev_t declarations
*/
static d_open_t midistat_open;
static d_close_t midistat_close;
static d_read_t midistat_read;
static struct cdevsw midistat_cdevsw = {
.d_version = D_VERSION,
.d_open = midistat_open,
.d_close = midistat_close,
.d_read = midistat_read,
.d_name = "midistat",
};
/*
* /dev/rmidi dev_t declarations, struct variable access is protected by
* locks contained within the structure.
*/
static d_open_t midi_open;
static d_close_t midi_close;
static d_ioctl_t midi_ioctl;
static d_read_t midi_read;
static d_write_t midi_write;
static d_poll_t midi_poll;
static struct cdevsw midi_cdevsw = {
.d_version = D_VERSION,
.d_open = midi_open,
.d_close = midi_close,
.d_read = midi_read,
.d_write = midi_write,
.d_ioctl = midi_ioctl,
.d_poll = midi_poll,
.d_name = "rmidi",
};
/*
* Prototypes of library functions
*/
static int midi_destroy(struct snd_midi *, int);
static int midistat_prepare(struct sbuf * s);
static int midi_load(void);
static int midi_unload(void);
/*
* Misc declr.
*/
SYSCTL_NODE(_hw, OID_AUTO, midi, CTLFLAG_RD, 0, "Midi driver");
static SYSCTL_NODE(_hw_midi, OID_AUTO, stat, CTLFLAG_RD, 0, "Status device");
int midi_debug;
/* XXX: should this be moved into debug.midi? */
SYSCTL_INT(_hw_midi, OID_AUTO, debug, CTLFLAG_RW, &midi_debug, 0, "");
int midi_dumpraw;
SYSCTL_INT(_hw_midi, OID_AUTO, dumpraw, CTLFLAG_RW, &midi_dumpraw, 0, "");
int midi_instroff;
SYSCTL_INT(_hw_midi, OID_AUTO, instroff, CTLFLAG_RW, &midi_instroff, 0, "");
int midistat_verbose;
SYSCTL_INT(_hw_midi_stat, OID_AUTO, verbose, CTLFLAG_RW,
&midistat_verbose, 0, "");
#define MIDI_DEBUG(l,a) if(midi_debug>=l) a
/*
* CODE START
*/
/*
* Register a new rmidi device. cls is the midi_if interface class; unit == 0
* means auto-assign a new unit number, unit != 0 means the unit number is
* already assigned, e.g. this is not the first channel provided by this
* device. channel is the sub-unit; cookie is passed back on MPU calls.
* Typical device drivers will call with unit=0, channel=1..(number of
* channels) and cookie=soft_c and won't care what unit number is used.
*
* It is an error to call midi_init with an already used unit/channel combo.
*
* Returns NULL on error
*
*/
struct snd_midi *
midi_init(kobj_class_t cls, int unit, int channel, void *cookie)
{
struct snd_midi *m;
int i;
int inqsize, outqsize;
MIDI_TYPE *buf;
MIDI_DEBUG(1, printf("midiinit: unit %d/%d.\n", unit, channel));
mtx_lock(&midistat_lock);
/*
* Protect against call with existing unit/channel or auto-allocate a
* new unit number.
*/
i = -1;
TAILQ_FOREACH(m, &midi_devs, link) {
mtx_lock(&m->lock);
if (unit != 0) {
if (m->unit == unit && m->channel == channel) {
mtx_unlock(&m->lock);
goto err0;
}
} else {
/*
* Find a better unit number
*/
if (m->unit > i)
i = m->unit;
}
mtx_unlock(&m->lock);
}
if (unit == 0)
unit = i + 1;
MIDI_DEBUG(1, printf("midiinit #2: unit %d/%d.\n", unit, channel));
m = malloc(sizeof(*m), M_MIDI, M_NOWAIT | M_ZERO);
if (m == NULL)
goto err0;
m->synth = malloc(sizeof(*m->synth), M_MIDI, M_NOWAIT | M_ZERO);
if (m->synth == NULL)
goto err1;
kobj_init((kobj_t)m->synth, &midisynth_class);
m->synth->m = m;
kobj_init((kobj_t)m, cls);
inqsize = MPU_INQSIZE(m, cookie);
outqsize = MPU_OUTQSIZE(m, cookie);
MIDI_DEBUG(1, printf("midiinit queues %d/%d.\n", inqsize, outqsize));
if (!inqsize && !outqsize)
goto err2;
mtx_init(&m->lock, "raw midi", NULL, 0);
mtx_init(&m->qlock, "q raw midi", NULL, 0);
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
if (inqsize)
- buf = mallocarray(inqsize, sizeof(MIDI_TYPE), M_MIDI, M_NOWAIT);
+ buf = malloc(sizeof(MIDI_TYPE) * inqsize, M_MIDI, M_NOWAIT);
else
buf = NULL;
MIDIQ_INIT(m->inq, buf, inqsize);
if (outqsize)
- buf = mallocarray(outqsize, sizeof(MIDI_TYPE), M_MIDI,
- M_NOWAIT);
+ buf = malloc(sizeof(MIDI_TYPE) * outqsize, M_MIDI, M_NOWAIT);
else
buf = NULL;
m->hiwat = outqsize / 2;
MIDIQ_INIT(m->outq, buf, outqsize);
if ((inqsize && !MIDIQ_BUF(m->inq)) ||
(outqsize && !MIDIQ_BUF(m->outq)))
goto err3;
m->busy = 0;
m->flags = 0;
m->unit = unit;
m->channel = channel;
m->cookie = cookie;
if (MPU_INIT(m, cookie))
goto err3;
mtx_unlock(&m->lock);
mtx_unlock(&m->qlock);
TAILQ_INSERT_TAIL(&midi_devs, m, link);
mtx_unlock(&midistat_lock);
m->dev = make_dev(&midi_cdevsw,
MIDIMKMINOR(unit, MIDI_DEV_RAW, channel),
UID_ROOT, GID_WHEEL, 0666, "midi%d.%d", unit, channel);
m->dev->si_drv1 = m;
return m;
err3: mtx_destroy(&m->qlock);
mtx_destroy(&m->lock);
if (MIDIQ_BUF(m->inq))
free(MIDIQ_BUF(m->inq), M_MIDI);
if (MIDIQ_BUF(m->outq))
free(MIDIQ_BUF(m->outq), M_MIDI);
err2: free(m->synth, M_MIDI);
err1: free(m, M_MIDI);
err0: mtx_unlock(&midistat_lock);
MIDI_DEBUG(1, printf("midi_init ended in error\n"));
return NULL;
}
/*
* midi_uninit does not call MIDI_UNINIT, since this is the implementor's
* entry point. midi_uninit, in fact, does not send any methods. A call to
* midi_uninit is a de facto promise that you won't manipulate ch anymore.
*
*/
int
midi_uninit(struct snd_midi *m)
{
int err;
err = EBUSY;
mtx_lock(&midistat_lock);
mtx_lock(&m->lock);
if (m->busy) {
if (!(m->rchan || m->wchan))
goto err;
if (m->rchan) {
wakeup(&m->rchan);
m->rchan = 0;
}
if (m->wchan) {
wakeup(&m->wchan);
m->wchan = 0;
}
}
err = midi_destroy(m, 0);
if (!err)
goto exit;
err: mtx_unlock(&m->lock);
exit: mtx_unlock(&midistat_lock);
return err;
}
/*
* midi_in: processes all data until the queue is full, then discards the rest.
* Since midi_in is a state machine, data discards can cause it to get out of
* whack. Process as much as possible. It calls wakeup, selnotify and
* psignal at most once.
*/
#ifdef notdef
static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0};
#endif /* notdef */
/* Number of bytes in a MIDI command */
#define MIDI_LENGTH(d) (midi_lengths[((d) >> 4) & 7])
#define MIDI_ACK 0xfe
#define MIDI_IS_STATUS(d) ((d) >= 0x80)
#define MIDI_IS_COMMON(d) ((d) >= 0xf0)
#define MIDI_SYSEX_START 0xF0
#define MIDI_SYSEX_END 0xF7
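/*
 * MIDI_LENGTH above indexes the (currently #ifdef'ed out) midi_lengths table
 * by the high nibble of a channel status byte to obtain its data-byte count.
 * A standalone sketch of the same mapping, with names changed so it does not
 * clash with the macro and table above:
 */
#include <stdio.h>

/* Data bytes following a status byte, indexed by (high nibble & 7). */
static const int cmd_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0};

static int
midi_cmd_length(unsigned char status)
{
	return (cmd_lengths[(status >> 4) & 7]);
}

int
main(void)
{
	printf("note on 0x90: %d data bytes\n", midi_cmd_length(0x90));		/* 2 */
	printf("program change 0xC0: %d data byte\n", midi_cmd_length(0xC0));	/* 1 */
	return (0);
}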
int
midi_in(struct snd_midi *m, MIDI_TYPE *buf, int size)
{
/* int i, sig, enq; */
int used;
/* MIDI_TYPE data; */
MIDI_DEBUG(5, printf("midi_in: m=%p size=%d\n", m, size));
/*
* XXX: locking flub
*/
if (!(m->flags & M_RX))
return size;
used = 0;
mtx_lock(&m->qlock);
#if 0
/*
* Don't bother queuing if not in read mode. Discard everything and
* return size so the caller doesn't freak out.
*/
if (!(m->flags & M_RX))
return size;
for (i = sig = 0; i < size; i++) {
data = buf[i];
enq = 0;
if (data == MIDI_ACK)
continue;
switch (m->inq_state) {
case MIDI_IN_START:
if (MIDI_IS_STATUS(data)) {
switch (data) {
case 0xf0: /* Sysex */
m->inq_state = MIDI_IN_SYSEX;
break;
case 0xf1: /* MTC quarter frame */
case 0xf3: /* Song select */
m->inq_state = MIDI_IN_DATA;
enq = 1;
m->inq_left = 1;
break;
case 0xf2: /* Song position pointer */
m->inq_state = MIDI_IN_DATA;
enq = 1;
m->inq_left = 2;
break;
default:
if (MIDI_IS_COMMON(data)) {
enq = 1;
sig = 1;
} else {
m->inq_state = MIDI_IN_DATA;
enq = 1;
m->inq_status = data;
m->inq_left = MIDI_LENGTH(data);
}
break;
}
} else if (MIDI_IS_STATUS(m->inq_status)) {
m->inq_state = MIDI_IN_DATA;
if (!MIDIQ_FULL(m->inq)) {
used++;
MIDIQ_ENQ(m->inq, &m->inq_status, 1);
}
enq = 1;
m->inq_left = MIDI_LENGTH(m->inq_status) - 1;
}
break;
/*
* End of case MIDI_IN_START:
*/
case MIDI_IN_DATA:
enq = 1;
if (--m->inq_left <= 0)
sig = 1;/* deliver data */
break;
case MIDI_IN_SYSEX:
if (data == MIDI_SYSEX_END)
m->inq_state = MIDI_IN_START;
break;
}
if (enq)
if (!MIDIQ_FULL(m->inq)) {
MIDIQ_ENQ(m->inq, &data, 1);
used++;
}
/*
* End of the state machines main "for loop"
*/
}
if (sig) {
#endif
MIDI_DEBUG(6, printf("midi_in: len %jd avail %jd\n",
(intmax_t)MIDIQ_LEN(m->inq),
(intmax_t)MIDIQ_AVAIL(m->inq)));
if (MIDIQ_AVAIL(m->inq) > size) {
used = size;
MIDIQ_ENQ(m->inq, buf, size);
} else {
MIDI_DEBUG(4, printf("midi_in: Discarding data qu\n"));
mtx_unlock(&m->qlock);
return 0;
}
if (m->rchan) {
wakeup(&m->rchan);
m->rchan = 0;
}
selwakeup(&m->rsel);
if (m->async) {
PROC_LOCK(m->async);
kern_psignal(m->async, SIGIO);
PROC_UNLOCK(m->async);
}
#if 0
}
#endif
mtx_unlock(&m->qlock);
return used;
}
/*
* midi_out: The only clearer of the M_TXEN flag.
*/
int
midi_out(struct snd_midi *m, MIDI_TYPE *buf, int size)
{
int used;
/*
* XXX: locking flub
*/
if (!(m->flags & M_TXEN))
return 0;
MIDI_DEBUG(2, printf("midi_out: %p\n", m));
mtx_lock(&m->qlock);
used = MIN(size, MIDIQ_LEN(m->outq));
MIDI_DEBUG(3, printf("midi_out: used %d\n", used));
if (used)
MIDIQ_DEQ(m->outq, buf, used);
if (MIDIQ_EMPTY(m->outq)) {
m->flags &= ~M_TXEN;
MPU_CALLBACKP(m, m->cookie, m->flags);
}
if (used && MIDIQ_AVAIL(m->outq) > m->hiwat) {
if (m->wchan) {
wakeup(&m->wchan);
m->wchan = 0;
}
selwakeup(&m->wsel);
if (m->async) {
PROC_LOCK(m->async);
kern_psignal(m->async, SIGIO);
PROC_UNLOCK(m->async);
}
}
mtx_unlock(&m->qlock);
return used;
}
/*
* /dev/rmidi#.# device access functions
*/
int
midi_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
{
struct snd_midi *m = i_dev->si_drv1;
int retval;
MIDI_DEBUG(1, printf("midiopen %p %s %s\n", td,
flags & FREAD ? "M_RX" : "", flags & FWRITE ? "M_TX" : ""));
if (m == NULL)
return ENXIO;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
retval = 0;
if (flags & FREAD) {
if (MIDIQ_SIZE(m->inq) == 0)
retval = ENXIO;
else if (m->flags & M_RX)
retval = EBUSY;
if (retval)
goto err;
}
if (flags & FWRITE) {
if (MIDIQ_SIZE(m->outq) == 0)
retval = ENXIO;
else if (m->flags & M_TX)
retval = EBUSY;
if (retval)
goto err;
}
m->busy++;
m->rchan = 0;
m->wchan = 0;
m->async = 0;
if (flags & FREAD) {
m->flags |= M_RX | M_RXEN;
/*
* Only clear the inq, the outq might still have data to drain
* from a previous session
*/
MIDIQ_CLEAR(m->inq);
}
if (flags & FWRITE)
m->flags |= M_TX;
MPU_CALLBACK(m, m->cookie, m->flags);
MIDI_DEBUG(2, printf("midi_open: opened.\n"));
err: mtx_unlock(&m->qlock);
mtx_unlock(&m->lock);
return retval;
}
int
midi_close(struct cdev *i_dev, int flags, int mode, struct thread *td)
{
struct snd_midi *m = i_dev->si_drv1;
int retval;
int oldflags;
MIDI_DEBUG(1, printf("midi_close %p %s %s\n", td,
flags & FREAD ? "M_RX" : "", flags & FWRITE ? "M_TX" : ""));
if (m == NULL)
return ENXIO;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
if ((flags & FREAD && !(m->flags & M_RX)) ||
(flags & FWRITE && !(m->flags & M_TX))) {
retval = ENXIO;
goto err;
}
m->busy--;
oldflags = m->flags;
if (flags & FREAD)
m->flags &= ~(M_RX | M_RXEN);
if (flags & FWRITE)
m->flags &= ~M_TX;
if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN)))
MPU_CALLBACK(m, m->cookie, m->flags);
MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy));
mtx_unlock(&m->qlock);
mtx_unlock(&m->lock);
retval = 0;
err: return retval;
}
/*
* TODO: midi_read, per oss programmer's guide pg. 42 should return as soon
* as data is available.
*/
int
midi_read(struct cdev *i_dev, struct uio *uio, int ioflag)
{
#define MIDI_RSIZE 32
struct snd_midi *m = i_dev->si_drv1;
int retval;
int used;
char buf[MIDI_RSIZE];
MIDI_DEBUG(5, printf("midiread: count=%lu\n",
(unsigned long)uio->uio_resid));
retval = EIO;
if (m == NULL)
goto err0;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
if (!(m->flags & M_RX))
goto err1;
while (uio->uio_resid > 0) {
while (MIDIQ_EMPTY(m->inq)) {
retval = EWOULDBLOCK;
if (ioflag & O_NONBLOCK)
goto err1;
mtx_unlock(&m->lock);
m->rchan = 1;
retval = msleep(&m->rchan, &m->qlock,
PCATCH | PDROP, "midi RX", 0);
/*
* We slept, maybe things have changed since last
* dying check
*/
if (retval == EINTR)
goto err0;
if (m != i_dev->si_drv1)
retval = ENXIO;
/* if (retval && retval != ERESTART) */
if (retval)
goto err0;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
m->rchan = 0;
if (!m->busy)
goto err1;
}
MIDI_DEBUG(6, printf("midi_read start\n"));
/*
* At this point, it is certain that m->inq has data
*/
used = MIN(MIDIQ_LEN(m->inq), uio->uio_resid);
used = MIN(used, MIDI_RSIZE);
MIDI_DEBUG(6, printf("midiread: uiomove cc=%d\n", used));
MIDIQ_DEQ(m->inq, buf, used);
retval = uiomove(buf, used, uio);
if (retval)
goto err1;
}
/*
* If we made it here then the transfer is good
*/
retval = 0;
err1: mtx_unlock(&m->qlock);
mtx_unlock(&m->lock);
err0: MIDI_DEBUG(4, printf("midi_read: ret %d\n", retval));
return retval;
}
/*
* midi_write: The only setter of M_TXEN
*/
int
midi_write(struct cdev *i_dev, struct uio *uio, int ioflag)
{
#define MIDI_WSIZE 32
struct snd_midi *m = i_dev->si_drv1;
int retval;
int used;
char buf[MIDI_WSIZE];
MIDI_DEBUG(4, printf("midi_write\n"));
retval = 0;
if (m == NULL)
goto err0;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
if (!(m->flags & M_TX))
goto err1;
while (uio->uio_resid > 0) {
while (MIDIQ_AVAIL(m->outq) == 0) {
retval = EWOULDBLOCK;
if (ioflag & O_NONBLOCK)
goto err1;
mtx_unlock(&m->lock);
m->wchan = 1;
MIDI_DEBUG(3, printf("midi_write msleep\n"));
retval = msleep(&m->wchan, &m->qlock,
PCATCH | PDROP, "midi TX", 0);
/*
* We slept, maybe things have changed since last
* dying check
*/
if (retval == EINTR)
goto err0;
if (m != i_dev->si_drv1)
retval = ENXIO;
if (retval)
goto err0;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
m->wchan = 0;
if (!m->busy)
goto err1;
}
/*
* We are certain that data can be placed on the queue
*/
used = MIN(MIDIQ_AVAIL(m->outq), uio->uio_resid);
used = MIN(used, MIDI_WSIZE);
MIDI_DEBUG(5, printf("midiout: resid %zd len %jd avail %jd\n",
uio->uio_resid, (intmax_t)MIDIQ_LEN(m->outq),
(intmax_t)MIDIQ_AVAIL(m->outq)));
MIDI_DEBUG(5, printf("midi_write: uiomove cc=%d\n", used));
retval = uiomove(buf, used, uio);
if (retval)
goto err1;
MIDIQ_ENQ(m->outq, buf, used);
/*
* Inform the bottom half that data can be written
*/
if (!(m->flags & M_TXEN)) {
m->flags |= M_TXEN;
MPU_CALLBACK(m, m->cookie, m->flags);
}
}
/*
* If we made it here then the transfer is good
*/
retval = 0;
err1: mtx_unlock(&m->qlock);
mtx_unlock(&m->lock);
err0: return retval;
}
int
midi_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct thread *td)
{
return ENXIO;
}
int
midi_poll(struct cdev *i_dev, int events, struct thread *td)
{
struct snd_midi *m = i_dev->si_drv1;
int revents;
if (m == NULL)
return 0;
revents = 0;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
if (events & (POLLIN | POLLRDNORM))
if (!MIDIQ_EMPTY(m->inq))
revents |= events & (POLLIN | POLLRDNORM);
if (events & (POLLOUT | POLLWRNORM))
if (MIDIQ_AVAIL(m->outq) < m->hiwat)
revents |= events & (POLLOUT | POLLWRNORM);
if (revents == 0) {
if (events & (POLLIN | POLLRDNORM))
selrecord(td, &m->rsel);
if (events & (POLLOUT | POLLWRNORM))
selrecord(td, &m->wsel);
}
mtx_unlock(&m->lock);
mtx_unlock(&m->qlock);
return (revents);
}
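/*
 * From userland, the poll handler above is what a consumer exercises with
 * poll(2) on the raw device node. A hedged usage sketch: the device path
 * /dev/midi0.0 is assumed to exist, and error handling is abbreviated.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct pollfd pfd;
	unsigned char buf[32];
	ssize_t n;

	pfd.fd = open("/dev/midi0.0", O_RDONLY | O_NONBLOCK);
	if (pfd.fd < 0)
		return (1);
	pfd.events = POLLIN | POLLRDNORM;
	/* Wait up to five seconds for incoming MIDI data, then read it. */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & (POLLIN | POLLRDNORM))) {
		n = read(pfd.fd, buf, sizeof(buf));
		printf("read %zd byte(s)\n", n);
	}
	close(pfd.fd);
	return (0);
}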
/*
* /dev/midistat device functions
*
*/
static int
midistat_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
{
int error;
MIDI_DEBUG(1, printf("midistat_open\n"));
mtx_lock(&midistat_lock);
if (midistat_isopen) {
mtx_unlock(&midistat_lock);
return EBUSY;
}
midistat_isopen = 1;
mtx_unlock(&midistat_lock);
if (sbuf_new(&midistat_sbuf, NULL, 4096, SBUF_AUTOEXTEND) == NULL) {
error = ENXIO;
mtx_lock(&midistat_lock);
goto out;
}
mtx_lock(&midistat_lock);
midistat_bufptr = 0;
error = (midistat_prepare(&midistat_sbuf) > 0) ? 0 : ENOMEM;
out: if (error)
midistat_isopen = 0;
mtx_unlock(&midistat_lock);
return error;
}
static int
midistat_close(struct cdev *i_dev, int flags, int mode, struct thread *td)
{
MIDI_DEBUG(1, printf("midistat_close\n"));
mtx_lock(&midistat_lock);
if (!midistat_isopen) {
mtx_unlock(&midistat_lock);
return EBADF;
}
sbuf_delete(&midistat_sbuf);
midistat_isopen = 0;
mtx_unlock(&midistat_lock);
return 0;
}
static int
midistat_read(struct cdev *i_dev, struct uio *buf, int flag)
{
int l, err;
MIDI_DEBUG(4, printf("midistat_read\n"));
mtx_lock(&midistat_lock);
if (!midistat_isopen) {
mtx_unlock(&midistat_lock);
return EBADF;
}
l = min(buf->uio_resid, sbuf_len(&midistat_sbuf) - midistat_bufptr);
err = 0;
if (l > 0) {
mtx_unlock(&midistat_lock);
err = uiomove(sbuf_data(&midistat_sbuf) + midistat_bufptr, l,
buf);
mtx_lock(&midistat_lock);
} else
l = 0;
midistat_bufptr += l;
mtx_unlock(&midistat_lock);
return err;
}
/*
* Module library functions
*/
static int
midistat_prepare(struct sbuf *s)
{
struct snd_midi *m;
mtx_assert(&midistat_lock, MA_OWNED);
sbuf_printf(s, "FreeBSD Midi Driver (midi2)\n");
if (TAILQ_EMPTY(&midi_devs)) {
sbuf_printf(s, "No devices installed.\n");
sbuf_finish(s);
return sbuf_len(s);
}
sbuf_printf(s, "Installed devices:\n");
TAILQ_FOREACH(m, &midi_devs, link) {
mtx_lock(&m->lock);
sbuf_printf(s, "%s [%d/%d:%s]", m->name, m->unit, m->channel,
MPU_PROVIDER(m, m->cookie));
sbuf_printf(s, "%s", MPU_DESCR(m, m->cookie, midistat_verbose));
sbuf_printf(s, "\n");
mtx_unlock(&m->lock);
}
sbuf_finish(s);
return sbuf_len(s);
}
#ifdef notdef
/*
* Convert IOCTL command to string for debugging
*/
static char *
midi_cmdname(int cmd)
{
static struct {
int cmd;
char *name;
} *tab, cmdtab_midiioctl[] = {
#define A(x) {x, ## x}
/*
* Once we have some real IOCTLs defined, the following will
* be relevant.
*
* A(SNDCTL_MIDI_PRETIME), A(SNDCTL_MIDI_MPUMODE),
* A(SNDCTL_MIDI_MPUCMD), A(SNDCTL_SYNTH_INFO),
* A(SNDCTL_MIDI_INFO), A(SNDCTL_SYNTH_MEMAVL),
* A(SNDCTL_FM_LOAD_INSTR), A(SNDCTL_FM_4OP_ENABLE),
* A(MIOSPASSTHRU), A(MIOGPASSTHRU), A(AIONWRITE),
* A(AIOGSIZE), A(AIOSSIZE), A(AIOGFMT), A(AIOSFMT),
* A(AIOGMIX), A(AIOSMIX), A(AIOSTOP), A(AIOSYNC),
* A(AIOGCAP),
*/
#undef A
{
-1, "unknown"
},
};
for (tab = cmdtab_midiioctl; tab->cmd != cmd && tab->cmd != -1; tab++);
return tab->name;
}
#endif /* notdef */
/*
* midisynth
*/
int
midisynth_open(void *n, void *arg, int flags)
{
struct snd_midi *m = ((struct synth_midi *)n)->m;
int retval;
MIDI_DEBUG(1, printf("midisynth_open %s %s\n",
flags & FREAD ? "M_RX" : "", flags & FWRITE ? "M_TX" : ""));
if (m == NULL)
return ENXIO;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
retval = 0;
if (flags & FREAD) {
if (MIDIQ_SIZE(m->inq) == 0)
retval = ENXIO;
else if (m->flags & M_RX)
retval = EBUSY;
if (retval)
goto err;
}
if (flags & FWRITE) {
if (MIDIQ_SIZE(m->outq) == 0)
retval = ENXIO;
else if (m->flags & M_TX)
retval = EBUSY;
if (retval)
goto err;
}
m->busy++;
/*
* TODO: Consider m->async = 0;
*/
if (flags & FREAD) {
m->flags |= M_RX | M_RXEN;
/*
* Only clear the inq, the outq might still have data to drain
* from a previous session
*/
MIDIQ_CLEAR(m->inq);
m->rchan = 0;
}
if (flags & FWRITE) {
m->flags |= M_TX;
m->wchan = 0;
}
m->synth_flags = flags & (FREAD | FWRITE);
MPU_CALLBACK(m, m->cookie, m->flags);
err: mtx_unlock(&m->qlock);
mtx_unlock(&m->lock);
MIDI_DEBUG(2, printf("midisynth_open: return %d.\n", retval));
return retval;
}
int
midisynth_close(void *n)
{
struct snd_midi *m = ((struct synth_midi *)n)->m;
int retval;
int oldflags;
MIDI_DEBUG(1, printf("midisynth_close %s %s\n",
m->synth_flags & FREAD ? "M_RX" : "",
m->synth_flags & FWRITE ? "M_TX" : ""));
if (m == NULL)
return ENXIO;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
if ((m->synth_flags & FREAD && !(m->flags & M_RX)) ||
(m->synth_flags & FWRITE && !(m->flags & M_TX))) {
retval = ENXIO;
goto err;
}
m->busy--;
oldflags = m->flags;
if (m->synth_flags & FREAD)
m->flags &= ~(M_RX | M_RXEN);
if (m->synth_flags & FWRITE)
m->flags &= ~M_TX;
if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN)))
MPU_CALLBACK(m, m->cookie, m->flags);
MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy));
mtx_unlock(&m->qlock);
mtx_unlock(&m->lock);
retval = 0;
err: return retval;
}
/*
* Always blocking.
*/
int
midisynth_writeraw(void *n, uint8_t *buf, size_t len)
{
struct snd_midi *m = ((struct synth_midi *)n)->m;
int retval;
int used;
int i;
MIDI_DEBUG(4, printf("midisynth_writeraw\n"));
retval = 0;
if (m == NULL)
return ENXIO;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
if (!(m->flags & M_TX))
goto err1;
if (midi_dumpraw)
printf("midi dump: ");
while (len > 0) {
while (MIDIQ_AVAIL(m->outq) == 0) {
if (!(m->flags & M_TXEN)) {
m->flags |= M_TXEN;
MPU_CALLBACK(m, m->cookie, m->flags);
}
mtx_unlock(&m->lock);
m->wchan = 1;
MIDI_DEBUG(3, printf("midisynth_writeraw msleep\n"));
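/*
* Wait for the bottom half to drain the queue.  PCATCH lets signals
* interrupt the sleep; PDROP leaves m->qlock unlocked on return and
* m->lock was already dropped above, so both locks are retaken below.
*/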
retval = msleep(&m->wchan, &m->qlock,
PCATCH | PDROP, "midi TX", 0);
/*
* We slept; the device state may have changed while we were
* asleep, so re-check before continuing.
*/
if (retval == EINTR)
goto err0;
if (retval)
goto err0;
mtx_lock(&m->lock);
mtx_lock(&m->qlock);
m->wchan = 0;
if (!m->busy)
goto err1;
}
/*
* We are certain that data can be placed on the queue
*/
used = MIN(MIDIQ_AVAIL(m->outq), len);
used = MIN(used, MIDI_WSIZE);
MIDI_DEBUG(5,
printf("midi_synth: resid %zu len %jd avail %jd\n",
len, (intmax_t)MIDIQ_LEN(m->outq),
(intmax_t)MIDIQ_AVAIL(m->outq)));
if (midi_dumpraw)
for (i = 0; i < used; i++)
printf("%x ", buf[i]);
MIDIQ_ENQ(m->outq, buf, used);
len -= used;
/*
* Inform the bottom half that data can be written
*/
if (!(m->flags & M_TXEN)) {
m->flags |= M_TXEN;
MPU_CALLBACK(m, m->cookie, m->flags);
}
}
/*
* If we made it here, the transfer succeeded.
*/
if (midi_dumpraw)
printf("\n");
retval = 0;
err1: mtx_unlock(&m->qlock);
mtx_unlock(&m->lock);
err0: return retval;
}
static int
midisynth_killnote(void *n, uint8_t chn, uint8_t note, uint8_t vel)
{
u_char c[3];
if (note > 127 || chn > 15)
return (EINVAL);
if (vel > 127)
vel = 127;
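/*
* A note-on with velocity 0 is interpreted as note-off by MIDI
* devices; velocity 64 selects that form, while any other velocity
* is sent as an explicit note-off carrying the release velocity.
*/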
if (vel == 64) {
c[0] = 0x90 | (chn & 0x0f); /* Note on. */
c[1] = (u_char)note;
c[2] = 0;
} else {
c[0] = 0x80 | (chn & 0x0f); /* Note off. */
c[1] = (u_char)note;
c[2] = (u_char)vel;
}
return midisynth_writeraw(n, c, 3);
}
static int
midisynth_setinstr(void *n, uint8_t chn, uint16_t instr)
{
u_char c[2];
if (instr > 127 || chn > 15)
return EINVAL;
c[0] = 0xc0 | (chn & 0x0f); /* Program change. */
c[1] = instr + midi_instroff;
return midisynth_writeraw(n, c, 2);
}
static int
midisynth_startnote(void *n, uint8_t chn, uint8_t note, uint8_t vel)
{
u_char c[3];
if (note > 127 || chn > 15)
return EINVAL;
if (vel > 127)
vel = 127;
c[0] = 0x90 | (chn & 0x0f); /* Note on. */
c[1] = (u_char)note;
c[2] = (u_char)vel;
return midisynth_writeraw(n, c, 3);
}
static int
midisynth_alloc(void *n, uint8_t chan, uint8_t note)
{
return chan;
}
static int
midisynth_controller(void *n, uint8_t chn, uint8_t ctrlnum, uint16_t val)
{
u_char c[3];
if (ctrlnum > 127 || chn > 15)
return EINVAL;
c[0] = 0xb0 | (chn & 0x0f); /* Control change. */
c[1] = ctrlnum;
c[2] = val;
return midisynth_writeraw(n, c, 3);
}
static int
midisynth_bender(void *n, uint8_t chn, uint16_t val)
{
u_char c[3];
if (val > 16383 || chn > 15)
return EINVAL;
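/* Split the 14-bit bend value into two 7-bit data bytes (LSB, then MSB). */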
c[0] = 0xe0 | (chn & 0x0f); /* Pitch bend. */
c[1] = (u_char)val & 0x7f;
c[2] = (u_char)(val >> 7) & 0x7f;
return midisynth_writeraw(n, c, 3);
}
/*
* Single point of midi destruction.
*/
static int
midi_destroy(struct snd_midi *m, int midiuninit)
{
mtx_assert(&midistat_lock, MA_OWNED);
mtx_assert(&m->lock, MA_OWNED);
MIDI_DEBUG(3, printf("midi_destroy\n"));
m->dev->si_drv1 = NULL;
mtx_unlock(&m->lock); /* XXX */
destroy_dev(m->dev);
TAILQ_REMOVE(&midi_devs, m, link);
if (midiuninit)
MPU_UNINIT(m, m->cookie);
free(MIDIQ_BUF(m->inq), M_MIDI);
free(MIDIQ_BUF(m->outq), M_MIDI);
mtx_destroy(&m->qlock);
mtx_destroy(&m->lock);
free(m->synth, M_MIDI);
free(m, M_MIDI);
return 0;
}
/*
* Load and unload functions, creates the /dev/midistat device
*/
static int
midi_load(void)
{
mtx_init(&midistat_lock, "midistat lock", NULL, 0);
TAILQ_INIT(&midi_devs); /* Initialize the queue. */
midistat_dev = make_dev(&midistat_cdevsw,
MIDIMKMINOR(0, MIDI_DEV_MIDICTL, 0),
UID_ROOT, GID_WHEEL, 0666, "midistat");
return 0;
}
static int
midi_unload(void)
{
struct snd_midi *m, *tmp;
int retval;
MIDI_DEBUG(1, printf("midi_unload()\n"));
retval = EBUSY;
mtx_lock(&midistat_lock);
if (midistat_isopen)
goto exit0;
TAILQ_FOREACH_SAFE(m, &midi_devs, link, tmp) {
mtx_lock(&m->lock);
if (m->busy)
retval = EBUSY;
else
retval = midi_destroy(m, 1);
if (retval)
goto exit1;
}
mtx_unlock(&midistat_lock); /* XXX */
destroy_dev(midistat_dev);
/*
* If we made it here, the unload is complete.
*/
mtx_destroy(&midistat_lock);
return 0;
exit1:
mtx_unlock(&m->lock);
exit0:
mtx_unlock(&midistat_lock);
if (retval)
MIDI_DEBUG(2, printf("midi_unload: failed\n"));
return retval;
}
extern int seq_modevent(module_t mod, int type, void *data);
static int
midi_modevent(module_t mod, int type, void *data)
{
int retval;
retval = 0;
switch (type) {
case MOD_LOAD:
retval = midi_load();
#if 0
if (retval == 0)
retval = seq_modevent(mod, type, data);
#endif
break;
case MOD_UNLOAD:
retval = midi_unload();
#if 0
if (retval == 0)
retval = seq_modevent(mod, type, data);
#endif
break;
default:
break;
}
return retval;
}
kobj_t
midimapper_addseq(void *arg1, int *unit, void **cookie)
{
unit = NULL;
return (kobj_t)arg1;
}
int
midimapper_open(void *arg1, void **cookie)
{
int retval = 0;
struct snd_midi *m;
mtx_lock(&midistat_lock);
TAILQ_FOREACH(m, &midi_devs, link) {
retval++;
}
mtx_unlock(&midistat_lock);
return retval;
}
int
midimapper_close(void *arg1, void *cookie)
{
return 0;
}
kobj_t
midimapper_fetch_synth(void *arg, void *cookie, int unit)
{
struct snd_midi *m;
int retval = 0;
mtx_lock(&midistat_lock);
TAILQ_FOREACH(m, &midi_devs, link) {
if (unit == retval) {
mtx_unlock(&midistat_lock);
return (kobj_t)m->synth;
}
retval++;
}
mtx_unlock(&midistat_lock);
return NULL;
}
DEV_MODULE(midi, midi_modevent, NULL);
MODULE_VERSION(midi, 1);
Index: head/sys/dev/sound/pci/hda/hdaa.c
===================================================================
--- head/sys/dev/sound/pci/hda/hdaa.c (revision 328217)
+++ head/sys/dev/sound/pci/hda/hdaa.c (revision 328218)
@@ -1,7152 +1,7152 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
* Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
* Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Intel High Definition Audio (Audio function) driver for FreeBSD.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif
#include <dev/sound/pcm/sound.h>
#include <sys/ctype.h>
#include <sys/taskqueue.h>
#include <dev/sound/pci/hda/hdac.h>
#include <dev/sound/pci/hda/hdaa.h>
#include <dev/sound/pci/hda/hda_reg.h>
#include "mixer_if.h"
SND_DECLARE_FILE("$FreeBSD$");
#define hdaa_lock(devinfo) snd_mtxlock((devinfo)->lock)
#define hdaa_unlock(devinfo) snd_mtxunlock((devinfo)->lock)
#define hdaa_lockassert(devinfo) snd_mtxassert((devinfo)->lock)
#define hdaa_lockowned(devinfo) mtx_owned((devinfo)->lock)
static const struct {
const char *key;
uint32_t value;
} hdaa_quirks_tab[] = {
{ "softpcmvol", HDAA_QUIRK_SOFTPCMVOL },
{ "fixedrate", HDAA_QUIRK_FIXEDRATE },
{ "forcestereo", HDAA_QUIRK_FORCESTEREO },
{ "eapdinv", HDAA_QUIRK_EAPDINV },
{ "senseinv", HDAA_QUIRK_SENSEINV },
{ "ivref50", HDAA_QUIRK_IVREF50 },
{ "ivref80", HDAA_QUIRK_IVREF80 },
{ "ivref100", HDAA_QUIRK_IVREF100 },
{ "ovref50", HDAA_QUIRK_OVREF50 },
{ "ovref80", HDAA_QUIRK_OVREF80 },
{ "ovref100", HDAA_QUIRK_OVREF100 },
{ "ivref", HDAA_QUIRK_IVREF },
{ "ovref", HDAA_QUIRK_OVREF },
{ "vref", HDAA_QUIRK_VREF },
};
#define HDA_PARSE_MAXDEPTH 10
MALLOC_DEFINE(M_HDAA, "hdaa", "HDA Audio");
static const char *HDA_COLORS[16] = {"Unknown", "Black", "Grey", "Blue",
"Green", "Red", "Orange", "Yellow", "Purple", "Pink", "Res.A", "Res.B",
"Res.C", "Res.D", "White", "Other"};
static const char *HDA_DEVS[16] = {"Line-out", "Speaker", "Headphones", "CD",
"SPDIF-out", "Digital-out", "Modem-line", "Modem-handset", "Line-in",
"AUX", "Mic", "Telephony", "SPDIF-in", "Digital-in", "Res.E", "Other"};
static const char *HDA_CONNS[4] = {"Jack", "None", "Fixed", "Both"};
static const char *HDA_CONNECTORS[16] = {
"Unknown", "1/8", "1/4", "ATAPI", "RCA", "Optical", "Digital", "Analog",
"DIN", "XLR", "RJ-11", "Combo", "0xc", "0xd", "0xe", "Other" };
static const char *HDA_LOCS[64] = {
"0x00", "Rear", "Front", "Left", "Right", "Top", "Bottom", "Rear-panel",
"Drive-bay", "0x09", "0x0a", "0x0b", "0x0c", "0x0d", "0x0e", "0x0f",
"Internal", "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "Riser",
"0x18", "Onboard", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f",
"External", "Ext-Rear", "Ext-Front", "Ext-Left", "Ext-Right", "Ext-Top", "Ext-Bottom", "0x07",
"0x28", "0x29", "0x2a", "0x2b", "0x2c", "0x2d", "0x2e", "0x2f",
"Other", "0x31", "0x32", "0x33", "0x34", "0x35", "Other-Bott", "Lid-In",
"Lid-Out", "0x39", "0x3a", "0x3b", "0x3c", "0x3d", "0x3e", "0x3f" };
static const char *HDA_GPIO_ACTIONS[8] = {
"keep", "set", "clear", "disable", "input", "0x05", "0x06", "0x07"};
static const char *HDA_HDMI_CODING_TYPES[18] = {
"undefined", "LPCM", "AC-3", "MPEG1", "MP3", "MPEG2", "AAC-LC", "DTS",
"ATRAC", "DSD", "E-AC-3", "DTS-HD", "MLP", "DST", "WMAPro", "HE-AAC",
"HE-AACv2", "MPEG-Surround"
};
/* Default */
static uint32_t hdaa_fmt[] = {
SND_FORMAT(AFMT_S16_LE, 2, 0),
0
};
static struct pcmchan_caps hdaa_caps = {48000, 48000, hdaa_fmt, 0};
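/*
* Each entry maps a rate to converter stream format fields: "base"
* selects the 44.1 kHz base-rate bit (0 means the 48 kHz base), "mul"
* and "div" are the multiplier and divisor fields (field value + 1),
* and "valid" marks rates advertised to the pcm layer.
*/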
static const struct {
uint32_t rate;
int valid;
uint16_t base;
uint16_t mul;
uint16_t div;
} hda_rate_tab[] = {
{ 8000, 1, 0x0000, 0x0000, 0x0500 }, /* (48000 * 1) / 6 */
{ 9600, 0, 0x0000, 0x0000, 0x0400 }, /* (48000 * 1) / 5 */
{ 12000, 0, 0x0000, 0x0000, 0x0300 }, /* (48000 * 1) / 4 */
{ 16000, 1, 0x0000, 0x0000, 0x0200 }, /* (48000 * 1) / 3 */
{ 18000, 0, 0x0000, 0x1000, 0x0700 }, /* (48000 * 3) / 8 */
{ 19200, 0, 0x0000, 0x0800, 0x0400 }, /* (48000 * 2) / 5 */
{ 24000, 0, 0x0000, 0x0000, 0x0100 }, /* (48000 * 1) / 2 */
{ 28800, 0, 0x0000, 0x1000, 0x0400 }, /* (48000 * 3) / 5 */
{ 32000, 1, 0x0000, 0x0800, 0x0200 }, /* (48000 * 2) / 3 */
{ 36000, 0, 0x0000, 0x1000, 0x0300 }, /* (48000 * 3) / 4 */
{ 38400, 0, 0x0000, 0x1800, 0x0400 }, /* (48000 * 4) / 5 */
{ 48000, 1, 0x0000, 0x0000, 0x0000 }, /* (48000 * 1) / 1 */
{ 64000, 0, 0x0000, 0x1800, 0x0200 }, /* (48000 * 4) / 3 */
{ 72000, 0, 0x0000, 0x1000, 0x0100 }, /* (48000 * 3) / 2 */
{ 96000, 1, 0x0000, 0x0800, 0x0000 }, /* (48000 * 2) / 1 */
{ 144000, 0, 0x0000, 0x1000, 0x0000 }, /* (48000 * 3) / 1 */
{ 192000, 1, 0x0000, 0x1800, 0x0000 }, /* (48000 * 4) / 1 */
{ 8820, 0, 0x4000, 0x0000, 0x0400 }, /* (44100 * 1) / 5 */
{ 11025, 1, 0x4000, 0x0000, 0x0300 }, /* (44100 * 1) / 4 */
{ 12600, 0, 0x4000, 0x0800, 0x0600 }, /* (44100 * 2) / 7 */
{ 14700, 0, 0x4000, 0x0000, 0x0200 }, /* (44100 * 1) / 3 */
{ 17640, 0, 0x4000, 0x0800, 0x0400 }, /* (44100 * 2) / 5 */
{ 18900, 0, 0x4000, 0x1000, 0x0600 }, /* (44100 * 3) / 7 */
{ 22050, 1, 0x4000, 0x0000, 0x0100 }, /* (44100 * 1) / 2 */
{ 25200, 0, 0x4000, 0x1800, 0x0600 }, /* (44100 * 4) / 7 */
{ 26460, 0, 0x4000, 0x1000, 0x0400 }, /* (44100 * 3) / 5 */
{ 29400, 0, 0x4000, 0x0800, 0x0200 }, /* (44100 * 2) / 3 */
{ 33075, 0, 0x4000, 0x1000, 0x0300 }, /* (44100 * 3) / 4 */
{ 35280, 0, 0x4000, 0x1800, 0x0400 }, /* (44100 * 4) / 5 */
{ 44100, 1, 0x4000, 0x0000, 0x0000 }, /* (44100 * 1) / 1 */
{ 58800, 0, 0x4000, 0x1800, 0x0200 }, /* (44100 * 4) / 3 */
{ 66150, 0, 0x4000, 0x1000, 0x0100 }, /* (44100 * 3) / 2 */
{ 88200, 1, 0x4000, 0x0800, 0x0000 }, /* (44100 * 2) / 1 */
{ 132300, 0, 0x4000, 0x1000, 0x0000 }, /* (44100 * 3) / 1 */
{ 176400, 1, 0x4000, 0x1800, 0x0000 }, /* (44100 * 4) / 1 */
};
#define HDA_RATE_TAB_LEN (sizeof(hda_rate_tab) / sizeof(hda_rate_tab[0]))
const static char *ossnames[] = SOUND_DEVICE_NAMES;
/****************************************************************************
* Function prototypes
****************************************************************************/
static int hdaa_pcmchannel_setup(struct hdaa_chan *);
static void hdaa_widget_connection_select(struct hdaa_widget *, uint8_t);
static void hdaa_audio_ctl_amp_set(struct hdaa_audio_ctl *,
uint32_t, int, int);
static struct hdaa_audio_ctl *hdaa_audio_ctl_amp_get(struct hdaa_devinfo *,
nid_t, int, int, int);
static void hdaa_audio_ctl_amp_set_internal(struct hdaa_devinfo *,
nid_t, int, int, int, int, int, int);
static void hdaa_dump_pin_config(struct hdaa_widget *w, uint32_t conf);
static char *
hdaa_audio_ctl_ossmixer_mask2allname(uint32_t mask, char *buf, size_t len)
{
int i, first = 1;
bzero(buf, len);
for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
if (mask & (1 << i)) {
if (first == 0)
strlcat(buf, ", ", len);
strlcat(buf, ossnames[i], len);
first = 0;
}
}
return (buf);
}
static struct hdaa_audio_ctl *
hdaa_audio_ctl_each(struct hdaa_devinfo *devinfo, int *index)
{
if (devinfo == NULL ||
index == NULL || devinfo->ctl == NULL ||
devinfo->ctlcnt < 1 ||
*index < 0 || *index >= devinfo->ctlcnt)
return (NULL);
return (&devinfo->ctl[(*index)++]);
}
static struct hdaa_audio_ctl *
hdaa_audio_ctl_amp_get(struct hdaa_devinfo *devinfo, nid_t nid, int dir,
int index, int cnt)
{
struct hdaa_audio_ctl *ctl;
int i, found = 0;
if (devinfo == NULL || devinfo->ctl == NULL)
return (NULL);
i = 0;
while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) {
if (ctl->enable == 0)
continue;
if (ctl->widget->nid != nid)
continue;
if (dir && ctl->ndir != dir)
continue;
if (index >= 0 && ctl->ndir == HDAA_CTL_IN &&
ctl->dir == ctl->ndir && ctl->index != index)
continue;
found++;
if (found == cnt || cnt <= 0)
return (ctl);
}
return (NULL);
}
static const struct matrix {
struct pcmchan_matrix m;
int analog;
} matrixes[] = {
{ SND_CHN_MATRIX_MAP_1_0, 1 },
{ SND_CHN_MATRIX_MAP_2_0, 1 },
{ SND_CHN_MATRIX_MAP_2_1, 0 },
{ SND_CHN_MATRIX_MAP_3_0, 0 },
{ SND_CHN_MATRIX_MAP_3_1, 0 },
{ SND_CHN_MATRIX_MAP_4_0, 1 },
{ SND_CHN_MATRIX_MAP_4_1, 0 },
{ SND_CHN_MATRIX_MAP_5_0, 0 },
{ SND_CHN_MATRIX_MAP_5_1, 1 },
{ SND_CHN_MATRIX_MAP_6_0, 0 },
{ SND_CHN_MATRIX_MAP_6_1, 0 },
{ SND_CHN_MATRIX_MAP_7_0, 0 },
{ SND_CHN_MATRIX_MAP_7_1, 1 },
};
static const char *channel_names[] = SND_CHN_T_NAMES;
/*
* Connected channels change handler.
*/
static void
hdaa_channels_handler(struct hdaa_audio_as *as)
{
struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo;
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_chan *ch = &devinfo->chans[as->chans[0]];
struct hdaa_widget *w;
uint8_t *eld;
int i, total, sub, assume, channels;
uint16_t cpins, upins, tpins;
cpins = upins = 0;
eld = NULL;
for (i = 0; i < 16; i++) {
if (as->pins[i] <= 0)
continue;
w = hdaa_widget_get(devinfo, as->pins[i]);
if (w == NULL)
continue;
if (w->wclass.pin.connected == 1)
cpins |= (1 << i);
else if (w->wclass.pin.connected != 0)
upins |= (1 << i);
if (w->eld != NULL && w->eld_len >= 8)
eld = w->eld;
}
tpins = cpins | upins;
if (as->hpredir >= 0)
tpins &= 0x7fff;
if (tpins == 0)
tpins = as->pinset;
total = sub = assume = channels = 0;
if (eld) {
/* Map CEA speakers to sound(4) channels. */
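/* eld[7] is the ELD speaker-allocation byte (a CEA-861 style bitmask). */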
if (eld[7] & 0x01) /* Front Left/Right */
channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR;
if (eld[7] & 0x02) /* Low Frequency Effect */
channels |= SND_CHN_T_MASK_LF;
if (eld[7] & 0x04) /* Front Center */
channels |= SND_CHN_T_MASK_FC;
if (eld[7] & 0x08) { /* Rear Left/Right */
/* If we have both RLR and RLRC, report RLR as side. */
if (eld[7] & 0x40) /* Rear Left/Right Center */
channels |= SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR;
else
channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR;
}
if (eld[7] & 0x10) /* Rear center */
channels |= SND_CHN_T_MASK_BC;
if (eld[7] & 0x20) /* Front Left/Right Center */
channels |= SND_CHN_T_MASK_FLC | SND_CHN_T_MASK_FRC;
if (eld[7] & 0x40) /* Rear Left/Right Center */
channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR;
} else if (as->pinset != 0 && (tpins & 0xffe0) == 0) {
/* Map UAA speakers to sound(4) channels. */
if (tpins & 0x0001)
channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR;
if (tpins & 0x0002)
channels |= SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF;
if (tpins & 0x0004)
channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR;
if (tpins & 0x0008)
channels |= SND_CHN_T_MASK_FLC | SND_CHN_T_MASK_FRC;
if (tpins & 0x0010) {
/* If there is no back pin, report side as back. */
if ((as->pinset & 0x0004) == 0)
channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR;
else
channels |= SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR;
}
} else if (as->mixed) {
/* Mixed assoc can be only stereo or theoretically mono. */
if (ch->channels == 1)
channels |= SND_CHN_T_MASK_FC;
else
channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR;
}
if (channels) { /* We have some usable channels info. */
HDA_BOOTVERBOSE(
device_printf(pdevinfo->dev, "%s channel set is: ",
as->dir == HDAA_CTL_OUT ? "Playback" : "Recording");
for (i = 0; i < SND_CHN_T_MAX; i++)
if (channels & (1 << i))
printf("%s, ", channel_names[i]);
printf("\n");
);
/* Look for maximal fitting matrix. */
for (i = 0; i < sizeof(matrixes) / sizeof(struct matrix); i++) {
if (as->pinset != 0 && matrixes[i].analog == 0)
continue;
if ((matrixes[i].m.mask & ~channels) == 0) {
total = matrixes[i].m.channels;
sub = matrixes[i].m.ext;
}
}
}
if (total == 0) {
assume = 1;
total = ch->channels;
sub = (total == 6 || total == 8) ? 1 : 0;
}
HDA_BOOTVERBOSE(
device_printf(pdevinfo->dev,
"%s channel matrix is: %s%d.%d (%s)\n",
as->dir == HDAA_CTL_OUT ? "Playback" : "Recording",
assume ? "unknown, assuming " : "", total - sub, sub,
cpins != 0 ? "connected" :
(upins != 0 ? "unknown" : "disconnected"));
);
}
/*
* Headphones redirection change handler.
*/
static void
hdaa_hpredir_handler(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
struct hdaa_audio_as *as = &devinfo->as[w->bindas];
struct hdaa_widget *w1;
struct hdaa_audio_ctl *ctl;
uint32_t val;
int j, connected = w->wclass.pin.connected;
HDA_BOOTVERBOSE(
device_printf((as->pdevinfo && as->pdevinfo->dev) ?
as->pdevinfo->dev : devinfo->dev,
"Redirect output to: %s\n",
connected ? "headphones": "main");
);
/* (Un)Mute headphone pin. */
ctl = hdaa_audio_ctl_amp_get(devinfo,
w->nid, HDAA_CTL_IN, -1, 1);
if (ctl != NULL && ctl->mute) {
/* If pin has muter - use it. */
val = connected ? 0 : 1;
if (val != ctl->forcemute) {
ctl->forcemute = val;
hdaa_audio_ctl_amp_set(ctl,
HDAA_AMP_MUTE_DEFAULT,
HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT);
}
} else {
/* If there is no muter - disable pin output. */
if (connected)
val = w->wclass.pin.ctrl |
HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE;
else
val = w->wclass.pin.ctrl &
~HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE;
if (val != w->wclass.pin.ctrl) {
w->wclass.pin.ctrl = val;
hda_command(devinfo->dev,
HDA_CMD_SET_PIN_WIDGET_CTRL(0,
w->nid, w->wclass.pin.ctrl));
}
}
/* (Un)Mute other pins. */
for (j = 0; j < 15; j++) {
if (as->pins[j] <= 0)
continue;
ctl = hdaa_audio_ctl_amp_get(devinfo,
as->pins[j], HDAA_CTL_IN, -1, 1);
if (ctl != NULL && ctl->mute) {
/* If pin has muter - use it. */
val = connected ? 1 : 0;
if (val == ctl->forcemute)
continue;
ctl->forcemute = val;
hdaa_audio_ctl_amp_set(ctl,
HDAA_AMP_MUTE_DEFAULT,
HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT);
continue;
}
/* If there is no muter - disable pin output. */
w1 = hdaa_widget_get(devinfo, as->pins[j]);
if (w1 != NULL) {
if (connected)
val = w1->wclass.pin.ctrl &
~HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE;
else
val = w1->wclass.pin.ctrl |
HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE;
if (val != w1->wclass.pin.ctrl) {
w1->wclass.pin.ctrl = val;
hda_command(devinfo->dev,
HDA_CMD_SET_PIN_WIDGET_CTRL(0,
w1->nid, w1->wclass.pin.ctrl));
}
}
}
}
/*
* Recording source change handler.
*/
static void
hdaa_autorecsrc_handler(struct hdaa_audio_as *as, struct hdaa_widget *w)
{
struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo;
struct hdaa_devinfo *devinfo;
struct hdaa_widget *w1;
int i, mask, fullmask, prio, bestprio;
char buf[128];
if (!as->mixed || pdevinfo == NULL || pdevinfo->mixer == NULL)
return;
/* Don't touch anything if we asked not to. */
if (pdevinfo->autorecsrc == 0 ||
(pdevinfo->autorecsrc == 1 && w != NULL))
return;
/* Don't touch anything if "mix" or "speaker" selected. */
if (pdevinfo->recsrc & (SOUND_MASK_IMIX | SOUND_MASK_SPEAKER))
return;
/* Don't touch anything if several selected. */
if (ffs(pdevinfo->recsrc) != fls(pdevinfo->recsrc))
return;
devinfo = pdevinfo->devinfo;
mask = fullmask = 0;
bestprio = 0;
for (i = 0; i < 16; i++) {
if (as->pins[i] <= 0)
continue;
w1 = hdaa_widget_get(devinfo, as->pins[i]);
if (w1 == NULL || w1->enable == 0)
continue;
if (w1->wclass.pin.connected == 0)
continue;
prio = (w1->wclass.pin.connected == 1) ? 2 : 1;
if (prio < bestprio)
continue;
if (prio > bestprio) {
mask = 0;
bestprio = prio;
}
mask |= (1 << w1->ossdev);
fullmask |= (1 << w1->ossdev);
}
if (mask == 0)
return;
/* Prefer newly connected input. */
if (w != NULL && (mask & (1 << w->ossdev)))
mask = (1 << w->ossdev);
/* Prefer previously selected input */
if (mask & pdevinfo->recsrc)
mask &= pdevinfo->recsrc;
/* Prefer mic. */
if (mask & SOUND_MASK_MIC)
mask = SOUND_MASK_MIC;
/* Prefer monitor (2nd mic). */
if (mask & SOUND_MASK_MONITOR)
mask = SOUND_MASK_MONITOR;
/* Just take first one. */
mask = (1 << (ffs(mask) - 1));
HDA_BOOTVERBOSE(
hdaa_audio_ctl_ossmixer_mask2allname(mask, buf, sizeof(buf));
device_printf(pdevinfo->dev,
"Automatically set rec source to: %s\n", buf);
);
hdaa_unlock(devinfo);
mix_setrecsrc(pdevinfo->mixer, mask);
hdaa_lock(devinfo);
}
/*
* Jack presence detection event handler.
*/
static void
hdaa_presence_handler(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
struct hdaa_audio_as *as;
uint32_t res;
int connected, old;
if (w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
return;
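/*
* Skip pins that cannot report presence or that have the "jack detect
* override" misc bit set in their default configuration.
*/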
if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 ||
(HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0)
return;
res = hda_command(devinfo->dev, HDA_CMD_GET_PIN_SENSE(0, w->nid));
connected = (res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) != 0;
if (devinfo->quirks & HDAA_QUIRK_SENSEINV)
connected = !connected;
old = w->wclass.pin.connected;
if (connected == old)
return;
w->wclass.pin.connected = connected;
HDA_BOOTVERBOSE(
if (connected || old != 2) {
device_printf(devinfo->dev,
"Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
w->nid, res, !connected ? "dis" : "");
}
);
as = &devinfo->as[w->bindas];
if (as->hpredir >= 0 && as->pins[15] == w->nid)
hdaa_hpredir_handler(w);
if (as->dir == HDAA_CTL_IN && old != 2)
hdaa_autorecsrc_handler(as, w);
if (old != 2)
hdaa_channels_handler(as);
}
/*
* Callback for poll based presence detection.
*/
static void
hdaa_jack_poll_callback(void *arg)
{
struct hdaa_devinfo *devinfo = arg;
struct hdaa_widget *w;
int i;
hdaa_lock(devinfo);
if (devinfo->poll_ival == 0) {
hdaa_unlock(devinfo);
return;
}
for (i = 0; i < devinfo->ascnt; i++) {
if (devinfo->as[i].hpredir < 0)
continue;
w = hdaa_widget_get(devinfo, devinfo->as[i].pins[15]);
if (w == NULL || w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
hdaa_presence_handler(w);
}
callout_reset(&devinfo->poll_jack, devinfo->poll_ival,
hdaa_jack_poll_callback, devinfo);
hdaa_unlock(devinfo);
}
static void
hdaa_eld_dump(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
device_t dev = devinfo->dev;
uint8_t *sad;
int len, mnl, i, sadc, fmt;
if (w->eld == NULL || w->eld_len < 4)
return;
device_printf(dev,
"ELD nid=%d: ELD_Ver=%u Baseline_ELD_Len=%u\n",
w->nid, w->eld[0] >> 3, w->eld[2]);
if ((w->eld[0] >> 3) != 0x02)
return;
len = min(w->eld_len, (u_int)w->eld[2] * 4);
mnl = w->eld[4] & 0x1f;
device_printf(dev,
"ELD nid=%d: CEA_EDID_Ver=%u MNL=%u\n",
w->nid, w->eld[4] >> 5, mnl);
sadc = w->eld[5] >> 4;
device_printf(dev,
"ELD nid=%d: SAD_Count=%u Conn_Type=%u S_AI=%u HDCP=%u\n",
w->nid, sadc, (w->eld[5] >> 2) & 0x3,
(w->eld[5] >> 1) & 0x1, w->eld[5] & 0x1);
device_printf(dev,
"ELD nid=%d: Aud_Synch_Delay=%ums\n",
w->nid, w->eld[6] * 2);
device_printf(dev,
"ELD nid=%d: Channels=0x%b\n",
w->nid, w->eld[7],
"\020\07RLRC\06FLRC\05RC\04RLR\03FC\02LFE\01FLR");
device_printf(dev,
"ELD nid=%d: Port_ID=0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
w->nid, w->eld[8], w->eld[9], w->eld[10], w->eld[11],
w->eld[12], w->eld[13], w->eld[14], w->eld[15]);
device_printf(dev,
"ELD nid=%d: Manufacturer_Name=0x%02x%02x\n",
w->nid, w->eld[16], w->eld[17]);
device_printf(dev,
"ELD nid=%d: Product_Code=0x%02x%02x\n",
w->nid, w->eld[18], w->eld[19]);
device_printf(dev,
"ELD nid=%d: Monitor_Name_String='%.*s'\n",
w->nid, mnl, &w->eld[20]);
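/* Short Audio Descriptors (3 bytes each) follow the monitor name. */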
for (i = 0; i < sadc; i++) {
sad = &w->eld[20 + mnl + i * 3];
fmt = (sad[0] >> 3) & 0x0f;
if (fmt == HDA_HDMI_CODING_TYPE_REF_CTX) {
fmt = (sad[2] >> 3) & 0x1f;
if (fmt < 1 || fmt > 3)
fmt = 0;
else
fmt += 14;
}
device_printf(dev,
"ELD nid=%d: %s %dch freqs=0x%b",
w->nid, HDA_HDMI_CODING_TYPES[fmt], (sad[0] & 0x07) + 1,
sad[1], "\020\007192\006176\00596\00488\00348\00244\00132");
switch (fmt) {
case HDA_HDMI_CODING_TYPE_LPCM:
printf(" sizes=0x%b",
sad[2] & 0x07, "\020\00324\00220\00116");
break;
case HDA_HDMI_CODING_TYPE_AC3:
case HDA_HDMI_CODING_TYPE_MPEG1:
case HDA_HDMI_CODING_TYPE_MP3:
case HDA_HDMI_CODING_TYPE_MPEG2:
case HDA_HDMI_CODING_TYPE_AACLC:
case HDA_HDMI_CODING_TYPE_DTS:
case HDA_HDMI_CODING_TYPE_ATRAC:
printf(" max_bitrate=%d", sad[2] * 8000);
break;
case HDA_HDMI_CODING_TYPE_WMAPRO:
printf(" profile=%d", sad[2] & 0x07);
break;
}
printf("\n");
}
}
static void
hdaa_eld_handler(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
uint32_t res;
int i;
if (w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
return;
if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 ||
(HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0)
return;
res = hda_command(devinfo->dev, HDA_CMD_GET_PIN_SENSE(0, w->nid));
if ((w->eld != 0) == ((res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) != 0))
return;
if (w->eld != NULL) {
w->eld_len = 0;
free(w->eld, M_HDAA);
w->eld = NULL;
}
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Pin sense: nid=%d sense=0x%08x "
"(%sconnected, ELD %svalid)\n",
w->nid, res,
(res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) ? "" : "dis",
(res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) ? "" : "in");
);
if ((res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) == 0)
return;
res = hda_command(devinfo->dev,
HDA_CMD_GET_HDMI_DIP_SIZE(0, w->nid, 0x08));
if (res == HDA_INVALID)
return;
w->eld_len = res & 0xff;
if (w->eld_len != 0)
w->eld = malloc(w->eld_len, M_HDAA, M_ZERO | M_NOWAIT);
if (w->eld == NULL) {
w->eld_len = 0;
return;
}
for (i = 0; i < w->eld_len; i++) {
res = hda_command(devinfo->dev,
HDA_CMD_GET_HDMI_ELDD(0, w->nid, i));
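/* Bit 31 of the response flags the returned ELD data byte as valid. */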
if (res & 0x80000000)
w->eld[i] = res & 0xff;
}
HDA_BOOTVERBOSE(
hdaa_eld_dump(w);
);
hdaa_channels_handler(&devinfo->as[w->bindas]);
}
/*
* Pin sense initializer.
*/
static void
hdaa_sense_init(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as;
struct hdaa_widget *w;
int i, poll = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap)) {
if (w->unsol < 0)
w->unsol = HDAC_UNSOL_ALLOC(
device_get_parent(devinfo->dev),
devinfo->dev, w->nid);
hda_command(devinfo->dev,
HDA_CMD_SET_UNSOLICITED_RESPONSE(0, w->nid,
HDA_CMD_SET_UNSOLICITED_RESPONSE_ENABLE | w->unsol));
}
as = &devinfo->as[w->bindas];
if (as->hpredir >= 0 && as->pins[15] == w->nid) {
if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 ||
(HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0) {
device_printf(devinfo->dev,
"No presence detection support at nid %d\n",
w->nid);
} else {
if (w->unsol < 0)
poll = 1;
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Headphones redirection for "
"association %d nid=%d using %s.\n",
w->bindas, w->nid,
(w->unsol < 0) ? "polling" :
"unsolicited responses");
);
}
}
hdaa_presence_handler(w);
if (!HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap) &&
!HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap))
continue;
hdaa_eld_handler(w);
}
if (poll) {
callout_reset(&devinfo->poll_jack, 1,
hdaa_jack_poll_callback, devinfo);
}
}
static void
hdaa_sense_deinit(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w;
int i;
callout_stop(&devinfo->poll_jack);
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (w->unsol < 0)
continue;
hda_command(devinfo->dev,
HDA_CMD_SET_UNSOLICITED_RESPONSE(0, w->nid, 0));
HDAC_UNSOL_FREE(
device_get_parent(devinfo->dev), devinfo->dev,
w->unsol);
w->unsol = -1;
}
}
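/*
* Patch a pin config from a space/tab separated list of key=value pairs,
* e.g. (illustrative) "as=1 seq=15 device=Headphones conn=Jack color=Green".
* Values may be numeric or names from the tables above, matched
* case-insensitively.
*/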
uint32_t
hdaa_widget_pin_patch(uint32_t config, const char *str)
{
char buf[256];
char *key, *value, *rest, *bad;
int ival, i;
strlcpy(buf, str, sizeof(buf));
rest = buf;
while ((key = strsep(&rest, "=")) != NULL) {
value = strsep(&rest, " \t");
if (value == NULL)
break;
ival = strtol(value, &bad, 10);
if (strcmp(key, "seq") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_SEQUENCE_MASK;
config |= ((ival << HDA_CONFIG_DEFAULTCONF_SEQUENCE_SHIFT) &
HDA_CONFIG_DEFAULTCONF_SEQUENCE_MASK);
} else if (strcmp(key, "as") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK;
config |= ((ival << HDA_CONFIG_DEFAULTCONF_ASSOCIATION_SHIFT) &
HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK);
} else if (strcmp(key, "misc") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_MISC_MASK;
config |= ((ival << HDA_CONFIG_DEFAULTCONF_MISC_SHIFT) &
HDA_CONFIG_DEFAULTCONF_MISC_MASK);
} else if (strcmp(key, "color") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_COLOR_MASK;
if (bad[0] == 0) {
config |= ((ival << HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT) &
HDA_CONFIG_DEFAULTCONF_COLOR_MASK);
}
for (i = 0; i < 16; i++) {
if (strcasecmp(HDA_COLORS[i], value) == 0) {
config |= (i << HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT);
break;
}
}
} else if (strcmp(key, "ctype") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_MASK;
if (bad[0] == 0) {
config |= ((ival << HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_SHIFT) &
HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_MASK);
}
for (i = 0; i < 16; i++) {
if (strcasecmp(HDA_CONNECTORS[i], value) == 0) {
config |= (i << HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_SHIFT);
break;
}
}
} else if (strcmp(key, "device") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_DEVICE_MASK;
if (bad[0] == 0) {
config |= ((ival << HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT) &
HDA_CONFIG_DEFAULTCONF_DEVICE_MASK);
continue;
}
for (i = 0; i < 16; i++) {
if (strcasecmp(HDA_DEVS[i], value) == 0) {
config |= (i << HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT);
break;
}
}
} else if (strcmp(key, "loc") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_LOCATION_MASK;
if (bad[0] == 0) {
config |= ((ival << HDA_CONFIG_DEFAULTCONF_LOCATION_SHIFT) &
HDA_CONFIG_DEFAULTCONF_LOCATION_MASK);
continue;
}
for (i = 0; i < 64; i++) {
if (strcasecmp(HDA_LOCS[i], value) == 0) {
config |= (i << HDA_CONFIG_DEFAULTCONF_LOCATION_SHIFT);
break;
}
}
} else if (strcmp(key, "conn") == 0) {
config &= ~HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK;
if (bad[0] == 0) {
config |= ((ival << HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT) &
HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK);
continue;
}
for (i = 0; i < 4; i++) {
if (strcasecmp(HDA_CONNS[i], value) == 0) {
config |= (i << HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT);
break;
}
}
}
}
return (config);
}
uint32_t
hdaa_gpio_patch(uint32_t gpio, const char *str)
{
char buf[256];
char *key, *value, *rest;
int ikey, i;
strlcpy(buf, str, sizeof(buf));
rest = buf;
while ((key = strsep(&rest, "=")) != NULL) {
value = strsep(&rest, " \t");
if (value == NULL)
break;
ikey = strtol(key, NULL, 10);
if (ikey < 0 || ikey > 7)
continue;
for (i = 0; i < 7; i++) {
if (strcasecmp(HDA_GPIO_ACTIONS[i], value) == 0) {
gpio &= ~HDAA_GPIO_MASK(ikey);
gpio |= i << HDAA_GPIO_SHIFT(ikey);
break;
}
}
}
return (gpio);
}
static void
hdaa_local_patch_pin(struct hdaa_widget *w)
{
device_t dev = w->devinfo->dev;
const char *res = NULL;
uint32_t config, orig;
char buf[32];
config = orig = w->wclass.pin.config;
snprintf(buf, sizeof(buf), "cad%u.nid%u.config",
hda_get_codec_id(dev), w->nid);
if (resource_string_value(device_get_name(
device_get_parent(device_get_parent(dev))),
device_get_unit(device_get_parent(device_get_parent(dev))),
buf, &res) == 0) {
if (strncmp(res, "0x", 2) == 0) {
config = strtol(res + 2, NULL, 16);
} else {
config = hdaa_widget_pin_patch(config, res);
}
}
snprintf(buf, sizeof(buf), "nid%u.config", w->nid);
if (resource_string_value(device_get_name(dev), device_get_unit(dev),
buf, &res) == 0) {
if (strncmp(res, "0x", 2) == 0) {
config = strtol(res + 2, NULL, 16);
} else {
config = hdaa_widget_pin_patch(config, res);
}
}
HDA_BOOTVERBOSE(
if (config != orig)
device_printf(w->devinfo->dev,
"Patching pin config nid=%u 0x%08x -> 0x%08x\n",
w->nid, orig, config);
);
w->wclass.pin.newconf = w->wclass.pin.config = config;
}
static void
hdaa_dump_audio_formats_sb(struct sbuf *sb, uint32_t fcap, uint32_t pcmcap)
{
uint32_t cap;
cap = fcap;
if (cap != 0) {
sbuf_printf(sb, " Stream cap: 0x%08x", cap);
if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap))
sbuf_printf(sb, " AC3");
if (HDA_PARAM_SUPP_STREAM_FORMATS_FLOAT32(cap))
sbuf_printf(sb, " FLOAT32");
if (HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap))
sbuf_printf(sb, " PCM");
sbuf_printf(sb, "\n");
}
cap = pcmcap;
if (cap != 0) {
sbuf_printf(sb, " PCM cap: 0x%08x", cap);
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(cap))
sbuf_printf(sb, " 8");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(cap))
sbuf_printf(sb, " 16");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(cap))
sbuf_printf(sb, " 20");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(cap))
sbuf_printf(sb, " 24");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(cap))
sbuf_printf(sb, " 32");
sbuf_printf(sb, " bits,");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(cap))
sbuf_printf(sb, " 8");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(cap))
sbuf_printf(sb, " 11");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(cap))
sbuf_printf(sb, " 16");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(cap))
sbuf_printf(sb, " 22");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(cap))
sbuf_printf(sb, " 32");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(cap))
sbuf_printf(sb, " 44");
sbuf_printf(sb, " 48");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(cap))
sbuf_printf(sb, " 88");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(cap))
sbuf_printf(sb, " 96");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(cap))
sbuf_printf(sb, " 176");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(cap))
sbuf_printf(sb, " 192");
sbuf_printf(sb, " KHz\n");
}
}
static void
hdaa_dump_pin_sb(struct sbuf *sb, struct hdaa_widget *w)
{
uint32_t pincap, conf;
pincap = w->wclass.pin.cap;
sbuf_printf(sb, " Pin cap: 0x%08x", pincap);
if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap))
sbuf_printf(sb, " ISC");
if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap))
sbuf_printf(sb, " TRQD");
if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap))
sbuf_printf(sb, " PDC");
if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap))
sbuf_printf(sb, " HP");
if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap))
sbuf_printf(sb, " OUT");
if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap))
sbuf_printf(sb, " IN");
if (HDA_PARAM_PIN_CAP_BALANCED_IO_PINS(pincap))
sbuf_printf(sb, " BAL");
if (HDA_PARAM_PIN_CAP_HDMI(pincap))
sbuf_printf(sb, " HDMI");
if (HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)) {
sbuf_printf(sb, " VREF[");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap))
sbuf_printf(sb, " 50");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap))
sbuf_printf(sb, " 80");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap))
sbuf_printf(sb, " 100");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_GROUND(pincap))
sbuf_printf(sb, " GROUND");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_HIZ(pincap))
sbuf_printf(sb, " HIZ");
sbuf_printf(sb, " ]");
}
if (HDA_PARAM_PIN_CAP_EAPD_CAP(pincap))
sbuf_printf(sb, " EAPD");
if (HDA_PARAM_PIN_CAP_DP(pincap))
sbuf_printf(sb, " DP");
if (HDA_PARAM_PIN_CAP_HBR(pincap))
sbuf_printf(sb, " HBR");
sbuf_printf(sb, "\n");
conf = w->wclass.pin.config;
sbuf_printf(sb, " Pin config: 0x%08x", conf);
sbuf_printf(sb, " as=%d seq=%d "
"device=%s conn=%s ctype=%s loc=%s color=%s misc=%d\n",
HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf),
HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf),
HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)],
HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)],
HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)],
HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)],
HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)],
HDA_CONFIG_DEFAULTCONF_MISC(conf));
sbuf_printf(sb, " Pin control: 0x%08x", w->wclass.pin.ctrl);
if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE)
sbuf_printf(sb, " HP");
if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE)
sbuf_printf(sb, " IN");
if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE)
sbuf_printf(sb, " OUT");
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) {
if ((w->wclass.pin.ctrl &
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) == 0x03)
sbuf_printf(sb, " HBR");
else if ((w->wclass.pin.ctrl &
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0)
sbuf_printf(sb, " EPTs");
} else {
if ((w->wclass.pin.ctrl &
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0)
sbuf_printf(sb, " VREFs");
}
sbuf_printf(sb, "\n");
}
static void
hdaa_dump_amp_sb(struct sbuf *sb, uint32_t cap, const char *banner)
{
int offset, size, step;
offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(cap);
size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(cap);
step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(cap);
sbuf_printf(sb, " %s amp: 0x%08x "
"mute=%d step=%d size=%d offset=%d (%+d/%+ddB)\n",
banner, cap,
HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(cap),
step, size, offset,
((0 - offset) * (size + 1)) / 4,
((step - offset) * (size + 1)) / 4);
}
static int
hdaa_sysctl_caps(SYSCTL_HANDLER_ARGS)
{
struct hdaa_devinfo *devinfo;
struct hdaa_widget *w, *cw;
struct sbuf sb;
char buf[64];
int error, j;
w = (struct hdaa_widget *)oidp->oid_arg1;
devinfo = w->devinfo;
sbuf_new_for_sysctl(&sb, NULL, 256, req);
sbuf_printf(&sb, "%s%s\n", w->name,
(w->enable == 0) ? " [DISABLED]" : "");
sbuf_printf(&sb, " Widget cap: 0x%08x",
w->param.widget_cap);
if (w->param.widget_cap & 0x0ee1) {
if (HDA_PARAM_AUDIO_WIDGET_CAP_LR_SWAP(w->param.widget_cap))
sbuf_printf(&sb, " LRSWAP");
if (HDA_PARAM_AUDIO_WIDGET_CAP_POWER_CTRL(w->param.widget_cap))
sbuf_printf(&sb, " PWR");
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap))
sbuf_printf(&sb, " DIGITAL");
if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap))
sbuf_printf(&sb, " UNSOL");
if (HDA_PARAM_AUDIO_WIDGET_CAP_PROC_WIDGET(w->param.widget_cap))
sbuf_printf(&sb, " PROC");
if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap))
sbuf_printf(&sb, " STRIPE(x%d)",
1 << (fls(w->wclass.conv.stripecap) - 1));
j = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap);
if (j == 1)
sbuf_printf(&sb, " STEREO");
else if (j > 1)
sbuf_printf(&sb, " %dCH", j + 1);
}
sbuf_printf(&sb, "\n");
if (w->bindas != -1) {
sbuf_printf(&sb, " Association: %d (0x%04x)\n",
w->bindas, w->bindseqmask);
}
if (w->ossmask != 0 || w->ossdev >= 0) {
sbuf_printf(&sb, " OSS: %s",
hdaa_audio_ctl_ossmixer_mask2allname(w->ossmask, buf, sizeof(buf)));
if (w->ossdev >= 0)
sbuf_printf(&sb, " (%s)", ossnames[w->ossdev]);
sbuf_printf(&sb, "\n");
}
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) {
hdaa_dump_audio_formats_sb(&sb,
w->param.supp_stream_formats,
w->param.supp_pcm_size_rate);
} else if (w->type ==
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin)
hdaa_dump_pin_sb(&sb, w);
if (w->param.eapdbtl != HDA_INVALID) {
sbuf_printf(&sb, " EAPD: 0x%08x%s%s%s\n",
w->param.eapdbtl,
(w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_LR_SWAP) ?
" LRSWAP" : "",
(w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD) ?
" EAPD" : "",
(w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_BTL) ?
" BTL" : "");
}
if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(w->param.widget_cap) &&
w->param.outamp_cap != 0)
hdaa_dump_amp_sb(&sb, w->param.outamp_cap, "Output");
if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(w->param.widget_cap) &&
w->param.inamp_cap != 0)
hdaa_dump_amp_sb(&sb, w->param.inamp_cap, " Input");
if (w->nconns > 0)
sbuf_printf(&sb, " Connections: %d\n", w->nconns);
for (j = 0; j < w->nconns; j++) {
cw = hdaa_widget_get(devinfo, w->conns[j]);
sbuf_printf(&sb, " + %s<- nid=%d [%s]",
(w->connsenable[j] == 0)?"[DISABLED] ":"",
w->conns[j], (cw == NULL) ? "GHOST!" : cw->name);
if (cw == NULL)
sbuf_printf(&sb, " [UNKNOWN]");
else if (cw->enable == 0)
sbuf_printf(&sb, " [DISABLED]");
if (w->nconns > 1 && w->selconn == j && w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
sbuf_printf(&sb, " (selected)");
sbuf_printf(&sb, "\n");
}
error = sbuf_finish(&sb);
sbuf_delete(&sb);
return (error);
}
static int
hdaa_sysctl_config(SYSCTL_HANDLER_ARGS)
{
char buf[256];
int error;
uint32_t conf;
conf = *(uint32_t *)oidp->oid_arg1;
snprintf(buf, sizeof(buf), "0x%08x as=%d seq=%d "
"device=%s conn=%s ctype=%s loc=%s color=%s misc=%d",
conf,
HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf),
HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf),
HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)],
HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)],
HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)],
HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)],
HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)],
HDA_CONFIG_DEFAULTCONF_MISC(conf));
error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
if (error != 0 || req->newptr == NULL)
return (error);
if (strncmp(buf, "0x", 2) == 0)
conf = strtol(buf + 2, NULL, 16);
else
conf = hdaa_widget_pin_patch(conf, buf);
*(uint32_t *)oidp->oid_arg1 = conf;
return (0);
}
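/*
* Parse a comma/space separated list of quirk names from hdaa_quirks_tab;
* a "no" prefix (e.g. "noeapdinv") puts the quirk into the "off" mask
* instead of the "on" mask.
*/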
static void
hdaa_config_fetch(const char *str, uint32_t *on, uint32_t *off)
{
int i = 0, j, k, len, inv;
for (;;) {
while (str[i] != '\0' &&
(str[i] == ',' || isspace(str[i]) != 0))
i++;
if (str[i] == '\0')
return;
j = i;
while (str[j] != '\0' &&
!(str[j] == ',' || isspace(str[j]) != 0))
j++;
len = j - i;
if (len > 2 && strncmp(str + i, "no", 2) == 0)
inv = 2;
else
inv = 0;
for (k = 0; len > inv && k < nitems(hdaa_quirks_tab); k++) {
if (strncmp(str + i + inv,
hdaa_quirks_tab[k].key, len - inv) != 0)
continue;
if (len - inv != strlen(hdaa_quirks_tab[k].key))
continue;
if (inv == 0) {
*on |= hdaa_quirks_tab[k].value;
*off &= ~hdaa_quirks_tab[k].value;
} else {
*off |= hdaa_quirks_tab[k].value;
*on &= ~hdaa_quirks_tab[k].value;
}
break;
}
i = j;
}
}
static int
hdaa_sysctl_quirks(SYSCTL_HANDLER_ARGS)
{
char buf[256];
int error, n = 0, i;
uint32_t quirks, quirks_off;
quirks = *(uint32_t *)oidp->oid_arg1;
buf[0] = 0;
for (i = 0; i < nitems(hdaa_quirks_tab); i++) {
if ((quirks & hdaa_quirks_tab[i].value) != 0)
n += snprintf(buf + n, sizeof(buf) - n, "%s%s",
n != 0 ? "," : "", hdaa_quirks_tab[i].key);
}
error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
if (error != 0 || req->newptr == NULL)
return (error);
if (strncmp(buf, "0x", 2) == 0)
quirks = strtol(buf + 2, NULL, 16);
else {
quirks = quirks_off = 0;
hdaa_config_fetch(buf, &quirks, &quirks_off);
}
*(uint32_t *)oidp->oid_arg1 = quirks;
return (0);
}
static void
hdaa_local_patch(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w;
const char *res = NULL;
uint32_t quirks_on = 0, quirks_off = 0, x;
int i;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL)
continue;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
hdaa_local_patch_pin(w);
}
if (resource_string_value(device_get_name(devinfo->dev),
device_get_unit(devinfo->dev), "config", &res) == 0) {
if (res != NULL && strlen(res) > 0)
hdaa_config_fetch(res, &quirks_on, &quirks_off);
devinfo->quirks |= quirks_on;
devinfo->quirks &= ~quirks_off;
}
if (devinfo->newquirks == -1)
devinfo->newquirks = devinfo->quirks;
else
devinfo->quirks = devinfo->newquirks;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
"Config options: 0x%08x\n", devinfo->quirks);
);
if (resource_string_value(device_get_name(devinfo->dev),
device_get_unit(devinfo->dev), "gpio_config", &res) == 0) {
if (strncmp(res, "0x", 2) == 0) {
devinfo->gpio = strtol(res + 2, NULL, 16);
} else {
devinfo->gpio = hdaa_gpio_patch(devinfo->gpio, res);
}
}
if (devinfo->newgpio == -1)
devinfo->newgpio = devinfo->gpio;
else
devinfo->gpio = devinfo->newgpio;
if (devinfo->newgpo == -1)
devinfo->newgpo = devinfo->gpo;
else
devinfo->gpo = devinfo->newgpo;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev, "GPIO config options:");
for (i = 0; i < 7; i++) {
x = (devinfo->gpio & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i);
if (x != 0)
printf(" %d=%s", i, HDA_GPIO_ACTIONS[x]);
}
printf("\n");
);
}
static void
hdaa_widget_connection_parse(struct hdaa_widget *w)
{
uint32_t res;
int i, j, max, ents, entnum;
nid_t nid = w->nid;
nid_t cnid, addcnid, prevcnid;
w->nconns = 0;
res = hda_command(w->devinfo->dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_CONN_LIST_LENGTH));
ents = HDA_PARAM_CONN_LIST_LENGTH_LIST_LENGTH(res);
if (ents < 1)
return;
entnum = HDA_PARAM_CONN_LIST_LENGTH_LONG_FORM(res) ? 2 : 4;
max = (sizeof(w->conns) / sizeof(w->conns[0])) - 1;
prevcnid = 0;
#define CONN_RMASK(e) (1 << ((32 / (e)) - 1))
#define CONN_NMASK(e) (CONN_RMASK(e) - 1)
#define CONN_RESVAL(r, e, n) ((r) >> ((32 / (e)) * (n)))
#define CONN_RANGE(r, e, n) (CONN_RESVAL(r, e, n) & CONN_RMASK(e))
#define CONN_CNID(r, e, n) (CONN_RESVAL(r, e, n) & CONN_NMASK(e))
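/*
* Each connection list response packs 'entnum' entries: 16-bit entries in
* long form, 8-bit in short form.  An entry with its high (range) bit set
* closes a range that starts after the previous entry; the loop below
* expands such ranges into individual connection NIDs.
*/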
for (i = 0; i < ents; i += entnum) {
res = hda_command(w->devinfo->dev,
HDA_CMD_GET_CONN_LIST_ENTRY(0, nid, i));
for (j = 0; j < entnum; j++) {
cnid = CONN_CNID(res, entnum, j);
if (cnid == 0) {
if (w->nconns < ents)
device_printf(w->devinfo->dev,
"WARNING: nid=%d has zero cnid "
"entnum=%d j=%d index=%d "
"entries=%d found=%d res=0x%08x\n",
nid, entnum, j, i,
ents, w->nconns, res);
else
goto getconns_out;
}
if (cnid < w->devinfo->startnode ||
cnid >= w->devinfo->endnode) {
HDA_BOOTVERBOSE(
device_printf(w->devinfo->dev,
"WARNING: nid=%d has cnid outside "
"of the AFG range j=%d "
"entnum=%d index=%d res=0x%08x\n",
nid, j, entnum, i, res);
);
}
if (CONN_RANGE(res, entnum, j) == 0)
addcnid = cnid;
else if (prevcnid == 0 || prevcnid >= cnid) {
device_printf(w->devinfo->dev,
"WARNING: Invalid child range "
"nid=%d index=%d j=%d entnum=%d "
"prevcnid=%d cnid=%d res=0x%08x\n",
nid, i, j, entnum, prevcnid,
cnid, res);
addcnid = cnid;
} else
addcnid = prevcnid + 1;
while (addcnid <= cnid) {
if (w->nconns > max) {
device_printf(w->devinfo->dev,
"Adding %d (nid=%d): "
"Max connection reached! max=%d\n",
addcnid, nid, max + 1);
goto getconns_out;
}
w->connsenable[w->nconns] = 1;
w->conns[w->nconns++] = addcnid++;
}
prevcnid = cnid;
}
}
getconns_out:
return;
}
static void
hdaa_widget_parse(struct hdaa_widget *w)
{
device_t dev = w->devinfo->dev;
uint32_t wcap, cap;
nid_t nid = w->nid;
char buf[64];
w->param.widget_cap = wcap = hda_command(dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_AUDIO_WIDGET_CAP));
w->type = HDA_PARAM_AUDIO_WIDGET_CAP_TYPE(wcap);
hdaa_widget_connection_parse(w);
if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(wcap)) {
if (HDA_PARAM_AUDIO_WIDGET_CAP_AMP_OVR(wcap))
w->param.outamp_cap =
hda_command(dev,
HDA_CMD_GET_PARAMETER(0, nid,
HDA_PARAM_OUTPUT_AMP_CAP));
else
w->param.outamp_cap =
w->devinfo->outamp_cap;
} else
w->param.outamp_cap = 0;
if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(wcap)) {
if (HDA_PARAM_AUDIO_WIDGET_CAP_AMP_OVR(wcap))
w->param.inamp_cap =
hda_command(dev,
HDA_CMD_GET_PARAMETER(0, nid,
HDA_PARAM_INPUT_AMP_CAP));
else
w->param.inamp_cap =
w->devinfo->inamp_cap;
} else
w->param.inamp_cap = 0;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) {
if (HDA_PARAM_AUDIO_WIDGET_CAP_FORMAT_OVR(wcap)) {
cap = hda_command(dev,
HDA_CMD_GET_PARAMETER(0, nid,
HDA_PARAM_SUPP_STREAM_FORMATS));
w->param.supp_stream_formats = (cap != 0) ? cap :
w->devinfo->supp_stream_formats;
cap = hda_command(dev,
HDA_CMD_GET_PARAMETER(0, nid,
HDA_PARAM_SUPP_PCM_SIZE_RATE));
w->param.supp_pcm_size_rate = (cap != 0) ? cap :
w->devinfo->supp_pcm_size_rate;
} else {
w->param.supp_stream_formats =
w->devinfo->supp_stream_formats;
w->param.supp_pcm_size_rate =
w->devinfo->supp_pcm_size_rate;
}
if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) {
w->wclass.conv.stripecap = hda_command(dev,
HDA_CMD_GET_STRIPE_CONTROL(0, w->nid)) >> 20;
} else
w->wclass.conv.stripecap = 1;
} else {
w->param.supp_stream_formats = 0;
w->param.supp_pcm_size_rate = 0;
}
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) {
w->wclass.pin.original = w->wclass.pin.newconf =
w->wclass.pin.config = hda_command(dev,
HDA_CMD_GET_CONFIGURATION_DEFAULT(0, w->nid));
w->wclass.pin.cap = hda_command(dev,
HDA_CMD_GET_PARAMETER(0, w->nid, HDA_PARAM_PIN_CAP));
w->wclass.pin.ctrl = hda_command(dev,
HDA_CMD_GET_PIN_WIDGET_CTRL(0, nid));
w->wclass.pin.connected = 2;
if (HDA_PARAM_PIN_CAP_EAPD_CAP(w->wclass.pin.cap)) {
w->param.eapdbtl = hda_command(dev,
HDA_CMD_GET_EAPD_BTL_ENABLE(0, nid));
w->param.eapdbtl &= 0x7;
w->param.eapdbtl |= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD;
} else
w->param.eapdbtl = HDA_INVALID;
}
w->unsol = -1;
hdaa_unlock(w->devinfo);
snprintf(buf, sizeof(buf), "nid%d", w->nid);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
buf, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
w, 0, hdaa_sysctl_caps, "A", "Node capabilities");
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) {
snprintf(buf, sizeof(buf), "nid%d_config", w->nid);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
buf, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
&w->wclass.pin.newconf, 0, hdaa_sysctl_config, "A",
"Current pin configuration");
snprintf(buf, sizeof(buf), "nid%d_original", w->nid);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
buf, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
&w->wclass.pin.original, 0, hdaa_sysctl_config, "A",
"Original pin configuration");
}
hdaa_lock(w->devinfo);
}
static void
hdaa_widget_postprocess(struct hdaa_widget *w)
{
const char *typestr;
w->type = HDA_PARAM_AUDIO_WIDGET_CAP_TYPE(w->param.widget_cap);
switch (w->type) {
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT:
typestr = "audio output";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT:
typestr = "audio input";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER:
typestr = "audio mixer";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR:
typestr = "audio selector";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX:
typestr = "pin";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_POWER_WIDGET:
typestr = "power widget";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VOLUME_WIDGET:
typestr = "volume widget";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET:
typestr = "beep widget";
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VENDOR_WIDGET:
typestr = "vendor widget";
break;
default:
typestr = "unknown type";
break;
}
strlcpy(w->name, typestr, sizeof(w->name));
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) {
uint32_t config;
const char *devstr;
int conn, color;
config = w->wclass.pin.config;
devstr = HDA_DEVS[(config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) >>
HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT];
conn = (config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) >>
HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT;
color = (config & HDA_CONFIG_DEFAULTCONF_COLOR_MASK) >>
HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT;
strlcat(w->name, ": ", sizeof(w->name));
strlcat(w->name, devstr, sizeof(w->name));
strlcat(w->name, " (", sizeof(w->name));
if (conn == 0 && color != 0 && color != 15) {
strlcat(w->name, HDA_COLORS[color], sizeof(w->name));
strlcat(w->name, " ", sizeof(w->name));
}
strlcat(w->name, HDA_CONNS[conn], sizeof(w->name));
strlcat(w->name, ")", sizeof(w->name));
}
}
struct hdaa_widget *
hdaa_widget_get(struct hdaa_devinfo *devinfo, nid_t nid)
{
if (devinfo == NULL || devinfo->widget == NULL ||
nid < devinfo->startnode || nid >= devinfo->endnode)
return (NULL);
return (&devinfo->widget[nid - devinfo->startnode]);
}
static void
hdaa_audio_ctl_amp_set_internal(struct hdaa_devinfo *devinfo, nid_t nid,
int index, int lmute, int rmute,
int left, int right, int dir)
{
uint16_t v = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
"Setting amplifier nid=%d index=%d %s mute=%d/%d vol=%d/%d\n",
nid,index,dir ? "in" : "out",lmute,rmute,left,right);
);
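/*
* Set Amplifier Gain/Mute payload: bit 15 selects the output amp, bit 14
* the input amp, bits 13/12 enable the left/right channel updates, bits
* 11:8 carry the connection index, bit 7 the mute flag and bits 6:0 the
* gain value.
*/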
if (left != right || lmute != rmute) {
v = (1 << (15 - dir)) | (1 << 13) | (index << 8) |
(lmute << 7) | left;
hda_command(devinfo->dev,
HDA_CMD_SET_AMP_GAIN_MUTE(0, nid, v));
v = (1 << (15 - dir)) | (1 << 12) | (index << 8) |
(rmute << 7) | right;
} else
v = (1 << (15 - dir)) | (3 << 12) | (index << 8) |
(lmute << 7) | left;
hda_command(devinfo->dev,
HDA_CMD_SET_AMP_GAIN_MUTE(0, nid, v));
}
static void
hdaa_audio_ctl_amp_set(struct hdaa_audio_ctl *ctl, uint32_t mute,
int left, int right)
{
nid_t nid;
int lmute, rmute;
nid = ctl->widget->nid;
/* Save new values if valid. */
if (mute != HDAA_AMP_MUTE_DEFAULT)
ctl->muted = mute;
if (left != HDAA_AMP_VOL_DEFAULT)
ctl->left = left;
if (right != HDAA_AMP_VOL_DEFAULT)
ctl->right = right;
/* Prepare effective values */
if (ctl->forcemute) {
lmute = 1;
rmute = 1;
left = 0;
right = 0;
} else {
lmute = HDAA_AMP_LEFT_MUTED(ctl->muted);
rmute = HDAA_AMP_RIGHT_MUTED(ctl->muted);
left = ctl->left;
right = ctl->right;
}
/* Apply effective values */
if (ctl->dir & HDAA_CTL_OUT)
hdaa_audio_ctl_amp_set_internal(ctl->widget->devinfo, nid, ctl->index,
lmute, rmute, left, right, 0);
if (ctl->dir & HDAA_CTL_IN)
hdaa_audio_ctl_amp_set_internal(ctl->widget->devinfo, nid, ctl->index,
lmute, rmute, left, right, 1);
}
static void
hdaa_widget_connection_select(struct hdaa_widget *w, uint8_t index)
{
if (w == NULL || w->nconns < 1 || index > (w->nconns - 1))
return;
HDA_BOOTHVERBOSE(
device_printf(w->devinfo->dev,
"Setting selector nid=%d index=%d\n", w->nid, index);
);
hda_command(w->devinfo->dev,
HDA_CMD_SET_CONNECTION_SELECT_CONTROL(0, w->nid, index));
w->selconn = index;
}
/****************************************************************************
* Device Methods
****************************************************************************/
static void *
hdaa_channel_init(kobj_t obj, void *data, struct snd_dbuf *b,
struct pcm_channel *c, int dir)
{
struct hdaa_chan *ch = data;
struct hdaa_pcm_devinfo *pdevinfo = ch->pdevinfo;
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
hdaa_lock(devinfo);
if (devinfo->quirks & HDAA_QUIRK_FIXEDRATE) {
ch->caps.minspeed = ch->caps.maxspeed = 48000;
ch->pcmrates[0] = 48000;
ch->pcmrates[1] = 0;
}
ch->dir = dir;
ch->b = b;
ch->c = c;
ch->blksz = pdevinfo->chan_size / pdevinfo->chan_blkcnt;
ch->blkcnt = pdevinfo->chan_blkcnt;
hdaa_unlock(devinfo);
if (sndbuf_alloc(ch->b, bus_get_dma_tag(devinfo->dev),
hda_get_dma_nocache(devinfo->dev) ? BUS_DMA_NOCACHE : 0,
pdevinfo->chan_size) != 0)
return (NULL);
return (ch);
}
static int
hdaa_channel_setformat(kobj_t obj, void *data, uint32_t format)
{
struct hdaa_chan *ch = data;
int i;
for (i = 0; ch->caps.fmtlist[i] != 0; i++) {
if (format == ch->caps.fmtlist[i]) {
ch->fmt = format;
return (0);
}
}
return (EINVAL);
}
static uint32_t
hdaa_channel_setspeed(kobj_t obj, void *data, uint32_t speed)
{
struct hdaa_chan *ch = data;
uint32_t spd = 0, threshold;
int i;
/* First look for equal or multiple frequency. */
for (i = 0; ch->pcmrates[i] != 0; i++) {
spd = ch->pcmrates[i];
if (speed != 0 && spd / speed * speed == spd) {
ch->spd = spd;
return (spd);
}
}
/* If no match, just find nearest. */
for (i = 0; ch->pcmrates[i] != 0; i++) {
spd = ch->pcmrates[i];
threshold = spd + ((ch->pcmrates[i + 1] != 0) ?
((ch->pcmrates[i + 1] - spd) >> 1) : 0);
if (speed < threshold)
break;
}
ch->spd = spd;
return (spd);
}
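/*
 * Side note (illustrative sketch, not used by the driver): the first pass
 * above accepts a rate only if the requested speed divides it evenly, which
 * the integer expression spd / speed * speed == spd tests without a modulo.
 * E.g. with rates { 44100, 48000, 96000 } a request for 24000 matches
 * 48000, while 50000 matches nothing and falls through to the
 * nearest-by-midpoint pass, which picks 48000 because 50000 lies below
 * the 48000..96000 midpoint of 72000.
 */
static inline int
hdaa_example_rate_is_multiple(uint32_t spd, uint32_t speed)
{
return (speed != 0 && spd / speed * speed == spd);
}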
static uint16_t
hdaa_stream_format(struct hdaa_chan *ch)
{
int i;
uint16_t fmt;
fmt = 0;
if (ch->fmt & AFMT_S16_LE)
fmt |= ch->bit16 << 4;
else if (ch->fmt & AFMT_S32_LE)
fmt |= ch->bit32 << 4;
else
fmt |= 1 << 4;
for (i = 0; i < HDA_RATE_TAB_LEN; i++) {
if (hda_rate_tab[i].valid && ch->spd == hda_rate_tab[i].rate) {
fmt |= hda_rate_tab[i].base;
fmt |= hda_rate_tab[i].mul;
fmt |= hda_rate_tab[i].div;
break;
}
}
fmt |= (AFMT_CHANNEL(ch->fmt) - 1);
return (fmt);
}
static int
hdaa_allowed_stripes(uint16_t fmt)
{
static const int bits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
int size;
size = bits[(fmt >> 4) & 0x03];
size *= (fmt & 0x0f) + 1;
size *= ((fmt >> 11) & 0x07) + 1;
return (0xffffffffU >> (32 - fls(size / 8)));
}
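/*
 * Worked example (illustrative sketch): for 48 kHz, 16-bit, stereo the
 * stream format word is 0x0011 (BITS=1 -> 16 bit, CHAN=1 -> 2 channels,
 * MULT=0 -> x1), so one frame is 16 * 2 * 1 = 32 bits = 4 bytes.
 * fls(4) == 3, hence the mask above becomes 0xffffffff >> 29 == 0x7,
 * i.e. stripe settings for 1, 2 or 4 SDO lines are wide enough for
 * this stream.
 */
static inline int
hdaa_example_frame_bytes(uint16_t fmt)
{
static const int bits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
return (bits[(fmt >> 4) & 0x03] * ((fmt & 0x0f) + 1) *
(((fmt >> 11) & 0x07) + 1) / 8);
}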
static void
hdaa_audio_setup(struct hdaa_chan *ch)
{
struct hdaa_audio_as *as = &ch->devinfo->as[ch->as];
struct hdaa_widget *w, *wp;
int i, j, k, chn, cchn, totalchn, totalextchn, c;
uint16_t fmt, dfmt;
/* Mapping channel pairs to codec pins/converters. */
const static uint16_t convmap[2][5] =
/* 1.0 2.0 4.0 5.1 7.1 */
{{ 0x0010, 0x0001, 0x0201, 0x0231, 0x4231 }, /* no dup. */
{ 0x0010, 0x0001, 0x2201, 0x2231, 0x4231 }}; /* side dup. */
/* Mapping formats to HDMI channel allocations. */
const static uint8_t hdmica[2][8] =
/* 1 2 3 4 5 6 7 8 */
{{ 0x02, 0x00, 0x04, 0x08, 0x0a, 0x0e, 0x12, 0x12 }, /* x.0 */
{ 0x01, 0x03, 0x01, 0x03, 0x09, 0x0b, 0x0f, 0x13 }}; /* x.1 */
/* Mapping formats to HDMI channels order. */
const static uint32_t hdmich[2][8] =
/* 1 / 5 2 / 6 3 / 7 4 / 8 */
{{ 0xFFFF0F00, 0xFFFFFF10, 0xFFF2FF10, 0xFF32FF10,
0xFF324F10, 0xF5324F10, 0x54326F10, 0x54326F10 }, /* x.0 */
{ 0xFFFFF000, 0xFFFF0100, 0xFFFFF210, 0xFFFF2310,
0xFF32F410, 0xFF324510, 0xF6324510, 0x76325410 }}; /* x.1 */
int convmapid = -1;
nid_t nid;
uint8_t csum;
totalchn = AFMT_CHANNEL(ch->fmt);
totalextchn = AFMT_EXTCHANNEL(ch->fmt);
HDA_BOOTHVERBOSE(
device_printf(ch->pdevinfo->dev,
"PCMDIR_%s: Stream setup fmt=%08x (%d.%d) speed=%d\n",
(ch->dir == PCMDIR_PLAY) ? "PLAY" : "REC",
ch->fmt, totalchn - totalextchn, totalextchn, ch->spd);
);
fmt = hdaa_stream_format(ch);
/* Set channels to I/O converters mapping for known speaker setups. */
if ((as->pinset == 0x0007 || as->pinset == 0x0013) || /* Standard 5.1 */
(as->pinset == 0x0017)) /* Standard 7.1 */
convmapid = (ch->dir == PCMDIR_PLAY);
dfmt = HDA_CMD_SET_DIGITAL_CONV_FMT1_DIGEN;
if (ch->fmt & AFMT_AC3)
dfmt |= HDA_CMD_SET_DIGITAL_CONV_FMT1_NAUDIO;
chn = 0;
for (i = 0; ch->io[i] != -1; i++) {
w = hdaa_widget_get(ch->devinfo, ch->io[i]);
if (w == NULL)
continue;
/* If HP redirection is enabled, but it failed to use the same
DAC, make the last DAC duplicate the first one. */
if (as->fakeredir && i == (as->pincnt - 1)) {
c = (ch->sid << 4);
} else {
/* Map channels to I/O converters, if set. */
if (convmapid >= 0)
chn = (((convmap[convmapid][totalchn / 2]
>> i * 4) & 0xf) - 1) * 2;
if (chn < 0 || chn >= totalchn) {
c = 0;
} else {
c = (ch->sid << 4) | chn;
}
}
hda_command(ch->devinfo->dev,
HDA_CMD_SET_CONV_FMT(0, ch->io[i], fmt));
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) {
hda_command(ch->devinfo->dev,
HDA_CMD_SET_DIGITAL_CONV_FMT1(0, ch->io[i], dfmt));
}
hda_command(ch->devinfo->dev,
HDA_CMD_SET_CONV_STREAM_CHAN(0, ch->io[i], c));
if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) {
hda_command(ch->devinfo->dev,
HDA_CMD_SET_STRIPE_CONTROL(0, w->nid, ch->stripectl));
}
cchn = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap);
if (cchn > 1 && chn < totalchn) {
cchn = min(cchn, totalchn - chn - 1);
hda_command(ch->devinfo->dev,
HDA_CMD_SET_CONV_CHAN_COUNT(0, ch->io[i], cchn));
}
HDA_BOOTHVERBOSE(
device_printf(ch->pdevinfo->dev,
"PCMDIR_%s: Stream setup nid=%d: "
"fmt=0x%04x, dfmt=0x%04x, chan=0x%04x, "
"chan_count=0x%02x, stripe=%d\n",
(ch->dir == PCMDIR_PLAY) ? "PLAY" : "REC",
ch->io[i], fmt, dfmt, c, cchn, ch->stripectl);
);
for (j = 0; j < 16; j++) {
if (as->dacs[ch->asindex][j] != ch->io[i])
continue;
nid = as->pins[j];
wp = hdaa_widget_get(ch->devinfo, nid);
if (wp == NULL)
continue;
if (!HDA_PARAM_PIN_CAP_DP(wp->wclass.pin.cap) &&
!HDA_PARAM_PIN_CAP_HDMI(wp->wclass.pin.cap))
continue;
/* Set channel mapping. */
for (k = 0; k < 8; k++) {
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_CHAN_SLOT(0, nid,
(((hdmich[totalextchn == 0 ? 0 : 1][totalchn - 1]
>> (k * 4)) & 0xf) << 4) | k));
}
/*
* Enable High Bit Rate (HBR) Encoded Packet Type
* (EPT), if supported and needed (8ch data).
*/
if (HDA_PARAM_PIN_CAP_HDMI(wp->wclass.pin.cap) &&
HDA_PARAM_PIN_CAP_HBR(wp->wclass.pin.cap)) {
wp->wclass.pin.ctrl &=
~HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK;
if ((ch->fmt & AFMT_AC3) && (cchn == 7))
wp->wclass.pin.ctrl |= 0x03;
hda_command(ch->devinfo->dev,
HDA_CMD_SET_PIN_WIDGET_CTRL(0, nid,
wp->wclass.pin.ctrl));
}
/* Stop audio infoframe transmission. */
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_XMIT(0, nid, 0x00));
/* Clear audio infoframe buffer. */
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00));
for (k = 0; k < 32; k++)
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00));
/* Write HDMI/DisplayPort audio infoframe. */
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00));
if (w->eld != NULL && w->eld_len >= 6 &&
((w->eld[5] >> 2) & 0x3) == 1) { /* DisplayPort */
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x84));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x1b));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x44));
} else { /* HDMI */
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x84));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x01));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x0a));
csum = 0;
csum -= 0x84 + 0x01 + 0x0a + (totalchn - 1) +
hdmica[totalextchn == 0 ? 0 : 1][totalchn - 1];
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, csum));
}
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, totalchn - 1));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_DATA(0, nid,
hdmica[totalextchn == 0 ? 0 : 1][totalchn - 1]));
/* Start audio infoframe transmission. */
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00));
hda_command(ch->devinfo->dev,
HDA_CMD_SET_HDMI_DIP_XMIT(0, nid, 0xc0));
}
chn += cchn + 1;
}
}
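/*
 * Illustrative sketch of the HDMI audio infoframe checksum used above
 * (hypothetical helper): the checksum byte is chosen so that all
 * transmitted header and payload bytes sum to zero modulo 256.  For plain
 * stereo (totalchn = 2, CA = 0x00) this gives
 * csum = -(0x84 + 0x01 + 0x0a + 0x01 + 0x00) = 0x70.
 */
static inline uint8_t
hdaa_example_hdmi_csum(uint8_t cc, uint8_t ca)
{
uint8_t csum;
csum = 0;
/* type, version, length, channel count field, channel allocation */
csum -= 0x84 + 0x01 + 0x0a + cc + ca;
return (csum);
}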
/*
* Greatest Common Divisor.
*/
static unsigned
gcd(unsigned a, unsigned b)
{
u_int c;
while (b != 0) {
c = a;
a = b;
b = (c % b);
}
return (a);
}
/*
* Least Common Multiple.
*/
static unsigned
lcm(unsigned a, unsigned b)
{
return ((a * b) / gcd(a, b));
}
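/*
 * Usage note (illustrative sketch): hdaa_channel_setfragments() below uses
 * lcm() to round the block size down to a multiple of both the controller
 * DMA alignment and the sound buffer alignment (typically the sample frame
 * size), so every DMA block carries whole frames.  E.g. for a 12-byte
 * frame and, assuming HDA_DMA_ALIGNMENT is 128, lcm(128, 12) =
 * 128 * 12 / gcd(128, 12) = 384, so the block size becomes a multiple of
 * 384 bytes.
 */
static inline unsigned
hdaa_example_align_blksz(unsigned blksz, unsigned framesz)
{
return (blksz - blksz % lcm(HDA_DMA_ALIGNMENT, framesz));
}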
static int
hdaa_channel_setfragments(kobj_t obj, void *data,
uint32_t blksz, uint32_t blkcnt)
{
struct hdaa_chan *ch = data;
blksz -= blksz % lcm(HDA_DMA_ALIGNMENT, sndbuf_getalign(ch->b));
if (blksz > (sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN))
blksz = sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN;
if (blksz < HDA_BLK_MIN)
blksz = HDA_BLK_MIN;
if (blkcnt > HDA_BDL_MAX)
blkcnt = HDA_BDL_MAX;
if (blkcnt < HDA_BDL_MIN)
blkcnt = HDA_BDL_MIN;
while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->b)) {
if ((blkcnt >> 1) >= HDA_BDL_MIN)
blkcnt >>= 1;
else if ((blksz >> 1) >= HDA_BLK_MIN)
blksz >>= 1;
else
break;
}
if ((sndbuf_getblksz(ch->b) != blksz ||
sndbuf_getblkcnt(ch->b) != blkcnt) &&
sndbuf_resize(ch->b, blkcnt, blksz) != 0)
device_printf(ch->devinfo->dev, "%s: failed blksz=%u blkcnt=%u\n",
__func__, blksz, blkcnt);
ch->blksz = sndbuf_getblksz(ch->b);
ch->blkcnt = sndbuf_getblkcnt(ch->b);
return (0);
}
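/*
 * Illustrative sketch of the fitting loop above (hypothetical bounds passed
 * as parameters): shrink the block count first and the block size second,
 * halving each, until blksz * blkcnt fits into the buffer or neither can
 * shrink any further.
 */
static inline void
hdaa_example_fit_blocks(uint32_t *blksz, uint32_t *blkcnt, uint32_t maxsize,
uint32_t minblk, uint32_t mincnt)
{
while (*blksz * *blkcnt > maxsize) {
if ((*blkcnt >> 1) >= mincnt)
*blkcnt >>= 1;
else if ((*blksz >> 1) >= minblk)
*blksz >>= 1;
else
break;
}
}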
static uint32_t
hdaa_channel_setblocksize(kobj_t obj, void *data, uint32_t blksz)
{
struct hdaa_chan *ch = data;
hdaa_channel_setfragments(obj, data, blksz, ch->pdevinfo->chan_blkcnt);
return (ch->blksz);
}
static void
hdaa_channel_stop(struct hdaa_chan *ch)
{
struct hdaa_devinfo *devinfo = ch->devinfo;
struct hdaa_widget *w;
int i;
if ((ch->flags & HDAA_CHN_RUNNING) == 0)
return;
ch->flags &= ~HDAA_CHN_RUNNING;
HDAC_STREAM_STOP(device_get_parent(devinfo->dev), devinfo->dev,
ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid);
for (i = 0; ch->io[i] != -1; i++) {
w = hdaa_widget_get(ch->devinfo, ch->io[i]);
if (w == NULL)
continue;
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) {
hda_command(devinfo->dev,
HDA_CMD_SET_DIGITAL_CONV_FMT1(0, ch->io[i], 0));
}
hda_command(devinfo->dev,
HDA_CMD_SET_CONV_STREAM_CHAN(0, ch->io[i],
0));
}
HDAC_STREAM_FREE(device_get_parent(devinfo->dev), devinfo->dev,
ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid);
}
static int
hdaa_channel_start(struct hdaa_chan *ch)
{
struct hdaa_devinfo *devinfo = ch->devinfo;
uint32_t fmt;
fmt = hdaa_stream_format(ch);
ch->stripectl = fls(ch->stripecap & hdaa_allowed_stripes(fmt) &
hda_get_stripes_mask(devinfo->dev)) - 1;
ch->sid = HDAC_STREAM_ALLOC(device_get_parent(devinfo->dev), devinfo->dev,
ch->dir == PCMDIR_PLAY ? 1 : 0, fmt, ch->stripectl, &ch->dmapos);
if (ch->sid <= 0)
return (EBUSY);
hdaa_audio_setup(ch);
HDAC_STREAM_RESET(device_get_parent(devinfo->dev), devinfo->dev,
ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid);
HDAC_STREAM_START(device_get_parent(devinfo->dev), devinfo->dev,
ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid,
sndbuf_getbufaddr(ch->b), ch->blksz, ch->blkcnt);
ch->flags |= HDAA_CHN_RUNNING;
return (0);
}
static int
hdaa_channel_trigger(kobj_t obj, void *data, int go)
{
struct hdaa_chan *ch = data;
int error = 0;
if (!PCMTRIG_COMMON(go))
return (0);
hdaa_lock(ch->devinfo);
switch (go) {
case PCMTRIG_START:
error = hdaa_channel_start(ch);
break;
case PCMTRIG_STOP:
case PCMTRIG_ABORT:
hdaa_channel_stop(ch);
break;
default:
break;
}
hdaa_unlock(ch->devinfo);
return (error);
}
static uint32_t
hdaa_channel_getptr(kobj_t obj, void *data)
{
struct hdaa_chan *ch = data;
struct hdaa_devinfo *devinfo = ch->devinfo;
uint32_t ptr;
hdaa_lock(devinfo);
if (ch->dmapos != NULL) {
ptr = *(ch->dmapos);
} else {
ptr = HDAC_STREAM_GETPTR(
device_get_parent(devinfo->dev), devinfo->dev,
ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid);
}
hdaa_unlock(devinfo);
/*
* Round to the available space and force 128-byte alignment.
*/
ptr %= ch->blksz * ch->blkcnt;
ptr &= HDA_BLK_ALIGN;
return (ptr);
}
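/*
 * Illustrative sketch (hypothetical helper): the DMA position above is
 * first wrapped to the ring buffer size and then masked down to the block
 * alignment (HDA_BLK_ALIGN above).  E.g. a raw position of 4000 in a
 * 16384-byte buffer with 128-byte alignment reports as 4000 & ~127 = 3968.
 */
static inline uint32_t
hdaa_example_wrap_ptr(uint32_t raw, uint32_t bufsz, uint32_t align)
{
/* 'align' is assumed to be a power of two, e.g. 128. */
return ((raw % bufsz) & ~(align - 1));
}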
static struct pcmchan_caps *
hdaa_channel_getcaps(kobj_t obj, void *data)
{
return (&((struct hdaa_chan *)data)->caps);
}
static kobj_method_t hdaa_channel_methods[] = {
KOBJMETHOD(channel_init, hdaa_channel_init),
KOBJMETHOD(channel_setformat, hdaa_channel_setformat),
KOBJMETHOD(channel_setspeed, hdaa_channel_setspeed),
KOBJMETHOD(channel_setblocksize, hdaa_channel_setblocksize),
KOBJMETHOD(channel_setfragments, hdaa_channel_setfragments),
KOBJMETHOD(channel_trigger, hdaa_channel_trigger),
KOBJMETHOD(channel_getptr, hdaa_channel_getptr),
KOBJMETHOD(channel_getcaps, hdaa_channel_getcaps),
KOBJMETHOD_END
};
CHANNEL_DECLARE(hdaa_channel);
static int
hdaa_audio_ctl_ossmixer_init(struct snd_mixer *m)
{
struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m);
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w, *cw;
uint32_t mask, recmask;
int i, j;
hdaa_lock(devinfo);
pdevinfo->mixer = m;
/* Make sure that in case of soft volume it won't stay muted. */
for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
pdevinfo->left[i] = 100;
pdevinfo->right[i] = 100;
}
/* Declare volume controls assigned to this association. */
mask = pdevinfo->ossmask;
if (pdevinfo->playas >= 0) {
/* Declare EAPD as the ogain control. */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX ||
w->param.eapdbtl == HDA_INVALID ||
w->bindas != pdevinfo->playas)
continue;
mask |= SOUND_MASK_OGAIN;
break;
}
/* Declare soft PCM volume if needed. */
if ((mask & SOUND_MASK_PCM) == 0 ||
(devinfo->quirks & HDAA_QUIRK_SOFTPCMVOL) ||
pdevinfo->minamp[SOUND_MIXER_PCM] ==
pdevinfo->maxamp[SOUND_MIXER_PCM]) {
mask |= SOUND_MASK_PCM;
pcm_setflags(pdevinfo->dev, pcm_getflags(pdevinfo->dev) | SD_F_SOFTPCMVOL);
HDA_BOOTHVERBOSE(
device_printf(pdevinfo->dev,
"Forcing Soft PCM volume\n");
);
}
/* Declare master volume if needed. */
if ((mask & SOUND_MASK_VOLUME) == 0) {
mask |= SOUND_MASK_VOLUME;
mix_setparentchild(m, SOUND_MIXER_VOLUME,
SOUND_MASK_PCM);
mix_setrealdev(m, SOUND_MIXER_VOLUME,
SOUND_MIXER_NONE);
HDA_BOOTHVERBOSE(
device_printf(pdevinfo->dev,
"Forcing master volume with PCM\n");
);
}
}
/* Declare record sources available to this association. */
recmask = 0;
if (pdevinfo->recas >= 0) {
for (i = 0; i < 16; i++) {
if (devinfo->as[pdevinfo->recas].dacs[0][i] < 0)
continue;
w = hdaa_widget_get(devinfo,
devinfo->as[pdevinfo->recas].dacs[0][i]);
if (w == NULL || w->enable == 0)
continue;
for (j = 0; j < w->nconns; j++) {
if (w->connsenable[j] == 0)
continue;
cw = hdaa_widget_get(devinfo, w->conns[j]);
if (cw == NULL || cw->enable == 0)
continue;
if (cw->bindas != pdevinfo->recas &&
cw->bindas != -2)
continue;
recmask |= cw->ossmask;
}
}
}
recmask &= (1 << SOUND_MIXER_NRDEVICES) - 1;
mask &= (1 << SOUND_MIXER_NRDEVICES) - 1;
pdevinfo->ossmask = mask;
mix_setrecdevs(m, recmask);
mix_setdevs(m, mask);
hdaa_unlock(devinfo);
return (0);
}
/*
* Update amplification per pdevinfo per ossdev, calculate summary coefficient
* and write it to codec, update *left and *right to reflect remaining error.
*/
static void
hdaa_audio_ctl_dev_set(struct hdaa_audio_ctl *ctl, int ossdev,
int mute, int *left, int *right)
{
int i, zleft, zright, sleft, sright, smute, lval, rval;
ctl->devleft[ossdev] = *left;
ctl->devright[ossdev] = *right;
ctl->devmute[ossdev] = mute;
smute = sleft = sright = zleft = zright = 0;
for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
sleft += ctl->devleft[i];
sright += ctl->devright[i];
smute |= ctl->devmute[i];
if (i == ossdev)
continue;
zleft += ctl->devleft[i];
zright += ctl->devright[i];
}
lval = QDB2VAL(ctl, sleft);
rval = QDB2VAL(ctl, sright);
hdaa_audio_ctl_amp_set(ctl, smute, lval, rval);
*left -= VAL2QDB(ctl, lval) - VAL2QDB(ctl, QDB2VAL(ctl, zleft));
*right -= VAL2QDB(ctl, rval) - VAL2QDB(ctl, QDB2VAL(ctl, zright));
}
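/*
 * Illustrative sketch (hypothetical helper, simplified math): a requested
 * gain in 1/4 dB is quantized to the amplifier's step size; the part this
 * control cannot express is handed back so the next control in the path
 * may compensate, which is what the *left/*right updates above do via
 * QDB2VAL()/VAL2QDB().
 */
static inline int
hdaa_example_apply_gain(int want_qdb, int step_qdb, int *residual_qdb)
{
int val;
val = want_qdb / step_qdb; /* quantized amplifier value */
*residual_qdb = want_qdb - val * step_qdb; /* error left for others */
return (val);
}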
/*
* Trace signal from source, setting volumes on the way.
*/
static void
hdaa_audio_ctl_source_volume(struct hdaa_pcm_devinfo *pdevinfo,
int ossdev, nid_t nid, int index, int mute, int left, int right, int depth)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w, *wc;
struct hdaa_audio_ctl *ctl;
int i, j, conns = 0;
if (depth > HDA_PARSE_MAXDEPTH)
return;
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return;
/* Count number of active inputs. */
if (depth > 0) {
for (j = 0; j < w->nconns; j++) {
if (!w->connsenable[j])
continue;
conns++;
}
}
/* If this is not the first step, use the input mixer.
Pins have a common input ctl, so care must be taken. */
if (depth > 0 && (conns == 1 ||
w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)) {
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN,
index, 1);
if (ctl)
hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right);
}
/* If the widget has its own ossdev, do not traverse it.
It will be traversed on its own. */
if (w->ossdev >= 0 && depth > 0)
return;
/* We must not traverse pins. */
if ((w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) &&
depth > 0)
return;
/*
* If signals are mixed, we can't assign controls any farther.
* Ignore this at depth zero; the caller must know why.
*/
if (conns > 1 &&
(w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER ||
w->selconn != index))
return;
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1);
if (ctl)
hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right);
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
wc = hdaa_widget_get(devinfo, i);
if (wc == NULL || wc->enable == 0)
continue;
for (j = 0; j < wc->nconns; j++) {
if (wc->connsenable[j] && wc->conns[j] == nid) {
hdaa_audio_ctl_source_volume(pdevinfo, ossdev,
wc->nid, j, mute, left, right, depth + 1);
}
}
}
return;
}
/*
* Trace signal from destination, setting volumes on the way.
*/
static void
hdaa_audio_ctl_dest_volume(struct hdaa_pcm_devinfo *pdevinfo,
int ossdev, nid_t nid, int index, int mute, int left, int right, int depth)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w, *wc;
struct hdaa_audio_ctl *ctl;
int i, j, consumers, cleft, cright;
if (depth > HDA_PARSE_MAXDEPTH)
return;
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return;
if (depth > 0) {
/* If this node produces output for several consumers,
we can't touch it. */
consumers = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
wc = hdaa_widget_get(devinfo, i);
if (wc == NULL || wc->enable == 0)
continue;
for (j = 0; j < wc->nconns; j++) {
if (wc->connsenable[j] && wc->conns[j] == nid)
consumers++;
}
}
/* The only exception is when real HP redirection is configured
and this is a duplication point.
XXX: Actually the exception is not completely correct.
XXX: The duplication point check is not perfect. */
if ((consumers == 2 && (w->bindas < 0 ||
as[w->bindas].hpredir < 0 || as[w->bindas].fakeredir ||
(w->bindseqmask & (1 << 15)) == 0)) ||
consumers > 2)
return;
/* Otherwise use its output mixer. */
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid,
HDAA_CTL_OUT, -1, 1);
if (ctl)
hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right);
}
/* We must not traverse pins. */
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX &&
depth > 0)
return;
for (i = 0; i < w->nconns; i++) {
if (w->connsenable[i] == 0)
continue;
if (index >= 0 && i != index)
continue;
cleft = left;
cright = right;
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid,
HDAA_CTL_IN, i, 1);
if (ctl)
hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &cleft, &cright);
hdaa_audio_ctl_dest_volume(pdevinfo, ossdev, w->conns[i], -1,
mute, cleft, cright, depth + 1);
}
}
/*
* Set volumes for the specified pdevinfo and ossdev.
*/
static void
hdaa_audio_ctl_dev_volume(struct hdaa_pcm_devinfo *pdevinfo, unsigned dev)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w, *cw;
uint32_t mute;
int lvol, rvol;
int i, j;
mute = 0;
if (pdevinfo->left[dev] == 0) {
mute |= HDAA_AMP_MUTE_LEFT;
lvol = -4000;
} else
lvol = ((pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) *
pdevinfo->left[dev] + 50) / 100 + pdevinfo->minamp[dev];
if (pdevinfo->right[dev] == 0) {
mute |= HDAA_AMP_MUTE_RIGHT;
rvol = -4000;
} else
rvol = ((pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) *
pdevinfo->right[dev] + 50) / 100 + pdevinfo->minamp[dev];
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->bindas < 0) {
if (pdevinfo->index != 0)
continue;
} else {
if (w->bindas != pdevinfo->playas &&
w->bindas != pdevinfo->recas)
continue;
}
if (dev == SOUND_MIXER_RECLEV &&
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) {
hdaa_audio_ctl_dest_volume(pdevinfo, dev,
w->nid, -1, mute, lvol, rvol, 0);
continue;
}
if (dev == SOUND_MIXER_VOLUME &&
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX &&
devinfo->as[w->bindas].dir == HDAA_CTL_OUT) {
hdaa_audio_ctl_dest_volume(pdevinfo, dev,
w->nid, -1, mute, lvol, rvol, 0);
continue;
}
if (dev == SOUND_MIXER_IGAIN &&
w->pflags & HDAA_ADC_MONITOR) {
for (j = 0; j < w->nconns; j++) {
if (!w->connsenable[j])
continue;
cw = hdaa_widget_get(devinfo, w->conns[j]);
if (cw == NULL || cw->enable == 0)
continue;
if (cw->bindas == -1)
continue;
if (cw->bindas >= 0 &&
devinfo->as[cw->bindas].dir != HDAA_CTL_IN)
continue;
hdaa_audio_ctl_dest_volume(pdevinfo, dev,
w->nid, j, mute, lvol, rvol, 0);
}
continue;
}
if (w->ossdev != dev)
continue;
hdaa_audio_ctl_source_volume(pdevinfo, dev,
w->nid, -1, mute, lvol, rvol, 0);
if (dev == SOUND_MIXER_IMIX && (w->pflags & HDAA_IMIX_AS_DST))
hdaa_audio_ctl_dest_volume(pdevinfo, dev,
w->nid, -1, mute, lvol, rvol, 0);
}
}
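/*
 * Worked example (illustrative, hypothetical range): the OSS 0..100 volume
 * is mapped linearly onto the control's [minamp, maxamp] range in 1/4 dB
 * units with rounding, as in the lvol/rvol computation above.  For
 * minamp = -80 (-20 dB) and maxamp = 48 (+12 dB), a 75% setting yields
 * ((48 - -80) * 75 + 50) / 100 + -80 = 16, i.e. +4 dB.
 */
static inline int
hdaa_example_pct_to_qdb(int pct, int minamp, int maxamp)
{
return (((maxamp - minamp) * pct + 50) / 100 + minamp);
}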
/*
* OSS Mixer set method.
*/
static int
hdaa_audio_ctl_ossmixer_set(struct snd_mixer *m, unsigned dev,
unsigned left, unsigned right)
{
struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m);
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w;
int i;
hdaa_lock(devinfo);
/* Save new values. */
pdevinfo->left[dev] = left;
pdevinfo->right[dev] = right;
/* 'ogain' is the special case implemented with EAPD. */
if (dev == SOUND_MIXER_OGAIN) {
uint32_t orig;
w = NULL;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX ||
w->param.eapdbtl == HDA_INVALID)
continue;
break;
}
if (i >= devinfo->endnode) {
hdaa_unlock(devinfo);
return (-1);
}
orig = w->param.eapdbtl;
if (left == 0)
w->param.eapdbtl &= ~HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD;
else
w->param.eapdbtl |= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD;
if (orig != w->param.eapdbtl) {
uint32_t val;
val = w->param.eapdbtl;
if (devinfo->quirks & HDAA_QUIRK_EAPDINV)
val ^= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD;
hda_command(devinfo->dev,
HDA_CMD_SET_EAPD_BTL_ENABLE(0, w->nid, val));
}
hdaa_unlock(devinfo);
return (left | (left << 8));
}
/* Recalculate all controls related to this OSS device. */
hdaa_audio_ctl_dev_volume(pdevinfo, dev);
hdaa_unlock(devinfo);
return (left | (right << 8));
}
/*
* Set mixer settings to our own default values:
* +20dB for mics, -10dB for analog vol, mute for igain, 0dB for others.
*/
static void
hdaa_audio_ctl_set_defaults(struct hdaa_pcm_devinfo *pdevinfo)
{
int amp, vol, dev;
for (dev = 0; dev < SOUND_MIXER_NRDEVICES; dev++) {
if ((pdevinfo->ossmask & (1 << dev)) == 0)
continue;
/* If the value was overridden, leave it as is. */
if (resource_int_value(device_get_name(pdevinfo->dev),
device_get_unit(pdevinfo->dev), ossnames[dev], &vol) == 0)
continue;
vol = -1;
if (dev == SOUND_MIXER_OGAIN)
vol = 100;
else if (dev == SOUND_MIXER_IGAIN)
vol = 0;
else if (dev == SOUND_MIXER_MIC ||
dev == SOUND_MIXER_MONITOR)
amp = 20 * 4; /* +20dB */
else if (dev == SOUND_MIXER_VOLUME && !pdevinfo->digital)
amp = -10 * 4; /* -10dB */
else
amp = 0;
if (vol < 0 &&
(pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) <= 0) {
vol = 100;
} else if (vol < 0) {
vol = ((amp - pdevinfo->minamp[dev]) * 100 +
(pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) / 2) /
(pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]);
vol = imin(imax(vol, 1), 100);
}
mix_set(pdevinfo->mixer, dev, vol, vol);
}
}
/*
* Recursively commutate the specified record source.
*/
static uint32_t
hdaa_audio_ctl_recsel_comm(struct hdaa_pcm_devinfo *pdevinfo, uint32_t src, nid_t nid, int depth)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w, *cw;
struct hdaa_audio_ctl *ctl;
char buf[64];
int i, muted;
uint32_t res = 0;
if (depth > HDA_PARSE_MAXDEPTH)
return (0);
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return (0);
for (i = 0; i < w->nconns; i++) {
if (w->connsenable[i] == 0)
continue;
cw = hdaa_widget_get(devinfo, w->conns[i]);
if (cw == NULL || cw->enable == 0 || cw->bindas == -1)
continue;
/* Call recursively to trace the signal to its source if needed. */
if ((src & cw->ossmask) != 0) {
if (cw->ossdev < 0) {
res |= hdaa_audio_ctl_recsel_comm(pdevinfo, src,
w->conns[i], depth + 1);
} else {
res |= cw->ossmask;
}
}
/* We have two special cases: mixers and others (selectors). */
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) {
ctl = hdaa_audio_ctl_amp_get(devinfo,
w->nid, HDAA_CTL_IN, i, 1);
if (ctl == NULL)
continue;
/* If we have input controls on this node, mute them
* according to the requested sources. */
muted = (src & cw->ossmask) ? 0 : 1;
if (muted != ctl->forcemute) {
ctl->forcemute = muted;
hdaa_audio_ctl_amp_set(ctl,
HDAA_AMP_MUTE_DEFAULT,
HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT);
}
HDA_BOOTHVERBOSE(
device_printf(pdevinfo->dev,
"Recsel (%s): nid %d source %d %s\n",
hdaa_audio_ctl_ossmixer_mask2allname(
src, buf, sizeof(buf)),
nid, i, muted?"mute":"unmute");
);
} else {
if (w->nconns == 1)
break;
if ((src & cw->ossmask) == 0)
continue;
/* If we found the requested source, select it and exit. */
hdaa_widget_connection_select(w, i);
HDA_BOOTHVERBOSE(
device_printf(pdevinfo->dev,
"Recsel (%s): nid %d source %d select\n",
hdaa_audio_ctl_ossmixer_mask2allname(
src, buf, sizeof(buf)),
nid, i);
);
break;
}
}
return (res);
}
static uint32_t
hdaa_audio_ctl_ossmixer_setrecsrc(struct snd_mixer *m, uint32_t src)
{
struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m);
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w;
struct hdaa_audio_as *as;
struct hdaa_audio_ctl *ctl;
struct hdaa_chan *ch;
int i, j;
uint32_t ret = 0xffffffff;
hdaa_lock(devinfo);
if (pdevinfo->recas < 0) {
hdaa_unlock(devinfo);
return (0);
}
as = &devinfo->as[pdevinfo->recas];
/* For non-mixed associations we always record everything. */
if (!as->mixed) {
hdaa_unlock(devinfo);
return (mix_getrecdevs(m));
}
/* Commutate requested recsrc for each ADC. */
for (j = 0; j < as->num_chans; j++) {
ch = &devinfo->chans[as->chans[j]];
for (i = 0; ch->io[i] >= 0; i++) {
w = hdaa_widget_get(devinfo, ch->io[i]);
if (w == NULL || w->enable == 0)
continue;
ret &= hdaa_audio_ctl_recsel_comm(pdevinfo, src,
ch->io[i], 0);
}
}
if (ret == 0xffffffff)
ret = 0;
/*
* Some controls could be shared. Reset volumes for controls
* related to previously chosen devices, as they may no longer
* affect the signal.
*/
i = 0;
while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) {
if (ctl->enable == 0 ||
!(ctl->ossmask & pdevinfo->recsrc))
continue;
if (!((pdevinfo->playas >= 0 &&
ctl->widget->bindas == pdevinfo->playas) ||
(pdevinfo->recas >= 0 &&
ctl->widget->bindas == pdevinfo->recas) ||
(pdevinfo->index == 0 &&
ctl->widget->bindas == -2)))
continue;
for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) {
if (pdevinfo->recsrc & (1 << j)) {
ctl->devleft[j] = 0;
ctl->devright[j] = 0;
ctl->devmute[j] = 0;
}
}
}
/*
* Some controls could be shared. Set volumes for controls
* related to devices selected both previously and now.
*/
for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) {
if ((ret | pdevinfo->recsrc) & (1 << j))
hdaa_audio_ctl_dev_volume(pdevinfo, j);
}
pdevinfo->recsrc = ret;
hdaa_unlock(devinfo);
return (ret);
}
static kobj_method_t hdaa_audio_ctl_ossmixer_methods[] = {
KOBJMETHOD(mixer_init, hdaa_audio_ctl_ossmixer_init),
KOBJMETHOD(mixer_set, hdaa_audio_ctl_ossmixer_set),
KOBJMETHOD(mixer_setrecsrc, hdaa_audio_ctl_ossmixer_setrecsrc),
KOBJMETHOD_END
};
MIXER_DECLARE(hdaa_audio_ctl_ossmixer);
static void
hdaa_dump_gpi(struct hdaa_devinfo *devinfo)
{
device_t dev = devinfo->dev;
int i;
uint32_t data, wake, unsol, sticky;
if (HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap) > 0) {
data = hda_command(dev,
HDA_CMD_GET_GPI_DATA(0, devinfo->nid));
wake = hda_command(dev,
HDA_CMD_GET_GPI_WAKE_ENABLE_MASK(0, devinfo->nid));
unsol = hda_command(dev,
HDA_CMD_GET_GPI_UNSOLICITED_ENABLE_MASK(0, devinfo->nid));
sticky = hda_command(dev,
HDA_CMD_GET_GPI_STICKY_MASK(0, devinfo->nid));
for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap); i++) {
device_printf(dev, " GPI%d:%s%s%s state=%d", i,
(sticky & (1 << i)) ? " sticky" : "",
(unsol & (1 << i)) ? " unsol" : "",
(wake & (1 << i)) ? " wake" : "",
(data >> i) & 1);
}
}
}
static void
hdaa_dump_gpio(struct hdaa_devinfo *devinfo)
{
device_t dev = devinfo->dev;
int i;
uint32_t data, dir, enable, wake, unsol, sticky;
if (HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap) > 0) {
data = hda_command(dev,
HDA_CMD_GET_GPIO_DATA(0, devinfo->nid));
enable = hda_command(dev,
HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid));
dir = hda_command(dev,
HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid));
wake = hda_command(dev,
HDA_CMD_GET_GPIO_WAKE_ENABLE_MASK(0, devinfo->nid));
unsol = hda_command(dev,
HDA_CMD_GET_GPIO_UNSOLICITED_ENABLE_MASK(0, devinfo->nid));
sticky = hda_command(dev,
HDA_CMD_GET_GPIO_STICKY_MASK(0, devinfo->nid));
for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); i++) {
device_printf(dev, " GPIO%d: ", i);
if ((enable & (1 << i)) == 0) {
printf("disabled\n");
continue;
}
if ((dir & (1 << i)) == 0) {
printf("input%s%s%s",
(sticky & (1 << i)) ? " sticky" : "",
(unsol & (1 << i)) ? " unsol" : "",
(wake & (1 << i)) ? " wake" : "");
} else
printf("output");
printf(" state=%d\n", (data >> i) & 1);
}
}
}
static void
hdaa_dump_gpo(struct hdaa_devinfo *devinfo)
{
device_t dev = devinfo->dev;
int i;
uint32_t data;
if (HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap) > 0) {
data = hda_command(dev,
HDA_CMD_GET_GPO_DATA(0, devinfo->nid));
for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); i++) {
device_printf(dev, " GPO%d: state=%d", i,
(data >> i) & 1);
}
}
}
static void
hdaa_audio_parse(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w;
uint32_t res;
int i;
nid_t nid;
nid = devinfo->nid;
res = hda_command(devinfo->dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_GPIO_COUNT));
devinfo->gpio_cap = res;
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"NumGPIO=%d NumGPO=%d "
"NumGPI=%d GPIWake=%d GPIUnsol=%d\n",
HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_GPI_WAKE(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_GPI_UNSOL(devinfo->gpio_cap));
hdaa_dump_gpi(devinfo);
hdaa_dump_gpio(devinfo);
hdaa_dump_gpo(devinfo);
);
res = hda_command(devinfo->dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_STREAM_FORMATS));
devinfo->supp_stream_formats = res;
res = hda_command(devinfo->dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_PCM_SIZE_RATE));
devinfo->supp_pcm_size_rate = res;
res = hda_command(devinfo->dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_OUTPUT_AMP_CAP));
devinfo->outamp_cap = res;
res = hda_command(devinfo->dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_INPUT_AMP_CAP));
devinfo->inamp_cap = res;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL)
device_printf(devinfo->dev, "Ghost widget! nid=%d!\n", i);
else {
w->devinfo = devinfo;
w->nid = i;
w->enable = 1;
w->selconn = -1;
w->pflags = 0;
w->ossdev = -1;
w->bindas = -1;
w->param.eapdbtl = HDA_INVALID;
hdaa_widget_parse(w);
}
}
}
static void
hdaa_audio_postprocess(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w;
int i;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL)
continue;
hdaa_widget_postprocess(w);
}
}
static void
hdaa_audio_ctl_parse(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_ctl *ctls;
struct hdaa_widget *w, *cw;
int i, j, cnt, max, ocap, icap;
int mute, offset, step, size;
/* XXX This is redundant */
max = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->param.outamp_cap != 0)
max++;
if (w->param.inamp_cap != 0) {
switch (w->type) {
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR:
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER:
for (j = 0; j < w->nconns; j++) {
cw = hdaa_widget_get(devinfo,
w->conns[j]);
if (cw == NULL || cw->enable == 0)
continue;
max++;
}
break;
default:
max++;
break;
}
}
}
devinfo->ctlcnt = max;
if (max < 1)
return;
- ctls = (struct hdaa_audio_ctl *)mallocarray(max,
- sizeof(*ctls), M_HDAA, M_ZERO | M_NOWAIT);
+ ctls = (struct hdaa_audio_ctl *)malloc(
+ sizeof(*ctls) * max, M_HDAA, M_ZERO | M_NOWAIT);
if (ctls == NULL) {
/* Blekh! */
device_printf(devinfo->dev, "unable to allocate ctls!\n");
devinfo->ctlcnt = 0;
return;
}
cnt = 0;
for (i = devinfo->startnode; cnt < max && i < devinfo->endnode; i++) {
if (cnt >= max) {
device_printf(devinfo->dev, "%s: Ctl overflow!\n",
__func__);
break;
}
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
ocap = w->param.outamp_cap;
icap = w->param.inamp_cap;
if (ocap != 0) {
mute = HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(ocap);
step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(ocap);
size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(ocap);
offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(ocap);
/*if (offset > step) {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"BUGGY outamp: nid=%d "
"[offset=%d > step=%d]\n",
w->nid, offset, step);
);
offset = step;
}*/
ctls[cnt].enable = 1;
ctls[cnt].widget = w;
ctls[cnt].mute = mute;
ctls[cnt].step = step;
ctls[cnt].size = size;
ctls[cnt].offset = offset;
ctls[cnt].left = offset;
ctls[cnt].right = offset;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX ||
w->waspin)
ctls[cnt].ndir = HDAA_CTL_IN;
else
ctls[cnt].ndir = HDAA_CTL_OUT;
ctls[cnt++].dir = HDAA_CTL_OUT;
}
if (icap != 0) {
mute = HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(icap);
step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(icap);
size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(icap);
offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(icap);
/*if (offset > step) {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"BUGGY inamp: nid=%d "
"[offset=%d > step=%d]\n",
w->nid, offset, step);
);
offset = step;
}*/
switch (w->type) {
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR:
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER:
for (j = 0; j < w->nconns; j++) {
if (cnt >= max) {
device_printf(devinfo->dev,
"%s: Ctl overflow!\n",
__func__);
break;
}
cw = hdaa_widget_get(devinfo,
w->conns[j]);
if (cw == NULL || cw->enable == 0)
continue;
ctls[cnt].enable = 1;
ctls[cnt].widget = w;
ctls[cnt].childwidget = cw;
ctls[cnt].index = j;
ctls[cnt].mute = mute;
ctls[cnt].step = step;
ctls[cnt].size = size;
ctls[cnt].offset = offset;
ctls[cnt].left = offset;
ctls[cnt].right = offset;
ctls[cnt].ndir = HDAA_CTL_IN;
ctls[cnt++].dir = HDAA_CTL_IN;
}
break;
default:
if (cnt >= max) {
device_printf(devinfo->dev,
"%s: Ctl overflow!\n",
__func__);
break;
}
ctls[cnt].enable = 1;
ctls[cnt].widget = w;
ctls[cnt].mute = mute;
ctls[cnt].step = step;
ctls[cnt].size = size;
ctls[cnt].offset = offset;
ctls[cnt].left = offset;
ctls[cnt].right = offset;
if (w->type ==
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
ctls[cnt].ndir = HDAA_CTL_OUT;
else
ctls[cnt].ndir = HDAA_CTL_IN;
ctls[cnt++].dir = HDAA_CTL_IN;
break;
}
}
}
devinfo->ctl = ctls;
}
static void
hdaa_audio_as_parse(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as;
struct hdaa_widget *w;
int i, j, cnt, max, type, dir, assoc, seq, first, hpredir;
/* Count present associations */
max = 0;
for (j = 1; j < 16; j++) {
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (HDA_CONFIG_DEFAULTCONF_ASSOCIATION(w->wclass.pin.config)
!= j)
continue;
max++;
if (j != 15) /* There can be many 1-pin associations at #15 */
break;
}
}
devinfo->ascnt = max;
if (max < 1)
return;
- as = (struct hdaa_audio_as *)mallocarray(max,
- sizeof(*as), M_HDAA, M_ZERO | M_NOWAIT);
+ as = (struct hdaa_audio_as *)malloc(
+ sizeof(*as) * max, M_HDAA, M_ZERO | M_NOWAIT);
if (as == NULL) {
/* Blekh! */
device_printf(devinfo->dev, "unable to allocate assocs!\n");
devinfo->ascnt = 0;
return;
}
for (i = 0; i < max; i++) {
as[i].hpredir = -1;
as[i].digital = 0;
as[i].num_chans = 1;
as[i].location = -1;
}
/* Scan associations skipping as=0. */
cnt = 0;
for (j = 1; j < 16 && cnt < max; j++) {
first = 16;
hpredir = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
assoc = HDA_CONFIG_DEFAULTCONF_ASSOCIATION(w->wclass.pin.config);
seq = HDA_CONFIG_DEFAULTCONF_SEQUENCE(w->wclass.pin.config);
if (assoc != j) {
continue;
}
KASSERT(cnt < max,
("%s: Associations owerflow (%d of %d)",
__func__, cnt, max));
type = w->wclass.pin.config &
HDA_CONFIG_DEFAULTCONF_DEVICE_MASK;
/* Get pin direction. */
if (type == HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_OUT ||
type == HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER ||
type == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT ||
type == HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_OUT ||
type == HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_OUT)
dir = HDAA_CTL_OUT;
else
dir = HDAA_CTL_IN;
/* If this is the first pin, create a new association. */
if (as[cnt].pincnt == 0) {
as[cnt].enable = 1;
as[cnt].index = j;
as[cnt].dir = dir;
}
if (seq < first)
first = seq;
/* Check association correctness. */
if (as[cnt].pins[seq] != 0) {
device_printf(devinfo->dev, "%s: Duplicate pin %d (%d) "
"in association %d! Disabling association.\n",
__func__, seq, w->nid, j);
as[cnt].enable = 0;
}
if (dir != as[cnt].dir) {
device_printf(devinfo->dev, "%s: Pin %d has wrong "
"direction for association %d! Disabling "
"association.\n",
__func__, w->nid, j);
as[cnt].enable = 0;
}
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) {
as[cnt].digital |= 0x1;
if (HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap))
as[cnt].digital |= 0x2;
if (HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap))
as[cnt].digital |= 0x4;
}
if (as[cnt].location == -1) {
as[cnt].location =
HDA_CONFIG_DEFAULTCONF_LOCATION(w->wclass.pin.config);
} else if (as[cnt].location !=
HDA_CONFIG_DEFAULTCONF_LOCATION(w->wclass.pin.config)) {
as[cnt].location = -2;
}
/* Headphones with seq=15 may mean redirection. */
if (type == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT &&
seq == 15)
hpredir = 1;
as[cnt].pins[seq] = w->nid;
as[cnt].pincnt++;
/* Association 15 is a set of multiple unassociated pins. */
if (j == 15)
cnt++;
}
if (j != 15 && as[cnt].pincnt > 0) {
if (hpredir && as[cnt].pincnt > 1)
as[cnt].hpredir = first;
cnt++;
}
}
for (i = 0; i < max; i++) {
if (as[i].dir == HDAA_CTL_IN && (as[i].pincnt == 1 ||
as[i].pins[14] > 0 || as[i].pins[15] > 0))
as[i].mixed = 1;
}
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"%d associations found:\n", max);
for (i = 0; i < max; i++) {
device_printf(devinfo->dev,
"Association %d (%d) %s%s:\n",
i, as[i].index, (as[i].dir == HDAA_CTL_IN)?"in":"out",
as[i].enable?"":" (disabled)");
for (j = 0; j < 16; j++) {
if (as[i].pins[j] == 0)
continue;
device_printf(devinfo->dev,
" Pin nid=%d seq=%d\n",
as[i].pins[j], j);
}
}
);
devinfo->as = as;
}
/*
* Trace path from DAC to pin.
*/
static nid_t
hdaa_audio_trace_dac(struct hdaa_devinfo *devinfo, int as, int seq, nid_t nid,
int dupseq, int min, int only, int depth)
{
struct hdaa_widget *w;
int i, im = -1;
nid_t m = 0, ret;
if (depth > HDA_PARSE_MAXDEPTH)
return (0);
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return (0);
HDA_BOOTHVERBOSE(
if (!only) {
device_printf(devinfo->dev,
" %*stracing via nid %d\n",
depth + 1, "", w->nid);
}
);
/* Use only unused widgets */
if (w->bindas >= 0 && w->bindas != as) {
HDA_BOOTHVERBOSE(
if (!only) {
device_printf(devinfo->dev,
" %*snid %d busy by association %d\n",
depth + 1, "", w->nid, w->bindas);
}
);
return (0);
}
if (dupseq < 0) {
if (w->bindseqmask != 0) {
HDA_BOOTHVERBOSE(
if (!only) {
device_printf(devinfo->dev,
" %*snid %d busy by seqmask %x\n",
depth + 1, "", w->nid, w->bindseqmask);
}
);
return (0);
}
} else {
/* If this is headphones, allow duplicating the first pin. */
if (w->bindseqmask != 0 &&
(w->bindseqmask & (1 << dupseq)) == 0) {
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*snid %d busy by seqmask %x\n",
depth + 1, "", w->nid, w->bindseqmask);
);
return (0);
}
}
switch (w->type) {
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT:
/* Do not traverse input. AD1988 has digital monitor
for which we are not ready. */
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT:
/* If we are tracing HP, take only the DAC of the first pin. */
if ((only == 0 || only == w->nid) &&
(w->nid >= min) && (dupseq < 0 || w->nid ==
devinfo->as[as].dacs[0][dupseq]))
m = w->nid;
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX:
if (depth > 0)
break;
/* Fall */
default:
/* Find reachable DACs with smallest nid respecting constraints. */
for (i = 0; i < w->nconns; i++) {
if (w->connsenable[i] == 0)
continue;
if (w->selconn != -1 && w->selconn != i)
continue;
if ((ret = hdaa_audio_trace_dac(devinfo, as, seq,
w->conns[i], dupseq, min, only, depth + 1)) != 0) {
if (m == 0 || ret < m) {
m = ret;
im = i;
}
if (only || dupseq >= 0)
break;
}
}
if (im >= 0 && only && ((w->nconns > 1 &&
w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR))
w->selconn = im;
break;
}
if (m && only) {
w->bindas = as;
w->bindseqmask |= (1 << seq);
}
HDA_BOOTHVERBOSE(
if (!only) {
device_printf(devinfo->dev,
" %*snid %d returned %d\n",
depth + 1, "", w->nid, m);
}
);
return (m);
}
/*
* Trace path from widget to ADC.
*/
static nid_t
hdaa_audio_trace_adc(struct hdaa_devinfo *devinfo, int as, int seq, nid_t nid,
int mixed, int min, int only, int depth, int *length, int onlylength)
{
struct hdaa_widget *w, *wc;
int i, j, im, lm = HDA_PARSE_MAXDEPTH;
nid_t m = 0, ret;
if (depth > HDA_PARSE_MAXDEPTH)
return (0);
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return (0);
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*stracing via nid %d\n",
depth + 1, "", w->nid);
);
/* Use only unused widgets */
if (w->bindas >= 0 && w->bindas != as) {
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*snid %d busy by association %d\n",
depth + 1, "", w->nid, w->bindas);
);
return (0);
}
if (!mixed && w->bindseqmask != 0) {
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*snid %d busy by seqmask %x\n",
depth + 1, "", w->nid, w->bindseqmask);
);
return (0);
}
switch (w->type) {
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT:
if ((only == 0 || only == w->nid) && (w->nid >= min) &&
(onlylength == 0 || onlylength == depth)) {
m = w->nid;
*length = depth;
}
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX:
if (depth > 0)
break;
/* Fall */
default:
/* Try to find reachable ADCs with specified nid. */
for (j = devinfo->startnode; j < devinfo->endnode; j++) {
wc = hdaa_widget_get(devinfo, j);
if (wc == NULL || wc->enable == 0)
continue;
im = -1;
for (i = 0; i < wc->nconns; i++) {
if (wc->connsenable[i] == 0)
continue;
if (wc->conns[i] != nid)
continue;
if ((ret = hdaa_audio_trace_adc(devinfo, as, seq,
j, mixed, min, only, depth + 1,
length, onlylength)) != 0) {
if (m == 0 || ret < m ||
(ret == m && *length < lm)) {
m = ret;
im = i;
lm = *length;
} else
*length = lm;
if (only)
break;
}
}
if (im >= 0 && only && ((wc->nconns > 1 &&
wc->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) ||
wc->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR))
wc->selconn = im;
}
break;
}
if (m && only) {
w->bindas = as;
w->bindseqmask |= (1 << seq);
}
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*snid %d returned %d\n",
depth + 1, "", w->nid, m);
);
return (m);
}
/*
* Erase trace path of the specified association.
*/
static void
hdaa_audio_undo_trace(struct hdaa_devinfo *devinfo, int as, int seq)
{
struct hdaa_widget *w;
int i;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->bindas == as) {
if (seq >= 0) {
w->bindseqmask &= ~(1 << seq);
if (w->bindseqmask == 0) {
w->bindas = -1;
w->selconn = -1;
}
} else {
w->bindas = -1;
w->bindseqmask = 0;
w->selconn = -1;
}
}
}
}
/*
* Trace association path from DAC to output
*/
static int
hdaa_audio_trace_as_out(struct hdaa_devinfo *devinfo, int as, int seq)
{
struct hdaa_audio_as *ases = devinfo->as;
int i, hpredir;
nid_t min, res;
/* Find next pin */
for (i = seq; i < 16 && ases[as].pins[i] == 0; i++)
;
/* Check if there are none left. If so, we succeeded. */
if (i == 16)
return (1);
hpredir = (i == 15 && ases[as].fakeredir == 0)?ases[as].hpredir:-1;
min = 0;
do {
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Tracing pin %d with min nid %d",
ases[as].pins[i], min);
if (hpredir >= 0)
printf(" and hpredir %d", hpredir);
printf("\n");
);
/* Trace this pin taking min nid into account. */
res = hdaa_audio_trace_dac(devinfo, as, i,
ases[as].pins[i], hpredir, min, 0, 0);
if (res == 0) {
/* If we failed, go back to the previous step and redo it. */
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Unable to trace pin %d seq %d with min "
"nid %d",
ases[as].pins[i], i, min);
if (hpredir >= 0)
printf(" and hpredir %d", hpredir);
printf("\n");
);
return (0);
}
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Pin %d traced to DAC %d",
ases[as].pins[i], res);
if (hpredir >= 0)
printf(" and hpredir %d", hpredir);
if (ases[as].fakeredir)
printf(" with fake redirection");
printf("\n");
);
/* Trace again to mark the path */
hdaa_audio_trace_dac(devinfo, as, i,
ases[as].pins[i], hpredir, min, res, 0);
ases[as].dacs[0][i] = res;
/* We succeeded, so call next. */
if (hdaa_audio_trace_as_out(devinfo, as, i + 1))
return (1);
/* If next failed, we should retry with next min */
hdaa_audio_undo_trace(devinfo, as, i);
ases[as].dacs[0][i] = 0;
min = res + 1;
} while (1);
}
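/*
 * Illustrative sketch of the backtracking pattern above (hypothetical
 * callbacks, not driver API): assign a resource to each item in order; if
 * the remaining items cannot be satisfied, undo this item's choice and
 * retry it with the next larger candidate ("min = res + 1"), until every
 * item is assigned or no candidate is left.
 */
static inline int
hdaa_example_backtrack(int item, int nitems,
int (*pick)(int item, int min), void (*undo)(int item))
{
int min, res;
if (item == nitems)
return (1);
for (min = 0; (res = pick(item, min)) != 0; min = res + 1) {
if (hdaa_example_backtrack(item + 1, nitems, pick, undo))
return (1);
undo(item);
}
return (0);
}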
/*
* Check equivalency of two DACs.
*/
static int
hdaa_audio_dacs_equal(struct hdaa_widget *w1, struct hdaa_widget *w2)
{
struct hdaa_devinfo *devinfo = w1->devinfo;
struct hdaa_widget *w3;
int i, j, c1, c2;
if (memcmp(&w1->param, &w2->param, sizeof(w1->param)))
return (0);
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w3 = hdaa_widget_get(devinfo, i);
if (w3 == NULL || w3->enable == 0)
continue;
if (w3->bindas != w1->bindas)
continue;
if (w3->nconns == 0)
continue;
c1 = c2 = -1;
for (j = 0; j < w3->nconns; j++) {
if (w3->connsenable[j] == 0)
continue;
if (w3->conns[j] == w1->nid)
c1 = j;
if (w3->conns[j] == w2->nid)
c2 = j;
}
if (c1 < 0)
continue;
if (c2 < 0)
return (0);
if (w3->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
return (0);
}
return (1);
}
/*
* Check equivalency of two ADCs.
*/
static int
hdaa_audio_adcs_equal(struct hdaa_widget *w1, struct hdaa_widget *w2)
{
struct hdaa_devinfo *devinfo = w1->devinfo;
struct hdaa_widget *w3, *w4;
int i;
if (memcmp(&w1->param, &w2->param, sizeof(w1->param)))
return (0);
if (w1->nconns != 1 || w2->nconns != 1)
return (0);
if (w1->conns[0] == w2->conns[0])
return (1);
w3 = hdaa_widget_get(devinfo, w1->conns[0]);
if (w3 == NULL || w3->enable == 0)
return (0);
w4 = hdaa_widget_get(devinfo, w2->conns[0]);
if (w4 == NULL || w4->enable == 0)
return (0);
if (w3->bindas == w4->bindas && w3->bindseqmask == w4->bindseqmask)
return (1);
if (w4->bindas >= 0)
return (0);
if (w3->type != w4->type)
return (0);
if (memcmp(&w3->param, &w4->param, sizeof(w3->param)))
return (0);
if (w3->nconns != w4->nconns)
return (0);
for (i = 0; i < w3->nconns; i++) {
if (w3->conns[i] != w4->conns[i])
return (0);
}
return (1);
}
/*
* Look for equivalent DAC/ADC to implement second channel.
*/
static void
hdaa_audio_adddac(struct hdaa_devinfo *devinfo, int asid)
{
struct hdaa_audio_as *as = &devinfo->as[asid];
struct hdaa_widget *w1, *w2;
int i, pos;
nid_t nid1, nid2;
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Looking for additional %sC "
"for association %d (%d)\n",
(as->dir == HDAA_CTL_OUT) ? "DA" : "AD",
asid, as->index);
);
/* Find the existing DAC position and return if more than one is found. */
pos = -1;
for (i = 0; i < 16; i++) {
if (as->dacs[0][i] <= 0)
continue;
if (pos >= 0 && as->dacs[0][i] != as->dacs[0][pos])
return;
pos = i;
}
nid1 = as->dacs[0][pos];
w1 = hdaa_widget_get(devinfo, nid1);
w2 = NULL;
for (nid2 = devinfo->startnode; nid2 < devinfo->endnode; nid2++) {
w2 = hdaa_widget_get(devinfo, nid2);
if (w2 == NULL || w2->enable == 0)
continue;
if (w2->bindas >= 0)
continue;
if (w1->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT) {
if (w2->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT)
continue;
if (hdaa_audio_dacs_equal(w1, w2))
break;
} else {
if (w2->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT)
continue;
if (hdaa_audio_adcs_equal(w1, w2))
break;
}
}
if (nid2 >= devinfo->endnode)
return;
w2->bindas = w1->bindas;
w2->bindseqmask = w1->bindseqmask;
if (w1->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" ADC %d considered equal to ADC %d\n", nid2, nid1);
);
w1 = hdaa_widget_get(devinfo, w1->conns[0]);
w2 = hdaa_widget_get(devinfo, w2->conns[0]);
w2->bindas = w1->bindas;
w2->bindseqmask = w1->bindseqmask;
} else {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" DAC %d considered equal to DAC %d\n", nid2, nid1);
);
}
for (i = 0; i < 16; i++) {
if (as->dacs[0][i] <= 0)
continue;
as->dacs[as->num_chans][i] = nid2;
}
as->num_chans++;
}
/*
* Trace association path from input to ADC
*/
static int
hdaa_audio_trace_as_in(struct hdaa_devinfo *devinfo, int as)
{
struct hdaa_audio_as *ases = devinfo->as;
struct hdaa_widget *w;
int i, j, k, length;
for (j = devinfo->startnode; j < devinfo->endnode; j++) {
w = hdaa_widget_get(devinfo, j);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT)
continue;
if (w->bindas >= 0 && w->bindas != as)
continue;
/* Find next pin */
for (i = 0; i < 16; i++) {
if (ases[as].pins[i] == 0)
continue;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Tracing pin %d to ADC %d\n",
ases[as].pins[i], j);
);
/* Trace this pin taking goal into account. */
if (hdaa_audio_trace_adc(devinfo, as, i,
ases[as].pins[i], 1, 0, j, 0, &length, 0) == 0) {
/* If we failed, go back to the previous step and redo it. */
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Unable to trace pin %d to ADC %d, undo traces\n",
ases[as].pins[i], j);
);
hdaa_audio_undo_trace(devinfo, as, -1);
for (k = 0; k < 16; k++)
ases[as].dacs[0][k] = 0;
break;
}
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Pin %d traced to ADC %d\n",
ases[as].pins[i], j);
);
ases[as].dacs[0][i] = j;
}
if (i == 16)
return (1);
}
return (0);
}
/*
* Trace association path from input to multiple ADCs
*/
static int
hdaa_audio_trace_as_in_mch(struct hdaa_devinfo *devinfo, int as, int seq)
{
struct hdaa_audio_as *ases = devinfo->as;
int i, length;
nid_t min, res;
/* Find next pin */
for (i = seq; i < 16 && ases[as].pins[i] == 0; i++)
;
/* Check if there are none left. If so, we succeeded. */
if (i == 16)
return (1);
min = 0;
do {
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Tracing pin %d with min nid %d",
ases[as].pins[i], min);
printf("\n");
);
/* Trace this pin taking min nid into account. */
res = hdaa_audio_trace_adc(devinfo, as, i,
ases[as].pins[i], 0, min, 0, 0, &length, 0);
if (res == 0) {
/* If we failed, go back to the previous step and redo it. */
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Unable to trace pin %d seq %d with min "
"nid %d",
ases[as].pins[i], i, min);
printf("\n");
);
return (0);
}
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Pin %d traced to ADC %d\n",
ases[as].pins[i], res);
);
/* Trace again to mark the path */
hdaa_audio_trace_adc(devinfo, as, i,
ases[as].pins[i], 0, min, res, 0, &length, length);
ases[as].dacs[0][i] = res;
/* We succeeded, so call next. */
if (hdaa_audio_trace_as_in_mch(devinfo, as, i + 1))
return (1);
/* If next failed, we should retry with next min */
hdaa_audio_undo_trace(devinfo, as, i);
ases[as].dacs[0][i] = 0;
min = res + 1;
} while (1);
}
/*
* Trace input monitor path from mixer to output association.
*/
static int
hdaa_audio_trace_to_out(struct hdaa_devinfo *devinfo, nid_t nid, int depth)
{
struct hdaa_audio_as *ases = devinfo->as;
struct hdaa_widget *w, *wc;
int i, j;
nid_t res = 0;
if (depth > HDA_PARSE_MAXDEPTH)
return (0);
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return (0);
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*stracing via nid %d\n",
depth + 1, "", w->nid);
);
/* Use only unused widgets */
if (depth > 0 && w->bindas != -1) {
if (w->bindas < 0 || ases[w->bindas].dir == HDAA_CTL_OUT) {
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*snid %d found output association %d\n",
depth + 1, "", w->nid, w->bindas);
);
if (w->bindas >= 0)
w->pflags |= HDAA_ADC_MONITOR;
return (1);
} else {
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*snid %d busy by input association %d\n",
depth + 1, "", w->nid, w->bindas);
);
return (0);
}
}
switch (w->type) {
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT:
/* Do not traverse input. AD1988 has digital monitor
for which we are not ready. */
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX:
if (depth > 0)
break;
/* Fall */
default:
/* Try to find reachable ADCs with specified nid. */
for (j = devinfo->startnode; j < devinfo->endnode; j++) {
wc = hdaa_widget_get(devinfo, j);
if (wc == NULL || wc->enable == 0)
continue;
for (i = 0; i < wc->nconns; i++) {
if (wc->connsenable[i] == 0)
continue;
if (wc->conns[i] != nid)
continue;
if (hdaa_audio_trace_to_out(devinfo,
j, depth + 1) != 0) {
res = 1;
if (wc->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR &&
wc->selconn == -1)
wc->selconn = i;
}
}
}
break;
}
if (res && w->bindas == -1)
w->bindas = -2;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" %*snid %d returned %d\n",
depth + 1, "", w->nid, res);
);
return (res);
}
/*
* Trace extra associations (beeper, monitor)
*/
static void
hdaa_audio_trace_as_extra(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w;
int j;
/* Input monitor */
/* Find a mixer associated with an input, but supplying signal
for output associations. Hopefully it is an input monitor. */
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Tracing input monitor\n");
);
for (j = devinfo->startnode; j < devinfo->endnode; j++) {
w = hdaa_widget_get(devinfo, j);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
continue;
if (w->bindas < 0 || as[w->bindas].dir != HDAA_CTL_IN)
continue;
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Tracing nid %d to out\n",
j);
);
if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" nid %d is input monitor\n",
w->nid);
);
w->ossdev = SOUND_MIXER_IMIX;
}
}
/* Other inputs monitor */
/* Find input pins supplying signal for output associations.
Hopefully they are input monitors. */
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Tracing other input monitors\n");
);
for (j = devinfo->startnode; j < devinfo->endnode; j++) {
w = hdaa_widget_get(devinfo, j);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (w->bindas < 0 || as[w->bindas].dir != HDAA_CTL_IN)
continue;
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" Tracing nid %d to out\n",
j);
);
if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" nid %d is input monitor\n",
w->nid);
);
}
}
/* Beeper */
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Tracing beeper\n");
);
for (j = devinfo->startnode; j < devinfo->endnode; j++) {
w = hdaa_widget_get(devinfo, j);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET)
continue;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Tracing nid %d to out\n",
j);
);
if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
" nid %d traced to out\n",
j);
);
}
w->bindas = -2;
}
}
/*
* Bind associations to PCM channels
*/
static void
hdaa_audio_bind_as(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
int i, j, cnt = 0, free;
for (j = 0; j < devinfo->ascnt; j++) {
if (as[j].enable)
cnt += as[j].num_chans;
}
if (devinfo->num_chans == 0) {
- devinfo->chans = (struct hdaa_chan *)mallocarray(cnt,
- sizeof(struct hdaa_chan),
+ devinfo->chans = (struct hdaa_chan *)malloc(
+ sizeof(struct hdaa_chan) * cnt,
M_HDAA, M_ZERO | M_NOWAIT);
if (devinfo->chans == NULL) {
device_printf(devinfo->dev,
"Channels memory allocation failed!\n");
return;
}
} else {
devinfo->chans = (struct hdaa_chan *)realloc(devinfo->chans,
sizeof(struct hdaa_chan) * (devinfo->num_chans + cnt),
M_HDAA, M_ZERO | M_NOWAIT);
if (devinfo->chans == NULL) {
devinfo->num_chans = 0;
device_printf(devinfo->dev,
"Channels memory allocation failed!\n");
return;
}
/* Fix up pointers to member arrays after realloc */
for (j = 0; j < devinfo->num_chans; j++)
devinfo->chans[j].caps.fmtlist = devinfo->chans[j].fmtlist;
}
free = devinfo->num_chans;
devinfo->num_chans += cnt;
for (j = free; j < free + cnt; j++) {
devinfo->chans[j].devinfo = devinfo;
devinfo->chans[j].as = -1;
}
/* Assign associations in order of their numbers. */
for (j = 0; j < devinfo->ascnt; j++) {
if (as[j].enable == 0)
continue;
for (i = 0; i < as[j].num_chans; i++) {
devinfo->chans[free].as = j;
devinfo->chans[free].asindex = i;
devinfo->chans[free].dir =
(as[j].dir == HDAA_CTL_IN) ? PCMDIR_REC : PCMDIR_PLAY;
hdaa_pcmchannel_setup(&devinfo->chans[free]);
as[j].chans[i] = free;
free++;
}
}
}
static void
hdaa_audio_disable_nonaudio(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w;
int i;
/* Disable power and volume widgets. */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_POWER_WIDGET ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VOLUME_WIDGET) {
w->enable = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling nid %d due to it's"
" non-audio type.\n",
w->nid);
);
}
}
}
static void
hdaa_audio_disable_useless(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w, *cw;
struct hdaa_audio_ctl *ctl;
int done, found, i, j, k;
/* Disable useless pins. */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) {
if ((w->wclass.pin.config &
HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) ==
HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_NONE) {
w->enable = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling pin nid %d due"
" to None connectivity.\n",
w->nid);
);
} else if ((w->wclass.pin.config &
HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK) == 0) {
w->enable = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling unassociated"
" pin nid %d.\n",
w->nid);
);
}
}
}
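	/*
	 * Iterate until a full pass makes no further changes: disabling one
	 * widget may leave other widgets without inputs or consumers.
	 */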
do {
done = 1;
/* Disable and mute controls for disabled widgets. */
i = 0;
while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) {
if (ctl->enable == 0)
continue;
if (ctl->widget->enable == 0 ||
(ctl->childwidget != NULL &&
ctl->childwidget->enable == 0)) {
ctl->forcemute = 1;
ctl->muted = HDAA_AMP_MUTE_ALL;
ctl->left = 0;
ctl->right = 0;
ctl->enable = 0;
if (ctl->ndir == HDAA_CTL_IN)
ctl->widget->connsenable[ctl->index] = 0;
done = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling ctl %d nid %d cnid %d due"
" to disabled widget.\n", i,
ctl->widget->nid,
(ctl->childwidget != NULL)?
ctl->childwidget->nid:-1);
);
}
}
/* Disable useless widgets. */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
/* Disable inputs with disabled child widgets. */
for (j = 0; j < w->nconns; j++) {
if (w->connsenable[j]) {
cw = hdaa_widget_get(devinfo, w->conns[j]);
if (cw == NULL || cw->enable == 0) {
w->connsenable[j] = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling nid %d connection %d due"
" to disabled child widget.\n",
i, j);
);
}
}
}
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR &&
w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
continue;
/* Disable mixers and selectors without inputs. */
found = 0;
for (j = 0; j < w->nconns; j++) {
if (w->connsenable[j]) {
found = 1;
break;
}
}
if (found == 0) {
w->enable = 0;
done = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling nid %d due to all it's"
" inputs disabled.\n", w->nid);
);
}
/* Disable nodes without consumers. */
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR &&
w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
continue;
found = 0;
for (k = devinfo->startnode; k < devinfo->endnode; k++) {
cw = hdaa_widget_get(devinfo, k);
if (cw == NULL || cw->enable == 0)
continue;
for (j = 0; j < cw->nconns; j++) {
if (cw->connsenable[j] && cw->conns[j] == i) {
found = 1;
break;
}
}
}
if (found == 0) {
w->enable = 0;
done = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling nid %d due to all it's"
" consumers disabled.\n", w->nid);
);
}
}
} while (done == 0);
}
static void
hdaa_audio_disable_unas(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w, *cw;
struct hdaa_audio_ctl *ctl;
int i, j, k;
	/* Disable unassociated widgets. */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->bindas == -1) {
w->enable = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling unassociated nid %d.\n",
w->nid);
);
}
}
	/* Disable input connections on input pins and
	 * output connections on output pins. */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (w->bindas < 0)
continue;
if (as[w->bindas].dir == HDAA_CTL_IN) {
for (j = 0; j < w->nconns; j++) {
if (w->connsenable[j] == 0)
continue;
w->connsenable[j] = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling connection to input pin "
"nid %d conn %d.\n",
i, j);
);
}
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid,
HDAA_CTL_IN, -1, 1);
if (ctl && ctl->enable) {
ctl->forcemute = 1;
ctl->muted = HDAA_AMP_MUTE_ALL;
ctl->left = 0;
ctl->right = 0;
ctl->enable = 0;
}
} else {
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid,
HDAA_CTL_OUT, -1, 1);
if (ctl && ctl->enable) {
ctl->forcemute = 1;
ctl->muted = HDAA_AMP_MUTE_ALL;
ctl->left = 0;
ctl->right = 0;
ctl->enable = 0;
}
for (k = devinfo->startnode; k < devinfo->endnode; k++) {
cw = hdaa_widget_get(devinfo, k);
if (cw == NULL || cw->enable == 0)
continue;
for (j = 0; j < cw->nconns; j++) {
if (cw->connsenable[j] && cw->conns[j] == i) {
cw->connsenable[j] = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling connection from output pin "
"nid %d conn %d cnid %d.\n",
k, j, i);
);
if (cw->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX &&
cw->nconns > 1)
continue;
ctl = hdaa_audio_ctl_amp_get(devinfo, k,
HDAA_CTL_IN, j, 1);
if (ctl && ctl->enable) {
ctl->forcemute = 1;
ctl->muted = HDAA_AMP_MUTE_ALL;
ctl->left = 0;
ctl->right = 0;
ctl->enable = 0;
}
}
}
}
}
}
}
static void
hdaa_audio_disable_notselected(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w;
int i, j;
	/* On the playback path we can safely disable all unselected inputs. */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->nconns <= 1)
continue;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
continue;
if (w->bindas < 0 || as[w->bindas].dir == HDAA_CTL_IN)
continue;
for (j = 0; j < w->nconns; j++) {
if (w->connsenable[j] == 0)
continue;
if (w->selconn < 0 || w->selconn == j)
continue;
w->connsenable[j] = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling unselected connection "
"nid %d conn %d.\n",
i, j);
);
}
}
}
static void
hdaa_audio_disable_crossas(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *ases = devinfo->as;
struct hdaa_widget *w, *cw;
struct hdaa_audio_ctl *ctl;
int i, j;
	/* Disable cross-association and unwanted cross-channel connections. */
/* ... using selectors */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->nconns <= 1)
continue;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
continue;
/* Allow any -> mix */
if (w->bindas == -2)
continue;
for (j = 0; j < w->nconns; j++) {
if (w->connsenable[j] == 0)
continue;
cw = hdaa_widget_get(devinfo, w->conns[j]);
			if (cw == NULL || cw->enable == 0)
continue;
/* Allow mix -> out. */
if (cw->bindas == -2 && w->bindas >= 0 &&
ases[w->bindas].dir == HDAA_CTL_OUT)
continue;
/* Allow mix -> mixed-in. */
if (cw->bindas == -2 && w->bindas >= 0 &&
ases[w->bindas].mixed)
continue;
/* Allow in -> mix. */
if ((w->pflags & HDAA_ADC_MONITOR) &&
cw->bindas >= 0 &&
ases[cw->bindas].dir == HDAA_CTL_IN)
continue;
			/* Allow if they have common as/seqs. */
if (w->bindas == cw->bindas &&
(w->bindseqmask & cw->bindseqmask) != 0)
continue;
w->connsenable[j] = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling crossassociatement connection "
"nid %d conn %d cnid %d.\n",
i, j, cw->nid);
);
}
}
/* ... using controls */
i = 0;
while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) {
if (ctl->enable == 0 || ctl->childwidget == NULL)
continue;
/* Allow any -> mix */
if (ctl->widget->bindas == -2)
continue;
/* Allow mix -> out. */
if (ctl->childwidget->bindas == -2 &&
ctl->widget->bindas >= 0 &&
ases[ctl->widget->bindas].dir == HDAA_CTL_OUT)
continue;
/* Allow mix -> mixed-in. */
if (ctl->childwidget->bindas == -2 &&
ctl->widget->bindas >= 0 &&
ases[ctl->widget->bindas].mixed)
continue;
/* Allow in -> mix. */
if ((ctl->widget->pflags & HDAA_ADC_MONITOR) &&
ctl->childwidget->bindas >= 0 &&
ases[ctl->childwidget->bindas].dir == HDAA_CTL_IN)
continue;
		/* Allow if they have common as/seqs. */
if (ctl->widget->bindas == ctl->childwidget->bindas &&
(ctl->widget->bindseqmask & ctl->childwidget->bindseqmask) != 0)
continue;
ctl->forcemute = 1;
ctl->muted = HDAA_AMP_MUTE_ALL;
ctl->left = 0;
ctl->right = 0;
ctl->enable = 0;
if (ctl->ndir == HDAA_CTL_IN)
ctl->widget->connsenable[ctl->index] = 0;
HDA_BOOTHVERBOSE(
device_printf(devinfo->dev,
" Disabling crossassociatement connection "
"ctl %d nid %d cnid %d.\n", i,
ctl->widget->nid,
ctl->childwidget->nid);
);
}
}
/*
* Find controls to control amplification for source and calculate possible
* amplification range.
*/
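/*
 * Rough sketch of the range math: the reported range is taken from the
 * nearest controllable amplifier on the path (its MINQDB()..MAXQDB()); only
 * when the current widget contributes no range is the range of its downstream
 * consumers used, narrowed to their common part with imax()/imin() so that it
 * stays valid for every branch.
 */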
static int
hdaa_audio_ctl_source_amp(struct hdaa_devinfo *devinfo, nid_t nid, int index,
int ossdev, int ctlable, int depth, int *minamp, int *maxamp)
{
struct hdaa_widget *w, *wc;
struct hdaa_audio_ctl *ctl;
int i, j, conns = 0, tminamp, tmaxamp, cminamp, cmaxamp, found = 0;
if (depth > HDA_PARSE_MAXDEPTH)
return (found);
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return (found);
/* Count number of active inputs. */
if (depth > 0) {
for (j = 0; j < w->nconns; j++) {
if (!w->connsenable[j])
continue;
conns++;
}
}
	/* If this is not the first step, use the input mixer.
	   Pins have a common input ctl, so care must be taken. */
if (depth > 0 && ctlable && (conns == 1 ||
w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)) {
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN,
index, 1);
if (ctl) {
ctl->ossmask |= (1 << ossdev);
found++;
if (*minamp == *maxamp) {
*minamp += MINQDB(ctl);
*maxamp += MAXQDB(ctl);
}
}
}
	/* If the widget has its own ossdev, do not traverse it.
	   It will be traversed on its own. */
if (w->ossdev >= 0 && depth > 0)
return (found);
	/* We must not traverse pins. */
if ((w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) &&
depth > 0)
return (found);
	/* Record that this widget exports such a signal. */
w->ossmask |= (1 << ossdev);
/*
	 * If signals are mixed, we can't assign controls any farther.
	 * Ignore this at depth zero.  The caller must know why.
*/
if (conns > 1 &&
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
ctlable = 0;
if (ctlable) {
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1);
if (ctl) {
ctl->ossmask |= (1 << ossdev);
found++;
if (*minamp == *maxamp) {
*minamp += MINQDB(ctl);
*maxamp += MAXQDB(ctl);
}
}
}
cminamp = cmaxamp = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
wc = hdaa_widget_get(devinfo, i);
if (wc == NULL || wc->enable == 0)
continue;
for (j = 0; j < wc->nconns; j++) {
if (wc->connsenable[j] && wc->conns[j] == nid) {
tminamp = tmaxamp = 0;
found += hdaa_audio_ctl_source_amp(devinfo,
wc->nid, j, ossdev, ctlable, depth + 1,
&tminamp, &tmaxamp);
if (cminamp == 0 && cmaxamp == 0) {
cminamp = tminamp;
cmaxamp = tmaxamp;
} else if (tminamp != tmaxamp) {
cminamp = imax(cminamp, tminamp);
cmaxamp = imin(cmaxamp, tmaxamp);
}
}
}
}
if (*minamp == *maxamp && cminamp < cmaxamp) {
*minamp += cminamp;
*maxamp += cmaxamp;
}
return (found);
}
/*
* Find controls to control amplification for destination and calculate
* possible amplification range.
*/
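/*
 * Unlike hdaa_audio_ctl_source_amp(), this walks from the destination widget
 * back through its enabled input connections, and it skips nodes whose output
 * feeds several consumers (except for the real headphone redirection case).
 */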
static int
hdaa_audio_ctl_dest_amp(struct hdaa_devinfo *devinfo, nid_t nid, int index,
int ossdev, int depth, int *minamp, int *maxamp)
{
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w, *wc;
struct hdaa_audio_ctl *ctl;
int i, j, consumers, tminamp, tmaxamp, cminamp, cmaxamp, found = 0;
if (depth > HDA_PARSE_MAXDEPTH)
return (found);
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return (found);
if (depth > 0) {
		/* If this node produces output for several consumers,
we can't touch it. */
consumers = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
wc = hdaa_widget_get(devinfo, i);
if (wc == NULL || wc->enable == 0)
continue;
for (j = 0; j < wc->nconns; j++) {
if (wc->connsenable[j] && wc->conns[j] == nid)
consumers++;
}
}
/* The only exception is if real HP redirection is configured
and this is a duplication point.
		   XXX: Actually the exception is not completely correct.
		   XXX: The duplication point check is not perfect. */
if ((consumers == 2 && (w->bindas < 0 ||
as[w->bindas].hpredir < 0 || as[w->bindas].fakeredir ||
(w->bindseqmask & (1 << 15)) == 0)) ||
consumers > 2)
return (found);
		/* Else use its output mixer. */
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid,
HDAA_CTL_OUT, -1, 1);
if (ctl) {
ctl->ossmask |= (1 << ossdev);
found++;
if (*minamp == *maxamp) {
*minamp += MINQDB(ctl);
*maxamp += MAXQDB(ctl);
}
}
}
	/* We must not traverse pins. */
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX &&
depth > 0)
return (found);
cminamp = cmaxamp = 0;
for (i = 0; i < w->nconns; i++) {
if (w->connsenable[i] == 0)
continue;
if (index >= 0 && i != index)
continue;
tminamp = tmaxamp = 0;
ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid,
HDAA_CTL_IN, i, 1);
if (ctl) {
ctl->ossmask |= (1 << ossdev);
found++;
if (*minamp == *maxamp) {
tminamp += MINQDB(ctl);
tmaxamp += MAXQDB(ctl);
}
}
found += hdaa_audio_ctl_dest_amp(devinfo, w->conns[i], -1, ossdev,
depth + 1, &tminamp, &tmaxamp);
if (cminamp == 0 && cmaxamp == 0) {
cminamp = tminamp;
cmaxamp = tmaxamp;
} else if (tminamp != tmaxamp) {
cminamp = imax(cminamp, tminamp);
cmaxamp = imin(cmaxamp, tmaxamp);
}
}
if (*minamp == *maxamp && cminamp < cmaxamp) {
*minamp += cminamp;
*maxamp += cmaxamp;
}
return (found);
}
/*
* Assign OSS names to sound sources
*/
static void
hdaa_audio_assign_names(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w;
int i, j;
int type = -1, use, used = 0;
static const int types[7][13] = {
{ SOUND_MIXER_LINE, SOUND_MIXER_LINE1, SOUND_MIXER_LINE2,
SOUND_MIXER_LINE3, -1 }, /* line */
{ SOUND_MIXER_MONITOR, SOUND_MIXER_MIC, -1 }, /* int mic */
{ SOUND_MIXER_MIC, SOUND_MIXER_MONITOR, -1 }, /* ext mic */
{ SOUND_MIXER_CD, -1 }, /* cd */
{ SOUND_MIXER_SPEAKER, -1 }, /* speaker */
{ SOUND_MIXER_DIGITAL1, SOUND_MIXER_DIGITAL2, SOUND_MIXER_DIGITAL3,
-1 }, /* digital */
{ SOUND_MIXER_LINE, SOUND_MIXER_LINE1, SOUND_MIXER_LINE2,
SOUND_MIXER_LINE3, SOUND_MIXER_PHONEIN, SOUND_MIXER_PHONEOUT,
SOUND_MIXER_VIDEO, SOUND_MIXER_RADIO, SOUND_MIXER_DIGITAL1,
SOUND_MIXER_DIGITAL2, SOUND_MIXER_DIGITAL3, SOUND_MIXER_MONITOR,
-1 } /* others */
};
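	/*
	 * Each row above lists candidate OSS mixer names for one device class
	 * in order of preference, terminated by -1.  The "used" bitmask below
	 * ensures that every name is handed out at most once.
	 */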
/* Surely known names */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->bindas == -1)
continue;
use = -1;
switch (w->type) {
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX:
if (as[w->bindas].dir == HDAA_CTL_OUT)
break;
type = -1;
switch (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) {
case HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_IN:
type = 0;
break;
case HDA_CONFIG_DEFAULTCONF_DEVICE_MIC_IN:
if ((w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK)
== HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_JACK)
break;
type = 1;
break;
case HDA_CONFIG_DEFAULTCONF_DEVICE_CD:
type = 3;
break;
case HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER:
type = 4;
break;
case HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_IN:
case HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_IN:
type = 5;
break;
}
if (type == -1)
break;
j = 0;
while (types[type][j] >= 0 &&
(used & (1 << types[type][j])) != 0) {
j++;
}
if (types[type][j] >= 0)
use = types[type][j];
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT:
use = SOUND_MIXER_PCM;
break;
case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET:
use = SOUND_MIXER_SPEAKER;
break;
default:
break;
}
if (use >= 0) {
w->ossdev = use;
used |= (1 << use);
}
}
/* Semi-known names */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->ossdev >= 0)
continue;
if (w->bindas == -1)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (as[w->bindas].dir == HDAA_CTL_OUT)
continue;
type = -1;
switch (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) {
case HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_OUT:
case HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER:
case HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT:
case HDA_CONFIG_DEFAULTCONF_DEVICE_AUX:
type = 0;
break;
case HDA_CONFIG_DEFAULTCONF_DEVICE_MIC_IN:
type = 2;
break;
case HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_OUT:
case HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_OUT:
type = 5;
break;
}
if (type == -1)
break;
j = 0;
while (types[type][j] >= 0 &&
(used & (1 << types[type][j])) != 0) {
j++;
}
if (types[type][j] >= 0) {
w->ossdev = types[type][j];
used |= (1 << types[type][j]);
}
}
/* Others */
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->ossdev >= 0)
continue;
if (w->bindas == -1)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (as[w->bindas].dir == HDAA_CTL_OUT)
continue;
j = 0;
while (types[6][j] >= 0 &&
(used & (1 << types[6][j])) != 0) {
j++;
}
if (types[6][j] >= 0) {
w->ossdev = types[6][j];
used |= (1 << types[6][j]);
}
}
}
static void
hdaa_audio_build_tree(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
int j, res;
/* Trace all associations in order of their numbers. */
for (j = 0; j < devinfo->ascnt; j++) {
if (as[j].enable == 0)
continue;
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Tracing association %d (%d)\n", j, as[j].index);
);
if (as[j].dir == HDAA_CTL_OUT) {
retry:
res = hdaa_audio_trace_as_out(devinfo, j, 0);
if (res == 0 && as[j].hpredir >= 0 &&
as[j].fakeredir == 0) {
				/* If the CODEC can't do analog HP redirection,
				   try to fake it using one more DAC. */
as[j].fakeredir = 1;
goto retry;
}
} else if (as[j].mixed)
res = hdaa_audio_trace_as_in(devinfo, j);
else
res = hdaa_audio_trace_as_in_mch(devinfo, j, 0);
if (res) {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Association %d (%d) trace succeeded\n",
j, as[j].index);
);
} else {
HDA_BOOTVERBOSE(
device_printf(devinfo->dev,
"Association %d (%d) trace failed\n",
j, as[j].index);
);
as[j].enable = 0;
}
}
/* Look for additional DACs/ADCs. */
for (j = 0; j < devinfo->ascnt; j++) {
if (as[j].enable == 0)
continue;
hdaa_audio_adddac(devinfo, j);
}
/* Trace mixer and beeper pseudo associations. */
hdaa_audio_trace_as_extra(devinfo);
}
/*
 * Store in pdevinfo new data about whether and how we can control the signal
 * for an OSS device to/from the specified widget.
*/
static void
hdaa_adjust_amp(struct hdaa_widget *w, int ossdev,
int found, int minamp, int maxamp)
{
struct hdaa_devinfo *devinfo = w->devinfo;
struct hdaa_pcm_devinfo *pdevinfo;
if (w->bindas >= 0)
pdevinfo = devinfo->as[w->bindas].pdevinfo;
else
pdevinfo = &devinfo->devs[0];
if (found)
pdevinfo->ossmask |= (1 << ossdev);
if (minamp == 0 && maxamp == 0)
return;
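	/*
	 * Keep the narrowest range that is valid for every widget feeding
	 * this OSS device: maximum of the minimums, minimum of the maximums.
	 */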
if (pdevinfo->minamp[ossdev] == 0 && pdevinfo->maxamp[ossdev] == 0) {
pdevinfo->minamp[ossdev] = minamp;
pdevinfo->maxamp[ossdev] = maxamp;
} else {
pdevinfo->minamp[ossdev] = imax(pdevinfo->minamp[ossdev], minamp);
pdevinfo->maxamp[ossdev] = imin(pdevinfo->maxamp[ossdev], maxamp);
}
}
/*
 * Trace signals from/to all possible sources/destinations to find possible
 * recording sources and OSS device control ranges, and to assign controls.
*/
static void
hdaa_audio_assign_mixers(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w, *cw;
int i, j, minamp, maxamp, found;
/* Assign mixers to the tree. */
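	/*
	 * DACs, beepers and input pins are traced forward as signal sources;
	 * ADCs and output pins are traced backward as destinations
	 * (SOUND_MIXER_RECLEV and SOUND_MIXER_VOLUME respectively).
	 */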
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
minamp = maxamp = 0;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET ||
(w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX &&
as[w->bindas].dir == HDAA_CTL_IN)) {
if (w->ossdev < 0)
continue;
found = hdaa_audio_ctl_source_amp(devinfo, w->nid, -1,
w->ossdev, 1, 0, &minamp, &maxamp);
hdaa_adjust_amp(w, w->ossdev, found, minamp, maxamp);
} else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) {
found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1,
SOUND_MIXER_RECLEV, 0, &minamp, &maxamp);
hdaa_adjust_amp(w, SOUND_MIXER_RECLEV, found, minamp, maxamp);
} else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX &&
as[w->bindas].dir == HDAA_CTL_OUT) {
found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1,
SOUND_MIXER_VOLUME, 0, &minamp, &maxamp);
hdaa_adjust_amp(w, SOUND_MIXER_VOLUME, found, minamp, maxamp);
}
if (w->ossdev == SOUND_MIXER_IMIX) {
minamp = maxamp = 0;
found = hdaa_audio_ctl_source_amp(devinfo, w->nid, -1,
w->ossdev, 1, 0, &minamp, &maxamp);
if (minamp == maxamp) {
				/* If we are unable to control the input monitor
				   as a source, try to control it as a destination. */
found += hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1,
w->ossdev, 0, &minamp, &maxamp);
w->pflags |= HDAA_IMIX_AS_DST;
}
hdaa_adjust_amp(w, w->ossdev, found, minamp, maxamp);
}
if (w->pflags & HDAA_ADC_MONITOR) {
for (j = 0; j < w->nconns; j++) {
if (!w->connsenable[j])
continue;
cw = hdaa_widget_get(devinfo, w->conns[j]);
if (cw == NULL || cw->enable == 0)
continue;
if (cw->bindas == -1)
continue;
if (cw->bindas >= 0 &&
as[cw->bindas].dir != HDAA_CTL_IN)
continue;
minamp = maxamp = 0;
found = hdaa_audio_ctl_dest_amp(devinfo,
w->nid, j, SOUND_MIXER_IGAIN, 0,
&minamp, &maxamp);
hdaa_adjust_amp(w, SOUND_MIXER_IGAIN,
found, minamp, maxamp);
}
}
}
}
static void
hdaa_audio_prepare_pin_ctrl(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w;
uint32_t pincap;
int i;
for (i = 0; i < devinfo->nodecnt; i++) {
w = &devinfo->widget[i];
if (w == NULL)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX &&
w->waspin == 0)
continue;
pincap = w->wclass.pin.cap;
/* Disable everything. */
w->wclass.pin.ctrl &= ~(
HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE |
HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE |
HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE |
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK);
if (w->enable == 0) {
			/* Pin is unused, so leave it disabled. */
continue;
} else if (w->waspin) {
/* Enable input for beeper input. */
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE;
} else if (w->bindas < 0 || as[w->bindas].enable == 0) {
			/* Pin is unused, so leave it disabled. */
continue;
} else if (as[w->bindas].dir == HDAA_CTL_IN) {
/* Input pin, configure for input. */
if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE;
if ((devinfo->quirks & HDAA_QUIRK_IVREF100) &&
HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE(
HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_100);
else if ((devinfo->quirks & HDAA_QUIRK_IVREF80) &&
HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE(
HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_80);
else if ((devinfo->quirks & HDAA_QUIRK_IVREF50) &&
HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE(
HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_50);
} else {
/* Output pin, configure for output. */
if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE;
if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap) &&
(w->wclass.pin.config &
HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) ==
HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT)
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE;
if ((devinfo->quirks & HDAA_QUIRK_OVREF100) &&
HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE(
HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_100);
else if ((devinfo->quirks & HDAA_QUIRK_OVREF80) &&
HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE(
HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_80);
else if ((devinfo->quirks & HDAA_QUIRK_OVREF50) &&
HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap))
w->wclass.pin.ctrl |=
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE(
HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_50);
}
}
}
static void
hdaa_audio_ctl_commit(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_ctl *ctl;
int i, z;
i = 0;
while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) {
if (ctl->enable == 0 || ctl->ossmask != 0) {
			/* Mute disabled and mixer-controllable controls.
			 * The latter will be initialized by mixer_init().
			 * This is expected to reduce clicks on startup. */
hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_ALL, 0, 0);
continue;
}
/* Init fixed controls to 0dB amplification. */
z = ctl->offset;
if (z > ctl->step)
z = ctl->step;
hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_NONE, z, z);
}
}
static void
hdaa_gpio_commit(struct hdaa_devinfo *devinfo)
{
uint32_t gdata, gmask, gdir;
int i, numgpio;
numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap);
if (devinfo->gpio != 0 && numgpio != 0) {
gdata = hda_command(devinfo->dev,
HDA_CMD_GET_GPIO_DATA(0, devinfo->nid));
gmask = hda_command(devinfo->dev,
HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid));
gdir = hda_command(devinfo->dev,
HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid));
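		/*
		 * Apply the action encoded for each pin in devinfo->gpio:
		 * SET/CLEAR drive the pin as an output with data 1/0, INPUT
		 * enables it as an input, and DISABLE removes it from the
		 * enable mask.
		 */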
for (i = 0; i < numgpio; i++) {
if ((devinfo->gpio & HDAA_GPIO_MASK(i)) ==
HDAA_GPIO_SET(i)) {
gdata |= (1 << i);
gmask |= (1 << i);
gdir |= (1 << i);
} else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) ==
HDAA_GPIO_CLEAR(i)) {
gdata &= ~(1 << i);
gmask |= (1 << i);
gdir |= (1 << i);
} else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) ==
HDAA_GPIO_DISABLE(i)) {
gmask &= ~(1 << i);
} else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) ==
HDAA_GPIO_INPUT(i)) {
gmask |= (1 << i);
gdir &= ~(1 << i);
}
}
HDA_BOOTVERBOSE(
device_printf(devinfo->dev, "GPIO commit\n");
);
hda_command(devinfo->dev,
HDA_CMD_SET_GPIO_ENABLE_MASK(0, devinfo->nid, gmask));
hda_command(devinfo->dev,
HDA_CMD_SET_GPIO_DIRECTION(0, devinfo->nid, gdir));
hda_command(devinfo->dev,
HDA_CMD_SET_GPIO_DATA(0, devinfo->nid, gdata));
HDA_BOOTVERBOSE(
hdaa_dump_gpio(devinfo);
);
}
}
static void
hdaa_gpo_commit(struct hdaa_devinfo *devinfo)
{
uint32_t gdata;
int i, numgpo;
numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap);
if (devinfo->gpo != 0 && numgpo != 0) {
gdata = hda_command(devinfo->dev,
HDA_CMD_GET_GPO_DATA(0, devinfo->nid));
for (i = 0; i < numgpo; i++) {
if ((devinfo->gpio & HDAA_GPIO_MASK(i)) ==
HDAA_GPIO_SET(i)) {
gdata |= (1 << i);
} else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) ==
HDAA_GPIO_CLEAR(i)) {
gdata &= ~(1 << i);
}
}
HDA_BOOTVERBOSE(
device_printf(devinfo->dev, "GPO commit\n");
);
hda_command(devinfo->dev,
HDA_CMD_SET_GPO_DATA(0, devinfo->nid, gdata));
HDA_BOOTVERBOSE(
hdaa_dump_gpo(devinfo);
);
}
}
static void
hdaa_audio_commit(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w;
int i;
/* Commit controls. */
hdaa_audio_ctl_commit(devinfo);
/* Commit selectors, pins and EAPD. */
for (i = 0; i < devinfo->nodecnt; i++) {
w = &devinfo->widget[i];
if (w == NULL)
continue;
if (w->selconn == -1)
w->selconn = 0;
if (w->nconns > 0)
hdaa_widget_connection_select(w, w->selconn);
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX ||
w->waspin) {
hda_command(devinfo->dev,
HDA_CMD_SET_PIN_WIDGET_CTRL(0, w->nid,
w->wclass.pin.ctrl));
}
if (w->param.eapdbtl != HDA_INVALID) {
uint32_t val;
val = w->param.eapdbtl;
if (devinfo->quirks &
HDAA_QUIRK_EAPDINV)
val ^= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD;
hda_command(devinfo->dev,
HDA_CMD_SET_EAPD_BTL_ENABLE(0, w->nid,
val));
}
}
hdaa_gpio_commit(devinfo);
hdaa_gpo_commit(devinfo);
}
static void
hdaa_powerup(struct hdaa_devinfo *devinfo)
{
int i;
hda_command(devinfo->dev,
HDA_CMD_SET_POWER_STATE(0,
devinfo->nid, HDA_CMD_POWER_STATE_D0));
DELAY(100);
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
hda_command(devinfo->dev,
HDA_CMD_SET_POWER_STATE(0,
i, HDA_CMD_POWER_STATE_D0));
}
DELAY(1000);
}
static int
hdaa_pcmchannel_setup(struct hdaa_chan *ch)
{
struct hdaa_devinfo *devinfo = ch->devinfo;
struct hdaa_audio_as *as = devinfo->as;
struct hdaa_widget *w;
uint32_t cap, fmtcap, pcmcap;
int i, j, ret, channels, onlystereo;
uint16_t pinset;
ch->caps = hdaa_caps;
ch->caps.fmtlist = ch->fmtlist;
ch->bit16 = 1;
ch->bit32 = 0;
ch->pcmrates[0] = 48000;
ch->pcmrates[1] = 0;
ch->stripecap = 0xff;
ret = 0;
channels = 0;
onlystereo = 1;
pinset = 0;
fmtcap = devinfo->supp_stream_formats;
pcmcap = devinfo->supp_pcm_size_rate;
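	/*
	 * Collect the converters of this association and intersect their
	 * stream format and PCM size/rate capabilities, so that the channel
	 * only advertises what every converter can do.
	 */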
for (i = 0; i < 16; i++) {
		/* Check that the association is valid. */
if (ch->as < 0)
break;
		/* Count only present DACs. */
if (as[ch->as].dacs[ch->asindex][i] <= 0)
continue;
/* Ignore duplicates */
for (j = 0; j < ret; j++) {
if (ch->io[j] == as[ch->as].dacs[ch->asindex][i])
break;
}
if (j < ret)
continue;
w = hdaa_widget_get(devinfo, as[ch->as].dacs[ch->asindex][i]);
if (w == NULL || w->enable == 0)
continue;
cap = w->param.supp_stream_formats;
if (!HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap) &&
!HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap))
continue;
		/* Many CODECs do not declare AC3 support on SPDIF.
		   I don't believe that they don't support it! */
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap))
cap |= HDA_PARAM_SUPP_STREAM_FORMATS_AC3_MASK;
if (ret == 0) {
fmtcap = cap;
pcmcap = w->param.supp_pcm_size_rate;
} else {
fmtcap &= cap;
pcmcap &= w->param.supp_pcm_size_rate;
}
ch->io[ret++] = as[ch->as].dacs[ch->asindex][i];
ch->stripecap &= w->wclass.conv.stripecap;
/* Do not count redirection pin/dac channels. */
if (i == 15 && as[ch->as].hpredir >= 0)
continue;
channels += HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap) + 1;
if (HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap) != 1)
onlystereo = 0;
pinset |= (1 << i);
}
ch->io[ret] = -1;
ch->channels = channels;
if (as[ch->as].fakeredir)
ret--;
	/* The standard speaks only about stereo pins and playback, ... */
if ((!onlystereo) || as[ch->as].mixed)
pinset = 0;
	/* ..., but there it gives us info about the speaker layout. */
as[ch->as].pinset = pinset;
ch->supp_stream_formats = fmtcap;
ch->supp_pcm_size_rate = pcmcap;
/*
* 8bit = 0
* 16bit = 1
* 20bit = 2
* 24bit = 3
* 32bit = 4
*/
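	/*
	 * ch->bit32 below is set to one of these codes, preferring 24-bit,
	 * then 20-bit, then 32-bit samples among the intersected capabilities.
	 */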
if (ret > 0) {
i = 0;
if (HDA_PARAM_SUPP_STREAM_FORMATS_PCM(fmtcap)) {
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(pcmcap))
ch->bit16 = 1;
else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(pcmcap))
ch->bit16 = 0;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(pcmcap))
ch->bit32 = 3;
else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(pcmcap))
ch->bit32 = 2;
else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(pcmcap))
ch->bit32 = 4;
if (!(devinfo->quirks & HDAA_QUIRK_FORCESTEREO)) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 1, 0);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 1, 0);
}
if (channels >= 2) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 2, 0);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 2, 0);
}
if (channels >= 3 && !onlystereo) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 3, 0);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 3, 0);
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 3, 1);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 3, 1);
}
if (channels >= 4) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 4, 0);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 4, 0);
if (!onlystereo) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 4, 1);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 4, 1);
}
}
if (channels >= 5 && !onlystereo) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 5, 0);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 5, 0);
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 5, 1);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 5, 1);
}
if (channels >= 6) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 6, 1);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 6, 1);
if (!onlystereo) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 6, 0);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 6, 0);
}
}
if (channels >= 7 && !onlystereo) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 7, 0);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 7, 0);
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 7, 1);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 7, 1);
}
if (channels >= 8) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 8, 1);
if (ch->bit32)
ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 8, 1);
}
}
if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(fmtcap)) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 2, 0);
if (channels >= 8) {
ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 8, 0);
ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 8, 1);
}
}
ch->fmtlist[i] = 0;
i = 0;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(pcmcap))
ch->pcmrates[i++] = 8000;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(pcmcap))
ch->pcmrates[i++] = 11025;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(pcmcap))
ch->pcmrates[i++] = 16000;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(pcmcap))
ch->pcmrates[i++] = 22050;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(pcmcap))
ch->pcmrates[i++] = 32000;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(pcmcap))
ch->pcmrates[i++] = 44100;
/* if (HDA_PARAM_SUPP_PCM_SIZE_RATE_48KHZ(pcmcap)) */
ch->pcmrates[i++] = 48000;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(pcmcap))
ch->pcmrates[i++] = 88200;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(pcmcap))
ch->pcmrates[i++] = 96000;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(pcmcap))
ch->pcmrates[i++] = 176400;
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(pcmcap))
ch->pcmrates[i++] = 192000;
/* if (HDA_PARAM_SUPP_PCM_SIZE_RATE_384KHZ(pcmcap)) */
ch->pcmrates[i] = 0;
if (i > 0) {
ch->caps.minspeed = ch->pcmrates[0];
ch->caps.maxspeed = ch->pcmrates[i - 1];
}
}
return (ret);
}
static void
hdaa_prepare_pcms(struct hdaa_devinfo *devinfo)
{
struct hdaa_audio_as *as = devinfo->as;
int i, j, k, apdev = 0, ardev = 0, dpdev = 0, drdev = 0;
for (i = 0; i < devinfo->ascnt; i++) {
if (as[i].enable == 0)
continue;
if (as[i].dir == HDAA_CTL_IN) {
if (as[i].digital)
drdev++;
else
ardev++;
} else {
if (as[i].digital)
dpdev++;
else
apdev++;
}
}
devinfo->num_devs =
max(ardev, apdev) + max(drdev, dpdev);
devinfo->devs =
- (struct hdaa_pcm_devinfo *)mallocarray(
- devinfo->num_devs, sizeof(struct hdaa_pcm_devinfo),
+ (struct hdaa_pcm_devinfo *)malloc(
+ devinfo->num_devs * sizeof(struct hdaa_pcm_devinfo),
M_HDAA, M_ZERO | M_NOWAIT);
if (devinfo->devs == NULL) {
device_printf(devinfo->dev,
"Unable to allocate memory for devices\n");
return;
}
for (i = 0; i < devinfo->num_devs; i++) {
devinfo->devs[i].index = i;
devinfo->devs[i].devinfo = devinfo;
devinfo->devs[i].playas = -1;
devinfo->devs[i].recas = -1;
devinfo->devs[i].digital = 255;
}
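	/*
	 * Pair associations onto pcm devices: each device takes at most one
	 * playback and one recording association, and analog and digital
	 * associations are never mixed on the same device.
	 */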
for (i = 0; i < devinfo->ascnt; i++) {
if (as[i].enable == 0)
continue;
for (j = 0; j < devinfo->num_devs; j++) {
if (devinfo->devs[j].digital != 255 &&
(!devinfo->devs[j].digital) !=
(!as[i].digital))
continue;
if (as[i].dir == HDAA_CTL_IN) {
if (devinfo->devs[j].recas >= 0)
continue;
devinfo->devs[j].recas = i;
} else {
if (devinfo->devs[j].playas >= 0)
continue;
devinfo->devs[j].playas = i;
}
as[i].pdevinfo = &devinfo->devs[j];
for (k = 0; k < as[i].num_chans; k++) {
devinfo->chans[as[i].chans[k]].pdevinfo =
&devinfo->devs[j];
}
devinfo->devs[j].digital = as[i].digital;
break;
}
}
}
static void
hdaa_create_pcms(struct hdaa_devinfo *devinfo)
{
int i;
for (i = 0; i < devinfo->num_devs; i++) {
struct hdaa_pcm_devinfo *pdevinfo = &devinfo->devs[i];
pdevinfo->dev = device_add_child(devinfo->dev, "pcm", -1);
device_set_ivars(pdevinfo->dev, (void *)pdevinfo);
}
}
static void
hdaa_dump_ctls(struct hdaa_pcm_devinfo *pdevinfo, const char *banner, uint32_t flag)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_audio_ctl *ctl;
char buf[64];
int i, j, printed = 0;
if (flag == 0) {
flag = ~(SOUND_MASK_VOLUME | SOUND_MASK_PCM |
SOUND_MASK_CD | SOUND_MASK_LINE | SOUND_MASK_RECLEV |
SOUND_MASK_MIC | SOUND_MASK_SPEAKER | SOUND_MASK_IGAIN |
SOUND_MASK_OGAIN | SOUND_MASK_IMIX | SOUND_MASK_MONITOR);
}
for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) {
if ((flag & (1 << j)) == 0)
continue;
i = 0;
printed = 0;
while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) {
if (ctl->enable == 0 ||
ctl->widget->enable == 0)
continue;
if (!((pdevinfo->playas >= 0 &&
ctl->widget->bindas == pdevinfo->playas) ||
(pdevinfo->recas >= 0 &&
ctl->widget->bindas == pdevinfo->recas) ||
(ctl->widget->bindas == -2 && pdevinfo->index == 0)))
continue;
if ((ctl->ossmask & (1 << j)) == 0)
continue;
if (printed == 0) {
if (banner != NULL) {
device_printf(pdevinfo->dev, "%s", banner);
} else {
device_printf(pdevinfo->dev, "Unknown Ctl");
}
printf(" (OSS: %s)",
hdaa_audio_ctl_ossmixer_mask2allname(1 << j,
buf, sizeof(buf)));
if (pdevinfo->ossmask & (1 << j)) {
printf(": %+d/%+ddB\n",
pdevinfo->minamp[j] / 4,
pdevinfo->maxamp[j] / 4);
} else
printf("\n");
printed = 1;
}
device_printf(pdevinfo->dev, " +- ctl %2d (nid %3d %s", i,
ctl->widget->nid,
(ctl->ndir == HDAA_CTL_IN)?"in ":"out");
if (ctl->ndir == HDAA_CTL_IN && ctl->ndir == ctl->dir)
printf(" %2d): ", ctl->index);
else
printf("): ");
if (ctl->step > 0) {
printf("%+d/%+ddB (%d steps)%s\n",
MINQDB(ctl) / 4,
MAXQDB(ctl) / 4,
ctl->step + 1,
ctl->mute?" + mute":"");
} else
printf("%s\n", ctl->mute?"mute":"");
}
}
if (printed)
device_printf(pdevinfo->dev, "\n");
}
static void
hdaa_dump_audio_formats(device_t dev, uint32_t fcap, uint32_t pcmcap)
{
uint32_t cap;
cap = fcap;
if (cap != 0) {
device_printf(dev, " Stream cap: 0x%08x", cap);
if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap))
printf(" AC3");
if (HDA_PARAM_SUPP_STREAM_FORMATS_FLOAT32(cap))
printf(" FLOAT32");
if (HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap))
printf(" PCM");
printf("\n");
}
cap = pcmcap;
if (cap != 0) {
device_printf(dev, " PCM cap: 0x%08x", cap);
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(cap))
printf(" 8");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(cap))
printf(" 16");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(cap))
printf(" 20");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(cap))
printf(" 24");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(cap))
printf(" 32");
printf(" bits,");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(cap))
printf(" 8");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(cap))
printf(" 11");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(cap))
printf(" 16");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(cap))
printf(" 22");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(cap))
printf(" 32");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(cap))
printf(" 44");
printf(" 48");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(cap))
printf(" 88");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(cap))
printf(" 96");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(cap))
printf(" 176");
if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(cap))
printf(" 192");
printf(" KHz\n");
}
}
static void
hdaa_dump_pin(struct hdaa_widget *w)
{
uint32_t pincap;
pincap = w->wclass.pin.cap;
device_printf(w->devinfo->dev, " Pin cap: 0x%08x", pincap);
if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap))
printf(" ISC");
if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap))
printf(" TRQD");
if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap))
printf(" PDC");
if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap))
printf(" HP");
if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap))
printf(" OUT");
if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap))
printf(" IN");
if (HDA_PARAM_PIN_CAP_BALANCED_IO_PINS(pincap))
printf(" BAL");
if (HDA_PARAM_PIN_CAP_HDMI(pincap))
printf(" HDMI");
if (HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)) {
printf(" VREF[");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap))
printf(" 50");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap))
printf(" 80");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap))
printf(" 100");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_GROUND(pincap))
printf(" GROUND");
if (HDA_PARAM_PIN_CAP_VREF_CTRL_HIZ(pincap))
printf(" HIZ");
printf(" ]");
}
if (HDA_PARAM_PIN_CAP_EAPD_CAP(pincap))
printf(" EAPD");
if (HDA_PARAM_PIN_CAP_DP(pincap))
printf(" DP");
if (HDA_PARAM_PIN_CAP_HBR(pincap))
printf(" HBR");
printf("\n");
device_printf(w->devinfo->dev, " Pin config: 0x%08x\n",
w->wclass.pin.config);
device_printf(w->devinfo->dev, " Pin control: 0x%08x", w->wclass.pin.ctrl);
if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE)
printf(" HP");
if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE)
printf(" IN");
if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE)
printf(" OUT");
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) {
if ((w->wclass.pin.ctrl &
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) == 0x03)
printf(" HBR");
else if ((w->wclass.pin.ctrl &
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0)
printf(" EPTs");
} else {
if ((w->wclass.pin.ctrl &
HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0)
printf(" VREFs");
}
printf("\n");
}
static void
hdaa_dump_pin_config(struct hdaa_widget *w, uint32_t conf)
{
device_printf(w->devinfo->dev, "%2d %08x %-2d %-2d "
"%-13s %-5s %-7s %-10s %-7s %d%s\n",
w->nid, conf,
HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf),
HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf),
HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)],
HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)],
HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)],
HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)],
HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)],
HDA_CONFIG_DEFAULTCONF_MISC(conf),
(w->enable == 0)?" DISA":"");
}
static void
hdaa_dump_pin_configs(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w;
int i;
device_printf(devinfo->dev, "nid 0x as seq "
"device conn jack loc color misc\n");
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
hdaa_dump_pin_config(w, w->wclass.pin.config);
}
}
static void
hdaa_dump_amp(device_t dev, uint32_t cap, const char *banner)
{
int offset, size, step;
offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(cap);
size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(cap);
step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(cap);
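	/*
	 * One amplifier step is 0.25 dB * (size + 1) and "offset" is the step
	 * corresponding to 0 dB.  Hypothetical example: offset 30, 60 steps,
	 * size 3 gives ((0 - 30) * 4) / 4 = -30 dB up to
	 * ((60 - 30) * 4) / 4 = +30 dB.
	 */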
device_printf(dev, " %s amp: 0x%08x "
"mute=%d step=%d size=%d offset=%d (%+d/%+ddB)\n",
banner, cap,
HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(cap),
step, size, offset,
((0 - offset) * (size + 1)) / 4,
((step - offset) * (size + 1)) / 4);
}
static void
hdaa_dump_nodes(struct hdaa_devinfo *devinfo)
{
struct hdaa_widget *w, *cw;
char buf[64];
int i, j;
device_printf(devinfo->dev, "\n");
device_printf(devinfo->dev, "Default parameters:\n");
hdaa_dump_audio_formats(devinfo->dev,
devinfo->supp_stream_formats,
devinfo->supp_pcm_size_rate);
hdaa_dump_amp(devinfo->dev, devinfo->inamp_cap, " Input");
hdaa_dump_amp(devinfo->dev, devinfo->outamp_cap, "Output");
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL) {
device_printf(devinfo->dev, "Ghost widget nid=%d\n", i);
continue;
}
device_printf(devinfo->dev, "\n");
device_printf(devinfo->dev, " nid: %d%s\n", w->nid,
(w->enable == 0) ? " [DISABLED]" : "");
device_printf(devinfo->dev, " Name: %s\n", w->name);
device_printf(devinfo->dev, " Widget cap: 0x%08x",
w->param.widget_cap);
if (w->param.widget_cap & 0x0ee1) {
if (HDA_PARAM_AUDIO_WIDGET_CAP_LR_SWAP(w->param.widget_cap))
printf(" LRSWAP");
if (HDA_PARAM_AUDIO_WIDGET_CAP_POWER_CTRL(w->param.widget_cap))
printf(" PWR");
if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap))
printf(" DIGITAL");
if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap))
printf(" UNSOL");
if (HDA_PARAM_AUDIO_WIDGET_CAP_PROC_WIDGET(w->param.widget_cap))
printf(" PROC");
if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap))
printf(" STRIPE(x%d)",
1 << (fls(w->wclass.conv.stripecap) - 1));
j = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap);
if (j == 1)
printf(" STEREO");
else if (j > 1)
printf(" %dCH", j + 1);
}
printf("\n");
if (w->bindas != -1) {
device_printf(devinfo->dev, " Association: %d (0x%04x)\n",
w->bindas, w->bindseqmask);
}
if (w->ossmask != 0 || w->ossdev >= 0) {
device_printf(devinfo->dev, " OSS: %s",
hdaa_audio_ctl_ossmixer_mask2allname(w->ossmask, buf, sizeof(buf)));
if (w->ossdev >= 0)
printf(" (%s)", ossnames[w->ossdev]);
printf("\n");
}
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT ||
w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) {
hdaa_dump_audio_formats(devinfo->dev,
w->param.supp_stream_formats,
w->param.supp_pcm_size_rate);
} else if (w->type ==
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin)
hdaa_dump_pin(w);
if (w->param.eapdbtl != HDA_INVALID)
device_printf(devinfo->dev, " EAPD: 0x%08x\n",
w->param.eapdbtl);
if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(w->param.widget_cap) &&
w->param.outamp_cap != 0)
hdaa_dump_amp(devinfo->dev, w->param.outamp_cap, "Output");
if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(w->param.widget_cap) &&
w->param.inamp_cap != 0)
hdaa_dump_amp(devinfo->dev, w->param.inamp_cap, " Input");
if (w->nconns > 0)
device_printf(devinfo->dev, " Connections: %d\n", w->nconns);
for (j = 0; j < w->nconns; j++) {
cw = hdaa_widget_get(devinfo, w->conns[j]);
device_printf(devinfo->dev, " + %s<- nid=%d [%s]",
(w->connsenable[j] == 0)?"[DISABLED] ":"",
w->conns[j], (cw == NULL) ? "GHOST!" : cw->name);
if (cw == NULL)
printf(" [UNKNOWN]");
else if (cw->enable == 0)
printf(" [DISABLED]");
if (w->nconns > 1 && w->selconn == j && w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER)
printf(" (selected)");
printf("\n");
}
}
}
static void
hdaa_dump_dst_nid(struct hdaa_pcm_devinfo *pdevinfo, nid_t nid, int depth)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w, *cw;
char buf[64];
int i;
if (depth > HDA_PARSE_MAXDEPTH)
return;
w = hdaa_widget_get(devinfo, nid);
if (w == NULL || w->enable == 0)
return;
if (depth == 0)
device_printf(pdevinfo->dev, "%*s", 4, "");
else
device_printf(pdevinfo->dev, "%*s + <- ", 4 + (depth - 1) * 7, "");
printf("nid=%d [%s]", w->nid, w->name);
if (depth > 0) {
if (w->ossmask == 0) {
printf("\n");
return;
}
printf(" [src: %s]",
hdaa_audio_ctl_ossmixer_mask2allname(
w->ossmask, buf, sizeof(buf)));
if (w->ossdev >= 0) {
printf("\n");
return;
}
}
printf("\n");
for (i = 0; i < w->nconns; i++) {
if (w->connsenable[i] == 0)
continue;
cw = hdaa_widget_get(devinfo, w->conns[i]);
if (cw == NULL || cw->enable == 0 || cw->bindas == -1)
continue;
hdaa_dump_dst_nid(pdevinfo, w->conns[i], depth + 1);
}
}
static void
hdaa_dump_dac(struct hdaa_pcm_devinfo *pdevinfo)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_audio_as *as;
struct hdaa_widget *w;
nid_t *nids;
int chid, i;
if (pdevinfo->playas < 0)
return;
device_printf(pdevinfo->dev, "Playback:\n");
chid = devinfo->as[pdevinfo->playas].chans[0];
hdaa_dump_audio_formats(pdevinfo->dev,
devinfo->chans[chid].supp_stream_formats,
devinfo->chans[chid].supp_pcm_size_rate);
for (i = 0; i < devinfo->as[pdevinfo->playas].num_chans; i++) {
chid = devinfo->as[pdevinfo->playas].chans[i];
device_printf(pdevinfo->dev, " DAC:");
for (nids = devinfo->chans[chid].io; *nids != -1; nids++)
printf(" %d", *nids);
printf("\n");
}
as = &devinfo->as[pdevinfo->playas];
for (i = 0; i < 16; i++) {
if (as->pins[i] <= 0)
continue;
w = hdaa_widget_get(devinfo, as->pins[i]);
if (w == NULL || w->enable == 0)
continue;
device_printf(pdevinfo->dev, "\n");
hdaa_dump_dst_nid(pdevinfo, as->pins[i], 0);
}
device_printf(pdevinfo->dev, "\n");
}
static void
hdaa_dump_adc(struct hdaa_pcm_devinfo *pdevinfo)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w;
nid_t *nids;
int chid, i;
if (pdevinfo->recas < 0)
return;
device_printf(pdevinfo->dev, "Record:\n");
chid = devinfo->as[pdevinfo->recas].chans[0];
hdaa_dump_audio_formats(pdevinfo->dev,
devinfo->chans[chid].supp_stream_formats,
devinfo->chans[chid].supp_pcm_size_rate);
for (i = 0; i < devinfo->as[pdevinfo->recas].num_chans; i++) {
chid = devinfo->as[pdevinfo->recas].chans[i];
device_printf(pdevinfo->dev, " ADC:");
for (nids = devinfo->chans[chid].io; *nids != -1; nids++)
printf(" %d", *nids);
printf("\n");
}
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT)
continue;
if (w->bindas != pdevinfo->recas)
continue;
device_printf(pdevinfo->dev, "\n");
hdaa_dump_dst_nid(pdevinfo, i, 0);
}
device_printf(pdevinfo->dev, "\n");
}
static void
hdaa_dump_mix(struct hdaa_pcm_devinfo *pdevinfo)
{
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_widget *w;
int i;
int printed = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0)
continue;
if (w->ossdev != SOUND_MIXER_IMIX)
continue;
if (w->bindas != pdevinfo->recas)
continue;
if (printed == 0) {
printed = 1;
device_printf(pdevinfo->dev, "Input Mix:\n");
}
device_printf(pdevinfo->dev, "\n");
hdaa_dump_dst_nid(pdevinfo, i, 0);
}
if (printed)
device_printf(pdevinfo->dev, "\n");
}
static void
hdaa_pindump(device_t dev)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
struct hdaa_widget *w;
uint32_t res, pincap, delay;
int i;
device_printf(dev, "Dumping AFG pins:\n");
device_printf(dev, "nid 0x as seq "
"device conn jack loc color misc\n");
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
hdaa_dump_pin_config(w, w->wclass.pin.config);
pincap = w->wclass.pin.cap;
device_printf(dev, " Caps: %2s %3s %2s %4s %4s",
HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)?"IN":"",
HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)?"OUT":"",
HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap)?"HP":"",
HDA_PARAM_PIN_CAP_EAPD_CAP(pincap)?"EAPD":"",
HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)?"VREF":"");
if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap) ||
HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap)) {
if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap)) {
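				/*
				 * Trigger-required pins: start a new sense
				 * measurement and poll until the codec returns
				 * a valid result (up to ~100 ms).
				 */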
delay = 0;
hda_command(dev,
HDA_CMD_SET_PIN_SENSE(0, w->nid, 0));
do {
res = hda_command(dev,
HDA_CMD_GET_PIN_SENSE(0, w->nid));
if (res != 0x7fffffff && res != 0xffffffff)
break;
DELAY(10);
} while (++delay < 10000);
} else {
delay = 0;
res = hda_command(dev, HDA_CMD_GET_PIN_SENSE(0,
w->nid));
}
printf(" Sense: 0x%08x (%sconnected%s)", res,
(res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) ?
"" : "dis",
(HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap) &&
(res & HDA_CMD_GET_PIN_SENSE_ELD_VALID)) ?
", ELD valid" : "");
if (delay > 0)
printf(" delay %dus", delay * 10);
}
printf("\n");
}
device_printf(dev,
"NumGPIO=%d NumGPO=%d NumGPI=%d GPIWake=%d GPIUnsol=%d\n",
HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_GPI_WAKE(devinfo->gpio_cap),
HDA_PARAM_GPIO_COUNT_GPI_UNSOL(devinfo->gpio_cap));
hdaa_dump_gpi(devinfo);
hdaa_dump_gpio(devinfo);
hdaa_dump_gpo(devinfo);
}
static void
hdaa_configure(device_t dev)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
struct hdaa_audio_ctl *ctl;
int i;
HDA_BOOTHVERBOSE(
device_printf(dev, "Applying built-in patches...\n");
);
hdaa_patch(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Applying local patches...\n");
);
hdaa_local_patch(devinfo);
hdaa_audio_postprocess(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Parsing Ctls...\n");
);
hdaa_audio_ctl_parse(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Disabling nonaudio...\n");
);
hdaa_audio_disable_nonaudio(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Disabling useless...\n");
);
hdaa_audio_disable_useless(devinfo);
HDA_BOOTVERBOSE(
device_printf(dev, "Patched pins configuration:\n");
hdaa_dump_pin_configs(devinfo);
);
HDA_BOOTHVERBOSE(
device_printf(dev, "Parsing pin associations...\n");
);
hdaa_audio_as_parse(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Building AFG tree...\n");
);
hdaa_audio_build_tree(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Disabling unassociated "
"widgets...\n");
);
hdaa_audio_disable_unas(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Disabling nonselected "
"inputs...\n");
);
hdaa_audio_disable_notselected(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Disabling useless...\n");
);
hdaa_audio_disable_useless(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Disabling "
"crossassociatement connections...\n");
);
hdaa_audio_disable_crossas(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Disabling useless...\n");
);
hdaa_audio_disable_useless(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Binding associations to channels...\n");
);
hdaa_audio_bind_as(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Assigning names to signal sources...\n");
);
hdaa_audio_assign_names(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Preparing PCM devices...\n");
);
hdaa_prepare_pcms(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Assigning mixers to the tree...\n");
);
hdaa_audio_assign_mixers(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Preparing pin controls...\n");
);
hdaa_audio_prepare_pin_ctrl(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "AFG commit...\n");
);
hdaa_audio_commit(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Applying direct built-in patches...\n");
);
hdaa_patch_direct(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Pin sense init...\n");
);
hdaa_sense_init(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Creating PCM devices...\n");
);
hdaa_create_pcms(devinfo);
HDA_BOOTVERBOSE(
if (devinfo->quirks != 0) {
device_printf(dev, "FG config/quirks:");
for (i = 0; i < nitems(hdaa_quirks_tab); i++) {
if ((devinfo->quirks &
hdaa_quirks_tab[i].value) ==
hdaa_quirks_tab[i].value)
printf(" %s", hdaa_quirks_tab[i].key);
}
printf("\n");
}
);
HDA_BOOTHVERBOSE(
device_printf(dev, "\n");
device_printf(dev, "+-----------+\n");
device_printf(dev, "| HDA NODES |\n");
device_printf(dev, "+-----------+\n");
hdaa_dump_nodes(devinfo);
device_printf(dev, "\n");
device_printf(dev, "+----------------+\n");
device_printf(dev, "| HDA AMPLIFIERS |\n");
device_printf(dev, "+----------------+\n");
device_printf(dev, "\n");
i = 0;
while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) {
device_printf(dev, "%3d: nid %3d %s (%s) index %d", i,
(ctl->widget != NULL) ? ctl->widget->nid : -1,
(ctl->ndir == HDAA_CTL_IN)?"in ":"out",
(ctl->dir == HDAA_CTL_IN)?"in ":"out",
ctl->index);
if (ctl->childwidget != NULL)
printf(" cnid %3d", ctl->childwidget->nid);
else
printf(" ");
printf(" ossmask=0x%08x\n",
ctl->ossmask);
device_printf(dev,
" mute: %d step: %3d size: %3d off: %3d%s\n",
ctl->mute, ctl->step, ctl->size, ctl->offset,
(ctl->enable == 0) ? " [DISABLED]" :
((ctl->ossmask == 0) ? " [UNUSED]" : ""));
}
device_printf(dev, "\n");
);
}
static void
hdaa_unconfigure(device_t dev)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
struct hdaa_widget *w;
int i, j;
HDA_BOOTHVERBOSE(
device_printf(dev, "Pin sense deinit...\n");
);
hdaa_sense_deinit(devinfo);
free(devinfo->ctl, M_HDAA);
devinfo->ctl = NULL;
devinfo->ctlcnt = 0;
free(devinfo->as, M_HDAA);
devinfo->as = NULL;
devinfo->ascnt = 0;
free(devinfo->devs, M_HDAA);
devinfo->devs = NULL;
devinfo->num_devs = 0;
free(devinfo->chans, M_HDAA);
devinfo->chans = NULL;
devinfo->num_chans = 0;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL)
continue;
w->enable = 1;
w->selconn = -1;
w->pflags = 0;
w->bindas = -1;
w->bindseqmask = 0;
w->ossdev = -1;
w->ossmask = 0;
for (j = 0; j < w->nconns; j++)
w->connsenable[j] = 1;
if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
w->wclass.pin.config = w->wclass.pin.newconf;
if (w->eld != NULL) {
w->eld_len = 0;
free(w->eld, M_HDAA);
w->eld = NULL;
}
}
}
static int
hdaa_sysctl_gpi_state(SYSCTL_HANDLER_ARGS)
{
struct hdaa_devinfo *devinfo = oidp->oid_arg1;
device_t dev = devinfo->dev;
char buf[256];
int n = 0, i, numgpi;
uint32_t data = 0;
buf[0] = 0;
hdaa_lock(devinfo);
numgpi = HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap);
if (numgpi > 0) {
data = hda_command(dev,
HDA_CMD_GET_GPI_DATA(0, devinfo->nid));
}
hdaa_unlock(devinfo);
for (i = 0; i < numgpi; i++) {
n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%d",
n != 0 ? " " : "", i, ((data >> i) & 1));
}
return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}
static int
hdaa_sysctl_gpio_state(SYSCTL_HANDLER_ARGS)
{
struct hdaa_devinfo *devinfo = oidp->oid_arg1;
device_t dev = devinfo->dev;
char buf[256];
int n = 0, i, numgpio;
uint32_t data = 0, enable = 0, dir = 0;
buf[0] = 0;
hdaa_lock(devinfo);
numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap);
if (numgpio > 0) {
data = hda_command(dev,
HDA_CMD_GET_GPIO_DATA(0, devinfo->nid));
enable = hda_command(dev,
HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid));
dir = hda_command(dev,
HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid));
}
hdaa_unlock(devinfo);
for (i = 0; i < numgpio; i++) {
n += snprintf(buf + n, sizeof(buf) - n, "%s%d=",
n != 0 ? " " : "", i);
if ((enable & (1 << i)) == 0) {
n += snprintf(buf + n, sizeof(buf) - n, "disabled");
continue;
}
n += snprintf(buf + n, sizeof(buf) - n, "%sput(%d)",
((dir >> i) & 1) ? "out" : "in", ((data >> i) & 1));
}
return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}
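/*
 * Editorial sketch (not part of the driver): the sysctl handlers above build
 * their output with the "n += snprintf(buf + n, sizeof(buf) - n, ...)"
 * idiom, appending each "index=state" token into whatever space remains.
 * A standalone userland toy with made-up sample masks:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	char buf[256];
	int data = 0x5, enable = 0x7, dir = 0x1;	/* sample GPIO state */
	int i, n = 0;

	buf[0] = 0;
	for (i = 0; i < 3; i++) {
		n += snprintf(buf + n, sizeof(buf) - n, "%s%d=",
		    n != 0 ? " " : "", i);
		if ((enable & (1 << i)) == 0) {
			n += snprintf(buf + n, sizeof(buf) - n, "disabled");
			continue;
		}
		n += snprintf(buf + n, sizeof(buf) - n, "%sput(%d)",
		    ((dir >> i) & 1) ? "out" : "in", (data >> i) & 1);
	}
	printf("%s\n", buf);	/* prints "0=output(1) 1=input(0) 2=input(1)" */
	return (0);
}
#endif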
static int
hdaa_sysctl_gpio_config(SYSCTL_HANDLER_ARGS)
{
struct hdaa_devinfo *devinfo = oidp->oid_arg1;
char buf[256];
int error, n = 0, i, numgpio;
uint32_t gpio, x;
gpio = devinfo->newgpio;
numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap);
buf[0] = 0;
for (i = 0; i < numgpio; i++) {
x = (gpio & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i);
n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%s",
n != 0 ? " " : "", i, HDA_GPIO_ACTIONS[x]);
}
error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
if (error != 0 || req->newptr == NULL)
return (error);
if (strncmp(buf, "0x", 2) == 0)
gpio = strtol(buf + 2, NULL, 16);
else
gpio = hdaa_gpio_patch(gpio, buf);
hdaa_lock(devinfo);
devinfo->newgpio = devinfo->gpio = gpio;
hdaa_gpio_commit(devinfo);
hdaa_unlock(devinfo);
return (0);
}
static int
hdaa_sysctl_gpo_state(SYSCTL_HANDLER_ARGS)
{
struct hdaa_devinfo *devinfo = oidp->oid_arg1;
device_t dev = devinfo->dev;
char buf[256];
int n = 0, i, numgpo;
uint32_t data = 0;
buf[0] = 0;
hdaa_lock(devinfo);
numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap);
if (numgpo > 0) {
data = hda_command(dev,
HDA_CMD_GET_GPO_DATA(0, devinfo->nid));
}
hdaa_unlock(devinfo);
for (i = 0; i < numgpo; i++) {
n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%d",
n != 0 ? " " : "", i, ((data >> i) & 1));
}
return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}
static int
hdaa_sysctl_gpo_config(SYSCTL_HANDLER_ARGS)
{
struct hdaa_devinfo *devinfo = oidp->oid_arg1;
char buf[256];
int error, n = 0, i, numgpo;
uint32_t gpo, x;
gpo = devinfo->newgpo;
numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap);
buf[0] = 0;
for (i = 0; i < numgpo; i++) {
x = (gpo & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i);
n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%s",
n != 0 ? " " : "", i, HDA_GPIO_ACTIONS[x]);
}
error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
if (error != 0 || req->newptr == NULL)
return (error);
if (strncmp(buf, "0x", 2) == 0)
gpo = strtol(buf + 2, NULL, 16);
else
gpo = hdaa_gpio_patch(gpo, buf);
hdaa_lock(devinfo);
devinfo->newgpo = devinfo->gpo = gpo;
hdaa_gpo_commit(devinfo);
hdaa_unlock(devinfo);
return (0);
}
static int
hdaa_sysctl_reconfig(SYSCTL_HANDLER_ARGS)
{
device_t dev;
struct hdaa_devinfo *devinfo;
int error, val;
dev = oidp->oid_arg1;
devinfo = device_get_softc(dev);
if (devinfo == NULL)
return (EINVAL);
val = 0;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error != 0 || req->newptr == NULL || val == 0)
return (error);
HDA_BOOTHVERBOSE(
device_printf(dev, "Reconfiguration...\n");
);
if ((error = device_delete_children(dev)) != 0)
return (error);
hdaa_lock(devinfo);
hdaa_unconfigure(dev);
hdaa_configure(dev);
hdaa_unlock(devinfo);
bus_generic_attach(dev);
HDA_BOOTHVERBOSE(
device_printf(dev, "Reconfiguration done\n");
);
return (0);
}
static int
hdaa_suspend(device_t dev)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
int i;
HDA_BOOTHVERBOSE(
device_printf(dev, "Suspend...\n");
);
hdaa_lock(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Stop streams...\n");
);
for (i = 0; i < devinfo->num_chans; i++) {
if (devinfo->chans[i].flags & HDAA_CHN_RUNNING) {
devinfo->chans[i].flags |= HDAA_CHN_SUSPEND;
hdaa_channel_stop(&devinfo->chans[i]);
}
}
HDA_BOOTHVERBOSE(
device_printf(dev, "Power down FG"
" nid=%d to the D3 state...\n",
devinfo->nid);
);
hda_command(devinfo->dev,
HDA_CMD_SET_POWER_STATE(0,
devinfo->nid, HDA_CMD_POWER_STATE_D3));
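	/*
	 * Editorial note: the jack-polling callout is stopped while the
	 * softc lock is held, but callout_drain() is only called after the
	 * lock is dropped, since draining may sleep waiting for an
	 * in-flight callout to finish.
	 */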
callout_stop(&devinfo->poll_jack);
hdaa_unlock(devinfo);
callout_drain(&devinfo->poll_jack);
HDA_BOOTHVERBOSE(
device_printf(dev, "Suspend done\n");
);
return (0);
}
static int
hdaa_resume(device_t dev)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
int i;
HDA_BOOTHVERBOSE(
device_printf(dev, "Resume...\n");
);
hdaa_lock(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Power up audio FG nid=%d...\n",
devinfo->nid);
);
hdaa_powerup(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "AFG commit...\n");
);
hdaa_audio_commit(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Applying direct built-in patches...\n");
);
hdaa_patch_direct(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Pin sense init...\n");
);
hdaa_sense_init(devinfo);
hdaa_unlock(devinfo);
for (i = 0; i < devinfo->num_devs; i++) {
struct hdaa_pcm_devinfo *pdevinfo = &devinfo->devs[i];
HDA_BOOTHVERBOSE(
device_printf(pdevinfo->dev,
"OSS mixer reinitialization...\n");
);
if (mixer_reinit(pdevinfo->dev) == -1)
device_printf(pdevinfo->dev,
"unable to reinitialize the mixer\n");
}
hdaa_lock(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Start streams...\n");
);
for (i = 0; i < devinfo->num_chans; i++) {
if (devinfo->chans[i].flags & HDAA_CHN_SUSPEND) {
devinfo->chans[i].flags &= ~HDAA_CHN_SUSPEND;
hdaa_channel_start(&devinfo->chans[i]);
}
}
hdaa_unlock(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Resume done\n");
);
return (0);
}
static int
hdaa_probe(device_t dev)
{
const char *pdesc;
char buf[128];
if (hda_get_node_type(dev) != HDA_PARAM_FCT_GRP_TYPE_NODE_TYPE_AUDIO)
return (ENXIO);
pdesc = device_get_desc(device_get_parent(dev));
snprintf(buf, sizeof(buf), "%.*s Audio Function Group",
(int)(strlen(pdesc) - 10), pdesc);
device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
static int
hdaa_attach(device_t dev)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
uint32_t res;
nid_t nid = hda_get_node_id(dev);
devinfo->dev = dev;
devinfo->lock = HDAC_GET_MTX(device_get_parent(dev), dev);
devinfo->nid = nid;
devinfo->newquirks = -1;
devinfo->newgpio = -1;
devinfo->newgpo = -1;
callout_init(&devinfo->poll_jack, 1);
devinfo->poll_ival = hz;
hdaa_lock(devinfo);
res = hda_command(dev,
HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUB_NODE_COUNT));
hdaa_unlock(devinfo);
devinfo->nodecnt = HDA_PARAM_SUB_NODE_COUNT_TOTAL(res);
devinfo->startnode = HDA_PARAM_SUB_NODE_COUNT_START(res);
devinfo->endnode = devinfo->startnode + devinfo->nodecnt;
HDA_BOOTVERBOSE(
device_printf(dev, "Subsystem ID: 0x%08x\n",
hda_get_subsystem_id(dev));
);
HDA_BOOTHVERBOSE(
device_printf(dev,
"Audio Function Group at nid=%d: %d subnodes %d-%d\n",
nid, devinfo->nodecnt,
devinfo->startnode, devinfo->endnode - 1);
);
if (devinfo->nodecnt > 0)
devinfo->widget = (struct hdaa_widget *)malloc(
sizeof(*(devinfo->widget)) * devinfo->nodecnt, M_HDAA,
M_WAITOK | M_ZERO);
else
devinfo->widget = NULL;
hdaa_lock(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Powering up...\n");
);
hdaa_powerup(devinfo);
HDA_BOOTHVERBOSE(
device_printf(dev, "Parsing audio FG...\n");
);
hdaa_audio_parse(devinfo);
HDA_BOOTVERBOSE(
device_printf(dev, "Original pins configuration:\n");
hdaa_dump_pin_configs(devinfo);
);
hdaa_configure(dev);
hdaa_unlock(devinfo);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
&devinfo->newquirks, 0, hdaa_sysctl_quirks, "A",
"Configuration options");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"gpi_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
devinfo, 0, hdaa_sysctl_gpi_state, "A", "GPI state");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"gpio_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
devinfo, 0, hdaa_sysctl_gpio_state, "A", "GPIO state");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"gpio_config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
devinfo, 0, hdaa_sysctl_gpio_config, "A", "GPIO configuration");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"gpo_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
devinfo, 0, hdaa_sysctl_gpo_state, "A", "GPO state");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"gpo_config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
devinfo, 0, hdaa_sysctl_gpo_config, "A", "GPO configuration");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"reconfig", CTLTYPE_INT | CTLFLAG_RW,
dev, 0, hdaa_sysctl_reconfig, "I", "Reprocess configuration");
bus_generic_attach(dev);
return (0);
}
static int
hdaa_detach(device_t dev)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
int error;
if ((error = device_delete_children(dev)) != 0)
return (error);
hdaa_lock(devinfo);
hdaa_unconfigure(dev);
devinfo->poll_ival = 0;
callout_stop(&devinfo->poll_jack);
hdaa_unlock(devinfo);
callout_drain(&devinfo->poll_jack);
free(devinfo->widget, M_HDAA);
return (0);
}
static int
hdaa_print_child(device_t dev, device_t child)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
struct hdaa_pcm_devinfo *pdevinfo =
(struct hdaa_pcm_devinfo *)device_get_ivars(child);
struct hdaa_audio_as *as;
int retval, first = 1, i;
retval = bus_print_child_header(dev, child);
retval += printf(" at nid ");
if (pdevinfo->playas >= 0) {
as = &devinfo->as[pdevinfo->playas];
for (i = 0; i < 16; i++) {
if (as->pins[i] <= 0)
continue;
retval += printf("%s%d", first ? "" : ",", as->pins[i]);
first = 0;
}
}
if (pdevinfo->recas >= 0) {
if (pdevinfo->playas >= 0) {
retval += printf(" and ");
first = 1;
}
as = &devinfo->as[pdevinfo->recas];
for (i = 0; i < 16; i++) {
if (as->pins[i] <= 0)
continue;
retval += printf("%s%d", first ? "" : ",", as->pins[i]);
first = 0;
}
}
retval += bus_print_child_footer(dev, child);
return (retval);
}
static int
hdaa_child_location_str(device_t dev, device_t child, char *buf,
size_t buflen)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
struct hdaa_pcm_devinfo *pdevinfo =
(struct hdaa_pcm_devinfo *)device_get_ivars(child);
struct hdaa_audio_as *as;
int first = 1, i, len = 0;
len += snprintf(buf + len, buflen - len, "nid=");
if (pdevinfo->playas >= 0) {
as = &devinfo->as[pdevinfo->playas];
for (i = 0; i < 16; i++) {
if (as->pins[i] <= 0)
continue;
len += snprintf(buf + len, buflen - len,
"%s%d", first ? "" : ",", as->pins[i]);
first = 0;
}
}
if (pdevinfo->recas >= 0) {
as = &devinfo->as[pdevinfo->recas];
for (i = 0; i < 16; i++) {
if (as->pins[i] <= 0)
continue;
len += snprintf(buf + len, buflen - len,
"%s%d", first ? "" : ",", as->pins[i]);
first = 0;
}
}
return (0);
}
static void
hdaa_stream_intr(device_t dev, int dir, int stream)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
struct hdaa_chan *ch;
int i;
for (i = 0; i < devinfo->num_chans; i++) {
ch = &devinfo->chans[i];
if (!(ch->flags & HDAA_CHN_RUNNING))
continue;
if (ch->dir == ((dir == 1) ? PCMDIR_PLAY : PCMDIR_REC) &&
ch->sid == stream) {
hdaa_unlock(devinfo);
chn_intr(ch->c);
hdaa_lock(devinfo);
}
}
}
static void
hdaa_unsol_intr(device_t dev, uint32_t resp)
{
struct hdaa_devinfo *devinfo = device_get_softc(dev);
struct hdaa_widget *w;
int i, tag, flags;
HDA_BOOTHVERBOSE(
device_printf(dev, "Unsolicited response %08x\n", resp);
);
tag = resp >> 26;
for (i = devinfo->startnode; i < devinfo->endnode; i++) {
w = hdaa_widget_get(devinfo, i);
if (w == NULL || w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
if (w->unsol != tag)
continue;
if (HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap) ||
HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap))
flags = resp & 0x03;
else
flags = 0x01;
if (flags & 0x01)
hdaa_presence_handler(w);
if (flags & 0x02)
hdaa_eld_handler(w);
}
}
static device_method_t hdaa_methods[] = {
/* device interface */
DEVMETHOD(device_probe, hdaa_probe),
DEVMETHOD(device_attach, hdaa_attach),
DEVMETHOD(device_detach, hdaa_detach),
DEVMETHOD(device_suspend, hdaa_suspend),
DEVMETHOD(device_resume, hdaa_resume),
/* Bus interface */
DEVMETHOD(bus_print_child, hdaa_print_child),
DEVMETHOD(bus_child_location_str, hdaa_child_location_str),
DEVMETHOD(hdac_stream_intr, hdaa_stream_intr),
DEVMETHOD(hdac_unsol_intr, hdaa_unsol_intr),
DEVMETHOD(hdac_pindump, hdaa_pindump),
DEVMETHOD_END
};
static driver_t hdaa_driver = {
"hdaa",
hdaa_methods,
sizeof(struct hdaa_devinfo),
};
static devclass_t hdaa_devclass;
DRIVER_MODULE(snd_hda, hdacc, hdaa_driver, hdaa_devclass, NULL, NULL);
static void
hdaa_chan_formula(struct hdaa_devinfo *devinfo, int asid,
char *buf, int buflen)
{
struct hdaa_audio_as *as;
int c;
as = &devinfo->as[asid];
c = devinfo->chans[as->chans[0]].channels;
if (c == 1)
snprintf(buf, buflen, "mono");
else if (c == 2) {
if (as->hpredir < 0)
buf[0] = 0;
else
snprintf(buf, buflen, "2.0");
} else if (as->pinset == 0x0003)
snprintf(buf, buflen, "3.1");
else if (as->pinset == 0x0005 || as->pinset == 0x0011)
snprintf(buf, buflen, "4.0");
else if (as->pinset == 0x0007 || as->pinset == 0x0013)
snprintf(buf, buflen, "5.1");
else if (as->pinset == 0x0017)
snprintf(buf, buflen, "7.1");
else
snprintf(buf, buflen, "%dch", c);
if (as->hpredir >= 0)
strlcat(buf, "+HP", buflen);
}
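/*
 * Editorial note: the pinset values tested above are bitmasks of association
 * pin indices.  For example, pinset 0x0017 has pins 0, 1, 2 and 4 populated;
 * with each pin typically carrying a stereo pair, that amounts to eight
 * playback channels, hence the "7.1" label ("+HP" is appended when a
 * headphone redirection pin is present).
 */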
static int
hdaa_chan_type(struct hdaa_devinfo *devinfo, int asid)
{
struct hdaa_audio_as *as;
struct hdaa_widget *w;
int i, t = -1, t1;
as = &devinfo->as[asid];
for (i = 0; i < 16; i++) {
w = hdaa_widget_get(devinfo, as->pins[i]);
if (w == NULL || w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
continue;
t1 = HDA_CONFIG_DEFAULTCONF_DEVICE(w->wclass.pin.config);
if (t == -1)
t = t1;
else if (t != t1) {
t = -2;
break;
}
}
return (t);
}
static int
hdaa_sysctl_32bit(SYSCTL_HANDLER_ARGS)
{
struct hdaa_audio_as *as = (struct hdaa_audio_as *)oidp->oid_arg1;
struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo;
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_chan *ch;
int error, val, i;
uint32_t pcmcap;
ch = &devinfo->chans[as->chans[0]];
val = (ch->bit32 == 4) ? 32 : ((ch->bit32 == 3) ? 24 :
((ch->bit32 == 2) ? 20 : 0));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
pcmcap = ch->supp_pcm_size_rate;
if (val == 32 && HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(pcmcap))
ch->bit32 = 4;
else if (val == 24 && HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(pcmcap))
ch->bit32 = 3;
else if (val == 20 && HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(pcmcap))
ch->bit32 = 2;
else
return (EINVAL);
for (i = 1; i < as->num_chans; i++)
devinfo->chans[as->chans[i]].bit32 = ch->bit32;
return (0);
}
static int
hdaa_pcm_probe(device_t dev)
{
struct hdaa_pcm_devinfo *pdevinfo =
(struct hdaa_pcm_devinfo *)device_get_ivars(dev);
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
const char *pdesc;
char chans1[8], chans2[8];
char buf[128];
int loc1, loc2, t1, t2;
if (pdevinfo->playas >= 0)
loc1 = devinfo->as[pdevinfo->playas].location;
else
loc1 = devinfo->as[pdevinfo->recas].location;
if (pdevinfo->recas >= 0)
loc2 = devinfo->as[pdevinfo->recas].location;
else
loc2 = loc1;
if (loc1 != loc2)
loc1 = -2;
if (loc1 >= 0 && HDA_LOCS[loc1][0] == '0')
loc1 = -2;
chans1[0] = 0;
chans2[0] = 0;
t1 = t2 = -1;
if (pdevinfo->playas >= 0) {
hdaa_chan_formula(devinfo, pdevinfo->playas,
chans1, sizeof(chans1));
t1 = hdaa_chan_type(devinfo, pdevinfo->playas);
}
if (pdevinfo->recas >= 0) {
hdaa_chan_formula(devinfo, pdevinfo->recas,
chans2, sizeof(chans2));
t2 = hdaa_chan_type(devinfo, pdevinfo->recas);
}
if (chans1[0] != 0 || chans2[0] != 0) {
if (chans1[0] == 0 && pdevinfo->playas >= 0)
snprintf(chans1, sizeof(chans1), "2.0");
else if (chans2[0] == 0 && pdevinfo->recas >= 0)
snprintf(chans2, sizeof(chans2), "2.0");
if (strcmp(chans1, chans2) == 0)
chans2[0] = 0;
}
if (t1 == -1)
t1 = t2;
else if (t2 == -1)
t2 = t1;
if (t1 != t2)
t1 = -2;
if (pdevinfo->digital)
t1 = -2;
pdesc = device_get_desc(device_get_parent(dev));
snprintf(buf, sizeof(buf), "%.*s (%s%s%s%s%s%s%s%s%s)",
(int)(strlen(pdesc) - 21), pdesc,
loc1 >= 0 ? HDA_LOCS[loc1] : "", loc1 >= 0 ? " " : "",
(pdevinfo->digital == 0x7)?"HDMI/DP":
((pdevinfo->digital == 0x5)?"DisplayPort":
((pdevinfo->digital == 0x3)?"HDMI":
((pdevinfo->digital)?"Digital":"Analog"))),
chans1[0] ? " " : "", chans1,
chans2[0] ? "/" : "", chans2,
t1 >= 0 ? " " : "", t1 >= 0 ? HDA_DEVS[t1] : "");
device_set_desc_copy(dev, buf);
return (BUS_PROBE_SPECIFIC);
}
static int
hdaa_pcm_attach(device_t dev)
{
struct hdaa_pcm_devinfo *pdevinfo =
(struct hdaa_pcm_devinfo *)device_get_ivars(dev);
struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
struct hdaa_audio_as *as;
struct snddev_info *d;
char status[SND_STATUSLEN];
int i;
pdevinfo->chan_size = pcm_getbuffersize(dev,
HDA_BUFSZ_MIN, HDA_BUFSZ_DEFAULT, HDA_BUFSZ_MAX);
HDA_BOOTVERBOSE(
hdaa_dump_dac(pdevinfo);
hdaa_dump_adc(pdevinfo);
hdaa_dump_mix(pdevinfo);
hdaa_dump_ctls(pdevinfo, "Master Volume", SOUND_MASK_VOLUME);
hdaa_dump_ctls(pdevinfo, "PCM Volume", SOUND_MASK_PCM);
hdaa_dump_ctls(pdevinfo, "CD Volume", SOUND_MASK_CD);
hdaa_dump_ctls(pdevinfo, "Microphone Volume", SOUND_MASK_MIC);
hdaa_dump_ctls(pdevinfo, "Microphone2 Volume", SOUND_MASK_MONITOR);
hdaa_dump_ctls(pdevinfo, "Line-in Volume", SOUND_MASK_LINE);
hdaa_dump_ctls(pdevinfo, "Speaker/Beep Volume", SOUND_MASK_SPEAKER);
hdaa_dump_ctls(pdevinfo, "Recording Level", SOUND_MASK_RECLEV);
hdaa_dump_ctls(pdevinfo, "Input Mix Level", SOUND_MASK_IMIX);
hdaa_dump_ctls(pdevinfo, "Input Monitoring Level", SOUND_MASK_IGAIN);
hdaa_dump_ctls(pdevinfo, NULL, 0);
);
if (resource_int_value(device_get_name(dev),
device_get_unit(dev), "blocksize", &i) == 0 && i > 0) {
i &= HDA_BLK_ALIGN;
if (i < HDA_BLK_MIN)
i = HDA_BLK_MIN;
pdevinfo->chan_blkcnt = pdevinfo->chan_size / i;
i = 0;
while (pdevinfo->chan_blkcnt >> i)
i++;
pdevinfo->chan_blkcnt = 1 << (i - 1);
if (pdevinfo->chan_blkcnt < HDA_BDL_MIN)
pdevinfo->chan_blkcnt = HDA_BDL_MIN;
else if (pdevinfo->chan_blkcnt > HDA_BDL_MAX)
pdevinfo->chan_blkcnt = HDA_BDL_MAX;
} else
pdevinfo->chan_blkcnt = HDA_BDL_DEFAULT;
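	/*
	 * Editorial note: the loop above rounds the derived block count down
	 * to a power of two before clamping it to the BDL limits.  For
	 * example, a blocksize hint that yields chan_blkcnt = 24 shifts
	 * right five times before reaching zero, so the result is
	 * 1 << (5 - 1) = 16.
	 */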
/*
* We don't register an interrupt handler with snd_setup_intr()
* in the pcm device, so mark the pcm device as MPSAFE manually.
*/
pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE);
HDA_BOOTHVERBOSE(
device_printf(dev, "OSS mixer initialization...\n");
);
if (mixer_init(dev, &hdaa_audio_ctl_ossmixer_class, pdevinfo) != 0)
device_printf(dev, "Can't register mixer\n");
HDA_BOOTHVERBOSE(
device_printf(dev, "Registering PCM channels...\n");
);
if (pcm_register(dev, pdevinfo, (pdevinfo->playas >= 0)?1:0,
(pdevinfo->recas >= 0)?1:0) != 0)
device_printf(dev, "Can't register PCM\n");
pdevinfo->registered++;
d = device_get_softc(dev);
if (pdevinfo->playas >= 0) {
as = &devinfo->as[pdevinfo->playas];
for (i = 0; i < as->num_chans; i++)
pcm_addchan(dev, PCMDIR_PLAY, &hdaa_channel_class,
&devinfo->chans[as->chans[i]]);
SYSCTL_ADD_PROC(&d->play_sysctl_ctx,
SYSCTL_CHILDREN(d->play_sysctl_tree), OID_AUTO,
"32bit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
as, sizeof(as), hdaa_sysctl_32bit, "I",
"Resolution of 32bit samples (20/24/32bit)");
}
if (pdevinfo->recas >= 0) {
as = &devinfo->as[pdevinfo->recas];
for (i = 0; i < as->num_chans; i++)
pcm_addchan(dev, PCMDIR_REC, &hdaa_channel_class,
&devinfo->chans[as->chans[i]]);
SYSCTL_ADD_PROC(&d->rec_sysctl_ctx,
SYSCTL_CHILDREN(d->rec_sysctl_tree), OID_AUTO,
"32bit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
as, sizeof(as), hdaa_sysctl_32bit, "I",
"Resolution of 32bit samples (20/24/32bit)");
pdevinfo->autorecsrc = 2;
resource_int_value(device_get_name(dev), device_get_unit(dev),
"rec.autosrc", &pdevinfo->autorecsrc);
SYSCTL_ADD_INT(&d->rec_sysctl_ctx,
SYSCTL_CHILDREN(d->rec_sysctl_tree), OID_AUTO,
"autosrc", CTLFLAG_RW,
&pdevinfo->autorecsrc, 0,
"Automatic recording source selection");
}
if (pdevinfo->mixer != NULL) {
hdaa_audio_ctl_set_defaults(pdevinfo);
hdaa_lock(devinfo);
if (pdevinfo->playas >= 0) {
as = &devinfo->as[pdevinfo->playas];
hdaa_channels_handler(as);
}
if (pdevinfo->recas >= 0) {
as = &devinfo->as[pdevinfo->recas];
hdaa_autorecsrc_handler(as, NULL);
hdaa_channels_handler(as);
}
hdaa_unlock(devinfo);
}
snprintf(status, SND_STATUSLEN, "on %s %s",
device_get_nameunit(device_get_parent(dev)),
PCM_KLDSTRING(snd_hda));
pcm_setstatus(dev, status);
return (0);
}
static int
hdaa_pcm_detach(device_t dev)
{
struct hdaa_pcm_devinfo *pdevinfo =
(struct hdaa_pcm_devinfo *)device_get_ivars(dev);
int err;
if (pdevinfo->registered > 0) {
err = pcm_unregister(dev);
if (err != 0)
return (err);
}
return (0);
}
static device_method_t hdaa_pcm_methods[] = {
/* device interface */
DEVMETHOD(device_probe, hdaa_pcm_probe),
DEVMETHOD(device_attach, hdaa_pcm_attach),
DEVMETHOD(device_detach, hdaa_pcm_detach),
DEVMETHOD_END
};
static driver_t hdaa_pcm_driver = {
"pcm",
hdaa_pcm_methods,
PCM_SOFTC_SIZE,
};
DRIVER_MODULE(snd_hda_pcm, hdaa, hdaa_pcm_driver, pcm_devclass, NULL, NULL);
MODULE_DEPEND(snd_hda, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
MODULE_VERSION(snd_hda, 1);
Index: head/sys/dev/syscons/fire/fire_saver.c
===================================================================
--- head/sys/dev/syscons/fire/fire_saver.c (revision 328217)
+++ head/sys/dev/syscons/fire/fire_saver.c (revision 328218)
@@ -1,197 +1,197 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1999 Brad Forschinger
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* brad forschinger, 19990504 <retch@flag.blackened.net>
*
* written with much help from warp_saver.c
*
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/consio.h>
#include <sys/malloc.h>
#include <sys/fbio.h>
#include <dev/fb/fbreg.h>
#include <dev/fb/splashreg.h>
#include <dev/syscons/syscons.h>
#define SAVER_NAME "fire_saver"
#define RED(n) ((n) * 3 + 0)
#define GREEN(n) ((n) * 3 + 1)
#define BLUE(n) ((n) * 3 + 2)
#define SET_ORIGIN(adp, o) do { \
int oo = o; \
if (oo != last_origin) \
vidd_set_win_org(adp, last_origin = oo); \
} while (0)
static u_char *buf;
static u_char *vid;
static int banksize, scrmode, bpsl, scrw, scrh;
static u_char fire_pal[768];
static int blanked;
static void
fire_update(video_adapter_t *adp)
{
int x, y;
int o, p;
int last_origin = -1;
/* make a new bottom line */
for (x = 0, y = scrh; x < scrw; x++)
buf[x + (y * bpsl)] = random() % 160 + 96;
/* fade the flames out */
for (y = 0; y < scrh; y++) {
for (x = 0; x < scrw; x++) {
buf[x + (y * scrw)] =
(buf[(x + 0) + ((y + 0) * scrw)] +
buf[(x - 1) + ((y + 1) * scrw)] +
buf[(x + 0) + ((y + 1) * scrw)] +
buf[(x + 1) + ((y + 1) * scrw)]) / 4;
if (buf[x + (y * scrw)] > 0)
buf[x + (y * scrw)]--;
}
}
/* blit our buffer into video ram */
for (y = 0, p = 0, o = 0; y < scrh; y++, p += bpsl) {
while (p > banksize) {
p -= banksize;
o += banksize;
}
SET_ORIGIN(adp, o);
if (p + scrw < banksize) {
bcopy(buf + y * scrw, vid + p, scrw);
} else {
bcopy(buf + y * scrw, vid + p, banksize - p);
SET_ORIGIN(adp, o + banksize);
bcopy(buf + y * scrw + (banksize - p), vid,
scrw - (banksize - p));
p -= banksize;
o += banksize;
}
}
}
static int
fire_saver(video_adapter_t *adp, int blank)
{
int pl;
if (blank) {
/* switch to graphics mode */
if (blanked <= 0) {
pl = splhigh();
vidd_set_mode(adp, scrmode);
vidd_load_palette(adp, fire_pal);
blanked++;
vid = (u_char *)adp->va_window;
banksize = adp->va_window_size;
bpsl = adp->va_line_width;
splx(pl);
vidd_clear(adp);
}
fire_update(adp);
} else {
blanked = 0;
}
return 0;
}
static int
fire_init(video_adapter_t *adp)
{
video_info_t info;
int i, red, green, blue;
if (!vidd_get_info(adp, M_VGA_CG320, &info)) {
scrmode = M_VGA_CG320;
} else {
log(LOG_NOTICE,
"%s: the console does not support M_VGA_CG320\n",
SAVER_NAME);
return (ENODEV);
}
scrw = info.vi_width;
scrh = info.vi_height;
- buf = (u_char *)mallocarray(scrw, scrh + 1, M_DEVBUF, M_NOWAIT);
+ buf = (u_char *)malloc(scrw * (scrh + 1), M_DEVBUF, M_NOWAIT);
if (buf) {
bzero(buf, scrw * (scrh + 1));
} else {
log(LOG_NOTICE,
"%s: buffer allocation is failed\n",
SAVER_NAME);
return (ENODEV);
}
/* initialize the palette */
red = green = blue = 0;
for (i = 0; i < 256; i++) {
red++;
if (red > 128)
green += 2;
fire_pal[RED(i)] = red;
fire_pal[GREEN(i)] = green;
fire_pal[BLUE(i)] = blue;
}
return (0);
}
static int
fire_term(video_adapter_t *adp)
{
free(buf, M_DEVBUF);
return (0);
}
static scrn_saver_t fire_module = {
SAVER_NAME,
fire_init,
fire_term,
fire_saver,
NULL
};
SAVER_MODULE(fire_saver, fire_module);
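/*
 * Editorial sketch (not part of fire_saver.c): the flame effect above seeds
 * the off-screen bottom row with random intensities and then, for every
 * cell, averages it with its three lower neighbours and decays the result
 * by one.  A self-contained toy version of that rule on a small fixed grid,
 * printed as ASCII:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define W 16
#define H 8

static unsigned char grid[(H + 1) * W];

int
main(void)
{
	int x, y, step;

	for (step = 0; step < 20; step++) {
		/* seed the (off-screen) bottom row */
		for (x = 0; x < W; x++)
			grid[H * W + x] = rand() % 160 + 96;
		/* average each cell with the three cells below, then decay */
		for (y = 0; y < H; y++)
			for (x = 1; x < W - 1; x++) {
				unsigned v = (grid[y * W + x] +
				    grid[(y + 1) * W + x - 1] +
				    grid[(y + 1) * W + x] +
				    grid[(y + 1) * W + x + 1]) / 4;
				grid[y * W + x] = v > 0 ? v - 1 : 0;
			}
	}
	for (y = 0; y < H; y++) {
		for (x = 0; x < W; x++)
			putchar(" .:*#"[grid[y * W + x] / 52]);
		putchar('\n');
	}
	return (0);
}
#endif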
Index: head/sys/dev/virtio/console/virtio_console.c
===================================================================
--- head/sys/dev/virtio/console/virtio_console.c (revision 328217)
+++ head/sys/dev/virtio/console/virtio_console.c (revision 328218)
@@ -1,1504 +1,1502 @@
/*-
* Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO console devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kdb.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/tty.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/console/virtio_console.h>
#include "virtio_if.h"
#define VTCON_MAX_PORTS 32
#define VTCON_TTY_PREFIX "V"
#define VTCON_TTY_ALIAS_PREFIX "vtcon"
#define VTCON_BULK_BUFSZ 128
#define VTCON_CTRL_BUFSZ 128
/*
* The buffers cannot cross more than one page boundary due to the
* size of the sglist segment array used.
*/
CTASSERT(VTCON_BULK_BUFSZ <= PAGE_SIZE);
CTASSERT(VTCON_CTRL_BUFSZ <= PAGE_SIZE);
CTASSERT(sizeof(struct virtio_console_config) <= VTCON_CTRL_BUFSZ);
struct vtcon_softc;
struct vtcon_softc_port;
struct vtcon_port {
struct mtx vtcport_mtx;
struct vtcon_softc *vtcport_sc;
struct vtcon_softc_port *vtcport_scport;
struct tty *vtcport_tty;
struct virtqueue *vtcport_invq;
struct virtqueue *vtcport_outvq;
int vtcport_id;
int vtcport_flags;
#define VTCON_PORT_FLAG_GONE 0x01
#define VTCON_PORT_FLAG_CONSOLE 0x02
#define VTCON_PORT_FLAG_ALIAS 0x04
#if defined(KDB)
int vtcport_alt_break_state;
#endif
};
#define VTCON_PORT_LOCK(_port) mtx_lock(&(_port)->vtcport_mtx)
#define VTCON_PORT_UNLOCK(_port) mtx_unlock(&(_port)->vtcport_mtx)
struct vtcon_softc_port {
struct vtcon_softc *vcsp_sc;
struct vtcon_port *vcsp_port;
struct virtqueue *vcsp_invq;
struct virtqueue *vcsp_outvq;
};
struct vtcon_softc {
device_t vtcon_dev;
struct mtx vtcon_mtx;
uint64_t vtcon_features;
uint32_t vtcon_max_ports;
uint32_t vtcon_flags;
#define VTCON_FLAG_DETACHED 0x01
#define VTCON_FLAG_SIZE 0x02
#define VTCON_FLAG_MULTIPORT 0x04
/*
* Ports can be added and removed during runtime, but we have
* to allocate all the virtqueues during attach. This array is
* indexed by the port ID.
*/
struct vtcon_softc_port *vtcon_ports;
struct task vtcon_ctrl_task;
struct virtqueue *vtcon_ctrl_rxvq;
struct virtqueue *vtcon_ctrl_txvq;
struct mtx vtcon_ctrl_tx_mtx;
};
#define VTCON_LOCK(_sc) mtx_lock(&(_sc)->vtcon_mtx)
#define VTCON_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_mtx)
#define VTCON_LOCK_ASSERT(_sc) \
mtx_assert(&(_sc)->vtcon_mtx, MA_OWNED)
#define VTCON_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(&(_sc)->vtcon_mtx, MA_NOTOWNED)
#define VTCON_CTRL_TX_LOCK(_sc) mtx_lock(&(_sc)->vtcon_ctrl_tx_mtx)
#define VTCON_CTRL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_ctrl_tx_mtx)
#define VTCON_ASSERT_VALID_PORTID(_sc, _id) \
KASSERT((_id) >= 0 && (_id) < (_sc)->vtcon_max_ports, \
("%s: port ID %d out of range", __func__, _id))
#define VTCON_FEATURES VIRTIO_CONSOLE_F_MULTIPORT
static struct virtio_feature_desc vtcon_feature_desc[] = {
{ VIRTIO_CONSOLE_F_SIZE, "ConsoleSize" },
{ VIRTIO_CONSOLE_F_MULTIPORT, "MultiplePorts" },
{ VIRTIO_CONSOLE_F_EMERG_WRITE, "EmergencyWrite" },
{ 0, NULL }
};
static int vtcon_modevent(module_t, int, void *);
static void vtcon_drain_all(void);
static int vtcon_probe(device_t);
static int vtcon_attach(device_t);
static int vtcon_detach(device_t);
static int vtcon_config_change(device_t);
static void vtcon_setup_features(struct vtcon_softc *);
static void vtcon_negotiate_features(struct vtcon_softc *);
static int vtcon_alloc_scports(struct vtcon_softc *);
static int vtcon_alloc_virtqueues(struct vtcon_softc *);
static void vtcon_read_config(struct vtcon_softc *,
struct virtio_console_config *);
static void vtcon_determine_max_ports(struct vtcon_softc *,
struct virtio_console_config *);
static void vtcon_destroy_ports(struct vtcon_softc *);
static void vtcon_stop(struct vtcon_softc *);
static int vtcon_ctrl_event_enqueue(struct vtcon_softc *,
struct virtio_console_control *);
static int vtcon_ctrl_event_create(struct vtcon_softc *);
static void vtcon_ctrl_event_requeue(struct vtcon_softc *,
struct virtio_console_control *);
static int vtcon_ctrl_event_populate(struct vtcon_softc *);
static void vtcon_ctrl_event_drain(struct vtcon_softc *);
static int vtcon_ctrl_init(struct vtcon_softc *);
static void vtcon_ctrl_deinit(struct vtcon_softc *);
static void vtcon_ctrl_port_add_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_remove_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_console_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_open_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_name_event(struct vtcon_softc *, int,
const char *, size_t);
static void vtcon_ctrl_process_event(struct vtcon_softc *,
struct virtio_console_control *, void *, size_t);
static void vtcon_ctrl_task_cb(void *, int);
static void vtcon_ctrl_event_intr(void *);
static void vtcon_ctrl_poll(struct vtcon_softc *,
struct virtio_console_control *control);
static void vtcon_ctrl_send_control(struct vtcon_softc *, uint32_t,
uint16_t, uint16_t);
static int vtcon_port_enqueue_buf(struct vtcon_port *, void *, size_t);
static int vtcon_port_create_buf(struct vtcon_port *);
static void vtcon_port_requeue_buf(struct vtcon_port *, void *);
static int vtcon_port_populate(struct vtcon_port *);
static void vtcon_port_destroy(struct vtcon_port *);
static int vtcon_port_create(struct vtcon_softc *, int);
static void vtcon_port_dev_alias(struct vtcon_port *, const char *,
size_t);
static void vtcon_port_drain_bufs(struct virtqueue *);
static void vtcon_port_drain(struct vtcon_port *);
static void vtcon_port_teardown(struct vtcon_port *);
static void vtcon_port_change_size(struct vtcon_port *, uint16_t,
uint16_t);
static void vtcon_port_update_console_size(struct vtcon_softc *);
static void vtcon_port_enable_intr(struct vtcon_port *);
static void vtcon_port_disable_intr(struct vtcon_port *);
static void vtcon_port_in(struct vtcon_port *);
static void vtcon_port_intr(void *);
static void vtcon_port_out(struct vtcon_port *, void *, int);
static void vtcon_port_submit_event(struct vtcon_port *, uint16_t,
uint16_t);
static int vtcon_tty_open(struct tty *);
static void vtcon_tty_close(struct tty *);
static void vtcon_tty_outwakeup(struct tty *);
static void vtcon_tty_free(void *);
static void vtcon_get_console_size(struct vtcon_softc *, uint16_t *,
uint16_t *);
static void vtcon_enable_interrupts(struct vtcon_softc *);
static void vtcon_disable_interrupts(struct vtcon_softc *);
static int vtcon_pending_free;
static struct ttydevsw vtcon_tty_class = {
.tsw_flags = 0,
.tsw_open = vtcon_tty_open,
.tsw_close = vtcon_tty_close,
.tsw_outwakeup = vtcon_tty_outwakeup,
.tsw_free = vtcon_tty_free,
};
static device_method_t vtcon_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtcon_probe),
DEVMETHOD(device_attach, vtcon_attach),
DEVMETHOD(device_detach, vtcon_detach),
/* VirtIO methods. */
DEVMETHOD(virtio_config_change, vtcon_config_change),
DEVMETHOD_END
};
static driver_t vtcon_driver = {
"vtcon",
vtcon_methods,
sizeof(struct vtcon_softc)
};
static devclass_t vtcon_devclass;
DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass,
vtcon_modevent, 0);
MODULE_VERSION(virtio_console, 1);
MODULE_DEPEND(virtio_console, virtio, 1, 1, 1);
static int
vtcon_modevent(module_t mod, int type, void *unused)
{
int error;
switch (type) {
case MOD_LOAD:
error = 0;
break;
case MOD_QUIESCE:
error = 0;
break;
case MOD_UNLOAD:
vtcon_drain_all();
error = 0;
break;
case MOD_SHUTDOWN:
error = 0;
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static void
vtcon_drain_all(void)
{
int first;
for (first = 1; vtcon_pending_free != 0; first = 0) {
if (first != 0) {
printf("virtio_console: Waiting for all detached TTY "
"devices to have open fds closed.\n");
}
pause("vtcondra", hz);
}
}
static int
vtcon_probe(device_t dev)
{
if (virtio_get_device_type(dev) != VIRTIO_ID_CONSOLE)
return (ENXIO);
device_set_desc(dev, "VirtIO Console Adapter");
return (BUS_PROBE_DEFAULT);
}
static int
vtcon_attach(device_t dev)
{
struct vtcon_softc *sc;
struct virtio_console_config concfg;
int error;
sc = device_get_softc(dev);
sc->vtcon_dev = dev;
mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF);
mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF);
virtio_set_feature_desc(dev, vtcon_feature_desc);
vtcon_setup_features(sc);
vtcon_read_config(sc, &concfg);
vtcon_determine_max_ports(sc, &concfg);
error = vtcon_alloc_scports(sc);
if (error) {
device_printf(dev, "cannot allocate softc port structures\n");
goto fail;
}
error = vtcon_alloc_virtqueues(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueues\n");
goto fail;
}
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) {
TASK_INIT(&sc->vtcon_ctrl_task, 0, vtcon_ctrl_task_cb, sc);
error = vtcon_ctrl_init(sc);
if (error)
goto fail;
} else {
error = vtcon_port_create(sc, 0);
if (error)
goto fail;
if (sc->vtcon_flags & VTCON_FLAG_SIZE)
vtcon_port_update_console_size(sc);
}
error = virtio_setup_intr(dev, INTR_TYPE_TTY);
if (error) {
device_printf(dev, "cannot setup virtqueue interrupts\n");
goto fail;
}
vtcon_enable_interrupts(sc);
vtcon_ctrl_send_control(sc, VIRTIO_CONSOLE_BAD_ID,
VIRTIO_CONSOLE_DEVICE_READY, 1);
fail:
if (error)
vtcon_detach(dev);
return (error);
}
static int
vtcon_detach(device_t dev)
{
struct vtcon_softc *sc;
sc = device_get_softc(dev);
VTCON_LOCK(sc);
sc->vtcon_flags |= VTCON_FLAG_DETACHED;
if (device_is_attached(dev))
vtcon_stop(sc);
VTCON_UNLOCK(sc);
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) {
taskqueue_drain(taskqueue_thread, &sc->vtcon_ctrl_task);
vtcon_ctrl_deinit(sc);
}
vtcon_destroy_ports(sc);
mtx_destroy(&sc->vtcon_mtx);
mtx_destroy(&sc->vtcon_ctrl_tx_mtx);
return (0);
}
static int
vtcon_config_change(device_t dev)
{
struct vtcon_softc *sc;
sc = device_get_softc(dev);
/*
* When the multiport feature is negotiated, all configuration
* changes are done through control virtqueue events.
*/
if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0) {
if (sc->vtcon_flags & VTCON_FLAG_SIZE)
vtcon_port_update_console_size(sc);
}
return (0);
}
static void
vtcon_negotiate_features(struct vtcon_softc *sc)
{
device_t dev;
uint64_t features;
dev = sc->vtcon_dev;
features = VTCON_FEATURES;
sc->vtcon_features = virtio_negotiate_features(dev, features);
}
static void
vtcon_setup_features(struct vtcon_softc *sc)
{
device_t dev;
dev = sc->vtcon_dev;
vtcon_negotiate_features(sc);
if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE))
sc->vtcon_flags |= VTCON_FLAG_SIZE;
if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT))
sc->vtcon_flags |= VTCON_FLAG_MULTIPORT;
}
#define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg) \
if (virtio_with_feature(_dev, _feature)) { \
virtio_read_device_config(_dev, \
offsetof(struct virtio_console_config, _field), \
&(_cfg)->_field, sizeof((_cfg)->_field)); \
}
static void
vtcon_read_config(struct vtcon_softc *sc, struct virtio_console_config *concfg)
{
device_t dev;
dev = sc->vtcon_dev;
bzero(concfg, sizeof(struct virtio_console_config));
VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, cols, concfg);
VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, rows, concfg);
VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_MULTIPORT, max_nr_ports, concfg);
}
#undef VTCON_GET_CONFIG
static int
vtcon_alloc_scports(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
- u_int max, i;
+ int max, i;
max = sc->vtcon_max_ports;
- sc->vtcon_ports = mallocarray(max, sizeof(struct vtcon_softc_port),
+ sc->vtcon_ports = malloc(sizeof(struct vtcon_softc_port) * max,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtcon_ports == NULL)
return (ENOMEM);
for (i = 0; i < max; i++) {
scport = &sc->vtcon_ports[i];
scport->vcsp_sc = sc;
}
return (0);
}
static int
vtcon_alloc_virtqueues(struct vtcon_softc *sc)
{
device_t dev;
struct vq_alloc_info *info;
struct vtcon_softc_port *scport;
- u_int i, idx, portidx, nvqs;
- int error;
+ int i, idx, portidx, nvqs, error;
dev = sc->vtcon_dev;
nvqs = sc->vtcon_max_ports * 2;
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT)
nvqs += 2;
- info = mallocarray(nvqs, sizeof(struct vq_alloc_info), M_TEMP,
- M_NOWAIT);
+ info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
if (info == NULL)
return (ENOMEM);
for (i = 0, idx = 0, portidx = 0; i < nvqs / 2; i++, idx += 2) {
if (i == 1) {
/* The control virtqueues are after the first port. */
VQ_ALLOC_INFO_INIT(&info[idx], 0,
vtcon_ctrl_event_intr, sc, &sc->vtcon_ctrl_rxvq,
"%s-control rx", device_get_nameunit(dev));
VQ_ALLOC_INFO_INIT(&info[idx+1], 0,
NULL, sc, &sc->vtcon_ctrl_txvq,
"%s-control tx", device_get_nameunit(dev));
continue;
}
scport = &sc->vtcon_ports[portidx];
VQ_ALLOC_INFO_INIT(&info[idx], 0, vtcon_port_intr,
scport, &scport->vcsp_invq, "%s-port%d in",
device_get_nameunit(dev), i);
VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL,
NULL, &scport->vcsp_outvq, "%s-port%d out",
device_get_nameunit(dev), i);
portidx++;
}
error = virtio_alloc_virtqueues(dev, 0, nvqs, info);
free(info, M_TEMP);
return (error);
}
static void
vtcon_determine_max_ports(struct vtcon_softc *sc,
struct virtio_console_config *concfg)
{
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) {
sc->vtcon_max_ports =
min(concfg->max_nr_ports, VTCON_MAX_PORTS);
if (sc->vtcon_max_ports == 0)
sc->vtcon_max_ports = 1;
} else
sc->vtcon_max_ports = 1;
}
static void
vtcon_destroy_ports(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
struct vtcon_port *port;
struct virtqueue *vq;
int i;
if (sc->vtcon_ports == NULL)
return;
VTCON_LOCK(sc);
for (i = 0; i < sc->vtcon_max_ports; i++) {
scport = &sc->vtcon_ports[i];
port = scport->vcsp_port;
if (port != NULL) {
scport->vcsp_port = NULL;
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_teardown(port);
VTCON_LOCK(sc);
}
vq = scport->vcsp_invq;
if (vq != NULL)
vtcon_port_drain_bufs(vq);
}
VTCON_UNLOCK(sc);
free(sc->vtcon_ports, M_DEVBUF);
sc->vtcon_ports = NULL;
}
static void
vtcon_stop(struct vtcon_softc *sc)
{
vtcon_disable_interrupts(sc);
virtio_stop(sc->vtcon_dev);
}
static int
vtcon_ctrl_event_enqueue(struct vtcon_softc *sc,
struct virtio_console_control *control)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = sc->vtcon_ctrl_rxvq;
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, control, VTCON_CTRL_BUFSZ);
KASSERT(error == 0, ("%s: error %d adding control to sglist",
__func__, error));
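	/*
	 * Editorial note: the (0, sg.sg_nseg) arguments below mark every
	 * segment as device-writable, since this buffer sits on the control
	 * receive queue and is filled in by the host; the transmit paths
	 * later in this file use (sg.sg_nseg, 0) instead.
	 */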
return (virtqueue_enqueue(vq, control, &sg, 0, sg.sg_nseg));
}
static int
vtcon_ctrl_event_create(struct vtcon_softc *sc)
{
struct virtio_console_control *control;
int error;
control = malloc(VTCON_CTRL_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT);
if (control == NULL)
return (ENOMEM);
error = vtcon_ctrl_event_enqueue(sc, control);
if (error)
free(control, M_DEVBUF);
return (error);
}
static void
vtcon_ctrl_event_requeue(struct vtcon_softc *sc,
struct virtio_console_control *control)
{
int error;
bzero(control, VTCON_CTRL_BUFSZ);
error = vtcon_ctrl_event_enqueue(sc, control);
KASSERT(error == 0,
("%s: cannot requeue control buffer %d", __func__, error));
}
static int
vtcon_ctrl_event_populate(struct vtcon_softc *sc)
{
struct virtqueue *vq;
int nbufs, error;
vq = sc->vtcon_ctrl_rxvq;
error = ENOSPC;
for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
error = vtcon_ctrl_event_create(sc);
if (error)
break;
}
if (nbufs > 0) {
virtqueue_notify(vq);
error = 0;
}
return (error);
}
static void
vtcon_ctrl_event_drain(struct vtcon_softc *sc)
{
struct virtio_console_control *control;
struct virtqueue *vq;
int last;
vq = sc->vtcon_ctrl_rxvq;
last = 0;
if (vq == NULL)
return;
VTCON_LOCK(sc);
while ((control = virtqueue_drain(vq, &last)) != NULL)
free(control, M_DEVBUF);
VTCON_UNLOCK(sc);
}
static int
vtcon_ctrl_init(struct vtcon_softc *sc)
{
int error;
error = vtcon_ctrl_event_populate(sc);
return (error);
}
static void
vtcon_ctrl_deinit(struct vtcon_softc *sc)
{
vtcon_ctrl_event_drain(sc);
}
static void
vtcon_ctrl_port_add_event(struct vtcon_softc *sc, int id)
{
device_t dev;
int error;
dev = sc->vtcon_dev;
/* This single thread is the only way for ports to be created. */
if (sc->vtcon_ports[id].vcsp_port != NULL) {
device_printf(dev, "%s: adding port %d, but already exists\n",
__func__, id);
return;
}
error = vtcon_port_create(sc, id);
if (error) {
device_printf(dev, "%s: cannot create port %d: %d\n",
__func__, id, error);
vtcon_ctrl_send_control(sc, id, VIRTIO_CONSOLE_PORT_READY, 0);
return;
}
}
static void
vtcon_ctrl_port_remove_event(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: remove port %d, but does not exist\n",
__func__, id);
return;
}
scport->vcsp_port = NULL;
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_teardown(port);
}
static void
vtcon_ctrl_port_console_event(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: console port %d, but does not exist\n",
__func__, id);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
port->vtcport_flags |= VTCON_PORT_FLAG_CONSOLE;
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_ctrl_port_open_event(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: open port %d, but does not exist\n",
__func__, id);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_enable_intr(port);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_ctrl_port_name_event(struct vtcon_softc *sc, int id, const char *name,
size_t len)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
/*
* The VirtIO specification says the NUL terminator is not included in
* the length, but QEMU includes it. Adjust the length if needed.
*/
if (name == NULL || len == 0)
return;
if (name[len - 1] == '\0') {
len--;
if (len == 0)
return;
}
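	/*
	 * Editorial example: for a hypothetical 10-byte QEMU-style name
	 * "console.0\0", the trailing NUL is dropped and len becomes 9
	 * before the device alias is created below.
	 */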
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: name port %d, but does not exist\n",
__func__, id);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_dev_alias(port, name, len);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_ctrl_process_event(struct vtcon_softc *sc,
struct virtio_console_control *control, void *data, size_t data_len)
{
device_t dev;
int id;
dev = sc->vtcon_dev;
id = control->id;
if (id < 0 || id >= sc->vtcon_max_ports) {
device_printf(dev, "%s: invalid port ID %d\n", __func__, id);
return;
}
switch (control->event) {
case VIRTIO_CONSOLE_PORT_ADD:
vtcon_ctrl_port_add_event(sc, id);
break;
case VIRTIO_CONSOLE_PORT_REMOVE:
vtcon_ctrl_port_remove_event(sc, id);
break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
vtcon_ctrl_port_console_event(sc, id);
break;
case VIRTIO_CONSOLE_RESIZE:
break;
case VIRTIO_CONSOLE_PORT_OPEN:
vtcon_ctrl_port_open_event(sc, id);
break;
case VIRTIO_CONSOLE_PORT_NAME:
vtcon_ctrl_port_name_event(sc, id, (const char *)data, data_len);
break;
}
}
static void
vtcon_ctrl_task_cb(void *xsc, int pending)
{
struct vtcon_softc *sc;
struct virtqueue *vq;
struct virtio_console_control *control;
void *data;
size_t data_len;
int detached;
uint32_t len;
sc = xsc;
vq = sc->vtcon_ctrl_rxvq;
VTCON_LOCK(sc);
while ((detached = (sc->vtcon_flags & VTCON_FLAG_DETACHED)) == 0) {
control = virtqueue_dequeue(vq, &len);
if (control == NULL)
break;
if (len > sizeof(struct virtio_console_control)) {
data = (void *) &control[1];
data_len = len - sizeof(struct virtio_console_control);
} else {
data = NULL;
data_len = 0;
}
VTCON_UNLOCK(sc);
vtcon_ctrl_process_event(sc, control, data, data_len);
VTCON_LOCK(sc);
vtcon_ctrl_event_requeue(sc, control);
}
if (!detached) {
virtqueue_notify(vq);
if (virtqueue_enable_intr(vq) != 0)
taskqueue_enqueue(taskqueue_thread,
&sc->vtcon_ctrl_task);
}
VTCON_UNLOCK(sc);
}
static void
vtcon_ctrl_event_intr(void *xsc)
{
struct vtcon_softc *sc;
sc = xsc;
/*
* Only some events require us to potentially block, but it is
* easier to just defer all event handling to the taskqueue.
*/
taskqueue_enqueue(taskqueue_thread, &sc->vtcon_ctrl_task);
}
static void
vtcon_ctrl_poll(struct vtcon_softc *sc,
struct virtio_console_control *control)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = sc->vtcon_ctrl_txvq;
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, control,
sizeof(struct virtio_console_control));
KASSERT(error == 0, ("%s: error %d adding control to sglist",
__func__, error));
/*
* We cannot use the softc lock to serialize access to this
* virtqueue since this is called from the tty layer with the
* port lock held. Acquiring the softc would violate our lock
* ordering.
*/
VTCON_CTRL_TX_LOCK(sc);
KASSERT(virtqueue_empty(vq),
("%s: virtqueue is not emtpy", __func__));
error = virtqueue_enqueue(vq, control, &sg, sg.sg_nseg, 0);
if (error == 0) {
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
}
VTCON_CTRL_TX_UNLOCK(sc);
}
static void
vtcon_ctrl_send_control(struct vtcon_softc *sc, uint32_t portid,
uint16_t event, uint16_t value)
{
struct virtio_console_control control;
if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0)
return;
control.id = portid;
control.event = event;
control.value = value;
vtcon_ctrl_poll(sc, &control);
}
static int
vtcon_port_enqueue_buf(struct vtcon_port *port, void *buf, size_t len)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = port->vtcport_invq;
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, buf, len);
KASSERT(error == 0,
("%s: error %d adding buffer to sglist", __func__, error));
error = virtqueue_enqueue(vq, buf, &sg, 0, sg.sg_nseg);
return (error);
}
static int
vtcon_port_create_buf(struct vtcon_port *port)
{
void *buf;
int error;
buf = malloc(VTCON_BULK_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT);
if (buf == NULL)
return (ENOMEM);
error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ);
if (error)
free(buf, M_DEVBUF);
return (error);
}
static void
vtcon_port_requeue_buf(struct vtcon_port *port, void *buf)
{
int error;
error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ);
KASSERT(error == 0,
("%s: cannot requeue input buffer %d", __func__, error));
}
static int
vtcon_port_populate(struct vtcon_port *port)
{
struct virtqueue *vq;
int nbufs, error;
vq = port->vtcport_invq;
error = ENOSPC;
for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
error = vtcon_port_create_buf(port);
if (error)
break;
}
if (nbufs > 0) {
virtqueue_notify(vq);
error = 0;
}
return (error);
}
static void
vtcon_port_destroy(struct vtcon_port *port)
{
port->vtcport_sc = NULL;
port->vtcport_scport = NULL;
port->vtcport_invq = NULL;
port->vtcport_outvq = NULL;
port->vtcport_id = -1;
mtx_destroy(&port->vtcport_mtx);
free(port, M_DEVBUF);
}
static int
vtcon_port_init_vqs(struct vtcon_port *port)
{
struct vtcon_softc_port *scport;
int error;
scport = port->vtcport_scport;
port->vtcport_invq = scport->vcsp_invq;
port->vtcport_outvq = scport->vcsp_outvq;
/*
* Free any data left over from when this virtqueue was in use by a
* prior port. We have not yet notified the host that the port is
* ready, so assume nothing in the virtqueue can be for us.
*/
vtcon_port_drain(port);
KASSERT(virtqueue_empty(port->vtcport_invq),
("%s: in virtqueue is not empty", __func__));
KASSERT(virtqueue_empty(port->vtcport_outvq),
("%s: out virtqueue is not empty", __func__));
error = vtcon_port_populate(port);
if (error)
return (error);
return (0);
}
static int
vtcon_port_create(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
int error;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_ASSERT_VALID_PORTID(sc, id);
MPASS(scport->vcsp_port == NULL);
port = malloc(sizeof(struct vtcon_port), M_DEVBUF, M_NOWAIT | M_ZERO);
if (port == NULL)
return (ENOMEM);
port->vtcport_sc = sc;
port->vtcport_scport = scport;
port->vtcport_id = id;
mtx_init(&port->vtcport_mtx, "vtcpmtx", NULL, MTX_DEF);
port->vtcport_tty = tty_alloc_mutex(&vtcon_tty_class, port,
&port->vtcport_mtx);
error = vtcon_port_init_vqs(port);
if (error) {
VTCON_PORT_LOCK(port);
vtcon_port_teardown(port);
return (error);
}
VTCON_LOCK(sc);
VTCON_PORT_LOCK(port);
scport->vcsp_port = port;
vtcon_port_enable_intr(port);
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_READY, 1);
VTCON_PORT_UNLOCK(port);
VTCON_UNLOCK(sc);
tty_makedev(port->vtcport_tty, NULL, "%s%r.%r", VTCON_TTY_PREFIX,
device_get_unit(dev), id);
return (0);
}
static void
vtcon_port_dev_alias(struct vtcon_port *port, const char *name, size_t len)
{
struct vtcon_softc *sc;
struct cdev *pdev;
struct tty *tp;
int i, error;
sc = port->vtcport_sc;
tp = port->vtcport_tty;
if (port->vtcport_flags & VTCON_PORT_FLAG_ALIAS)
return;
/* Port name is UTF-8, but we can only handle ASCII. */
for (i = 0; i < len; i++) {
if (!isascii(name[i]))
return;
}
/*
* The port name may not conform to devfs naming requirements, so we cannot
* use tty_makealias(); call make_dev_alias_p() directly so that the
* MAKEDEV_CHECKNAME flag can be specified.
*/
error = make_dev_alias_p(MAKEDEV_NOWAIT | MAKEDEV_CHECKNAME, &pdev,
tp->t_dev, "%s/%*s", VTCON_TTY_ALIAS_PREFIX, (int)len, name);
if (error) {
device_printf(sc->vtcon_dev,
"%s: cannot make dev alias (%s/%*s) error %d\n", __func__,
VTCON_TTY_ALIAS_PREFIX, (int)len, name, error);
} else
port->vtcport_flags |= VTCON_PORT_FLAG_ALIAS;
}
static void
vtcon_port_drain_bufs(struct virtqueue *vq)
{
void *buf;
int last;
last = 0;
while ((buf = virtqueue_drain(vq, &last)) != NULL)
free(buf, M_DEVBUF);
}
static void
vtcon_port_drain(struct vtcon_port *port)
{
vtcon_port_drain_bufs(port->vtcport_invq);
}
static void
vtcon_port_teardown(struct vtcon_port *port)
{
struct tty *tp;
tp = port->vtcport_tty;
port->vtcport_flags |= VTCON_PORT_FLAG_GONE;
if (tp != NULL) {
atomic_add_int(&vtcon_pending_free, 1);
tty_rel_gone(tp);
} else
vtcon_port_destroy(port);
}
static void
vtcon_port_change_size(struct vtcon_port *port, uint16_t cols, uint16_t rows)
{
struct tty *tp;
struct winsize sz;
tp = port->vtcport_tty;
if (tp == NULL)
return;
bzero(&sz, sizeof(struct winsize));
sz.ws_col = cols;
sz.ws_row = rows;
tty_set_winsize(tp, &sz);
}
static void
vtcon_port_update_console_size(struct vtcon_softc *sc)
{
struct vtcon_port *port;
struct vtcon_softc_port *scport;
uint16_t cols, rows;
vtcon_get_console_size(sc, &cols, &rows);
/*
* For now, assume the first (only) port is the console. Note
* QEMU does not implement this feature yet.
*/
scport = &sc->vtcon_ports[0];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port != NULL) {
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_change_size(port, cols, rows);
VTCON_PORT_UNLOCK(port);
} else
VTCON_UNLOCK(sc);
}
static void
vtcon_port_enable_intr(struct vtcon_port *port)
{
/*
* NOTE: The out virtqueue is always polled, so its interrupt
* is kept disabled.
*/
virtqueue_enable_intr(port->vtcport_invq);
}
static void
vtcon_port_disable_intr(struct vtcon_port *port)
{
if (port->vtcport_invq != NULL)
virtqueue_disable_intr(port->vtcport_invq);
if (port->vtcport_outvq != NULL)
virtqueue_disable_intr(port->vtcport_outvq);
}
static void
vtcon_port_in(struct vtcon_port *port)
{
struct virtqueue *vq;
struct tty *tp;
char *buf;
uint32_t len;
int i, deq;
tp = port->vtcport_tty;
vq = port->vtcport_invq;
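	/*
	 * Editorial note: the loop below drains everything currently
	 * available, re-enables the interrupt, and retries if
	 * virtqueue_enable_intr() reports that new buffers arrived in the
	 * meantime, so no input is lost in that window.
	 */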
again:
deq = 0;
while ((buf = virtqueue_dequeue(vq, &len)) != NULL) {
for (i = 0; i < len; i++) {
#if defined(KDB)
if (port->vtcport_flags & VTCON_PORT_FLAG_CONSOLE)
kdb_alt_break(buf[i],
&port->vtcport_alt_break_state);
#endif
ttydisc_rint(tp, buf[i], 0);
}
vtcon_port_requeue_buf(port, buf);
deq++;
}
ttydisc_rint_done(tp);
if (deq > 0)
virtqueue_notify(vq);
if (virtqueue_enable_intr(vq) != 0)
goto again;
}
static void
vtcon_port_intr(void *scportx)
{
struct vtcon_softc_port *scport;
struct vtcon_softc *sc;
struct vtcon_port *port;
scport = scportx;
sc = scport->vcsp_sc;
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
if ((port->vtcport_flags & VTCON_PORT_FLAG_GONE) == 0)
vtcon_port_in(port);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_port_out(struct vtcon_port *port, void *buf, int bufsize)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = port->vtcport_outvq;
KASSERT(virtqueue_empty(vq),
("%s: port %p out virtqueue not emtpy", __func__, port));
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, buf, bufsize);
KASSERT(error == 0, ("%s: error %d adding buffer to sglist",
__func__, error));
error = virtqueue_enqueue(vq, buf, &sg, sg.sg_nseg, 0);
if (error == 0) {
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
}
}
static void
vtcon_port_submit_event(struct vtcon_port *port, uint16_t event,
uint16_t value)
{
struct vtcon_softc *sc;
sc = port->vtcport_sc;
vtcon_ctrl_send_control(sc, port->vtcport_id, event, value);
}
static int
vtcon_tty_open(struct tty *tp)
{
struct vtcon_port *port;
port = tty_softc(tp);
if (port->vtcport_flags & VTCON_PORT_FLAG_GONE)
return (ENXIO);
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
return (0);
}
static void
vtcon_tty_close(struct tty *tp)
{
struct vtcon_port *port;
port = tty_softc(tp);
if (port->vtcport_flags & VTCON_PORT_FLAG_GONE)
return;
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
}
static void
vtcon_tty_outwakeup(struct tty *tp)
{
struct vtcon_port *port;
char buf[VTCON_BULK_BUFSZ];
int len;
port = tty_softc(tp);
if (port->vtcport_flags & VTCON_PORT_FLAG_GONE)
return;
while ((len = ttydisc_getc(tp, buf, sizeof(buf))) != 0)
vtcon_port_out(port, buf, len);
}
static void
vtcon_tty_free(void *xport)
{
struct vtcon_port *port;
port = xport;
vtcon_port_destroy(port);
atomic_subtract_int(&vtcon_pending_free, 1);
}
static void
vtcon_get_console_size(struct vtcon_softc *sc, uint16_t *cols, uint16_t *rows)
{
struct virtio_console_config concfg;
KASSERT(sc->vtcon_flags & VTCON_FLAG_SIZE,
("%s: size feature not negotiated", __func__));
vtcon_read_config(sc, &concfg);
*cols = concfg.cols;
*rows = concfg.rows;
}
static void
vtcon_enable_interrupts(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
struct vtcon_port *port;
int i;
VTCON_LOCK(sc);
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT)
virtqueue_enable_intr(sc->vtcon_ctrl_rxvq);
for (i = 0; i < sc->vtcon_max_ports; i++) {
scport = &sc->vtcon_ports[i];
port = scport->vcsp_port;
if (port == NULL)
continue;
VTCON_PORT_LOCK(port);
vtcon_port_enable_intr(port);
VTCON_PORT_UNLOCK(port);
}
VTCON_UNLOCK(sc);
}
static void
vtcon_disable_interrupts(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
struct vtcon_port *port;
int i;
VTCON_LOCK_ASSERT(sc);
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT)
virtqueue_disable_intr(sc->vtcon_ctrl_rxvq);
for (i = 0; i < sc->vtcon_max_ports; i++) {
scport = &sc->vtcon_ports[i];
port = scport->vcsp_port;
if (port == NULL)
continue;
VTCON_PORT_LOCK(port);
vtcon_port_disable_intr(port);
VTCON_PORT_UNLOCK(port);
}
}
Index: head/sys/dev/virtio/mmio/virtio_mmio.c
===================================================================
--- head/sys/dev/virtio/mmio/virtio_mmio.c (revision 328217)
+++ head/sys/dev/virtio/mmio/virtio_mmio.c (revision 328218)
@@ -1,862 +1,862 @@
/*-
* Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Portions of this software were developed by Andrew Turner
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* VirtIO MMIO interface.
* This driver is heavily based on the VirtIO PCI interface driver.
*/
/*
* FDT example:
* virtio_block@1000 {
* compatible = "virtio,mmio";
* reg = <0x1000 0x100>;
* interrupts = <63>;
* interrupt-parent = <&GIC>;
* };
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>
#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"
#define PAGE_SHIFT 12
struct vtmmio_virtqueue {
struct virtqueue *vtv_vq;
int vtv_no_intr;
};
struct vtmmio_softc {
device_t dev;
device_t platform;
struct resource *res[2];
uint64_t vtmmio_features;
uint32_t vtmmio_flags;
/* This "bus" will only ever have one child. */
device_t vtmmio_child_dev;
struct virtio_feature_desc *vtmmio_child_feat_desc;
int vtmmio_nvqs;
struct vtmmio_virtqueue *vtmmio_vqs;
void *ih;
};
static int vtmmio_probe(device_t);
static int vtmmio_attach(device_t);
static int vtmmio_detach(device_t);
static int vtmmio_suspend(device_t);
static int vtmmio_resume(device_t);
static int vtmmio_shutdown(device_t);
static void vtmmio_driver_added(device_t, driver_t *);
static void vtmmio_child_detached(device_t, device_t);
static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t vtmmio_negotiate_features(device_t, uint64_t);
static int vtmmio_with_feature(device_t, uint64_t);
static int vtmmio_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t);
static uint8_t vtmmio_get_status(device_t);
static void vtmmio_set_status(device_t, uint8_t);
static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static void vtmmio_write_dev_config(device_t, bus_size_t, void *, int);
static void vtmmio_describe_features(struct vtmmio_softc *, const char *,
uint64_t);
static void vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_free_interrupts(struct vtmmio_softc *);
static void vtmmio_free_virtqueues(struct vtmmio_softc *);
static void vtmmio_release_child_resources(struct vtmmio_softc *);
static void vtmmio_reset(struct vtmmio_softc *);
static void vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_vq_intr(void *);
/*
* I/O port read/write wrappers.
*/
#define vtmmio_write_config_1(sc, o, v) \
do { \
if (sc->platform != NULL) \
VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
bus_write_1((sc)->res[0], (o), (v)); \
if (sc->platform != NULL) \
VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
} while (0)
#define vtmmio_write_config_2(sc, o, v) \
do { \
if (sc->platform != NULL) \
VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
bus_write_2((sc)->res[0], (o), (v)); \
if (sc->platform != NULL) \
VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
} while (0)
#define vtmmio_write_config_4(sc, o, v) \
do { \
if (sc->platform != NULL) \
VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
bus_write_4((sc)->res[0], (o), (v)); \
if (sc->platform != NULL) \
VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
} while (0)
#define vtmmio_read_config_1(sc, o) \
bus_read_1((sc)->res[0], (o))
#define vtmmio_read_config_2(sc, o) \
bus_read_2((sc)->res[0], (o))
#define vtmmio_read_config_4(sc, o) \
bus_read_4((sc)->res[0], (o))
static device_method_t vtmmio_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtmmio_probe),
DEVMETHOD(device_attach, vtmmio_attach),
DEVMETHOD(device_detach, vtmmio_detach),
DEVMETHOD(device_suspend, vtmmio_suspend),
DEVMETHOD(device_resume, vtmmio_resume),
DEVMETHOD(device_shutdown, vtmmio_shutdown),
/* Bus interface. */
DEVMETHOD(bus_driver_added, vtmmio_driver_added),
DEVMETHOD(bus_child_detached, vtmmio_child_detached),
DEVMETHOD(bus_read_ivar, vtmmio_read_ivar),
DEVMETHOD(bus_write_ivar, vtmmio_write_ivar),
/* VirtIO bus interface. */
DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features),
DEVMETHOD(virtio_bus_with_feature, vtmmio_with_feature),
DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
DEVMETHOD(virtio_bus_stop, vtmmio_stop),
DEVMETHOD(virtio_bus_poll, vtmmio_poll),
DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config),
DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),
DEVMETHOD_END
};
static driver_t vtmmio_driver = {
"virtio_mmio",
vtmmio_methods,
sizeof(struct vtmmio_softc)
};
devclass_t vtmmio_devclass;
DRIVER_MODULE(virtio_mmio, simplebus, vtmmio_driver, vtmmio_devclass, 0, 0);
DRIVER_MODULE(virtio_mmio, ofwbus, vtmmio_driver, vtmmio_devclass, 0, 0);
MODULE_VERSION(virtio_mmio, 1);
MODULE_DEPEND(virtio_mmio, simplebus, 1, 1, 1);
MODULE_DEPEND(virtio_mmio, virtio, 1, 1, 1);
static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
struct vtmmio_softc *sc;
int rid;
int err;
sc = device_get_softc(dev);
if (sc->platform != NULL) {
err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
vtmmio_vq_intr, sc);
if (err == 0) {
/* Okay we have backend-specific interrupts */
return (0);
}
}
rid = 0;
sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
if (!sc->res[1]) {
device_printf(dev, "Can't allocate interrupt\n");
return (ENXIO);
}
if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
NULL, vtmmio_vq_intr, sc, &sc->ih)) {
device_printf(dev, "Can't setup the interrupt\n");
return (ENXIO);
}
return (0);
}
static int
vtmmio_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "virtio,mmio"))
return (ENXIO);
device_set_desc(dev, "VirtIO MMIO adapter");
return (BUS_PROBE_DEFAULT);
}
static int
vtmmio_setup_platform(struct vtmmio_softc *sc)
{
phandle_t platform_node;
struct fdt_ic *ic;
phandle_t xref;
phandle_t node;
sc->platform = NULL;
if ((node = ofw_bus_get_node(sc->dev)) == -1)
return (ENXIO);
if (OF_searchencprop(node, "platform", &xref,
sizeof(xref)) == -1) {
return (ENXIO);
}
platform_node = OF_node_from_xref(xref);
SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
if (ic->iph == platform_node) {
sc->platform = ic->dev;
break;
}
}
if (sc->platform == NULL) {
/* No platform-specific device. Ignore it. */
}
return (0);
}
static int
vtmmio_attach(device_t dev)
{
struct vtmmio_softc *sc;
device_t child;
int rid;
sc = device_get_softc(dev);
sc->dev = dev;
vtmmio_setup_platform(sc);
rid = 0;
sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!sc->res[0]) {
device_printf(dev, "Cannot allocate memory window.\n");
return (ENXIO);
}
vtmmio_reset(sc);
/* Tell the host we've noticed this device. */
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
if ((child = device_add_child(dev, NULL, -1)) == NULL) {
device_printf(dev, "Cannot create child device.\n");
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtmmio_detach(dev);
return (ENOMEM);
}
sc->vtmmio_child_dev = child;
vtmmio_probe_and_attach_child(sc);
return (0);
}
static int
vtmmio_detach(device_t dev)
{
struct vtmmio_softc *sc;
device_t child;
int error;
sc = device_get_softc(dev);
if ((child = sc->vtmmio_child_dev) != NULL) {
error = device_delete_child(dev, child);
if (error)
return (error);
sc->vtmmio_child_dev = NULL;
}
vtmmio_reset(sc);
if (sc->res[0] != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY, 0,
sc->res[0]);
sc->res[0] = NULL;
}
return (0);
}
static int
vtmmio_suspend(device_t dev)
{
return (bus_generic_suspend(dev));
}
static int
vtmmio_resume(device_t dev)
{
return (bus_generic_resume(dev));
}
static int
vtmmio_shutdown(device_t dev)
{
(void) bus_generic_shutdown(dev);
/* Forcibly stop the host device. */
vtmmio_stop(dev);
return (0);
}
static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
vtmmio_probe_and_attach_child(sc);
}
static void
vtmmio_child_detached(device_t dev, device_t child)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
vtmmio_reset(sc);
vtmmio_release_child_resources(sc);
}
static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (sc->vtmmio_child_dev != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_DEVTYPE:
case VIRTIO_IVAR_SUBDEVICE:
*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
break;
case VIRTIO_IVAR_VENDOR:
*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
break;
default:
return (ENOENT);
}
return (0);
}
static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (sc->vtmmio_child_dev != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_FEATURE_DESC:
sc->vtmmio_child_feat_desc = (void *) value;
break;
default:
return (ENOENT);
}
return (0);
}
static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
struct vtmmio_softc *sc;
uint64_t host_features, features;
sc = device_get_softc(dev);
host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
vtmmio_describe_features(sc, "host", host_features);
/*
* Limit negotiated features to what the driver, virtqueue, and
* host all support.
*/
features = host_features & child_features;
features = virtqueue_filter_features(features);
sc->vtmmio_features = features;
vtmmio_describe_features(sc, "negotiated", features);
vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);
return (features);
}
static int
vtmmio_with_feature(device_t dev, uint64_t feature)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
return ((sc->vtmmio_features & feature) != 0);
}
static int
vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs,
struct vq_alloc_info *vq_info)
{
struct vtmmio_virtqueue *vqx;
struct vq_alloc_info *info;
struct vtmmio_softc *sc;
struct virtqueue *vq;
uint32_t size;
int idx, error;
sc = device_get_softc(dev);
if (sc->vtmmio_nvqs != 0)
return (EALREADY);
if (nvqs <= 0)
return (EINVAL);
- sc->vtmmio_vqs = mallocarray(nvqs, sizeof(struct vtmmio_virtqueue),
+ sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtmmio_vqs == NULL)
return (ENOMEM);
vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, 1 << PAGE_SHIFT);
for (idx = 0; idx < nvqs; idx++) {
vqx = &sc->vtmmio_vqs[idx];
info = &vq_info[idx];
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
vtmmio_select_virtqueue(sc, idx);
size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);
error = virtqueue_alloc(dev, idx, size,
VIRTIO_MMIO_VRING_ALIGN, 0xFFFFFFFFUL, info, &vq);
if (error) {
device_printf(dev,
"cannot allocate virtqueue %d: %d\n",
idx, error);
break;
}
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
VIRTIO_MMIO_VRING_ALIGN);
#if 0
device_printf(dev, "virtqueue paddr 0x%08lx\n",
(uint64_t)virtqueue_paddr(vq));
#endif
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
virtqueue_paddr(vq) >> PAGE_SHIFT);
vqx->vtv_vq = *info->vqai_vq = vq;
vqx->vtv_no_intr = info->vqai_intr == NULL;
sc->vtmmio_nvqs++;
}
if (error)
vtmmio_free_virtqueues(sc);
return (error);
}
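/*
 * Editorial sketch, not part of the original driver: with the legacy
 * virtio-mmio register layout used above, handing a ring to the host means
 * selecting the queue and writing its size, its alignment, and the physical
 * page frame number of the ring memory.  For a single queue the register
 * sequence reduces to the following (hypothetical helper name; "paddr" is
 * assumed to come from virtqueue_paddr()):
 */
static __unused void
vtmmio_program_queue_sketch(struct vtmmio_softc *sc, int idx, uint32_t size,
    vm_paddr_t paddr)
{

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
	    VIRTIO_MMIO_VRING_ALIGN);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, paddr >> PAGE_SHIFT);
}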
static void
vtmmio_stop(device_t dev)
{
vtmmio_reset(device_get_softc(dev));
}
static void
vtmmio_poll(device_t dev)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (sc->platform != NULL)
VIRTIO_MMIO_POLL(sc->platform);
}
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
struct vtmmio_softc *sc;
int idx, error;
sc = device_get_softc(dev);
if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
vtmmio_stop(dev);
/*
* Quickly drive the status through ACK and DRIVER. The device
* does not become usable again until vtmmio_reinit_complete().
*/
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
vtmmio_negotiate_features(dev, features);
for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
error = vtmmio_reinit_virtqueue(sc, idx);
if (error)
return (error);
}
return (0);
}
static void
vtmmio_reinit_complete(device_t dev)
{
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NOTIFY, queue);
}
static uint8_t
vtmmio_get_status(device_t dev)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}
static void
vtmmio_set_status(device_t dev, uint8_t status)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (status != VIRTIO_CONFIG_STATUS_RESET)
status |= vtmmio_get_status(dev);
vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}
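/*
 * Editorial sketch, not part of the original driver: because
 * vtmmio_set_status() ORs the previously set bits into any value other than
 * RESET, repeated calls accumulate status bits rather than replacing them,
 * which is what the attach and reinit paths above rely on.  A hypothetical
 * helper makes the accumulation explicit:
 */
static __unused void
vtmmio_status_handshake_sketch(device_t dev)
{

	/* Status register now holds ACK. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	/* Status register now holds ACK | DRIVER. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	/* Status register now holds ACK | DRIVER | DRIVER_OK. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	/* RESET is written verbatim, returning the device to status 0. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
}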
static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
void *dst, int length)
{
struct vtmmio_softc *sc;
bus_size_t off;
uint8_t *d;
int size;
sc = device_get_softc(dev);
off = VIRTIO_MMIO_CONFIG + offset;
for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
if (length >= 4) {
size = 4;
*(uint32_t *)d = vtmmio_read_config_4(sc, off);
} else if (length >= 2) {
size = 2;
*(uint16_t *)d = vtmmio_read_config_2(sc, off);
} else
#endif
{
size = 1;
*d = vtmmio_read_config_1(sc, off);
}
}
}
static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
void *src, int length)
{
struct vtmmio_softc *sc;
bus_size_t off;
uint8_t *s;
int size;
sc = device_get_softc(dev);
off = VIRTIO_MMIO_CONFIG + offset;
for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
if (length >= 4) {
size = 4;
vtmmio_write_config_4(sc, off, *(uint32_t *)s);
} else if (length >= 2) {
size = 2;
vtmmio_write_config_2(sc, off, *(uint16_t *)s);
} else
#endif
{
size = 1;
vtmmio_write_config_1(sc, off, *s);
}
}
}
static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
uint64_t features)
{
device_t dev, child;
dev = sc->dev;
child = sc->vtmmio_child_dev;
if (device_is_attached(child) || bootverbose == 0)
return;
virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}
static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
device_t dev, child;
dev = sc->dev;
child = sc->vtmmio_child_dev;
if (child == NULL)
return;
if (device_get_state(child) != DS_NOTPRESENT) {
return;
}
if (device_probe(child) != 0) {
return;
}
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
if (device_attach(child) != 0) {
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtmmio_reset(sc);
vtmmio_release_child_resources(sc);
/* Reset status for future attempt. */
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
} else {
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
VIRTIO_ATTACH_COMPLETED(child);
}
}
static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
struct vtmmio_virtqueue *vqx;
struct virtqueue *vq;
int error;
uint16_t size;
vqx = &sc->vtmmio_vqs[idx];
vq = vqx->vtv_vq;
KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
vtmmio_select_virtqueue(sc, idx);
size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);
error = virtqueue_reinit(vq, size);
if (error)
return (error);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
virtqueue_paddr(vq) >> PAGE_SHIFT);
return (0);
}
static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{
if (sc->ih != NULL)
bus_teardown_intr(sc->dev, sc->res[1], sc->ih);
if (sc->res[1] != NULL)
bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}
static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
struct vtmmio_virtqueue *vqx;
int idx;
for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
vqx = &sc->vtmmio_vqs[idx];
vtmmio_select_virtqueue(sc, idx);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);
virtqueue_free(vqx->vtv_vq);
vqx->vtv_vq = NULL;
}
free(sc->vtmmio_vqs, M_DEVBUF);
sc->vtmmio_vqs = NULL;
sc->vtmmio_nvqs = 0;
}
static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{
vtmmio_free_interrupts(sc);
vtmmio_free_virtqueues(sc);
}
static void
vtmmio_reset(struct vtmmio_softc *sc)
{
/*
* Setting the status to RESET sets the host device to
* the original, uninitialized state.
*/
vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}
static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}
static void
vtmmio_vq_intr(void *arg)
{
struct vtmmio_virtqueue *vqx;
struct vtmmio_softc *sc;
struct virtqueue *vq;
uint32_t status;
int idx;
sc = arg;
status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);
/* The config changed */
if (status & VIRTIO_MMIO_INT_CONFIG)
if (sc->vtmmio_child_dev != NULL)
VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);
/* Notify all virtqueues. */
if (status & VIRTIO_MMIO_INT_VRING) {
for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
vqx = &sc->vtmmio_vqs[idx];
if (vqx->vtv_no_intr == 0) {
vq = vqx->vtv_vq;
virtqueue_intr(vq);
}
}
}
}
Index: head/sys/dev/virtio/network/if_vtnet.c
===================================================================
--- head/sys/dev/virtio/network/if_vtnet.c (revision 328217)
+++ head/sys/dev/virtio/network/if_vtnet.c (revision 328218)
@@ -1,3979 +1,3978 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO network devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/smp.h>
#include <vm/uma.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"
#include "opt_inet.h"
#include "opt_inet6.h"
static int vtnet_modevent(module_t, int, void *);
static int vtnet_probe(device_t);
static int vtnet_attach(device_t);
static int vtnet_detach(device_t);
static int vtnet_suspend(device_t);
static int vtnet_resume(device_t);
static int vtnet_shutdown(device_t);
static int vtnet_attach_completed(device_t);
static int vtnet_config_change(device_t);
static void vtnet_negotiate_features(struct vtnet_softc *);
static void vtnet_setup_features(struct vtnet_softc *);
static int vtnet_init_rxq(struct vtnet_softc *, int);
static int vtnet_init_txq(struct vtnet_softc *, int);
static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void vtnet_free_rxtx_queues(struct vtnet_softc *);
static int vtnet_alloc_rx_filters(struct vtnet_softc *);
static void vtnet_free_rx_filters(struct vtnet_softc *);
static int vtnet_alloc_virtqueues(struct vtnet_softc *);
static int vtnet_setup_interface(struct vtnet_softc *);
static int vtnet_change_mtu(struct vtnet_softc *, int);
static int vtnet_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t vtnet_get_counter(struct ifnet *, ift_counter);
static int vtnet_rxq_populate(struct vtnet_rxq *);
static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
struct mbuf *, int);
static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_new_buf(struct vtnet_rxq *);
static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
static int vtnet_rxq_eof(struct vtnet_rxq *);
static void vtnet_rx_vq_intr(void *);
static void vtnet_rxq_tq_intr(void *, int);
static int vtnet_txq_below_threshold(struct vtnet_txq *);
static int vtnet_txq_notify(struct vtnet_txq *);
static void vtnet_txq_free_mbufs(struct vtnet_txq *);
static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
int *, int *, int *);
static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
int, struct virtio_net_hdr *);
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
struct virtio_net_hdr *);
static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
struct vtnet_tx_header *);
static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **);
#ifdef VTNET_LEGACY_TX
static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
static void vtnet_start(struct ifnet *);
#else
static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
static void vtnet_txq_tq_deferred(void *, int);
#endif
static void vtnet_txq_start(struct vtnet_txq *);
static void vtnet_txq_tq_intr(void *, int);
static int vtnet_txq_eof(struct vtnet_txq *);
static void vtnet_tx_vq_intr(void *);
static void vtnet_tx_start_all(struct vtnet_softc *);
#ifndef VTNET_LEGACY_TX
static void vtnet_qflush(struct ifnet *);
#endif
static int vtnet_watchdog(struct vtnet_txq *);
static void vtnet_accum_stats(struct vtnet_softc *,
struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void vtnet_tick(void *);
static void vtnet_start_taskqueues(struct vtnet_softc *);
static void vtnet_free_taskqueues(struct vtnet_softc *);
static void vtnet_drain_taskqueues(struct vtnet_softc *);
static void vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void vtnet_stop_rendezvous(struct vtnet_softc *);
static void vtnet_stop(struct vtnet_softc *);
static int vtnet_virtio_reinit(struct vtnet_softc *);
static void vtnet_init_rx_filters(struct vtnet_softc *);
static int vtnet_init_rx_queues(struct vtnet_softc *);
static int vtnet_init_tx_queues(struct vtnet_softc *);
static int vtnet_init_rxtx_queues(struct vtnet_softc *);
static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
static int vtnet_reinit(struct vtnet_softc *);
static void vtnet_init_locked(struct vtnet_softc *);
static void vtnet_init(void *);
static void vtnet_free_ctrl_vq(struct vtnet_softc *);
static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
struct sglist *, int, int);
static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int vtnet_set_promisc(struct vtnet_softc *, int);
static int vtnet_set_allmulti(struct vtnet_softc *, int);
static void vtnet_attach_disable_promisc(struct vtnet_softc *);
static void vtnet_rx_filter(struct vtnet_softc *);
static void vtnet_rx_filter_mac(struct vtnet_softc *);
static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void vtnet_rx_filter_vlan(struct vtnet_softc *);
static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
static int vtnet_is_link_up(struct vtnet_softc *);
static void vtnet_update_link_status(struct vtnet_softc *);
static int vtnet_ifmedia_upd(struct ifnet *);
static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void vtnet_get_hwaddr(struct vtnet_softc *);
static void vtnet_set_hwaddr(struct vtnet_softc *);
static void vtnet_vlan_tag_remove(struct mbuf *);
static void vtnet_set_rx_process_limit(struct vtnet_softc *);
static void vtnet_set_tx_intr_threshold(struct vtnet_softc *);
static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct vtnet_rxq *);
static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct vtnet_txq *);
static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void vtnet_setup_sysctl(struct vtnet_softc *);
static int vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int vtnet_txq_enable_intr(struct vtnet_txq *);
static void vtnet_txq_disable_intr(struct vtnet_txq *);
static void vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void vtnet_enable_interrupts(struct vtnet_softc *);
static void vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void vtnet_disable_interrupts(struct vtnet_softc *);
static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
/* Tunables. */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
&vtnet_csum_disable, 0, "Disables receive and send checksum offload");
static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
0, "Disables TCP Segmentation Offload");
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
0, "Disables TCP Large Receive Offload");
static int vtnet_mq_disable = 0;
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
0, "Disables Multi Queue support");
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
&vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
static int vtnet_rx_process_limit = 512;
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&vtnet_rx_process_limit, 0,
"Limits the number of RX segments processed in a single pass");
static uma_zone_t vtnet_tx_header_zone;
static struct virtio_feature_desc vtnet_feature_desc[] = {
{ VIRTIO_NET_F_CSUM, "TxChecksum" },
{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
{ VIRTIO_NET_F_MAC, "MacAddress" },
{ VIRTIO_NET_F_GSO, "TxAllGSO" },
{ VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
{ VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
{ VIRTIO_NET_F_GUEST_ECN, "RxECN" },
{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
{ VIRTIO_NET_F_STATUS, "Status" },
{ VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
{ VIRTIO_NET_F_CTRL_RX, "RxMode" },
{ VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
{ VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
{ VIRTIO_NET_F_MQ, "Multiqueue" },
{ VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },
{ 0, NULL }
};
static device_method_t vtnet_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtnet_probe),
DEVMETHOD(device_attach, vtnet_attach),
DEVMETHOD(device_detach, vtnet_detach),
DEVMETHOD(device_suspend, vtnet_suspend),
DEVMETHOD(device_resume, vtnet_resume),
DEVMETHOD(device_shutdown, vtnet_shutdown),
/* VirtIO methods. */
DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
DEVMETHOD(virtio_config_change, vtnet_config_change),
DEVMETHOD_END
};
#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif /* DEV_NETMAP */
static driver_t vtnet_driver = {
"vtnet",
vtnet_methods,
sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;
DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
vtnet_modevent, 0);
DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
static int
vtnet_modevent(module_t mod, int type, void *unused)
{
int error = 0;
static int loaded = 0;
switch (type) {
case MOD_LOAD:
if (loaded++ == 0)
vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
sizeof(struct vtnet_tx_header),
NULL, NULL, NULL, NULL, 0, 0);
break;
case MOD_QUIESCE:
if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
error = EBUSY;
break;
case MOD_UNLOAD:
if (--loaded == 0) {
uma_zdestroy(vtnet_tx_header_zone);
vtnet_tx_header_zone = NULL;
}
break;
case MOD_SHUTDOWN:
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
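/*
 * Editorial note, not from the original source: the UMA zone created on
 * MOD_LOAD backs the per-packet transmit headers used later in this file,
 * roughly along the lines of
 *
 *	txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
 *	...
 *	uma_zfree(vtnet_tx_header_zone, txhdr);
 *
 * and MOD_QUIESCE refuses to let the module unload while
 * uma_zone_get_cur() reports any such header still outstanding.
 */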
static int
vtnet_probe(device_t dev)
{
if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
return (ENXIO);
device_set_desc(dev, "VirtIO Networking Adapter");
return (BUS_PROBE_DEFAULT);
}
static int
vtnet_attach(device_t dev)
{
struct vtnet_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtnet_dev = dev;
/* Register our feature descriptions. */
virtio_set_feature_desc(dev, vtnet_feature_desc);
VTNET_CORE_LOCK_INIT(sc);
callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
vtnet_setup_sysctl(sc);
vtnet_setup_features(sc);
error = vtnet_alloc_rx_filters(sc);
if (error) {
device_printf(dev, "cannot allocate Rx filters\n");
goto fail;
}
error = vtnet_alloc_rxtx_queues(sc);
if (error) {
device_printf(dev, "cannot allocate queues\n");
goto fail;
}
error = vtnet_alloc_virtqueues(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueues\n");
goto fail;
}
error = vtnet_setup_interface(sc);
if (error) {
device_printf(dev, "cannot setup interface\n");
goto fail;
}
error = virtio_setup_intr(dev, INTR_TYPE_NET);
if (error) {
device_printf(dev, "cannot setup virtqueue interrupts\n");
/* BMV: This will crash if called during boot! */
ether_ifdetach(sc->vtnet_ifp);
goto fail;
}
#ifdef DEV_NETMAP
vtnet_netmap_attach(sc);
#endif /* DEV_NETMAP */
vtnet_start_taskqueues(sc);
fail:
if (error)
vtnet_detach(dev);
return (error);
}
static int
vtnet_detach(device_t dev)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->vtnet_ifp;
if (device_is_attached(dev)) {
VTNET_CORE_LOCK(sc);
vtnet_stop(sc);
VTNET_CORE_UNLOCK(sc);
callout_drain(&sc->vtnet_tick_ch);
vtnet_drain_taskqueues(sc);
ether_ifdetach(ifp);
}
#ifdef DEV_NETMAP
netmap_detach(ifp);
#endif /* DEV_NETMAP */
vtnet_free_taskqueues(sc);
if (sc->vtnet_vlan_attach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
sc->vtnet_vlan_attach = NULL;
}
if (sc->vtnet_vlan_detach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
sc->vtnet_vlan_detach = NULL;
}
ifmedia_removeall(&sc->vtnet_media);
if (ifp != NULL) {
if_free(ifp);
sc->vtnet_ifp = NULL;
}
vtnet_free_rxtx_queues(sc);
vtnet_free_rx_filters(sc);
if (sc->vtnet_ctrl_vq != NULL)
vtnet_free_ctrl_vq(sc);
VTNET_CORE_LOCK_DESTROY(sc);
return (0);
}
static int
vtnet_suspend(device_t dev)
{
struct vtnet_softc *sc;
sc = device_get_softc(dev);
VTNET_CORE_LOCK(sc);
vtnet_stop(sc);
sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
VTNET_CORE_UNLOCK(sc);
return (0);
}
static int
vtnet_resume(device_t dev)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK(sc);
if (ifp->if_flags & IFF_UP)
vtnet_init_locked(sc);
sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
VTNET_CORE_UNLOCK(sc);
return (0);
}
static int
vtnet_shutdown(device_t dev)
{
/*
* Suspend already does all of what we need to
* do here; we just never expect to be resumed.
*/
return (vtnet_suspend(dev));
}
static int
vtnet_attach_completed(device_t dev)
{
vtnet_attach_disable_promisc(device_get_softc(dev));
return (0);
}
static int
vtnet_config_change(device_t dev)
{
struct vtnet_softc *sc;
sc = device_get_softc(dev);
VTNET_CORE_LOCK(sc);
vtnet_update_link_status(sc);
if (sc->vtnet_link_active != 0)
vtnet_tx_start_all(sc);
VTNET_CORE_UNLOCK(sc);
return (0);
}
static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
device_t dev;
uint64_t mask, features;
dev = sc->vtnet_dev;
mask = 0;
/*
* TSO and LRO are only available when their corresponding checksum
* offload feature is also negotiated.
*/
if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
}
if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
mask |= VTNET_TSO_FEATURES;
if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
mask |= VTNET_LRO_FEATURES;
#ifndef VTNET_LEGACY_TX
if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
mask |= VIRTIO_NET_F_MQ;
#else
mask |= VIRTIO_NET_F_MQ;
#endif
features = VTNET_FEATURES & ~mask;
sc->vtnet_features = virtio_negotiate_features(dev, features);
if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
/*
* LRO without mergeable buffers requires special care. This
* is not ideal because every receive buffer must be large
* enough to hold the maximum TCP packet, the Ethernet header,
* and the VirtIO net header. This requires up to 34 descriptors with
* MCLBYTES clusters. If we do not have indirect descriptors,
* LRO is disabled since the virtqueue will not contain very
* many receive buffers.
*/
if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
device_printf(dev,
"LRO disabled due to both mergeable buffers and "
"indirect descriptors not negotiated\n");
features &= ~VTNET_LRO_FEATURES;
sc->vtnet_features =
virtio_negotiate_features(dev, features);
} else
sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
}
}
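/*
 * Editorial note, not from the original source: the tunables consulted above
 * work by removing features from VTNET_FEATURES before negotiation, so a
 * disabled feature is never offered to the host at all.  With
 * hw.vtnet.tso_disable=1, for example, the request effectively reduces to
 *
 *	features = VTNET_FEATURES & ~VTNET_TSO_FEATURES;
 *	sc->vtnet_features = virtio_negotiate_features(dev, features);
 *
 * rather than negotiating TSO and then ignoring it.
 */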
static void
vtnet_setup_features(struct vtnet_softc *sc)
{
device_t dev;
dev = sc->vtnet_dev;
vtnet_negotiate_features(sc);
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
/* This feature should always be negotiated. */
sc->vtnet_flags |= VTNET_FLAG_MAC;
}
if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
else
sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
else
sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
}
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, max_virtqueue_pairs));
} else
sc->vtnet_max_vq_pairs = 1;
if (sc->vtnet_max_vq_pairs > 1) {
/*
* Limit the maximum number of queue pairs to the lower of
* the number of CPUs and the configured maximum.
* The actual number of queues that get used may be less.
*/
int max;
max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
if (max > mp_ncpus)
max = mp_ncpus;
if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
if (max > 1) {
sc->vtnet_requested_vq_pairs = max;
sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
}
}
}
}
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
struct vtnet_rxq *rxq;
rxq = &sc->vtnet_rxqs[id];
snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
device_get_nameunit(sc->vtnet_dev), id);
mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);
rxq->vtnrx_sc = sc;
rxq->vtnrx_id = id;
rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
if (rxq->vtnrx_sg == NULL)
return (ENOMEM);
TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
taskqueue_thread_enqueue, &rxq->vtnrx_tq);
return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}
static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
struct vtnet_txq *txq;
txq = &sc->vtnet_txqs[id];
snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
device_get_nameunit(sc->vtnet_dev), id);
mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);
txq->vtntx_sc = sc;
txq->vtntx_id = id;
txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
if (txq->vtntx_sg == NULL)
return (ENOMEM);
#ifndef VTNET_LEGACY_TX
txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
M_NOWAIT, &txq->vtntx_mtx);
if (txq->vtntx_br == NULL)
return (ENOMEM);
TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
#endif
TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
taskqueue_thread_enqueue, &txq->vtntx_tq);
if (txq->vtntx_tq == NULL)
return (ENOMEM);
return (0);
}
static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
int i, npairs, error;
npairs = sc->vtnet_max_vq_pairs;
- sc->vtnet_rxqs = mallocarray(npairs, sizeof(struct vtnet_rxq), M_DEVBUF,
+ sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
M_NOWAIT | M_ZERO);
- sc->vtnet_txqs = mallocarray(npairs, sizeof(struct vtnet_txq), M_DEVBUF,
+ sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
return (ENOMEM);
for (i = 0; i < npairs; i++) {
error = vtnet_init_rxq(sc, i);
if (error)
return (error);
error = vtnet_init_txq(sc, i);
if (error)
return (error);
}
vtnet_setup_queue_sysctl(sc);
return (0);
}
static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{
rxq->vtnrx_sc = NULL;
rxq->vtnrx_id = -1;
if (rxq->vtnrx_sg != NULL) {
sglist_free(rxq->vtnrx_sg);
rxq->vtnrx_sg = NULL;
}
if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
mtx_destroy(&rxq->vtnrx_mtx);
}
static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{
txq->vtntx_sc = NULL;
txq->vtntx_id = -1;
if (txq->vtntx_sg != NULL) {
sglist_free(txq->vtntx_sg);
txq->vtntx_sg = NULL;
}
#ifndef VTNET_LEGACY_TX
if (txq->vtntx_br != NULL) {
buf_ring_free(txq->vtntx_br, M_DEVBUF);
txq->vtntx_br = NULL;
}
#endif
if (mtx_initialized(&txq->vtntx_mtx) != 0)
mtx_destroy(&txq->vtntx_mtx);
}
static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
int i;
if (sc->vtnet_rxqs != NULL) {
for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
free(sc->vtnet_rxqs, M_DEVBUF);
sc->vtnet_rxqs = NULL;
}
if (sc->vtnet_txqs != NULL) {
for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
vtnet_destroy_txq(&sc->vtnet_txqs[i]);
free(sc->vtnet_txqs, M_DEVBUF);
sc->vtnet_txqs = NULL;
}
}
static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtnet_mac_filter == NULL)
return (ENOMEM);
}
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtnet_vlan_filter == NULL)
return (ENOMEM);
}
return (0);
}
static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{
if (sc->vtnet_mac_filter != NULL) {
free(sc->vtnet_mac_filter, M_DEVBUF);
sc->vtnet_mac_filter = NULL;
}
if (sc->vtnet_vlan_filter != NULL) {
free(sc->vtnet_vlan_filter, M_DEVBUF);
sc->vtnet_vlan_filter = NULL;
}
}
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
device_t dev;
struct vq_alloc_info *info;
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i, idx, flags, nvqs, error;
dev = sc->vtnet_dev;
flags = 0;
nvqs = sc->vtnet_max_vq_pairs * 2;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
nvqs++;
- info = mallocarray(nvqs, sizeof(struct vq_alloc_info), M_TEMP,
- M_NOWAIT);
+ info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
if (info == NULL)
return (ENOMEM);
for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
rxq = &sc->vtnet_rxqs[i];
VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
"%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
"%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
}
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
&sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
}
/*
* Enable interrupt binding if this is multiqueue. This only matters
* when per-vq MSIX is available.
*/
if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
flags |= 0;
error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
free(info, M_TEMP);
return (error);
}
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "cannot allocate ifnet structure\n");
return (ENOSPC);
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_baudrate = IF_Gbps(10); /* Approx. */
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_init = vtnet_init;
ifp->if_ioctl = vtnet_ioctl;
ifp->if_get_counter = vtnet_get_counter;
#ifndef VTNET_LEGACY_TX
ifp->if_transmit = vtnet_txq_mq_start;
ifp->if_qflush = vtnet_qflush;
#else
struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
ifp->if_start = vtnet_start;
IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
IFQ_SET_READY(&ifp->if_snd);
#endif
ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
vtnet_ifmedia_sts);
ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
/* Read (or generate) the MAC address for the adapter. */
vtnet_get_hwaddr(sc);
ether_ifattach(ifp, sc->vtnet_hwaddr);
if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
ifp->if_capabilities |= IFCAP_LINKSTATE;
/* Tell the upper layer(s) we support long frames. */
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
} else {
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
ifp->if_capabilities |= IFCAP_TSO4;
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
ifp->if_capabilities |= IFCAP_TSO6;
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
}
if (ifp->if_capabilities & IFCAP_TSO)
ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
}
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
ifp->if_capabilities |= IFCAP_LRO;
}
if (ifp->if_capabilities & IFCAP_HWCSUM) {
/*
* VirtIO does not support VLAN tagging, but we can fake
* it by inserting and removing the 802.1Q header during
* transmit and receive. We are then able to do checksum
* offloading of VLAN frames.
*/
ifp->if_capabilities |=
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
}
ifp->if_capenable = ifp->if_capabilities;
/*
* Capabilities after here are not enabled by default.
*/
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
}
vtnet_set_rx_process_limit(sc);
vtnet_set_tx_intr_threshold(sc);
return (0);
}
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
struct ifnet *ifp;
int frame_size, clsize;
ifp = sc->vtnet_ifp;
if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
return (EINVAL);
frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
new_mtu;
/*
* Based on the new MTU (and hence frame size) determine which
* cluster size is most appropriate for the receive queues.
*/
if (frame_size <= MCLBYTES) {
clsize = MCLBYTES;
} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
/* Avoid going past 9K jumbos. */
if (frame_size > MJUM9BYTES)
return (EINVAL);
clsize = MJUM9BYTES;
} else
clsize = MJUMPAGESIZE;
ifp->if_mtu = new_mtu;
sc->vtnet_rx_new_clsize = clsize;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vtnet_init_locked(sc);
}
return (0);
}
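/*
 * Editorial example, not from the original source: with the 12-byte
 * mergeable-buffer header (10 bytes otherwise) and an 18-byte Ethernet VLAN
 * header, an MTU of 1500 gives a frame_size of about 12 + 18 + 1500 = 1530
 * bytes, which fits in an MCLBYTES (2KB) cluster.  An MTU of 9000 gives
 * about 9030 bytes: with mergeable buffers the receive queues switch to
 * MJUMPAGESIZE clusters and the host splits the frame across several of
 * them, while without mergeable buffers a single MJUM9BYTES cluster must
 * hold the whole frame.
 */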
static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct vtnet_softc *sc;
struct ifreq *ifr;
int reinit, mask, error;
sc = ifp->if_softc;
ifr = (struct ifreq *) data;
error = 0;
switch (cmd) {
case SIOCSIFMTU:
if (ifp->if_mtu != ifr->ifr_mtu) {
VTNET_CORE_LOCK(sc);
error = vtnet_change_mtu(sc, ifr->ifr_mtu);
VTNET_CORE_UNLOCK(sc);
}
break;
case SIOCSIFFLAGS:
VTNET_CORE_LOCK(sc);
if ((ifp->if_flags & IFF_UP) == 0) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
vtnet_stop(sc);
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if ((ifp->if_flags ^ sc->vtnet_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
vtnet_rx_filter(sc);
else {
ifp->if_flags |= IFF_PROMISC;
if ((ifp->if_flags ^ sc->vtnet_if_flags)
& IFF_ALLMULTI)
error = ENOTSUP;
}
}
} else
vtnet_init_locked(sc);
if (error == 0)
sc->vtnet_if_flags = ifp->if_flags;
VTNET_CORE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
break;
VTNET_CORE_LOCK(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
vtnet_rx_filter_mac(sc);
VTNET_CORE_UNLOCK(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
break;
case SIOCSIFCAP:
VTNET_CORE_LOCK(sc);
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
if (mask & IFCAP_TXCSUM)
ifp->if_capenable ^= IFCAP_TXCSUM;
if (mask & IFCAP_TXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
if (mask & IFCAP_TSO4)
ifp->if_capenable ^= IFCAP_TSO4;
if (mask & IFCAP_TSO6)
ifp->if_capenable ^= IFCAP_TSO6;
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
IFCAP_VLAN_HWFILTER)) {
/* These Rx features require us to renegotiate. */
reinit = 1;
if (mask & IFCAP_RXCSUM)
ifp->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
if (mask & IFCAP_LRO)
ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWFILTER)
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
} else
reinit = 0;
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vtnet_init_locked(sc);
}
VTNET_CORE_UNLOCK(sc);
VLAN_CAPABILITIES(ifp);
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);
return (error);
}
static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
struct virtqueue *vq;
int nbufs, error;
vq = rxq->vtnrx_vq;
error = ENOSPC;
for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
error = vtnet_rxq_new_buf(rxq);
if (error)
break;
}
if (nbufs > 0) {
virtqueue_notify(vq);
/*
* EMSGSIZE signifies the virtqueue did not have enough
* entries available to hold the last mbuf. This is not
* an error.
*/
if (error == EMSGSIZE)
error = 0;
}
return (error);
}
static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
struct virtqueue *vq;
struct mbuf *m;
int last;
vq = rxq->vtnrx_vq;
last = 0;
while ((m = virtqueue_drain(vq, &last)) != NULL)
m_freem(m);
KASSERT(virtqueue_empty(vq),
("%s: mbufs remaining in rx queue %p", __func__, rxq));
}
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
struct mbuf *m_head, *m_tail, *m;
int i, clsize;
clsize = sc->vtnet_rx_clsize;
KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
if (m_head == NULL)
goto fail;
m_head->m_len = clsize;
m_tail = m_head;
/* Allocate the rest of the chain. */
for (i = 1; i < nbufs; i++) {
m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
if (m == NULL)
goto fail;
m->m_len = clsize;
m_tail->m_next = m;
m_tail = m;
}
if (m_tailp != NULL)
*m_tailp = m_tail;
return (m_head);
fail:
sc->vtnet_stats.mbuf_alloc_failed++;
m_freem(m_head);
return (NULL);
}
/*
* Slow path for when LRO without mergeable buffers is negotiated.
*/
static int
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
int len0)
{
struct vtnet_softc *sc;
struct mbuf *m, *m_prev;
struct mbuf *m_new, *m_tail;
int len, clsize, nreplace, error;
sc = rxq->vtnrx_sc;
clsize = sc->vtnet_rx_clsize;
m_prev = NULL;
m_tail = NULL;
nreplace = 0;
m = m0;
len = len0;
/*
* Since these mbuf chains are so large, we avoid allocating an
* entire replacement chain if possible. When the received frame
* did not consume the entire chain, the unused mbufs are moved
* to the replacement chain.
*/
while (len > 0) {
/*
* Something is seriously wrong if we received a frame
* larger than the chain. Drop it.
*/
if (m == NULL) {
sc->vtnet_stats.rx_frame_too_large++;
return (EMSGSIZE);
}
/* We always allocate the same cluster size. */
KASSERT(m->m_len == clsize,
("%s: mbuf size %d is not the cluster size %d",
__func__, m->m_len, clsize));
m->m_len = MIN(m->m_len, len);
len -= m->m_len;
m_prev = m;
m = m->m_next;
nreplace++;
}
KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
("%s: too many replacement mbufs %d max %d", __func__, nreplace,
sc->vtnet_rx_nmbufs));
m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
if (m_new == NULL) {
m_prev->m_len = clsize;
return (ENOBUFS);
}
/*
* Move any unused mbufs from the received chain onto the end
* of the new chain.
*/
if (m_prev->m_next != NULL) {
m_tail->m_next = m_prev->m_next;
m_prev->m_next = NULL;
}
error = vtnet_rxq_enqueue_buf(rxq, m_new);
if (error) {
/*
* BAD! We could not enqueue the replacement mbuf chain. We
* must restore the m0 chain to the original state if it was
* modified so we can subsequently discard it.
*
* NOTE: The replacement is supposed to be an identical copy
* of the one just dequeued, so this is an unexpected error.
*/
sc->vtnet_stats.rx_enq_replacement_failed++;
if (m_tail->m_next != NULL) {
m_prev->m_next = m_tail->m_next;
m_tail->m_next = NULL;
}
m_prev->m_len = clsize;
m_freem(m_new);
}
return (error);
}
static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
struct vtnet_softc *sc;
struct mbuf *m_new;
int error;
sc = rxq->vtnrx_sc;
KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
("%s: chained mbuf without LRO_NOMRG", __func__));
if (m->m_next == NULL) {
/* Fast-path for the common case of just one mbuf. */
if (m->m_len < len)
return (EINVAL);
m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
if (m_new == NULL)
return (ENOBUFS);
error = vtnet_rxq_enqueue_buf(rxq, m_new);
if (error) {
/*
* The new mbuf is supposed to be an identical
* copy of the one just dequeued so this is an
* unexpected error.
*/
m_freem(m_new);
sc->vtnet_stats.rx_enq_replacement_failed++;
} else
m->m_len = len;
} else
error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
return (error);
}
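/*
* Build the scatter/gather list for the header and data portions of
* the mbuf (chain) and enqueue it in the receive virtqueue.
*/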
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
struct vtnet_softc *sc;
struct sglist *sg;
struct vtnet_rx_header *rxhdr;
uint8_t *mdata;
int offset, error;
sc = rxq->vtnrx_sc;
sg = rxq->vtnrx_sg;
mdata = mtod(m, uint8_t *);
VTNET_RXQ_LOCK_ASSERT(rxq);
KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
("%s: chained mbuf without LRO_NOMRG", __func__));
KASSERT(m->m_len == sc->vtnet_rx_clsize,
("%s: unexpected cluster size %d/%d", __func__, m->m_len,
sc->vtnet_rx_clsize));
sglist_reset(sg);
if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
rxhdr = (struct vtnet_rx_header *) mdata;
sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
offset = sizeof(struct vtnet_rx_header);
} else
offset = 0;
sglist_append(sg, mdata + offset, m->m_len - offset);
if (m->m_next != NULL) {
error = sglist_append_mbuf(sg, m->m_next);
MPASS(error == 0);
}
error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
return (error);
}
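/*
* Allocate and enqueue a fresh receive buffer.
*/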
static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
struct vtnet_softc *sc;
struct mbuf *m;
int error;
sc = rxq->vtnrx_sc;
m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
if (m == NULL)
return (ENOBUFS);
error = vtnet_rxq_enqueue_buf(rxq, m);
if (error)
m_freem(m);
return (error);
}
/*
* Use the checksum offset in the VirtIO header to set the
* correct CSUM_* flags.
*/
static int
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
#if defined(INET) || defined(INET6)
int offset = hdr->csum_start + hdr->csum_offset;
#endif
sc = rxq->vtnrx_sc;
/* Only do a basic sanity check on the offset. */
switch (eth_type) {
#if defined(INET)
case ETHERTYPE_IP:
if (__predict_false(offset < ip_start + sizeof(struct ip)))
return (1);
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
return (1);
break;
#endif
default:
sc->vtnet_stats.rx_csum_bad_ethtype++;
return (1);
}
/*
* Use the offset to determine the appropriate CSUM_* flags. This is
* a bit dirty, but we can get by with it since the checksum offsets
* happen to be different. We assume the host does not do IPv4
* header checksum offloading.
*/
switch (hdr->csum_offset) {
case offsetof(struct udphdr, uh_sum):
case offsetof(struct tcphdr, th_sum):
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
case offsetof(struct sctphdr, checksum):
m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
break;
default:
sc->vtnet_stats.rx_csum_bad_offset++;
return (1);
}
return (0);
}
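/*
* Parse the IP or IPv6 header to determine the L4 protocol and set
* the appropriate CSUM_* flags.
*/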
static int
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
int offset, proto;
sc = rxq->vtnrx_sc;
switch (eth_type) {
#if defined(INET)
case ETHERTYPE_IP: {
struct ip *ip;
if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
return (1);
ip = (struct ip *)(m->m_data + ip_start);
proto = ip->ip_p;
offset = ip_start + (ip->ip_hl << 2);
break;
}
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len < ip_start +
sizeof(struct ip6_hdr)))
return (1);
offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
if (__predict_false(offset < 0))
return (1);
break;
#endif
default:
sc->vtnet_stats.rx_csum_bad_ethtype++;
return (1);
}
switch (proto) {
case IPPROTO_TCP:
if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
return (1);
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
case IPPROTO_UDP:
if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
return (1);
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
case IPPROTO_SCTP:
if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
return (1);
m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
break;
default:
/*
* For the remaining protocols, FreeBSD does not support
* checksum offloading, so the checksum will be recomputed.
*/
#if 0
if_printf(sc->vtnet_ifp, "%s: cksum offload of unsupported "
"protocol eth_type=%#x proto=%d csum_start=%d "
"csum_offset=%d\n", __func__, eth_type, proto,
hdr->csum_start, hdr->csum_offset);
#endif
break;
}
return (0);
}
/*
* Set the appropriate CSUM_* flags. Unfortunately, the information
* provided is not directly useful to us. The VirtIO header gives the
* offset of the checksum, which is all Linux needs, but this is not
* how FreeBSD does things. We are forced to peek inside the packet
* a bit.
*
* It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
* could accept the offsets and let the stack figure it out.
*/
static int
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
struct virtio_net_hdr *hdr)
{
struct ether_header *eh;
struct ether_vlan_header *evh;
uint16_t eth_type;
int offset, error;
eh = mtod(m, struct ether_header *);
eth_type = ntohs(eh->ether_type);
if (eth_type == ETHERTYPE_VLAN) {
/* BMV: We should handle nested VLAN tags too. */
evh = mtod(m, struct ether_vlan_header *);
eth_type = ntohs(evh->evl_proto);
offset = sizeof(struct ether_vlan_header);
} else
offset = sizeof(struct ether_header);
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
else
error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
return (error);
}
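/*
* Dequeue the remaining buffers of a partially received mergeable
* chain and requeue them.
*/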
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
{
struct mbuf *m;
while (--nbufs > 0) {
m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
if (m == NULL)
break;
vtnet_rxq_discard_buf(rxq, m);
}
}
static void
vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
int error;
/*
* Requeue the discarded mbuf. This should always be successful
* since it was just dequeued.
*/
error = vtnet_rxq_enqueue_buf(rxq, m);
KASSERT(error == 0,
("%s: cannot requeue discarded mbuf %d", __func__, error));
}
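/*
* Dequeue the remaining buffers of a mergeable receive frame and link
* them onto the head mbuf. On failure, the entire chain is freed.
*/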
static int
vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct mbuf *m, *m_tail;
int len;
sc = rxq->vtnrx_sc;
vq = rxq->vtnrx_vq;
m_tail = m_head;
while (--nbufs > 0) {
m = virtqueue_dequeue(vq, &len);
if (m == NULL) {
rxq->vtnrx_stats.vrxs_ierrors++;
goto fail;
}
if (vtnet_rxq_new_buf(rxq) != 0) {
rxq->vtnrx_stats.vrxs_iqdrops++;
vtnet_rxq_discard_buf(rxq, m);
if (nbufs > 1)
vtnet_rxq_discard_merged_bufs(rxq, nbufs);
goto fail;
}
if (m->m_len < len)
len = m->m_len;
m->m_len = len;
m->m_flags &= ~M_PKTHDR;
m_head->m_pkthdr.len += len;
m_tail->m_next = m;
m_tail = m;
}
return (0);
fail:
sc->vtnet_stats.rx_mergeable_failed++;
m_freem(m_head);
return (1);
}
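/*
* Complete receive processing: strip the VLAN tag if offloaded, apply
* the checksum flags, and pass the frame to the interface input
* routine. The queue lock is dropped around the call to if_input.
*/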
static void
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
struct ether_header *eh;
sc = rxq->vtnrx_sc;
ifp = sc->vtnet_ifp;
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
eh = mtod(m, struct ether_header *);
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
vtnet_vlan_tag_remove(m);
/*
* With the 802.1Q header removed, update the
* checksum starting location accordingly.
*/
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
}
}
m->m_pkthdr.flowid = rxq->vtnrx_id;
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
/*
* BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
* distinction that Linux does. Need to reevaluate if performing
* offloading for the NEEDS_CSUM case is really appropriate.
*/
if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
VIRTIO_NET_HDR_F_DATA_VALID)) {
if (vtnet_rxq_csum(rxq, m, hdr) == 0)
rxq->vtnrx_stats.vrxs_csum++;
else
rxq->vtnrx_stats.vrxs_csum_failed++;
}
rxq->vtnrx_stats.vrxs_ipackets++;
rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
VTNET_RXQ_UNLOCK(rxq);
(*ifp->if_input)(ifp, m);
VTNET_RXQ_LOCK(rxq);
}
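/*
* Main receive loop: dequeue completed buffers, replace them in the
* virtqueue, and hand the frames up the stack. Returns EAGAIN if the
* process limit was reached before the virtqueue was emptied.
*/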
static int
vtnet_rxq_eof(struct vtnet_rxq *rxq)
{
struct virtio_net_hdr lhdr, *hdr;
struct vtnet_softc *sc;
struct ifnet *ifp;
struct virtqueue *vq;
struct mbuf *m;
struct virtio_net_hdr_mrg_rxbuf *mhdr;
int len, deq, nbufs, adjsz, count;
sc = rxq->vtnrx_sc;
vq = rxq->vtnrx_vq;
ifp = sc->vtnet_ifp;
hdr = &lhdr;
deq = 0;
count = sc->vtnet_rx_process_limit;
VTNET_RXQ_LOCK_ASSERT(rxq);
#ifdef DEV_NETMAP
if (netmap_rx_irq(ifp, 0, &deq)) {
return (FALSE);
}
#endif /* DEV_NETMAP */
while (count-- > 0) {
m = virtqueue_dequeue(vq, &len);
if (m == NULL)
break;
deq++;
if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
rxq->vtnrx_stats.vrxs_ierrors++;
vtnet_rxq_discard_buf(rxq, m);
continue;
}
if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
nbufs = 1;
adjsz = sizeof(struct vtnet_rx_header);
/*
* Account for our pad inserted between the header
* and the actual start of the frame.
*/
len += VTNET_RX_HEADER_PAD;
} else {
mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
nbufs = mhdr->num_buffers;
adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
}
if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
rxq->vtnrx_stats.vrxs_iqdrops++;
vtnet_rxq_discard_buf(rxq, m);
if (nbufs > 1)
vtnet_rxq_discard_merged_bufs(rxq, nbufs);
continue;
}
m->m_pkthdr.len = len;
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.csum_flags = 0;
if (nbufs > 1) {
/* Dequeue the rest of chain. */
if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
continue;
}
/*
* Save copy of header before we strip it. For both mergeable
* and non-mergeable, the header is at the beginning of the
* mbuf data. We no longer need num_buffers, so always use a
* regular header.
*
* BMV: Is this memcpy() expensive? We know the mbuf data is
* still valid even after the m_adj().
*/
memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
m_adj(m, adjsz);
vtnet_rxq_input(rxq, m, hdr);
/* Must recheck after dropping the Rx lock. */
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
}
if (deq > 0)
virtqueue_notify(vq);
return (count > 0 ? 0 : EAGAIN);
}
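/*
* Receive virtqueue interrupt handler.
*/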
static void
vtnet_rx_vq_intr(void *xrxq)
{
struct vtnet_softc *sc;
struct vtnet_rxq *rxq;
struct ifnet *ifp;
int tries, more;
rxq = xrxq;
sc = rxq->vtnrx_sc;
ifp = sc->vtnet_ifp;
tries = 0;
if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
/*
* Ignore this interrupt. Either this is a spurious interrupt
* or multiqueue without per-VQ MSIX so every queue needs to
* be polled (a brain dead configuration we could try harder
* to avoid).
*/
vtnet_rxq_disable_intr(rxq);
return;
}
VTNET_RXQ_LOCK(rxq);
again:
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VTNET_RXQ_UNLOCK(rxq);
return;
}
more = vtnet_rxq_eof(rxq);
if (more || vtnet_rxq_enable_intr(rxq) != 0) {
if (!more)
vtnet_rxq_disable_intr(rxq);
/*
* This is an occasional condition or race (when !more),
* so retry a few times before scheduling the taskqueue.
*/
if (tries++ < VTNET_INTR_DISABLE_RETRIES)
goto again;
VTNET_RXQ_UNLOCK(rxq);
rxq->vtnrx_stats.vrxs_rescheduled++;
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
} else
VTNET_RXQ_UNLOCK(rxq);
}
static void
vtnet_rxq_tq_intr(void *xrxq, int pending)
{
struct vtnet_softc *sc;
struct vtnet_rxq *rxq;
struct ifnet *ifp;
int more;
rxq = xrxq;
sc = rxq->vtnrx_sc;
ifp = sc->vtnet_ifp;
VTNET_RXQ_LOCK(rxq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VTNET_RXQ_UNLOCK(rxq);
return;
}
more = vtnet_rxq_eof(rxq);
if (more || vtnet_rxq_enable_intr(rxq) != 0) {
if (!more)
vtnet_rxq_disable_intr(rxq);
rxq->vtnrx_stats.vrxs_rescheduled++;
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
}
VTNET_RXQ_UNLOCK(rxq);
}
static int
vtnet_txq_below_threshold(struct vtnet_txq *txq)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
sc = txq->vtntx_sc;
vq = txq->vtntx_vq;
return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
}
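/*
* Notify the host of the enqueued transmit buffers and attempt to
* re-enable the interrupt. Returns nonzero if the caller should
* continue transmitting.
*/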
static int
vtnet_txq_notify(struct vtnet_txq *txq)
{
struct virtqueue *vq;
vq = txq->vtntx_vq;
txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
virtqueue_notify(vq);
if (vtnet_txq_enable_intr(txq) == 0)
return (0);
/*
* Drain frames that were completed since last checked. If this
* causes the queue to go above the threshold, the caller should
* continue transmitting.
*/
if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
virtqueue_disable_intr(vq);
return (1);
}
return (0);
}
static void
vtnet_txq_free_mbufs(struct vtnet_txq *txq)
{
struct virtqueue *vq;
struct vtnet_tx_header *txhdr;
int last;
vq = txq->vtntx_vq;
last = 0;
while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
m_freem(txhdr->vth_mbuf);
uma_zfree(vtnet_tx_header_zone, txhdr);
}
KASSERT(virtqueue_empty(vq),
("%s: mbufs remaining in tx queue %p", __func__, txq));
}
/*
* BMV: Much of this can go away once we finally have offsets in
* the mbuf packet header. Bug andre@.
*/
static int
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
int *etype, int *proto, int *start)
{
struct vtnet_softc *sc;
struct ether_vlan_header *evh;
int offset;
sc = txq->vtntx_sc;
evh = mtod(m, struct ether_vlan_header *);
if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
/* BMV: We should handle nested VLAN tags too. */
*etype = ntohs(evh->evl_proto);
offset = sizeof(struct ether_vlan_header);
} else {
*etype = ntohs(evh->evl_encap_proto);
offset = sizeof(struct ether_header);
}
switch (*etype) {
#if defined(INET)
case ETHERTYPE_IP: {
struct ip *ip, iphdr;
if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
m_copydata(m, offset, sizeof(struct ip),
(caddr_t) &iphdr);
ip = &iphdr;
} else
ip = (struct ip *)(m->m_data + offset);
*proto = ip->ip_p;
*start = offset + (ip->ip_hl << 2);
break;
}
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
*proto = -1;
*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
/* Assert the network stack sent us a valid packet. */
KASSERT(*start > offset,
("%s: mbuf %p start %d offset %d proto %d", __func__, m,
*start, offset, *proto));
break;
#endif
default:
sc->vtnet_stats.tx_csum_bad_ethtype++;
return (EINVAL);
}
return (0);
}
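/*
* Fill in the TSO fields of the VirtIO header from the TCP header and
* the mbuf packet header.
*/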
static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
int offset, struct virtio_net_hdr *hdr)
{
static struct timeval lastecn;
static int curecn;
struct vtnet_softc *sc;
struct tcphdr *tcp, tcphdr;
sc = txq->vtntx_sc;
if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
tcp = &tcphdr;
} else
tcp = (struct tcphdr *)(m->m_data + offset);
hdr->hdr_len = offset + (tcp->th_off << 2);
hdr->gso_size = m->m_pkthdr.tso_segsz;
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
VIRTIO_NET_HDR_GSO_TCPV6;
if (tcp->th_flags & TH_CWR) {
/*
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
* ECN support is not on a per-interface basis, but globally via
* the net.inet.tcp.ecn.enable sysctl knob. The default is off.
*/
if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
if (ppsratecheck(&lastecn, &curecn, 1))
if_printf(sc->vtnet_ifp,
"TSO with ECN not negotiated with host\n");
return (ENOTSUP);
}
hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
}
txq->vtntx_stats.vtxs_tso++;
return (0);
}
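/*
* Set up checksum and TSO offload in the VirtIO header. The mbuf is
* freed and NULL returned if the offload request cannot be satisfied.
*/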
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
int flags, etype, csum_start, proto, error;
sc = txq->vtntx_sc;
flags = m->m_pkthdr.csum_flags;
error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
if (error)
goto drop;
if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
(etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
/*
* We could compare the IP protocol vs the CSUM_ flag too,
* but that really should not be necessary.
*/
hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
hdr->csum_start = csum_start;
hdr->csum_offset = m->m_pkthdr.csum_data;
txq->vtntx_stats.vtxs_csum++;
}
if (flags & CSUM_TSO) {
if (__predict_false(proto != IPPROTO_TCP)) {
/* Likely failed to correctly parse the mbuf. */
sc->vtnet_stats.tx_tso_not_tcp++;
goto drop;
}
KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
("%s: mbuf %p TSO without checksum offload %#x",
__func__, m, flags));
error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
if (error)
goto drop;
}
return (m);
drop:
m_freem(m);
return (NULL);
}
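/*
* Build the scatter/gather list for the header and mbuf chain,
* defragmenting the chain if it has too many segments, and enqueue
* it in the transmit virtqueue.
*/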
static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
struct vtnet_tx_header *txhdr)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct sglist *sg;
struct mbuf *m;
int error;
sc = txq->vtntx_sc;
vq = txq->vtntx_vq;
sg = txq->vtntx_sg;
m = *m_head;
sglist_reset(sg);
error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
KASSERT(error == 0 && sg->sg_nseg == 1,
("%s: error %d adding header to sglist", __func__, error));
error = sglist_append_mbuf(sg, m);
if (error) {
m = m_defrag(m, M_NOWAIT);
if (m == NULL)
goto fail;
*m_head = m;
sc->vtnet_stats.tx_defragged++;
error = sglist_append_mbuf(sg, m);
if (error)
goto fail;
}
txhdr->vth_mbuf = m;
error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
return (error);
fail:
sc->vtnet_stats.tx_defrag_failed++;
m_freem(*m_head);
*m_head = NULL;
return (ENOBUFS);
}
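/*
* Encapsulate a frame for transmit: allocate the header, insert the
* VLAN tag and offload information if needed, and enqueue the mbuf.
*/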
static int
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head)
{
struct vtnet_tx_header *txhdr;
struct virtio_net_hdr *hdr;
struct mbuf *m;
int error;
m = *m_head;
M_ASSERTPKTHDR(m);
txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
if (txhdr == NULL) {
m_freem(m);
*m_head = NULL;
return (ENOMEM);
}
/*
* Always use the non-mergeable header, regardless of whether the feature
* was negotiated. For transmit, num_buffers is always zero. The
* vtnet_hdr_size is used to enqueue the correct header size.
*/
hdr = &txhdr->vth_uhdr.hdr;
if (m->m_flags & M_VLANTAG) {
m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
goto fail;
}
m->m_flags &= ~M_VLANTAG;
}
if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
m = vtnet_txq_offload(txq, m, hdr);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
goto fail;
}
}
error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
if (error == 0)
return (0);
fail:
uma_zfree(vtnet_tx_header_zone, txhdr);
return (error);
}
#ifdef VTNET_LEGACY_TX
static void
vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct mbuf *m0;
int tries, enq;
sc = txq->vtntx_sc;
vq = txq->vtntx_vq;
tries = 0;
VTNET_TXQ_LOCK_ASSERT(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->vtnet_link_active == 0)
return;
vtnet_txq_eof(txq);
again:
enq = 0;
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
if (virtqueue_full(vq))
break;
IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
if (m0 == NULL)
break;
if (vtnet_txq_encap(txq, &m0) != 0) {
if (m0 != NULL)
IFQ_DRV_PREPEND(&ifp->if_snd, m0);
break;
}
enq++;
ETHER_BPF_MTAP(ifp, m0);
}
if (enq > 0 && vtnet_txq_notify(txq) != 0) {
if (tries++ < VTNET_NOTIFY_RETRIES)
goto again;
txq->vtntx_stats.vtxs_rescheduled++;
taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
}
}
static void
vtnet_start(struct ifnet *ifp)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
sc = ifp->if_softc;
txq = &sc->vtnet_txqs[0];
VTNET_TXQ_LOCK(txq);
vtnet_start_locked(txq, ifp);
VTNET_TXQ_UNLOCK(txq);
}
#else /* !VTNET_LEGACY_TX */
static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct buf_ring *br;
struct ifnet *ifp;
int enq, tries, error;
sc = txq->vtntx_sc;
vq = txq->vtntx_vq;
br = txq->vtntx_br;
ifp = sc->vtnet_ifp;
tries = 0;
error = 0;
VTNET_TXQ_LOCK_ASSERT(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->vtnet_link_active == 0) {
if (m != NULL)
error = drbr_enqueue(ifp, br, m);
return (error);
}
if (m != NULL) {
error = drbr_enqueue(ifp, br, m);
if (error)
return (error);
}
vtnet_txq_eof(txq);
again:
enq = 0;
while ((m = drbr_peek(ifp, br)) != NULL) {
if (virtqueue_full(vq)) {
drbr_putback(ifp, br, m);
break;
}
if (vtnet_txq_encap(txq, &m) != 0) {
if (m != NULL)
drbr_putback(ifp, br, m);
else
drbr_advance(ifp, br);
break;
}
drbr_advance(ifp, br);
enq++;
ETHER_BPF_MTAP(ifp, m);
}
if (enq > 0 && vtnet_txq_notify(txq) != 0) {
if (tries++ < VTNET_NOTIFY_RETRIES)
goto again;
txq->vtntx_stats.vtxs_rescheduled++;
taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
}
return (0);
}
static int
vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
int i, npairs, error;
sc = ifp->if_softc;
npairs = sc->vtnet_act_vq_pairs;
/* Check if the flowid is set. */
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
i = m->m_pkthdr.flowid % npairs;
else
i = curcpu % npairs;
txq = &sc->vtnet_txqs[i];
if (VTNET_TXQ_TRYLOCK(txq) != 0) {
error = vtnet_txq_mq_start_locked(txq, m);
VTNET_TXQ_UNLOCK(txq);
} else {
error = drbr_enqueue(ifp, txq->vtntx_br, m);
taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
}
return (error);
}
static void
vtnet_txq_tq_deferred(void *xtxq, int pending)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
txq = xtxq;
sc = txq->vtntx_sc;
VTNET_TXQ_LOCK(txq);
if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
vtnet_txq_mq_start_locked(txq, NULL);
VTNET_TXQ_UNLOCK(txq);
}
#endif /* VTNET_LEGACY_TX */
static void
vtnet_txq_start(struct vtnet_txq *txq)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
#ifdef VTNET_LEGACY_TX
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
vtnet_start_locked(txq, ifp);
#else
if (!drbr_empty(ifp, txq->vtntx_br))
vtnet_txq_mq_start_locked(txq, NULL);
#endif
}
static void
vtnet_txq_tq_intr(void *xtxq, int pending)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
struct ifnet *ifp;
txq = xtxq;
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
VTNET_TXQ_LOCK(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VTNET_TXQ_UNLOCK(txq);
return;
}
vtnet_txq_eof(txq);
vtnet_txq_start(txq);
VTNET_TXQ_UNLOCK(txq);
}
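/*
* Reclaim completed transmit descriptors, freeing their mbufs and
* headers. Returns the number of entries dequeued.
*/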
static int
vtnet_txq_eof(struct vtnet_txq *txq)
{
struct virtqueue *vq;
struct vtnet_tx_header *txhdr;
struct mbuf *m;
int deq;
vq = txq->vtntx_vq;
deq = 0;
VTNET_TXQ_LOCK_ASSERT(txq);
#ifdef DEV_NETMAP
if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) {
virtqueue_disable_intr(vq); /* XXX luigi */
return (0); /* XXX or 1? */
}
#endif /* DEV_NETMAP */
while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
m = txhdr->vth_mbuf;
deq++;
txq->vtntx_stats.vtxs_opackets++;
txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
if (m->m_flags & M_MCAST)
txq->vtntx_stats.vtxs_omcasts++;
m_freem(m);
uma_zfree(vtnet_tx_header_zone, txhdr);
}
if (virtqueue_empty(vq))
txq->vtntx_watchdog = 0;
return (deq);
}
static void
vtnet_tx_vq_intr(void *xtxq)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
struct ifnet *ifp;
txq = xtxq;
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
/*
* Ignore this interrupt. Either this is a spurious interrupt
* or multiqueue without per-VQ MSIX so every queue needs to
* be polled (a brain dead configuration we could try harder
* to avoid).
*/
vtnet_txq_disable_intr(txq);
return;
}
VTNET_TXQ_LOCK(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VTNET_TXQ_UNLOCK(txq);
return;
}
vtnet_txq_eof(txq);
vtnet_txq_start(txq);
VTNET_TXQ_UNLOCK(txq);
}
static void
vtnet_tx_start_all(struct vtnet_softc *sc)
{
struct vtnet_txq *txq;
int i;
VTNET_CORE_LOCK_ASSERT(sc);
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
txq = &sc->vtnet_txqs[i];
VTNET_TXQ_LOCK(txq);
vtnet_txq_start(txq);
VTNET_TXQ_UNLOCK(txq);
}
}
#ifndef VTNET_LEGACY_TX
static void
vtnet_qflush(struct ifnet *ifp)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
struct mbuf *m;
int i;
sc = ifp->if_softc;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
txq = &sc->vtnet_txqs[i];
VTNET_TXQ_LOCK(txq);
while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
m_freem(m);
VTNET_TXQ_UNLOCK(txq);
}
if_qflush(ifp);
}
#endif
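/*
* Per-queue transmit watchdog. Returns nonzero if the queue has timed
* out; the caller then reinitializes the interface.
*/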
static int
vtnet_watchdog(struct vtnet_txq *txq)
{
struct ifnet *ifp;
ifp = txq->vtntx_sc->vtnet_ifp;
VTNET_TXQ_LOCK(txq);
if (txq->vtntx_watchdog == 1) {
/*
* Only drain completed frames if the watchdog is about to
* expire. If any frames were drained, there may be enough
* free descriptors now available to transmit queued frames.
* In that case, the timer will immediately be decremented
* below, but the timeout is generous enough that this should not
* be a problem.
*/
if (vtnet_txq_eof(txq) != 0)
vtnet_txq_start(txq);
}
if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
VTNET_TXQ_UNLOCK(txq);
return (0);
}
VTNET_TXQ_UNLOCK(txq);
if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
return (1);
}
static void
vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
struct vtnet_txq_stats *txacc)
{
bzero(rxacc, sizeof(struct vtnet_rxq_stats));
bzero(txacc, sizeof(struct vtnet_txq_stats));
for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
struct vtnet_rxq_stats *rxst;
struct vtnet_txq_stats *txst;
rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
rxacc->vrxs_csum += rxst->vrxs_csum;
rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
txst = &sc->vtnet_txqs[i].vtntx_stats;
txacc->vtxs_opackets += txst->vtxs_opackets;
txacc->vtxs_obytes += txst->vtxs_obytes;
txacc->vtxs_csum += txst->vtxs_csum;
txacc->vtxs_tso += txst->vtxs_tso;
txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
}
}
static uint64_t
vtnet_get_counter(if_t ifp, ift_counter cnt)
{
struct vtnet_softc *sc;
struct vtnet_rxq_stats rxaccum;
struct vtnet_txq_stats txaccum;
sc = if_getsoftc(ifp);
vtnet_accum_stats(sc, &rxaccum, &txaccum);
switch (cnt) {
case IFCOUNTER_IPACKETS:
return (rxaccum.vrxs_ipackets);
case IFCOUNTER_IQDROPS:
return (rxaccum.vrxs_iqdrops);
case IFCOUNTER_IERRORS:
return (rxaccum.vrxs_ierrors);
case IFCOUNTER_OPACKETS:
return (txaccum.vtxs_opackets);
#ifndef VTNET_LEGACY_TX
case IFCOUNTER_OBYTES:
return (txaccum.vtxs_obytes);
case IFCOUNTER_OMCASTS:
return (txaccum.vtxs_omcasts);
#endif
default:
return (if_get_counter_default(ifp, cnt));
}
}
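/*
* Periodic callout: run the transmit watchdogs and reinitialize the
* interface if any queue has timed out.
*/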
static void
vtnet_tick(void *xsc)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
int i, timedout;
sc = xsc;
ifp = sc->vtnet_ifp;
timedout = 0;
VTNET_CORE_LOCK_ASSERT(sc);
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
if (timedout != 0) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vtnet_init_locked(sc);
} else
callout_schedule(&sc->vtnet_tick_ch, hz);
}
static void
vtnet_start_taskqueues(struct vtnet_softc *sc)
{
device_t dev;
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i, error;
dev = sc->vtnet_dev;
/*
* Errors here are very difficult to recover from - we cannot
* easily fail because, if this is during boot, we will hang
* when freeing any successfully started taskqueues because
* the scheduler isn't up yet.
*
* Most drivers just ignore the return value - it only fails
* with ENOMEM so an error is not likely.
*/
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
"%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
if (error) {
device_printf(dev, "failed to start rx taskq %d\n",
rxq->vtnrx_id);
}
txq = &sc->vtnet_txqs[i];
error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
"%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
if (error) {
device_printf(dev, "failed to start tx taskq %d\n",
txq->vtntx_id);
}
}
}
static void
vtnet_free_taskqueues(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
if (rxq->vtnrx_tq != NULL) {
taskqueue_free(rxq->vtnrx_tq);
rxq->vtnrx_tq = NULL;
}
txq = &sc->vtnet_txqs[i];
if (txq->vtntx_tq != NULL) {
taskqueue_free(txq->vtntx_tq);
txq->vtntx_tq = NULL;
}
}
}
static void
vtnet_drain_taskqueues(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
if (rxq->vtnrx_tq != NULL)
taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
txq = &sc->vtnet_txqs[i];
if (txq->vtntx_tq != NULL) {
taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
#ifndef VTNET_LEGACY_TX
taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
#endif
}
}
}
static void
vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
#ifdef DEV_NETMAP
if (nm_native_on(NA(sc->vtnet_ifp)))
return;
#endif /* DEV_NETMAP */
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
vtnet_rxq_free_mbufs(rxq);
txq = &sc->vtnet_txqs[i];
vtnet_txq_free_mbufs(txq);
}
}
static void
vtnet_stop_rendezvous(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
/*
* Lock and unlock the per-queue mutex so we know the stop
* state is visible. Doing only the active queues should be
* sufficient, but it does not cost much extra to do all the
* queues. Note we hold the core mutex here too.
*/
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
VTNET_RXQ_LOCK(rxq);
VTNET_RXQ_UNLOCK(rxq);
txq = &sc->vtnet_txqs[i];
VTNET_TXQ_LOCK(txq);
VTNET_TXQ_UNLOCK(txq);
}
}
static void
vtnet_stop(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
sc->vtnet_link_active = 0;
callout_stop(&sc->vtnet_tick_ch);
/* Only advisory. */
vtnet_disable_interrupts(sc);
/*
* Stop the host adapter. This resets it to the pre-initialized
* state. It will not generate any interrupts until after it is
* reinitialized.
*/
virtio_stop(dev);
vtnet_stop_rendezvous(sc);
/* Free any mbufs left in the virtqueues. */
vtnet_drain_rxtx_queues(sc);
}
static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
uint64_t features;
int mask, error;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
features = sc->vtnet_features;
mask = 0;
#if defined(INET)
mask |= IFCAP_RXCSUM;
#endif
#if defined (INET6)
mask |= IFCAP_RXCSUM_IPV6;
#endif
/*
* Re-negotiate with the host, removing any disabled receive
* features. Transmit features are disabled only on our side
* via if_capenable and if_hwassist.
*/
if (ifp->if_capabilities & mask) {
/*
* We require both IPv4 and IPv6 offloading to be enabled
* in order to negotiate it: VirtIO does not distinguish
* between the two.
*/
if ((ifp->if_capenable & mask) != mask)
features &= ~VIRTIO_NET_F_GUEST_CSUM;
}
if (ifp->if_capabilities & IFCAP_LRO) {
if ((ifp->if_capenable & IFCAP_LRO) == 0)
features &= ~VTNET_LRO_FEATURES;
}
if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
features &= ~VIRTIO_NET_F_CTRL_VLAN;
}
error = virtio_reinit(dev, features);
if (error)
device_printf(dev, "virtio reinit error %d\n", error);
return (error);
}
static void
vtnet_init_rx_filters(struct vtnet_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vtnet_ifp;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
/* Restore promiscuous and all-multicast modes. */
vtnet_rx_filter(sc);
/* Restore filtered MAC addresses. */
vtnet_rx_filter_mac(sc);
}
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
vtnet_rx_filter_vlan(sc);
}
static int
vtnet_init_rx_queues(struct vtnet_softc *sc)
{
device_t dev;
struct vtnet_rxq *rxq;
int i, clsize, error;
dev = sc->vtnet_dev;
/*
* Use the new cluster size if one has been set (via a MTU
* change). Otherwise, use the standard 2K clusters.
*
* BMV: It might make sense to use page sized clusters as
* the default (depending on the features negotiated).
*/
if (sc->vtnet_rx_new_clsize != 0) {
clsize = sc->vtnet_rx_new_clsize;
sc->vtnet_rx_new_clsize = 0;
} else
clsize = MCLBYTES;
sc->vtnet_rx_clsize = clsize;
sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
("%s: too many rx mbufs %d for %d segments", __func__,
sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
#ifdef DEV_NETMAP
if (vtnet_netmap_init_rx_buffers(sc))
return (0);
#endif /* DEV_NETMAP */
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
/* Hold the lock to satisfy asserts. */
VTNET_RXQ_LOCK(rxq);
error = vtnet_rxq_populate(rxq);
VTNET_RXQ_UNLOCK(rxq);
if (error) {
device_printf(dev,
"cannot allocate mbufs for Rx queue %d\n", i);
return (error);
}
}
return (0);
}
static int
vtnet_init_tx_queues(struct vtnet_softc *sc)
{
struct vtnet_txq *txq;
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
txq = &sc->vtnet_txqs[i];
txq->vtntx_watchdog = 0;
}
return (0);
}
static int
vtnet_init_rxtx_queues(struct vtnet_softc *sc)
{
int error;
error = vtnet_init_rx_queues(sc);
if (error)
return (error);
error = vtnet_init_tx_queues(sc);
if (error)
return (error);
return (0);
}
static void
vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
{
device_t dev;
int npairs;
dev = sc->vtnet_dev;
if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
sc->vtnet_act_vq_pairs = 1;
return;
}
npairs = sc->vtnet_requested_vq_pairs;
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
device_printf(dev,
"cannot set active queue pairs to %d\n", npairs);
npairs = 1;
}
sc->vtnet_act_vq_pairs = npairs;
}
static int
vtnet_reinit(struct vtnet_softc *sc)
{
struct ifnet *ifp;
int error;
ifp = sc->vtnet_ifp;
/* Use the current MAC address. */
bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
vtnet_set_hwaddr(sc);
vtnet_set_active_vq_pairs(sc);
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
if (ifp->if_capenable & IFCAP_TSO4)
ifp->if_hwassist |= CSUM_IP_TSO;
if (ifp->if_capenable & IFCAP_TSO6)
ifp->if_hwassist |= CSUM_IP6_TSO;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
vtnet_init_rx_filters(sc);
error = vtnet_init_rxtx_queues(sc);
if (error)
return (error);
vtnet_enable_interrupts(sc);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
return (0);
}
static void
vtnet_init_locked(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
vtnet_stop(sc);
/* Reinitialize with the host. */
if (vtnet_virtio_reinit(sc) != 0)
goto fail;
if (vtnet_reinit(sc) != 0)
goto fail;
virtio_reinit_complete(dev);
vtnet_update_link_status(sc);
callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
return;
fail:
vtnet_stop(sc);
}
static void
vtnet_init(void *xsc)
{
struct vtnet_softc *sc;
sc = xsc;
#ifdef DEV_NETMAP
if (!NA(sc->vtnet_ifp)) {
D("try to attach again");
vtnet_netmap_attach(sc);
}
#endif /* DEV_NETMAP */
VTNET_CORE_LOCK(sc);
vtnet_init_locked(sc);
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
struct virtqueue *vq;
vq = sc->vtnet_ctrl_vq;
/*
* The control virtqueue is only polled and therefore it should
* already be empty.
*/
KASSERT(virtqueue_empty(vq),
("%s: ctrl vq %p not empty", __func__, vq));
}
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
struct sglist *sg, int readable, int writable)
{
struct virtqueue *vq;
vq = sc->vtnet_ctrl_vq;
VTNET_CORE_LOCK_ASSERT(sc);
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
("%s: CTRL_VQ feature not negotiated", __func__));
if (!virtqueue_empty(vq))
return;
if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
return;
/*
* Poll for the response, but the command is likely already
* done when we return from the notify.
*/
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
}
static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
struct virtio_net_ctrl_hdr hdr __aligned(2);
struct sglist_seg segs[3];
struct sglist sg;
uint8_t ack;
int error;
hdr.class = VIRTIO_NET_CTRL_MAC;
hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
ack = VIRTIO_NET_ERR;
sglist_init(&sg, 3, segs);
error = 0;
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
error |= sglist_append(&sg, &ack, sizeof(uint8_t));
KASSERT(error == 0 && sg.sg_nseg == 3,
("%s: error %d adding set MAC msg to sglist", __func__, error));
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
return (ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr;
uint8_t pad1;
struct virtio_net_ctrl_mq mq;
uint8_t pad2;
uint8_t ack;
} s __aligned(2);
int error;
s.hdr.class = VIRTIO_NET_CTRL_MQ;
s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
s.mq.virtqueue_pairs = npairs;
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, 3, segs);
error = 0;
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
KASSERT(error == 0 && sg.sg_nseg == 3,
("%s: error %d adding MQ message to sglist", __func__, error));
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr;
uint8_t pad1;
uint8_t onoff;
uint8_t pad2;
uint8_t ack;
} s __aligned(2);
int error;
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
("%s: CTRL_RX feature not negotiated", __func__));
s.hdr.class = VIRTIO_NET_CTRL_RX;
s.hdr.cmd = cmd;
s.onoff = !!on;
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, 3, segs);
error = 0;
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
KASSERT(error == 0 && sg.sg_nseg == 3,
("%s: error %d adding Rx message to sglist", __func__, error));
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}
static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
/*
* The device defaults to promiscuous mode for backwards compatibility.
* Turn it off at attach time if possible.
*/
static void
vtnet_attach_disable_promisc(struct vtnet_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK(sc);
if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
ifp->if_flags |= IFF_PROMISC;
} else if (vtnet_set_promisc(sc, 0) != 0) {
ifp->if_flags |= IFF_PROMISC;
device_printf(sc->vtnet_dev,
"cannot disable default promiscuous mode\n");
}
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
device_printf(dev, "cannot %s promiscuous mode\n",
ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
device_printf(dev, "cannot %s all-multicast mode\n",
ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
struct virtio_net_ctrl_hdr hdr __aligned(2);
struct vtnet_mac_filter *filter;
struct sglist_seg segs[4];
struct sglist sg;
struct ifnet *ifp;
struct ifaddr *ifa;
struct ifmultiaddr *ifma;
int ucnt, mcnt, promisc, allmulti, error;
uint8_t ack;
ifp = sc->vtnet_ifp;
filter = sc->vtnet_mac_filter;
ucnt = 0;
mcnt = 0;
promisc = 0;
allmulti = 0;
VTNET_CORE_LOCK_ASSERT(sc);
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
("%s: CTRL_RX feature not negotiated", __func__));
/* Unicast MAC addresses: */
if_addr_rlock(ifp);
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
if (ifa->ifa_addr->sa_family != AF_LINK)
continue;
else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
continue;
else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
promisc = 1;
break;
}
bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
&filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
ucnt++;
}
if_addr_runlock(ifp);
if (promisc != 0) {
filter->vmf_unicast.nentries = 0;
if_printf(ifp, "more than %d MAC addresses assigned, "
"falling back to promiscuous mode\n",
VTNET_MAX_MAC_ENTRIES);
} else
filter->vmf_unicast.nentries = ucnt;
/* Multicast MAC addresses: */
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
allmulti = 1;
break;
}
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
&filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
mcnt++;
}
if_maddr_runlock(ifp);
if (allmulti != 0) {
filter->vmf_multicast.nentries = 0;
if_printf(ifp, "more than %d multicast MAC addresses "
"assigned, falling back to all-multicast mode\n",
VTNET_MAX_MAC_ENTRIES);
} else
filter->vmf_multicast.nentries = mcnt;
if (promisc != 0 && allmulti != 0)
goto out;
hdr.class = VIRTIO_NET_CTRL_MAC;
hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
ack = VIRTIO_NET_ERR;
sglist_init(&sg, 4, segs);
error = 0;
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &filter->vmf_unicast,
sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
error |= sglist_append(&sg, &filter->vmf_multicast,
sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
error |= sglist_append(&sg, &ack, sizeof(uint8_t));
KASSERT(error == 0 && sg.sg_nseg == 4,
("%s: error %d adding MAC filter msg to sglist", __func__, error));
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
if (ack != VIRTIO_NET_OK)
if_printf(ifp, "error setting host MAC filter table\n");
out:
if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
if_printf(ifp, "cannot enable promiscuous mode\n");
if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
if_printf(ifp, "cannot enable all-multicast mode\n");
}
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr;
uint8_t pad1;
uint16_t tag;
uint8_t pad2;
uint8_t ack;
} s __aligned(2);
int error;
s.hdr.class = VIRTIO_NET_CTRL_VLAN;
s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
s.tag = tag;
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, 3, segs);
error = 0;
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
KASSERT(error == 0 && sg.sg_nseg == 3,
("%s: error %d adding VLAN message to sglist", __func__, error));
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
uint32_t w;
uint16_t tag;
int i, bit;
VTNET_CORE_LOCK_ASSERT(sc);
KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
("%s: VLAN_FILTER feature not negotiated", __func__));
/* Enable the filter for each configured VLAN. */
for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
w = sc->vtnet_vlan_filter[i];
while ((bit = ffs(w) - 1) != -1) {
w &= ~(1 << bit);
tag = sizeof(w) * CHAR_BIT * i + bit;
if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
device_printf(sc->vtnet_dev,
"cannot enable VLAN %d filter\n", tag);
}
}
}
}
static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
struct ifnet *ifp;
int idx, bit;
ifp = sc->vtnet_ifp;
idx = (tag >> 5) & 0x7F;
bit = tag & 0x1F;
if (tag == 0 || tag > 4095)
return;
VTNET_CORE_LOCK(sc);
if (add)
sc->vtnet_vlan_filter[idx] |= (1 << bit);
else
sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
ifp->if_drv_flags & IFF_DRV_RUNNING &&
vtnet_exec_vlan_filter(sc, add, tag) != 0) {
device_printf(sc->vtnet_dev,
"cannot %s VLAN %d %s the host filter table\n",
add ? "add" : "remove", tag, add ? "to" : "from");
}
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
if (ifp->if_softc != arg)
return;
vtnet_update_vlan_filter(arg, 1, tag);
}
static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
if (ifp->if_softc != arg)
return;
vtnet_update_vlan_filter(arg, 0, tag);
}
static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
uint16_t status;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
status = VIRTIO_NET_S_LINK_UP;
else
status = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, status));
return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}
static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
struct ifnet *ifp;
int link;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
link = vtnet_is_link_up(sc);
/* Notify if the link status has changed. */
if (link != 0 && sc->vtnet_link_active == 0) {
sc->vtnet_link_active = 1;
if_link_state_change(ifp, LINK_STATE_UP);
} else if (link == 0 && sc->vtnet_link_active != 0) {
sc->vtnet_link_active = 0;
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
struct vtnet_softc *sc;
struct ifmedia *ifm;
sc = ifp->if_softc;
ifm = &sc->vtnet_media;
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
return (0);
}
static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct vtnet_softc *sc;
sc = ifp->if_softc;
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
VTNET_CORE_LOCK(sc);
if (vtnet_is_link_up(sc) != 0) {
ifmr->ifm_status |= IFM_ACTIVE;
ifmr->ifm_active |= VTNET_MEDIATYPE;
} else
ifmr->ifm_active |= IFM_NONE;
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
device_t dev;
int i;
dev = sc->vtnet_dev;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
device_printf(dev, "unable to set MAC address\n");
} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
for (i = 0; i < ETHER_ADDR_LEN; i++) {
virtio_write_dev_config_1(dev,
offsetof(struct virtio_net_config, mac) + i,
sc->vtnet_hwaddr[i]);
}
}
}
static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
device_t dev;
int i;
dev = sc->vtnet_dev;
if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
/*
* Generate a random locally administered unicast address.
*
* It would be nice to generate the same MAC address across
* reboots, but it seems all the hosts currently available
* support the MAC feature, so this isn't too important.
*/
sc->vtnet_hwaddr[0] = 0xB2;
arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
vtnet_set_hwaddr(sc);
return;
}
for (i = 0; i < ETHER_ADDR_LEN; i++) {
sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
offsetof(struct virtio_net_config, mac) + i);
}
}
static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
struct ether_vlan_header *evh;
evh = mtod(m, struct ether_vlan_header *);
m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
m->m_flags |= M_VLANTAG;
/* Strip the 802.1Q header. */
bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
ETHER_HDR_LEN - ETHER_TYPE_LEN);
m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
static void
vtnet_set_rx_process_limit(struct vtnet_softc *sc)
{
int limit;
limit = vtnet_tunable_int(sc, "rx_process_limit",
vtnet_rx_process_limit);
if (limit < 0)
limit = INT_MAX;
sc->vtnet_rx_process_limit = limit;
}
static void
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
{
int size, thresh;
size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
/*
* The Tx interrupt is disabled until the queue free count falls
* below our threshold. Completed frames are drained from the Tx
* virtqueue before transmitting new frames and in the watchdog
* callout, so the frequency of Tx interrupts is greatly reduced,
* at the cost of not freeing mbufs as quickly as they otherwise
* would be.
*
* N.B. We assume all the Tx queues are the same size.
*/
thresh = size / 4;
/*
* Without indirect descriptors, leave enough room for the most
* segments we handle.
*/
if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
thresh < sc->vtnet_tx_nsegs)
thresh = sc->vtnet_tx_nsegs;
sc->vtnet_tx_intr_thresh = thresh;
}
static void
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
{
struct sysctl_oid *node;
struct sysctl_oid_list *list;
struct vtnet_rxq_stats *stats;
char namebuf[16];
snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Receive Queue");
list = SYSCTL_CHILDREN(node);
stats = &rxq->vtnrx_stats;
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
&stats->vrxs_ipackets, "Receive packets");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
&stats->vrxs_ibytes, "Receive bytes");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
&stats->vrxs_iqdrops, "Receive drops");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
&stats->vrxs_ierrors, "Receive errors");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
&stats->vrxs_csum, "Receive checksum offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
&stats->vrxs_csum_failed, "Receive checksum offload failed");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
&stats->vrxs_rescheduled,
"Receive interrupt handler rescheduled");
}
static void
vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_txq *txq)
{
struct sysctl_oid *node;
struct sysctl_oid_list *list;
struct vtnet_txq_stats *stats;
char namebuf[16];
snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Transmit Queue");
list = SYSCTL_CHILDREN(node);
stats = &txq->vtntx_stats;
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
&stats->vtxs_opackets, "Transmit packets");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
&stats->vtxs_obytes, "Transmit bytes");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
&stats->vtxs_omcasts, "Transmit multicasts");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
&stats->vtxs_csum, "Transmit checksum offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
&stats->vtxs_tso, "Transmit segmentation offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
&stats->vtxs_rescheduled,
"Transmit interrupt handler rescheduled");
}
static void
vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
int i;
dev = sc->vtnet_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
}
}
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_softc *sc)
{
struct vtnet_statistics *stats;
struct vtnet_rxq_stats rxaccum;
struct vtnet_txq_stats txaccum;
vtnet_accum_stats(sc, &rxaccum, &txaccum);
stats = &sc->vtnet_stats;
stats->rx_csum_offloaded = rxaccum.vrxs_csum;
stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
stats->tx_csum_offloaded = txaccum.vtxs_csum;
stats->tx_tso_offloaded = txaccum.vtxs_tso;
stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
CTLFLAG_RD, &stats->mbuf_alloc_failed,
"Mbuf cluster allocation failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
CTLFLAG_RD, &stats->rx_frame_too_large,
"Received frame larger than the mbuf chain");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
CTLFLAG_RD, &stats->rx_enq_replacement_failed,
"Enqueuing the replacement receive mbuf failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
CTLFLAG_RD, &stats->rx_mergeable_failed,
"Mergeable buffers receive failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
"Received checksum offloaded buffer with unsupported "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
"Received checksum offloaded buffer with incorrect IP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
CTLFLAG_RD, &stats->rx_csum_bad_offset,
"Received checksum offloaded buffer with incorrect offset");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
CTLFLAG_RD, &stats->rx_csum_bad_proto,
"Received checksum offloaded buffer with incorrect protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
CTLFLAG_RD, &stats->rx_csum_failed,
"Received buffer checksum offload failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
CTLFLAG_RD, &stats->rx_csum_offloaded,
"Received buffer checksum offload succeeded");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
CTLFLAG_RD, &stats->rx_task_rescheduled,
"Times the receive interrupt task rescheduled itself");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
"Aborted transmit of checksum offloaded buffer with unknown "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
"Aborted transmit of TSO buffer with unknown Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
CTLFLAG_RD, &stats->tx_tso_not_tcp,
"Aborted transmit of TSO buffer with non TCP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
CTLFLAG_RD, &stats->tx_defragged,
"Transmit mbufs defragged");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
CTLFLAG_RD, &stats->tx_defrag_failed,
"Aborted transmit of buffer because defrag failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
CTLFLAG_RD, &stats->tx_csum_offloaded,
"Offloaded checksum of transmitted buffer");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
CTLFLAG_RD, &stats->tx_tso_offloaded,
"Segmentation offload of transmitted buffer");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
CTLFLAG_RD, &stats->tx_task_rescheduled,
"Times the transmit interrupt task rescheduled itself");
}
static void
vtnet_setup_sysctl(struct vtnet_softc *sc)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = sc->vtnet_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
"Maximum number of supported virtqueue pairs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
"Requested number of virtqueue pairs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
"Number of active virtqueue pairs");
vtnet_setup_stat_sysctl(ctx, child, sc);
}
static int
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
{
return (virtqueue_enable_intr(rxq->vtnrx_vq));
}
static void
vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
{
virtqueue_disable_intr(rxq->vtnrx_vq);
}
static int
vtnet_txq_enable_intr(struct vtnet_txq *txq)
{
struct virtqueue *vq;
vq = txq->vtntx_vq;
if (vtnet_txq_below_threshold(txq) != 0)
return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
/*
* The free count is above our threshold. Keep the Tx interrupt
* disabled until the queue is fuller.
*/
return (0);
}
static void
vtnet_txq_disable_intr(struct vtnet_txq *txq)
{
virtqueue_disable_intr(txq->vtntx_vq);
}
static void
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
{
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
}
static void
vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
{
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
}
static void
vtnet_enable_interrupts(struct vtnet_softc *sc)
{
vtnet_enable_rx_interrupts(sc);
vtnet_enable_tx_interrupts(sc);
}
static void
vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
{
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
}
static void
vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
{
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
}
static void
vtnet_disable_interrupts(struct vtnet_softc *sc)
{
vtnet_disable_rx_interrupts(sc);
vtnet_disable_tx_interrupts(sc);
}
static int
vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
{
char path[64];
snprintf(path, sizeof(path),
"hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
TUNABLE_INT_FETCH(path, &def);
return (def);
}
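/*
 * Standalone illustrative sketch (not driver code): the per-device tunable
 * name that vtnet_tunable_int() constructs for a given unit and knob, e.g.
 * "hw.vtnet.0.mq_disable".  The kernel-side TUNABLE_INT_FETCH() lookup is
 * assumed and not reproduced here; the unit number and knob are hypothetical.
 */
#include <stdio.h>

static int
example_tunable_int(int unit, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path), "hw.vtnet.%d.%s", unit, knob);
	printf("would look up loader tunable \"%s\" (default %d)\n", path, def);
	return (def);
}

int
main(void)
{
	(void)example_tunable_int(0, "mq_disable", 0);
	return (0);
}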
Index: head/sys/dev/virtio/pci/virtio_pci.c
===================================================================
--- head/sys/dev/virtio/pci/virtio_pci.c (revision 328217)
+++ head/sys/dev/virtio/pci/virtio_pci.c (revision 328218)
@@ -1,1332 +1,1332 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for the VirtIO PCI interface. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/pci/virtio_pci.h>
#include "virtio_bus_if.h"
#include "virtio_if.h"
struct vtpci_interrupt {
struct resource *vti_irq;
int vti_rid;
void *vti_handler;
};
struct vtpci_virtqueue {
struct virtqueue *vtv_vq;
int vtv_no_intr;
};
struct vtpci_softc {
device_t vtpci_dev;
struct resource *vtpci_res;
struct resource *vtpci_msix_res;
uint64_t vtpci_features;
uint32_t vtpci_flags;
#define VTPCI_FLAG_NO_MSI 0x0001
#define VTPCI_FLAG_NO_MSIX 0x0002
#define VTPCI_FLAG_LEGACY 0x1000
#define VTPCI_FLAG_MSI 0x2000
#define VTPCI_FLAG_MSIX 0x4000
#define VTPCI_FLAG_SHARED_MSIX 0x8000
#define VTPCI_FLAG_ITYPE_MASK 0xF000
/* This "bus" will only ever have one child. */
device_t vtpci_child_dev;
struct virtio_feature_desc *vtpci_child_feat_desc;
int vtpci_nvqs;
struct vtpci_virtqueue *vtpci_vqs;
/*
* Ideally, each virtqueue that the driver provides a callback for will
* receive its own MSIX vector. If there are not sufficient vectors
* available, then attempt to have all the VQs share one vector. For
* MSIX, the configuration changed notifications must be on their own
* vector.
*
* If MSIX is not available, we will attempt to have the whole device
* share one MSI vector, and then, finally, one legacy interrupt.
*/
struct vtpci_interrupt vtpci_device_interrupt;
struct vtpci_interrupt *vtpci_msix_vq_interrupts;
int vtpci_nmsix_resources;
};
static int vtpci_probe(device_t);
static int vtpci_attach(device_t);
static int vtpci_detach(device_t);
static int vtpci_suspend(device_t);
static int vtpci_resume(device_t);
static int vtpci_shutdown(device_t);
static void vtpci_driver_added(device_t, driver_t *);
static void vtpci_child_detached(device_t, device_t);
static int vtpci_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtpci_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t vtpci_negotiate_features(device_t, uint64_t);
static int vtpci_with_feature(device_t, uint64_t);
static int vtpci_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtpci_setup_intr(device_t, enum intr_type);
static void vtpci_stop(device_t);
static int vtpci_reinit(device_t, uint64_t);
static void vtpci_reinit_complete(device_t);
static void vtpci_notify_virtqueue(device_t, uint16_t);
static uint8_t vtpci_get_status(device_t);
static void vtpci_set_status(device_t, uint8_t);
static void vtpci_read_dev_config(device_t, bus_size_t, void *, int);
static void vtpci_write_dev_config(device_t, bus_size_t, void *, int);
static void vtpci_describe_features(struct vtpci_softc *, const char *,
uint64_t);
static void vtpci_probe_and_attach_child(struct vtpci_softc *);
static int vtpci_alloc_msix(struct vtpci_softc *, int);
static int vtpci_alloc_msi(struct vtpci_softc *);
static int vtpci_alloc_intr_msix_pervq(struct vtpci_softc *);
static int vtpci_alloc_intr_msix_shared(struct vtpci_softc *);
static int vtpci_alloc_intr_msi(struct vtpci_softc *);
static int vtpci_alloc_intr_legacy(struct vtpci_softc *);
static int vtpci_alloc_interrupt(struct vtpci_softc *, int, int,
struct vtpci_interrupt *);
static int vtpci_alloc_intr_resources(struct vtpci_softc *);
static int vtpci_setup_legacy_interrupt(struct vtpci_softc *,
enum intr_type);
static int vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *,
enum intr_type);
static int vtpci_setup_msix_interrupts(struct vtpci_softc *,
enum intr_type);
static int vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type);
static int vtpci_register_msix_vector(struct vtpci_softc *, int,
struct vtpci_interrupt *);
static int vtpci_set_host_msix_vectors(struct vtpci_softc *);
static int vtpci_reinit_virtqueue(struct vtpci_softc *, int);
static void vtpci_free_interrupt(struct vtpci_softc *,
struct vtpci_interrupt *);
static void vtpci_free_interrupts(struct vtpci_softc *);
static void vtpci_free_virtqueues(struct vtpci_softc *);
static void vtpci_release_child_resources(struct vtpci_softc *);
static void vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *);
static void vtpci_reset(struct vtpci_softc *);
static void vtpci_select_virtqueue(struct vtpci_softc *, int);
static void vtpci_legacy_intr(void *);
static int vtpci_vq_shared_intr_filter(void *);
static void vtpci_vq_shared_intr(void *);
static int vtpci_vq_intr_filter(void *);
static void vtpci_vq_intr(void *);
static void vtpci_config_intr(void *);
#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt
#define VIRTIO_PCI_CONFIG(_sc) \
VIRTIO_PCI_CONFIG_OFF((((_sc)->vtpci_flags & VTPCI_FLAG_MSIX)) != 0)
/*
* I/O port read/write wrappers.
*/
#define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o))
#define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o))
#define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o))
#define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v))
#define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v))
#define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v))
/* Tunables. */
static int vtpci_disable_msix = 0;
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
static device_method_t vtpci_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtpci_probe),
DEVMETHOD(device_attach, vtpci_attach),
DEVMETHOD(device_detach, vtpci_detach),
DEVMETHOD(device_suspend, vtpci_suspend),
DEVMETHOD(device_resume, vtpci_resume),
DEVMETHOD(device_shutdown, vtpci_shutdown),
/* Bus interface. */
DEVMETHOD(bus_driver_added, vtpci_driver_added),
DEVMETHOD(bus_child_detached, vtpci_child_detached),
DEVMETHOD(bus_read_ivar, vtpci_read_ivar),
DEVMETHOD(bus_write_ivar, vtpci_write_ivar),
/* VirtIO bus interface. */
DEVMETHOD(virtio_bus_negotiate_features, vtpci_negotiate_features),
DEVMETHOD(virtio_bus_with_feature, vtpci_with_feature),
DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtpci_setup_intr),
DEVMETHOD(virtio_bus_stop, vtpci_stop),
DEVMETHOD(virtio_bus_reinit, vtpci_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtpci_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtpci_notify_virtqueue),
DEVMETHOD(virtio_bus_read_device_config, vtpci_read_dev_config),
DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
DEVMETHOD_END
};
static driver_t vtpci_driver = {
"virtio_pci",
vtpci_methods,
sizeof(struct vtpci_softc)
};
devclass_t vtpci_devclass;
DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
MODULE_VERSION(virtio_pci, 1);
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
static int
vtpci_probe(device_t dev)
{
char desc[36];
const char *name;
if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
return (ENXIO);
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
return (ENXIO);
if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
return (ENXIO);
name = virtio_device_name(pci_get_subdevice(dev));
if (name == NULL)
name = "Unknown";
snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
device_set_desc_copy(dev, desc);
return (BUS_PROBE_DEFAULT);
}
static int
vtpci_attach(device_t dev)
{
struct vtpci_softc *sc;
device_t child;
int rid;
sc = device_get_softc(dev);
sc->vtpci_dev = dev;
pci_enable_busmaster(dev);
rid = PCIR_BAR(0);
sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
RF_ACTIVE);
if (sc->vtpci_res == NULL) {
device_printf(dev, "cannot map I/O space\n");
return (ENXIO);
}
if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;
if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
rid = PCIR_BAR(1);
sc->vtpci_msix_res = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
}
if (sc->vtpci_msix_res == NULL)
sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
vtpci_reset(sc);
/* Tell the host we've noticed this device. */
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
if ((child = device_add_child(dev, NULL, -1)) == NULL) {
device_printf(dev, "cannot create child device\n");
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtpci_detach(dev);
return (ENOMEM);
}
sc->vtpci_child_dev = child;
vtpci_probe_and_attach_child(sc);
return (0);
}
static int
vtpci_detach(device_t dev)
{
struct vtpci_softc *sc;
device_t child;
int error;
sc = device_get_softc(dev);
if ((child = sc->vtpci_child_dev) != NULL) {
error = device_delete_child(dev, child);
if (error)
return (error);
sc->vtpci_child_dev = NULL;
}
vtpci_reset(sc);
if (sc->vtpci_msix_res != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
sc->vtpci_msix_res);
sc->vtpci_msix_res = NULL;
}
if (sc->vtpci_res != NULL) {
bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
sc->vtpci_res);
sc->vtpci_res = NULL;
}
return (0);
}
static int
vtpci_suspend(device_t dev)
{
return (bus_generic_suspend(dev));
}
static int
vtpci_resume(device_t dev)
{
return (bus_generic_resume(dev));
}
static int
vtpci_shutdown(device_t dev)
{
(void) bus_generic_shutdown(dev);
/* Forcibly stop the host device. */
vtpci_stop(dev);
return (0);
}
static void
vtpci_driver_added(device_t dev, driver_t *driver)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
vtpci_probe_and_attach_child(sc);
}
static void
vtpci_child_detached(device_t dev, device_t child)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
vtpci_reset(sc);
vtpci_release_child_resources(sc);
}
static int
vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
if (sc->vtpci_child_dev != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_DEVTYPE:
case VIRTIO_IVAR_SUBDEVICE:
*result = pci_get_subdevice(dev);
break;
case VIRTIO_IVAR_VENDOR:
*result = pci_get_vendor(dev);
break;
case VIRTIO_IVAR_DEVICE:
*result = pci_get_device(dev);
break;
case VIRTIO_IVAR_SUBVENDOR:
*result = pci_get_subdevice(dev);
break;
default:
return (ENOENT);
}
return (0);
}
static int
vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
if (sc->vtpci_child_dev != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_FEATURE_DESC:
sc->vtpci_child_feat_desc = (void *) value;
break;
default:
return (ENOENT);
}
return (0);
}
static uint64_t
vtpci_negotiate_features(device_t dev, uint64_t child_features)
{
struct vtpci_softc *sc;
uint64_t host_features, features;
sc = device_get_softc(dev);
host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
vtpci_describe_features(sc, "host", host_features);
/*
* Limit negotiated features to what the driver, virtqueue, and
* host all support.
*/
features = host_features & child_features;
features = virtqueue_filter_features(features);
sc->vtpci_features = features;
vtpci_describe_features(sc, "negotiated", features);
vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
return (features);
}
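/*
 * Standalone illustrative sketch (not driver code): feature negotiation in
 * vtpci_negotiate_features() reduces to a bitwise AND of the host's
 * advertised feature mask and the features the child driver requested (the
 * additional virtqueue_filter_features() pass is omitted here).  The bit
 * values below are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t host_features = 0x70fd3;	/* hypothetical host mask */
	uint64_t child_features = 0x30811;	/* hypothetical driver mask */
	uint64_t negotiated = host_features & child_features;

	printf("host       %#jx\n", (uintmax_t)host_features);
	printf("child      %#jx\n", (uintmax_t)child_features);
	printf("negotiated %#jx\n", (uintmax_t)negotiated);
	return (0);
}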
static int
vtpci_with_feature(device_t dev, uint64_t feature)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
return ((sc->vtpci_features & feature) != 0);
}
static int
vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
struct vq_alloc_info *vq_info)
{
struct vtpci_softc *sc;
struct virtqueue *vq;
struct vtpci_virtqueue *vqx;
struct vq_alloc_info *info;
int idx, error;
uint16_t size;
sc = device_get_softc(dev);
if (sc->vtpci_nvqs != 0)
return (EALREADY);
if (nvqs <= 0)
return (EINVAL);
- sc->vtpci_vqs = mallocarray(nvqs, sizeof(struct vtpci_virtqueue),
+ sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtpci_vqs == NULL)
return (ENOMEM);
for (idx = 0; idx < nvqs; idx++) {
vqx = &sc->vtpci_vqs[idx];
info = &vq_info[idx];
vtpci_select_virtqueue(sc, idx);
size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN,
0xFFFFFFFFUL, info, &vq);
if (error) {
device_printf(dev,
"cannot allocate virtqueue %d: %d\n", idx, error);
break;
}
vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
vqx->vtv_vq = *info->vqai_vq = vq;
vqx->vtv_no_intr = info->vqai_intr == NULL;
sc->vtpci_nvqs++;
}
if (error)
vtpci_free_virtqueues(sc);
return (error);
}
static int
vtpci_setup_intr(device_t dev, enum intr_type type)
{
struct vtpci_softc *sc;
int attempt, error;
sc = device_get_softc(dev);
for (attempt = 0; attempt < 5; attempt++) {
/*
* Start with the most desirable interrupt configuration and
* fall back to less desirable ones.
*/
switch (attempt) {
case 0:
error = vtpci_alloc_intr_msix_pervq(sc);
break;
case 1:
error = vtpci_alloc_intr_msix_shared(sc);
break;
case 2:
error = vtpci_alloc_intr_msi(sc);
break;
case 3:
error = vtpci_alloc_intr_legacy(sc);
break;
default:
device_printf(dev,
"exhausted all interrupt allocation attempts\n");
return (ENXIO);
}
if (error == 0 && vtpci_setup_interrupts(sc, type) == 0)
break;
vtpci_cleanup_setup_intr_attempt(sc);
}
if (bootverbose) {
if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
device_printf(dev, "using legacy interrupt\n");
else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
device_printf(dev, "using MSI interrupt\n");
else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
device_printf(dev, "using shared MSIX interrupts\n");
else
device_printf(dev, "using per VQ MSIX interrupts\n");
}
return (0);
}
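/*
 * Standalone illustrative sketch (not driver code): the same "try the best
 * interrupt mode first, then fall back" ladder used by vtpci_setup_intr(),
 * with the driver-specific allocation routines replaced by hypothetical
 * probe functions that return 0 on success.  Here the two MSIX attempts are
 * pretended to fail so the ladder settles on MSI.
 */
#include <stdio.h>

static int try_msix_pervq(void)  { return (-1); }	/* pretend failure */
static int try_msix_shared(void) { return (-1); }	/* pretend failure */
static int try_msi(void)         { return (0); }	/* pretend success */
static int try_legacy(void)      { return (0); }

int
main(void)
{
	int (*const ladder[])(void) = {
		try_msix_pervq, try_msix_shared, try_msi, try_legacy
	};
	const char *const names[] = {
		"per-VQ MSIX", "shared MSIX", "MSI", "legacy INTx"
	};
	unsigned i;

	for (i = 0; i < sizeof(ladder) / sizeof(ladder[0]); i++) {
		if (ladder[i]() == 0) {
			printf("using %s interrupts\n", names[i]);
			return (0);
		}
	}
	printf("exhausted all interrupt allocation attempts\n");
	return (1);
}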
static void
vtpci_stop(device_t dev)
{
vtpci_reset(device_get_softc(dev));
}
static int
vtpci_reinit(device_t dev, uint64_t features)
{
struct vtpci_softc *sc;
int idx, error;
sc = device_get_softc(dev);
/*
* Redrive the device initialization. This is a bit of an abuse of
* the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
* play nice.
*
* We do not allow the host device to change from what was originally
* negotiated beyond what the guest driver changed. MSIX state should
* not change, number of virtqueues and their size remain the same, etc.
* This will need to be rethought when we want to support migration.
*/
if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
vtpci_stop(dev);
/*
* Quickly drive the status through ACK and DRIVER. The device
* does not become usable again until vtpci_reinit_complete().
*/
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
vtpci_negotiate_features(dev, features);
for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
error = vtpci_reinit_virtqueue(sc, idx);
if (error)
return (error);
}
if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
error = vtpci_set_host_msix_vectors(sc);
if (error)
return (error);
}
return (0);
}
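/*
 * Standalone illustrative sketch (not driver code): the VirtIO status byte
 * is cumulative.  Writing RESET (0) clears it; every other write in
 * vtpci_set_status() ORs the new bit into the value read back from the
 * device, so a reinit walks the register through ACK, then ACK|DRIVER, and
 * finally ACK|DRIVER|DRIVER_OK.  The constants mirror the legacy VirtIO
 * status bit values; the "device register" is just a variable here.
 */
#include <stdint.h>
#include <stdio.h>

#define STATUS_RESET		0x00
#define STATUS_ACK		0x01
#define STATUS_DRIVER		0x02
#define STATUS_DRIVER_OK	0x04

static uint8_t status_reg;			/* stands in for the device */

static void
set_status(uint8_t status)
{
	if (status != STATUS_RESET)
		status |= status_reg;		/* accumulate, as the driver does */
	status_reg = status;
	printf("status register now %#04x\n", status_reg);
}

int
main(void)
{
	set_status(STATUS_RESET);		/* 0x00 */
	set_status(STATUS_ACK);			/* 0x01 */
	set_status(STATUS_DRIVER);		/* 0x03 */
	set_status(STATUS_DRIVER_OK);		/* 0x07 */
	return (0);
}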
static void
vtpci_reinit_complete(device_t dev)
{
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
static void
vtpci_notify_virtqueue(device_t dev, uint16_t queue)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
}
static uint8_t
vtpci_get_status(device_t dev)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
}
static void
vtpci_set_status(device_t dev, uint8_t status)
{
struct vtpci_softc *sc;
sc = device_get_softc(dev);
if (status != VIRTIO_CONFIG_STATUS_RESET)
status |= vtpci_get_status(dev);
vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
}
static void
vtpci_read_dev_config(device_t dev, bus_size_t offset,
void *dst, int length)
{
struct vtpci_softc *sc;
bus_size_t off;
uint8_t *d;
int size;
sc = device_get_softc(dev);
off = VIRTIO_PCI_CONFIG(sc) + offset;
for (d = dst; length > 0; d += size, off += size, length -= size) {
if (length >= 4) {
size = 4;
*(uint32_t *)d = vtpci_read_config_4(sc, off);
} else if (length >= 2) {
size = 2;
*(uint16_t *)d = vtpci_read_config_2(sc, off);
} else {
size = 1;
*d = vtpci_read_config_1(sc, off);
}
}
}
static void
vtpci_write_dev_config(device_t dev, bus_size_t offset,
void *src, int length)
{
struct vtpci_softc *sc;
bus_size_t off;
uint8_t *s;
int size;
sc = device_get_softc(dev);
off = VIRTIO_PCI_CONFIG(sc) + offset;
for (s = src; length > 0; s += size, off += size, length -= size) {
if (length >= 4) {
size = 4;
vtpci_write_config_4(sc, off, *(uint32_t *)s);
} else if (length >= 2) {
size = 2;
vtpci_write_config_2(sc, off, *(uint16_t *)s);
} else {
size = 1;
vtpci_write_config_1(sc, off, *s);
}
}
}
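/*
 * Standalone illustrative sketch (not driver code): the device config
 * accessors above copy an arbitrary-length region using the widest access
 * that still fits, i.e. 4-byte, then 2-byte, then 1-byte chunks.  The same
 * loop is applied here to a plain byte array instead of bus_space I/O.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
copy_in_chunks(uint8_t *dst, const uint8_t *src, int length)
{
	int size;

	for (; length > 0; dst += size, src += size, length -= size) {
		if (length >= 4) {
			size = 4;
			memcpy(dst, src, 4);	/* stands in for a 4-byte bus read */
		} else if (length >= 2) {
			size = 2;
			memcpy(dst, src, 2);	/* 2-byte bus read */
		} else {
			size = 1;
			*dst = *src;		/* 1-byte bus read */
		}
		printf("copied %d byte(s)\n", size);
	}
}

int
main(void)
{
	uint8_t cfg[7] = { 1, 2, 3, 4, 5, 6, 7 };
	uint8_t out[7];

	copy_in_chunks(out, cfg, (int)sizeof(cfg));	/* prints 4, 2, 1 */
	return (0);
}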
static void
vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
uint64_t features)
{
device_t dev, child;
dev = sc->vtpci_dev;
child = sc->vtpci_child_dev;
if (device_is_attached(child) || bootverbose == 0)
return;
virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
}
static void
vtpci_probe_and_attach_child(struct vtpci_softc *sc)
{
device_t dev, child;
dev = sc->vtpci_dev;
child = sc->vtpci_child_dev;
if (child == NULL)
return;
if (device_get_state(child) != DS_NOTPRESENT)
return;
if (device_probe(child) != 0)
return;
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
if (device_attach(child) != 0) {
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtpci_reset(sc);
vtpci_release_child_resources(sc);
/* Reset status for future attempt. */
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
} else {
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
VIRTIO_ATTACH_COMPLETED(child);
}
}
static int
vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
{
device_t dev;
int nmsix, cnt, required;
dev = sc->vtpci_dev;
/* Allocate an additional vector for the config changes. */
required = nvectors + 1;
nmsix = pci_msix_count(dev);
if (nmsix < required)
return (1);
cnt = required;
if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
sc->vtpci_nmsix_resources = required;
return (0);
}
pci_release_msi(dev);
return (1);
}
static int
vtpci_alloc_msi(struct vtpci_softc *sc)
{
device_t dev;
int nmsi, cnt, required;
dev = sc->vtpci_dev;
required = 1;
nmsi = pci_msi_count(dev);
if (nmsi < required)
return (1);
cnt = required;
if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required)
return (0);
pci_release_msi(dev);
return (1);
}
static int
vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc)
{
int i, nvectors, error;
if (vtpci_disable_msix != 0 ||
sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
return (ENOTSUP);
for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) {
if (sc->vtpci_vqs[i].vtv_no_intr == 0)
nvectors++;
}
error = vtpci_alloc_msix(sc, nvectors);
if (error)
return (error);
sc->vtpci_flags |= VTPCI_FLAG_MSIX;
return (0);
}
static int
vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc)
{
int error;
if (vtpci_disable_msix != 0 ||
sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
return (ENOTSUP);
error = vtpci_alloc_msix(sc, 1);
if (error)
return (error);
sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
return (0);
}
static int
vtpci_alloc_intr_msi(struct vtpci_softc *sc)
{
int error;
/* Only BHyVe supports MSI. */
if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI)
return (ENOTSUP);
error = vtpci_alloc_msi(sc);
if (error)
return (error);
sc->vtpci_flags |= VTPCI_FLAG_MSI;
return (0);
}
static int
vtpci_alloc_intr_legacy(struct vtpci_softc *sc)
{
sc->vtpci_flags |= VTPCI_FLAG_LEGACY;
return (0);
}
static int
vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags,
struct vtpci_interrupt *intr)
{
struct resource *irq;
irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags);
if (irq == NULL)
return (ENXIO);
intr->vti_irq = irq;
intr->vti_rid = rid;
return (0);
}
static int
vtpci_alloc_intr_resources(struct vtpci_softc *sc)
{
struct vtpci_interrupt *intr;
int i, rid, flags, nvq_intrs, error;
rid = 0;
flags = RF_ACTIVE;
if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
flags |= RF_SHAREABLE;
else
rid = 1;
/*
* For legacy and MSI interrupts, this single resource handles all
* interrupts. For MSIX, this resource is used for the configuration
* changed interrupt.
*/
intr = &sc->vtpci_device_interrupt;
error = vtpci_alloc_interrupt(sc, rid, flags, intr);
if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI))
return (error);
/* Subtract one for the configuration changed interrupt. */
nvq_intrs = sc->vtpci_nmsix_resources - 1;
- intr = sc->vtpci_msix_vq_interrupts = mallocarray(nvq_intrs,
+ intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtpci_msix_vq_interrupts == NULL)
return (ENOMEM);
for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
error = vtpci_alloc_interrupt(sc, rid, flags, intr);
if (error)
return (error);
}
return (0);
}
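/*
 * Standalone illustrative sketch (not driver code): the MSIX vector budget
 * arithmetic used above.  One vector is reserved for configuration-change
 * notifications, so per-VQ mode asks the host for (VQs with a callback) + 1
 * vectors and later hands out nmsix_resources - 1 of them to the
 * virtqueues.  The queue count is hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	int vqs_with_intr = 2;			/* hypothetical: one RX and one TX VQ */
	int required = vqs_with_intr + 1;	/* plus the config-change vector */
	int nmsix_resources = required;		/* assume the host granted them all */
	int nvq_intrs = nmsix_resources - 1;	/* vectors left for the virtqueues */

	printf("request %d vectors, %d of them for virtqueues\n",
	    required, nvq_intrs);
	return (0);
}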
static int
vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type)
{
struct vtpci_interrupt *intr;
int error;
intr = &sc->vtpci_device_interrupt;
error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL,
vtpci_legacy_intr, sc, &intr->vti_handler);
return (error);
}
static int
vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
{
struct vtpci_virtqueue *vqx;
struct vtpci_interrupt *intr;
int i, error;
intr = sc->vtpci_msix_vq_interrupts;
for (i = 0; i < sc->vtpci_nvqs; i++) {
vqx = &sc->vtpci_vqs[i];
if (vqx->vtv_no_intr)
continue;
error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type,
vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
&intr->vti_handler);
if (error)
return (error);
intr++;
}
return (0);
}
static int
vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
{
device_t dev;
struct vtpci_interrupt *intr;
int error;
dev = sc->vtpci_dev;
intr = &sc->vtpci_device_interrupt;
error = bus_setup_intr(dev, intr->vti_irq, type, NULL,
vtpci_config_intr, sc, &intr->vti_handler);
if (error)
return (error);
if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
intr = sc->vtpci_msix_vq_interrupts;
error = bus_setup_intr(dev, intr->vti_irq, type,
vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc,
&intr->vti_handler);
} else
error = vtpci_setup_pervq_msix_interrupts(sc, type);
return (error ? error : vtpci_set_host_msix_vectors(sc));
}
static int
vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type)
{
int error;
type |= INTR_MPSAFE;
KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags));
error = vtpci_alloc_intr_resources(sc);
if (error)
return (error);
if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
error = vtpci_setup_legacy_interrupt(sc, type);
else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
error = vtpci_setup_msi_interrupt(sc, type);
else
error = vtpci_setup_msix_interrupts(sc, type);
return (error);
}
static int
vtpci_register_msix_vector(struct vtpci_softc *sc, int offset,
struct vtpci_interrupt *intr)
{
device_t dev;
uint16_t vector;
dev = sc->vtpci_dev;
if (intr != NULL) {
/* Map from guest rid to host vector. */
vector = intr->vti_rid - 1;
} else
vector = VIRTIO_MSI_NO_VECTOR;
vtpci_write_config_2(sc, offset, vector);
/* Read vector to determine if the host had sufficient resources. */
if (vtpci_read_config_2(sc, offset) != vector) {
device_printf(dev,
"insufficient host resources for MSIX interrupts\n");
return (ENODEV);
}
return (0);
}
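/*
 * Standalone illustrative sketch (not driver code): registering an MSIX
 * vector with the host is a write followed by a read-back, and the host
 * answers with VIRTIO_MSI_NO_VECTOR (0xffff) when it cannot provide the
 * vector.  The "device" below is a toy that only accepts vectors 0 and 1;
 * the rid-to-vector mapping done by the real driver is omitted.
 */
#include <stdint.h>
#include <stdio.h>

#define MSI_NO_VECTOR	0xffff

static uint16_t device_vector = MSI_NO_VECTOR;	/* toy device register */

static void
device_write(uint16_t vector)
{
	/* The toy host only has two vectors to hand out. */
	device_vector = (vector <= 1) ? vector : MSI_NO_VECTOR;
}

static int
register_vector(uint16_t vector)
{
	device_write(vector);
	if (device_vector != vector) {		/* read back and compare */
		printf("insufficient host resources for vector %u\n", vector);
		return (-1);
	}
	printf("vector %u accepted\n", vector);
	return (0);
}

int
main(void)
{
	(void)register_vector(0);	/* accepted */
	(void)register_vector(5);	/* rejected by the toy host */
	return (0);
}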
static int
vtpci_set_host_msix_vectors(struct vtpci_softc *sc)
{
struct vtpci_interrupt *intr, *tintr;
int idx, offset, error;
intr = &sc->vtpci_device_interrupt;
offset = VIRTIO_MSI_CONFIG_VECTOR;
error = vtpci_register_msix_vector(sc, offset, intr);
if (error)
return (error);
intr = sc->vtpci_msix_vq_interrupts;
offset = VIRTIO_MSI_QUEUE_VECTOR;
for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
vtpci_select_virtqueue(sc, idx);
if (sc->vtpci_vqs[idx].vtv_no_intr)
tintr = NULL;
else
tintr = intr;
error = vtpci_register_msix_vector(sc, offset, tintr);
if (error)
break;
/*
* For shared MSIX, all the virtqueues share the first
* interrupt.
*/
if (!sc->vtpci_vqs[idx].vtv_no_intr &&
(sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
intr++;
}
return (error);
}
static int
vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx)
{
struct vtpci_virtqueue *vqx;
struct virtqueue *vq;
int error;
uint16_t size;
vqx = &sc->vtpci_vqs[idx];
vq = vqx->vtv_vq;
KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
vtpci_select_virtqueue(sc, idx);
size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
error = virtqueue_reinit(vq, size);
if (error)
return (error);
vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
return (0);
}
static void
vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr)
{
device_t dev;
dev = sc->vtpci_dev;
if (intr->vti_handler != NULL) {
bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
intr->vti_handler = NULL;
}
if (intr->vti_irq != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
intr->vti_irq);
intr->vti_irq = NULL;
intr->vti_rid = -1;
}
}
static void
vtpci_free_interrupts(struct vtpci_softc *sc)
{
struct vtpci_interrupt *intr;
int i, nvq_intrs;
vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt);
if (sc->vtpci_nmsix_resources != 0) {
nvq_intrs = sc->vtpci_nmsix_resources - 1;
sc->vtpci_nmsix_resources = 0;
intr = sc->vtpci_msix_vq_interrupts;
if (intr != NULL) {
for (i = 0; i < nvq_intrs; i++, intr++)
vtpci_free_interrupt(sc, intr);
free(sc->vtpci_msix_vq_interrupts, M_DEVBUF);
sc->vtpci_msix_vq_interrupts = NULL;
}
}
if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
pci_release_msi(sc->vtpci_dev);
sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
}
static void
vtpci_free_virtqueues(struct vtpci_softc *sc)
{
struct vtpci_virtqueue *vqx;
int idx;
for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
vqx = &sc->vtpci_vqs[idx];
vtpci_select_virtqueue(sc, idx);
vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
virtqueue_free(vqx->vtv_vq);
vqx->vtv_vq = NULL;
}
free(sc->vtpci_vqs, M_DEVBUF);
sc->vtpci_vqs = NULL;
sc->vtpci_nvqs = 0;
}
static void
vtpci_release_child_resources(struct vtpci_softc *sc)
{
vtpci_free_interrupts(sc);
vtpci_free_virtqueues(sc);
}
static void
vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc)
{
int idx;
if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR,
VIRTIO_MSI_NO_VECTOR);
for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
vtpci_select_virtqueue(sc, idx);
vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR,
VIRTIO_MSI_NO_VECTOR);
}
}
vtpci_free_interrupts(sc);
}
static void
vtpci_reset(struct vtpci_softc *sc)
{
/*
* Setting the status to RESET sets the host device to
* the original, uninitialized state.
*/
vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET);
}
static void
vtpci_select_virtqueue(struct vtpci_softc *sc, int idx)
{
vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
}
static void
vtpci_legacy_intr(void *xsc)
{
struct vtpci_softc *sc;
struct vtpci_virtqueue *vqx;
int i;
uint8_t isr;
sc = xsc;
vqx = &sc->vtpci_vqs[0];
/* Reading the ISR also clears it. */
isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR);
if (isr & VIRTIO_PCI_ISR_CONFIG)
vtpci_config_intr(sc);
if (isr & VIRTIO_PCI_ISR_INTR) {
for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
if (vqx->vtv_no_intr == 0)
virtqueue_intr(vqx->vtv_vq);
}
}
}
static int
vtpci_vq_shared_intr_filter(void *xsc)
{
struct vtpci_softc *sc;
struct vtpci_virtqueue *vqx;
int i, rc;
rc = 0;
sc = xsc;
vqx = &sc->vtpci_vqs[0];
for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
if (vqx->vtv_no_intr == 0)
rc |= virtqueue_intr_filter(vqx->vtv_vq);
}
return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
}
static void
vtpci_vq_shared_intr(void *xsc)
{
struct vtpci_softc *sc;
struct vtpci_virtqueue *vqx;
int i;
sc = xsc;
vqx = &sc->vtpci_vqs[0];
for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
if (vqx->vtv_no_intr == 0)
virtqueue_intr(vqx->vtv_vq);
}
}
static int
vtpci_vq_intr_filter(void *xvq)
{
struct virtqueue *vq;
int rc;
vq = xvq;
rc = virtqueue_intr_filter(vq);
return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
}
static void
vtpci_vq_intr(void *xvq)
{
struct virtqueue *vq;
vq = xvq;
virtqueue_intr(vq);
}
static void
vtpci_config_intr(void *xsc)
{
struct vtpci_softc *sc;
device_t child;
sc = xsc;
child = sc->vtpci_child_dev;
if (child != NULL)
VIRTIO_CONFIG_CHANGE(child);
}
Index: head/sys/dev/vmware/vmxnet3/if_vmx.c
===================================================================
--- head/sys/dev/vmware/vmxnet3/if_vmx.c (revision 328217)
+++ head/sys/dev/vmware/vmxnet3/if_vmx.c (revision 328218)
@@ -1,3950 +1,3950 @@
/*-
* Copyright (c) 2013 Tsubai Masanari
* Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
*/
/* Driver for VMware vmxnet3 virtual ethernet devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <machine/in_cksum.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "if_vmxreg.h"
#include "if_vmxvar.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#ifdef VMXNET3_FAILPOINTS
#include <sys/fail.h>
static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
"vmxnet3 fail points");
#define VMXNET3_FP _debug_fail_point_vmxnet3
#endif
static int vmxnet3_probe(device_t);
static int vmxnet3_attach(device_t);
static int vmxnet3_detach(device_t);
static int vmxnet3_shutdown(device_t);
static int vmxnet3_alloc_resources(struct vmxnet3_softc *);
static void vmxnet3_free_resources(struct vmxnet3_softc *);
static int vmxnet3_check_version(struct vmxnet3_softc *);
static void vmxnet3_initial_config(struct vmxnet3_softc *);
static void vmxnet3_check_multiqueue(struct vmxnet3_softc *);
static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
struct vmxnet3_interrupt *);
static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
static void vmxnet3_free_interrupt(struct vmxnet3_softc *,
struct vmxnet3_interrupt *);
static void vmxnet3_free_interrupts(struct vmxnet3_softc *);
#ifndef VMXNET3_LEGACY_TX
static int vmxnet3_alloc_taskqueue(struct vmxnet3_softc *);
static void vmxnet3_start_taskqueue(struct vmxnet3_softc *);
static void vmxnet3_drain_taskqueue(struct vmxnet3_softc *);
static void vmxnet3_free_taskqueue(struct vmxnet3_softc *);
#endif
static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
static void vmxnet3_init_hwassist(struct vmxnet3_softc *);
static void vmxnet3_reinit_interface(struct vmxnet3_softc *);
static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_data(struct vmxnet3_softc *);
static void vmxnet3_free_data(struct vmxnet3_softc *);
static int vmxnet3_setup_interface(struct vmxnet3_softc *);
static void vmxnet3_evintr(struct vmxnet3_softc *);
static void vmxnet3_txq_eof(struct vmxnet3_txqueue *);
static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
struct vmxnet3_rxring *, int);
static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
static void vmxnet3_legacy_intr(void *);
static void vmxnet3_txq_intr(void *);
static void vmxnet3_rxq_intr(void *);
static void vmxnet3_event_intr(void *);
static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static void vmxnet3_stop(struct vmxnet3_softc *);
static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
static int vmxnet3_enable_device(struct vmxnet3_softc *);
static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
static int vmxnet3_reinit(struct vmxnet3_softc *);
static void vmxnet3_init_locked(struct vmxnet3_softc *);
static void vmxnet3_init(void *);
static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *,struct mbuf *,
int *, int *, int *);
static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
bus_dmamap_t, bus_dma_segment_t [], int *);
static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
#ifdef VMXNET3_LEGACY_TX
static void vmxnet3_start_locked(struct ifnet *);
static void vmxnet3_start(struct ifnet *);
#else
static int vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *,
struct mbuf *);
static int vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *);
static void vmxnet3_txq_tq_deferred(void *, int);
#endif
static void vmxnet3_txq_start(struct vmxnet3_txqueue *);
static void vmxnet3_tx_start_all(struct vmxnet3_softc *);
static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
uint16_t);
static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
static int vmxnet3_change_mtu(struct vmxnet3_softc *, int);
static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t vmxnet3_get_counter(struct ifnet *, ift_counter);
#ifndef VMXNET3_LEGACY_TX
static void vmxnet3_qflush(struct ifnet *);
#endif
static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
static void vmxnet3_tick(void *);
static void vmxnet3_link_status(struct vmxnet3_softc *);
static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
static int vmxnet3_media_change(struct ifnet *);
static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
static void vmxnet3_get_lladdr(struct vmxnet3_softc *);
static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void vmxnet3_setup_sysctl(struct vmxnet3_softc *);
static void vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
uint32_t);
static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
uint32_t);
static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
static void vmxnet3_enable_intr(struct vmxnet3_softc *, int);
static void vmxnet3_disable_intr(struct vmxnet3_softc *, int);
static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
bus_size_t, struct vmxnet3_dma_alloc *);
static void vmxnet3_dma_free(struct vmxnet3_softc *,
struct vmxnet3_dma_alloc *);
static int vmxnet3_tunable_int(struct vmxnet3_softc *,
const char *, int);
typedef enum {
VMXNET3_BARRIER_RD,
VMXNET3_BARRIER_WR,
VMXNET3_BARRIER_RDWR,
} vmxnet3_barrier_t;
static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
/* Tunables. */
static int vmxnet3_mq_disable = 0;
TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable);
static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES;
TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue);
static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES;
TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue);
static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);
static device_method_t vmxnet3_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vmxnet3_probe),
DEVMETHOD(device_attach, vmxnet3_attach),
DEVMETHOD(device_detach, vmxnet3_detach),
DEVMETHOD(device_shutdown, vmxnet3_shutdown),
DEVMETHOD_END
};
static driver_t vmxnet3_driver = {
"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
};
static devclass_t vmxnet3_devclass;
DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
MODULE_DEPEND(vmx, pci, 1, 1, 1);
MODULE_DEPEND(vmx, ether, 1, 1, 1);
#define VMXNET3_VMWARE_VENDOR_ID 0x15AD
#define VMXNET3_VMWARE_DEVICE_ID 0x07B0
static int
vmxnet3_probe(device_t dev)
{
if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
static int
vmxnet3_attach(device_t dev)
{
struct vmxnet3_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vmx_dev = dev;
pci_enable_busmaster(dev);
VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);
vmxnet3_initial_config(sc);
error = vmxnet3_alloc_resources(sc);
if (error)
goto fail;
error = vmxnet3_check_version(sc);
if (error)
goto fail;
error = vmxnet3_alloc_rxtx_queues(sc);
if (error)
goto fail;
#ifndef VMXNET3_LEGACY_TX
error = vmxnet3_alloc_taskqueue(sc);
if (error)
goto fail;
#endif
error = vmxnet3_alloc_interrupts(sc);
if (error)
goto fail;
vmxnet3_check_multiqueue(sc);
error = vmxnet3_alloc_data(sc);
if (error)
goto fail;
error = vmxnet3_setup_interface(sc);
if (error)
goto fail;
error = vmxnet3_setup_interrupts(sc);
if (error) {
ether_ifdetach(sc->vmx_ifp);
device_printf(dev, "could not set up interrupt\n");
goto fail;
}
vmxnet3_setup_sysctl(sc);
#ifndef VMXNET3_LEGACY_TX
vmxnet3_start_taskqueue(sc);
#endif
fail:
if (error)
vmxnet3_detach(dev);
return (error);
}
static int
vmxnet3_detach(device_t dev)
{
struct vmxnet3_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->vmx_ifp;
if (device_is_attached(dev)) {
VMXNET3_CORE_LOCK(sc);
vmxnet3_stop(sc);
VMXNET3_CORE_UNLOCK(sc);
callout_drain(&sc->vmx_tick);
#ifndef VMXNET3_LEGACY_TX
vmxnet3_drain_taskqueue(sc);
#endif
ether_ifdetach(ifp);
}
if (sc->vmx_vlan_attach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
sc->vmx_vlan_attach = NULL;
}
if (sc->vmx_vlan_detach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
sc->vmx_vlan_detach = NULL;
}
#ifndef VMXNET3_LEGACY_TX
vmxnet3_free_taskqueue(sc);
#endif
vmxnet3_free_interrupts(sc);
if (ifp != NULL) {
if_free(ifp);
sc->vmx_ifp = NULL;
}
ifmedia_removeall(&sc->vmx_media);
vmxnet3_free_data(sc);
vmxnet3_free_resources(sc);
vmxnet3_free_rxtx_queues(sc);
VMXNET3_CORE_LOCK_DESTROY(sc);
return (0);
}
static int
vmxnet3_shutdown(device_t dev)
{
return (0);
}
static int
vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
{
device_t dev;
int rid;
dev = sc->vmx_dev;
rid = PCIR_BAR(0);
sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->vmx_res0 == NULL) {
device_printf(dev,
"could not map BAR0 memory\n");
return (ENXIO);
}
sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
rid = PCIR_BAR(1);
sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->vmx_res1 == NULL) {
device_printf(dev,
"could not map BAR1 memory\n");
return (ENXIO);
}
sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
rid = PCIR_BAR(2);
sc->vmx_msix_res = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
}
if (sc->vmx_msix_res == NULL)
sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
return (0);
}
static void
vmxnet3_free_resources(struct vmxnet3_softc *sc)
{
device_t dev;
int rid;
dev = sc->vmx_dev;
if (sc->vmx_res0 != NULL) {
rid = PCIR_BAR(0);
bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
sc->vmx_res0 = NULL;
}
if (sc->vmx_res1 != NULL) {
rid = PCIR_BAR(1);
bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
sc->vmx_res1 = NULL;
}
if (sc->vmx_msix_res != NULL) {
rid = PCIR_BAR(2);
bus_release_resource(dev, SYS_RES_MEMORY, rid,
sc->vmx_msix_res);
sc->vmx_msix_res = NULL;
}
}
static int
vmxnet3_check_version(struct vmxnet3_softc *sc)
{
device_t dev;
uint32_t version;
dev = sc->vmx_dev;
version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
if ((version & 0x01) == 0) {
device_printf(dev, "unsupported hardware version %#x\n",
version);
return (ENOTSUP);
}
vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
if ((version & 0x01) == 0) {
device_printf(dev, "unsupported UPT version %#x\n", version);
return (ENOTSUP);
}
vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
return (0);
}
static int
trunc_powerof2(int val)
{
return (1U << (fls(val) - 1));
}
static void
vmxnet3_initial_config(struct vmxnet3_softc *sc)
{
int nqueue, ndesc;
nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue);
if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1)
nqueue = VMXNET3_DEF_TX_QUEUES;
if (nqueue > mp_ncpus)
nqueue = mp_ncpus;
sc->vmx_max_ntxqueues = trunc_powerof2(nqueue);
nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue);
if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1)
nqueue = VMXNET3_DEF_RX_QUEUES;
if (nqueue > mp_ncpus)
nqueue = mp_ncpus;
sc->vmx_max_nrxqueues = trunc_powerof2(nqueue);
if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) {
sc->vmx_max_nrxqueues = 1;
sc->vmx_max_ntxqueues = 1;
}
ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
ndesc = VMXNET3_DEF_TX_NDESC;
if (ndesc & VMXNET3_MASK_TX_NDESC)
ndesc &= ~VMXNET3_MASK_TX_NDESC;
sc->vmx_ntxdescs = ndesc;
ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
ndesc = VMXNET3_DEF_RX_NDESC;
if (ndesc & VMXNET3_MASK_RX_NDESC)
ndesc &= ~VMXNET3_MASK_RX_NDESC;
sc->vmx_nrxdescs = ndesc;
sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
}
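/*
 * Standalone illustrative sketch (not driver code): vmxnet3_initial_config()
 * clamps the queue counts down to a power of two (trunc_powerof2(), which
 * uses fls()) and rounds descriptor counts down to a required multiple by
 * clearing the low mask bits.  The loop below is a portable stand-in for
 * the fls()-based bit trick, and the mask value of 31 is hypothetical.
 */
#include <stdio.h>

static int
trunc_pow2(int val)
{
	int p;

	for (p = 1; p * 2 <= val; p *= 2)
		;
	return (p);
}

int
main(void)
{
	int nqueue = 6;
	int ndesc = 500;
	int mask = 31;			/* hypothetical alignment mask */

	printf("%d queues -> %d\n", nqueue, trunc_pow2(nqueue));	/* 4 */
	printf("%d descriptors -> %d\n", ndesc, ndesc & ~mask);		/* 480 */
	return (0);
}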
static void
vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
{
if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
goto out;
/* BMV: Just use the maximum configured for now. */
sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;
if (sc->vmx_nrxqueues > 1)
sc->vmx_flags |= VMXNET3_FLAG_RSS;
return;
out:
sc->vmx_ntxqueues = 1;
sc->vmx_nrxqueues = 1;
}
static int
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
{
device_t dev;
int nmsix, cnt, required;
dev = sc->vmx_dev;
if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
return (1);
/* Allocate an additional vector for the events interrupt. */
required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1;
nmsix = pci_msix_count(dev);
if (nmsix < required)
return (1);
cnt = required;
if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
sc->vmx_nintrs = required;
return (0);
} else
pci_release_msi(dev);
/* BMV TODO Fall back to sharing MSIX vectors if possible. */
return (1);
}
static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
device_t dev;
int nmsi, cnt, required;
dev = sc->vmx_dev;
required = 1;
nmsi = pci_msi_count(dev);
if (nmsi < required)
return (1);
cnt = required;
if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
sc->vmx_nintrs = 1;
return (0);
} else
pci_release_msi(dev);
return (1);
}
static int
vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
{
sc->vmx_nintrs = 1;
return (0);
}
static int
vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
struct vmxnet3_interrupt *intr)
{
struct resource *irq;
irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
if (irq == NULL)
return (ENXIO);
intr->vmxi_irq = irq;
intr->vmxi_rid = rid;
return (0);
}
static int
vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
{
int i, rid, flags, error;
rid = 0;
flags = RF_ACTIVE;
if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
flags |= RF_SHAREABLE;
else
rid = 1;
for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
error = vmxnet3_alloc_interrupt(sc, rid, flags,
&sc->vmx_intrs[i]);
if (error)
return (error);
}
return (0);
}
static int
vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
{
device_t dev;
struct vmxnet3_txqueue *txq;
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_interrupt *intr;
enum intr_type type;
int i, error;
dev = sc->vmx_dev;
intr = &sc->vmx_intrs[0];
type = INTR_TYPE_NET | INTR_MPSAFE;
for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
txq = &sc->vmx_txq[i];
error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
vmxnet3_txq_intr, txq, &intr->vmxi_handler);
if (error)
return (error);
bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
"tq%d", i);
txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
}
for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
rxq = &sc->vmx_rxq[i];
error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
if (error)
return (error);
bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
"rq%d", i);
rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
}
error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
vmxnet3_event_intr, sc, &intr->vmxi_handler);
if (error)
return (error);
bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event");
sc->vmx_event_intr_idx = intr->vmxi_rid - 1;
return (0);
}
static int
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
{
struct vmxnet3_interrupt *intr;
int i, error;
intr = &sc->vmx_intrs[0];
error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
&intr->vmxi_handler);
for (i = 0; i < sc->vmx_ntxqueues; i++)
sc->vmx_txq[i].vxtxq_intr_idx = 0;
for (i = 0; i < sc->vmx_nrxqueues; i++)
sc->vmx_rxq[i].vxrxq_intr_idx = 0;
sc->vmx_event_intr_idx = 0;
return (error);
}
static void
vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
{
struct vmxnet3_txqueue *txq;
struct vmxnet3_txq_shared *txs;
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_rxq_shared *rxs;
int i;
sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
for (i = 0; i < sc->vmx_ntxqueues; i++) {
txq = &sc->vmx_txq[i];
txs = txq->vxtxq_ts;
txs->intr_idx = txq->vxtxq_intr_idx;
}
for (i = 0; i < sc->vmx_nrxqueues; i++) {
rxq = &sc->vmx_rxq[i];
rxs = rxq->vxrxq_rs;
rxs->intr_idx = rxq->vxrxq_intr_idx;
}
}
static int
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
{
int error;
error = vmxnet3_alloc_intr_resources(sc);
if (error)
return (error);
switch (sc->vmx_intr_type) {
case VMXNET3_IT_MSIX:
error = vmxnet3_setup_msix_interrupts(sc);
break;
case VMXNET3_IT_MSI:
case VMXNET3_IT_LEGACY:
error = vmxnet3_setup_legacy_interrupt(sc);
break;
default:
panic("%s: invalid interrupt type %d", __func__,
sc->vmx_intr_type);
}
if (error == 0)
vmxnet3_set_interrupt_idx(sc);
return (error);
}
static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
device_t dev;
uint32_t config;
int error;
dev = sc->vmx_dev;
config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
sc->vmx_intr_type = config & 0x03;
sc->vmx_intr_mask_mode = (config >> 2) & 0x03;
switch (sc->vmx_intr_type) {
case VMXNET3_IT_AUTO:
sc->vmx_intr_type = VMXNET3_IT_MSIX;
/* FALLTHROUGH */
case VMXNET3_IT_MSIX:
error = vmxnet3_alloc_msix_interrupts(sc);
if (error == 0)
break;
sc->vmx_intr_type = VMXNET3_IT_MSI;
/* FALLTHROUGH */
case VMXNET3_IT_MSI:
error = vmxnet3_alloc_msi_interrupts(sc);
if (error == 0)
break;
sc->vmx_intr_type = VMXNET3_IT_LEGACY;
/* FALLTHROUGH */
case VMXNET3_IT_LEGACY:
error = vmxnet3_alloc_legacy_interrupts(sc);
if (error == 0)
break;
/* FALLTHROUGH */
default:
sc->vmx_intr_type = -1;
device_printf(dev, "cannot allocate any interrupt resources\n");
return (ENXIO);
}
return (error);
}
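/*
 * Standalone illustrative sketch (not driver code): the value returned by
 * the VMXNET3_CMD_GET_INTRCFG command packs two small fields, the preferred
 * interrupt type in bits 0-1 and the mask mode in bits 2-3, which is how
 * vmxnet3_alloc_interrupts() decodes it.  The register value is made up.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t config = 0x05;				/* hypothetical register value */
	int intr_type = config & 0x03;			/* bits 0-1 -> 1 */
	int intr_mask_mode = (config >> 2) & 0x03;	/* bits 2-3 -> 1 */

	printf("interrupt type %d, mask mode %d\n", intr_type, intr_mask_mode);
	return (0);
}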
static void
vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
struct vmxnet3_interrupt *intr)
{
device_t dev;
dev = sc->vmx_dev;
if (intr->vmxi_handler != NULL) {
bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
intr->vmxi_handler = NULL;
}
if (intr->vmxi_irq != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
intr->vmxi_irq);
intr->vmxi_irq = NULL;
intr->vmxi_rid = -1;
}
}
static void
vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
{
int i;
for (i = 0; i < sc->vmx_nintrs; i++)
vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
sc->vmx_intr_type == VMXNET3_IT_MSIX)
pci_release_msi(sc->vmx_dev);
}
#ifndef VMXNET3_LEGACY_TX
static int
vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc)
{
device_t dev;
dev = sc->vmx_dev;
sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT,
taskqueue_thread_enqueue, &sc->vmx_tq);
if (sc->vmx_tq == NULL)
return (ENOMEM);
return (0);
}
static void
vmxnet3_start_taskqueue(struct vmxnet3_softc *sc)
{
device_t dev;
int nthreads, error;
dev = sc->vmx_dev;
/*
* The taskqueue is typically not frequently used, so a dedicated
* thread for each queue is unnecessary.
*/
nthreads = MAX(1, sc->vmx_ntxqueues / 2);
/*
* Most drivers just ignore the return value - it only fails
* with ENOMEM so an error is not likely. It is hard for us
* to recover from an error here.
*/
error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET,
"%s taskq", device_get_nameunit(dev));
if (error)
device_printf(dev, "failed to start taskqueue: %d", error);
}
static void
vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc)
{
struct vmxnet3_txqueue *txq;
int i;
if (sc->vmx_tq != NULL) {
for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
txq = &sc->vmx_txq[i];
taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask);
}
}
}
static void
vmxnet3_free_taskqueue(struct vmxnet3_softc *sc)
{
if (sc->vmx_tq != NULL) {
taskqueue_free(sc->vmx_tq);
sc->vmx_tq = NULL;
}
}
#endif
static int
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
{
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_rxring *rxr;
int i;
rxq = &sc->vmx_rxq[q];
snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
device_get_nameunit(sc->vmx_dev), q);
mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);
rxq->vxrxq_sc = sc;
rxq->vxrxq_id = q;
for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
rxr->vxrxr_rid = i;
rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
- rxr->vxrxr_rxbuf = mallocarray(rxr->vxrxr_ndesc,
+ rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
if (rxr->vxrxr_rxbuf == NULL)
return (ENOMEM);
rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
}
return (0);
}
static int
vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
{
struct vmxnet3_txqueue *txq;
struct vmxnet3_txring *txr;
txq = &sc->vmx_txq[q];
txr = &txq->vxtxq_cmd_ring;
snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
device_get_nameunit(sc->vmx_dev), q);
mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);
txq->vxtxq_sc = sc;
txq->vxtxq_id = q;
txr->vxtxr_ndesc = sc->vmx_ntxdescs;
- txr->vxtxr_txbuf = mallocarray(txr->vxtxr_ndesc,
+ txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
if (txr->vxtxr_txbuf == NULL)
return (ENOMEM);
txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
#ifndef VMXNET3_LEGACY_TX
TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq);
txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF,
M_NOWAIT, &txq->vxtxq_mtx);
if (txq->vxtxq_br == NULL)
return (ENOMEM);
#endif
return (0);
}
static int
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
{
int i, error;
/*
* Only attempt to create multiple queues if MSIX is available. MSIX is
* disabled by default because it's apparently broken for devices passed
* through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable
* must be set to zero for MSIX. This check prevents us from allocating
* queue structures that we will not use.
*/
if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
sc->vmx_max_nrxqueues = 1;
sc->vmx_max_ntxqueues = 1;
}
- sc->vmx_rxq = mallocarray(sc->vmx_max_nrxqueues,
- sizeof(struct vmxnet3_rxqueue), M_DEVBUF, M_NOWAIT | M_ZERO);
- sc->vmx_txq = mallocarray(sc->vmx_max_ntxqueues,
- sizeof(struct vmxnet3_txqueue), M_DEVBUF, M_NOWAIT | M_ZERO);
+ sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
+ sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
+ sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
+ sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
return (ENOMEM);
for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
error = vmxnet3_init_rxq(sc, i);
if (error)
return (error);
}
for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
error = vmxnet3_init_txq(sc, i);
if (error)
return (error);
}
return (0);
}
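/*
 * Editor's note: the hunks above replace mallocarray(9) with a plain
 * malloc(9) of "count * size".  Purely for illustration, the sketch
 * below shows the kind of multiplication-overflow guard mallocarray()
 * provides and an open-coded malloc() forgoes; "example_alloc_array"
 * is a hypothetical helper, not part of the driver.
 */
#if 0	/* illustrative sketch, not compiled */
static void *
example_alloc_array(size_t nmemb, size_t size, struct malloc_type *type,
    int flags)
{

	/* Refuse requests whose byte count would overflow size_t. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size, type, flags));
}
#endif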
static void
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
{
struct vmxnet3_rxring *rxr;
int i;
rxq->vxrxq_sc = NULL;
rxq->vxrxq_id = -1;
for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
if (rxr->vxrxr_rxbuf != NULL) {
free(rxr->vxrxr_rxbuf, M_DEVBUF);
rxr->vxrxr_rxbuf = NULL;
}
}
if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
mtx_destroy(&rxq->vxrxq_mtx);
}
static void
vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
{
struct vmxnet3_txring *txr;
txr = &txq->vxtxq_cmd_ring;
txq->vxtxq_sc = NULL;
txq->vxtxq_id = -1;
#ifndef VMXNET3_LEGACY_TX
if (txq->vxtxq_br != NULL) {
buf_ring_free(txq->vxtxq_br, M_DEVBUF);
txq->vxtxq_br = NULL;
}
#endif
if (txr->vxtxr_txbuf != NULL) {
free(txr->vxtxr_txbuf, M_DEVBUF);
txr->vxtxr_txbuf = NULL;
}
if (mtx_initialized(&txq->vxtxq_mtx) != 0)
mtx_destroy(&txq->vxtxq_mtx);
}
static void
vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
{
int i;
if (sc->vmx_rxq != NULL) {
for (i = 0; i < sc->vmx_max_nrxqueues; i++)
vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
free(sc->vmx_rxq, M_DEVBUF);
sc->vmx_rxq = NULL;
}
if (sc->vmx_txq != NULL) {
for (i = 0; i < sc->vmx_max_ntxqueues; i++)
vmxnet3_destroy_txq(&sc->vmx_txq[i]);
free(sc->vmx_txq, M_DEVBUF);
sc->vmx_txq = NULL;
}
}
static int
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
{
device_t dev;
uint8_t *kva;
size_t size;
int i, error;
dev = sc->vmx_dev;
size = sizeof(struct vmxnet3_driver_shared);
error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
if (error) {
device_printf(dev, "cannot alloc shared memory\n");
return (error);
}
sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
if (error) {
device_printf(dev, "cannot alloc queue shared memory\n");
return (error);
}
sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
kva = sc->vmx_qs;
for (i = 0; i < sc->vmx_ntxqueues; i++) {
sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
kva += sizeof(struct vmxnet3_txq_shared);
}
for (i = 0; i < sc->vmx_nrxqueues; i++) {
sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
kva += sizeof(struct vmxnet3_rxq_shared);
}
if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
size = sizeof(struct vmxnet3_rss_shared);
error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
if (error) {
device_printf(dev, "cannot alloc rss shared memory\n");
return (error);
}
sc->vmx_rss =
(struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
}
return (0);
}
static void
vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
{
if (sc->vmx_rss != NULL) {
vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
sc->vmx_rss = NULL;
}
if (sc->vmx_qs != NULL) {
vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
sc->vmx_qs = NULL;
}
if (sc->vmx_ds != NULL) {
vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
sc->vmx_ds = NULL;
}
}
static int
vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
{
device_t dev;
struct vmxnet3_txqueue *txq;
struct vmxnet3_txring *txr;
struct vmxnet3_comp_ring *txc;
size_t descsz, compsz;
int i, q, error;
dev = sc->vmx_dev;
for (q = 0; q < sc->vmx_ntxqueues; q++) {
txq = &sc->vmx_txq[q];
txr = &txq->vxtxq_cmd_ring;
txc = &txq->vxtxq_comp_ring;
descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
error = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
VMXNET3_TX_MAXSIZE, /* maxsize */
VMXNET3_TX_MAXSEGS, /* nsegments */
VMXNET3_TX_MAXSEGSIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&txr->vxtxr_txtag);
if (error) {
device_printf(dev,
"unable to create Tx buffer tag for queue %d\n", q);
return (error);
}
error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
if (error) {
device_printf(dev, "cannot alloc Tx descriptors for "
"queue %d error %d\n", q, error);
return (error);
}
txr->vxtxr_txd =
(struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
if (error) {
device_printf(dev, "cannot alloc Tx comp descriptors "
"for queue %d error %d\n", q, error);
return (error);
}
txc->vxcr_u.txcd =
(struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
for (i = 0; i < txr->vxtxr_ndesc; i++) {
error = bus_dmamap_create(txr->vxtxr_txtag, 0,
&txr->vxtxr_txbuf[i].vtxb_dmamap);
if (error) {
device_printf(dev, "unable to create Tx buf "
"dmamap for queue %d idx %d\n", q, i);
return (error);
}
}
}
return (0);
}
static void
vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
{
device_t dev;
struct vmxnet3_txqueue *txq;
struct vmxnet3_txring *txr;
struct vmxnet3_comp_ring *txc;
struct vmxnet3_txbuf *txb;
int i, q;
dev = sc->vmx_dev;
for (q = 0; q < sc->vmx_ntxqueues; q++) {
txq = &sc->vmx_txq[q];
txr = &txq->vxtxq_cmd_ring;
txc = &txq->vxtxq_comp_ring;
for (i = 0; i < txr->vxtxr_ndesc; i++) {
txb = &txr->vxtxr_txbuf[i];
if (txb->vtxb_dmamap != NULL) {
bus_dmamap_destroy(txr->vxtxr_txtag,
txb->vtxb_dmamap);
txb->vtxb_dmamap = NULL;
}
}
if (txc->vxcr_u.txcd != NULL) {
vmxnet3_dma_free(sc, &txc->vxcr_dma);
txc->vxcr_u.txcd = NULL;
}
if (txr->vxtxr_txd != NULL) {
vmxnet3_dma_free(sc, &txr->vxtxr_dma);
txr->vxtxr_txd = NULL;
}
if (txr->vxtxr_txtag != NULL) {
bus_dma_tag_destroy(txr->vxtxr_txtag);
txr->vxtxr_txtag = NULL;
}
}
}
static int
vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
{
device_t dev;
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_rxring *rxr;
struct vmxnet3_comp_ring *rxc;
int descsz, compsz;
int i, j, q, error;
dev = sc->vmx_dev;
for (q = 0; q < sc->vmx_nrxqueues; q++) {
rxq = &sc->vmx_rxq[q];
rxc = &rxq->vxrxq_comp_ring;
compsz = 0;
for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
descsz = rxr->vxrxr_ndesc *
sizeof(struct vmxnet3_rxdesc);
compsz += rxr->vxrxr_ndesc *
sizeof(struct vmxnet3_rxcompdesc);
error = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MJUMPAGESIZE, /* maxsize */
1, /* nsegments */
MJUMPAGESIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&rxr->vxrxr_rxtag);
if (error) {
device_printf(dev,
"unable to create Rx buffer tag for "
"queue %d\n", q);
return (error);
}
error = vmxnet3_dma_malloc(sc, descsz, 512,
&rxr->vxrxr_dma);
if (error) {
device_printf(dev, "cannot allocate Rx "
"descriptors for queue %d/%d error %d\n",
i, q, error);
return (error);
}
rxr->vxrxr_rxd =
(struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
}
error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
if (error) {
device_printf(dev, "cannot alloc Rx comp descriptors "
"for queue %d error %d\n", q, error);
return (error);
}
rxc->vxcr_u.rxcd =
(struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
&rxr->vxrxr_spare_dmap);
if (error) {
device_printf(dev, "unable to create spare "
"dmamap for queue %d/%d error %d\n",
q, i, error);
return (error);
}
for (j = 0; j < rxr->vxrxr_ndesc; j++) {
error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
&rxr->vxrxr_rxbuf[j].vrxb_dmamap);
if (error) {
device_printf(dev, "unable to create "
"dmamap for queue %d/%d slot %d "
"error %d\n",
q, i, j, error);
return (error);
}
}
}
}
return (0);
}
static void
vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
{
device_t dev;
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_rxring *rxr;
struct vmxnet3_comp_ring *rxc;
struct vmxnet3_rxbuf *rxb;
int i, j, q;
dev = sc->vmx_dev;
for (q = 0; q < sc->vmx_nrxqueues; q++) {
rxq = &sc->vmx_rxq[q];
rxc = &rxq->vxrxq_comp_ring;
for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
if (rxr->vxrxr_spare_dmap != NULL) {
bus_dmamap_destroy(rxr->vxrxr_rxtag,
rxr->vxrxr_spare_dmap);
rxr->vxrxr_spare_dmap = NULL;
}
for (j = 0; j < rxr->vxrxr_ndesc; j++) {
rxb = &rxr->vxrxr_rxbuf[j];
if (rxb->vrxb_dmamap != NULL) {
bus_dmamap_destroy(rxr->vxrxr_rxtag,
rxb->vrxb_dmamap);
rxb->vrxb_dmamap = NULL;
}
}
}
if (rxc->vxcr_u.rxcd != NULL) {
vmxnet3_dma_free(sc, &rxc->vxcr_dma);
rxc->vxcr_u.rxcd = NULL;
}
for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
if (rxr->vxrxr_rxd != NULL) {
vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
rxr->vxrxr_rxd = NULL;
}
if (rxr->vxrxr_rxtag != NULL) {
bus_dma_tag_destroy(rxr->vxrxr_rxtag);
rxr->vxrxr_rxtag = NULL;
}
}
}
}
static int
vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
{
int error;
error = vmxnet3_alloc_txq_data(sc);
if (error)
return (error);
error = vmxnet3_alloc_rxq_data(sc);
if (error)
return (error);
return (0);
}
static void
vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
{
if (sc->vmx_rxq != NULL)
vmxnet3_free_rxq_data(sc);
if (sc->vmx_txq != NULL)
vmxnet3_free_txq_data(sc);
}
static int
vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
{
int error;
error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
32, &sc->vmx_mcast_dma);
if (error)
device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
else
sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
return (error);
}
static void
vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
{
if (sc->vmx_mcast != NULL) {
vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
sc->vmx_mcast = NULL;
}
}
static void
vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
{
struct vmxnet3_driver_shared *ds;
struct vmxnet3_txqueue *txq;
struct vmxnet3_txq_shared *txs;
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_rxq_shared *rxs;
int i;
ds = sc->vmx_ds;
/*
* Initialize fields of the shared data that remain the same across
* reinits. Note the shared data is zero'd when allocated.
*/
ds->magic = VMXNET3_REV1_MAGIC;
/* DriverInfo */
ds->version = VMXNET3_DRIVER_VERSION;
ds->guest = VMXNET3_GOS_FREEBSD |
#ifdef __LP64__
VMXNET3_GOS_64BIT;
#else
VMXNET3_GOS_32BIT;
#endif
ds->vmxnet3_revision = 1;
ds->upt_version = 1;
/* Misc. conf */
ds->driver_data = vtophys(sc);
ds->driver_data_len = sizeof(struct vmxnet3_softc);
ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
ds->nrxsg_max = sc->vmx_max_rxsegs;
/* RSS conf */
if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
ds->rss.version = 1;
ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
ds->rss.len = sc->vmx_rss_dma.dma_size;
}
/* Interrupt control. */
ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
ds->nintr = sc->vmx_nintrs;
ds->evintr = sc->vmx_event_intr_idx;
ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
for (i = 0; i < sc->vmx_nintrs; i++)
ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
/* Receive filter. */
ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
/* Tx queues */
for (i = 0; i < sc->vmx_ntxqueues; i++) {
txq = &sc->vmx_txq[i];
txs = txq->vxtxq_ts;
txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
txs->driver_data = vtophys(txq);
txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
}
/* Rx queues */
for (i = 0; i < sc->vmx_nrxqueues; i++) {
rxq = &sc->vmx_rxq[i];
rxs = rxq->vxrxq_rs;
rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
rxs->driver_data = vtophys(rxq);
rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
}
}
static void
vmxnet3_init_hwassist(struct vmxnet3_softc *sc)
{
struct ifnet *ifp = sc->vmx_ifp;
uint64_t hwassist;
hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM)
hwassist |= VMXNET3_CSUM_OFFLOAD;
if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
if (ifp->if_capenable & IFCAP_TSO4)
hwassist |= CSUM_IP_TSO;
if (ifp->if_capenable & IFCAP_TSO6)
hwassist |= CSUM_IP6_TSO;
ifp->if_hwassist = hwassist;
}
static void
vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vmx_ifp;
/* Use the current MAC address. */
bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
vmxnet3_set_lladdr(sc);
vmxnet3_init_hwassist(sc);
}
static void
vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
{
/*
* Use the same key as the Linux driver until FreeBSD can do
* RSS (presumably Toeplitz) in software.
*/
static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
};
struct vmxnet3_driver_shared *ds;
struct vmxnet3_rss_shared *rss;
int i;
ds = sc->vmx_ds;
rss = sc->vmx_rss;
rss->hash_type =
UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
rss->ind_table[i] = i % sc->vmx_nrxqueues;
}
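/*
 * Editor's note: an illustrative sketch (not driver code) of how an RSS
 * indirection table like the one filled above steers traffic.  The
 * device hashes selected header fields (Toeplitz, with the key above),
 * uses the low bits of that hash to index the indirection table, and
 * the table entry names the Rx queue.  With ind_table[i] = i % nrxqueues
 * the queues are covered round-robin.  The "example_*" names are
 * hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_rss_pick_queue(const uint8_t *ind_table, uint32_t ind_table_size,
    uint32_t hash)
{

	return (ind_table[hash % ind_table_size]);
}
#endif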
static void
vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
{
struct ifnet *ifp;
struct vmxnet3_driver_shared *ds;
ifp = sc->vmx_ifp;
ds = sc->vmx_ds;
ds->mtu = ifp->if_mtu;
ds->ntxqueue = sc->vmx_ntxqueues;
ds->nrxqueue = sc->vmx_nrxqueues;
ds->upt_features = 0;
if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
ds->upt_features |= UPT1_F_CSUM;
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
ds->upt_features |= UPT1_F_VLAN;
if (ifp->if_capenable & IFCAP_LRO)
ds->upt_features |= UPT1_F_LRO;
if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
ds->upt_features |= UPT1_F_RSS;
vmxnet3_reinit_rss_shared_data(sc);
}
vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
(uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
}
static int
vmxnet3_alloc_data(struct vmxnet3_softc *sc)
{
int error;
error = vmxnet3_alloc_shared_data(sc);
if (error)
return (error);
error = vmxnet3_alloc_queue_data(sc);
if (error)
return (error);
error = vmxnet3_alloc_mcast_table(sc);
if (error)
return (error);
vmxnet3_init_shared_data(sc);
return (0);
}
static void
vmxnet3_free_data(struct vmxnet3_softc *sc)
{
vmxnet3_free_mcast_table(sc);
vmxnet3_free_queue_data(sc);
vmxnet3_free_shared_data(sc);
}
static int
vmxnet3_setup_interface(struct vmxnet3_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vmx_dev;
ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "cannot allocate ifnet structure\n");
return (ENOSPC);
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#if __FreeBSD_version < 1000025
ifp->if_baudrate = 1000000000;
#elif __FreeBSD_version < 1100011
if_initbaudrate(ifp, IF_Gbps(10));
#else
ifp->if_baudrate = IF_Gbps(10);
#endif
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_init = vmxnet3_init;
ifp->if_ioctl = vmxnet3_ioctl;
ifp->if_get_counter = vmxnet3_get_counter;
ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS;
ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE;
#ifdef VMXNET3_LEGACY_TX
ifp->if_start = vmxnet3_start;
ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
IFQ_SET_READY(&ifp->if_snd);
#else
ifp->if_transmit = vmxnet3_txq_mq_start;
ifp->if_qflush = vmxnet3_qflush;
#endif
vmxnet3_get_lladdr(sc);
ether_ifattach(ifp, sc->vmx_lladdr);
ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWCSUM;
ifp->if_capenable = ifp->if_capabilities;
/* These capabilities are not enabled by default. */
ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config,
vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
vmxnet3_media_status);
ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
return (0);
}
static void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
device_t dev;
struct ifnet *ifp;
struct vmxnet3_txq_shared *ts;
struct vmxnet3_rxq_shared *rs;
uint32_t event;
int reset;
dev = sc->vmx_dev;
ifp = sc->vmx_ifp;
reset = 0;
VMXNET3_CORE_LOCK(sc);
/* Clear events. */
event = sc->vmx_ds->event;
vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
if (event & VMXNET3_EVENT_LINK) {
vmxnet3_link_status(sc);
if (sc->vmx_link_active != 0)
vmxnet3_tx_start_all(sc);
}
if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
reset = 1;
vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
ts = sc->vmx_txq[0].vxtxq_ts;
if (ts->stopped != 0)
device_printf(dev, "Tx queue error %#x\n", ts->error);
rs = sc->vmx_rxq[0].vxrxq_rs;
if (rs->stopped != 0)
device_printf(dev, "Rx queue error %#x\n", rs->error);
device_printf(dev, "Rx/Tx queue error event ... resetting\n");
}
if (event & VMXNET3_EVENT_DIC)
device_printf(dev, "device implementation change event\n");
if (event & VMXNET3_EVENT_DEBUG)
device_printf(dev, "debug event\n");
if (reset != 0) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vmxnet3_init_locked(sc);
}
VMXNET3_CORE_UNLOCK(sc);
}
static void
vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
{
struct vmxnet3_softc *sc;
struct ifnet *ifp;
struct vmxnet3_txring *txr;
struct vmxnet3_comp_ring *txc;
struct vmxnet3_txcompdesc *txcd;
struct vmxnet3_txbuf *txb;
struct mbuf *m;
u_int sop;
sc = txq->vxtxq_sc;
ifp = sc->vmx_ifp;
txr = &txq->vxtxq_cmd_ring;
txc = &txq->vxtxq_comp_ring;
VMXNET3_TXQ_LOCK_ASSERT(txq);
for (;;) {
txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
if (txcd->gen != txc->vxcr_gen)
break;
vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
if (++txc->vxcr_next == txc->vxcr_ndesc) {
txc->vxcr_next = 0;
txc->vxcr_gen ^= 1;
}
sop = txr->vxtxr_next;
txb = &txr->vxtxr_txbuf[sop];
if ((m = txb->vtxb_m) != NULL) {
bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
txq->vxtxq_stats.vmtxs_opackets++;
txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len;
if (m->m_flags & M_MCAST)
txq->vxtxq_stats.vmtxs_omcasts++;
m_freem(m);
txb->vtxb_m = NULL;
}
txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
}
if (txr->vxtxr_head == txr->vxtxr_next)
txq->vxtxq_watchdog = 0;
}
static int
vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
{
struct ifnet *ifp;
struct mbuf *m;
struct vmxnet3_rxdesc *rxd;
struct vmxnet3_rxbuf *rxb;
bus_dma_tag_t tag;
bus_dmamap_t dmap;
bus_dma_segment_t segs[1];
int idx, clsize, btype, flags, nsegs, error;
ifp = sc->vmx_ifp;
tag = rxr->vxrxr_rxtag;
dmap = rxr->vxrxr_spare_dmap;
idx = rxr->vxrxr_fill;
rxd = &rxr->vxrxr_rxd[idx];
rxb = &rxr->vxrxr_rxbuf[idx];
#ifdef VMXNET3_FAILPOINTS
KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
if (rxr->vxrxr_rid != 0)
KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
#endif
if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
flags = M_PKTHDR;
clsize = MCLBYTES;
btype = VMXNET3_BTYPE_HEAD;
} else {
#if __FreeBSD_version < 902001
/*
* These mbufs will never be used for the start of a frame.
* Roughly prior to branching releng/9.2, the load_mbuf_sg()
* required the mbuf to always be a packet header. Avoid
* unnecessary mbuf initialization in newer versions where
* that is not the case.
*/
flags = M_PKTHDR;
#else
flags = 0;
#endif
clsize = MJUMPAGESIZE;
btype = VMXNET3_BTYPE_BODY;
}
m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
if (m == NULL) {
sc->vmx_stats.vmst_mgetcl_failed++;
return (ENOBUFS);
}
if (btype == VMXNET3_BTYPE_HEAD) {
m->m_len = m->m_pkthdr.len = clsize;
m_adj(m, ETHER_ALIGN);
} else
m->m_len = clsize;
error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
BUS_DMA_NOWAIT);
if (error) {
m_freem(m);
sc->vmx_stats.vmst_mbuf_load_failed++;
return (error);
}
KASSERT(nsegs == 1,
("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
#if __FreeBSD_version < 902001
if (btype == VMXNET3_BTYPE_BODY)
m->m_flags &= ~M_PKTHDR;
#endif
if (rxb->vrxb_m != NULL) {
bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(tag, rxb->vrxb_dmamap);
}
rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
rxb->vrxb_dmamap = dmap;
rxb->vrxb_m = m;
rxd->addr = segs[0].ds_addr;
rxd->len = segs[0].ds_len;
rxd->btype = btype;
rxd->gen = rxr->vxrxr_gen;
vmxnet3_rxr_increment_fill(rxr);
return (0);
}
static void
vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
struct vmxnet3_rxring *rxr, int idx)
{
struct vmxnet3_rxdesc *rxd;
rxd = &rxr->vxrxr_rxd[idx];
rxd->gen = rxr->vxrxr_gen;
vmxnet3_rxr_increment_fill(rxr);
}
static void
vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
{
struct vmxnet3_softc *sc;
struct vmxnet3_rxring *rxr;
struct vmxnet3_comp_ring *rxc;
struct vmxnet3_rxcompdesc *rxcd;
int idx, eof;
sc = rxq->vxrxq_sc;
rxc = &rxq->vxrxq_comp_ring;
do {
rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
if (rxcd->gen != rxc->vxcr_gen)
break; /* Not expected. */
vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
rxc->vxcr_next = 0;
rxc->vxcr_gen ^= 1;
}
idx = rxcd->rxd_idx;
eof = rxcd->eop;
if (rxcd->qid < sc->vmx_nrxqueues)
rxr = &rxq->vxrxq_cmd_ring[0];
else
rxr = &rxq->vxrxq_cmd_ring[1];
vmxnet3_rxq_eof_discard(rxq, rxr, idx);
} while (!eof);
}
static void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
if (rxcd->ipv4) {
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
if (rxcd->ipcsum_ok)
m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
}
if (!rxcd->fragment) {
if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
}
}
}
static void
vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
struct vmxnet3_softc *sc;
struct ifnet *ifp;
sc = rxq->vxrxq_sc;
ifp = sc->vmx_ifp;
if (rxcd->error) {
rxq->vxrxq_stats.vmrxs_ierrors++;
m_freem(m);
return;
}
#ifdef notyet
switch (rxcd->rss_type) {
case VMXNET3_RCD_RSS_TYPE_IPV4:
m->m_pkthdr.flowid = rxcd->rss_hash;
M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4);
break;
case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
m->m_pkthdr.flowid = rxcd->rss_hash;
M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);
break;
case VMXNET3_RCD_RSS_TYPE_IPV6:
m->m_pkthdr.flowid = rxcd->rss_hash;
M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
break;
case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
m->m_pkthdr.flowid = rxcd->rss_hash;
M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6);
break;
default: /* VMXNET3_RCD_RSS_TYPE_NONE */
m->m_pkthdr.flowid = rxq->vxrxq_id;
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
break;
}
#else
m->m_pkthdr.flowid = rxq->vxrxq_id;
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
#endif
if (!rxcd->no_csum)
vmxnet3_rx_csum(rxcd, m);
if (rxcd->vlan) {
m->m_flags |= M_VLANTAG;
m->m_pkthdr.ether_vtag = rxcd->vtag;
}
rxq->vxrxq_stats.vmrxs_ipackets++;
rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len;
VMXNET3_RXQ_UNLOCK(rxq);
(*ifp->if_input)(ifp, m);
VMXNET3_RXQ_LOCK(rxq);
}
static void
vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
{
struct vmxnet3_softc *sc;
struct ifnet *ifp;
struct vmxnet3_rxring *rxr;
struct vmxnet3_comp_ring *rxc;
struct vmxnet3_rxdesc *rxd;
struct vmxnet3_rxcompdesc *rxcd;
struct mbuf *m, *m_head, *m_tail;
int idx, length;
sc = rxq->vxrxq_sc;
ifp = sc->vmx_ifp;
rxc = &rxq->vxrxq_comp_ring;
VMXNET3_RXQ_LOCK_ASSERT(rxq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
m_head = rxq->vxrxq_mhead;
rxq->vxrxq_mhead = NULL;
m_tail = rxq->vxrxq_mtail;
rxq->vxrxq_mtail = NULL;
MPASS(m_head == NULL || m_tail != NULL);
for (;;) {
rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
if (rxcd->gen != rxc->vxcr_gen) {
rxq->vxrxq_mhead = m_head;
rxq->vxrxq_mtail = m_tail;
break;
}
vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
rxc->vxcr_next = 0;
rxc->vxcr_gen ^= 1;
}
idx = rxcd->rxd_idx;
length = rxcd->len;
if (rxcd->qid < sc->vmx_nrxqueues)
rxr = &rxq->vxrxq_cmd_ring[0];
else
rxr = &rxq->vxrxq_cmd_ring[1];
rxd = &rxr->vxrxr_rxd[idx];
m = rxr->vxrxr_rxbuf[idx].vrxb_m;
KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
__func__, rxcd->qid, idx));
/*
* The host may skip descriptors. We detect this when this
* descriptor does not match the previous fill index. Catch
* up with the host now.
*/
if (__predict_false(rxr->vxrxr_fill != idx)) {
while (rxr->vxrxr_fill != idx) {
rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
rxr->vxrxr_gen;
vmxnet3_rxr_increment_fill(rxr);
}
}
if (rxcd->sop) {
KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
("%s: start of frame w/o head buffer", __func__));
KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
("%s: start of frame not in ring 0", __func__));
KASSERT((idx % sc->vmx_rx_max_chain) == 0,
("%s: start of frame at unexcepted index %d (%d)",
__func__, idx, sc->vmx_rx_max_chain));
KASSERT(m_head == NULL,
("%s: duplicate start of frame?", __func__));
if (length == 0) {
/* Just ignore this descriptor. */
vmxnet3_rxq_eof_discard(rxq, rxr, idx);
goto nextp;
}
if (vmxnet3_newbuf(sc, rxr) != 0) {
rxq->vxrxq_stats.vmrxs_iqdrops++;
vmxnet3_rxq_eof_discard(rxq, rxr, idx);
if (!rxcd->eop)
vmxnet3_rxq_discard_chain(rxq);
goto nextp;
}
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = length;
m->m_pkthdr.csum_flags = 0;
m_head = m_tail = m;
} else {
KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
("%s: non start of frame w/o body buffer", __func__));
if (m_head == NULL && m_tail == NULL) {
/*
* This is a continuation of a packet that we
* started to drop, but could not drop entirely
* because this segment was still owned by the
* host. So, drop the remainder now.
*/
vmxnet3_rxq_eof_discard(rxq, rxr, idx);
if (!rxcd->eop)
vmxnet3_rxq_discard_chain(rxq);
goto nextp;
}
KASSERT(m_head != NULL,
("%s: frame not started?", __func__));
if (vmxnet3_newbuf(sc, rxr) != 0) {
rxq->vxrxq_stats.vmrxs_iqdrops++;
vmxnet3_rxq_eof_discard(rxq, rxr, idx);
if (!rxcd->eop)
vmxnet3_rxq_discard_chain(rxq);
m_freem(m_head);
m_head = m_tail = NULL;
goto nextp;
}
m->m_len = length;
m_head->m_pkthdr.len += length;
m_tail->m_next = m;
m_tail = m;
}
if (rxcd->eop) {
vmxnet3_rxq_input(rxq, rxcd, m_head);
m_head = m_tail = NULL;
/* Must recheck after dropping the Rx lock. */
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
}
nextp:
if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
int qid = rxcd->qid;
bus_size_t r;
idx = (idx + 1) % rxr->vxrxr_ndesc;
if (qid >= sc->vmx_nrxqueues) {
qid -= sc->vmx_nrxqueues;
r = VMXNET3_BAR0_RXH2(qid);
} else
r = VMXNET3_BAR0_RXH1(qid);
vmxnet3_write_bar0(sc, r, idx);
}
}
}
static void
vmxnet3_legacy_intr(void *xsc)
{
struct vmxnet3_softc *sc;
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_txqueue *txq;
sc = xsc;
rxq = &sc->vmx_rxq[0];
txq = &sc->vmx_txq[0];
if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
return;
}
if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_all_intrs(sc);
if (sc->vmx_ds->event != 0)
vmxnet3_evintr(sc);
VMXNET3_RXQ_LOCK(rxq);
vmxnet3_rxq_eof(rxq);
VMXNET3_RXQ_UNLOCK(rxq);
VMXNET3_TXQ_LOCK(txq);
vmxnet3_txq_eof(txq);
vmxnet3_txq_start(txq);
VMXNET3_TXQ_UNLOCK(txq);
vmxnet3_enable_all_intrs(sc);
}
static void
vmxnet3_txq_intr(void *xtxq)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txqueue *txq;
txq = xtxq;
sc = txq->vxtxq_sc;
if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);
VMXNET3_TXQ_LOCK(txq);
vmxnet3_txq_eof(txq);
vmxnet3_txq_start(txq);
VMXNET3_TXQ_UNLOCK(txq);
vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
}
static void
vmxnet3_rxq_intr(void *xrxq)
{
struct vmxnet3_softc *sc;
struct vmxnet3_rxqueue *rxq;
rxq = xrxq;
sc = rxq->vxrxq_sc;
if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
VMXNET3_RXQ_LOCK(rxq);
vmxnet3_rxq_eof(rxq);
VMXNET3_RXQ_UNLOCK(rxq);
vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
}
static void
vmxnet3_event_intr(void *xsc)
{
struct vmxnet3_softc *sc;
sc = xsc;
if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
if (sc->vmx_ds->event != 0)
vmxnet3_evintr(sc);
vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
}
static void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
struct vmxnet3_txring *txr;
struct vmxnet3_txbuf *txb;
int i;
txr = &txq->vxtxq_cmd_ring;
for (i = 0; i < txr->vxtxr_ndesc; i++) {
txb = &txr->vxtxr_txbuf[i];
if (txb->vtxb_m == NULL)
continue;
bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
m_freem(txb->vtxb_m);
txb->vtxb_m = NULL;
}
}
static void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
struct vmxnet3_rxring *rxr;
struct vmxnet3_rxbuf *rxb;
int i, j;
if (rxq->vxrxq_mhead != NULL) {
m_freem(rxq->vxrxq_mhead);
rxq->vxrxq_mhead = NULL;
rxq->vxrxq_mtail = NULL;
}
for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
for (j = 0; j < rxr->vxrxr_ndesc; j++) {
rxb = &rxr->vxrxr_rxbuf[j];
if (rxb->vrxb_m == NULL)
continue;
bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
m_freem(rxb->vrxb_m);
rxb->vrxb_m = NULL;
}
}
}
static void
vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
{
struct vmxnet3_rxqueue *rxq;
struct vmxnet3_txqueue *txq;
int i;
for (i = 0; i < sc->vmx_nrxqueues; i++) {
rxq = &sc->vmx_rxq[i];
VMXNET3_RXQ_LOCK(rxq);
VMXNET3_RXQ_UNLOCK(rxq);
}
for (i = 0; i < sc->vmx_ntxqueues; i++) {
txq = &sc->vmx_txq[i];
VMXNET3_TXQ_LOCK(txq);
VMXNET3_TXQ_UNLOCK(txq);
}
}
static void
vmxnet3_stop(struct vmxnet3_softc *sc)
{
struct ifnet *ifp;
int q;
ifp = sc->vmx_ifp;
VMXNET3_CORE_LOCK_ASSERT(sc);
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
sc->vmx_link_active = 0;
callout_stop(&sc->vmx_tick);
/* Disable interrupts. */
vmxnet3_disable_all_intrs(sc);
vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
vmxnet3_stop_rendezvous(sc);
for (q = 0; q < sc->vmx_ntxqueues; q++)
vmxnet3_txstop(sc, &sc->vmx_txq[q]);
for (q = 0; q < sc->vmx_nrxqueues; q++)
vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);
vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
}
static void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
struct vmxnet3_txring *txr;
struct vmxnet3_comp_ring *txc;
txr = &txq->vxtxq_cmd_ring;
txr->vxtxr_head = 0;
txr->vxtxr_next = 0;
txr->vxtxr_gen = VMXNET3_INIT_GEN;
bzero(txr->vxtxr_txd,
txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
txc = &txq->vxtxq_comp_ring;
txc->vxcr_next = 0;
txc->vxcr_gen = VMXNET3_INIT_GEN;
bzero(txc->vxcr_u.txcd,
txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
}
static int
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
struct ifnet *ifp;
struct vmxnet3_rxring *rxr;
struct vmxnet3_comp_ring *rxc;
int i, populate, idx, frame_size, error;
ifp = sc->vmx_ifp;
frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
ifp->if_mtu;
/*
* If the MTU causes us to exceed what a regular-sized cluster can
* handle, we allocate a second MJUMPAGESIZE cluster after it in
* ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
*
* Keep rx_max_chain a divisor of the maximum Rx ring size to make
* our life easier. We do not support changing the ring size after
* the attach. (A worked sizing example follows this function.)
*/
if (frame_size <= MCLBYTES)
sc->vmx_rx_max_chain = 1;
else
sc->vmx_rx_max_chain = 2;
/*
* Only populate ring 1 if the configuration will take advantage
* of it. That is either when LRO is enabled or the frame size
* exceeds what ring 0 can contain.
*/
if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
frame_size <= MCLBYTES + MJUMPAGESIZE)
populate = 1;
else
populate = VMXNET3_RXRINGS_PERQ;
for (i = 0; i < populate; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
rxr->vxrxr_fill = 0;
rxr->vxrxr_gen = VMXNET3_INIT_GEN;
bzero(rxr->vxrxr_rxd,
rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
error = vmxnet3_newbuf(sc, rxr);
if (error)
return (error);
}
}
for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
rxr = &rxq->vxrxq_cmd_ring[i];
rxr->vxrxr_fill = 0;
rxr->vxrxr_gen = 0;
bzero(rxr->vxrxr_rxd,
rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
}
rxc = &rxq->vxrxq_comp_ring;
rxc->vxcr_next = 0;
rxc->vxcr_gen = VMXNET3_INIT_GEN;
bzero(rxc->vxcr_u.rxcd,
rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
return (0);
}
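/*
 * Editor's worked example for the sizing logic above, assuming the
 * common values ETHER_ALIGN == 2, sizeof(struct ether_vlan_header) == 18,
 * MCLBYTES == 2048 and MJUMPAGESIZE == 4096 (one page):
 *
 *   MTU 1500: frame_size = 2 + 18 + 1500 = 1520 <= MCLBYTES, so
 *             rx_max_chain = 1 and ring 1 stays empty unless LRO is on.
 *   MTU 9000: frame_size = 9020 > MCLBYTES, so each frame starts with an
 *             MCLBYTES head plus an MJUMPAGESIZE body (rx_max_chain = 2),
 *             and since 9020 > MCLBYTES + MJUMPAGESIZE ring 1 is also
 *             populated so the device can chain further body buffers.
 *
 * The helper below is hypothetical, not driver code.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_rx_max_chain(int mtu)
{
	int frame_size;

	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) + mtu;
	return (frame_size <= MCLBYTES ? 1 : 2);
}
#endif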
static int
vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
{
device_t dev;
int q, error;
dev = sc->vmx_dev;
for (q = 0; q < sc->vmx_ntxqueues; q++)
vmxnet3_txinit(sc, &sc->vmx_txq[q]);
for (q = 0; q < sc->vmx_nrxqueues; q++) {
error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
if (error) {
device_printf(dev, "cannot populate Rx queue %d\n", q);
return (error);
}
}
return (0);
}
static int
vmxnet3_enable_device(struct vmxnet3_softc *sc)
{
int q;
if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
device_printf(sc->vmx_dev, "device enable command failed!\n");
return (1);
}
/* Reset the Rx queue heads. */
for (q = 0; q < sc->vmx_nrxqueues; q++) {
vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
}
return (0);
}
static void
vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vmx_ifp;
vmxnet3_set_rxfilter(sc);
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
sizeof(sc->vmx_ds->vlan_filter));
else
bzero(sc->vmx_ds->vlan_filter,
sizeof(sc->vmx_ds->vlan_filter));
vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
}
static int
vmxnet3_reinit(struct vmxnet3_softc *sc)
{
vmxnet3_reinit_interface(sc);
vmxnet3_reinit_shared_data(sc);
if (vmxnet3_reinit_queues(sc) != 0)
return (ENXIO);
if (vmxnet3_enable_device(sc) != 0)
return (ENXIO);
vmxnet3_reinit_rxfilters(sc);
return (0);
}
static void
vmxnet3_init_locked(struct vmxnet3_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vmx_ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
vmxnet3_stop(sc);
if (vmxnet3_reinit(sc) != 0) {
vmxnet3_stop(sc);
return;
}
ifp->if_drv_flags |= IFF_DRV_RUNNING;
vmxnet3_link_status(sc);
vmxnet3_enable_all_intrs(sc);
callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
}
static void
vmxnet3_init(void *xsc)
{
struct vmxnet3_softc *sc;
sc = xsc;
VMXNET3_CORE_LOCK(sc);
vmxnet3_init_locked(sc);
VMXNET3_CORE_UNLOCK(sc);
}
/*
* BMV: Much of this can go away once we finally have offsets in
* the mbuf packet header. Bug andre@.
*/
static int
vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
int *etype, int *proto, int *start)
{
struct ether_vlan_header *evh;
int offset;
#if defined(INET)
struct ip *ip = NULL;
struct ip iphdr;
#endif
#if defined(INET6)
struct ip6_hdr *ip6 = NULL;
struct ip6_hdr ip6hdr;
#endif
evh = mtod(m, struct ether_vlan_header *);
if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
/* BMV: We should handle nested VLAN tags too. */
*etype = ntohs(evh->evl_proto);
offset = sizeof(struct ether_vlan_header);
} else {
*etype = ntohs(evh->evl_encap_proto);
offset = sizeof(struct ether_header);
}
switch (*etype) {
#if defined(INET)
case ETHERTYPE_IP:
if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
m_copydata(m, offset, sizeof(struct ip),
(caddr_t) &iphdr);
ip = &iphdr;
} else
ip = mtodo(m, offset);
*proto = ip->ip_p;
*start = offset + (ip->ip_hl << 2);
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len <
offset + sizeof(struct ip6_hdr))) {
m_copydata(m, offset, sizeof(struct ip6_hdr),
(caddr_t) &ip6hdr);
ip6 = &ip6hdr;
} else
ip6 = mtodo(m, offset);
*proto = -1;
*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
/* Assert the network stack sent us a valid packet. */
KASSERT(*start > offset,
("%s: mbuf %p start %d offset %d proto %d", __func__, m,
*start, offset, *proto));
break;
#endif
default:
return (EINVAL);
}
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
struct tcphdr *tcp, tcphdr;
uint16_t sum;
if (__predict_false(*proto != IPPROTO_TCP)) {
/* Likely failed to correctly parse the mbuf. */
return (EINVAL);
}
txq->vxtxq_stats.vmtxs_tso++;
switch (*etype) {
#if defined(INET)
case ETHERTYPE_IP:
sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
htons(IPPROTO_TCP));
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
break;
#endif
default:
sum = 0;
break;
}
if (m->m_len < *start + sizeof(struct tcphdr)) {
m_copyback(m, *start + offsetof(struct tcphdr, th_sum),
sizeof(uint16_t), (caddr_t) &sum);
m_copydata(m, *start, sizeof(struct tcphdr),
(caddr_t) &tcphdr);
tcp = &tcphdr;
} else {
tcp = mtodo(m, *start);
tcp->th_sum = sum;
}
/*
* For TSO, the size of the protocol header is also
* included in the descriptor header size.
*/
*start += (tcp->th_off << 2);
} else
txq->vxtxq_stats.vmtxs_csum++;
return (0);
}
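/*
 * Editor's worked example for vmxnet3_txq_offload_ctx() above, for an
 * untagged IPv4/TCP packet with no IP or TCP options (illustration only):
 *
 *   offset = sizeof(struct ether_header)            = 14
 *   *start = offset + (ip->ip_hl << 2) = 14 + 20    = 34  (checksum case)
 *   TSO:   *start += (tcp->th_off << 2) = 34 + 20   = 54  (full header)
 *
 * vmxnet3_txq_encap() later programs hlen/offload_pos from this value.
 * The helper below is hypothetical, not driver code.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_offload_start(int l2len, int ip_hl_words, int th_off_words, int tso)
{
	int start;

	start = l2len + (ip_hl_words << 2);	/* end of the IP header */
	if (tso)
		start += (th_off_words << 2);	/* also skip the TCP header */
	return (start);
}
#endif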
static int
vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
{
struct vmxnet3_txring *txr;
struct mbuf *m;
bus_dma_tag_t tag;
int error;
txr = &txq->vxtxq_cmd_ring;
m = *m0;
tag = txr->vxtxr_txtag;
error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
if (error == 0 || error != EFBIG)
return (error);
m = m_defrag(m, M_NOWAIT);
if (m != NULL) {
*m0 = m;
error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
} else
error = ENOBUFS;
if (error) {
m_freem(*m0);
*m0 = NULL;
txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++;
} else
txq->vxtxq_sc->vmx_stats.vmst_defragged++;
return (error);
}
static void
vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
{
struct vmxnet3_txring *txr;
txr = &txq->vxtxq_cmd_ring;
bus_dmamap_unload(txr->vxtxr_txtag, dmap);
}
static int
vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txring *txr;
struct vmxnet3_txdesc *txd, *sop;
struct mbuf *m;
bus_dmamap_t dmap;
bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
int i, gen, nsegs, etype, proto, start, error;
sc = txq->vxtxq_sc;
start = 0;
txd = NULL;
txr = &txq->vxtxq_cmd_ring;
dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
if (error)
return (error);
m = *m0;
M_ASSERTPKTHDR(m);
KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
txq->vxtxq_stats.vmtxs_full++;
vmxnet3_txq_unload_mbuf(txq, dmap);
return (ENOSPC);
} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start);
if (error) {
txq->vxtxq_stats.vmtxs_offload_failed++;
vmxnet3_txq_unload_mbuf(txq, dmap);
m_freem(m);
*m0 = NULL;
return (error);
}
}
txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
sop = &txr->vxtxr_txd[txr->vxtxr_head];
gen = txr->vxtxr_gen ^ 1; /* Still owned by the CPU for now. */
for (i = 0; i < nsegs; i++) {
txd = &txr->vxtxr_txd[txr->vxtxr_head];
txd->addr = segs[i].ds_addr;
txd->len = segs[i].ds_len;
txd->gen = gen;
txd->dtype = 0;
txd->offload_mode = VMXNET3_OM_NONE;
txd->offload_pos = 0;
txd->hlen = 0;
txd->eop = 0;
txd->compreq = 0;
txd->vtag_mode = 0;
txd->vtag = 0;
if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
txr->vxtxr_head = 0;
txr->vxtxr_gen ^= 1;
}
gen = txr->vxtxr_gen;
}
txd->eop = 1;
txd->compreq = 1;
if (m->m_flags & M_VLANTAG) {
sop->vtag_mode = 1;
sop->vtag = m->m_pkthdr.ether_vtag;
}
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
sop->offload_mode = VMXNET3_OM_TSO;
sop->hlen = start;
sop->offload_pos = m->m_pkthdr.tso_segsz;
} else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
VMXNET3_CSUM_OFFLOAD_IPV6)) {
sop->offload_mode = VMXNET3_OM_CSUM;
sop->hlen = start;
sop->offload_pos = start + m->m_pkthdr.csum_data;
}
/* Finally, change the ownership. */
vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
sop->gen ^= 1;
txq->vxtxq_ts->npending += nsegs;
if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
txq->vxtxq_ts->npending = 0;
vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
txr->vxtxr_head);
}
return (0);
}
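/*
 * Editor's note on the ownership handoff in vmxnet3_txq_encap() above:
 * the start-of-packet descriptor is first written with the inverted
 * generation bit (so the device ignores it), the rest of the chain gets
 * the live generation, and only after a write barrier is the SOP's gen
 * bit flipped, so the device never sees a partially written chain.  The
 * sketch below is hypothetical (contiguous array, no ring wrap) and
 * wmb() stands in for vmxnet3_barrier(sc, VMXNET3_BARRIER_WR).
 */
#if 0	/* illustrative sketch, not compiled */
struct example_desc {
	uint64_t	addr;
	uint32_t	len;
	uint32_t	gen;
};

static void
example_post_chain(struct example_desc *d, int ndesc, uint32_t ring_gen)
{
	int i;

	d[0].gen = ring_gen ^ 1;	/* SOP held back from the device */
	for (i = 1; i < ndesc; i++)
		d[i].gen = ring_gen;	/* body descriptors may go live */
	wmb();				/* publish the chain first ... */
	d[0].gen = ring_gen;		/* ... then hand it to the device */
}
#endif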
#ifdef VMXNET3_LEGACY_TX
static void
vmxnet3_start_locked(struct ifnet *ifp)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txqueue *txq;
struct vmxnet3_txring *txr;
struct mbuf *m_head;
int tx, avail;
sc = ifp->if_softc;
txq = &sc->vmx_txq[0];
txr = &txq->vxtxq_cmd_ring;
tx = 0;
VMXNET3_TXQ_LOCK_ASSERT(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->vmx_link_active == 0)
return;
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
break;
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
/* Assume the worst case if this mbuf is the head of a chain. */
if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
break;
}
if (vmxnet3_txq_encap(txq, &m_head) != 0) {
if (m_head != NULL)
IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
break;
}
tx++;
ETHER_BPF_MTAP(ifp, m_head);
}
if (tx > 0)
txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
}
static void
vmxnet3_start(struct ifnet *ifp)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txqueue *txq;
sc = ifp->if_softc;
txq = &sc->vmx_txq[0];
VMXNET3_TXQ_LOCK(txq);
vmxnet3_start_locked(ifp);
VMXNET3_TXQ_UNLOCK(txq);
}
#else /* !VMXNET3_LEGACY_TX */
static int
vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txring *txr;
struct buf_ring *br;
struct ifnet *ifp;
int tx, avail, error;
sc = txq->vxtxq_sc;
br = txq->vxtxq_br;
ifp = sc->vmx_ifp;
txr = &txq->vxtxq_cmd_ring;
tx = 0;
error = 0;
VMXNET3_TXQ_LOCK_ASSERT(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->vmx_link_active == 0) {
if (m != NULL)
error = drbr_enqueue(ifp, br, m);
return (error);
}
if (m != NULL) {
error = drbr_enqueue(ifp, br, m);
if (error)
return (error);
}
while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) {
m = drbr_peek(ifp, br);
if (m == NULL)
break;
/* Assume the worst case if this mbuf is the head of a chain. */
if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
drbr_putback(ifp, br, m);
break;
}
if (vmxnet3_txq_encap(txq, &m) != 0) {
if (m != NULL)
drbr_putback(ifp, br, m);
else
drbr_advance(ifp, br);
break;
}
drbr_advance(ifp, br);
tx++;
ETHER_BPF_MTAP(ifp, m);
}
if (tx > 0)
txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
return (0);
}
static int
vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txqueue *txq;
int i, ntxq, error;
sc = ifp->if_softc;
ntxq = sc->vmx_ntxqueues;
/* Check whether a flowid is set. */
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
i = m->m_pkthdr.flowid % ntxq;
else
i = curcpu % ntxq;
txq = &sc->vmx_txq[i];
if (VMXNET3_TXQ_TRYLOCK(txq) != 0) {
error = vmxnet3_txq_mq_start_locked(txq, m);
VMXNET3_TXQ_UNLOCK(txq);
} else {
error = drbr_enqueue(ifp, txq->vxtxq_br, m);
taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
}
return (error);
}
static void
vmxnet3_txq_tq_deferred(void *xtxq, int pending)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txqueue *txq;
txq = xtxq;
sc = txq->vxtxq_sc;
VMXNET3_TXQ_LOCK(txq);
if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br))
vmxnet3_txq_mq_start_locked(txq, NULL);
VMXNET3_TXQ_UNLOCK(txq);
}
#endif /* VMXNET3_LEGACY_TX */
static void
vmxnet3_txq_start(struct vmxnet3_txqueue *txq)
{
struct vmxnet3_softc *sc;
struct ifnet *ifp;
sc = txq->vxtxq_sc;
ifp = sc->vmx_ifp;
#ifdef VMXNET3_LEGACY_TX
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
vmxnet3_start_locked(ifp);
#else
if (!drbr_empty(ifp, txq->vxtxq_br))
vmxnet3_txq_mq_start_locked(txq, NULL);
#endif
}
static void
vmxnet3_tx_start_all(struct vmxnet3_softc *sc)
{
struct vmxnet3_txqueue *txq;
int i;
VMXNET3_CORE_LOCK_ASSERT(sc);
for (i = 0; i < sc->vmx_ntxqueues; i++) {
txq = &sc->vmx_txq[i];
VMXNET3_TXQ_LOCK(txq);
vmxnet3_txq_start(txq);
VMXNET3_TXQ_UNLOCK(txq);
}
}
static void
vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
{
struct ifnet *ifp;
int idx, bit;
ifp = sc->vmx_ifp;
idx = (tag >> 5) & 0x7F;
bit = tag & 0x1F;
if (tag == 0 || tag > 4095)
return;
VMXNET3_CORE_LOCK(sc);
/* Update our private VLAN bitvector. */
if (add)
sc->vmx_vlan_filter[idx] |= (1 << bit);
else
sc->vmx_vlan_filter[idx] &= ~(1 << bit);
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
if (add)
sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
else
sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
}
VMXNET3_CORE_UNLOCK(sc);
}
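/*
 * Editor's note: the VLAN filter above is a 4096-bit bitmap stored as
 * 128 32-bit words, so a tag maps to word (tag >> 5) and bit (tag & 0x1F);
 * for example, tag 100 lands in word 3, bit 4.  The lookup helper below
 * is a hypothetical illustration, not driver code.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_vlan_filter_test(const uint32_t *filter, uint16_t tag)
{

	return ((filter[(tag >> 5) & 0x7F] & (1U << (tag & 0x1F))) != 0);
}
#endif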
static void
vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
if (ifp->if_softc == arg)
vmxnet3_update_vlan_filter(arg, 1, tag);
}
static void
vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
if (ifp->if_softc == arg)
vmxnet3_update_vlan_filter(arg, 0, tag);
}
static void
vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
{
struct ifnet *ifp;
struct vmxnet3_driver_shared *ds;
struct ifmultiaddr *ifma;
u_int mode;
ifp = sc->vmx_ifp;
ds = sc->vmx_ds;
mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
if (ifp->if_flags & IFF_PROMISC)
mode |= VMXNET3_RXMODE_PROMISC;
if (ifp->if_flags & IFF_ALLMULTI)
mode |= VMXNET3_RXMODE_ALLMULTI;
else {
int cnt = 0, overflow = 0;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
else if (cnt == VMXNET3_MULTICAST_MAX) {
overflow = 1;
break;
}
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
&sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
cnt++;
}
if_maddr_runlock(ifp);
if (overflow != 0) {
cnt = 0;
mode |= VMXNET3_RXMODE_ALLMULTI;
} else if (cnt > 0)
mode |= VMXNET3_RXMODE_MCAST;
ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
}
ds->rxmode = mode;
vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
}
static int
vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
{
struct ifnet *ifp;
ifp = sc->vmx_ifp;
if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
return (EINVAL);
ifp->if_mtu = mtu;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vmxnet3_init_locked(sc);
}
return (0);
}
static int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct vmxnet3_softc *sc;
struct ifreq *ifr;
int reinit, mask, error;
sc = ifp->if_softc;
ifr = (struct ifreq *) data;
error = 0;
switch (cmd) {
case SIOCSIFMTU:
if (ifp->if_mtu != ifr->ifr_mtu) {
VMXNET3_CORE_LOCK(sc);
error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
VMXNET3_CORE_UNLOCK(sc);
}
break;
case SIOCSIFFLAGS:
VMXNET3_CORE_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
if ((ifp->if_flags ^ sc->vmx_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
vmxnet3_set_rxfilter(sc);
}
} else
vmxnet3_init_locked(sc);
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
vmxnet3_stop(sc);
}
sc->vmx_if_flags = ifp->if_flags;
VMXNET3_CORE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
VMXNET3_CORE_LOCK(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
vmxnet3_set_rxfilter(sc);
VMXNET3_CORE_UNLOCK(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
break;
case SIOCSIFCAP:
VMXNET3_CORE_LOCK(sc);
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
if (mask & IFCAP_TXCSUM)
ifp->if_capenable ^= IFCAP_TXCSUM;
if (mask & IFCAP_TXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
if (mask & IFCAP_TSO4)
ifp->if_capenable ^= IFCAP_TSO4;
if (mask & IFCAP_TSO6)
ifp->if_capenable ^= IFCAP_TSO6;
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) {
/* Changing these features requires us to reinit. */
reinit = 1;
if (mask & IFCAP_RXCSUM)
ifp->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
if (mask & IFCAP_LRO)
ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (mask & IFCAP_VLAN_HWFILTER)
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
} else
reinit = 0;
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vmxnet3_init_locked(sc);
} else {
vmxnet3_init_hwassist(sc);
}
VMXNET3_CORE_UNLOCK(sc);
VLAN_CAPABILITIES(ifp);
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);
return (error);
}
#ifndef VMXNET3_LEGACY_TX
static void
vmxnet3_qflush(struct ifnet *ifp)
{
struct vmxnet3_softc *sc;
struct vmxnet3_txqueue *txq;
struct mbuf *m;
int i;
sc = ifp->if_softc;
for (i = 0; i < sc->vmx_ntxqueues; i++) {
txq = &sc->vmx_txq[i];
VMXNET3_TXQ_LOCK(txq);
while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL)
m_freem(m);
VMXNET3_TXQ_UNLOCK(txq);
}
if_qflush(ifp);
}
#endif
static int
vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
{
struct vmxnet3_softc *sc;
sc = txq->vxtxq_sc;
VMXNET3_TXQ_LOCK(txq);
if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
VMXNET3_TXQ_UNLOCK(txq);
return (0);
}
VMXNET3_TXQ_UNLOCK(txq);
if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
txq->vxtxq_id);
return (1);
}
static void
vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
{
vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
}
static uint64_t
vmxnet3_get_counter(struct ifnet *ifp, ift_counter cnt)
{
struct vmxnet3_softc *sc;
uint64_t rv;
sc = if_getsoftc(ifp);
rv = 0;
/*
* With the exception of if_ierrors, these ifnet statistics are
* only updated in the driver, so just set them to our accumulated
* values. if_ierrors is updated in ether_input() for malformed
* frames that we should have already discarded.
*/
switch (cnt) {
case IFCOUNTER_IPACKETS:
for (int i = 0; i < sc->vmx_nrxqueues; i++)
rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ipackets;
return (rv);
case IFCOUNTER_IQDROPS:
for (int i = 0; i < sc->vmx_nrxqueues; i++)
rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_iqdrops;
return (rv);
case IFCOUNTER_IERRORS:
for (int i = 0; i < sc->vmx_nrxqueues; i++)
rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ierrors;
return (rv);
case IFCOUNTER_OPACKETS:
for (int i = 0; i < sc->vmx_ntxqueues; i++)
rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_opackets;
return (rv);
#ifndef VMXNET3_LEGACY_TX
case IFCOUNTER_OBYTES:
for (int i = 0; i < sc->vmx_ntxqueues; i++)
rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_obytes;
return (rv);
case IFCOUNTER_OMCASTS:
for (int i = 0; i < sc->vmx_ntxqueues; i++)
rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_omcasts;
return (rv);
#endif
default:
return (if_get_counter_default(ifp, cnt));
}
}
static void
vmxnet3_tick(void *xsc)
{
struct vmxnet3_softc *sc;
struct ifnet *ifp;
int i, timedout;
sc = xsc;
ifp = sc->vmx_ifp;
timedout = 0;
VMXNET3_CORE_LOCK_ASSERT(sc);
vmxnet3_refresh_host_stats(sc);
for (i = 0; i < sc->vmx_ntxqueues; i++)
timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);
if (timedout != 0) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vmxnet3_init_locked(sc);
} else
callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
}
static int
vmxnet3_link_is_up(struct vmxnet3_softc *sc)
{
uint32_t status;
/* Also update the link speed while here. */
status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
sc->vmx_link_speed = status >> 16;
return !!(status & 0x1);
}
static void
vmxnet3_link_status(struct vmxnet3_softc *sc)
{
struct ifnet *ifp;
int link;
ifp = sc->vmx_ifp;
link = vmxnet3_link_is_up(sc);
if (link != 0 && sc->vmx_link_active == 0) {
sc->vmx_link_active = 1;
if_link_state_change(ifp, LINK_STATE_UP);
} else if (link == 0 && sc->vmx_link_active != 0) {
sc->vmx_link_active = 0;
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
static void
vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct vmxnet3_softc *sc;
sc = ifp->if_softc;
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
VMXNET3_CORE_LOCK(sc);
if (vmxnet3_link_is_up(sc) != 0) {
ifmr->ifm_status |= IFM_ACTIVE;
ifmr->ifm_active |= IFM_AUTO;
} else
ifmr->ifm_active |= IFM_NONE;
VMXNET3_CORE_UNLOCK(sc);
}
static int
vmxnet3_media_change(struct ifnet *ifp)
{
/* Ignore. */
return (0);
}
static void
vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
{
uint32_t ml, mh;
ml = sc->vmx_lladdr[0];
ml |= sc->vmx_lladdr[1] << 8;
ml |= sc->vmx_lladdr[2] << 16;
ml |= sc->vmx_lladdr[3] << 24;
vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
mh = sc->vmx_lladdr[4];
mh |= sc->vmx_lladdr[5] << 8;
vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
}
static void
vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
{
uint32_t ml, mh;
ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
sc->vmx_lladdr[0] = ml;
sc->vmx_lladdr[1] = ml >> 8;
sc->vmx_lladdr[2] = ml >> 16;
sc->vmx_lladdr[3] = ml >> 24;
sc->vmx_lladdr[4] = mh;
sc->vmx_lladdr[5] = mh >> 8;
}
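/*
 * Editor's worked example for the two functions above: the 48-bit MAC
 * address is split across two registers, low-order byte first.  For the
 * (made-up) address 00:0c:29:aa:bb:cc this gives MACL = 0xaa290c00 and
 * MACH = 0x0000ccbb.  The helper below is hypothetical, not driver code.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_pack_lladdr(const uint8_t lladdr[6], uint32_t *ml, uint32_t *mh)
{

	*ml = lladdr[0] | lladdr[1] << 8 | lladdr[2] << 16 |
	    (uint32_t)lladdr[3] << 24;
	*mh = lladdr[4] | lladdr[5] << 8;
}
#endif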
static void
vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
struct sysctl_oid *node, *txsnode;
struct sysctl_oid_list *list, *txslist;
struct vmxnet3_txq_stats *stats;
struct UPT1_TxStats *txstats;
char namebuf[16];
stats = &txq->vxtxq_stats;
txstats = &txq->vxtxq_ts->stats;
snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
NULL, "Transmit Queue");
txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
&stats->vmtxs_opackets, "Transmit packets");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
&stats->vmtxs_obytes, "Transmit bytes");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
&stats->vmtxs_omcasts, "Transmit multicasts");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
&stats->vmtxs_csum, "Transmit checksum offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
&stats->vmtxs_tso, "Transmit TCP segmentation offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
&stats->vmtxs_full, "Transmit ring full");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
&stats->vmtxs_offload_failed, "Transmit checksum offload failed");
/*
* Add statistics reported by the host. These are updated once
* per second.
*/
txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
NULL, "Host Statistics");
txslist = SYSCTL_CHILDREN(txsnode);
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
&txstats->TSO_packets, "TSO packets");
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
&txstats->TSO_bytes, "TSO bytes");
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
&txstats->ucast_packets, "Unicast packets");
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
&txstats->ucast_bytes, "Unicast bytes");
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
&txstats->mcast_packets, "Multicast packets");
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
&txstats->mcast_bytes, "Multicast bytes");
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
&txstats->error, "Errors");
SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
&txstats->discard, "Discards");
}
static void
vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
struct sysctl_oid *node, *rxsnode;
struct sysctl_oid_list *list, *rxslist;
struct vmxnet3_rxq_stats *stats;
struct UPT1_RxStats *rxstats;
char namebuf[16];
stats = &rxq->vxrxq_stats;
rxstats = &rxq->vxrxq_rs->stats;
snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
NULL, "Receive Queue");
rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
&stats->vmrxs_ipackets, "Receive packets");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
&stats->vmrxs_ibytes, "Receive bytes");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
&stats->vmrxs_iqdrops, "Receive drops");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
&stats->vmrxs_ierrors, "Receive errors");
/*
* Add statistics reported by the host. These are updated once
* per second.
*/
rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
NULL, "Host Statistics");
rxslist = SYSCTL_CHILDREN(rxsnode);
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
&rxstats->LRO_packets, "LRO packets");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
&rxstats->LRO_bytes, "LRO bytes");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
&rxstats->ucast_packets, "Unicast packets");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
&rxstats->ucast_bytes, "Unicast bytes");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
&rxstats->mcast_packets, "Multicast packets");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
&rxstats->mcast_bytes, "Multicast bytes");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
&rxstats->bcast_packets, "Broadcast packets");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
&rxstats->bcast_bytes, "Broadcast bytes");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
&rxstats->nobuffer, "No buffer");
SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
&rxstats->error, "Errors");
}
static void
vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
struct sysctl_oid *node;
struct sysctl_oid_list *list;
int i;
for (i = 0; i < sc->vmx_ntxqueues; i++) {
struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
"debug", CTLFLAG_RD, NULL, "");
list = SYSCTL_CHILDREN(node);
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
&txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
&txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
&txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
&txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
&txq->vxtxq_comp_ring.vxcr_next, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
&txq->vxtxq_comp_ring.vxcr_ndesc, 0, "");
SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
&txq->vxtxq_comp_ring.vxcr_gen, 0, "");
}
for (i = 0; i < sc->vmx_nrxqueues; i++) {
struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
"debug", CTLFLAG_RD, NULL, "");
list = SYSCTL_CHILDREN(node);
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
&rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
&rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
&rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
&rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
&rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
&rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
&rxq->vxrxq_comp_ring.vxcr_next, 0, "");
SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
&rxq->vxrxq_comp_ring.vxcr_ndesc, 0, "");
SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
&rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
}
}
static void
vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
int i;
for (i = 0; i < sc->vmx_ntxqueues; i++)
vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
for (i = 0; i < sc->vmx_nrxqueues; i++)
vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
vmxnet3_setup_debug_sysctl(sc, ctx, child);
}
static void
vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
{
device_t dev;
struct vmxnet3_statistics *stats;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = sc->vmx_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD,
&sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD,
&sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
&sc->vmx_ntxqueues, 0, "Number of Tx queues");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
&sc->vmx_nrxqueues, 0, "Number of Rx queues");
stats = &sc->vmx_stats;
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD,
&stats->vmst_defragged, 0, "Tx mbuf chains defragged");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD,
&stats->vmst_defrag_failed, 0,
"Tx mbuf dropped because defrag failed");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
&stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
&stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");
vmxnet3_setup_queue_sysctl(sc, ctx, child);
}
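/*
* Thin wrappers around bus_space accesses to the device's two register BARs.
*/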
static void
vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{
bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
}
static uint32_t
vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
{
return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
}
static void
vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{
bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
}
static void
vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{
vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
}
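/*
* Issue a command and read back its result: the CMD register returns the
* result of the most recently written command, so a bus space barrier is
* used to order the write and the subsequent read.
*/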
static uint32_t
vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{
vmxnet3_write_cmd(sc, cmd);
bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
}
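/*
* Per-vector interrupt mask registers: writing 0 to IMASK unmasks the
* interrupt, writing 1 masks it.
*/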
static void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}
static void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}
static void
vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
{
int i;
sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
for (i = 0; i < sc->vmx_nintrs; i++)
vmxnet3_enable_intr(sc, i);
}
static void
vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
{
int i;
sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
for (i = 0; i < sc->vmx_nintrs; i++)
vmxnet3_disable_intr(sc, i);
}
static void
vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t *baddr = arg;
if (error == 0)
*baddr = segs->ds_addr;
}
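/*
* Allocate a single physically contiguous DMA buffer of 'size' bytes with the
* requested alignment and record its tag, map and virtual/physical addresses
* in 'dma'.
*/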
static int
vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
struct vmxnet3_dma_alloc *dma)
{
device_t dev;
int error;
dev = sc->vmx_dev;
bzero(dma, sizeof(struct vmxnet3_dma_alloc));
error = bus_dma_tag_create(bus_get_dma_tag(dev),
align, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
size, /* maxsize */
1, /* nsegments */
size, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&dma->dma_tag);
if (error) {
device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
goto fail;
}
error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
if (error) {
device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
goto fail;
}
error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
if (error) {
device_printf(dev, "bus_dmamap_load failed: %d\n", error);
goto fail;
}
dma->dma_size = size;
fail:
if (error)
vmxnet3_dma_free(sc, dma);
return (error);
}
static void
vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
{
if (dma->dma_tag != NULL) {
if (dma->dma_paddr != 0) {
bus_dmamap_sync(dma->dma_tag, dma->dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->dma_tag, dma->dma_map);
}
if (dma->dma_vaddr != NULL) {
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
dma->dma_map);
}
bus_dma_tag_destroy(dma->dma_tag);
}
bzero(dma, sizeof(struct vmxnet3_dma_alloc));
}
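/*
* Fetch a per-device tunable of the form hw.vmx.<unit>.<knob>, returning
* 'def' unchanged when the tunable is not set.
*/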
static int
vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
{
char path[64];
snprintf(path, sizeof(path),
"hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
TUNABLE_INT_FETCH(path, &def);
return (def);
}
/*
* Since this is a purely paravirtualized device, we do not have
* to worry about DMA coherency. But at times, we must make sure
* both the compiler and CPU do not reorder memory operations.
*/
static inline void
vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
{
switch (type) {
case VMXNET3_BARRIER_RD:
rmb();
break;
case VMXNET3_BARRIER_WR:
wmb();
break;
case VMXNET3_BARRIER_RDWR:
mb();
break;
default:
panic("%s: bad barrier type %d", __func__, type);
}
}
Index: head/sys/dev/vnic/nicvf_queues.c
===================================================================
--- head/sys/dev/vnic/nicvf_queues.c (revision 328217)
+++ head/sys/dev/vnic/nicvf_queues.c (revision 328218)
@@ -1,2366 +1,2366 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ifq.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <netinet6/ip6_var.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"
#define DEBUG
#undef DEBUG
#ifdef DEBUG
#define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__)
#else
#define dprintf(dev, fmt, ...)
#endif
MALLOC_DECLARE(M_NICVF);
static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
static void nicvf_sq_disable(struct nicvf *, int);
static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
static void nicvf_put_sq_desc(struct snd_queue *, int);
static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
boolean_t);
static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
static void nicvf_rbdr_task(void *, int);
static void nicvf_rbdr_task_nowait(void *, int);
struct rbuf_info {
bus_dma_tag_t dmat;
bus_dmamap_t dmap;
struct mbuf * mbuf;
};
#define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
/* Poll a register for a specific value */
static int
nicvf_poll_reg(struct nicvf *nic, int qidx, uint64_t reg, int bit_pos,
int bits, int val)
{
uint64_t bit_mask;
uint64_t reg_val;
int timeout = 10;
bit_mask = (1UL << bits) - 1;
bit_mask = (bit_mask << bit_pos);
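/* Poll in 1 ms steps, for up to 10 ms, until the field equals 'val'. */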
while (timeout) {
reg_val = nicvf_queue_reg_read(nic, reg, qidx);
if (((reg_val & bit_mask) >> bit_pos) == val)
return (0);
DELAY(1000);
timeout--;
}
device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
return (ETIMEDOUT);
}
/* Callback for bus_dmamap_load() */
static void
nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t *paddr;
KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
paddr = arg;
*paddr = segs->ds_addr;
}
/* Allocate memory for a queue's descriptors */
static int
nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
int q_len, int desc_size, int align_bytes)
{
int err, err_dmat;
/* Create DMA tag first */
err = bus_dma_tag_create(
bus_get_dma_tag(nic->dev), /* parent tag */
align_bytes, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filtfunc, filtfuncarg */
(q_len * desc_size), /* maxsize */
1, /* nsegments */
(q_len * desc_size), /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&dmem->dmat); /* dmat */
if (err != 0) {
device_printf(nic->dev,
"Failed to create busdma tag for descriptors ring\n");
return (err);
}
/* Allocate a segment of contiguous DMA-safe memory */
err = bus_dmamem_alloc(
dmem->dmat, /* DMA tag */
&dmem->base, /* virtual address */
(BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */
&dmem->dmap); /* DMA map */
if (err != 0) {
device_printf(nic->dev, "Failed to allocate DMA safe memory for"
"descriptors ring\n");
goto dmamem_fail;
}
err = bus_dmamap_load(
dmem->dmat,
dmem->dmap,
dmem->base,
(q_len * desc_size), /* allocation size */
nicvf_dmamap_q_cb, /* map to DMA address cb. */
&dmem->phys_base, /* physical address */
BUS_DMA_NOWAIT);
if (err != 0) {
device_printf(nic->dev,
"Cannot load DMA map of descriptors ring\n");
goto dmamap_fail;
}
dmem->q_len = q_len;
dmem->size = (desc_size * q_len);
return (0);
dmamap_fail:
bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
dmem->phys_base = 0;
dmamem_fail:
err_dmat = bus_dma_tag_destroy(dmem->dmat);
dmem->base = NULL;
KASSERT(err_dmat == 0,
("%s: Trying to destroy BUSY DMA tag", __func__));
return (err);
}
/* Free queue's descriptor memory */
static void
nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
int err;
if ((dmem == NULL) || (dmem->base == NULL))
return;
/* Unload a map */
bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(dmem->dmat, dmem->dmap);
/* Free DMA memory */
bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
/* Destroy DMA tag */
err = bus_dma_tag_destroy(dmem->dmat);
KASSERT(err == 0,
("%s: Trying to destroy BUSY DMA tag", __func__));
dmem->phys_base = 0;
dmem->base = NULL;
}
/*
* Allocate a buffer for packet reception.
* The HW returns the memory address to which the packet was DMA'ed, not a
* pointer into the RBDR ring, so save the buffer address at the start of
* the fragment and align the start address to a cache-aligned address.
*/
static __inline int
nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
{
struct mbuf *mbuf;
struct rbuf_info *rinfo;
bus_dma_segment_t segs[1];
int nsegs;
int err;
mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
if (mbuf == NULL)
return (ENOMEM);
/*
* The length is equal to the actual length plus one 128-byte line
* used as room for the rbuf_info structure.
*/
mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
&nsegs, BUS_DMA_NOWAIT);
if (err != 0) {
device_printf(nic->dev,
"Failed to map mbuf into DMA visible memory, err: %d\n",
err);
m_freem(mbuf);
bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
return (err);
}
if (nsegs != 1)
panic("Unexpected number of DMA segments for RB: %d", nsegs);
/*
* Now use the room for rbuf_info structure
* and adjust mbuf data and length.
*/
rinfo = (struct rbuf_info *)mbuf->m_data;
m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
rinfo->dmat = rbdr->rbdr_buff_dmat;
rinfo->dmap = dmap;
rinfo->mbuf = mbuf;
*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
return (0);
}
/* Retrieve mbuf for received packet */
static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
{
struct mbuf *mbuf;
struct rbuf_info *rinfo;
/* Get buffer start address and alignment offset */
rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
/* Now retrieve mbuf to give to stack */
mbuf = rinfo->mbuf;
if (__predict_false(mbuf == NULL)) {
panic("%s: Received packet fragment with NULL mbuf",
device_get_nameunit(nic->dev));
}
/*
* Clear the mbuf in the descriptor to indicate
* that this slot is processed and free to use.
*/
rinfo->mbuf = NULL;
bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
return (mbuf);
}
/* Allocate RBDR ring and populate receive buffers */
static int
nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
int buf_size, int qidx)
{
bus_dmamap_t dmap;
bus_addr_t rbuf;
struct rbdr_entry_t *desc;
int idx;
int err;
/* Allocate rbdr descriptors ring */
err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
if (err != 0) {
device_printf(nic->dev,
"Failed to create RBDR descriptors ring\n");
return (err);
}
rbdr->desc = rbdr->dmem.base;
/*
* Buffer size has to be in multiples of 128 bytes.
* Make room for metadata the size of one cache line (128 bytes).
*/
rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
rbdr->enable = TRUE;
rbdr->thresh = RBDR_THRESH;
rbdr->nic = nic;
rbdr->idx = qidx;
/*
* Create DMA tag for Rx buffers.
* Each map created using this tag is intended to store the Rx payload for
* one fragment plus one header structure containing rbuf_info (hence the
* additional 128-byte line, since the RB must be a multiple of the
* 128-byte cache line).
*/
if (buf_size > MCLBYTES) {
device_printf(nic->dev,
"Buffer size to large for mbuf cluster\n");
return (EINVAL);
}
err = bus_dma_tag_create(
bus_get_dma_tag(nic->dev), /* parent tag */
NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */
0, /* boundary */
DMAP_MAX_PHYSADDR, /* lowaddr */
DMAP_MIN_PHYSADDR, /* highaddr */
NULL, NULL, /* filtfunc, filtfuncarg */
roundup2(buf_size, MCLBYTES), /* maxsize */
1, /* nsegments */
roundup2(buf_size, MCLBYTES), /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&rbdr->rbdr_buff_dmat); /* dmat */
if (err != 0) {
device_printf(nic->dev,
"Failed to create busdma tag for RBDR buffers\n");
return (err);
}
rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
ring_len, M_NICVF, (M_WAITOK | M_ZERO));
for (idx = 0; idx < ring_len; idx++) {
err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
if (err != 0) {
device_printf(nic->dev,
"Failed to create DMA map for RB\n");
return (err);
}
rbdr->rbdr_buff_dmaps[idx] = dmap;
err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
DMA_BUFFER_LEN, &rbuf);
if (err != 0)
return (err);
desc = GET_RBDR_DESC(rbdr, idx);
desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
}
/* Allocate taskqueue */
TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
device_get_nameunit(nic->dev));
return (0);
}
/* Free RBDR ring and its receive buffers */
static void
nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
struct mbuf *mbuf;
struct queue_set *qs;
struct rbdr_entry_t *desc;
struct rbuf_info *rinfo;
bus_addr_t buf_addr;
int head, tail, idx;
int err;
qs = nic->qs;
if ((qs == NULL) || (rbdr == NULL))
return;
rbdr->enable = FALSE;
if (rbdr->rbdr_taskq != NULL) {
/* Remove tasks */
while (taskqueue_cancel(rbdr->rbdr_taskq,
&rbdr->rbdr_task_nowait, NULL) != 0) {
/* Finish the nowait task first */
taskqueue_drain(rbdr->rbdr_taskq,
&rbdr->rbdr_task_nowait);
}
taskqueue_free(rbdr->rbdr_taskq);
rbdr->rbdr_taskq = NULL;
while (taskqueue_cancel(taskqueue_thread,
&rbdr->rbdr_task, NULL) != 0) {
/* Now finish the sleepable task */
taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
}
}
/*
* Free all of the memory under the RB descriptors.
* There are assumptions here:
* 1. Corresponding RBDR is disabled
* - it is safe to operate using head and tail indexes
* 2. All buffers that were received are properly freed by
* the receive handler
* - there is no need to unload the DMA map and free the mbuf for
* any descriptors other than the unused ones
*/
if (rbdr->rbdr_buff_dmat != NULL) {
head = rbdr->head;
tail = rbdr->tail;
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
mbuf = rinfo->mbuf;
/* This will destroy everything including rinfo! */
m_freem(mbuf);
head++;
head &= (rbdr->dmem.q_len - 1);
}
/* Free tail descriptor */
desc = GET_RBDR_DESC(rbdr, tail);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
mbuf = rinfo->mbuf;
/* This will destroy everything including rinfo! */
m_freem(mbuf);
/* Destroy DMA maps */
for (idx = 0; idx < qs->rbdr_len; idx++) {
if (rbdr->rbdr_buff_dmaps[idx] == NULL)
continue;
err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
rbdr->rbdr_buff_dmaps[idx]);
KASSERT(err == 0,
("%s: Could not destroy DMA map for RB, desc: %d",
__func__, idx));
rbdr->rbdr_buff_dmaps[idx] = NULL;
}
/* Now destroy the tag */
err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
KASSERT(err == 0,
("%s: Trying to destroy BUSY DMA tag", __func__));
rbdr->head = 0;
rbdr->tail = 0;
}
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
/*
* Refill receive buffer descriptors with new buffers.
*/
static int
nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
{
struct nicvf *nic;
struct queue_set *qs;
int rbdr_idx;
int tail, qcount;
int refill_rb_cnt;
struct rbdr_entry_t *desc;
bus_dmamap_t dmap;
bus_addr_t rbuf;
boolean_t rb_alloc_fail;
int new_rb;
rb_alloc_fail = TRUE;
new_rb = 0;
nic = rbdr->nic;
qs = nic->qs;
rbdr_idx = rbdr->idx;
/* Check if it's enabled */
if (!rbdr->enable)
return (0);
/* Get the number of descriptors to be refilled */
qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
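/* The queued-descriptor count occupies the low 19 bits of STATUS0. */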
qcount &= 0x7FFFF;
/* The doorbell can be rung with at most ring size minus 1 entries */
if (qcount >= (qs->rbdr_len - 1)) {
rb_alloc_fail = FALSE;
goto out;
} else
refill_rb_cnt = qs->rbdr_len - qcount - 1;
/* Start filling descs from tail */
tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
while (refill_rb_cnt) {
tail++;
tail &= (rbdr->dmem.q_len - 1);
dmap = rbdr->rbdr_buff_dmaps[tail];
if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
DMA_BUFFER_LEN, &rbuf)) {
/* Something went wrong; stop refilling */
break;
}
desc = GET_RBDR_DESC(rbdr, tail);
desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
refill_rb_cnt--;
new_rb++;
}
/* make sure all memory stores are done before ringing doorbell */
wmb();
/* Check if buffer allocation failed */
if (refill_rb_cnt == 0)
rb_alloc_fail = FALSE;
/* Notify HW */
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
rbdr_idx, new_rb);
out:
if (!rb_alloc_fail) {
/*
* Re-enable RBDR interrupts only
* if the buffer allocation succeeded.
*/
nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
return (0);
}
return (ENOMEM);
}
/* Refill RBs even if sleep is needed to reclaim memory */
static void
nicvf_rbdr_task(void *arg, int pending)
{
struct rbdr *rbdr;
int err;
rbdr = (struct rbdr *)arg;
err = nicvf_refill_rbdr(rbdr, M_WAITOK);
if (__predict_false(err != 0)) {
panic("%s: Failed to refill RBs even when sleep enabled",
__func__);
}
}
/* Refill RBs as soon as possible without waiting */
static void
nicvf_rbdr_task_nowait(void *arg, int pending)
{
struct rbdr *rbdr;
int err;
rbdr = (struct rbdr *)arg;
err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
if (err != 0) {
/*
* Schedule the sleepable task, which is guaranteed
* to refill the buffers.
*/
taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
}
}
static int
nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
struct cqe_rx_t *cqe_rx, int cqe_type)
{
struct mbuf *mbuf;
struct rcv_queue *rq;
int rq_idx;
int err = 0;
rq_idx = cqe_rx->rq_idx;
rq = &nic->qs->rq[rq_idx];
/* Check for errors */
err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
if (err && !cqe_rx->rb_cnt)
return (0);
mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
if (mbuf == NULL) {
dprintf(nic->dev, "Packet not received\n");
return (0);
}
/* If error packet */
if (err != 0) {
m_freem(mbuf);
return (0);
}
if (rq->lro_enabled &&
((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
(mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
(CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
/*
* At this point it is known that there are no errors in the
* packet. Attempt an LRO enqueue; send to the stack if there are
* no LRO resources or the enqueue fails.
*/
if ((rq->lro.lro_cnt != 0) &&
(tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
return (0);
}
/*
* Push this packet to the stack later to avoid
* unlocking the completion task in the middle of its work.
*/
err = buf_ring_enqueue(cq->rx_br, mbuf);
if (err != 0) {
/*
* Failed to enqueue this mbuf.
* We don't drop it, just schedule another task.
*/
return (err);
}
return (0);
}
static void
nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
struct cqe_send_t *cqe_tx, int cqe_type)
{
bus_dmamap_t dmap;
struct mbuf *mbuf;
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
mbuf = NULL;
sq = &nic->qs->sq[cqe_tx->sq_idx];
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
return;
dprintf(nic->dev,
"%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
__func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
cqe_tx->sqe_ptr, hdr->subdesc_cnt);
dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
bus_dmamap_unload(sq->snd_buff_dmat, dmap);
mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
if (mbuf != NULL) {
m_freem(mbuf);
sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
}
static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
struct mbuf *mbuf;
struct ifnet *ifp;
int processed_cqe, work_done = 0, tx_done = 0;
int cqe_count, cqe_head;
struct queue_set *qs = nic->qs;
struct cmp_queue *cq = &qs->cq[cq_idx];
struct snd_queue *sq = &qs->sq[cq_idx];
struct rcv_queue *rq;
struct cqe_rx_t *cq_desc;
struct lro_ctrl *lro;
int rq_idx;
int cmp_err;
NICVF_CMP_LOCK(cq);
cmp_err = 0;
processed_cqe = 0;
/* Get the number of valid CQ entries to process */
cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
cqe_count &= CQ_CQE_COUNT;
if (cqe_count == 0)
goto out;
/* Get head of the valid CQ entries */
cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
cqe_head &= 0xFFFF;
dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
__func__, cq_idx, cqe_count, cqe_head);
while (processed_cqe < cqe_count) {
/* Get the CQ descriptor */
cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
cqe_head++;
cqe_head &= (cq->dmem.q_len - 1);
/* Prefetch next CQ descriptor */
__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
CQE_TYPE_RX);
if (__predict_false(cmp_err != 0)) {
/*
* Oops. Cannot finish now.
* Let's try again later.
*/
goto done;
}
work_done++;
break;
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
CQE_TYPE_SEND);
tx_done++;
break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
case CQE_TYPE_RX_TCP:
case CQE_TYPE_SEND_PTP:
/* Ignore for now */
break;
}
processed_cqe++;
}
done:
dprintf(nic->dev,
"%s CQ%d processed_cqe %d work_done %d\n",
__func__, cq_idx, processed_cqe, work_done);
/* Ring doorbell to inform H/W to reuse processed CQEs */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
if ((tx_done > 0) &&
((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
/* Reenable TXQ if it was stopped earlier due to the SQ being full */
if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
}
out:
/*
* Flush any outstanding LRO work
*/
rq_idx = cq_idx;
rq = &nic->qs->rq[rq_idx];
lro = &rq->lro;
tcp_lro_flush_all(lro);
NICVF_CMP_UNLOCK(cq);
ifp = nic->ifp;
/* Push received MBUFs to the stack */
while (!buf_ring_empty(cq->rx_br)) {
mbuf = buf_ring_dequeue_mc(cq->rx_br);
if (__predict_true(mbuf != NULL))
(*ifp->if_input)(ifp, mbuf);
}
return (cmp_err);
}
/*
* Qset error interrupt handler
*
* As of now only CQ errors are handled
*/
static void
nicvf_qs_err_task(void *arg, int pending)
{
struct nicvf *nic;
struct queue_set *qs;
int qidx;
uint64_t status;
boolean_t enable = TRUE;
nic = (struct nicvf *)arg;
qs = nic->qs;
/* Deactivate network interface */
if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
/* Check if it is CQ err */
for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
qidx);
if ((status & CQ_ERR_MASK) == 0)
continue;
/* Process already queued CQEs and reconfig CQ */
nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
nicvf_sq_disable(nic, qidx);
(void)nicvf_cq_intr_handler(nic, qidx);
nicvf_cmp_queue_config(nic, qs, qidx, enable);
nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
}
if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
/* Re-enable Qset error interrupt */
nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
static void
nicvf_cmp_task(void *arg, int pending)
{
struct cmp_queue *cq;
struct nicvf *nic;
int cmp_err;
cq = (struct cmp_queue *)arg;
nic = cq->nic;
/* Handle CQ descriptors */
cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
if (__predict_false(cmp_err != 0)) {
/*
* Reschedule the completion task since we did not process
* the entire CQ due to a Tx or Rx CQ parse error.
*/
taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
}
nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
/* Reenable interrupt (previously disabled in nicvf_intr_handler()) */
nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
}
/* Initialize completion queue */
static int
nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
int qidx)
{
int err;
/* Initialize lock */
snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
device_get_nameunit(nic->dev), qidx);
mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
NICVF_CQ_BASE_ALIGN_BYTES);
if (err != 0) {
device_printf(nic->dev,
"Could not allocate DMA memory for CQ\n");
return (err);
}
cq->desc = cq->dmem.base;
cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
cq->nic = nic;
cq->idx = qidx;
nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
&cq->mtx);
/* Allocate taskqueue */
TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
taskqueue_thread_enqueue, &cq->cmp_taskq);
taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
device_get_nameunit(nic->dev), qidx);
return (0);
}
static void
nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
if (cq == NULL)
return;
/*
* The completion queue itself should be disabled by now
* (ref. nicvf_snd_queue_config()).
* Verify that this is the case, and panic otherwise.
*/
if (cq->enable)
panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
if (cq->cmp_taskq != NULL) {
/* Remove task */
while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
taskqueue_free(cq->cmp_taskq);
cq->cmp_taskq = NULL;
}
/*
* The completion task may re-enable interrupts, so disable them
* only now, after we have finished processing the completion task.
* It is safe to do so since the corresponding CQ was already
* disabled.
*/
nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
NICVF_CMP_LOCK(cq);
nicvf_free_q_desc_mem(nic, &cq->dmem);
drbr_free(cq->rx_br, M_DEVBUF);
NICVF_CMP_UNLOCK(cq);
mtx_destroy(&cq->mtx);
memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
}
int
nicvf_xmit_locked(struct snd_queue *sq)
{
struct nicvf *nic;
struct ifnet *ifp;
struct mbuf *next;
int err;
NICVF_TX_LOCK_ASSERT(sq);
nic = sq->nic;
ifp = nic->ifp;
err = 0;
while ((next = drbr_peek(ifp, sq->br)) != NULL) {
/* Send a copy of the frame to the BPF listener */
ETHER_BPF_MTAP(ifp, next);
err = nicvf_tx_mbuf_locked(sq, &next);
if (err != 0) {
if (next == NULL)
drbr_advance(ifp, sq->br);
else
drbr_putback(ifp, sq->br, next);
break;
}
drbr_advance(ifp, sq->br);
}
return (err);
}
static void
nicvf_snd_task(void *arg, int pending)
{
struct snd_queue *sq = (struct snd_queue *)arg;
struct nicvf *nic;
struct ifnet *ifp;
int err;
nic = sq->nic;
ifp = nic->ifp;
/*
* Skip sending anything if the driver is not running,
* the SQ is full, or the link is down.
*/
if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING) || !nic->link_up)
return;
NICVF_TX_LOCK(sq);
err = nicvf_xmit_locked(sq);
NICVF_TX_UNLOCK(sq);
/* Try again */
if (err != 0)
taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
}
/* Initialize transmit queue */
static int
nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
int qidx)
{
size_t i;
int err;
/* Initialize TX lock for this queue */
snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
device_get_nameunit(nic->dev), qidx);
mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
NICVF_TX_LOCK(sq);
/* Allocate buffer ring */
sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
M_NOWAIT, &sq->mtx);
if (sq->br == NULL) {
device_printf(nic->dev,
"ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
err = ENOMEM;
goto error;
}
/* Allocate DMA memory for Tx descriptors */
err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
NICVF_SQ_BASE_ALIGN_BYTES);
if (err != 0) {
device_printf(nic->dev,
"Could not allocate DMA memory for SQ\n");
goto error;
}
sq->desc = sq->dmem.base;
sq->head = sq->tail = 0;
atomic_store_rel_int(&sq->free_cnt, q_len - 1);
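/* One descriptor is held back so that a full ring can be told from an empty one. */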
sq->thresh = SND_QUEUE_THRESH;
sq->idx = qidx;
sq->nic = nic;
/*
* Allocate DMA maps for Tx buffers
*/
/* Create DMA tag first */
err = bus_dma_tag_create(
bus_get_dma_tag(nic->dev), /* parent tag */
1, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filtfunc, filtfuncarg */
NICVF_TSO_MAXSIZE, /* maxsize */
NICVF_TSO_NSEGS, /* nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&sq->snd_buff_dmat); /* dmat */
if (err != 0) {
device_printf(nic->dev,
"Failed to create busdma tag for Tx buffers\n");
goto error;
}
/* Allocate send buffers array */
- sq->snd_buff = mallocarray(q_len, sizeof(*sq->snd_buff), M_NICVF,
+ sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
(M_NOWAIT | M_ZERO));
if (sq->snd_buff == NULL) {
device_printf(nic->dev,
"Could not allocate memory for Tx buffers array\n");
err = ENOMEM;
goto error;
}
/* Now populate maps */
for (i = 0; i < q_len; i++) {
err = bus_dmamap_create(sq->snd_buff_dmat, 0,
&sq->snd_buff[i].dmap);
if (err != 0) {
device_printf(nic->dev,
"Failed to create DMA maps for Tx buffers\n");
goto error;
}
}
NICVF_TX_UNLOCK(sq);
/* Allocate taskqueue */
TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sq->snd_taskq);
taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
device_get_nameunit(nic->dev), qidx);
return (0);
error:
NICVF_TX_UNLOCK(sq);
return (err);
}
static void
nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct queue_set *qs = nic->qs;
size_t i;
int err;
if (sq == NULL)
return;
if (sq->snd_taskq != NULL) {
/* Remove task */
while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
taskqueue_drain(sq->snd_taskq, &sq->snd_task);
taskqueue_free(sq->snd_taskq);
sq->snd_taskq = NULL;
}
NICVF_TX_LOCK(sq);
if (sq->snd_buff_dmat != NULL) {
if (sq->snd_buff != NULL) {
for (i = 0; i < qs->sq_len; i++) {
m_freem(sq->snd_buff[i].mbuf);
sq->snd_buff[i].mbuf = NULL;
bus_dmamap_unload(sq->snd_buff_dmat,
sq->snd_buff[i].dmap);
err = bus_dmamap_destroy(sq->snd_buff_dmat,
sq->snd_buff[i].dmap);
/*
* If bus_dmamap_destroy() fails, it can cause
* a random panic later if the tag is also
* destroyed in the process.
*/
KASSERT(err == 0,
("%s: Could not destroy DMA map for SQ",
__func__));
}
}
free(sq->snd_buff, M_NICVF);
err = bus_dma_tag_destroy(sq->snd_buff_dmat);
KASSERT(err == 0,
("%s: Trying to destroy BUSY DMA tag", __func__));
}
/* Free private driver ring for this send queue */
if (sq->br != NULL)
drbr_free(sq->br, M_DEVBUF);
if (sq->dmem.base != NULL)
nicvf_free_q_desc_mem(nic, &sq->dmem);
NICVF_TX_UNLOCK(sq);
/* Destroy Tx lock */
mtx_destroy(&sq->mtx);
memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
}
static void
nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
/* Disable send queue */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
/* Check if SQ is stopped */
if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
return;
/* Reset send queue */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}
static void
nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
union nic_mbx mbx = {};
/* Make sure all packets in the pipeline are written back into mem */
mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
nicvf_send_msg_to_pf(nic, &mbx);
}
static void
nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
/* Disable timer threshold (doesn't get reset upon CQ reset) */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
/* Disable completion queue */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
/* Reset completion queue */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}
static void
nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
{
uint64_t tmp, fifo_state;
int timeout = 10;
/* Save head and tail pointers for freeing up buffers */
rbdr->head =
nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
rbdr->tail =
nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
/*
* If the RBDR FIFO is in 'FAIL' state then do a reset first
* before reclaiming.
*/
fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
if (((fifo_state >> 62) & 0x03) == 0x3) {
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
qidx, NICVF_RBDR_RESET);
}
/* Disable RBDR */
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
return;
while (1) {
tmp = nicvf_queue_reg_read(nic,
NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
break;
DELAY(1000);
timeout--;
if (!timeout) {
device_printf(nic->dev,
"Failed polling on prefetch status\n");
return;
}
}
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
NICVF_RBDR_RESET);
if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
return;
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
return;
}
/* Configures receive queue */
static void
nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable)
{
union nic_mbx mbx = {};
struct rcv_queue *rq;
struct rq_cfg rq_cfg;
struct ifnet *ifp;
struct lro_ctrl *lro;
ifp = nic->ifp;
rq = &qs->rq[qidx];
rq->enable = enable;
lro = &rq->lro;
/* Disable receive queue */
nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
if (!rq->enable) {
nicvf_reclaim_rcv_queue(nic, qs, qidx);
/* Free LRO memory */
tcp_lro_free(lro);
rq->lro_enabled = FALSE;
return;
}
/* Configure LRO if enabled */
rq->lro_enabled = FALSE;
if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
if (tcp_lro_init(lro) != 0) {
device_printf(nic->dev,
"Failed to initialize LRO for RXQ%d\n", qidx);
} else {
rq->lro_enabled = TRUE;
lro->ifp = nic->ifp;
}
}
rq->cq_qs = qs->vnic_id;
rq->cq_idx = qidx;
rq->start_rbdr_qs = qs->vnic_id;
rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
rq->cont_rbdr_qs = qs->vnic_id;
rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
/* All writes of RBDR data are to be loaded into the L2 cache as well */
rq->caching = 1;
/* Send a mailbox msg to PF to config RQ */
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
mbx.rq.qs_num = qs->vnic_id;
mbx.rq.rq_num = qidx;
mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
(rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
(rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
(rq->start_qs_rbdr_idx);
nicvf_send_msg_to_pf(nic, &mbx);
mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
nicvf_send_msg_to_pf(nic, &mbx);
/*
* RQ drop config
* Enable CQ drop to reserve sufficient CQEs for all tx packets
*/
mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx);
nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
/* Enable Receive queue */
rq_cfg.ena = 1;
rq_cfg.tcp_ena = 0;
nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
*(uint64_t *)&rq_cfg);
}
/* Configures completion queue */
static void
nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, boolean_t enable)
{
struct cmp_queue *cq;
struct cq_cfg cq_cfg;
cq = &qs->cq[qidx];
cq->enable = enable;
if (!cq->enable) {
nicvf_reclaim_cmp_queue(nic, qs, qidx);
return;
}
/* Reset completion queue */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
/* Set completion queue base address */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
(uint64_t)(cq->dmem.phys_base));
/* Enable Completion queue */
cq_cfg.ena = 1;
cq_cfg.reset = 0;
cq_cfg.caching = 0;
cq_cfg.qsize = CMP_QSIZE;
cq_cfg.avg_con = 0;
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
/* Set threshold value for interrupt generation */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
nic->cq_coalesce_usecs);
}
/* Configures transmit queue */
static void
nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
boolean_t enable)
{
union nic_mbx mbx = {};
struct snd_queue *sq;
struct sq_cfg sq_cfg;
sq = &qs->sq[qidx];
sq->enable = enable;
if (!sq->enable) {
nicvf_reclaim_snd_queue(nic, qs, qidx);
return;
}
/* Reset send queue */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
sq->cq_qs = qs->vnic_id;
sq->cq_idx = qidx;
/* Send a mailbox msg to PF to config SQ */
mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
mbx.sq.qs_num = qs->vnic_id;
mbx.sq.sq_num = qidx;
mbx.sq.sqs_mode = nic->sqs_mode;
mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
nicvf_send_msg_to_pf(nic, &mbx);
/* Set queue base address */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
(uint64_t)(sq->dmem.phys_base));
/* Enable send queue & set queue size */
sq_cfg.ena = 1;
sq_cfg.reset = 0;
sq_cfg.ldwb = 0;
sq_cfg.qsize = SND_QSIZE;
sq_cfg.tstmp_bgx_intf = 0;
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
/* Set threshold value for interrupt generation */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
}
/* Configures receive buffer descriptor ring */
static void
nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
boolean_t enable)
{
struct rbdr *rbdr;
struct rbdr_cfg rbdr_cfg;
rbdr = &qs->rbdr[qidx];
nicvf_reclaim_rbdr(nic, rbdr, qidx);
if (!enable)
return;
/* Set descriptor base address */
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
(uint64_t)(rbdr->dmem.phys_base));
/* Enable RBDR & set queue size */
/* Buffer size should be in multiples of 128 bytes */
rbdr_cfg.ena = 1;
rbdr_cfg.reset = 0;
rbdr_cfg.ldwb = 0;
rbdr_cfg.qsize = RBDR_SIZE;
rbdr_cfg.avg_con = 0;
rbdr_cfg.lines = rbdr->dma_size / 128;
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
*(uint64_t *)&rbdr_cfg);
/* Notify HW */
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
qs->rbdr_len - 1);
/* Set threshold value for interrupt generation */
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
rbdr->thresh - 1);
}
/* Requests PF to assign and enable Qset */
void
nicvf_qset_config(struct nicvf *nic, boolean_t enable)
{
union nic_mbx mbx = {};
struct queue_set *qs;
struct qs_cfg *qs_cfg;
qs = nic->qs;
if (qs == NULL) {
device_printf(nic->dev,
"Qset is still not allocated, don't init queues\n");
return;
}
qs->enable = enable;
qs->vnic_id = nic->vf_id;
/* Send a mailbox msg to PF to config Qset */
mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
mbx.qs.num = qs->vnic_id;
mbx.qs.cfg = 0;
qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
if (qs->enable) {
qs_cfg->ena = 1;
qs_cfg->vnic = qs->vnic_id;
}
nicvf_send_msg_to_pf(nic, &mbx);
}
static void
nicvf_free_resources(struct nicvf *nic)
{
int qidx;
struct queue_set *qs;
qs = nic->qs;
/*
* Remove QS error task first since it has to be dead
* to safely free completion queue tasks.
*/
if (qs->qs_err_taskq != NULL) {
/* Shut down QS error tasks */
while (taskqueue_cancel(qs->qs_err_taskq,
&qs->qs_err_task, NULL) != 0) {
taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
}
taskqueue_free(qs->qs_err_taskq);
qs->qs_err_taskq = NULL;
}
/* Free receive buffer descriptor ring */
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
/* Free completion queue */
for (qidx = 0; qidx < qs->cq_cnt; qidx++)
nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
/* Free send queue */
for (qidx = 0; qidx < qs->sq_cnt; qidx++)
nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}
static int
nicvf_alloc_resources(struct nicvf *nic)
{
struct queue_set *qs = nic->qs;
int qidx;
/* Alloc receive buffer descriptor ring */
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
DMA_BUFFER_LEN, qidx))
goto alloc_fail;
}
/* Alloc send queue */
for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
goto alloc_fail;
}
/* Alloc completion queue */
for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
goto alloc_fail;
}
/* Allocate QS error taskqueue */
TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
taskqueue_thread_enqueue, &qs->qs_err_taskq);
taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
device_get_nameunit(nic->dev));
return (0);
alloc_fail:
nicvf_free_resources(nic);
return (ENOMEM);
}
int
nicvf_set_qset_resources(struct nicvf *nic)
{
struct queue_set *qs;
qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
nic->qs = qs;
/* Set count of each queue */
qs->rbdr_cnt = RBDR_CNT;
qs->rq_cnt = RCV_QUEUE_CNT;
qs->sq_cnt = SND_QUEUE_CNT;
qs->cq_cnt = CMP_QUEUE_CNT;
/* Set queue lengths */
qs->rbdr_len = RCV_BUF_COUNT;
qs->sq_len = SND_QUEUE_LEN;
qs->cq_len = CMP_QUEUE_LEN;
nic->rx_queues = qs->rq_cnt;
nic->tx_queues = qs->sq_cnt;
return (0);
}
int
nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
{
boolean_t disable = FALSE;
struct queue_set *qs;
int qidx;
qs = nic->qs;
if (qs == NULL)
return (0);
if (enable) {
if (nicvf_alloc_resources(nic) != 0)
return (ENOMEM);
for (qidx = 0; qidx < qs->sq_cnt; qidx++)
nicvf_snd_queue_config(nic, qs, qidx, enable);
for (qidx = 0; qidx < qs->cq_cnt; qidx++)
nicvf_cmp_queue_config(nic, qs, qidx, enable);
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_rbdr_config(nic, qs, qidx, enable);
for (qidx = 0; qidx < qs->rq_cnt; qidx++)
nicvf_rcv_queue_config(nic, qs, qidx, enable);
} else {
for (qidx = 0; qidx < qs->rq_cnt; qidx++)
nicvf_rcv_queue_config(nic, qs, qidx, disable);
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_rbdr_config(nic, qs, qidx, disable);
for (qidx = 0; qidx < qs->sq_cnt; qidx++)
nicvf_snd_queue_config(nic, qs, qidx, disable);
for (qidx = 0; qidx < qs->cq_cnt; qidx++)
nicvf_cmp_queue_config(nic, qs, qidx, disable);
nicvf_free_resources(nic);
}
return (0);
}
/*
* Get a free descriptor from the SQ.
* Returns the descriptor number; callers convert it to a pointer via
* GET_SQ_DESC().
*/
static __inline int
nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
int qentry;
qentry = sq->tail;
atomic_subtract_int(&sq->free_cnt, desc_cnt);
sq->tail += desc_cnt;
sq->tail &= (sq->dmem.q_len - 1);
return (qentry);
}
/* Free descriptor back to SQ for future use */
static void
nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
atomic_add_int(&sq->free_cnt, desc_cnt);
sq->head += desc_cnt;
sq->head &= (sq->dmem.q_len - 1);
}
static __inline int
nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
qentry++;
qentry &= (sq->dmem.q_len - 1);
return (qentry);
}
static void
nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
uint64_t sq_cfg;
sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
sq_cfg |= NICVF_SQ_EN;
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
/* Ring doorbell so that H/W restarts processing SQEs */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}
static void
nicvf_sq_disable(struct nicvf *nic, int qidx)
{
uint64_t sq_cfg;
sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
sq_cfg &= ~NICVF_SQ_EN;
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}
static void
nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
uint64_t head;
struct snd_buff *snd_buff;
struct sq_hdr_subdesc *hdr;
NICVF_TX_LOCK(sq);
head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
while (sq->head != head) {
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
nicvf_put_sq_desc(sq, 1);
continue;
}
snd_buff = &sq->snd_buff[sq->head];
if (snd_buff->mbuf != NULL) {
bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
m_freem(snd_buff->mbuf);
sq->snd_buff[sq->head].mbuf = NULL;
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}
NICVF_TX_UNLOCK(sq);
}
/*
* Add SQ HEADER subdescriptor.
* First subdescriptor for every send descriptor.
*/
static __inline int
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
int subdesc_cnt, struct mbuf *mbuf, int len)
{
struct nicvf *nic;
struct sq_hdr_subdesc *hdr;
struct ether_vlan_header *eh;
#ifdef INET
struct ip *ip;
struct tcphdr *th;
#endif
uint16_t etype;
int ehdrlen, iphlen, poff, proto;
nic = sq->nic;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
sq->snd_buff[qentry].mbuf = mbuf;
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
/* Enable notification via CQE after processing SQE */
hdr->post_cqe = 1;
/* Number of subdescriptors following this one */
hdr->subdesc_cnt = subdesc_cnt;
hdr->tot_len = len;
eh = mtod(mbuf, struct ether_vlan_header *);
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
etype = ntohs(eh->evl_proto);
} else {
ehdrlen = ETHER_HDR_LEN;
etype = ntohs(eh->evl_encap_proto);
}
poff = proto = -1;
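/*
* Determine the L4 header offset (poff) and protocol so that the checksum
* and TSO fields can be filled in below.
*/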
switch (etype) {
#ifdef INET6
case ETHERTYPE_IPV6:
if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip6_hdr));
sq->snd_buff[qentry].mbuf = NULL;
if (mbuf == NULL)
return (ENOBUFS);
}
poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
if (poff < 0)
return (ENOBUFS);
poff += ehdrlen;
break;
#endif
#ifdef INET
case ETHERTYPE_IP:
if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
sq->snd_buff[qentry].mbuf = mbuf;
if (mbuf == NULL)
return (ENOBUFS);
}
if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
hdr->csum_l3 = 1; /* Enable IP csum calculation */
ip = (struct ip *)(mbuf->m_data + ehdrlen);
iphlen = ip->ip_hl << 2;
poff = ehdrlen + iphlen;
proto = ip->ip_p;
break;
#endif
}
#if defined(INET6) || defined(INET)
if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
switch (proto) {
case IPPROTO_TCP:
if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
break;
if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
sq->snd_buff[qentry].mbuf = mbuf;
if (mbuf == NULL)
return (ENOBUFS);
}
hdr->csum_l4 = SEND_L4_CSUM_TCP;
break;
case IPPROTO_UDP:
if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
break;
if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
sq->snd_buff[qentry].mbuf = mbuf;
if (mbuf == NULL)
return (ENOBUFS);
}
hdr->csum_l4 = SEND_L4_CSUM_UDP;
break;
case IPPROTO_SCTP:
if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
break;
if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
sq->snd_buff[qentry].mbuf = mbuf;
if (mbuf == NULL)
return (ENOBUFS);
}
hdr->csum_l4 = SEND_L4_CSUM_SCTP;
break;
default:
break;
}
hdr->l3_offset = ehdrlen;
hdr->l4_offset = poff;
}
if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
hdr->tso = 1;
hdr->tso_start = poff + (th->th_off * 4);
hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
hdr->inner_l3_offset = ehdrlen - 2;
nic->drv_stats.tx_tso++;
}
#endif
return (0);
}
/*
* SQ GATHER subdescriptor
* Must follow HDR descriptor
*/
static inline void
nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, int size,
uint64_t data)
{
struct sq_gather_subdesc *gather;
qentry &= (sq->dmem.q_len - 1);
gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
memset(gather, 0, SND_QUEUE_DESC_SIZE);
gather->subdesc_type = SQ_DESC_TYPE_GATHER;
gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
gather->size = size;
gather->addr = data;
}
/* Put an mbuf onto an SQ for packet transfer. */
static int
nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
{
bus_dma_segment_t segs[256];
struct snd_buff *snd_buff;
size_t seg;
int nsegs, qentry;
int subdesc_cnt;
int err;
NICVF_TX_LOCK_ASSERT(sq);
if (sq->free_cnt == 0)
return (ENOBUFS);
snd_buff = &sq->snd_buff[sq->tail];
err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
*mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
if (__predict_false(err != 0)) {
/* ARM64TODO: Add mbuf defragmenting if we lack maps */
m_freem(*mbufp);
*mbufp = NULL;
return (err);
}
/* Compute how many subdescriptors are required */
subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
if (subdesc_cnt > sq->free_cnt) {
/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
return (ENOBUFS);
}
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
/* Add SQ header subdesc */
err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
(*mbufp)->m_pkthdr.len);
if (err != 0) {
nicvf_put_sq_desc(sq, subdesc_cnt);
bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
if (err == ENOBUFS) {
m_freem(*mbufp);
*mbufp = NULL;
}
return (err);
}
/* Add SQ gather subdescs */
for (seg = 0; seg < nsegs; seg++) {
qentry = nicvf_get_nxt_sqentry(sq, qentry);
nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
segs[seg].ds_addr);
}
/* make sure all memory stores are done before ringing doorbell */
bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
__func__, sq->idx, subdesc_cnt);
/* Inform HW to xmit new packet */
nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
sq->idx, subdesc_cnt);
return (0);
}
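A hedged caller sketch of the contract above: nicvf_tx_mbuf_locked() must be entered with the send-queue lock held, and on error it may already have freed the mbuf and cleared the caller's pointer. The nicvf_xmit_one() name and the NICVF_TX_LOCK()/NICVF_TX_UNLOCK() macros are assumptions for illustration, taken as the counterparts of the NICVF_TX_LOCK_ASSERT() used above.

/*
 * Hypothetical wrapper, illustration only.
 */
static int
nicvf_xmit_one(struct snd_queue *sq, struct mbuf **mp)
{
	int err;

	NICVF_TX_LOCK(sq);
	err = nicvf_tx_mbuf_locked(sq, mp);
	NICVF_TX_UNLOCK(sq);

	/*
	 * On failure the mbuf may already have been freed and *mp set
	 * to NULL; only a non-NULL *mp is still owned by the caller
	 * (e.g. to requeue on a buf_ring) at this point.
	 */
	return (err);
}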
static __inline u_int
frag_num(u_int i)
{
#if BYTE_ORDER == BIG_ENDIAN
return ((i & ~3) + 3 - (i & 3));
#else
return (i);
#endif
}
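A standalone illustration of the remapping frag_num() performs on big-endian hosts, where each group of four 16-bit receive-buffer lengths is reversed (presumably because the hardware packs them four to a little-endian 64-bit word); the program below is editorial, not driver code.

#include <stdio.h>

/* Mirror of frag_num() above, for illustration outside the kernel. */
static unsigned int
frag_num_be(unsigned int i)
{

	return ((i & ~3u) + 3 - (i & 3));
}

int
main(void)
{
	unsigned int i;

	/* Prints: 0->3 1->2 2->1 3->0 4->7 5->6 6->5 7->4 */
	for (i = 0; i < 8; i++)
		printf("%u->%u ", i, frag_num_be(i));
	printf("\n");
	return (0);
}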
/* Returns MBUF for a received packet */
struct mbuf *
nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
int frag;
int payload_len = 0;
struct mbuf *mbuf;
struct mbuf *mbuf_frag;
uint16_t *rb_lens = NULL;
uint64_t *rb_ptrs = NULL;
mbuf = NULL;
rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
__func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
payload_len = rb_lens[frag_num(frag)];
if (frag == 0) {
/* First fragment */
mbuf = nicvf_rb_ptr_to_mbuf(nic,
(*rb_ptrs - cqe_rx->align_pad));
mbuf->m_len = payload_len;
mbuf->m_data += cqe_rx->align_pad;
if_setrcvif(mbuf, nic->ifp);
} else {
/* Add fragments */
mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
m_append(mbuf, payload_len, mbuf_frag->m_data);
m_freem(mbuf_frag);
}
/* Next buffer pointer */
rb_ptrs++;
}
if (__predict_true(mbuf != NULL)) {
m_fixhdr(mbuf);
mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
/*
* HW by default verifies IP & TCP/UDP/SCTP checksums
*/
if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
mbuf->m_pkthdr.csum_flags =
(CSUM_IP_CHECKED | CSUM_IP_VALID);
}
switch (cqe_rx->l4_type) {
case L4TYPE_UDP:
case L4TYPE_TCP: /* fall through */
mbuf->m_pkthdr.csum_flags |=
(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
mbuf->m_pkthdr.csum_data = 0xffff;
break;
case L4TYPE_SCTP:
mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
break;
default:
break;
}
}
}
return (mbuf);
}
/* Enable interrupt */
void
nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
uint64_t reg_val;
reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
switch (int_type) {
case NICVF_INTR_CQ:
reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
break;
case NICVF_INTR_SQ:
reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
break;
case NICVF_INTR_RBDR:
reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
break;
case NICVF_INTR_PKT_DROP:
reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
break;
case NICVF_INTR_TCP_TIMER:
reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
break;
case NICVF_INTR_MBOX:
reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
break;
case NICVF_INTR_QS_ERR:
reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
break;
default:
device_printf(nic->dev,
"Failed to enable interrupt: unknown type\n");
break;
}
nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}
/* Disable interrupt */
void
nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
uint64_t reg_val = 0;
switch (int_type) {
case NICVF_INTR_CQ:
reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
break;
case NICVF_INTR_SQ:
reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
break;
case NICVF_INTR_RBDR:
reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
break;
case NICVF_INTR_PKT_DROP:
reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
break;
case NICVF_INTR_TCP_TIMER:
reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
break;
case NICVF_INTR_MBOX:
reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
break;
case NICVF_INTR_QS_ERR:
reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
break;
default:
device_printf(nic->dev,
"Failed to disable interrupt: unknown type\n");
break;
}
nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}
/* Clear interrupt */
void
nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
uint64_t reg_val = 0;
switch (int_type) {
case NICVF_INTR_CQ:
reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
break;
case NICVF_INTR_SQ:
reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
break;
case NICVF_INTR_RBDR:
reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
break;
case NICVF_INTR_PKT_DROP:
reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
break;
case NICVF_INTR_TCP_TIMER:
reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
break;
case NICVF_INTR_MBOX:
reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
break;
case NICVF_INTR_QS_ERR:
reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
break;
default:
device_printf(nic->dev,
"Failed to clear interrupt: unknown type\n");
break;
}
nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}
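The three routines above follow the usual write-1-to-set / write-1-to-clear convention suggested by the register names: NIC_VF_ENA_W1S sets enable bits, NIC_VF_ENA_W1C clears them, and NIC_VF_INT acknowledges pending bits. A toy standalone model of those assumed semantics, for illustration only:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of W1S/W1C register pairs (illustration only): writing a 1
 * bit to the W1S alias sets that bit in the underlying register, writing
 * a 1 bit to the W1C alias clears it, and 0 bits are ignored.
 */
struct intr_regs {
	uint64_t ena;		/* effective interrupt enable bits */
	uint64_t pending;	/* latched interrupt status bits */
};

static void w1s_write(uint64_t *reg, uint64_t val) { *reg |= val; }
static void w1c_write(uint64_t *reg, uint64_t val) { *reg &= ~val; }

int
main(void)
{
	struct intr_regs r = { 0, 0x5 };
	const uint64_t cq0 = 1UL << 0;	/* assumed bit position */

	w1s_write(&r.ena, cq0);		/* enable, cf. NIC_VF_ENA_W1S */
	w1c_write(&r.pending, cq0);	/* acknowledge, cf. NIC_VF_INT */
	w1c_write(&r.ena, cq0);		/* disable, cf. NIC_VF_ENA_W1C */
	printf("ena=%#jx pending=%#jx\n", (uintmax_t)r.ena,
	    (uintmax_t)r.pending);
	return (0);
}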
/* Check if interrupt is enabled */
int
nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
uint64_t reg_val;
uint64_t mask = 0xff;
reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
switch (int_type) {
case NICVF_INTR_CQ:
mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
break;
case NICVF_INTR_SQ:
mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
break;
case NICVF_INTR_RBDR:
mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
break;
case NICVF_INTR_PKT_DROP:
mask = NICVF_INTR_PKT_DROP_MASK;
break;
case NICVF_INTR_TCP_TIMER:
mask = NICVF_INTR_TCP_TIMER_MASK;
break;
case NICVF_INTR_MBOX:
mask = NICVF_INTR_MBOX_MASK;
break;
case NICVF_INTR_QS_ERR:
mask = NICVF_INTR_QS_ERR_MASK;
break;
default:
device_printf(nic->dev,
"Failed to check interrupt enable: unknown type\n");
break;
}
return (reg_val & mask);
}
void
nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
struct rcv_queue *rq;
#define GET_RQ_STATS(reg) \
nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
(rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
rq = &nic->qs->rq[rq_idx];
rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}
void
nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
struct snd_queue *sq;
#define GET_SQ_STATS(reg) \
nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
(sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
sq = &nic->qs->sq[sq_idx];
sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
/* Check for errors in the receive completion queue entry */
int
nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
struct cqe_rx_t *cqe_rx)
{
struct nicvf_hw_stats *stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
drv_stats->rx_frames_ok++;
return (0);
}
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
stats->rx_bgx_truncated_pkts++;
break;
case CQ_RX_ERROP_RE_JABBER:
stats->rx_jabber_errs++;
break;
case CQ_RX_ERROP_RE_FCS:
stats->rx_fcs_errs++;
break;
case CQ_RX_ERROP_RE_RX_CTL:
stats->rx_bgx_errs++;
break;
case CQ_RX_ERROP_PREL2_ERR:
stats->rx_prel2_errs++;
break;
case CQ_RX_ERROP_L2_MAL:
stats->rx_l2_hdr_malformed++;
break;
case CQ_RX_ERROP_L2_OVERSIZE:
stats->rx_oversize++;
break;
case CQ_RX_ERROP_L2_UNDERSIZE:
stats->rx_undersize++;
break;
case CQ_RX_ERROP_L2_LENMISM:
stats->rx_l2_len_mismatch++;
break;
case CQ_RX_ERROP_L2_PCLP:
stats->rx_l2_pclp++;
break;
case CQ_RX_ERROP_IP_NOT:
stats->rx_ip_ver_errs++;
break;
case CQ_RX_ERROP_IP_CSUM_ERR:
stats->rx_ip_csum_errs++;
break;
case CQ_RX_ERROP_IP_MAL:
stats->rx_ip_hdr_malformed++;
break;
case CQ_RX_ERROP_IP_MALD:
stats->rx_ip_payload_malformed++;
break;
case CQ_RX_ERROP_IP_HOP:
stats->rx_ip_ttl_errs++;
break;
case CQ_RX_ERROP_L3_PCLP:
stats->rx_l3_pclp++;
break;
case CQ_RX_ERROP_L4_MAL:
stats->rx_l4_malformed++;
break;
case CQ_RX_ERROP_L4_CHK:
stats->rx_l4_csum_errs++;
break;
case CQ_RX_ERROP_UDP_LEN:
stats->rx_udp_len_errs++;
break;
case CQ_RX_ERROP_L4_PORT:
stats->rx_l4_port_errs++;
break;
case CQ_RX_ERROP_TCP_FLAG:
stats->rx_tcp_flag_errs++;
break;
case CQ_RX_ERROP_TCP_OFFSET:
stats->rx_tcp_offset_errs++;
break;
case CQ_RX_ERROP_L4_PCLP:
stats->rx_l4_pclp++;
break;
case CQ_RX_ERROP_RBDR_TRUNC:
stats->rx_truncated_pkts++;
break;
}
return (1);
}
/* Check for errors in the send completion queue entry */
int
nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
struct cqe_send_t *cqe_tx)
{
struct cmp_queue_stats *stats = &cq->stats;
switch (cqe_tx->send_status) {
case CQ_TX_ERROP_GOOD:
stats->tx.good++;
return (0);
case CQ_TX_ERROP_DESC_FAULT:
stats->tx.desc_fault++;
break;
case CQ_TX_ERROP_HDR_CONS_ERR:
stats->tx.hdr_cons_err++;
break;
case CQ_TX_ERROP_SUBDC_ERR:
stats->tx.subdesc_err++;
break;
case CQ_TX_ERROP_IMM_SIZE_OFLOW:
stats->tx.imm_size_oflow++;
break;
case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
stats->tx.data_seq_err++;
break;
case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
stats->tx.mem_seq_err++;
break;
case CQ_TX_ERROP_LOCK_VIOL:
stats->tx.lock_viol++;
break;
case CQ_TX_ERROP_DATA_FAULT:
stats->tx.data_fault++;
break;
case CQ_TX_ERROP_TSTMP_CONFLICT:
stats->tx.tstmp_conflict++;
break;
case CQ_TX_ERROP_TSTMP_TIMEOUT:
stats->tx.tstmp_timeout++;
break;
case CQ_TX_ERROP_MEM_FAULT:
stats->tx.mem_fault++;
break;
case CQ_TX_ERROP_CK_OVERLAP:
stats->tx.csum_overlap++;
break;
case CQ_TX_ERROP_CK_OFLOW:
stats->tx.csum_overflow++;
break;
}
return (1);
}
Index: head/sys/dev/xen/blkback/blkback.c
===================================================================
--- head/sys/dev/xen/blkback/blkback.c (revision 328217)
+++ head/sys/dev/xen/blkback/blkback.c (revision 328218)
@@ -1,3938 +1,3938 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2009-2012 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
* Ken Merry (Spectra Logic Corporation)
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/**
* \file blkback.c
*
* \brief Device driver supporting the vending of block storage from
* a FreeBSD domain to other domains.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/kdb.h>
#include <sys/module.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/bitstring.h>
#include <sys/sdt.h>
#include <geom/geom.h>
#include <machine/_inttypes.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <xen/xen-os.h>
#include <xen/blkif.h>
#include <xen/gnttab.h>
#include <xen/xen_intr.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/grant_table.h>
#include <xen/xenbus/xenbusvar.h>
/*--------------------------- Compile-time Tunables --------------------------*/
/**
* The maximum number of shared memory ring pages we will allow in a
* negotiated block-front/back communication channel. Allow enough
* ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
*/
#define XBB_MAX_RING_PAGES 32
/**
* The maximum number of outstanding request blocks (request headers plus
* additional segment blocks) we will allow in a negotiated block-front/back
* communication channel.
*/
#define XBB_MAX_REQUESTS \
__CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES)
/**
* \brief Define to force all I/O to be performed on memory owned by the
* backend device, with a copy-in/out to the remote domain's memory.
*
* \note This option is currently required when this driver's domain is
* operating in HVM mode on a system using an IOMMU.
*
* This driver uses Xen's grant table API to gain access to the memory of
* the remote domains it serves. When our domain is operating in PV mode,
* the grant table mechanism directly updates our domain's page table entries
* to point to the physical pages of the remote domain. This scheme guarantees
* that blkback and the backing devices it uses can safely perform DMA
* operations to satisfy requests. In HVM mode, Xen may use a HW IOMMU to
* ensure that our domain cannot DMA to pages owned by another domain. As
* of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant
* table API. For this reason, in HVM mode, we must bounce all requests into
* memory that is mapped into our domain at domain startup and thus has
* valid IOMMU mappings.
*/
#define XBB_USE_BOUNCE_BUFFERS
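A standalone sketch (illustration only, with made-up buffer names) of the bounce-buffer pattern described above: on writes the grant-mapped front-end data is copied into backend-owned memory before the I/O is issued, and on reads the device data lands in that memory and is copied back out afterwards.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustration only (not driver code).  "remote" stands for the
 * grant-mapped front-end pages and "bounce" for backend-owned memory
 * that is safe to hand to DMA-capable devices.
 */
#define XFER_LEN 16

int
main(void)
{
	uint8_t remote[XFER_LEN] = "front-end data!";
	uint8_t bounce[XFER_LEN];
	uint8_t device[XFER_LEN];

	/* Write path: bounce-in before issuing the backend I/O. */
	memcpy(bounce, remote, sizeof(bounce));
	memcpy(device, bounce, sizeof(device));	/* stands in for the I/O */

	/* Read path: backend I/O lands in bounce, then is bounced out. */
	memcpy(bounce, device, sizeof(bounce));
	memcpy(remote, bounce, sizeof(remote));

	printf("%.*s\n", XFER_LEN, remote);
	return (0);
}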
/**
* \brief Define to enable rudimentary request logging to the console.
*/
#undef XBB_DEBUG
/*---------------------------------- Macros ----------------------------------*/
/**
* Custom malloc type for all driver allocations.
*/
static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
#ifdef XBB_DEBUG
#define DPRINTF(fmt, args...) \
printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif
/**
* The maximum mapped region size per request we will allow in a negotiated
* block-front/back communication channel.
*/
#define XBB_MAX_REQUEST_SIZE \
MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)
/**
* The maximum number of segments (within a request header and accompanying
* segment blocks) per request we will allow in a negotiated block-front/back
* communication channel.
*/
#define XBB_MAX_SEGMENTS_PER_REQUEST \
(MIN(UIO_MAXIOV, \
MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \
(XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1)))
/**
* The maximum number of ring pages that we can allow per request list.
* We limit this to the maximum number of segments per request, because
* that is already a reasonable number of segments to aggregate. This
* number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST,
* because that would leave situations where we can't dispatch even one
* large request.
*/
#define XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST
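For a sense of the magnitudes these macros produce, a standalone computation under assumed values (PAGE_SIZE = 4 KiB, MAXPHYS = 128 KiB, BLKIF_MAX_SEGMENTS_PER_REQUEST = 11, UIO_MAXIOV = 1024); the kernel headers in effect at build time supply the real inputs.

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Assumed example values; the kernel headers provide the real ones. */
#define PAGE_SIZE_EX			4096
#define MAXPHYS_EX			(128 * 1024)
#define BLKIF_MAX_SEGS_PER_REQ_EX	11
#define UIO_MAXIOV_EX			1024

int
main(void)
{
	int max_req_size, max_segs_per_req;

	max_req_size = MIN(MAXPHYS_EX,
	    BLKIF_MAX_SEGS_PER_REQ_EX * PAGE_SIZE_EX);
	max_segs_per_req = MIN(UIO_MAXIOV_EX,
	    MIN(BLKIF_MAX_SEGS_PER_REQ_EX,
	    (max_req_size / PAGE_SIZE_EX) + 1));
	/* With these inputs: 45056-byte requests of up to 11 segments. */
	printf("XBB_MAX_REQUEST_SIZE ~ %d bytes\n", max_req_size);
	printf("XBB_MAX_SEGMENTS_PER_REQUEST ~ %d\n", max_segs_per_req);
	return (0);
}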
/*--------------------------- Forward Declarations ---------------------------*/
struct xbb_softc;
struct xbb_xen_req;
static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
...) __attribute__((format(printf, 3, 4)));
static int xbb_shutdown(struct xbb_softc *xbb);
/*------------------------------ Data Structures -----------------------------*/
STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req);
typedef enum {
XBB_REQLIST_NONE = 0x00,
XBB_REQLIST_MAPPED = 0x01
} xbb_reqlist_flags;
struct xbb_xen_reqlist {
/**
* Back reference to the parent block back instance for this
* request. Used during bio_done handling.
*/
struct xbb_softc *xbb;
/**
* BLKIF_OP code for this request.
*/
int operation;
/**
* Set to BLKIF_RSP_* to indicate request status.
*
* This field allows an error status to be recorded even if the
* delivery of this status must be deferred. Deferred reporting
* is necessary, for example, when an error is detected during
* completion processing of one bio when other bios for this
* request are still outstanding.
*/
int status;
/**
* Number of 512 byte sectors not transferred.
*/
int residual_512b_sectors;
/**
* Starting sector number of the first request in the list.
*/
off_t starting_sector_number;
/**
* If we're going to coalesce, the next contiguous sector would be
* this one.
*/
off_t next_contig_sector;
/**
* Number of child requests in the list.
*/
int num_children;
/**
* Number of I/O requests still pending on the backend.
*/
int pendcnt;
/**
* Total number of segments for requests in the list.
*/
int nr_segments;
/**
* Flags for this particular request list.
*/
xbb_reqlist_flags flags;
/**
* Kernel virtual address space reserved for this request
* list structure and used to map the remote domain's pages for
* this I/O into our domain's address space.
*/
uint8_t *kva;
/**
* Base pseudo-physical address corresponding to the start
* of this request's kva region.
*/
uint64_t gnt_base;
#ifdef XBB_USE_BOUNCE_BUFFERS
/**
* Pre-allocated domain local memory used to proxy remote
* domain memory during I/O operations.
*/
uint8_t *bounce;
#endif
/**
* Array of grant handles (one per page) used to map this request.
*/
grant_handle_t *gnt_handles;
/**
* Device statistics request ordering type (ordered or simple).
*/
devstat_tag_type ds_tag_type;
/**
* Device statistics request type (read, write, no_data).
*/
devstat_trans_flags ds_trans_type;
/**
* The start time for this request.
*/
struct bintime ds_t0;
/**
* Linked list of contiguous requests with the same operation type.
*/
struct xbb_xen_req_list contig_req_list;
/**
* Linked list links used to aggregate idle requests in the
* request list free pool (xbb->reqlist_free_stailq) and pending
* requests waiting for execution (xbb->reqlist_pending_stailq).
*/
STAILQ_ENTRY(xbb_xen_reqlist) links;
};
STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist);
/**
* \brief Object tracking an in-flight I/O from a Xen VBD consumer.
*/
struct xbb_xen_req {
/**
* Linked list links used to aggregate requests into a reqlist
* and to store them in the request free pool.
*/
STAILQ_ENTRY(xbb_xen_req) links;
/**
* The remote domain's identifier for this I/O request.
*/
uint64_t id;
/**
* The number of pages currently mapped for this request.
*/
int nr_pages;
/**
* The number of 512 byte sectors comprising this request.
*/
int nr_512b_sectors;
/**
* BLKIF_OP code for this request.
*/
int operation;
/**
* Storage used for non-native ring requests.
*/
blkif_request_t ring_req_storage;
/**
* Pointer to the Xen request in the ring.
*/
blkif_request_t *ring_req;
/**
* Consumer index for this request.
*/
RING_IDX req_ring_idx;
/**
* The start time for this request.
*/
struct bintime ds_t0;
/**
* Pointer back to our parent request list.
*/
struct xbb_xen_reqlist *reqlist;
};
SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req);
/**
* \brief Configuration data for the shared memory request ring
* used to communicate with the front-end client of this driver.
*/
struct xbb_ring_config {
/** KVA address where ring memory is mapped. */
vm_offset_t va;
/** The pseudo-physical address where ring memory is mapped.*/
uint64_t gnt_addr;
/**
* Grant table handles, one per-ring page, returned by the
* hypervisor upon mapping of the ring and required to
* unmap it when a connection is torn down.
*/
grant_handle_t handle[XBB_MAX_RING_PAGES];
/**
* The device bus address returned by the hypervisor when
* mapping the ring and required to unmap it when a connection
* is torn down.
*/
uint64_t bus_addr[XBB_MAX_RING_PAGES];
/** The number of ring pages mapped for the current connection. */
u_int ring_pages;
/**
* The grant references, one per-ring page, supplied by the
* front-end, allowing us to reference the ring pages in the
* front-end's domain and to map these pages into our own domain.
*/
grant_ref_t ring_ref[XBB_MAX_RING_PAGES];
/** The interrupt-driven event channel used to signal ring events. */
evtchn_port_t evtchn;
};
/**
* Per-instance connection state flags.
*/
typedef enum
{
/**
* The front-end requested a read-only mount of the
* back-end device/file.
*/
XBBF_READ_ONLY = 0x01,
/** Communication with the front-end has been established. */
XBBF_RING_CONNECTED = 0x02,
/**
* Front-end requests exist in the ring and are waiting for
* xbb_xen_req objects to free up.
*/
XBBF_RESOURCE_SHORTAGE = 0x04,
/** Connection teardown in progress. */
XBBF_SHUTDOWN = 0x08,
/** A thread is already performing shutdown processing. */
XBBF_IN_SHUTDOWN = 0x10
} xbb_flag_t;
/** Backend device type. */
typedef enum {
/** Backend type unknown. */
XBB_TYPE_NONE = 0x00,
/**
* Backend type disk (access via cdev switch
* strategy routine).
*/
XBB_TYPE_DISK = 0x01,
/** Backend type file (access via vnode operations). */
XBB_TYPE_FILE = 0x02
} xbb_type;
/**
* \brief Structure used to memoize information about a per-request
* scatter-gather list.
*
* The chief benefit of using this data structure is it avoids having
* to reparse the possibly discontiguous S/G list in the original
* request. Due to the way that the mapping of the memory backing an
* I/O transaction is handled by Xen, a second pass is unavoidable.
* At least this way the second walk is a simple array traversal.
*
* \note A single Scatter/Gather element in the block interface covers
* at most 1 machine page. In this context a sector (blkif
* nomenclature, not what I'd choose) is a 512b aligned unit
* of mapping within the machine page referenced by an S/G
* element.
*/
struct xbb_sg {
/** The number of 512b data chunks mapped in this S/G element. */
int16_t nsect;
/**
* The index (0 based) of the first 512b data chunk mapped
* in this S/G element.
*/
uint8_t first_sect;
/**
* The index (0 based) of the last 512b data chunk mapped
* in this S/G element.
*/
uint8_t last_sect;
};
/**
* Character device backend specific configuration data.
*/
struct xbb_dev_data {
/** Cdev used for device backend access. */
struct cdev *cdev;
/** Cdev switch used for device backend access. */
struct cdevsw *csw;
/** Used to hold a reference on opened cdev backend devices. */
int dev_ref;
};
/**
* File backend specific configuration data.
*/
struct xbb_file_data {
/** Credentials to use for vnode backed (file based) I/O. */
struct ucred *cred;
/**
* \brief Array of io vectors used to process file based I/O.
*
* Only a single file based request is outstanding per-xbb instance,
* so we only need one of these.
*/
struct iovec xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
#ifdef XBB_USE_BOUNCE_BUFFERS
/**
* \brief Array of io vectors used to handle bouncing of file reads.
*
* Vnode operations are free to modify uio data during their
* execution. In the case of a read with bounce buffering active,
* we need some of the data from the original uio in order to
* bounce-out the read data. This array serves as the temporary
* storage for this saved data.
*/
struct iovec saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
/**
* \brief Array of memoized bounce buffer kva offsets used
* in the file based backend.
*
* Due to the way that the mapping of the memory backing an
* I/O transaction is handled by Xen, a second pass through
* the request sg elements is unavoidable. We memoize the computed
* bounce address here to reduce the cost of the second walk.
*/
void *xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST];
#endif /* XBB_USE_BOUNCE_BUFFERS */
};
/**
* Collection of backend type specific data.
*/
union xbb_backend_data {
struct xbb_dev_data dev;
struct xbb_file_data file;
};
/**
* Function signature of backend specific I/O handlers.
*/
typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
struct xbb_xen_reqlist *reqlist, int operation,
int flags);
/**
* Per-instance configuration data.
*/
struct xbb_softc {
/**
* Task-queue used to process I/O requests.
*/
struct taskqueue *io_taskqueue;
/**
* Single "run the request queue" task enqueued
* on io_taskqueue.
*/
struct task io_task;
/** Device type for this instance. */
xbb_type device_type;
/** NewBus device corresponding to this instance. */
device_t dev;
/** Backend specific dispatch routine for this instance. */
xbb_dispatch_t dispatch_io;
/** The number of requests outstanding on the backend device/file. */
int active_request_count;
/** Free pool of request tracking structures. */
struct xbb_xen_req_list request_free_stailq;
/** Array, sized at connection time, of request tracking structures. */
struct xbb_xen_req *requests;
/** Free pool of request list structures. */
struct xbb_xen_reqlist_list reqlist_free_stailq;
/** List of pending request lists awaiting execution. */
struct xbb_xen_reqlist_list reqlist_pending_stailq;
/** Array, sized at connection time, of request list structures. */
struct xbb_xen_reqlist *request_lists;
/**
* Global pool of kva used for mapping remote domain ring
* and I/O transaction data.
*/
vm_offset_t kva;
/** Pseudo-physical address corresponding to kva. */
uint64_t gnt_base_addr;
/** The size of the global kva pool. */
int kva_size;
/** The size of the KVA area used for request lists. */
int reqlist_kva_size;
/** The number of pages of KVA used for request lists */
int reqlist_kva_pages;
/** Bitmap of free KVA pages */
bitstr_t *kva_free;
/**
* \brief Cached value of the front-end's domain id.
*
* This value is used once for each mapped page in
* a transaction. We cache it to avoid incurring the
* cost of an ivar access every time this is needed.
*/
domid_t otherend_id;
/**
* \brief The blkif protocol abi in effect.
*
* There are situations where the back and front ends can
* have a different, native abi (e.g. intel x86_64 and
* 32bit x86 domains on the same machine). The back-end
* always accommodates the front-end's native abi. That
* value is pulled from the XenStore and recorded here.
*/
int abi;
/**
* \brief The maximum number of requests and request lists allowed
* to be in flight at a time.
*
* This value is negotiated via the XenStore.
*/
u_int max_requests;
/**
* \brief The maximum number of segments (1 page per segment)
* that can be mapped by a request.
*
* This value is negotiated via the XenStore.
*/
u_int max_request_segments;
/**
* \brief Maximum number of segments per request list.
*
* This value is derived from and will generally be larger than
* max_request_segments.
*/
u_int max_reqlist_segments;
/**
* The maximum size of any request to this back-end
* device.
*
* This value is negotiated via the XenStore.
*/
u_int max_request_size;
/**
* The maximum size of any request list. This is derived directly
* from max_reqlist_segments.
*/
u_int max_reqlist_size;
/** Various configuration and state bit flags. */
xbb_flag_t flags;
/** Ring mapping and interrupt configuration data. */
struct xbb_ring_config ring_config;
/** Runtime, cross-abi safe, structures for ring access. */
blkif_back_rings_t rings;
/** IRQ mapping for the communication ring event channel. */
xen_intr_handle_t xen_intr_handle;
/**
* \brief Backend access mode flags (e.g. write, or read-only).
*
* This value is passed to us by the front-end via the XenStore.
*/
char *dev_mode;
/**
* \brief Backend device type (e.g. "disk", "cdrom", "floppy").
*
* This value is passed to us by the front-end via the XenStore.
* Currently unused.
*/
char *dev_type;
/**
* \brief Backend device/file identifier.
*
* This value is passed to us by the front-end via the XenStore.
* We expect this to be a POSIX path indicating the file or
* device to open.
*/
char *dev_name;
/**
* Vnode corresponding to the backend device node or file
* we are accessing.
*/
struct vnode *vn;
union xbb_backend_data backend;
/** The native sector size of the backend. */
u_int sector_size;
/** log2 of sector_size. */
u_int sector_size_shift;
/** Size in bytes of the backend device or file. */
off_t media_size;
/**
* \brief media_size expressed in terms of the backend native
* sector size.
*
* (e.g. xbb->media_size >> xbb->sector_size_shift).
*/
uint64_t media_num_sectors;
/**
* \brief Array of memoized scatter gather data computed during the
* conversion of blkif ring requests to internal xbb_xen_req
* structures.
*
* Ring processing is serialized so we only need one of these.
*/
struct xbb_sg xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST];
/**
* Temporary grant table map used in xbb_dispatch_io(). When
* XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the
* stack could cause a stack overflow.
*/
struct gnttab_map_grant_ref maps[XBB_MAX_SEGMENTS_PER_REQLIST];
/** Mutex protecting per-instance data. */
struct mtx lock;
/**
* Resource representing allocated physical address space
* associated with our per-instance kva region.
*/
struct resource *pseudo_phys_res;
/** Resource id for allocated physical address space. */
int pseudo_phys_res_id;
/**
* I/O statistics from BlockBack dispatch down. These are
* coalesced requests, and we start them right before execution.
*/
struct devstat *xbb_stats;
/**
* I/O statistics coming into BlockBack. These are the requests as
* we get them from BlockFront. They are started as soon as we
* receive a request, and completed when the I/O is complete.
*/
struct devstat *xbb_stats_in;
/** Disable sending flush to the backend */
int disable_flush;
/** Send a real flush for every N flush requests */
int flush_interval;
/** Count of flush requests in the interval */
int flush_count;
/** Don't coalesce requests if this is set */
int no_coalesce_reqs;
/** Number of requests we have received */
uint64_t reqs_received;
/** Number of requests we have completed */
uint64_t reqs_completed;
/** Number of requests we queued but not pushed */
uint64_t reqs_queued_for_completion;
/** Number of requests we completed with an error status */
uint64_t reqs_completed_with_error;
/** How many forced dispatches (i.e. without coalescing) have happened */
uint64_t forced_dispatch;
/** How many normal dispatches have happened */
uint64_t normal_dispatch;
/** How many total dispatches have happened */
uint64_t total_dispatch;
/** How many times we have run out of KVA */
uint64_t kva_shortages;
/** How many times we have run out of request structures */
uint64_t request_shortages;
/** Watch to wait for hotplug script execution */
struct xs_watch hotplug_watch;
};
/*---------------------------- Request Processing ----------------------------*/
/**
* Allocate an internal transaction tracking structure from the free pool.
*
* \param xbb Per-instance xbb configuration structure.
*
* \return On success, a pointer to the allocated xbb_xen_req structure.
* Otherwise NULL.
*/
static inline struct xbb_xen_req *
xbb_get_req(struct xbb_softc *xbb)
{
struct xbb_xen_req *req;
req = NULL;
mtx_assert(&xbb->lock, MA_OWNED);
if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
xbb->active_request_count++;
}
return (req);
}
/**
* Return an allocated transaction tracking structure to the free pool.
*
* \param xbb Per-instance xbb configuration structure.
* \param req The request structure to free.
*/
static inline void
xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
{
mtx_assert(&xbb->lock, MA_OWNED);
STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
xbb->active_request_count--;
KASSERT(xbb->active_request_count >= 0,
("xbb_release_req: negative active count"));
}
/**
* Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool.
*
* \param xbb Per-instance xbb configuration structure.
* \param req_list The list of requests to free.
* \param nreqs The number of items in the list.
*/
static inline void
xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
int nreqs)
{
mtx_assert(&xbb->lock, MA_OWNED);
STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
xbb->active_request_count -= nreqs;
KASSERT(xbb->active_request_count >= 0,
("xbb_release_reqs: negative active count"));
}
/**
* Given a page index and 512b sector offset within that page,
* calculate an offset into a request's kva region.
*
* \param reqlist The request structure whose kva region will be accessed.
* \param pagenr The page index used to compute the kva offset.
* \param sector The 512b sector index used to compute the page relative
* kva offset.
*
* \return The computed global KVA offset.
*/
static inline uint8_t *
xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
}
#ifdef XBB_USE_BOUNCE_BUFFERS
/**
* Given a page index and 512b sector offset within that page,
* calculate an offset into a request's local bounce memory region.
*
* \param reqlist The request structure whose bounce region will be accessed.
* \param pagenr The page index used to compute the bounce offset.
* \param sector The 512b sector index used to compute the page relative
* bounce offset.
*
* \return The computed global bounce buffer address.
*/
static inline uint8_t *
xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
}
#endif
/**
* Given a page number and 512b sector offset within that page,
* calculate an offset into the request's memory region that the
* underlying backend device/file should use for I/O.
*
* \param reqlist The request structure whose I/O region will be accessed.
* \param pagenr The page index used to compute the I/O offset.
* \param sector The 512b sector index used to compute the page relative
* I/O offset.
*
* \return The computed global I/O address.
*
* Depending on configuration, this will either be a local bounce buffer
* or a pointer to the memory mapped in from the front-end domain for
* this request.
*/
static inline uint8_t *
xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
#ifdef XBB_USE_BOUNCE_BUFFERS
return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
#else
return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
#endif
}
/**
* Given a page index and 512b sector offset within that page, calculate
* an offset into the local pseudo-physical address space used to map a
* front-end's request data into a request.
*
* \param reqlist The request list structure whose pseudo-physical region
* will be accessed.
* \param pagenr The page index used to compute the pseudo-physical offset.
* \param sector The 512b sector index used to compute the page relative
* pseudo-physical offset.
*
* \return The computed global pseudo-physical address.
*
* Depending on configuration, this will either be a local bounce buffer
* or a pointer to the memory mapped in from the front-end domain for
* this request.
*/
static inline uintptr_t
xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
struct xbb_softc *xbb;
xbb = reqlist->xbb;
return ((uintptr_t)(xbb->gnt_base_addr +
(uintptr_t)(reqlist->kva - xbb->kva) +
(PAGE_SIZE * pagenr) + (sector << 9)));
}
/**
* Get Kernel Virtual Address space for mapping requests.
*
* \param xbb Per-instance xbb configuration structure.
* \param nr_pages Number of pages needed.
* \param check_only If set, check for free KVA but don't allocate it.
* \param have_lock If set, xbb lock is already held.
*
* \return On success, a pointer to the allocated KVA region. Otherwise NULL.
*
* Note: This should be unnecessary once we have either chaining or
* scatter/gather support for struct bio. At that point we'll be able to
* put multiple addresses and lengths in one bio/bio chain and won't need
* to map everything into one virtual segment.
*/
static uint8_t *
xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
{
int first_clear;
int num_clear;
uint8_t *free_kva;
int i;
KASSERT(nr_pages != 0, ("xbb_get_kva of zero length"));
first_clear = 0;
free_kva = NULL;
mtx_lock(&xbb->lock);
/*
* Look for the first available page. If there are none, we're done.
*/
bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
if (first_clear == -1)
goto bailout;
/*
* Starting at the first available page, look for consecutive free
* pages that will satisfy the user's request.
*/
for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
/*
* If this is true, the page is used, so we have to reset
* the number of clear pages and the first clear page
* (since it pointed to a region with an insufficient number
* of clear pages).
*/
if (bit_test(xbb->kva_free, i)) {
num_clear = 0;
first_clear = -1;
continue;
}
if (first_clear == -1)
first_clear = i;
/*
* If this is true, we've found a large enough free region
* to satisfy the request.
*/
if (++num_clear == nr_pages) {
bit_nset(xbb->kva_free, first_clear,
first_clear + nr_pages - 1);
free_kva = xbb->kva +
(uint8_t *)((intptr_t)first_clear * PAGE_SIZE);
KASSERT(free_kva >= (uint8_t *)xbb->kva &&
free_kva + (nr_pages * PAGE_SIZE) <=
(uint8_t *)xbb->ring_config.va,
("Free KVA %p len %d out of range, "
"kva = %#jx, ring VA = %#jx\n", free_kva,
nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
(uintmax_t)xbb->ring_config.va));
break;
}
}
bailout:
if (free_kva == NULL) {
xbb->flags |= XBBF_RESOURCE_SHORTAGE;
xbb->kva_shortages++;
}
mtx_unlock(&xbb->lock);
return (free_kva);
}
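A standalone sketch of the same first-fit scan for a run of consecutive free pages, using a plain byte array in place of bitstring(3); editorial illustration only, the allocator above is authoritative.

#include <stdio.h>

/*
 * Illustration only: find the first run of nr consecutive zero entries
 * in used[], mark them allocated, and return the starting index, or -1
 * when no large-enough run exists.  Mirrors the scan in xbb_get_kva().
 */
static int
first_fit(char *used, int nslots, int nr)
{
	int first_clear = -1, num_clear = 0, i;

	for (i = 0; i < nslots; i++) {
		if (used[i]) {			/* run broken, start over */
			num_clear = 0;
			first_clear = -1;
			continue;
		}
		if (first_clear == -1)
			first_clear = i;
		if (++num_clear == nr) {	/* large enough run found */
			for (i = first_clear; i < first_clear + nr; i++)
				used[i] = 1;
			return (first_clear);
		}
	}
	return (-1);
}

int
main(void)
{
	char used[8] = { 1, 0, 0, 1, 0, 0, 0, 0 };

	printf("3 slots at %d\n", first_fit(used, 8, 3));	/* prints 4 */
	return (0);
}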
/**
* Free allocated KVA.
*
* \param xbb Per-instance xbb configuration structure.
* \param kva_ptr Pointer to allocated KVA region.
* \param nr_pages Number of pages in the KVA region.
*/
static void
xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
{
intptr_t start_page;
mtx_assert(&xbb->lock, MA_OWNED);
start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
}
/**
* Unmap the front-end pages associated with this I/O request.
*
* \param reqlist The request list structure to unmap.
*/
static void
xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
{
struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST];
u_int i;
u_int invcount;
int error;
invcount = 0;
for (i = 0; i < reqlist->nr_segments; i++) {
if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
continue;
unmap[invcount].host_addr = xbb_get_gntaddr(reqlist, i, 0);
unmap[invcount].dev_bus_addr = 0;
unmap[invcount].handle = reqlist->gnt_handles[i];
reqlist->gnt_handles[i] = GRANT_REF_INVALID;
invcount++;
}
error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
unmap, invcount);
KASSERT(error == 0, ("Grant table operation failed"));
}
/**
* Allocate an internal transaction tracking structure from the free pool.
*
* \param xbb Per-instance xbb configuration structure.
*
* \return On success, a pointer to the allocated xbb_xen_reqlist structure.
* Otherwise NULL.
*/
static inline struct xbb_xen_reqlist *
xbb_get_reqlist(struct xbb_softc *xbb)
{
struct xbb_xen_reqlist *reqlist;
reqlist = NULL;
mtx_assert(&xbb->lock, MA_OWNED);
if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
reqlist->flags = XBB_REQLIST_NONE;
reqlist->kva = NULL;
reqlist->status = BLKIF_RSP_OKAY;
reqlist->residual_512b_sectors = 0;
reqlist->num_children = 0;
reqlist->nr_segments = 0;
STAILQ_INIT(&reqlist->contig_req_list);
}
return (reqlist);
}
/**
* Return an allocated transaction tracking structure to the free pool.
*
* \param xbb Per-instance xbb configuration structure.
* \param reqlist The request list structure to free.
* \param wakeup If set, wakeup the work thread if freeing this reqlist
* during a resource shortage condition.
*/
static inline void
xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
int wakeup)
{
mtx_assert(&xbb->lock, MA_OWNED);
if (wakeup) {
wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
}
if (reqlist->kva != NULL)
xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
/*
* Shutdown is in progress. See if we can
* progress further now that one more request
* has completed and been returned to the
* free pool.
*/
xbb_shutdown(xbb);
}
if (wakeup != 0)
taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
}
/**
* Request resources and do basic request setup.
*
* \param xbb Per-instance xbb configuration structure.
* \param reqlist Pointer to reqlist pointer.
* \param ring_req Pointer to a block ring request.
* \param ring_idx The ring index of this request.
*
* \return 0 for success, non-zero for failure.
*/
static int
xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
blkif_request_t *ring_req, RING_IDX ring_idx)
{
struct xbb_xen_reqlist *nreqlist;
struct xbb_xen_req *nreq;
nreqlist = NULL;
nreq = NULL;
mtx_lock(&xbb->lock);
/*
* We don't allow new resources to be allocated if we're in the
* process of shutting down.
*/
if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
mtx_unlock(&xbb->lock);
return (1);
}
/*
* Allocate a reqlist if the caller doesn't have one already.
*/
if (*reqlist == NULL) {
nreqlist = xbb_get_reqlist(xbb);
if (nreqlist == NULL)
goto bailout_error;
}
/* We always allocate a request. */
nreq = xbb_get_req(xbb);
if (nreq == NULL)
goto bailout_error;
mtx_unlock(&xbb->lock);
if (*reqlist == NULL) {
*reqlist = nreqlist;
nreqlist->operation = ring_req->operation;
nreqlist->starting_sector_number = ring_req->sector_number;
STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
links);
}
nreq->reqlist = *reqlist;
nreq->req_ring_idx = ring_idx;
nreq->id = ring_req->id;
nreq->operation = ring_req->operation;
if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
nreq->ring_req = &nreq->ring_req_storage;
} else {
nreq->ring_req = ring_req;
}
binuptime(&nreq->ds_t0);
devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
(*reqlist)->num_children++;
(*reqlist)->nr_segments += ring_req->nr_segments;
return (0);
bailout_error:
/*
* We're out of resources, so set the shortage flag. The next time
* a request is released, we'll try waking up the work thread to
* see if we can allocate more resources.
*/
xbb->flags |= XBBF_RESOURCE_SHORTAGE;
xbb->request_shortages++;
if (nreq != NULL)
xbb_release_req(xbb, nreq);
if (nreqlist != NULL)
xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
mtx_unlock(&xbb->lock);
return (1);
}
/**
* Create and queue a response to a blkif request.
*
* \param xbb Per-instance xbb configuration structure.
* \param req The request structure to which to respond.
* \param status The status code to report. See BLKIF_RSP_*
* in sys/xen/interface/io/blkif.h.
*/
static void
xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
{
blkif_response_t *resp;
/*
* The mutex is required here, and should be held across this call
* until after the subsequent call to xbb_push_responses(). This
* is to guarantee that another context won't queue responses and
* push them while we're active.
*
* That could lead to the other end being notified of responses
* before the resources have been freed on this end. The other end
* would then be able to queue additional I/O, and we may run out
* of resources because we haven't freed them all yet.
*/
mtx_assert(&xbb->lock, MA_OWNED);
/*
* Place on the response ring for the relevant domain.
* For now, only the spacing between entries is different
* in the different ABIs, not the response entry layout.
*/
switch (xbb->abi) {
case BLKIF_PROTOCOL_NATIVE:
resp = RING_GET_RESPONSE(&xbb->rings.native,
xbb->rings.native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
resp = (blkif_response_t *)
RING_GET_RESPONSE(&xbb->rings.x86_32,
xbb->rings.x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
resp = (blkif_response_t *)
RING_GET_RESPONSE(&xbb->rings.x86_64,
xbb->rings.x86_64.rsp_prod_pvt);
break;
default:
panic("Unexpected blkif protocol ABI.");
}
resp->id = req->id;
resp->operation = req->operation;
resp->status = status;
if (status != BLKIF_RSP_OKAY)
xbb->reqs_completed_with_error++;
xbb->rings.common.rsp_prod_pvt++;
xbb->reqs_queued_for_completion++;
}
/**
* Send queued responses to blkif requests.
*
* \param xbb Per-instance xbb configuration structure.
* \param run_taskqueue Flag that is set to 1 if the taskqueue
* should be run, 0 if it does not need to be run.
* \param notify Flag that is set to 1 if the other end should be
* notified via irq, 0 if the other end should not be
* notified.
*/
static void
xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
{
int more_to_do;
/*
* The mutex is required here.
*/
mtx_assert(&xbb->lock, MA_OWNED);
more_to_do = 0;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
/*
* Tail check for pending requests. Allows frontend to avoid
* notifications if requests are already in flight (lower
* overheads and promotes batching).
*/
RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
} else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {
more_to_do = 1;
}
xbb->reqs_completed += xbb->reqs_queued_for_completion;
xbb->reqs_queued_for_completion = 0;
*run_taskqueue = more_to_do;
}
/**
* Complete a request list.
*
* \param xbb Per-instance xbb configuration structure.
* \param reqlist Allocated internal request list structure.
*/
static void
xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
struct xbb_xen_req *nreq;
off_t sectors_sent;
int notify, run_taskqueue;
sectors_sent = 0;
if (reqlist->flags & XBB_REQLIST_MAPPED)
xbb_unmap_reqlist(reqlist);
mtx_lock(&xbb->lock);
/*
* All I/O is done, send the response. A lock is not necessary
* to protect the request list, because all requests have
* completed. Therefore this is the only context accessing this
* reqlist right now. However, in order to make sure that no one
* else queues responses onto the queue or pushes them to the other
* side while we're active, we need to hold the lock across the
* calls to xbb_queue_response() and xbb_push_responses().
*/
STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
off_t cur_sectors_sent;
/* Put this response on the ring, but don't push yet */
xbb_queue_response(xbb, nreq, reqlist->status);
/* We don't report bytes sent if there is an error. */
if (reqlist->status == BLKIF_RSP_OKAY)
cur_sectors_sent = nreq->nr_512b_sectors;
else
cur_sectors_sent = 0;
sectors_sent += cur_sectors_sent;
devstat_end_transaction(xbb->xbb_stats_in,
/*bytes*/cur_sectors_sent << 9,
reqlist->ds_tag_type,
reqlist->ds_trans_type,
/*now*/NULL,
/*then*/&nreq->ds_t0);
}
/*
* Take out any sectors not sent. If we wind up negative (which
* might happen if an error is reported as well as a residual), just
* report 0 sectors sent.
*/
sectors_sent -= reqlist->residual_512b_sectors;
if (sectors_sent < 0)
sectors_sent = 0;
devstat_end_transaction(xbb->xbb_stats,
/*bytes*/ sectors_sent << 9,
reqlist->ds_tag_type,
reqlist->ds_trans_type,
/*now*/NULL,
/*then*/&reqlist->ds_t0);
xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
xbb_push_responses(xbb, &run_taskqueue, &notify);
mtx_unlock(&xbb->lock);
if (run_taskqueue)
taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
if (notify)
xen_intr_signal(xbb->xen_intr_handle);
}
/**
* Completion handler for buffer I/O requests issued by the device
* backend driver.
*
* \param bio The buffer I/O request on which to perform completion
* processing.
*/
static void
xbb_bio_done(struct bio *bio)
{
struct xbb_softc *xbb;
struct xbb_xen_reqlist *reqlist;
reqlist = bio->bio_caller1;
xbb = reqlist->xbb;
reqlist->residual_512b_sectors += bio->bio_resid >> 9;
/*
* This is a bit imprecise. With aggregated I/O a single
* request list can contain multiple front-end requests and
* multiple bios may point to a single request. By carefully
* walking the request list, we could map residuals and errors
* back to the original front-end request, but the interface
* isn't sufficiently rich for us to properly report the error.
* So, we just treat the entire request list as having failed if an
* error occurs on any part. And, if an error occurs, we treat
* the amount of data transferred as 0.
*
* For residuals, we report it on the overall aggregated device,
* but not on the individual requests, since we don't currently
* do the work to determine which front-end request to which the
* residual applies.
*/
if (bio->bio_error) {
DPRINTF("BIO returned error %d for operation on device %s\n",
bio->bio_error, xbb->dev_name);
reqlist->status = BLKIF_RSP_ERROR;
if (bio->bio_error == ENXIO
&& xenbus_get_state(xbb->dev) == XenbusStateConnected) {
/*
* Backend device has disappeared. Signal the
* front-end that we (the device proxy) want to
* go away.
*/
xenbus_set_state(xbb->dev, XenbusStateClosing);
}
}
#ifdef XBB_USE_BOUNCE_BUFFERS
if (bio->bio_cmd == BIO_READ) {
vm_offset_t kva_offset;
kva_offset = (vm_offset_t)bio->bio_data
- (vm_offset_t)reqlist->bounce;
memcpy((uint8_t *)reqlist->kva + kva_offset,
bio->bio_data, bio->bio_bcount);
}
#endif /* XBB_USE_BOUNCE_BUFFERS */
/*
* Decrement the pending count for the request list. When we're
* done with the requests, send status back for all of them.
*/
if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
xbb_complete_reqlist(xbb, reqlist);
g_destroy_bio(bio);
}
/**
* Parse a blkif request into an internal request structure and send
* it to the backend for processing.
*
* \param xbb Per-instance xbb configuration structure.
* \param reqlist Allocated internal request list structure.
*
* \return On success, 0. For resource shortages, non-zero.
*
* This routine performs the backend common aspects of request parsing
* including compiling an internal request structure, parsing the S/G
* list and any secondary ring requests in which they may reside, and
* the mapping of front-end I/O pages into our domain.
*/
static int
xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
struct xbb_sg *xbb_sg;
struct gnttab_map_grant_ref *map;
struct blkif_request_segment *sg;
struct blkif_request_segment *last_block_sg;
struct xbb_xen_req *nreq;
u_int nseg;
u_int seg_idx;
u_int block_segs;
int nr_sects;
int total_sects;
int operation;
uint8_t bio_flags;
int error;
reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
bio_flags = 0;
total_sects = 0;
nr_sects = 0;
/*
* First determine whether we have enough free KVA to satisfy this
* request list. If not, tell xbb_run_queue() so it can go to
* sleep until we have more KVA.
*/
reqlist->kva = NULL;
if (reqlist->nr_segments != 0) {
reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
if (reqlist->kva == NULL) {
/*
* If we're out of KVA, return ENOMEM.
*/
return (ENOMEM);
}
}
binuptime(&reqlist->ds_t0);
devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
switch (reqlist->operation) {
case BLKIF_OP_WRITE_BARRIER:
bio_flags |= BIO_ORDERED;
reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
/* FALLTHROUGH */
case BLKIF_OP_WRITE:
operation = BIO_WRITE;
reqlist->ds_trans_type = DEVSTAT_WRITE;
if ((xbb->flags & XBBF_READ_ONLY) != 0) {
DPRINTF("Attempt to write to read only device %s\n",
xbb->dev_name);
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
break;
case BLKIF_OP_READ:
operation = BIO_READ;
reqlist->ds_trans_type = DEVSTAT_READ;
break;
case BLKIF_OP_FLUSH_DISKCACHE:
/*
* If this is true, the user has requested that we disable
* flush support. So we just complete the requests
* successfully.
*/
if (xbb->disable_flush != 0) {
goto send_response;
}
/*
* The user has requested that we only send a real flush
* for every N flush requests. So keep count, and either
* complete the request immediately or queue it for the
* backend.
*/
if (xbb->flush_interval != 0) {
if (++(xbb->flush_count) < xbb->flush_interval) {
goto send_response;
} else
xbb->flush_count = 0;
}
operation = BIO_FLUSH;
reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
reqlist->ds_trans_type = DEVSTAT_NO_DATA;
goto do_dispatch;
/*NOTREACHED*/
default:
DPRINTF("error: unknown block io operation [%d]\n",
reqlist->operation);
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
reqlist->xbb = xbb;
xbb_sg = xbb->xbb_sgs;
map = xbb->maps;
seg_idx = 0;
STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
blkif_request_t *ring_req;
RING_IDX req_ring_idx;
u_int req_seg_idx;
ring_req = nreq->ring_req;
req_ring_idx = nreq->req_ring_idx;
nr_sects = 0;
nseg = ring_req->nr_segments;
nreq->nr_pages = nseg;
nreq->nr_512b_sectors = 0;
req_seg_idx = 0;
sg = NULL;
/* Check that number of segments is sane. */
if (__predict_false(nseg == 0)
|| __predict_false(nseg > xbb->max_request_segments)) {
DPRINTF("Bad number of segments in request (%d)\n",
nseg);
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
block_segs = nseg;
sg = ring_req->seg;
last_block_sg = sg + block_segs;
while (sg < last_block_sg) {
KASSERT(seg_idx <
XBB_MAX_SEGMENTS_PER_REQLIST,
("seg_idx %d is too large, max "
"segs %d\n", seg_idx,
XBB_MAX_SEGMENTS_PER_REQLIST));
xbb_sg->first_sect = sg->first_sect;
xbb_sg->last_sect = sg->last_sect;
xbb_sg->nsect =
(int8_t)(sg->last_sect -
sg->first_sect + 1);
if ((sg->last_sect >= (PAGE_SIZE >> 9))
|| (xbb_sg->nsect <= 0)) {
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
nr_sects += xbb_sg->nsect;
map->host_addr = xbb_get_gntaddr(reqlist,
seg_idx, /*sector*/0);
KASSERT(map->host_addr + PAGE_SIZE <=
xbb->ring_config.gnt_addr,
("Host address %#jx len %d overlaps "
"ring address %#jx\n",
(uintmax_t)map->host_addr, PAGE_SIZE,
(uintmax_t)xbb->ring_config.gnt_addr));
map->flags = GNTMAP_host_map;
map->ref = sg->gref;
map->dom = xbb->otherend_id;
if (operation == BIO_WRITE)
map->flags |= GNTMAP_readonly;
sg++;
map++;
xbb_sg++;
seg_idx++;
req_seg_idx++;
}
/* Convert to the disk's sector size */
nreq->nr_512b_sectors = nr_sects;
nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
total_sects += nr_sects;
if ((nreq->nr_512b_sectors &
((xbb->sector_size >> 9) - 1)) != 0) {
device_printf(xbb->dev, "%s: I/O size (%d) is not "
"a multiple of the backing store sector "
"size (%d)\n", __func__,
nreq->nr_512b_sectors << 9,
xbb->sector_size);
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
}
error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
xbb->maps, reqlist->nr_segments);
if (error != 0)
panic("Grant table operation failed (%d)", error);
reqlist->flags |= XBB_REQLIST_MAPPED;
for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
seg_idx++, map++){
if (__predict_false(map->status != 0)) {
DPRINTF("invalid buffer -- could not remap "
"it (%d)\n", map->status);
DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags "
"0x%x ref 0x%x, dom %d\n", seg_idx,
map->host_addr, map->flags, map->ref,
map->dom);
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
reqlist->gnt_handles[seg_idx] = map->handle;
}
if (reqlist->starting_sector_number + total_sects >
xbb->media_num_sectors) {
DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] "
"extends past end of device %s\n",
operation == BIO_READ ? "read" : "write",
reqlist->starting_sector_number,
reqlist->starting_sector_number + total_sects,
xbb->dev_name);
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
do_dispatch:
error = xbb->dispatch_io(xbb,
reqlist,
operation,
bio_flags);
if (error != 0) {
reqlist->status = BLKIF_RSP_ERROR;
goto send_response;
}
return (0);
send_response:
xbb_complete_reqlist(xbb, reqlist);
return (0);
}
static __inline int
xbb_count_sects(blkif_request_t *ring_req)
{
int i;
int cur_size = 0;
for (i = 0; i < ring_req->nr_segments; i++) {
int nsect;
nsect = (int8_t)(ring_req->seg[i].last_sect -
ring_req->seg[i].first_sect + 1);
if (nsect <= 0)
break;
cur_size += nsect;
}
return (cur_size);
}
/**
* Process incoming requests from the shared communication ring in response
* to a signal on the ring's event channel.
*
* \param context Callback argument registered during task initialization -
* the xbb_softc for this instance.
* \param pending The number of taskqueue_enqueue events that have
* occurred since this handler was last run.
*/
static void
xbb_run_queue(void *context, int pending)
{
struct xbb_softc *xbb;
blkif_back_rings_t *rings;
RING_IDX rp;
uint64_t cur_sector;
int cur_operation;
struct xbb_xen_reqlist *reqlist;
xbb = (struct xbb_softc *)context;
rings = &xbb->rings;
/*
* Work gather and dispatch loop. Note that we have a bias here
* towards gathering I/O sent by blockfront. We first gather up
* everything in the ring, as long as we have resources. Then we
* dispatch one request, and then attempt to gather up any
* additional requests that have come in while we were dispatching
* the request.
*
* This allows us to get a clearer picture (via devstat) of how
* many requests blockfront is queueing to us at any given time.
*/
for (;;) {
int retval;
/*
* Initialize reqlist to the last element in the pending
* queue, if there is one. This allows us to add more
* requests to that request list, if we have room.
*/
reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
xbb_xen_reqlist, links);
if (reqlist != NULL) {
cur_sector = reqlist->next_contig_sector;
cur_operation = reqlist->operation;
} else {
cur_operation = 0;
cur_sector = 0;
}
/*
* Cache req_prod to avoid accessing a cache line shared
* with the frontend.
*/
rp = rings->common.sring->req_prod;
/* Ensure we see queued requests up to 'rp'. */
rmb();
/**
* Run so long as there is work to consume and the generation
* of a response will not overflow the ring.
*
* @note There's a 1 to 1 relationship between requests and
* responses, so an overflow should never occur. This
* test is to protect our domain from digesting bogus
* data. Shouldn't we log this?
*/
while (rings->common.req_cons != rp
&& RING_REQUEST_CONS_OVERFLOW(&rings->common,
rings->common.req_cons) == 0) {
blkif_request_t ring_req_storage;
blkif_request_t *ring_req;
int cur_size;
switch (xbb->abi) {
case BLKIF_PROTOCOL_NATIVE:
ring_req = RING_GET_REQUEST(&xbb->rings.native,
rings->common.req_cons);
break;
case BLKIF_PROTOCOL_X86_32:
{
struct blkif_x86_32_request *ring_req32;
ring_req32 = RING_GET_REQUEST(
&xbb->rings.x86_32, rings->common.req_cons);
blkif_get_x86_32_req(&ring_req_storage,
ring_req32);
ring_req = &ring_req_storage;
break;
}
case BLKIF_PROTOCOL_X86_64:
{
struct blkif_x86_64_request *ring_req64;
ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64,
rings->common.req_cons);
blkif_get_x86_64_req(&ring_req_storage,
ring_req64);
ring_req = &ring_req_storage;
break;
}
default:
panic("Unexpected blkif protocol ABI.");
/* NOTREACHED */
}
/*
* Check for situations that would require closing
* off this I/O for further coalescing:
* - Coalescing is turned off.
* - Current I/O is out of sequence with the previous
* I/O.
* - Coalesced I/O would be too large.
*/
if ((reqlist != NULL)
&& ((xbb->no_coalesce_reqs != 0)
|| ((xbb->no_coalesce_reqs == 0)
&& ((ring_req->sector_number != cur_sector)
|| (ring_req->operation != cur_operation)
|| ((ring_req->nr_segments + reqlist->nr_segments) >
xbb->max_reqlist_segments))))) {
reqlist = NULL;
}
/*
* Grab and check for all resources in one shot.
* If we can't get all of the resources we need,
* the shortage is noted and the thread will get
* woken up when more resources are available.
*/
retval = xbb_get_resources(xbb, &reqlist, ring_req,
xbb->rings.common.req_cons);
if (retval != 0) {
/*
* Resource shortage has been recorded.
* We'll be scheduled to run once a request
* object frees up due to a completion.
*/
break;
}
/*
* Signify that we can overwrite this request with
* a response by incrementing our consumer index.
* The response won't be generated until after
* we've already consumed all necessary data out
* of the version of the request in the ring buffer
* (for native mode). We must update the consumer
* index before issuing back-end I/O so there is
* no possibility that it will complete and a
* response be generated before we make room in
* the queue for that response.
*/
xbb->rings.common.req_cons++;
xbb->reqs_received++;
cur_size = xbb_count_sects(ring_req);
cur_sector = ring_req->sector_number + cur_size;
reqlist->next_contig_sector = cur_sector;
cur_operation = ring_req->operation;
}
/* Check for I/O to dispatch */
reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
if (reqlist == NULL) {
/*
* We're out of work to do, put the task queue to
* sleep.
*/
break;
}
/*
* Grab the first request off the queue and attempt
* to dispatch it.
*/
STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
retval = xbb_dispatch_io(xbb, reqlist);
if (retval != 0) {
/*
* xbb_dispatch_io() returns non-zero only when
* there is a resource shortage. If that's the
* case, re-queue this request on the head of the
* queue, and go to sleep until we have more
* resources.
*/
STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
reqlist, links);
break;
} else {
/*
* If we still have anything on the queue after
* removing the head entry, that is because we
* met one of the criteria to create a new
* request list (outlined above), and we'll call
* that a forced dispatch for statistical purposes.
*
* Otherwise, if there is only one element on the
* queue, we coalesced everything available on
* the ring and we'll call that a normal dispatch.
*/
reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
if (reqlist != NULL)
xbb->forced_dispatch++;
else
xbb->normal_dispatch++;
xbb->total_dispatch++;
}
}
}
/**
* Interrupt handler bound to the shared ring's event channel.
*
* \param arg Callback argument registered during event channel
* binding - the xbb_softc for this instance.
*/
static int
xbb_filter(void *arg)
{
struct xbb_softc *xbb;
/* Defer to taskqueue thread. */
xbb = (struct xbb_softc *)arg;
taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
return (FILTER_HANDLED);
}
SDT_PROVIDER_DEFINE(xbb);
SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
"uint64_t");
SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
"uint64_t", "uint64_t");
/*----------------------------- Backend Handlers -----------------------------*/
/**
* Backend handler for character device access.
*
* \param xbb Per-instance xbb configuration structure.
* \param reqlist Allocated internal request list structure.
* \param operation BIO_* I/O operation code.
* \param bio_flags Additional bio_flag data to pass to any generated
* bios (e.g. BIO_ORDERED).
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
int operation, int bio_flags)
{
struct xbb_dev_data *dev_data;
struct bio *bios[XBB_MAX_SEGMENTS_PER_REQLIST];
off_t bio_offset;
struct bio *bio;
struct xbb_sg *xbb_sg;
u_int nbio;
u_int bio_idx;
u_int nseg;
u_int seg_idx;
int error;
dev_data = &xbb->backend.dev;
bio_offset = (off_t)reqlist->starting_sector_number
<< xbb->sector_size_shift;
error = 0;
nbio = 0;
bio_idx = 0;
if (operation == BIO_FLUSH) {
bio = g_new_bio();
if (__predict_false(bio == NULL)) {
DPRINTF("Unable to allocate bio for BIO_FLUSH\n");
error = ENOMEM;
return (error);
}
bio->bio_cmd = BIO_FLUSH;
bio->bio_flags |= BIO_ORDERED;
bio->bio_dev = dev_data->cdev;
bio->bio_offset = 0;
bio->bio_data = 0;
bio->bio_done = xbb_bio_done;
bio->bio_caller1 = reqlist;
bio->bio_pblkno = 0;
reqlist->pendcnt = 1;
SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
device_get_unit(xbb->dev));
(*dev_data->csw->d_strategy)(bio);
return (0);
}
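	/*
	 * Walk the scatter/gather list built for this request list and
	 * translate it into one or more bios.  A new bio is started
	 * whenever the mapped KVA becomes discontiguous, i.e. when a
	 * segment does not begin at the first sector of its page or the
	 * previous segment did not end at the last sector of its page.
	 */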
xbb_sg = xbb->xbb_sgs;
bio = NULL;
nseg = reqlist->nr_segments;
for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
/*
* KVA will not be contiguous, so any additional
* I/O will need to be represented in a new bio.
*/
if ((bio != NULL)
&& (xbb_sg->first_sect != 0)) {
if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
printf("%s: Discontiguous I/O request "
"from domain %d ends on "
"non-sector boundary\n",
__func__, xbb->otherend_id);
error = EINVAL;
goto fail_free_bios;
}
bio = NULL;
}
if (bio == NULL) {
/*
* Make sure that the start of this bio is
* aligned to a device sector.
*/
if ((bio_offset & (xbb->sector_size - 1)) != 0) {
printf("%s: Misaligned I/O request "
"from domain %d\n", __func__,
xbb->otherend_id);
error = EINVAL;
goto fail_free_bios;
}
bio = bios[nbio++] = g_new_bio();
if (__predict_false(bio == NULL)) {
error = ENOMEM;
goto fail_free_bios;
}
bio->bio_cmd = operation;
bio->bio_flags |= bio_flags;
bio->bio_dev = dev_data->cdev;
bio->bio_offset = bio_offset;
bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx,
xbb_sg->first_sect);
bio->bio_done = xbb_bio_done;
bio->bio_caller1 = reqlist;
bio->bio_pblkno = bio_offset >> xbb->sector_size_shift;
}
bio->bio_length += xbb_sg->nsect << 9;
bio->bio_bcount = bio->bio_length;
bio_offset += xbb_sg->nsect << 9;
if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) {
if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
printf("%s: Discontiguous I/O request "
"from domain %d ends on "
"non-sector boundary\n",
__func__, xbb->otherend_id);
error = EINVAL;
goto fail_free_bios;
}
/*
* KVA will not be contiguous, so any additional
* I/O will need to be represented in a new bio.
*/
bio = NULL;
}
}
reqlist->pendcnt = nbio;
for (bio_idx = 0; bio_idx < nbio; bio_idx++)
{
#ifdef XBB_USE_BOUNCE_BUFFERS
vm_offset_t kva_offset;
kva_offset = (vm_offset_t)bios[bio_idx]->bio_data
- (vm_offset_t)reqlist->bounce;
if (operation == BIO_WRITE) {
memcpy(bios[bio_idx]->bio_data,
(uint8_t *)reqlist->kva + kva_offset,
bios[bio_idx]->bio_bcount);
}
#endif
if (operation == BIO_READ) {
SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read,
device_get_unit(xbb->dev),
bios[bio_idx]->bio_offset,
bios[bio_idx]->bio_length);
} else if (operation == BIO_WRITE) {
SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write,
device_get_unit(xbb->dev),
bios[bio_idx]->bio_offset,
bios[bio_idx]->bio_length);
}
(*dev_data->csw->d_strategy)(bios[bio_idx]);
}
return (error);
fail_free_bios:
for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++)
g_destroy_bio(bios[bio_idx]);
return (error);
}
SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
"uint64_t");
SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
"uint64_t", "uint64_t");
/**
* Backend handler for file access.
*
* \param xbb Per-instance xbb configuration structure.
* \param reqlist Allocated internal request list.
* \param operation BIO_* I/O operation code.
* \param flags Additional bio_flag data to pass to any generated bios
* (e.g. BIO_ORDERED).
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
int operation, int flags)
{
struct xbb_file_data *file_data;
u_int seg_idx;
u_int nseg;
struct uio xuio;
struct xbb_sg *xbb_sg;
struct iovec *xiovec;
#ifdef XBB_USE_BOUNCE_BUFFERS
void **p_vaddr;
int saved_uio_iovcnt;
#endif /* XBB_USE_BOUNCE_BUFFERS */
int error;
file_data = &xbb->backend.file;
error = 0;
bzero(&xuio, sizeof(xuio));
switch (operation) {
case BIO_READ:
xuio.uio_rw = UIO_READ;
break;
case BIO_WRITE:
xuio.uio_rw = UIO_WRITE;
break;
case BIO_FLUSH: {
struct mount *mountpoint;
SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush,
device_get_unit(xbb->dev));
(void) vn_start_write(xbb->vn, &mountpoint, V_WAIT);
vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread);
VOP_UNLOCK(xbb->vn, 0);
vn_finished_write(mountpoint);
goto bailout_send_response;
/* NOTREACHED */
}
default:
panic("invalid operation %d", operation);
/* NOTREACHED */
}
xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
<< xbb->sector_size_shift;
xuio.uio_segflg = UIO_SYSSPACE;
xuio.uio_iov = file_data->xiovecs;
xuio.uio_iovcnt = 0;
xbb_sg = xbb->xbb_sgs;
nseg = reqlist->nr_segments;
for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
/*
* If the first sector is not 0, the KVA will
* not be contiguous and we'll need to go on
* to another segment.
*/
if (xbb_sg->first_sect != 0)
xiovec = NULL;
if (xiovec == NULL) {
xiovec = &file_data->xiovecs[xuio.uio_iovcnt];
xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
seg_idx, xbb_sg->first_sect);
#ifdef XBB_USE_BOUNCE_BUFFERS
/*
* Store the address of the incoming
* buffer at this particular offset
* as well, so we can do the copy
* later without having to do more
* work to recalculate this address.
*/
p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt];
*p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx,
xbb_sg->first_sect);
#endif /* XBB_USE_BOUNCE_BUFFERS */
xiovec->iov_len = 0;
xuio.uio_iovcnt++;
}
xiovec->iov_len += xbb_sg->nsect << 9;
xuio.uio_resid += xbb_sg->nsect << 9;
/*
* If the last sector is not the full page
* size count, the next segment will not be
* contiguous in KVA and we need a new iovec.
*/
if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9)
xiovec = NULL;
}
xuio.uio_td = curthread;
#ifdef XBB_USE_BOUNCE_BUFFERS
saved_uio_iovcnt = xuio.uio_iovcnt;
if (operation == BIO_WRITE) {
/* Copy the write data to the local buffer. */
for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt;
seg_idx++, xiovec++, p_vaddr++) {
memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len);
}
} else {
/*
* We only need to save off the iovecs in the case of a
* read, because the copy for the read happens after the
* VOP_READ(). (The uio will get modified in that call
* sequence.)
*/
memcpy(file_data->saved_xiovecs, xuio.uio_iov,
xuio.uio_iovcnt * sizeof(xuio.uio_iov[0]));
}
#endif /* XBB_USE_BOUNCE_BUFFERS */
switch (operation) {
case BIO_READ:
SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read,
device_get_unit(xbb->dev), xuio.uio_offset,
xuio.uio_resid);
vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
/*
* UFS pays attention to IO_DIRECT for reads. If the
* DIRECTIO option is configured into the kernel, it calls
* ffs_rawread(). But that only works for single-segment
* uios with user space addresses. In our case, with a
* kernel uio, it still reads into the buffer cache, but it
* will just try to release the buffer from the cache later
* on in ffs_read().
*
* ZFS does not pay attention to IO_DIRECT for reads.
*
* UFS does not pay attention to IO_SYNC for reads.
*
* ZFS pays attention to IO_SYNC (which translates into the
* Solaris define FRSYNC for zfs_read()) for reads. It
* attempts to sync the file before reading.
*
* So, to attempt to provide some barrier semantics in the
* BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
*/
error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
(IO_DIRECT|IO_SYNC) : 0, file_data->cred);
VOP_UNLOCK(xbb->vn, 0);
break;
case BIO_WRITE: {
struct mount *mountpoint;
SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write,
device_get_unit(xbb->dev), xuio.uio_offset,
xuio.uio_resid);
(void)vn_start_write(xbb->vn, &mountpoint, V_WAIT);
vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
/*
* UFS pays attention to IO_DIRECT for writes. The write
* is done asynchronously. (Normally the write would just
* get put into the cache.)
*
* UFS pays attention to IO_SYNC for writes. It will
* attempt to write the buffer out synchronously if that
* flag is set.
*
* ZFS does not pay attention to IO_DIRECT for writes.
*
* ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
* for writes. It will flush the transaction from the
* cache before returning.
*
* So if we've got the BIO_ORDERED flag set, we want
* IO_SYNC in either the UFS or ZFS case.
*/
error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
IO_SYNC : 0, file_data->cred);
VOP_UNLOCK(xbb->vn, 0);
vn_finished_write(mountpoint);
break;
}
default:
panic("invalid operation %d", operation);
/* NOTREACHED */
}
#ifdef XBB_USE_BOUNCE_BUFFERS
/* We only need to copy here for read operations */
if (operation == BIO_READ) {
for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
xiovec = file_data->saved_xiovecs;
seg_idx < saved_uio_iovcnt; seg_idx++,
xiovec++, p_vaddr++) {
/*
* Note that we have to use the copy of the
* io vector we made above. uiomove() modifies
* the uio and its referenced vector as uiomove
* performs the copy, so we can't rely on any
* state from the original uio.
*/
memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len);
}
}
#endif /* XBB_USE_BOUNCE_BUFFERS */
bailout_send_response:
if (error != 0)
reqlist->status = BLKIF_RSP_ERROR;
xbb_complete_reqlist(xbb, reqlist);
return (0);
}
/*--------------------------- Backend Configuration --------------------------*/
/**
* Close and cleanup any backend device/file specific state for this
* block back instance.
*
* \param xbb Per-instance xbb configuration structure.
*/
static void
xbb_close_backend(struct xbb_softc *xbb)
{
DROP_GIANT();
DPRINTF("closing dev=%s\n", xbb->dev_name);
if (xbb->vn) {
int flags = FREAD;
if ((xbb->flags & XBBF_READ_ONLY) == 0)
flags |= FWRITE;
switch (xbb->device_type) {
case XBB_TYPE_DISK:
if (xbb->backend.dev.csw) {
dev_relthread(xbb->backend.dev.cdev,
xbb->backend.dev.dev_ref);
xbb->backend.dev.csw = NULL;
xbb->backend.dev.cdev = NULL;
}
break;
case XBB_TYPE_FILE:
break;
case XBB_TYPE_NONE:
default:
panic("Unexpected backend type.");
break;
}
(void)vn_close(xbb->vn, flags, NOCRED, curthread);
xbb->vn = NULL;
switch (xbb->device_type) {
case XBB_TYPE_DISK:
break;
case XBB_TYPE_FILE:
if (xbb->backend.file.cred != NULL) {
crfree(xbb->backend.file.cred);
xbb->backend.file.cred = NULL;
}
break;
case XBB_TYPE_NONE:
default:
panic("Unexpected backend type.");
break;
}
}
PICKUP_GIANT();
}
/**
* Open a character device to be used for backend I/O.
*
* \param xbb Per-instance xbb configuration structure.
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_open_dev(struct xbb_softc *xbb)
{
struct vattr vattr;
struct cdev *dev;
struct cdevsw *devsw;
int error;
xbb->device_type = XBB_TYPE_DISK;
xbb->dispatch_io = xbb_dispatch_dev;
xbb->backend.dev.cdev = xbb->vn->v_rdev;
xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev,
&xbb->backend.dev.dev_ref);
if (xbb->backend.dev.csw == NULL)
panic("Unable to retrieve device switch");
error = VOP_GETATTR(xbb->vn, &vattr, NOCRED);
if (error) {
xenbus_dev_fatal(xbb->dev, error, "error getting "
"vnode attributes for device %s",
xbb->dev_name);
return (error);
}
dev = xbb->vn->v_rdev;
devsw = dev->si_devsw;
if (!devsw->d_ioctl) {
xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for "
"device %s!", xbb->dev_name);
return (ENODEV);
}
error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
(caddr_t)&xbb->sector_size, FREAD,
curthread);
if (error) {
xenbus_dev_fatal(xbb->dev, error,
"error calling ioctl DIOCGSECTORSIZE "
"for device %s", xbb->dev_name);
return (error);
}
error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
(caddr_t)&xbb->media_size, FREAD,
curthread);
if (error) {
xenbus_dev_fatal(xbb->dev, error,
"error calling ioctl DIOCGMEDIASIZE "
"for device %s", xbb->dev_name);
return (error);
}
return (0);
}
/**
* Open a file to be used for backend I/O.
*
* \param xbb Per-instance xbb configuration structure.
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_open_file(struct xbb_softc *xbb)
{
struct xbb_file_data *file_data;
struct vattr vattr;
int error;
file_data = &xbb->backend.file;
xbb->device_type = XBB_TYPE_FILE;
xbb->dispatch_io = xbb_dispatch_file;
error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred);
if (error != 0) {
xenbus_dev_fatal(xbb->dev, error,
"error calling VOP_GETATTR()"
"for file %s", xbb->dev_name);
return (error);
}
/*
* Verify that we have the ability to upgrade to exclusive
* access on this file so we can trap errors at open instead
* of reporting them during first access.
*/
if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
if (xbb->vn->v_iflag & VI_DOOMED) {
error = EBADF;
xenbus_dev_fatal(xbb->dev, error,
"error locking file %s",
xbb->dev_name);
return (error);
}
}
file_data->cred = crhold(curthread->td_ucred);
xbb->media_size = vattr.va_size;
/*
* XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
* With ZFS, it is 131072 bytes. Block sizes that large don't work
* with disklabel and UFS on FreeBSD at least. Large block sizes
* may not work with other OSes as well. So just export a sector
* size of 512 bytes, which should work with any OS or
* application. Since our backing is a file, any block size will
* work fine for the backing store.
*/
#if 0
xbb->sector_size = vattr.va_blocksize;
#endif
xbb->sector_size = 512;
/*
* Sanity check. The media size has to be at least one
* sector long.
*/
if (xbb->media_size < xbb->sector_size) {
error = EINVAL;
xenbus_dev_fatal(xbb->dev, error,
"file %s size %ju < block size %u",
xbb->dev_name,
(uintmax_t)xbb->media_size,
xbb->sector_size);
}
return (error);
}
/**
* Open the backend provider for this connection.
*
* \param xbb Per-instance xbb configuration structure.
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_open_backend(struct xbb_softc *xbb)
{
struct nameidata nd;
int flags;
int error;
flags = FREAD;
error = 0;
DPRINTF("opening dev=%s\n", xbb->dev_name);
if (rootvnode == NULL) {
xenbus_dev_fatal(xbb->dev, ENOENT,
"Root file system not mounted");
return (ENOENT);
}
if ((xbb->flags & XBBF_READ_ONLY) == 0)
flags |= FWRITE;
pwd_ensure_dirs();
again:
NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread);
error = vn_open(&nd, &flags, 0, NULL);
if (error) {
/*
* This is the only reasonable guess we can make as far as
* the path goes if the user doesn't give us a fully qualified path.
* If they want to specify a file, they need to specify the
* full path.
*/
if (xbb->dev_name[0] != '/') {
char *dev_path = "/dev/";
char *dev_name;
/* Try adding device path at beginning of name */
dev_name = malloc(strlen(xbb->dev_name)
+ strlen(dev_path) + 1,
M_XENBLOCKBACK, M_NOWAIT);
if (dev_name) {
sprintf(dev_name, "%s%s", dev_path,
xbb->dev_name);
free(xbb->dev_name, M_XENBLOCKBACK);
xbb->dev_name = dev_name;
goto again;
}
}
xenbus_dev_fatal(xbb->dev, error, "error opening device %s",
xbb->dev_name);
return (error);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
xbb->vn = nd.ni_vp;
/* We only support disks and files. */
if (vn_isdisk(xbb->vn, &error)) {
error = xbb_open_dev(xbb);
} else if (xbb->vn->v_type == VREG) {
error = xbb_open_file(xbb);
} else {
error = EINVAL;
xenbus_dev_fatal(xbb->dev, error, "%s is not a disk "
"or file", xbb->dev_name);
}
VOP_UNLOCK(xbb->vn, 0);
if (error != 0) {
xbb_close_backend(xbb);
return (error);
}
xbb->sector_size_shift = fls(xbb->sector_size) - 1;
xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift;
DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n",
(xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file",
xbb->dev_name, xbb->sector_size, xbb->media_size);
return (0);
}
/*------------------------ Inter-Domain Communication ------------------------*/
/**
* Free dynamically allocated KVA or pseudo-physical address allocations.
*
* \param xbb Per-instance xbb configuration structure.
*/
static void
xbb_free_communication_mem(struct xbb_softc *xbb)
{
if (xbb->kva != 0) {
if (xbb->pseudo_phys_res != NULL) {
xenmem_free(xbb->dev, xbb->pseudo_phys_res_id,
xbb->pseudo_phys_res);
xbb->pseudo_phys_res = NULL;
}
}
xbb->kva = 0;
xbb->gnt_base_addr = 0;
if (xbb->kva_free != NULL) {
free(xbb->kva_free, M_XENBLOCKBACK);
xbb->kva_free = NULL;
}
}
/**
* Cleanup all inter-domain communication mechanisms.
*
* \param xbb Per-instance xbb configuration structure.
*/
static int
xbb_disconnect(struct xbb_softc *xbb)
{
struct gnttab_unmap_grant_ref ops[XBB_MAX_RING_PAGES];
struct gnttab_unmap_grant_ref *op;
u_int ring_idx;
int error;
DPRINTF("\n");
if ((xbb->flags & XBBF_RING_CONNECTED) == 0)
return (0);
xen_intr_unbind(&xbb->xen_intr_handle);
mtx_unlock(&xbb->lock);
taskqueue_drain(xbb->io_taskqueue, &xbb->io_task);
mtx_lock(&xbb->lock);
/*
* No new interrupts can generate work, but we must wait
* for all currently active requests to drain.
*/
if (xbb->active_request_count != 0)
return (EAGAIN);
for (ring_idx = 0, op = ops;
ring_idx < xbb->ring_config.ring_pages;
ring_idx++, op++) {
op->host_addr = xbb->ring_config.gnt_addr
+ (ring_idx * PAGE_SIZE);
op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
op->handle = xbb->ring_config.handle[ring_idx];
}
error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops,
xbb->ring_config.ring_pages);
if (error != 0)
panic("Grant table op failed (%d)", error);
xbb_free_communication_mem(xbb);
if (xbb->requests != NULL) {
free(xbb->requests, M_XENBLOCKBACK);
xbb->requests = NULL;
}
if (xbb->request_lists != NULL) {
struct xbb_xen_reqlist *reqlist;
int i;
/* There is one request list for every allocated request. */
for (i = 0, reqlist = xbb->request_lists;
i < xbb->max_requests; i++, reqlist++) {
#ifdef XBB_USE_BOUNCE_BUFFERS
if (reqlist->bounce != NULL) {
free(reqlist->bounce, M_XENBLOCKBACK);
reqlist->bounce = NULL;
}
#endif
if (reqlist->gnt_handles != NULL) {
free(reqlist->gnt_handles, M_XENBLOCKBACK);
reqlist->gnt_handles = NULL;
}
}
free(xbb->request_lists, M_XENBLOCKBACK);
xbb->request_lists = NULL;
}
xbb->flags &= ~XBBF_RING_CONNECTED;
return (0);
}
/**
* Map shared memory ring into domain local address space, initialize
* ring control structures, and bind an interrupt to the event channel
* used to notify us of ring changes.
*
* \param xbb Per-instance xbb configuration structure.
*/
static int
xbb_connect_ring(struct xbb_softc *xbb)
{
struct gnttab_map_grant_ref gnts[XBB_MAX_RING_PAGES];
struct gnttab_map_grant_ref *gnt;
u_int ring_idx;
int error;
if ((xbb->flags & XBBF_RING_CONNECTED) != 0)
return (0);
/*
* Kva for our ring is at the tail of the region of kva allocated
* by xbb_alloc_communication_mem().
*/
xbb->ring_config.va = xbb->kva
+ (xbb->kva_size
- (xbb->ring_config.ring_pages * PAGE_SIZE));
xbb->ring_config.gnt_addr = xbb->gnt_base_addr
+ (xbb->kva_size
- (xbb->ring_config.ring_pages * PAGE_SIZE));
for (ring_idx = 0, gnt = gnts;
ring_idx < xbb->ring_config.ring_pages;
ring_idx++, gnt++) {
gnt->host_addr = xbb->ring_config.gnt_addr
+ (ring_idx * PAGE_SIZE);
gnt->flags = GNTMAP_host_map;
gnt->ref = xbb->ring_config.ring_ref[ring_idx];
gnt->dom = xbb->otherend_id;
}
error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts,
xbb->ring_config.ring_pages);
if (error)
panic("blkback: Ring page grant table op failed (%d)", error);
for (ring_idx = 0, gnt = gnts;
ring_idx < xbb->ring_config.ring_pages;
ring_idx++, gnt++) {
if (gnt->status != 0) {
xbb->ring_config.va = 0;
xenbus_dev_fatal(xbb->dev, EACCES,
"Ring shared page mapping failed. "
"Status %d.", gnt->status);
return (EACCES);
}
xbb->ring_config.handle[ring_idx] = gnt->handle;
xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr;
}
/* Initialize the ring based on ABI. */
switch (xbb->abi) {
case BLKIF_PROTOCOL_NATIVE:
{
blkif_sring_t *sring;
sring = (blkif_sring_t *)xbb->ring_config.va;
BACK_RING_INIT(&xbb->rings.native, sring,
xbb->ring_config.ring_pages * PAGE_SIZE);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
blkif_x86_32_sring_t *sring_x86_32;
sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va;
BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32,
xbb->ring_config.ring_pages * PAGE_SIZE);
break;
}
case BLKIF_PROTOCOL_X86_64:
{
blkif_x86_64_sring_t *sring_x86_64;
sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va;
BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64,
xbb->ring_config.ring_pages * PAGE_SIZE);
break;
}
default:
panic("Unexpected blkif protocol ABI.");
}
xbb->flags |= XBBF_RING_CONNECTED;
error = xen_intr_bind_remote_port(xbb->dev,
xbb->otherend_id,
xbb->ring_config.evtchn,
xbb_filter,
/*ithread_handler*/NULL,
/*arg*/xbb,
INTR_TYPE_BIO | INTR_MPSAFE,
&xbb->xen_intr_handle);
if (error) {
(void)xbb_disconnect(xbb);
xenbus_dev_fatal(xbb->dev, error, "binding event channel");
return (error);
}
DPRINTF("rings connected!\n");
return (0);
}
/**
* Size KVA and pseudo-physical address allocations based on negotiated
* values for the size and number of I/O requests, and the size of our
* communication ring.
*
* \param xbb Per-instance xbb configuration structure.
*
* These address spaces are used to dynamically map pages in the
* front-end's domain into our own.
*/
static int
xbb_alloc_communication_mem(struct xbb_softc *xbb)
{
xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
xbb->kva_size = xbb->reqlist_kva_size +
(xbb->ring_config.ring_pages * PAGE_SIZE);
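	/*
	 * The reqlist KVA region must be large enough to map every segment
	 * of every in-flight request at once; the shared ring pages are
	 * carved from the tail of the same region (see xbb_connect_ring()).
	 * With the legacy defaults of 32 requests, BLKIF_MAX_SEGMENTS_PER_REQUEST
	 * (11) segments per request, and a single ring page, this works out
	 * to (32 * 11) + 1 = 353 pages of KVA on a 4KB page system.
	 */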
xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT);
if (xbb->kva_free == NULL)
return (ENOMEM);
DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n",
device_get_nameunit(xbb->dev), xbb->kva_size,
xbb->reqlist_kva_size);
/*
* Reserve a range of pseudo physical memory that we can map
* into kva. These pages will only be backed by machine
* pages ("real memory") during the lifetime of front-end requests
* via grant table operations.
*/
xbb->pseudo_phys_res_id = 0;
xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
xbb->kva_size);
if (xbb->pseudo_phys_res == NULL) {
xbb->kva = 0;
return (ENOMEM);
}
xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
(uintmax_t)xbb->gnt_base_addr);
return (0);
}
/**
* Collect front-end information from the XenStore.
*
* \param xbb Per-instance xbb configuration structure.
*/
static int
xbb_collect_frontend_info(struct xbb_softc *xbb)
{
char protocol_abi[64];
const char *otherend_path;
int error;
u_int ring_idx;
u_int ring_page_order;
size_t ring_size;
otherend_path = xenbus_get_otherend_path(xbb->dev);
/*
* Protocol defaults valid even if all negotiation fails.
*/
xbb->ring_config.ring_pages = 1;
xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
/*
* Mandatory data (used in all versions of the protocol) first.
*/
error = xs_scanf(XST_NIL, otherend_path,
"event-channel", NULL, "%" PRIu32,
&xbb->ring_config.evtchn);
if (error != 0) {
xenbus_dev_fatal(xbb->dev, error,
"Unable to retrieve event-channel information "
"from frontend %s. Unable to connect.",
xenbus_get_otherend_path(xbb->dev));
return (error);
}
/*
* These fields are initialized to legacy protocol defaults
* so we only need to fail if reading the updated value succeeds
* and the new value is outside of its allowed range.
*
* \note xs_gather() returns on the first encountered error, so
* we must use independent calls in order to guarantee
* we don't miss information in a sparsely populated front-end
* tree.
*
* \note xs_scanf() does not update variables for unmatched
* fields.
*/
ring_page_order = 0;
xbb->max_requests = 32;
(void)xs_scanf(XST_NIL, otherend_path,
"ring-page-order", NULL, "%u",
&ring_page_order);
xbb->ring_config.ring_pages = 1 << ring_page_order;
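	/*
	 * BLKIF_MAX_RING_REQUESTS() yields the number of request slots that
	 * fit in a ring of the given byte size; for a single-page ring and
	 * the native ABI this works out to the legacy value of 32 assumed
	 * above.
	 */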
ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
xenbus_dev_fatal(xbb->dev, EINVAL,
"Front-end specified ring-pages of %u "
"exceeds backend limit of %u. "
"Unable to connect.",
xbb->ring_config.ring_pages,
XBB_MAX_RING_PAGES);
return (EINVAL);
}
if (xbb->ring_config.ring_pages == 1) {
error = xs_gather(XST_NIL, otherend_path,
"ring-ref", "%" PRIu32,
&xbb->ring_config.ring_ref[0],
NULL);
if (error != 0) {
xenbus_dev_fatal(xbb->dev, error,
"Unable to retrieve ring information "
"from frontend %s. Unable to "
"connect.",
xenbus_get_otherend_path(xbb->dev));
return (error);
}
} else {
/* Multi-page ring format. */
for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages;
ring_idx++) {
char ring_ref_name[] = "ring_refXX";
snprintf(ring_ref_name, sizeof(ring_ref_name),
"ring-ref%u", ring_idx);
error = xs_scanf(XST_NIL, otherend_path,
ring_ref_name, NULL, "%" PRIu32,
&xbb->ring_config.ring_ref[ring_idx]);
if (error != 0) {
xenbus_dev_fatal(xbb->dev, error,
"Failed to retriev grant "
"reference for page %u of "
"shared ring. Unable "
"to connect.", ring_idx);
return (error);
}
}
}
error = xs_gather(XST_NIL, otherend_path,
"protocol", "%63s", protocol_abi,
NULL);
if (error != 0
|| !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) {
/*
* Assume native if the frontend has not
* published ABI data or it has published and
* matches our own ABI.
*/
xbb->abi = BLKIF_PROTOCOL_NATIVE;
} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) {
xbb->abi = BLKIF_PROTOCOL_X86_32;
} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) {
xbb->abi = BLKIF_PROTOCOL_X86_64;
} else {
xenbus_dev_fatal(xbb->dev, EINVAL,
"Unknown protocol ABI (%s) published by "
"frontend. Unable to connect.", protocol_abi);
return (EINVAL);
}
return (0);
}
/**
* Allocate per-request data structures given request size and number
* information negotiated with the front-end.
*
* \param xbb Per-instance xbb configuration structure.
*/
static int
xbb_alloc_requests(struct xbb_softc *xbb)
{
struct xbb_xen_req *req;
struct xbb_xen_req *last_req;
/*
* Allocate request bookkeeping data structures.
*/
- xbb->requests = mallocarray(xbb->max_requests, sizeof(*xbb->requests),
+ xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests),
M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
if (xbb->requests == NULL) {
xenbus_dev_fatal(xbb->dev, ENOMEM,
"Unable to allocate request structures");
return (ENOMEM);
}
req = xbb->requests;
last_req = &xbb->requests[xbb->max_requests - 1];
STAILQ_INIT(&xbb->request_free_stailq);
while (req <= last_req) {
STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
req++;
}
return (0);
}
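/**
 * Allocate request list structures sized by the negotiated limits.
 *
 * \param xbb Per-instance xbb configuration structure.
 *
 * \return 0 for success, errno codes for failure.
 *
 * Each request list carries an array of grant handles (and, when
 * XBB_USE_BOUNCE_BUFFERS is defined, a bounce buffer) large enough for
 * the maximum number of segments a coalesced request list may contain.
 */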
static int
xbb_alloc_request_lists(struct xbb_softc *xbb)
{
struct xbb_xen_reqlist *reqlist;
int i;
/*
* If no requests can be merged, we need 1 request list per
* in flight request.
*/
- xbb->request_lists = mallocarray(xbb->max_requests,
+ xbb->request_lists = malloc(xbb->max_requests *
sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
if (xbb->request_lists == NULL) {
xenbus_dev_fatal(xbb->dev, ENOMEM,
"Unable to allocate request list structures");
return (ENOMEM);
}
STAILQ_INIT(&xbb->reqlist_free_stailq);
STAILQ_INIT(&xbb->reqlist_pending_stailq);
for (i = 0; i < xbb->max_requests; i++) {
int seg;
reqlist = &xbb->request_lists[i];
reqlist->xbb = xbb;
#ifdef XBB_USE_BOUNCE_BUFFERS
reqlist->bounce = malloc(xbb->max_reqlist_size,
M_XENBLOCKBACK, M_NOWAIT);
if (reqlist->bounce == NULL) {
xenbus_dev_fatal(xbb->dev, ENOMEM,
"Unable to allocate request "
"bounce buffers");
return (ENOMEM);
}
#endif /* XBB_USE_BOUNCE_BUFFERS */
- reqlist->gnt_handles = mallocarray(xbb->max_reqlist_segments,
+ reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
sizeof(*reqlist->gnt_handles),
M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
if (reqlist->gnt_handles == NULL) {
xenbus_dev_fatal(xbb->dev, ENOMEM,
"Unable to allocate request "
"grant references");
return (ENOMEM);
}
for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
}
return (0);
}
/**
* Supply information about the physical device to the frontend
* via XenBus.
*
* \param xbb Per-instance xbb configuration structure.
*/
static int
xbb_publish_backend_info(struct xbb_softc *xbb)
{
struct xs_transaction xst;
const char *our_path;
const char *leaf;
int error;
our_path = xenbus_get_node(xbb->dev);
while (1) {
error = xs_transaction_start(&xst);
if (error != 0) {
xenbus_dev_fatal(xbb->dev, error,
"Error publishing backend info "
"(start transaction)");
return (error);
}
leaf = "sectors";
error = xs_printf(xst, our_path, leaf,
"%"PRIu64, xbb->media_num_sectors);
if (error != 0)
break;
/* XXX Support all VBD attributes here. */
leaf = "info";
error = xs_printf(xst, our_path, leaf, "%u",
xbb->flags & XBBF_READ_ONLY
? VDISK_READONLY : 0);
if (error != 0)
break;
leaf = "sector-size";
error = xs_printf(xst, our_path, leaf, "%u",
xbb->sector_size);
if (error != 0)
break;
error = xs_transaction_end(xst, 0);
if (error == 0) {
return (0);
} else if (error != EAGAIN) {
xenbus_dev_fatal(xbb->dev, error, "ending transaction");
return (error);
}
}
xenbus_dev_fatal(xbb->dev, error, "writing %s/%s",
our_path, leaf);
xs_transaction_end(xst, 1);
return (error);
}
/**
* Connect to our blkfront peer now that it has completed publishing
* its configuration into the XenStore.
*
* \param xbb Per-instance xbb configuration structure.
*/
static void
xbb_connect(struct xbb_softc *xbb)
{
int error;
if (xenbus_get_state(xbb->dev) != XenbusStateInitialised)
return;
if (xbb_collect_frontend_info(xbb) != 0)
return;
xbb->flags &= ~XBBF_SHUTDOWN;
/*
* We limit the maximum number of reqlist segments to the maximum
* number of segments in the ring, or our absolute maximum,
* whichever is smaller.
*/
xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
/*
* The maximum size is simply a function of the number of segments
* we can handle.
*/
xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
/* Allocate resources whose size depends on front-end configuration. */
error = xbb_alloc_communication_mem(xbb);
if (error != 0) {
xenbus_dev_fatal(xbb->dev, error,
"Unable to allocate communication memory");
return;
}
error = xbb_alloc_requests(xbb);
if (error != 0) {
/* Specific errors are reported by xbb_alloc_requests(). */
return;
}
error = xbb_alloc_request_lists(xbb);
if (error != 0) {
/* Specific errors are reported by xbb_alloc_request_lists(). */
return;
}
/*
* Connect communication channel.
*/
error = xbb_connect_ring(xbb);
if (error != 0) {
/* Specific errors are reported by xbb_connect_ring(). */
return;
}
if (xbb_publish_backend_info(xbb) != 0) {
/*
* If we can't publish our data, we cannot participate
* in this connection, and waiting for a front-end state
* change will not help the situation.
*/
(void)xbb_disconnect(xbb);
return;
}
/* Ready for I/O. */
xenbus_set_state(xbb->dev, XenbusStateConnected);
}
/*-------------------------- Device Teardown Support -------------------------*/
/**
* Perform device shutdown functions.
*
* \param xbb Per-instance xbb configuration structure.
*
* Mark this instance as shutting down, wait for any active I/O on the
* backend device/file to drain, disconnect from the front-end, and notify
* any waiters (e.g. a thread invoking our detach method) that detach can
* now proceed.
*/
static int
xbb_shutdown(struct xbb_softc *xbb)
{
XenbusState frontState;
int error;
DPRINTF("\n");
/*
* Due to the need to drop our mutex during some
* xenbus operations, it is possible for two threads
* to attempt to close out shutdown processing at
* the same time. Tell the caller that hits this
* race to try back later.
*/
if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
return (EAGAIN);
xbb->flags |= XBBF_IN_SHUTDOWN;
mtx_unlock(&xbb->lock);
if (xbb->hotplug_watch.node != NULL) {
xs_unregister_watch(&xbb->hotplug_watch);
free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
xbb->hotplug_watch.node = NULL;
}
if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
xenbus_set_state(xbb->dev, XenbusStateClosing);
frontState = xenbus_get_otherend_state(xbb->dev);
mtx_lock(&xbb->lock);
xbb->flags &= ~XBBF_IN_SHUTDOWN;
/* Wait for the frontend to disconnect (if it's connected). */
if (frontState == XenbusStateConnected)
return (EAGAIN);
DPRINTF("\n");
/* Indicate shutdown is in progress. */
xbb->flags |= XBBF_SHUTDOWN;
/* Disconnect from the front-end. */
error = xbb_disconnect(xbb);
if (error != 0) {
/*
* Requests still outstanding. We'll be called again
* once they complete.
*/
KASSERT(error == EAGAIN,
("%s: Unexpected xbb_disconnect() failure %d",
__func__, error));
return (error);
}
DPRINTF("\n");
/* Indicate to xbb_detach() that it is safe to proceed. */
wakeup(xbb);
return (0);
}
/**
* Report an attach time error to the console and Xen, and cleanup
* this instance by forcing immediate detach processing.
*
* \param xbb Per-instance xbb configuration structure.
* \param err Errno describing the error.
* \param fmt Printf style format and arguments
*/
static void
xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
{
va_list ap;
va_list ap_hotplug;
va_start(ap, fmt);
va_copy(ap_hotplug, ap);
xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev),
"hotplug-error", fmt, ap_hotplug);
va_end(ap_hotplug);
xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
"hotplug-status", "error");
xenbus_dev_vfatal(xbb->dev, err, fmt, ap);
va_end(ap);
xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
"online", "0");
mtx_lock(&xbb->lock);
xbb_shutdown(xbb);
mtx_unlock(&xbb->lock);
}
/*---------------------------- NewBus Entrypoints ----------------------------*/
/**
* Inspect a XenBus device and claim it if is of the appropriate type.
*
* \param dev NewBus device object representing a candidate XenBus device.
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_probe(device_t dev)
{
if (!strcmp(xenbus_get_type(dev), "vbd")) {
device_set_desc(dev, "Backend Virtual Block Device");
device_quiet(dev);
return (0);
}
return (ENXIO);
}
/**
* Setup sysctl variables to control various Block Back parameters.
*
* \param xbb Xen Block Back softc.
*
*/
static void
xbb_setup_sysctl(struct xbb_softc *xbb)
{
struct sysctl_ctx_list *sysctl_ctx = NULL;
struct sysctl_oid *sysctl_tree = NULL;
sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
if (sysctl_ctx == NULL)
return;
sysctl_tree = device_get_sysctl_tree(xbb->dev);
if (sysctl_tree == NULL)
return;
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
"fake the flush command");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
"send a real flush for N flush requests");
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0,
"Don't coalesce contiguous requests");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"reqs_received", CTLFLAG_RW, &xbb->reqs_received,
"how many I/O requests we have received");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
"how many I/O requests have been completed");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"reqs_queued_for_completion", CTLFLAG_RW,
&xbb->reqs_queued_for_completion,
"how many I/O requests queued but not yet pushed");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"reqs_completed_with_error", CTLFLAG_RW,
&xbb->reqs_completed_with_error,
"how many I/O requests completed with error status");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
"how many I/O dispatches were forced");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
"how many I/O dispatches were normal");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
"total number of I/O dispatches");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
"how many times we have run out of KVA");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"request_shortages", CTLFLAG_RW,
&xbb->request_shortages,
"how many times we have run out of requests");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
"maximum outstanding requests (negotiated)");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"max_request_segments", CTLFLAG_RD,
&xbb->max_request_segments, 0,
"maximum number of pages per requests (negotiated)");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"max_request_size", CTLFLAG_RD,
&xbb->max_request_size, 0,
"maximum size in bytes of a request (negotiated)");
SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"ring_pages", CTLFLAG_RD,
&xbb->ring_config.ring_pages, 0,
"communication channel pages (negotiated)");
}
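/**
 * XenStore watch callback fired once the hotplug script has published
 * the physical-device-path node for this instance.
 *
 * \param watch The watch registered by xbb_attach().
 * \param vec   XenStore watch event data (unused).
 * \param len   Number of elements in the event data (unused).
 *
 * Collects the backend device information, opens the backing device or
 * file, registers devstat and sysctl state, creates the I/O taskqueue,
 * and finally announces XenbusStateInitialised so the front-end can
 * connect.
 */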
static void
xbb_attach_disk(struct xs_watch *watch, const char **vec, unsigned int len)
{
device_t dev;
struct xbb_softc *xbb;
int error;
dev = (device_t) watch->callback_data;
xbb = device_get_softc(dev);
error = xs_gather(XST_NIL, xenbus_get_node(dev), "physical-device-path",
NULL, &xbb->dev_name, NULL);
if (error != 0)
return;
xs_unregister_watch(watch);
free(watch->node, M_XENBLOCKBACK);
watch->node = NULL;
/* Collect physical device information. */
error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
"device-type", NULL, &xbb->dev_type,
NULL);
if (error != 0)
xbb->dev_type = NULL;
error = xs_gather(XST_NIL, xenbus_get_node(dev),
"mode", NULL, &xbb->dev_mode,
NULL);
if (error != 0) {
xbb_attach_failed(xbb, error, "reading backend fields at %s",
xenbus_get_node(dev));
return;
}
/* Parse fopen style mode flags. */
if (strchr(xbb->dev_mode, 'w') == NULL)
xbb->flags |= XBBF_READ_ONLY;
/*
* Verify the physical device is present and can support
* the desired I/O mode.
*/
error = xbb_open_backend(xbb);
if (error != 0) {
xbb_attach_failed(xbb, error, "Unable to open %s",
xbb->dev_name);
return;
}
/* Use devstat(9) for recording statistics. */
xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
xbb->sector_size,
DEVSTAT_ALL_SUPPORTED,
DEVSTAT_TYPE_DIRECT
| DEVSTAT_TYPE_IF_OTHER,
DEVSTAT_PRIORITY_OTHER);
xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
xbb->sector_size,
DEVSTAT_ALL_SUPPORTED,
DEVSTAT_TYPE_DIRECT
| DEVSTAT_TYPE_IF_OTHER,
DEVSTAT_PRIORITY_OTHER);
/*
* Setup sysctl variables.
*/
xbb_setup_sysctl(xbb);
/*
* Create a taskqueue for doing work that must occur from a
* thread context.
*/
xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
M_NOWAIT,
taskqueue_thread_enqueue,
/*context*/&xbb->io_taskqueue);
if (xbb->io_taskqueue == NULL) {
xbb_attach_failed(xbb, error, "Unable to create taskqueue");
return;
}
taskqueue_start_threads(&xbb->io_taskqueue,
/*num threads*/1,
/*priority*/PWAIT,
/*thread name*/
"%s taskq", device_get_nameunit(dev));
/* Update hot-plug status to satisfy xend. */
error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
"hotplug-status", "connected");
if (error) {
xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
xenbus_get_node(xbb->dev));
return;
}
/* Tell the front end that we are ready to connect. */
xenbus_set_state(dev, XenbusStateInitialised);
}
/**
* Attach to a XenBus device that has been claimed by our probe routine.
*
* \param dev NewBus device object representing this Xen Block Back instance.
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_attach(device_t dev)
{
struct xbb_softc *xbb;
int error;
u_int max_ring_page_order;
struct sbuf *watch_path;
DPRINTF("Attaching to %s\n", xenbus_get_node(dev));
/*
* Basic initialization.
* After this block it is safe to call xbb_detach()
* to clean up any allocated data for this instance.
*/
xbb = device_get_softc(dev);
xbb->dev = dev;
xbb->otherend_id = xenbus_get_otherend_id(dev);
TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
/*
* Publish protocol capabilities for consumption by the
* front-end.
*/
error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
"feature-barrier", "1");
if (error) {
xbb_attach_failed(xbb, error, "writing %s/feature-barrier",
xenbus_get_node(xbb->dev));
return (error);
}
error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
"feature-flush-cache", "1");
if (error) {
xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache",
xenbus_get_node(xbb->dev));
return (error);
}
max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1;
error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
"max-ring-page-order", "%u", max_ring_page_order);
if (error) {
xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order",
xenbus_get_node(xbb->dev));
return (error);
}
/*
* We need to wait for hotplug script execution before
* moving forward.
*/
watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path");
xbb->hotplug_watch.callback_data = (uintptr_t)dev;
xbb->hotplug_watch.callback = xbb_attach_disk;
KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup"));
xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK);
sbuf_delete(watch_path);
error = xs_register_watch(&xbb->hotplug_watch);
if (error != 0) {
xbb_attach_failed(xbb, error, "failed to create watch on %s",
xbb->hotplug_watch.node);
free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
return (error);
}
/* Tell the toolstack blkback has attached. */
xenbus_set_state(dev, XenbusStateInitWait);
return (0);
}
/**
* Detach from a block back device instance.
*
* \param dev NewBus device object representing this Xen Block Back instance.
*
* \return 0 for success, errno codes for failure.
*
* \note A block back device may be detached at any time in its life-cycle,
* including part way through the attach process. For this reason,
* initialization order and the initialization state checks in this
* routine must be carefully coupled so that attach time failures
* are gracefully handled.
*/
static int
xbb_detach(device_t dev)
{
struct xbb_softc *xbb;
DPRINTF("\n");
xbb = device_get_softc(dev);
mtx_lock(&xbb->lock);
while (xbb_shutdown(xbb) == EAGAIN) {
msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0,
"xbb_shutdown", 0);
}
mtx_unlock(&xbb->lock);
DPRINTF("\n");
if (xbb->io_taskqueue != NULL)
taskqueue_free(xbb->io_taskqueue);
if (xbb->xbb_stats != NULL)
devstat_remove_entry(xbb->xbb_stats);
if (xbb->xbb_stats_in != NULL)
devstat_remove_entry(xbb->xbb_stats_in);
xbb_close_backend(xbb);
if (xbb->dev_mode != NULL) {
free(xbb->dev_mode, M_XENSTORE);
xbb->dev_mode = NULL;
}
if (xbb->dev_type != NULL) {
free(xbb->dev_type, M_XENSTORE);
xbb->dev_type = NULL;
}
if (xbb->dev_name != NULL) {
free(xbb->dev_name, M_XENSTORE);
xbb->dev_name = NULL;
}
mtx_destroy(&xbb->lock);
return (0);
}
/**
* Prepare this block back device for suspension of this VM.
*
* \param dev NewBus device object representing this Xen Block Back instance.
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_suspend(device_t dev)
{
#ifdef NOT_YET
struct xbb_softc *sc = device_get_softc(dev);
/* Prevent new requests being issued until we fix things up. */
mtx_lock(&sc->xb_io_lock);
sc->connected = BLKIF_STATE_SUSPENDED;
mtx_unlock(&sc->xb_io_lock);
#endif
return (0);
}
/**
* Perform any processing required to recover from a suspended state.
*
* \param dev NewBus device object representing this Xen Block Back instance.
*
* \return 0 for success, errno codes for failure.
*/
static int
xbb_resume(device_t dev)
{
return (0);
}
/**
* Handle state changes expressed via the XenStore by our front-end peer.
*
* \param dev NewBus device object representing this Xen
* Block Back instance.
* \param frontend_state The new state of the front-end.
*
*/
static void
xbb_frontend_changed(device_t dev, XenbusState frontend_state)
{
struct xbb_softc *xbb = device_get_softc(dev);
DPRINTF("frontend_state=%s, xbb_state=%s\n",
xenbus_strstate(frontend_state),
xenbus_strstate(xenbus_get_state(xbb->dev)));
switch (frontend_state) {
case XenbusStateInitialising:
break;
case XenbusStateInitialised:
case XenbusStateConnected:
xbb_connect(xbb);
break;
case XenbusStateClosing:
case XenbusStateClosed:
mtx_lock(&xbb->lock);
xbb_shutdown(xbb);
mtx_unlock(&xbb->lock);
if (frontend_state == XenbusStateClosed)
xenbus_set_state(xbb->dev, XenbusStateClosed);
break;
default:
xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",
frontend_state);
break;
}
}
/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xbb_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, xbb_probe),
DEVMETHOD(device_attach, xbb_attach),
DEVMETHOD(device_detach, xbb_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, xbb_suspend),
DEVMETHOD(device_resume, xbb_resume),
/* Xenbus interface */
DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed),
{ 0, 0 }
};
static driver_t xbb_driver = {
"xbbd",
xbb_methods,
sizeof(struct xbb_softc),
};
devclass_t xbb_devclass;
DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0);
Index: head/sys/dev/xen/blkfront/blkfront.c
===================================================================
--- head/sys/dev/xen/blkfront/blkfront.c (revision 328217)
+++ head/sys/dev/xen/blkfront/blkfront.c (revision 328218)
@@ -1,1614 +1,1615 @@
/*
* XenBSD block device driver
*
* Copyright (c) 2010-2013 Spectra Logic Corporation
* Copyright (c) 2009 Scott Long, Yahoo!
* Copyright (c) 2009 Frank Suchomel, Citrix
* Copyright (c) 2009 Doug F. Rabson, Citrix
* Copyright (c) 2005 Kip Macy
* Copyright (c) 2003-2004, Keir Fraser & Steve Hand
* Modifications by Mark A. Williamson are (c) Intel Research Cambridge
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <machine/vmparam.h>
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus/xenbusvar.h>
#include <machine/_inttypes.h>
#include <geom/geom_disk.h>
#include <dev/xen/blkfront/block.h>
#include "xenbus_if.h"
/*--------------------------- Forward Declarations ---------------------------*/
static void xbd_closing(device_t);
static void xbd_startio(struct xbd_softc *sc);
/*---------------------------------- Macros ----------------------------------*/
#if 0
#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif
#define XBD_SECTOR_SHFT 9
/*---------------------------- Global Static Data ----------------------------*/
static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
static int xbd_enable_indirect = 1;
SYSCTL_NODE(_hw, OID_AUTO, xbd, CTLFLAG_RD, 0, "xbd driver parameters");
SYSCTL_INT(_hw_xbd, OID_AUTO, xbd_enable_indirect, CTLFLAG_RDTUN,
&xbd_enable_indirect, 0, "Enable xbd indirect segments");
/*---------------------------- Command Processing ----------------------------*/
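/*
 * Queue freezing is reference counted: each xbd_freeze() call bumps
 * xbd_qfrozen_cnt (optionally recording the reason in xbd_flags) and
 * each matching xbd_thaw() drops it, so request dispatch stays paused
 * until every outstanding shortage has been resolved.
 */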
static void
xbd_freeze(struct xbd_softc *sc, xbd_flag_t xbd_flag)
{
if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0)
return;
sc->xbd_flags |= xbd_flag;
sc->xbd_qfrozen_cnt++;
}
static void
xbd_thaw(struct xbd_softc *sc, xbd_flag_t xbd_flag)
{
if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0)
return;
if (sc->xbd_qfrozen_cnt == 0)
panic("%s: Thaw with flag 0x%x while not frozen.",
__func__, xbd_flag);
sc->xbd_flags &= ~xbd_flag;
sc->xbd_qfrozen_cnt--;
}
static void
xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag)
{
if ((cm->cm_flags & XBDCF_FROZEN) != 0)
return;
cm->cm_flags |= XBDCF_FROZEN|cm_flag;
xbd_freeze(sc, XBDF_NONE);
}
static void
xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm)
{
if ((cm->cm_flags & XBDCF_FROZEN) == 0)
return;
cm->cm_flags &= ~XBDCF_FROZEN;
xbd_thaw(sc, XBDF_NONE);
}
static inline void
xbd_flush_requests(struct xbd_softc *sc)
{
int notify;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify);
if (notify)
xen_intr_signal(sc->xen_intr_handle);
}
static void
xbd_free_command(struct xbd_command *cm)
{
KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
("Freeing command that is still on queue %d.",
cm->cm_flags & XBDCF_Q_MASK));
cm->cm_flags = XBDCF_INITIALIZER;
cm->cm_bp = NULL;
cm->cm_complete = NULL;
xbd_enqueue_cm(cm, XBD_Q_FREE);
xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
}
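/*
 * Fill in the scatter/gather segment descriptors for a request,
 * granting the back-end access to each page of the I/O buffer.  Every
 * segment records its grant reference together with the first and last
 * 512 byte sector touched within its page.
 */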
static void
xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
grant_ref_t * gref_head, int otherend_id, int readonly,
grant_ref_t * sg_ref, struct blkif_request_segment *sg)
{
struct blkif_request_segment *last_block_sg = sg + nsegs;
vm_paddr_t buffer_ma;
uint64_t fsect, lsect;
int ref;
while (sg < last_block_sg) {
KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0,
("XEN disk driver I/O must be sector aligned"));
KASSERT(segs->ds_len % (1 << XBD_SECTOR_SHFT) == 0,
("XEN disk driver I/Os must be a multiple of "
"the sector length"));
buffer_ma = segs->ds_addr;
fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
KASSERT(lsect <= 7, ("XEN disk driver data cannot "
"cross a page boundary"));
/* install a grant reference. */
ref = gnttab_claim_grant_reference(gref_head);
/*
* GNTTAB_LIST_END == 0xffffffff, but it is private
* to gnttab.c.
*/
KASSERT(ref != ~0, ("grant_reference failed"));
gnttab_grant_foreign_access_ref(
ref,
otherend_id,
buffer_ma >> PAGE_SHIFT,
readonly);
*sg_ref = ref;
*sg = (struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect
};
sg++;
sg_ref++;
segs++;
}
}
static void
xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct xbd_softc *sc;
struct xbd_command *cm;
int op;
cm = arg;
sc = cm->cm_sc;
if (error) {
cm->cm_bp->bio_error = EIO;
biodone(cm->cm_bp);
xbd_free_command(cm);
return;
}
KASSERT(nsegs <= sc->xbd_max_request_segments,
("Too many segments in a blkfront I/O"));
if (nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) {
blkif_request_t *ring_req;
/* Fill out a blkif_request_t structure. */
ring_req = (blkif_request_t *)
RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
sc->xbd_ring.req_prod_pvt++;
ring_req->id = cm->cm_id;
ring_req->operation = cm->cm_operation;
ring_req->sector_number = cm->cm_sector_number;
ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
ring_req->nr_segments = nsegs;
cm->cm_nseg = nsegs;
xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
xenbus_get_otherend_id(sc->xbd_dev),
cm->cm_operation == BLKIF_OP_WRITE,
cm->cm_sg_refs, ring_req->seg);
} else {
blkif_request_indirect_t *ring_req;
/* Fill out a blkif_request_indirect_t structure. */
ring_req = (blkif_request_indirect_t *)
RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
sc->xbd_ring.req_prod_pvt++;
ring_req->id = cm->cm_id;
ring_req->operation = BLKIF_OP_INDIRECT;
ring_req->indirect_op = cm->cm_operation;
ring_req->sector_number = cm->cm_sector_number;
ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
ring_req->nr_segments = nsegs;
cm->cm_nseg = nsegs;
xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
xenbus_get_otherend_id(sc->xbd_dev),
cm->cm_operation == BLKIF_OP_WRITE,
cm->cm_sg_refs, cm->cm_indirectionpages);
memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
}
if (cm->cm_operation == BLKIF_OP_READ)
op = BUS_DMASYNC_PREREAD;
else if (cm->cm_operation == BLKIF_OP_WRITE)
op = BUS_DMASYNC_PREWRITE;
else
op = 0;
bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
gnttab_free_grant_references(cm->cm_gref_head);
xbd_enqueue_cm(cm, XBD_Q_BUSY);
/*
* If bus dma had to asynchronously call us back to dispatch
* this command, we are no longer executing in the context of
* xbd_startio(). Thus we cannot rely on xbd_startio()'s call to
* xbd_flush_requests() to publish this command to the backend
* along with any other commands that it could batch.
*/
if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
xbd_flush_requests(sc);
return;
}
static int
xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
{
int error;
if (cm->cm_bp != NULL)
error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
cm->cm_bp, xbd_queue_cb, cm, 0);
else
error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
if (error == EINPROGRESS) {
/*
* Maintain queuing order by freezing the queue. The next
* command may not require as many resources as the command
* we just attempted to map, so we can't rely on bus dma
* blocking for it too.
*/
xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
return (0);
}
return (error);
}
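/*
 * Editorial sketch (hypothetical caller, not part of the driver): the
 * EINPROGRESS handling above seen from the dispatcher's side.  A return
 * of 0 means either "mapped and queued synchronously" or "deferred; the
 * queue stays frozen until xbd_queue_cb() runs and thaws it", so the
 * caller treats both the same way.
 */
#if 0
static void
xbd_dispatch_example(struct xbd_softc *sc, struct xbd_command *cm)
{
	int error;

	error = xbd_queue_request(sc, cm);
	if (error != 0)
		printf("xbd_queue_request returned %d\n", error);
	/*
	 * On success nothing more is needed here: the ring push happens
	 * in xbd_startio() or, for async mappings, in xbd_queue_cb().
	 */
}
#endif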
static void
xbd_restart_queue_callback(void *arg)
{
struct xbd_softc *sc = arg;
mtx_lock(&sc->xbd_io_lock);
xbd_thaw(sc, XBDF_GNT_SHORTAGE);
xbd_startio(sc);
mtx_unlock(&sc->xbd_io_lock);
}
static struct xbd_command *
xbd_bio_command(struct xbd_softc *sc)
{
struct xbd_command *cm;
struct bio *bp;
if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED))
return (NULL);
bp = xbd_dequeue_bio(sc);
if (bp == NULL)
return (NULL);
if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
xbd_freeze(sc, XBDF_CM_SHORTAGE);
xbd_requeue_bio(sc, bp);
return (NULL);
}
if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
&cm->cm_gref_head) != 0) {
gnttab_request_free_callback(&sc->xbd_callback,
xbd_restart_queue_callback, sc,
sc->xbd_max_request_segments);
xbd_freeze(sc, XBDF_GNT_SHORTAGE);
xbd_requeue_bio(sc, bp);
xbd_enqueue_cm(cm, XBD_Q_FREE);
return (NULL);
}
cm->cm_bp = bp;
cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
switch (bp->bio_cmd) {
case BIO_READ:
cm->cm_operation = BLKIF_OP_READ;
break;
case BIO_WRITE:
cm->cm_operation = BLKIF_OP_WRITE;
if ((bp->bio_flags & BIO_ORDERED) != 0) {
if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
} else {
/*
* Single step this command.
*/
cm->cm_flags |= XBDCF_Q_FREEZE;
if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
/*
* Wait for in-flight requests to
* finish.
*/
xbd_freeze(sc, XBDF_WAIT_IDLE);
xbd_requeue_cm(cm, XBD_Q_READY);
return (NULL);
}
}
}
break;
case BIO_FLUSH:
if ((sc->xbd_flags & XBDF_FLUSH) != 0)
cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE;
else if ((sc->xbd_flags & XBDF_BARRIER) != 0)
cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
else
panic("flush request, but no flush support available");
break;
default:
panic("unknown bio command %d", bp->bio_cmd);
}
return (cm);
}
/*
* Dequeue buffers and place them in the shared communication ring.
* Return when no more requests can be accepted or all buffers have
* been queued.
*
* Signal XEN once the ring has been filled out.
*/
static void
xbd_startio(struct xbd_softc *sc)
{
struct xbd_command *cm;
int error, queued = 0;
mtx_assert(&sc->xbd_io_lock, MA_OWNED);
if (sc->xbd_state != XBD_STATE_CONNECTED)
return;
while (!RING_FULL(&sc->xbd_ring)) {
if (sc->xbd_qfrozen_cnt != 0)
break;
cm = xbd_dequeue_cm(sc, XBD_Q_READY);
if (cm == NULL)
cm = xbd_bio_command(sc);
if (cm == NULL)
break;
if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) {
/*
* Single step command. Future work is
* held off until this command completes.
*/
xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE);
}
if ((error = xbd_queue_request(sc, cm)) != 0) {
printf("xbd_queue_request returned %d\n", error);
break;
}
queued++;
}
if (queued != 0)
xbd_flush_requests(sc);
}

static void
xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
{
struct bio *bp;
bp = cm->cm_bp;
if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) {
disk_err(bp, "disk error", -1, 0);
printf(" status: %x\n", cm->cm_status);
bp->bio_flags |= BIO_ERROR;
}
if (bp->bio_flags & BIO_ERROR)
bp->bio_error = EIO;
else
bp->bio_resid = 0;
xbd_free_command(cm);
biodone(bp);
}
static void
xbd_int(void *xsc)
{
struct xbd_softc *sc = xsc;
struct xbd_command *cm;
blkif_response_t *bret;
RING_IDX i, rp;
int op;
mtx_lock(&sc->xbd_io_lock);
if (__predict_false(sc->xbd_state == XBD_STATE_DISCONNECTED)) {
mtx_unlock(&sc->xbd_io_lock);
return;
}
again:
rp = sc->xbd_ring.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
for (i = sc->xbd_ring.rsp_cons; i != rp;) {
bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
cm = &sc->xbd_shadow[bret->id];
xbd_remove_cm(cm, XBD_Q_BUSY);
gnttab_end_foreign_access_references(cm->cm_nseg,
cm->cm_sg_refs);
i++;
if (cm->cm_operation == BLKIF_OP_READ)
op = BUS_DMASYNC_POSTREAD;
else if (cm->cm_operation == BLKIF_OP_WRITE ||
cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
op = BUS_DMASYNC_POSTWRITE;
else
op = 0;
bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
/*
* Release any hold this command has on future command
* dispatch.
*/
xbd_cm_thaw(sc, cm);
/*
* Directly call the i/o complete routine to save an
* indirection in the common case.
*/
cm->cm_status = bret->status;
if (cm->cm_bp)
xbd_bio_complete(sc, cm);
else if (cm->cm_complete != NULL)
cm->cm_complete(cm);
else
xbd_free_command(cm);
}
sc->xbd_ring.rsp_cons = i;
if (i != sc->xbd_ring.req_prod_pvt) {
int more_to_do;
RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do);
if (more_to_do)
goto again;
} else {
sc->xbd_ring.sring->rsp_event = i + 1;
}
if (xbd_queue_length(sc, XBD_Q_BUSY) == 0)
xbd_thaw(sc, XBDF_WAIT_IDLE);
xbd_startio(sc);
if (__predict_false(sc->xbd_state == XBD_STATE_SUSPENDED))
wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]);
mtx_unlock(&sc->xbd_io_lock);
}
/*------------------------------- Dump Support -------------------------------*/
/**
* Quiesce the disk writes for a dump file before allowing the next buffer.
*/
static void
xbd_quiesce(struct xbd_softc *sc)
{
int mtd;
/* While there are outstanding requests. */
while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
if (mtd) {
/* Received request completions, update queue. */
xbd_int(sc);
}
if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
/*
* Still pending requests, wait for the disk i/o
* to complete.
*/
HYPERVISOR_yield();
}
}
}
/* Kernel dump function for a paravirtualized disk device */
static void
xbd_dump_complete(struct xbd_command *cm)
{
xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
}
static int
xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
size_t length)
{
struct disk *dp = arg;
struct xbd_softc *sc = dp->d_drv1;
struct xbd_command *cm;
size_t chunk;
int sbp;
int rc = 0;
if (length <= 0)
return (rc);
xbd_quiesce(sc); /* All quiet on the western front. */
/*
* If this lock is held, then this module is failing, and a
* successful kernel dump is highly unlikely anyway.
*/
mtx_lock(&sc->xbd_io_lock);
/* Split the 64KB block as needed */
for (sbp=0; length > 0; sbp++) {
cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
if (cm == NULL) {
mtx_unlock(&sc->xbd_io_lock);
device_printf(sc->xbd_dev, "dump: no more commands?\n");
return (EBUSY);
}
if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
&cm->cm_gref_head) != 0) {
xbd_free_command(cm);
mtx_unlock(&sc->xbd_io_lock);
device_printf(sc->xbd_dev, "no more grant allocs?\n");
return (EBUSY);
}
chunk = length > sc->xbd_max_request_size ?
sc->xbd_max_request_size : length;
cm->cm_data = virtual;
cm->cm_datalen = chunk;
cm->cm_operation = BLKIF_OP_WRITE;
cm->cm_sector_number = offset / dp->d_sectorsize;
cm->cm_complete = xbd_dump_complete;
xbd_enqueue_cm(cm, XBD_Q_READY);
length -= chunk;
offset += chunk;
virtual = (char *) virtual + chunk;
}
/* Tell DOM0 to do the I/O */
xbd_startio(sc);
mtx_unlock(&sc->xbd_io_lock);
/* Poll for the completion. */
xbd_quiesce(sc); /* All quiet on the eastern front. */
/* If there were any errors, bail out... */
while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
if (cm->cm_status != BLKIF_RSP_OKAY) {
device_printf(sc->xbd_dev,
"Dump I/O failed at sector %jd\n",
cm->cm_sector_number);
rc = EIO;
}
xbd_free_command(cm);
}
return (rc);
}
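/*
 * Editorial sketch (hypothetical values): how the splitting loop above
 * carves a dump buffer into requests.  With a 512-byte sector size and an
 * assumed xbd_max_request_size of 64 KiB, a 150 KiB dump at offset 0
 * becomes three commands: 64 KiB @ sector 0, 64 KiB @ sector 128, and
 * 22 KiB @ sector 256.
 */
#if 0
static void
xbd_dump_split_example(size_t length, off_t offset, size_t max_request_size,
    u_int sectorsize)
{
	size_t chunk;

	while (length > 0) {
		chunk = length > max_request_size ? max_request_size : length;
		printf("write %zu bytes at sector %jd\n", chunk,
		    (intmax_t)(offset / sectorsize));
		length -= chunk;
		offset += chunk;
	}
}
#endif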
/*----------------------------- Disk Entrypoints -----------------------------*/
static int
xbd_open(struct disk *dp)
{
struct xbd_softc *sc = dp->d_drv1;
if (sc == NULL) {
printf("xbd%d: not found", dp->d_unit);
return (ENXIO);
}
sc->xbd_flags |= XBDF_OPEN;
sc->xbd_users++;
return (0);
}
static int
xbd_close(struct disk *dp)
{
struct xbd_softc *sc = dp->d_drv1;
if (sc == NULL)
return (ENXIO);
sc->xbd_flags &= ~XBDF_OPEN;
if (--(sc->xbd_users) == 0) {
/*
* Check whether we have been instructed to close. We will
* have ignored this request initially, as the device was
* still mounted.
*/
if (xenbus_get_otherend_state(sc->xbd_dev) ==
XenbusStateClosing)
xbd_closing(sc->xbd_dev);
}
return (0);
}
static int
xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
{
struct xbd_softc *sc = dp->d_drv1;
if (sc == NULL)
return (ENXIO);
return (ENOTTY);
}
/*
* Read/write routine for a buffer. Finds the proper unit, places it on
* the sortq, and kicks the controller.
*/
static void
xbd_strategy(struct bio *bp)
{
struct xbd_softc *sc = bp->bio_disk->d_drv1;
/* bogus disk? */
if (sc == NULL) {
bp->bio_error = EINVAL;
bp->bio_flags |= BIO_ERROR;
bp->bio_resid = bp->bio_bcount;
biodone(bp);
return;
}
/*
* Place it in the queue of disk activities for this disk
*/
mtx_lock(&sc->xbd_io_lock);
xbd_enqueue_bio(sc, bp);
xbd_startio(sc);
mtx_unlock(&sc->xbd_io_lock);
return;
}
/*------------------------------ Ring Management -----------------------------*/
static int
xbd_alloc_ring(struct xbd_softc *sc)
{
blkif_sring_t *sring;
uintptr_t sring_page_addr;
int error;
int i;
sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
M_NOWAIT|M_ZERO);
if (sring == NULL) {
xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring");
return (ENOMEM);
}
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE);
for (i = 0, sring_page_addr = (uintptr_t)sring;
i < sc->xbd_ring_pages;
i++, sring_page_addr += PAGE_SIZE) {
error = xenbus_grant_ring(sc->xbd_dev,
(vtophys(sring_page_addr) >> PAGE_SHIFT),
&sc->xbd_ring_ref[i]);
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"granting ring_ref(%d)", i);
return (error);
}
}
if (sc->xbd_ring_pages == 1) {
error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
"ring-ref", "%u", sc->xbd_ring_ref[0]);
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"writing %s/ring-ref",
xenbus_get_node(sc->xbd_dev));
return (error);
}
} else {
for (i = 0; i < sc->xbd_ring_pages; i++) {
char ring_ref_name[] = "ring_refXX";
snprintf(ring_ref_name, sizeof(ring_ref_name),
"ring-ref%u", i);
error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
ring_ref_name, "%u", sc->xbd_ring_ref[i]);
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"writing %s/%s",
xenbus_get_node(sc->xbd_dev),
ring_ref_name);
return (error);
}
}
}
error = xen_intr_alloc_and_bind_local_port(sc->xbd_dev,
xenbus_get_otherend_id(sc->xbd_dev), NULL, xbd_int, sc,
INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle);
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"xen_intr_alloc_and_bind_local_port failed");
return (error);
}
return (0);
}
static void
xbd_free_ring(struct xbd_softc *sc)
{
int i;
if (sc->xbd_ring.sring == NULL)
return;
for (i = 0; i < sc->xbd_ring_pages; i++) {
if (sc->xbd_ring_ref[i] != GRANT_REF_INVALID) {
gnttab_end_foreign_access_ref(sc->xbd_ring_ref[i]);
sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
}
}
free(sc->xbd_ring.sring, M_XENBLOCKFRONT);
sc->xbd_ring.sring = NULL;
}
/*-------------------------- Initialization/Teardown -------------------------*/
static int
xbd_feature_string(struct xbd_softc *sc, char *features, size_t len)
{
struct sbuf sb;
int feature_cnt;
sbuf_new(&sb, features, len, SBUF_FIXEDLEN);
feature_cnt = 0;
if ((sc->xbd_flags & XBDF_FLUSH) != 0) {
sbuf_printf(&sb, "flush");
feature_cnt++;
}
if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
if (feature_cnt != 0)
sbuf_printf(&sb, ", ");
sbuf_printf(&sb, "write_barrier");
feature_cnt++;
}
if ((sc->xbd_flags & XBDF_DISCARD) != 0) {
if (feature_cnt != 0)
sbuf_printf(&sb, ", ");
sbuf_printf(&sb, "discard");
feature_cnt++;
}
if ((sc->xbd_flags & XBDF_PERSISTENT) != 0) {
if (feature_cnt != 0)
sbuf_printf(&sb, ", ");
sbuf_printf(&sb, "persistent_grants");
feature_cnt++;
}
(void) sbuf_finish(&sb);
return (sbuf_len(&sb));
}
static int
xbd_sysctl_features(SYSCTL_HANDLER_ARGS)
{
char features[80];
struct xbd_softc *sc = arg1;
int error;
int len;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
len = xbd_feature_string(sc, features, sizeof(features));
/* len is -1 on error, which will make the SYSCTL_OUT a no-op. */
return (SYSCTL_OUT(req, features, len + 1/*NUL*/));
}
static void
xbd_setup_sysctl(struct xbd_softc *xbd)
{
struct sysctl_ctx_list *sysctl_ctx = NULL;
struct sysctl_oid *sysctl_tree = NULL;
struct sysctl_oid_list *children;
sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
if (sysctl_ctx == NULL)
return;
sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev);
if (sysctl_tree == NULL)
return;
children = SYSCTL_CHILDREN(sysctl_tree);
SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
"max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1,
"maximum outstanding requests (negotiated)");
SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
"max_request_segments", CTLFLAG_RD,
&xbd->xbd_max_request_segments, 0,
"maximum number of pages per requests (negotiated)");
SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
"max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0,
"maximum size in bytes of a request (negotiated)");
SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
"ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0,
"communication channel pages (negotiated)");
SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO,
"features", CTLTYPE_STRING|CTLFLAG_RD, xbd, 0,
xbd_sysctl_features, "A", "protocol features (negotiated)");
}
/*
* Translate Linux major/minor to an appropriate name and unit
* number. For HVM guests, this allows us to use the same drive names
* with blkfront as the emulated drives, easing transition slightly.
*/
static void
xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
{
static struct vdev_info {
int major;
int shift;
int base;
const char *name;
} info[] = {
{3, 6, 0, "ada"}, /* ide0 */
{22, 6, 2, "ada"}, /* ide1 */
{33, 6, 4, "ada"}, /* ide2 */
{34, 6, 6, "ada"}, /* ide3 */
{56, 6, 8, "ada"}, /* ide4 */
{57, 6, 10, "ada"}, /* ide5 */
{88, 6, 12, "ada"}, /* ide6 */
{89, 6, 14, "ada"}, /* ide7 */
{90, 6, 16, "ada"}, /* ide8 */
{91, 6, 18, "ada"}, /* ide9 */
{8, 4, 0, "da"}, /* scsi disk0 */
{65, 4, 16, "da"}, /* scsi disk1 */
{66, 4, 32, "da"}, /* scsi disk2 */
{67, 4, 48, "da"}, /* scsi disk3 */
{68, 4, 64, "da"}, /* scsi disk4 */
{69, 4, 80, "da"}, /* scsi disk5 */
{70, 4, 96, "da"}, /* scsi disk6 */
{71, 4, 112, "da"}, /* scsi disk7 */
{128, 4, 128, "da"}, /* scsi disk8 */
{129, 4, 144, "da"}, /* scsi disk9 */
{130, 4, 160, "da"}, /* scsi disk10 */
{131, 4, 176, "da"}, /* scsi disk11 */
{132, 4, 192, "da"}, /* scsi disk12 */
{133, 4, 208, "da"}, /* scsi disk13 */
{134, 4, 224, "da"}, /* scsi disk14 */
{135, 4, 240, "da"}, /* scsi disk15 */
{202, 4, 0, "xbd"}, /* xbd */
{0, 0, 0, NULL},
};
int major = vdevice >> 8;
int minor = vdevice & 0xff;
int i;
if (vdevice & (1 << 28)) {
*unit = (vdevice & ((1 << 28) - 1)) >> 8;
*name = "xbd";
return;
}
for (i = 0; info[i].major; i++) {
if (info[i].major == major) {
*unit = info[i].base + (minor >> info[i].shift);
*name = info[i].name;
return;
}
}
*unit = minor >> 4;
*name = "xbd";
}
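/*
 * Editorial sketch (hypothetical values): two translations performed by
 * xbd_vdevice_to_unit() above.  A classic Linux device number such as
 * 0x0811 (major 8, minor 0x11) matches the "da" row with shift 4 and
 * base 0, giving da1.  A number with bit 28 set uses the extended
 * scheme, e.g. (1 << 28) | (5 << 8) yields xbd5.
 */
#if 0
static void
xbd_vdevice_example(void)
{
	int unit;
	const char *name;

	xbd_vdevice_to_unit(0x0811, &unit, &name);		/* -> da1 */
	xbd_vdevice_to_unit((1 << 28) | (5 << 8), &unit, &name);	/* -> xbd5 */
}
#endif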
int
xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
int vdevice, uint16_t vdisk_info, unsigned long sector_size,
unsigned long phys_sector_size)
{
char features[80];
int unit, error = 0;
const char *name;
xbd_vdevice_to_unit(vdevice, &unit, &name);
sc->xbd_unit = unit;
if (strcmp(name, "xbd") != 0)
device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit);
if (xbd_feature_string(sc, features, sizeof(features)) > 0) {
device_printf(sc->xbd_dev, "features: %s\n",
features);
}
sc->xbd_disk = disk_alloc();
sc->xbd_disk->d_unit = sc->xbd_unit;
sc->xbd_disk->d_open = xbd_open;
sc->xbd_disk->d_close = xbd_close;
sc->xbd_disk->d_ioctl = xbd_ioctl;
sc->xbd_disk->d_strategy = xbd_strategy;
sc->xbd_disk->d_dump = xbd_dump;
sc->xbd_disk->d_name = name;
sc->xbd_disk->d_drv1 = sc;
sc->xbd_disk->d_sectorsize = sector_size;
sc->xbd_disk->d_stripesize = phys_sector_size;
sc->xbd_disk->d_stripeoffset = 0;
sc->xbd_disk->d_mediasize = sectors * sector_size;
sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO;
if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
device_printf(sc->xbd_dev,
"synchronize cache commands enabled.\n");
}
disk_create(sc->xbd_disk, DISK_VERSION);
return (error);
}
static void
xbd_free(struct xbd_softc *sc)
{
int i;
/* Prevent new requests being issued until we fix things up. */
mtx_lock(&sc->xbd_io_lock);
sc->xbd_state = XBD_STATE_DISCONNECTED;
mtx_unlock(&sc->xbd_io_lock);
/* Free resources associated with old device channel. */
xbd_free_ring(sc);
if (sc->xbd_shadow) {
for (i = 0; i < sc->xbd_max_requests; i++) {
struct xbd_command *cm;
cm = &sc->xbd_shadow[i];
if (cm->cm_sg_refs != NULL) {
free(cm->cm_sg_refs, M_XENBLOCKFRONT);
cm->cm_sg_refs = NULL;
}
if (cm->cm_indirectionpages != NULL) {
gnttab_end_foreign_access_references(
sc->xbd_max_request_indirectpages,
&cm->cm_indirectionrefs[0]);
contigfree(cm->cm_indirectionpages, PAGE_SIZE *
sc->xbd_max_request_indirectpages,
M_XENBLOCKFRONT);
cm->cm_indirectionpages = NULL;
}
bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
}
free(sc->xbd_shadow, M_XENBLOCKFRONT);
sc->xbd_shadow = NULL;
bus_dma_tag_destroy(sc->xbd_io_dmat);
xbd_initq_cm(sc, XBD_Q_FREE);
xbd_initq_cm(sc, XBD_Q_READY);
xbd_initq_cm(sc, XBD_Q_COMPLETE);
}
xen_intr_unbind(&sc->xen_intr_handle);
}
/*--------------------------- State Change Handlers --------------------------*/
static void
xbd_initialize(struct xbd_softc *sc)
{
const char *otherend_path;
const char *node_path;
uint32_t max_ring_page_order;
int error;
if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
/* Initialization has already been performed. */
return;
}
/*
* Protocol defaults valid even if negotiation for a
* setting fails.
*/
max_ring_page_order = 0;
sc->xbd_ring_pages = 1;
/*
* Protocol negotiation.
*
* \note xs_gather() returns on the first encountered error, so
* we must use independent calls in order to guarantee
* we don't miss information in a sparsely populated back-end
* tree.
*
* \note xs_scanf() does not update variables for unmatched
* fields.
*/
otherend_path = xenbus_get_otherend_path(sc->xbd_dev);
node_path = xenbus_get_node(sc->xbd_dev);
/* Support both backend schemes for relaying ring page limits. */
(void)xs_scanf(XST_NIL, otherend_path,
"max-ring-page-order", NULL, "%" PRIu32,
&max_ring_page_order);
sc->xbd_ring_pages = 1 << max_ring_page_order;
(void)xs_scanf(XST_NIL, otherend_path,
"max-ring-pages", NULL, "%" PRIu32,
&sc->xbd_ring_pages);
if (sc->xbd_ring_pages < 1)
sc->xbd_ring_pages = 1;
if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
device_printf(sc->xbd_dev,
"Back-end specified ring-pages of %u "
"limited to front-end limit of %u.\n",
sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
}
if (powerof2(sc->xbd_ring_pages) == 0) {
uint32_t new_page_limit;
new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1);
device_printf(sc->xbd_dev,
"Back-end specified ring-pages of %u "
"is not a power of 2. Limited to %u.\n",
sc->xbd_ring_pages, new_page_limit);
sc->xbd_ring_pages = new_page_limit;
}
sc->xbd_max_requests =
BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
device_printf(sc->xbd_dev,
"Back-end specified max_requests of %u "
"limited to front-end limit of %zu.\n",
sc->xbd_max_requests, XBD_MAX_REQUESTS);
sc->xbd_max_requests = XBD_MAX_REQUESTS;
}
if (xbd_alloc_ring(sc) != 0)
return;
/* Support both backend schemes for relaying ring page limits. */
if (sc->xbd_ring_pages > 1) {
error = xs_printf(XST_NIL, node_path,
"num-ring-pages","%u",
sc->xbd_ring_pages);
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"writing %s/num-ring-pages",
node_path);
return;
}
error = xs_printf(XST_NIL, node_path,
"ring-page-order", "%u",
fls(sc->xbd_ring_pages) - 1);
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"writing %s/ring-page-order",
node_path);
return;
}
}
error = xs_printf(XST_NIL, node_path, "event-channel",
"%u", xen_intr_port(sc->xen_intr_handle));
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"writing %s/event-channel",
node_path);
return;
}
error = xs_printf(XST_NIL, node_path, "protocol",
"%s", XEN_IO_PROTO_ABI_NATIVE);
if (error) {
xenbus_dev_fatal(sc->xbd_dev, error,
"writing %s/protocol",
node_path);
return;
}
xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
}
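/*
 * Editorial sketch (hypothetical back-end values): the ring size
 * negotiation above.  A back-end advertising max-ring-page-order = 2
 * yields xbd_ring_pages = 1 << 2 = 4.  A back-end advertising
 * max-ring-pages = 6 is clamped to XBD_MAX_RING_PAGES and then rounded
 * down to a power of two: fls(6) - 1 == 2, so 6 becomes 4.
 */
#if 0
static uint32_t
xbd_ring_pages_example(uint32_t backend_pages)
{
	if (backend_pages < 1)
		backend_pages = 1;
	if (backend_pages > XBD_MAX_RING_PAGES)
		backend_pages = XBD_MAX_RING_PAGES;
	if (!powerof2(backend_pages))
		backend_pages = 1 << (fls(backend_pages) - 1);
	return (backend_pages);
}
#endif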
/*
* Invoked when the backend is finally 'ready' (and has published
* the details about the physical device - #sectors, size, etc).
*/
static void
xbd_connect(struct xbd_softc *sc)
{
device_t dev = sc->xbd_dev;
unsigned long sectors, sector_size, phys_sector_size;
unsigned int binfo;
int err, feature_barrier, feature_flush;
int i, j;
if (sc->xbd_state == XBD_STATE_CONNECTED ||
sc->xbd_state == XBD_STATE_SUSPENDED)
return;
DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
"sectors", "%lu", &sectors,
"info", "%u", &binfo,
"sector-size", "%lu", &sector_size,
NULL);
if (err) {
xenbus_dev_fatal(dev, err,
"reading backend fields at %s",
xenbus_get_otherend_path(dev));
return;
}
if ((sectors == 0) || (sector_size == 0)) {
xenbus_dev_fatal(dev, 0,
"invalid parameters from %s:"
" sectors = %lu, sector_size = %lu",
xenbus_get_otherend_path(dev),
sectors, sector_size);
return;
}
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
"physical-sector-size", "%lu", &phys_sector_size,
NULL);
if (err || phys_sector_size <= sector_size)
phys_sector_size = 0;
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
"feature-barrier", "%d", &feature_barrier,
NULL);
if (err == 0 && feature_barrier != 0)
sc->xbd_flags |= XBDF_BARRIER;
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
"feature-flush-cache", "%d", &feature_flush,
NULL);
if (err == 0 && feature_flush != 0)
sc->xbd_flags |= XBDF_FLUSH;
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
"feature-max-indirect-segments", "%" PRIu32,
&sc->xbd_max_request_segments, NULL);
if ((err != 0) || (xbd_enable_indirect == 0))
sc->xbd_max_request_segments = 0;
if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS)
sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS;
if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(MAXPHYS))
sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(MAXPHYS);
sc->xbd_max_request_indirectpages =
XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments);
if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
sc->xbd_max_request_size =
XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
/* Allocate datastructures based on negotiated values. */
err = bus_dma_tag_create(
bus_get_dma_tag(sc->xbd_dev), /* parent */
512, PAGE_SIZE, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sc->xbd_max_request_size,
sc->xbd_max_request_segments,
PAGE_SIZE, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, /* lockfunc */
&sc->xbd_io_lock, /* lockarg */
&sc->xbd_io_dmat);
if (err != 0) {
xenbus_dev_fatal(sc->xbd_dev, err,
"Cannot allocate parent DMA tag\n");
return;
}
/* Per-transaction data allocation. */
- sc->xbd_shadow = mallocarray(sc->xbd_max_requests,
- sizeof(*sc->xbd_shadow), M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
+ sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
+ M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
if (sc->xbd_shadow == NULL) {
bus_dma_tag_destroy(sc->xbd_io_dmat);
xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
"Cannot allocate request structures\n");
return;
}
for (i = 0; i < sc->xbd_max_requests; i++) {
struct xbd_command *cm;
void * indirectpages;
cm = &sc->xbd_shadow[i];
- cm->cm_sg_refs = mallocarray(sc->xbd_max_request_segments,
- sizeof(grant_ref_t), M_XENBLOCKFRONT, M_NOWAIT);
+ cm->cm_sg_refs = malloc(
+ sizeof(grant_ref_t) * sc->xbd_max_request_segments,
+ M_XENBLOCKFRONT, M_NOWAIT);
if (cm->cm_sg_refs == NULL)
break;
cm->cm_id = i;
cm->cm_flags = XBDCF_INITIALIZER;
cm->cm_sc = sc;
if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
break;
if (sc->xbd_max_request_indirectpages > 0) {
indirectpages = contigmalloc(
PAGE_SIZE * sc->xbd_max_request_indirectpages,
M_XENBLOCKFRONT, M_ZERO, 0, ~0, PAGE_SIZE, 0);
} else {
indirectpages = NULL;
}
for (j = 0; j < sc->xbd_max_request_indirectpages; j++) {
if (gnttab_grant_foreign_access(
xenbus_get_otherend_id(sc->xbd_dev),
(vtophys(indirectpages) >> PAGE_SHIFT) + j,
1 /* grant read-only access */,
&cm->cm_indirectionrefs[j]))
break;
}
if (j < sc->xbd_max_request_indirectpages)
break;
cm->cm_indirectionpages = indirectpages;
xbd_free_command(cm);
}
if (sc->xbd_disk == NULL) {
device_printf(dev, "%juMB <%s> at %s",
(uintmax_t) sectors / (1048576 / sector_size),
device_get_desc(dev),
xenbus_get_node(dev));
bus_print_child_footer(device_get_parent(dev), dev);
xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
sector_size, phys_sector_size);
}
(void)xenbus_set_state(dev, XenbusStateConnected);
/* Kick pending requests. */
mtx_lock(&sc->xbd_io_lock);
sc->xbd_state = XBD_STATE_CONNECTED;
xbd_startio(sc);
sc->xbd_flags |= XBDF_READY;
mtx_unlock(&sc->xbd_io_lock);
}
/**
* Handle the change of state of the backend to Closing. We must delete our
* device-layer structures now, to ensure that writes are flushed through to
* the backend. Once this is done, we can switch to Closed in
* acknowledgement.
*/
static void
xbd_closing(device_t dev)
{
struct xbd_softc *sc = device_get_softc(dev);
xenbus_set_state(dev, XenbusStateClosing);
DPRINTK("xbd_closing: %s removed\n", xenbus_get_node(dev));
if (sc->xbd_disk != NULL) {
disk_destroy(sc->xbd_disk);
sc->xbd_disk = NULL;
}
xenbus_set_state(dev, XenbusStateClosed);
}
/*---------------------------- NewBus Entrypoints ----------------------------*/
static int
xbd_probe(device_t dev)
{
if (strcmp(xenbus_get_type(dev), "vbd") != 0)
return (ENXIO);
if (xen_hvm_domain() && xen_disable_pv_disks != 0)
return (ENXIO);
if (xen_hvm_domain()) {
int error;
char *type;
/*
* When running in an HVM domain, IDE disk emulation is
* disabled early in boot so that native drivers will
* not see emulated hardware. However, CDROM device
* emulation cannot be disabled.
*
* Through use of FreeBSD's vm_guest and xen_hvm_domain()
* APIs, we could modify the native CDROM driver to fail its
* probe when running under Xen. Unfortunately, the PV
* CDROM support in XenServer (up through at least version
* 6.2) isn't functional, so we instead rely on the emulated
* CDROM instance, and fail to attach the PV one here in
* the blkfront driver.
*/
error = xs_read(XST_NIL, xenbus_get_node(dev),
"device-type", NULL, (void **) &type);
if (error)
return (ENXIO);
if (strncmp(type, "cdrom", 5) == 0) {
free(type, M_XENSTORE);
return (ENXIO);
}
free(type, M_XENSTORE);
}
device_set_desc(dev, "Virtual Block Device");
device_quiet(dev);
return (0);
}
/*
* Setup supplies the backend directory and virtual device. We place an
* event channel and shared frame entries, then watch the backend to wait
* until it is ready.
*/
static int
xbd_attach(device_t dev)
{
struct xbd_softc *sc;
const char *name;
uint32_t vdevice;
int error;
int i;
int unit;
/* FIXME: Use dynamic device id if this is not set. */
error = xs_scanf(XST_NIL, xenbus_get_node(dev),
"virtual-device", NULL, "%" PRIu32, &vdevice);
if (error)
error = xs_scanf(XST_NIL, xenbus_get_node(dev),
"virtual-device-ext", NULL, "%" PRIu32, &vdevice);
if (error) {
xenbus_dev_fatal(dev, error, "reading virtual-device");
device_printf(dev, "Couldn't determine virtual device.\n");
return (error);
}
xbd_vdevice_to_unit(vdevice, &unit, &name);
if (!strcmp(name, "xbd"))
device_set_unit(dev, unit);
sc = device_get_softc(dev);
mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
xbd_initqs(sc);
for (i = 0; i < XBD_MAX_RING_PAGES; i++)
sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
sc->xbd_dev = dev;
sc->xbd_vdevice = vdevice;
sc->xbd_state = XBD_STATE_DISCONNECTED;
xbd_setup_sysctl(sc);
/* Wait for backend device to publish its protocol capabilities. */
xenbus_set_state(dev, XenbusStateInitialising);
return (0);
}
static int
xbd_detach(device_t dev)
{
struct xbd_softc *sc = device_get_softc(dev);
DPRINTK("%s: %s removed\n", __func__, xenbus_get_node(dev));
xbd_free(sc);
mtx_destroy(&sc->xbd_io_lock);
return (0);
}
static int
xbd_suspend(device_t dev)
{
struct xbd_softc *sc = device_get_softc(dev);
int retval;
int saved_state;
/* Prevent new requests being issued until we fix things up. */
mtx_lock(&sc->xbd_io_lock);
saved_state = sc->xbd_state;
sc->xbd_state = XBD_STATE_SUSPENDED;
/* Wait for outstanding I/O to drain. */
retval = 0;
while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock,
PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
retval = EBUSY;
break;
}
}
mtx_unlock(&sc->xbd_io_lock);
if (retval != 0)
sc->xbd_state = saved_state;
return (retval);
}
static int
xbd_resume(device_t dev)
{
struct xbd_softc *sc = device_get_softc(dev);
if (xen_suspend_cancelled) {
sc->xbd_state = XBD_STATE_CONNECTED;
return (0);
}
DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev));
xbd_free(sc);
xbd_initialize(sc);
return (0);
}
/**
* Callback received when the backend's state changes.
*/
static void
xbd_backend_changed(device_t dev, XenbusState backend_state)
{
struct xbd_softc *sc = device_get_softc(dev);
DPRINTK("backend_state=%d\n", backend_state);
switch (backend_state) {
case XenbusStateUnknown:
case XenbusStateInitialising:
case XenbusStateReconfigured:
case XenbusStateReconfiguring:
case XenbusStateClosed:
break;
case XenbusStateInitWait:
case XenbusStateInitialised:
xbd_initialize(sc);
break;
case XenbusStateConnected:
xbd_initialize(sc);
xbd_connect(sc);
break;
case XenbusStateClosing:
if (sc->xbd_users > 0) {
device_printf(dev, "detaching with pending users\n");
KASSERT(sc->xbd_disk != NULL,
("NULL disk with pending users\n"));
disk_gone(sc->xbd_disk);
} else {
xbd_closing(dev);
}
break;
}
}
/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xbd_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, xbd_probe),
DEVMETHOD(device_attach, xbd_attach),
DEVMETHOD(device_detach, xbd_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, xbd_suspend),
DEVMETHOD(device_resume, xbd_resume),
/* Xenbus interface */
DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed),
{ 0, 0 }
};
static driver_t xbd_driver = {
"xbd",
xbd_methods,
sizeof(struct xbd_softc),
};
devclass_t xbd_devclass;
DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);
Index: head/sys/fs/nfsclient/nfs_clvnops.c
===================================================================
--- head/sys/fs/nfsclient/nfs_clvnops.c (revision 328217)
+++ head/sys/fs/nfsclient/nfs_clvnops.c (revision 328218)
@@ -1,3541 +1,3541 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Rick Macklem at The University of Guelph.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from nfs_vnops.c 8.16 (Berkeley) 5/27/95
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* vnode op calls for Sun NFS version 2, 3 and 4
*/
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_kdtrace.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <nfs/nfs_lock.h>
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_nfsclient_accesscache_flush_probe_func_t
dtrace_nfscl_accesscache_flush_done_probe;
uint32_t nfscl_accesscache_flush_done_id;
dtrace_nfsclient_accesscache_get_probe_func_t
dtrace_nfscl_accesscache_get_hit_probe,
dtrace_nfscl_accesscache_get_miss_probe;
uint32_t nfscl_accesscache_get_hit_id;
uint32_t nfscl_accesscache_get_miss_id;
dtrace_nfsclient_accesscache_load_probe_func_t
dtrace_nfscl_accesscache_load_done_probe;
uint32_t nfscl_accesscache_load_done_id;
#endif /* KDTRACE_HOOKS */
/* Defs */
#define TRUE 1
#define FALSE 0
extern struct nfsstatsv1 nfsstatsv1;
extern int nfsrv_useacl;
extern int nfscl_debuglevel;
MALLOC_DECLARE(M_NEWNFSREQ);
static vop_read_t nfsfifo_read;
static vop_write_t nfsfifo_write;
static vop_close_t nfsfifo_close;
static int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
struct thread *);
static vop_lookup_t nfs_lookup;
static vop_create_t nfs_create;
static vop_mknod_t nfs_mknod;
static vop_open_t nfs_open;
static vop_pathconf_t nfs_pathconf;
static vop_close_t nfs_close;
static vop_access_t nfs_access;
static vop_getattr_t nfs_getattr;
static vop_setattr_t nfs_setattr;
static vop_read_t nfs_read;
static vop_fsync_t nfs_fsync;
static vop_remove_t nfs_remove;
static vop_link_t nfs_link;
static vop_rename_t nfs_rename;
static vop_mkdir_t nfs_mkdir;
static vop_rmdir_t nfs_rmdir;
static vop_symlink_t nfs_symlink;
static vop_readdir_t nfs_readdir;
static vop_strategy_t nfs_strategy;
static int nfs_lookitup(struct vnode *, char *, int,
struct ucred *, struct thread *, struct nfsnode **);
static int nfs_sillyrename(struct vnode *, struct vnode *,
struct componentname *);
static vop_access_t nfsspec_access;
static vop_readlink_t nfs_readlink;
static vop_print_t nfs_print;
static vop_advlock_t nfs_advlock;
static vop_advlockasync_t nfs_advlockasync;
static vop_getacl_t nfs_getacl;
static vop_setacl_t nfs_setacl;
static vop_set_text_t nfs_set_text;
/*
* Global vfs data structures for nfs
*/
struct vop_vector newnfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_access = nfs_access,
.vop_advlock = nfs_advlock,
.vop_advlockasync = nfs_advlockasync,
.vop_close = nfs_close,
.vop_create = nfs_create,
.vop_fsync = nfs_fsync,
.vop_getattr = nfs_getattr,
.vop_getpages = ncl_getpages,
.vop_putpages = ncl_putpages,
.vop_inactive = ncl_inactive,
.vop_link = nfs_link,
.vop_lookup = nfs_lookup,
.vop_mkdir = nfs_mkdir,
.vop_mknod = nfs_mknod,
.vop_open = nfs_open,
.vop_pathconf = nfs_pathconf,
.vop_print = nfs_print,
.vop_read = nfs_read,
.vop_readdir = nfs_readdir,
.vop_readlink = nfs_readlink,
.vop_reclaim = ncl_reclaim,
.vop_remove = nfs_remove,
.vop_rename = nfs_rename,
.vop_rmdir = nfs_rmdir,
.vop_setattr = nfs_setattr,
.vop_strategy = nfs_strategy,
.vop_symlink = nfs_symlink,
.vop_write = ncl_write,
.vop_getacl = nfs_getacl,
.vop_setacl = nfs_setacl,
.vop_set_text = nfs_set_text,
};
struct vop_vector newnfs_fifoops = {
.vop_default = &fifo_specops,
.vop_access = nfsspec_access,
.vop_close = nfsfifo_close,
.vop_fsync = nfs_fsync,
.vop_getattr = nfs_getattr,
.vop_inactive = ncl_inactive,
.vop_pathconf = nfs_pathconf,
.vop_print = nfs_print,
.vop_read = nfsfifo_read,
.vop_reclaim = ncl_reclaim,
.vop_setattr = nfs_setattr,
.vop_write = nfsfifo_write,
};
static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
struct componentname *cnp, struct vattr *vap);
static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
int namelen, struct ucred *cred, struct thread *td);
static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
struct componentname *scnp, struct sillyrename *sp);
/*
* Global variables
*/
SYSCTL_DECL(_vfs_nfs);
static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
&nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
static int nfs_prime_access_cache = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
&nfs_prime_access_cache, 0,
"Prime NFS ACCESS cache when fetching attributes");
static int newnfs_commit_on_close = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
&newnfs_commit_on_close, 0, "write+commit on close, else only write");
static int nfs_clean_pages_on_close = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
&nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
int newnfs_directio_enable = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
&newnfs_directio_enable, 0, "Enable NFS directio");
int nfs_keep_dirty_on_error;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW,
&nfs_keep_dirty_on_error, 0, "Retry pageout if error returned");
/*
* This sysctl allows other processes to mmap a file that has been opened
* O_DIRECT by a process. In general, having processes mmap the file while
* Direct IO is in progress can lead to Data Inconsistencies. But, we allow
* this by default to prevent DoS attacks - to prevent a malicious user from
* opening up files O_DIRECT preventing other users from mmap'ing these
* files. "Protected" environments where stricter consistency guarantees are
* required can disable this knob. The process that opened the file O_DIRECT
* cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not
* meaningful.
*/
int newnfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
&newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
#define NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY \
| NFSACCESS_EXTEND | NFSACCESS_EXECUTE \
| NFSACCESS_DELETE | NFSACCESS_LOOKUP)
/*
* SMP Locking Note:
* The list of locks after the description of the lock is the ordering
* of other locks acquired with the lock held.
* np->n_mtx : Protects the fields in the nfsnode.
VM Object Lock
VI_MTX (acquired indirectly)
* nmp->nm_mtx : Protects the fields in the nfsmount.
rep->r_mtx
* ncl_iod_mutex : Global lock, protects shared nfsiod state.
* nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
nmp->nm_mtx
rep->r_mtx
* rep->r_mtx : Protects the fields in an nfsreq.
*/
static int
nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
struct ucred *cred, u_int32_t *retmode)
{
int error = 0, attrflag, i, lrupos;
u_int32_t rmode;
struct nfsnode *np = VTONFS(vp);
struct nfsvattr nfsva;
error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
&rmode, NULL);
if (attrflag)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
if (!error) {
lrupos = 0;
mtx_lock(&np->n_mtx);
for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
if (np->n_accesscache[i].uid == cred->cr_uid) {
np->n_accesscache[i].mode = rmode;
np->n_accesscache[i].stamp = time_second;
break;
}
if (i > 0 && np->n_accesscache[i].stamp <
np->n_accesscache[lrupos].stamp)
lrupos = i;
}
if (i == NFS_ACCESSCACHESIZE) {
np->n_accesscache[lrupos].uid = cred->cr_uid;
np->n_accesscache[lrupos].mode = rmode;
np->n_accesscache[lrupos].stamp = time_second;
}
mtx_unlock(&np->n_mtx);
if (retmode != NULL)
*retmode = rmode;
KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
} else if (NFS_ISV4(vp)) {
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
}
#ifdef KDTRACE_HOOKS
if (error != 0)
KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
error);
#endif
return (error);
}
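/*
 * Editorial sketch (generic form, not NFS-specific): the cache update in
 * nfs34_access_otw() above is a small least-recently-stamped replacement.
 * Scan for an entry owned by the credential, remember the stalest slot as
 * you go, and if no entry matches overwrite the stalest slot.
 */
#if 0
struct example_slot {
	uid_t		uid;
	u_int32_t	mode;
	time_t		stamp;
};

static void
example_cache_update(struct example_slot *cache, int n, uid_t uid,
    u_int32_t mode, time_t now)
{
	int i, lrupos = 0;

	for (i = 0; i < n; i++) {
		if (cache[i].uid == uid) {
			cache[i].mode = mode;
			cache[i].stamp = now;
			break;
		}
		if (i > 0 && cache[i].stamp < cache[lrupos].stamp)
			lrupos = i;
	}
	if (i == n) {
		/* No entry for this uid: evict the least recently stamped. */
		cache[lrupos].uid = uid;
		cache[lrupos].mode = mode;
		cache[lrupos].stamp = now;
	}
}
#endif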
/*
* nfs access vnode op.
* For nfs version 2, just return ok. File accesses may fail later.
* For nfs version 3, use the access rpc to check accessibility. If file modes
* are changed on the server, accesses might still fail later.
*/
static int
nfs_access(struct vop_access_args *ap)
{
struct vnode *vp = ap->a_vp;
int error = 0, i, gotahit;
u_int32_t mode, wmode, rmode;
int v34 = NFS_ISV34(vp);
struct nfsnode *np = VTONFS(vp);
/*
* Disallow write attempts on filesystems mounted read-only;
* unless the file is a socket, fifo, or a block or character
* device resident on the filesystem.
*/
if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
switch (vp->v_type) {
case VREG:
case VDIR:
case VLNK:
return (EROFS);
default:
break;
}
}
/*
* For nfs v3 or v4, check to see if we have done this recently, and if
* so return our cached result instead of making an ACCESS call.
* If not, do an access rpc, otherwise you are stuck emulating
* ufs_access() locally using the vattr. This may not be correct,
* since the server may apply other access criteria such as
* client uid-->server uid mapping that we do not know about.
*/
if (v34) {
if (ap->a_accmode & VREAD)
mode = NFSACCESS_READ;
else
mode = 0;
if (vp->v_type != VDIR) {
if (ap->a_accmode & VWRITE)
mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
if (ap->a_accmode & VAPPEND)
mode |= NFSACCESS_EXTEND;
if (ap->a_accmode & VEXEC)
mode |= NFSACCESS_EXECUTE;
if (ap->a_accmode & VDELETE)
mode |= NFSACCESS_DELETE;
} else {
if (ap->a_accmode & VWRITE)
mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
if (ap->a_accmode & VAPPEND)
mode |= NFSACCESS_EXTEND;
if (ap->a_accmode & VEXEC)
mode |= NFSACCESS_LOOKUP;
if (ap->a_accmode & VDELETE)
mode |= NFSACCESS_DELETE;
if (ap->a_accmode & VDELETE_CHILD)
mode |= NFSACCESS_MODIFY;
}
/* XXX safety belt, only make blanket request if caching */
if (nfsaccess_cache_timeout > 0) {
wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
NFSACCESS_DELETE | NFSACCESS_LOOKUP;
} else {
wmode = mode;
}
/*
* Does our cached result allow us to give a definite yes to
* this request?
*/
gotahit = 0;
mtx_lock(&np->n_mtx);
for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
if (time_second < (np->n_accesscache[i].stamp
+ nfsaccess_cache_timeout) &&
(np->n_accesscache[i].mode & mode) == mode) {
NFSINCRGLOBAL(nfsstatsv1.accesscache_hits);
gotahit = 1;
}
break;
}
}
mtx_unlock(&np->n_mtx);
#ifdef KDTRACE_HOOKS
if (gotahit != 0)
KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
ap->a_cred->cr_uid, mode);
else
KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
ap->a_cred->cr_uid, mode);
#endif
if (gotahit == 0) {
/*
* Either a no, or a don't know. Go to the wire.
*/
NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
error = nfs34_access_otw(vp, wmode, ap->a_td,
ap->a_cred, &rmode);
if (!error &&
(rmode & mode) != mode)
error = EACCES;
}
return (error);
} else {
if ((error = nfsspec_access(ap)) != 0) {
return (error);
}
/*
* Attempt to prevent a mapped root from accessing a file
* which it shouldn't. We try to read a byte from the file
* if the user is root and the file is not zero length.
* After calling nfsspec_access, we should have the correct
* file size cached.
*/
mtx_lock(&np->n_mtx);
if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
&& VTONFS(vp)->n_size > 0) {
struct iovec aiov;
struct uio auio;
char buf[1];
mtx_unlock(&np->n_mtx);
aiov.iov_base = buf;
aiov.iov_len = 1;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = 0;
auio.uio_resid = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_rw = UIO_READ;
auio.uio_td = ap->a_td;
if (vp->v_type == VREG)
error = ncl_readrpc(vp, &auio, ap->a_cred);
else if (vp->v_type == VDIR) {
char* bp;
bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
aiov.iov_base = bp;
aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
error = ncl_readdirrpc(vp, &auio, ap->a_cred,
ap->a_td);
free(bp, M_TEMP);
} else if (vp->v_type == VLNK)
error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
else
error = EACCES;
} else
mtx_unlock(&np->n_mtx);
return (error);
}
}
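/*
 * Editorial sketch (generic form): the cache hit test in nfs_access()
 * above reduces to two conditions on a cached entry: it is still within
 * nfsaccess_cache_timeout seconds of its stamp, and every access bit
 * being asked for was granted when the blanket ACCESS reply was cached.
 */
#if 0
static int
example_access_hit(u_int32_t cached_mode, time_t stamp, u_int32_t mode,
    time_t now, int timeout)
{

	return (now < stamp + timeout && (cached_mode & mode) == mode);
}
#endif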
/*
* nfs open vnode op
* Check to see if the type is ok
* and that deletion is not in progress.
* For paged in text files, you will need to flush the page cache
* if consistency is lost.
*/
/* ARGSUSED */
static int
nfs_open(struct vop_open_args *ap)
{
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
struct vattr vattr;
int error;
int fmode = ap->a_mode;
struct ucred *cred;
if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
return (EOPNOTSUPP);
/*
* For NFSv4, we need to do the Open Op before cache validation,
* so that we conform to RFC3530 Sec. 9.3.1.
*/
if (NFS_ISV4(vp)) {
error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
if (error) {
error = nfscl_maperr(ap->a_td, error, (uid_t)0,
(gid_t)0);
return (error);
}
}
/*
* Now, if this Open will be doing reading, re-validate/flush the
* cache, so that Close/Open coherency is maintained.
*/
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {
mtx_unlock(&np->n_mtx);
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
if (error == EINTR || error == EIO) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
if (vp->v_type == VDIR)
np->n_direofoffset = 0;
mtx_unlock(&np->n_mtx);
error = VOP_GETATTR(vp, &vattr, ap->a_cred);
if (error) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
np->n_mtime = vattr.va_mtime;
if (NFS_ISV4(vp))
np->n_change = vattr.va_filerev;
} else {
mtx_unlock(&np->n_mtx);
error = VOP_GETATTR(vp, &vattr, ap->a_cred);
if (error) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
if (vp->v_type == VDIR)
np->n_direofoffset = 0;
mtx_unlock(&np->n_mtx);
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
if (error == EINTR || error == EIO) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
np->n_mtime = vattr.va_mtime;
if (NFS_ISV4(vp))
np->n_change = vattr.va_filerev;
}
}
/*
* If the object has >= 1 O_DIRECT active opens, we disable caching.
*/
if (newnfs_directio_enable && (fmode & O_DIRECT) &&
(vp->v_type == VREG)) {
if (np->n_directio_opens == 0) {
mtx_unlock(&np->n_mtx);
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
if (error) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
np->n_flag |= NNONCACHE;
}
np->n_directio_opens++;
}
/* If opened for writing via NFSv4.1 or later, mark that for pNFS. */
if (NFSHASPNFS(VFSTONFS(vp->v_mount)) && (fmode & FWRITE) != 0)
np->n_flag |= NWRITEOPENED;
/*
* If this is an open for writing, capture a reference to the
* credentials, so they can be used by ncl_putpages(). Using
* these write credentials is preferable to the credentials of
* whatever thread happens to be doing the VOP_PUTPAGES() since
* the write RPCs are less likely to fail with EACCES.
*/
if ((fmode & FWRITE) != 0) {
cred = np->n_writecred;
np->n_writecred = crhold(ap->a_cred);
} else
cred = NULL;
mtx_unlock(&np->n_mtx);
if (cred != NULL)
crfree(cred);
vnode_create_vobject(vp, vattr.va_size, ap->a_td);
return (0);
}
/*
* nfs close vnode op
* What an NFS client should do upon close after writing is a debatable issue.
* Most NFS clients push delayed writes to the server upon close, basically for
* two reasons:
* 1 - So that any write errors may be reported back to the client process
* doing the close system call. By far the two most likely errors are
* NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
* 2 - To put a worst case upper bound on cache inconsistency between
* multiple clients for the file.
* There is also a consistency problem for Version 2 of the protocol w.r.t.
* not being able to tell if other clients are writing a file concurrently,
* since there is no way of knowing if the changed modify time in the reply
* is only due to the write for this client.
* (NFS Version 3 provides weak cache consistency data in the reply that
* should be sufficient to detect and handle this case.)
*
* The current code does the following:
* for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
* for NFS Version 3 - flush dirty buffers to the server but don't invalidate
* or commit them (this satisfies 1 and 2 except for the
* case where the server crashes after this close but
* before the commit RPC, which is felt to be "good
* enough". Changing the last argument to ncl_flush() to
* a 1 would force a commit operation, if it is felt a
* commit is necessary now.
* for NFS Version 4 - flush the dirty buffers and commit them, if
* nfscl_mustflush() says this is necessary.
* It is necessary if there is no write delegation held,
* in order to satisfy open/close coherency.
* If the file isn't cached on local stable storage,
* it may be necessary in order to detect "out of space"
* errors from the server, if the write delegation
* issued by the server doesn't allow the file to grow.
*/
/* ARGSUSED */
static int
nfs_close(struct vop_close_args *ap)
{
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
struct nfsvattr nfsva;
struct ucred *cred;
int error = 0, ret, localcred = 0;
int fmode = ap->a_fflag;
if (NFSCL_FORCEDISM(vp->v_mount))
return (0);
/*
* During shutdown, a_cred isn't valid, so just use root.
*/
if (ap->a_cred == NOCRED) {
cred = newnfs_getcred();
localcred = 1;
} else {
cred = ap->a_cred;
}
if (vp->v_type == VREG) {
/*
* Examine and clean dirty pages, regardless of NMODIFIED.
* This closes a major hole in close-to-open consistency.
* We want to push out all dirty pages (and buffers) on
* close, regardless of whether they were dirtied by
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {
mtx_unlock(&np->n_mtx);
if (NFS_ISV3(vp)) {
/*
* Under NFSv3 we have dirty buffers to dispose of. We
* must flush them to the NFS server. We have the option
* of waiting all the way through the commit rpc or just
* waiting for the initial write. The default is to only
* wait through the initial write so the data is in the
* server's cache, which is roughly similar to the state
* a standard disk subsystem leaves the file in on close().
*
* We cannot clear the NMODIFIED bit in np->n_flag due to
* potential races with other processes, and certainly
* cannot clear it if we don't commit.
* These races occur when there is no longer the old
* traditional vnode locking implemented for Vnode Ops.
*/
int cm = newnfs_commit_on_close ? 1 : 0;
error = ncl_flush(vp, MNT_WAIT, ap->a_td, cm, 0);
/* np->n_flag &= ~NMODIFIED; */
} else if (NFS_ISV4(vp)) {
if (nfscl_mustflush(vp) != 0) {
int cm = newnfs_commit_on_close ? 1 : 0;
error = ncl_flush(vp, MNT_WAIT, ap->a_td,
cm, 0);
/*
* as above w.r.t races when clearing
* NMODIFIED.
* np->n_flag &= ~NMODIFIED;
*/
}
} else {
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
}
mtx_lock(&np->n_mtx);
}
/*
* Invalidate the attribute cache in all cases.
* An open is going to fetch fresh attrs any way, other procs
* on this node that have file open will be forced to do an
* otw attr fetch, but this is safe.
* --> A user found that their RPC count dropped by 20% when
* this was commented out and I can't see any requirement
* for it, so I've disabled it when negative lookups are
* enabled. (What does this have to do with negative lookup
* caching? Well nothing, except it was reported by the
* same user that needed negative lookup caching and I wanted
* there to be a way to disable it to see if it
* is the cause of some caching/coherency issue that might
* crop up.)
*/
if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
}
if (np->n_flag & NWRITEERR) {
np->n_flag &= ~NWRITEERR;
error = np->n_error;
}
mtx_unlock(&np->n_mtx);
}
if (NFS_ISV4(vp)) {
/*
* Get attributes so "change" is up to date.
*/
if (error == 0 && nfscl_mustflush(vp) != 0 &&
vp->v_type == VREG &&
(VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOCTO) == 0) {
ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
NULL);
if (!ret) {
np->n_change = nfsva.na_filerev;
(void) nfscl_loadattrcache(&vp, &nfsva, NULL,
NULL, 0, 0);
}
}
/*
* and do the close.
*/
ret = nfsrpc_close(vp, 0, ap->a_td);
if (!error && ret)
error = ret;
if (error)
error = nfscl_maperr(ap->a_td, error, (uid_t)0,
(gid_t)0);
}
if (newnfs_directio_enable)
KASSERT((np->n_directio_asyncwr == 0),
("nfs_close: dirty unflushed (%d) directio buffers\n",
np->n_directio_asyncwr));
if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
mtx_lock(&np->n_mtx);
KASSERT((np->n_directio_opens > 0),
("nfs_close: unexpectedly value (0) of n_directio_opens\n"));
np->n_directio_opens--;
if (np->n_directio_opens == 0)
np->n_flag &= ~NNONCACHE;
mtx_unlock(&np->n_mtx);
}
if (localcred)
NFSFREECRED(cred);
return (error);
}
/*
* nfs getattr call from vfs.
*/
static int
nfs_getattr(struct vop_getattr_args *ap)
{
struct vnode *vp = ap->a_vp;
struct thread *td = curthread; /* XXX */
struct nfsnode *np = VTONFS(vp);
int error = 0;
struct nfsvattr nfsva;
struct vattr *vap = ap->a_vap;
struct vattr vattr;
/*
* Update local times for special files.
*/
mtx_lock(&np->n_mtx);
if (np->n_flag & (NACC | NUPD))
np->n_flag |= NCHG;
mtx_unlock(&np->n_mtx);
/*
* First look in the cache.
*/
if (ncl_getattrcache(vp, &vattr) == 0) {
vap->va_type = vattr.va_type;
vap->va_mode = vattr.va_mode;
vap->va_nlink = vattr.va_nlink;
vap->va_uid = vattr.va_uid;
vap->va_gid = vattr.va_gid;
vap->va_fsid = vattr.va_fsid;
vap->va_fileid = vattr.va_fileid;
vap->va_size = vattr.va_size;
vap->va_blocksize = vattr.va_blocksize;
vap->va_atime = vattr.va_atime;
vap->va_mtime = vattr.va_mtime;
vap->va_ctime = vattr.va_ctime;
vap->va_gen = vattr.va_gen;
vap->va_flags = vattr.va_flags;
vap->va_rdev = vattr.va_rdev;
vap->va_bytes = vattr.va_bytes;
vap->va_filerev = vattr.va_filerev;
/*
* Get the local modify time for the case of a write
* delegation.
*/
nfscl_deleggetmodtime(vp, &vap->va_mtime);
return (0);
}
if (NFS_ISV34(vp) && nfs_prime_access_cache &&
nfsaccess_cache_timeout > 0) {
NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
if (ncl_getattrcache(vp, ap->a_vap) == 0) {
nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
return (0);
}
}
error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
if (!error)
error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
if (!error) {
/*
* Get the local modify time for the case of a write
* delegation.
*/
nfscl_deleggetmodtime(vp, &vap->va_mtime);
} else if (NFS_ISV4(vp)) {
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
}
return (error);
}
/*
* nfs setattr call.
*/
static int
nfs_setattr(struct vop_setattr_args *ap)
{
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
struct thread *td = curthread; /* XXX */
struct vattr *vap = ap->a_vap;
int error = 0;
u_quad_t tsize;
#ifndef nolint
tsize = (u_quad_t)0;
#endif
/*
* Setting of flags and marking of atimes are not supported.
*/
if (vap->va_flags != VNOVAL)
return (EOPNOTSUPP);
/*
* Disallow write attempts if the filesystem is mounted read-only.
*/
if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
(vp->v_mount->mnt_flag & MNT_RDONLY))
return (EROFS);
if (vap->va_size != VNOVAL) {
switch (vp->v_type) {
case VDIR:
return (EISDIR);
case VCHR:
case VBLK:
case VSOCK:
case VFIFO:
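/*
 * A size change is meaningless for special files; if no
 * other attribute is being changed there is nothing to do,
 * otherwise drop the size change and fall through.
 */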
if (vap->va_mtime.tv_sec == VNOVAL &&
vap->va_atime.tv_sec == VNOVAL &&
vap->va_mode == (mode_t)VNOVAL &&
vap->va_uid == (uid_t)VNOVAL &&
vap->va_gid == (gid_t)VNOVAL)
return (0);
vap->va_size = VNOVAL;
break;
default:
/*
* Disallow write attempts if the filesystem is
* mounted read-only.
*/
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
/*
* Because we run vnode_pager_setsize() early (why?),
* we must set np->n_size now to avoid vinvalbuf
* V_SAVE races that might set the size to a lower
* value.
*/
mtx_lock(&np->n_mtx);
tsize = np->n_size;
mtx_unlock(&np->n_mtx);
error = ncl_meta_setsize(vp, ap->a_cred, td,
vap->va_size);
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {
tsize = np->n_size;
mtx_unlock(&np->n_mtx);
error = ncl_vinvalbuf(vp, vap->va_size == 0 ?
0 : V_SAVE, td, 1);
if (error != 0) {
vnode_pager_setsize(vp, tsize);
return (error);
}
/*
* Call nfscl_delegmodtime() to set the modify time
* locally, as required.
*/
nfscl_delegmodtime(vp);
} else
mtx_unlock(&np->n_mtx);
/*
* np->n_size has already been set to vap->va_size
* in ncl_meta_setsize(). We must set it again since
* nfs_loadattrcache() could be called through
* ncl_meta_setsize() and could modify np->n_size.
*/
mtx_lock(&np->n_mtx);
np->n_vattr.na_size = np->n_size = vap->va_size;
mtx_unlock(&np->n_mtx);
}
} else {
mtx_lock(&np->n_mtx);
if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) &&
(np->n_flag & NMODIFIED) && vp->v_type == VREG) {
mtx_unlock(&np->n_mtx);
error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
if (error == EINTR || error == EIO)
return (error);
} else
mtx_unlock(&np->n_mtx);
}
error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
if (error && vap->va_size != VNOVAL) {
mtx_lock(&np->n_mtx);
np->n_size = np->n_vattr.na_size = tsize;
vnode_pager_setsize(vp, tsize);
mtx_unlock(&np->n_mtx);
}
return (error);
}
/*
* Do an nfs setattr rpc.
*/
static int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
struct thread *td)
{
struct nfsnode *np = VTONFS(vp);
int error, ret, attrflag, i;
struct nfsvattr nfsva;
if (NFS_ISV34(vp)) {
mtx_lock(&np->n_mtx);
for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
np->n_accesscache[i].stamp = 0;
np->n_flag |= NDELEGMOD;
mtx_unlock(&np->n_mtx);
KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
}
error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
NULL);
if (attrflag) {
ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
if (ret && !error)
error = ret;
}
if (error && NFS_ISV4(vp))
error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
return (error);
}
/*
* nfs lookup call, one step at a time...
* First look in cache
* If not found, unlock the directory nfsnode and do the rpc
*/
static int
nfs_lookup(struct vop_lookup_args *ap)
{
struct componentname *cnp = ap->a_cnp;
struct vnode *dvp = ap->a_dvp;
struct vnode **vpp = ap->a_vpp;
struct mount *mp = dvp->v_mount;
int flags = cnp->cn_flags;
struct vnode *newvp;
struct nfsmount *nmp;
struct nfsnode *np, *newnp;
int error = 0, attrflag, dattrflag, ltype, ncticks;
struct thread *td = cnp->cn_thread;
struct nfsfh *nfhp;
struct nfsvattr dnfsva, nfsva;
struct vattr vattr;
struct timespec nctime;
*vpp = NULLVP;
if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
return (EROFS);
if (dvp->v_type != VDIR)
return (ENOTDIR);
nmp = VFSTONFS(mp);
np = VTONFS(dvp);
/* For NFSv4, wait until any remove is done. */
mtx_lock(&np->n_mtx);
while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
np->n_flag |= NREMOVEWANT;
(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
}
mtx_unlock(&np->n_mtx);
if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
return (error);
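/*
 * Consult the name cache first. A return of -1 indicates a
 * positive cache hit and ENOENT a negative one; 0 means no
 * entry was found, so fall through to the LOOKUP RPC below.
 */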
error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
if (error > 0 && error != ENOENT)
return (error);
if (error == -1) {
/*
* Lookups of "." are special and always return the
* current directory. cache_lookup() already handles
* associated locking bookkeeping, etc.
*/
if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
/* XXX: Is this really correct? */
if (cnp->cn_nameiop != LOOKUP &&
(flags & ISLASTCN))
cnp->cn_flags |= SAVENAME;
return (0);
}
/*
* We only accept a positive hit in the cache if the
* change time of the file matches our cached copy.
* Otherwise, we discard the cache entry and fallback
* to doing a lookup RPC. We also only trust cache
* entries for less than nm_nametimeo seconds.
*
* To better handle stale file handles and attributes,
* clear the attribute cache of this node if it is a
* leaf component, part of an open() call, and not
* locally modified before fetching the attributes.
* This should allow stale file handles to be detected
* here where we can fall back to a LOOKUP RPC to
* recover rather than having nfs_open() detect the
* stale file handle and failing open(2) with ESTALE.
*/
newvp = *vpp;
newnp = VTONFS(newvp);
if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
(flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
!(newnp->n_flag & NMODIFIED)) {
mtx_lock(&newnp->n_mtx);
newnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
mtx_unlock(&newnp->n_mtx);
}
if (nfscl_nodeleg(newvp, 0) == 0 ||
((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
timespeccmp(&vattr.va_ctime, &nctime, ==))) {
NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
if (cnp->cn_nameiop != LOOKUP &&
(flags & ISLASTCN))
cnp->cn_flags |= SAVENAME;
return (0);
}
cache_purge(newvp);
if (dvp != newvp)
vput(newvp);
else
vrele(newvp);
*vpp = NULLVP;
} else if (error == ENOENT) {
if (dvp->v_iflag & VI_DOOMED)
return (ENOENT);
/*
* We only accept a negative hit in the cache if the
* modification time of the parent directory matches
* the cached copy in the name cache entry.
* Otherwise, we discard all of the negative cache
* entries for this directory. We also only trust
* negative cache entries for up to nm_negnametimeo
* seconds.
*/
if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
timespeccmp(&vattr.va_mtime, &nctime, ==)) {
NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
return (ENOENT);
}
cache_purge_negative(dvp);
}
error = 0;
newvp = NULLVP;
NFSINCRGLOBAL(nfsstatsv1.lookupcache_misses);
error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
NULL);
if (dattrflag)
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
if (error) {
if (newvp != NULLVP) {
vput(newvp);
*vpp = NULLVP;
}
if (error != ENOENT) {
if (NFS_ISV4(dvp))
error = nfscl_maperr(td, error, (uid_t)0,
(gid_t)0);
return (error);
}
/* The requested file was not found. */
if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
(flags & ISLASTCN)) {
/*
* XXX: UFS does a full VOP_ACCESS(dvp,
* VWRITE) here instead of just checking
* MNT_RDONLY.
*/
if (mp->mnt_flag & MNT_RDONLY)
return (EROFS);
cnp->cn_flags |= SAVENAME;
return (EJUSTRETURN);
}
if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag) {
/*
* Cache the modification time of the parent
* directory from the post-op attributes in
* the name cache entry. The negative cache
* entry will be ignored once the directory
* has changed. Don't bother adding the entry
* if the directory has already changed.
*/
mtx_lock(&np->n_mtx);
if (timespeccmp(&np->n_vattr.na_mtime,
&dnfsva.na_mtime, ==)) {
mtx_unlock(&np->n_mtx);
cache_enter_time(dvp, NULL, cnp,
&dnfsva.na_mtime, NULL);
} else
mtx_unlock(&np->n_mtx);
}
return (ENOENT);
}
/*
* Handle RENAME case...
*/
if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
FREE((caddr_t)nfhp, M_NFSFH);
return (EISDIR);
}
error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
LK_EXCLUSIVE);
if (error)
return (error);
newvp = NFSTOV(np);
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
*vpp = newvp;
cnp->cn_flags |= SAVENAME;
return (0);
}
if (flags & ISDOTDOT) {
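/*
 * For "..", the directory vnode cannot stay locked across
 * the nget of its parent (lock order), so busy the mount
 * point to keep it from going away while dvp is unlocked.
 */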
ltype = NFSVOPISLOCKED(dvp);
error = vfs_busy(mp, MBF_NOWAIT);
if (error != 0) {
vfs_ref(mp);
NFSVOPUNLOCK(dvp, 0);
error = vfs_busy(mp, 0);
NFSVOPLOCK(dvp, ltype | LK_RETRY);
vfs_rel(mp);
if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
vfs_unbusy(mp);
error = ENOENT;
}
if (error != 0)
return (error);
}
NFSVOPUNLOCK(dvp, 0);
error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
cnp->cn_lkflags);
if (error == 0)
newvp = NFSTOV(np);
vfs_unbusy(mp);
if (newvp != dvp)
NFSVOPLOCK(dvp, ltype | LK_RETRY);
if (dvp->v_iflag & VI_DOOMED) {
if (error == 0) {
if (newvp == dvp)
vrele(newvp);
else
vput(newvp);
}
error = ENOENT;
}
if (error != 0)
return (error);
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
} else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
FREE((caddr_t)nfhp, M_NFSFH);
VREF(dvp);
newvp = dvp;
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
} else {
error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
cnp->cn_lkflags);
if (error)
return (error);
newvp = NFSTOV(np);
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
!(np->n_flag & NMODIFIED)) {
/*
* Flush the attribute cache when opening a
* leaf node to ensure that fresh attributes
* are fetched in nfs_open() since we did not
* fetch attributes from the LOOKUP reply.
*/
mtx_lock(&np->n_mtx);
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
mtx_unlock(&np->n_mtx);
}
}
if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
cnp->cn_flags |= SAVENAME;
if ((cnp->cn_flags & MAKEENTRY) &&
(cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) &&
attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0))
cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime);
*vpp = newvp;
return (0);
}
/*
* nfs read call.
* Just call ncl_bioread() to do the work.
*/
static int
nfs_read(struct vop_read_args *ap)
{
struct vnode *vp = ap->a_vp;
switch (vp->v_type) {
case VREG:
return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
case VDIR:
return (EISDIR);
default:
return (EOPNOTSUPP);
}
}
/*
* nfs readlink call
*/
static int
nfs_readlink(struct vop_readlink_args *ap)
{
struct vnode *vp = ap->a_vp;
if (vp->v_type != VLNK)
return (EINVAL);
return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
}
/*
* Do a readlink rpc.
* Called by ncl_doio() from below the buffer cache.
*/
int
ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
int error, ret, attrflag;
struct nfsvattr nfsva;
error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva,
&attrflag, NULL);
if (attrflag) {
ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
if (ret && !error)
error = ret;
}
if (error && NFS_ISV4(vp))
error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
return (error);
}
/*
* nfs read rpc call
* Ditto above
*/
int
ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
int error, ret, attrflag;
struct nfsvattr nfsva;
struct nfsmount *nmp;
nmp = VFSTONFS(vnode_mount(vp));
error = EIO;
attrflag = 0;
if (NFSHASPNFS(nmp))
error = nfscl_doiods(vp, uiop, NULL, NULL,
NFSV4OPEN_ACCESSREAD, 0, cred, uiop->uio_td);
NFSCL_DEBUG(4, "readrpc: aft doiods=%d\n", error);
if (error != 0)
error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva,
&attrflag, NULL);
if (attrflag) {
ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
if (ret && !error)
error = ret;
}
if (error && NFS_ISV4(vp))
error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
return (error);
}
/*
* nfs write call
*/
int
ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
int *iomode, int *must_commit, int called_from_strategy)
{
struct nfsvattr nfsva;
int error, attrflag, ret;
struct nfsmount *nmp;
nmp = VFSTONFS(vnode_mount(vp));
error = EIO;
attrflag = 0;
if (NFSHASPNFS(nmp))
error = nfscl_doiods(vp, uiop, iomode, must_commit,
NFSV4OPEN_ACCESSWRITE, 0, cred, uiop->uio_td);
NFSCL_DEBUG(4, "writerpc: aft doiods=%d\n", error);
if (error != 0)
error = nfsrpc_write(vp, uiop, iomode, must_commit, cred,
uiop->uio_td, &nfsva, &attrflag, NULL,
called_from_strategy);
if (attrflag) {
if (VTONFS(vp)->n_flag & ND_NFSV4)
ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1,
1);
else
ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
1);
if (ret && !error)
error = ret;
}
if (DOINGASYNC(vp))
*iomode = NFSWRITE_FILESYNC;
if (error && NFS_ISV4(vp))
error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
return (error);
}
/*
* nfs mknod rpc
* For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
* mode set to specify the file type and the size field for rdev.
*/
static int
nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
struct vattr *vap)
{
struct nfsvattr nfsva, dnfsva;
struct vnode *newvp = NULL;
struct nfsnode *np = NULL, *dnp;
struct nfsfh *nfhp;
struct vattr vattr;
int error = 0, attrflag, dattrflag;
u_int32_t rdev;
if (vap->va_type == VCHR || vap->va_type == VBLK)
rdev = vap->va_rdev;
else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
rdev = 0xffffffff;
else
return (EOPNOTSUPP);
if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
return (error);
error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva,
&nfsva, &nfhp, &attrflag, &dattrflag, NULL);
if (!error) {
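/*
 * The file handle is optional in the reply for some NFS
 * versions, so if it was not returned, do a lookup to
 * acquire one for the new node.
 */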
if (!nfhp)
(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
&dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
NULL);
if (nfhp)
error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
}
if (dattrflag)
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
if (!error) {
newvp = NFSTOV(np);
if (attrflag != 0) {
error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
if (error != 0)
vput(newvp);
}
}
if (!error) {
*vpp = newvp;
} else if (NFS_ISV4(dvp)) {
error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
vap->va_gid);
}
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
dnp->n_flag |= NMODIFIED;
if (!dattrflag) {
dnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
mtx_unlock(&dnp->n_mtx);
return (error);
}
/*
* nfs mknod vop
* just call nfs_mknodrpc() to do the work.
*/
/* ARGSUSED */
static int
nfs_mknod(struct vop_mknod_args *ap)
{
return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
}
static struct mtx nfs_cverf_mtx;
MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
MTX_DEF);
static nfsquad_t
nfs_get_cverf(void)
{
static nfsquad_t cverf;
nfsquad_t ret;
static int cverf_initialized = 0;
mtx_lock(&nfs_cverf_mtx);
if (cverf_initialized == 0) {
cverf.lval[0] = arc4random();
cverf.lval[1] = arc4random();
cverf_initialized = 1;
} else
cverf.qval++;
ret = cverf;
mtx_unlock(&nfs_cverf_mtx);
return (ret);
}
/*
* nfs file create call
*/
static int
nfs_create(struct vop_create_args *ap)
{
struct vnode *dvp = ap->a_dvp;
struct vattr *vap = ap->a_vap;
struct componentname *cnp = ap->a_cnp;
struct nfsnode *np = NULL, *dnp;
struct vnode *newvp = NULL;
struct nfsmount *nmp;
struct nfsvattr dnfsva, nfsva;
struct nfsfh *nfhp;
nfsquad_t cverf;
int error = 0, attrflag, dattrflag, fmode = 0;
struct vattr vattr;
/*
* Oops, not for me..
*/
if (vap->va_type == VSOCK)
return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
return (error);
if (vap->va_vaflags & VA_EXCLUSIVE)
fmode |= O_EXCL;
dnp = VTONFS(dvp);
nmp = VFSTONFS(vnode_mount(dvp));
again:
/* For NFSv4, wait until any remove is done. */
mtx_lock(&dnp->n_mtx);
while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
dnp->n_flag |= NREMOVEWANT;
(void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
}
mtx_unlock(&dnp->n_mtx);
cverf = nfs_get_cverf();
error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva,
&nfhp, &attrflag, &dattrflag, NULL);
if (!error) {
if (nfhp == NULL)
(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
&dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
NULL);
if (nfhp != NULL)
error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
}
if (dattrflag)
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
if (!error) {
newvp = NFSTOV(np);
if (attrflag == 0)
error = nfsrpc_getattr(newvp, cnp->cn_cred,
cnp->cn_thread, &nfsva, NULL);
if (error == 0)
error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
}
if (error) {
if (newvp != NULL) {
vput(newvp);
newvp = NULL;
}
if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
error == NFSERR_NOTSUPP) {
fmode &= ~O_EXCL;
goto again;
}
} else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
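/*
 * An NFSv3/4 exclusive create does not set the requested
 * attributes (the verifier is stored in them instead), so
 * set them now with a follow-up setattr.
 */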
if (nfscl_checksattr(vap, &nfsva)) {
error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
cnp->cn_thread, &nfsva, &attrflag, NULL);
if (error && (vap->va_uid != (uid_t)VNOVAL ||
vap->va_gid != (gid_t)VNOVAL)) {
/* try again without setting uid/gid */
vap->va_uid = (uid_t)VNOVAL;
vap->va_gid = (uid_t)VNOVAL;
error = nfsrpc_setattr(newvp, vap, NULL,
cnp->cn_cred, cnp->cn_thread, &nfsva,
&attrflag, NULL);
}
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
NULL, 0, 1);
if (error != 0)
vput(newvp);
}
}
if (!error) {
if ((cnp->cn_flags & MAKEENTRY) && attrflag)
cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
NULL);
*ap->a_vpp = newvp;
} else if (NFS_ISV4(dvp)) {
error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
vap->va_gid);
}
mtx_lock(&dnp->n_mtx);
dnp->n_flag |= NMODIFIED;
if (!dattrflag) {
dnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
mtx_unlock(&dnp->n_mtx);
return (error);
}
/*
* nfs file remove call
* To try and make nfs semantics closer to ufs semantics, a file that has
* other processes using the vnode is renamed instead of removed and then
* removed later on the last close.
* - If v_usecount > 1
* If a rename is not already in the works
* call nfs_sillyrename() to set it up
* else
* do the remove rpc
*/
static int
nfs_remove(struct vop_remove_args *ap)
{
struct vnode *vp = ap->a_vp;
struct vnode *dvp = ap->a_dvp;
struct componentname *cnp = ap->a_cnp;
struct nfsnode *np = VTONFS(vp);
int error = 0;
struct vattr vattr;
KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
if (vp->v_type == VDIR)
error = EPERM;
else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
vattr.va_nlink > 1)) {
/*
* Purge the name cache so that the chance of a lookup for
* the name succeeding while the remove is in progress is
* minimized. Without node locking it can still happen, such
* that an I/O op returns ESTALE, but that is no worse than
* what happens when another host removes the file.
*/
cache_purge(vp);
/*
* throw away biocache buffers, mainly to avoid
* unnecessary delayed writes later.
*/
error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1);
if (error != EINTR && error != EIO)
/* Do the rpc */
error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
/*
* Kludge City: If the first reply to the remove rpc is lost,
* the reply to the retransmitted request will be ENOENT
* since the file was in fact removed.
* Therefore, we cheat and return success.
*/
if (error == ENOENT)
error = 0;
} else if (!np->n_sillyrename)
error = nfs_sillyrename(dvp, vp, cnp);
mtx_lock(&np->n_mtx);
np->n_attrstamp = 0;
mtx_unlock(&np->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
return (error);
}
/*
* nfs file remove rpc called from nfs_inactive
*/
int
ncl_removeit(struct sillyrename *sp, struct vnode *vp)
{
/*
* Make sure that the directory vnode is still valid.
* XXX we should lock sp->s_dvp here.
*/
if (sp->s_dvp->v_type == VBAD)
return (0);
return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
sp->s_cred, NULL));
}
/*
* Nfs remove rpc, called from nfs_remove() and ncl_removeit().
*/
static int
nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
int namelen, struct ucred *cred, struct thread *td)
{
struct nfsvattr dnfsva;
struct nfsnode *dnp = VTONFS(dvp);
int error = 0, dattrflag;
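/*
 * Flag that a remove is in progress so that NFSv4 lookups and
 * creates in this directory will wait for it to complete.
 */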
mtx_lock(&dnp->n_mtx);
dnp->n_flag |= NREMOVEINPROG;
mtx_unlock(&dnp->n_mtx);
error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
&dattrflag, NULL);
mtx_lock(&dnp->n_mtx);
if ((dnp->n_flag & NREMOVEWANT)) {
dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
mtx_unlock(&dnp->n_mtx);
wakeup((caddr_t)dnp);
} else {
dnp->n_flag &= ~NREMOVEINPROG;
mtx_unlock(&dnp->n_mtx);
}
if (dattrflag)
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
mtx_lock(&dnp->n_mtx);
dnp->n_flag |= NMODIFIED;
if (!dattrflag) {
dnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
mtx_unlock(&dnp->n_mtx);
if (error && NFS_ISV4(dvp))
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
return (error);
}
/*
* nfs file rename call
*/
static int
nfs_rename(struct vop_rename_args *ap)
{
struct vnode *fvp = ap->a_fvp;
struct vnode *tvp = ap->a_tvp;
struct vnode *fdvp = ap->a_fdvp;
struct vnode *tdvp = ap->a_tdvp;
struct componentname *tcnp = ap->a_tcnp;
struct componentname *fcnp = ap->a_fcnp;
struct nfsnode *fnp = VTONFS(ap->a_fvp);
struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
struct nfsv4node *newv4 = NULL;
int error;
KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
(fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
/* Check for cross-device rename */
if ((fvp->v_mount != tdvp->v_mount) ||
(tvp && (fvp->v_mount != tvp->v_mount))) {
error = EXDEV;
goto out;
}
if (fvp == tvp) {
printf("nfs_rename: fvp == tvp (can't happen)\n");
error = 0;
goto out;
}
if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0)
goto out;
/*
* We have to flush B_DELWRI data prior to renaming
* the file. If we don't, the delayed-write buffers
* can be flushed out later after the file has gone stale
* under NFSV3. NFSV2 does not have this problem because
* (as far as I can tell) it flushes dirty buffers more
* often.
*
* Skip the rename operation if the fsync fails; this can happen
* when the server's volume is full and we push out data that was
* written back to our cache earlier. Not checking for
* this condition can result in potential (silent) data loss.
*/
error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
NFSVOPUNLOCK(fvp, 0);
if (!error && tvp)
error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
if (error)
goto out;
/*
* If the tvp exists and is in use, sillyrename it before doing the
* rename of the new file over it.
* XXX Can't sillyrename a directory.
*/
if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
vput(tvp);
tvp = NULL;
}
error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
tcnp->cn_thread);
if (error == 0 && NFS_ISV4(tdvp)) {
/*
* For NFSv4, check to see if it is the same name and
* replace the name, if it is different.
*/
MALLOC(newv4, struct nfsv4node *,
sizeof (struct nfsv4node) +
tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
M_NFSV4NODE, M_WAITOK);
mtx_lock(&tdnp->n_mtx);
mtx_lock(&fnp->n_mtx);
if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
(fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
tcnp->cn_namelen) ||
tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
tdnp->n_fhp->nfh_len))) {
#ifdef notdef
{ char nnn[100]; int nnnl;
nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
bcopy(tcnp->cn_nameptr, nnn, nnnl);
nnn[nnnl] = '\0';
printf("ren replace=%s\n",nnn);
}
#endif
FREE((caddr_t)fnp->n_v4, M_NFSV4NODE);
fnp->n_v4 = newv4;
newv4 = NULL;
fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
fnp->n_v4->n4_namelen = tcnp->cn_namelen;
NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
tdnp->n_fhp->nfh_len);
NFSBCOPY(tcnp->cn_nameptr,
NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
}
mtx_unlock(&tdnp->n_mtx);
mtx_unlock(&fnp->n_mtx);
if (newv4 != NULL)
FREE((caddr_t)newv4, M_NFSV4NODE);
}
if (fvp->v_type == VDIR) {
if (tvp != NULL && tvp->v_type == VDIR)
cache_purge(tdvp);
cache_purge(fdvp);
}
out:
if (tdvp == tvp)
vrele(tdvp);
else
vput(tdvp);
if (tvp)
vput(tvp);
vrele(fdvp);
vrele(fvp);
/*
* Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
*/
if (error == ENOENT)
error = 0;
return (error);
}
/*
* nfs file rename rpc called from nfs_remove() above
*/
static int
nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
struct sillyrename *sp)
{
return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
scnp->cn_thread));
}
/*
* Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
*/
static int
nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
int tnamelen, struct ucred *cred, struct thread *td)
{
struct nfsvattr fnfsva, tnfsva;
struct nfsnode *fdnp = VTONFS(fdvp);
struct nfsnode *tdnp = VTONFS(tdvp);
int error = 0, fattrflag, tattrflag;
error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
&tattrflag, NULL, NULL);
mtx_lock(&fdnp->n_mtx);
fdnp->n_flag |= NMODIFIED;
if (fattrflag != 0) {
mtx_unlock(&fdnp->n_mtx);
(void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1);
} else {
fdnp->n_attrstamp = 0;
mtx_unlock(&fdnp->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
}
mtx_lock(&tdnp->n_mtx);
tdnp->n_flag |= NMODIFIED;
if (tattrflag != 0) {
mtx_unlock(&tdnp->n_mtx);
(void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1);
} else {
tdnp->n_attrstamp = 0;
mtx_unlock(&tdnp->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
}
if (error && NFS_ISV4(fdvp))
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
return (error);
}
/*
* nfs hard link create call
*/
static int
nfs_link(struct vop_link_args *ap)
{
struct vnode *vp = ap->a_vp;
struct vnode *tdvp = ap->a_tdvp;
struct componentname *cnp = ap->a_cnp;
struct nfsnode *np, *tdnp;
struct nfsvattr nfsva, dnfsva;
int error = 0, attrflag, dattrflag;
/*
* Push all writes to the server, so that the attribute cache
* doesn't get "out of sync" with the server.
* XXX There should be a better way!
*/
VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag,
&dattrflag, NULL);
tdnp = VTONFS(tdvp);
mtx_lock(&tdnp->n_mtx);
tdnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&tdnp->n_mtx);
(void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1);
} else {
tdnp->n_attrstamp = 0;
mtx_unlock(&tdnp->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
}
if (attrflag)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
else {
np = VTONFS(vp);
mtx_lock(&np->n_mtx);
np->n_attrstamp = 0;
mtx_unlock(&np->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
}
/*
* If negative lookup caching is enabled, I might as well
* add an entry for this node. Not necessary for correctness,
* but if negative caching is enabled, then the system
* must care about lookup caching hit rate, so...
*/
if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 &&
(cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL);
}
if (error && NFS_ISV4(vp))
error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
(gid_t)0);
return (error);
}
/*
* nfs symbolic link create call
*/
static int
nfs_symlink(struct vop_symlink_args *ap)
{
struct vnode *dvp = ap->a_dvp;
struct vattr *vap = ap->a_vap;
struct componentname *cnp = ap->a_cnp;
struct nfsvattr nfsva, dnfsva;
struct nfsfh *nfhp;
struct nfsnode *np = NULL, *dnp;
struct vnode *newvp = NULL;
int error = 0, attrflag, dattrflag, ret;
vap->va_type = VLNK;
error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen,
ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva,
&nfsva, &nfhp, &attrflag, &dattrflag, NULL);
if (nfhp) {
ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
&np, NULL, LK_EXCLUSIVE);
if (!ret)
newvp = NFSTOV(np);
else if (!error)
error = ret;
}
if (newvp != NULL) {
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
} else if (!error) {
/*
* If we do not have an error and we could not extract the
* newvp from the response due to the request being NFSv2, we
* have to do a lookup in order to obtain a newvp to return.
*/
error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
cnp->cn_cred, cnp->cn_thread, &np);
if (!error)
newvp = NFSTOV(np);
}
if (error) {
if (newvp)
vput(newvp);
if (NFS_ISV4(dvp))
error = nfscl_maperr(cnp->cn_thread, error,
vap->va_uid, vap->va_gid);
} else {
*ap->a_vpp = newvp;
}
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
dnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&dnp->n_mtx);
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
} else {
dnp->n_attrstamp = 0;
mtx_unlock(&dnp->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
/*
* If negative lookup caching is enabled, I might as well
* add an entry for this node. Not necessary for correctness,
* but if negative caching is enabled, then the system
* must care about lookup caching hit rate, so...
*/
if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
(cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL);
}
return (error);
}
/*
* nfs make dir call
*/
static int
nfs_mkdir(struct vop_mkdir_args *ap)
{
struct vnode *dvp = ap->a_dvp;
struct vattr *vap = ap->a_vap;
struct componentname *cnp = ap->a_cnp;
struct nfsnode *np = NULL, *dnp;
struct vnode *newvp = NULL;
struct vattr vattr;
struct nfsfh *nfhp;
struct nfsvattr nfsva, dnfsva;
int error = 0, attrflag, dattrflag, ret;
if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
return (error);
vap->va_type = VDIR;
error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp,
&attrflag, &dattrflag, NULL);
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
dnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&dnp->n_mtx);
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
} else {
dnp->n_attrstamp = 0;
mtx_unlock(&dnp->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
if (nfhp) {
ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
&np, NULL, LK_EXCLUSIVE);
if (!ret) {
newvp = NFSTOV(np);
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
NULL, 0, 1);
} else if (!error)
error = ret;
}
if (!error && newvp == NULL) {
error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
cnp->cn_cred, cnp->cn_thread, &np);
if (!error) {
newvp = NFSTOV(np);
if (newvp->v_type != VDIR)
error = EEXIST;
}
}
if (error) {
if (newvp)
vput(newvp);
if (NFS_ISV4(dvp))
error = nfscl_maperr(cnp->cn_thread, error,
vap->va_uid, vap->va_gid);
} else {
/*
* If negative lookup caching is enabled, I might as well
* add an entry for this node. Not necessary for correctness,
* but if negative caching is enabled, then the system
* must care about lookup caching hit rate, so...
*/
if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
(cnp->cn_flags & MAKEENTRY) &&
attrflag != 0 && dattrflag != 0)
cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
&dnfsva.na_ctime);
*ap->a_vpp = newvp;
}
return (error);
}
/*
* nfs remove directory call
*/
static int
nfs_rmdir(struct vop_rmdir_args *ap)
{
struct vnode *vp = ap->a_vp;
struct vnode *dvp = ap->a_dvp;
struct componentname *cnp = ap->a_cnp;
struct nfsnode *dnp;
struct nfsvattr dnfsva;
int error, dattrflag;
if (dvp == vp)
return (EINVAL);
error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
dnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&dnp->n_mtx);
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
} else {
dnp->n_attrstamp = 0;
mtx_unlock(&dnp->n_mtx);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
cache_purge(dvp);
cache_purge(vp);
if (error && NFS_ISV4(dvp))
error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
(gid_t)0);
/*
* Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
*/
if (error == ENOENT)
error = 0;
return (error);
}
/*
* nfs readdir call
*/
static int
nfs_readdir(struct vop_readdir_args *ap)
{
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
struct uio *uio = ap->a_uio;
ssize_t tresid, left;
int error = 0;
struct vattr vattr;
if (ap->a_eofflag != NULL)
*ap->a_eofflag = 0;
if (vp->v_type != VDIR)
return(EPERM);
/*
* First, check for hit on the EOF offset cache
*/
if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
(np->n_flag & NMODIFIED) == 0) {
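/*
 * Only trust the cached EOF offset if the directory still
 * looks unchanged: the change attribute (NFSv4) or mtime
 * must match what was cached for this node.
 */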
if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
mtx_lock(&np->n_mtx);
if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
!NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
mtx_unlock(&np->n_mtx);
NFSINCRGLOBAL(nfsstatsv1.direofcache_hits);
if (ap->a_eofflag != NULL)
*ap->a_eofflag = 1;
return (0);
} else
mtx_unlock(&np->n_mtx);
}
}
/*
* NFS always guarantees that directory entries don't straddle
* DIRBLKSIZ boundaries. As such, we need to limit the size
* to an exact multiple of DIRBLKSIZ, to avoid copying a partial
* directory entry.
*/
left = uio->uio_resid % DIRBLKSIZ;
if (left == uio->uio_resid)
return (EINVAL);
uio->uio_resid -= left;
/*
* Call ncl_bioread() to do the real work.
*/
tresid = uio->uio_resid;
error = ncl_bioread(vp, uio, 0, ap->a_cred);
if (!error && uio->uio_resid == tresid) {
NFSINCRGLOBAL(nfsstatsv1.direofcache_misses);
if (ap->a_eofflag != NULL)
*ap->a_eofflag = 1;
}
/* Add the partial DIRBLKSIZ (left) back in. */
uio->uio_resid += left;
return (error);
}
/*
* Readdir rpc call.
* Called from below the buffer cache by ncl_doio().
*/
int
ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
struct thread *td)
{
struct nfsvattr nfsva;
nfsuint64 *cookiep, cookie;
struct nfsnode *dnp = VTONFS(vp);
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int error = 0, eof, attrflag;
KASSERT(uiop->uio_iovcnt == 1 &&
(uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
(uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
("nfs readdirrpc bad uio"));
/*
* If there is no cookie, assume directory was stale.
*/
ncl_dircookie_lock(dnp);
cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
if (cookiep) {
cookie = *cookiep;
ncl_dircookie_unlock(dnp);
} else {
ncl_dircookie_unlock(dnp);
return (NFSERR_BAD_COOKIE);
}
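/*
 * For NFSv3, make sure the server's fsinfo (transfer size
 * limits, etc.) has been fetched before issuing the READDIR.
 */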
if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
(void)ncl_fsinfo(nmp, vp, cred, td);
error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
&attrflag, &eof, NULL);
if (attrflag)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
if (!error) {
/*
* We are now either at the end of the directory or have filled
* the block.
*/
if (eof)
dnp->n_direofoffset = uiop->uio_offset;
else {
if (uiop->uio_resid > 0)
printf("EEK! readdirrpc resid > 0\n");
ncl_dircookie_lock(dnp);
cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
*cookiep = cookie;
ncl_dircookie_unlock(dnp);
}
} else if (NFS_ISV4(vp)) {
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
}
return (error);
}
/*
* NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
*/
int
ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
struct thread *td)
{
struct nfsvattr nfsva;
nfsuint64 *cookiep, cookie;
struct nfsnode *dnp = VTONFS(vp);
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int error = 0, attrflag, eof;
KASSERT(uiop->uio_iovcnt == 1 &&
(uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
(uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
("nfs readdirplusrpc bad uio"));
/*
* If there is no cookie, assume directory was stale.
*/
ncl_dircookie_lock(dnp);
cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
if (cookiep) {
cookie = *cookiep;
ncl_dircookie_unlock(dnp);
} else {
ncl_dircookie_unlock(dnp);
return (NFSERR_BAD_COOKIE);
}
if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
(void)ncl_fsinfo(nmp, vp, cred, td);
error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
&attrflag, &eof, NULL);
if (attrflag)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
if (!error) {
/*
* We are now either at the end of the directory or have filled
* the block.
*/
if (eof)
dnp->n_direofoffset = uiop->uio_offset;
else {
if (uiop->uio_resid > 0)
printf("EEK! readdirplusrpc resid > 0\n");
ncl_dircookie_lock(dnp);
cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
*cookiep = cookie;
ncl_dircookie_unlock(dnp);
}
} else if (NFS_ISV4(vp)) {
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
}
return (error);
}
/*
* Silly rename. To make the stateless NFS filesystem look a little
* more like "ufs", a remove of an active vnode is translated to a rename
* to a funny looking filename that is removed by nfs_inactive on the
* nfsnode. There is the potential for another process on a different client
* to create the same funny name between the time the nfs_lookitup() check
* fails and the nfs_rename() completes, but...
*/
static int
nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
struct sillyrename *sp;
struct nfsnode *np;
int error;
short pid;
unsigned int lticks;
cache_purge(dvp);
np = VTONFS(vp);
KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
M_NEWNFSREQ, M_WAITOK);
sp->s_cred = crhold(cnp->cn_cred);
sp->s_dvp = dvp;
VREF(dvp);
/*
* Fudge together a funny name.
* The format of the funny name was changed to accommodate more
* sillynames per directory.
* The name is now .nfs.<ticks>.<pid>.4, where ticks is
* CPU ticks since boot.
*/
pid = cnp->cn_thread->td_proc->p_pid;
lticks = (unsigned int)ticks;
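/*
 * Keep bumping the ticks value until the generated name does
 * not already exist in the directory (i.e. nfs_lookitup() fails).
 */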
for ( ; ; ) {
sp->s_namlen = sprintf(sp->s_name,
".nfs.%08x.%04x4.4", lticks,
pid);
if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
cnp->cn_thread, NULL))
break;
lticks++;
}
error = nfs_renameit(dvp, vp, cnp, sp);
if (error)
goto bad;
error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
cnp->cn_thread, &np);
np->n_sillyrename = sp;
return (0);
bad:
vrele(sp->s_dvp);
crfree(sp->s_cred);
free((caddr_t)sp, M_NEWNFSREQ);
return (error);
}
/*
* Look up a file name and optionally either update the file handle or
* allocate an nfsnode, depending on the value of npp.
* npp == NULL --> just do the lookup
* *npp == NULL --> allocate a new nfsnode and make sure attributes are
* handled too
* *npp != NULL --> update the file handle in the vnode
*/
static int
nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
struct thread *td, struct nfsnode **npp)
{
struct vnode *newvp = NULL, *vp;
struct nfsnode *np, *dnp = VTONFS(dvp);
struct nfsfh *nfhp, *onfhp;
struct nfsvattr nfsva, dnfsva;
struct componentname cn;
int error = 0, attrflag, dattrflag;
u_int hash;
error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
&nfhp, &attrflag, &dattrflag, NULL);
if (dattrflag)
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
if (npp && !error) {
if (*npp != NULL) {
np = *npp;
vp = NFSTOV(np);
/*
* For NFSv4, check to see if it is the same name and
* replace the name, if it is different.
*/
if (np->n_v4 != NULL && nfsva.na_type == VREG &&
(np->n_v4->n4_namelen != len ||
NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
dnp->n_fhp->nfh_len))) {
#ifdef notdef
{ char nnn[100]; int nnnl;
nnnl = (len < 100) ? len : 99;
bcopy(name, nnn, nnnl);
nnn[nnnl] = '\0';
printf("replace=%s\n",nnn);
}
#endif
FREE((caddr_t)np->n_v4, M_NFSV4NODE);
MALLOC(np->n_v4, struct nfsv4node *,
sizeof (struct nfsv4node) +
dnp->n_fhp->nfh_len + len - 1,
M_NFSV4NODE, M_WAITOK);
np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
np->n_v4->n4_namelen = len;
NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
dnp->n_fhp->nfh_len);
NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
}
hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
FNV1_32_INIT);
onfhp = np->n_fhp;
/*
* Rehash node for new file handle.
*/
vfs_hash_rehash(vp, hash);
np->n_fhp = nfhp;
if (onfhp != NULL)
FREE((caddr_t)onfhp, M_NFSFH);
newvp = NFSTOV(np);
} else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
FREE((caddr_t)nfhp, M_NFSFH);
VREF(dvp);
newvp = dvp;
} else {
cn.cn_nameptr = name;
cn.cn_namelen = len;
error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
&np, NULL, LK_EXCLUSIVE);
if (error)
return (error);
newvp = NFSTOV(np);
}
if (!attrflag && *npp == NULL) {
if (newvp == dvp)
vrele(newvp);
else
vput(newvp);
return (ENOENT);
}
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
0, 1);
}
if (npp && *npp == NULL) {
if (error) {
if (newvp) {
if (newvp == dvp)
vrele(newvp);
else
vput(newvp);
}
} else
*npp = np;
}
if (error && NFS_ISV4(dvp))
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
return (error);
}
/*
* Nfs Version 3 and 4 commit rpc
*/
int
ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
struct thread *td)
{
struct nfsvattr nfsva;
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
struct nfsnode *np;
struct uio uio;
int error, attrflag;
np = VTONFS(vp);
error = EIO;
attrflag = 0;
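/*
 * For pNFS, try the commit through the DS(s) first and fall
 * back to a commit RPC against the server below if that fails.
 */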
if (NFSHASPNFS(nmp) && (np->n_flag & NDSCOMMIT) != 0) {
uio.uio_offset = offset;
uio.uio_resid = cnt;
error = nfscl_doiods(vp, &uio, NULL, NULL,
NFSV4OPEN_ACCESSWRITE, 1, cred, td);
if (error != 0) {
mtx_lock(&np->n_mtx);
np->n_flag &= ~NDSCOMMIT;
mtx_unlock(&np->n_mtx);
}
}
if (error != 0) {
mtx_lock(&nmp->nm_mtx);
if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
mtx_unlock(&nmp->nm_mtx);
return (0);
}
mtx_unlock(&nmp->nm_mtx);
error = nfsrpc_commit(vp, offset, cnt, cred, td, &nfsva,
&attrflag, NULL);
}
if (attrflag != 0)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
0, 1);
if (error != 0 && NFS_ISV4(vp))
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
return (error);
}
/*
* Strategy routine.
* For async requests when nfsiod(s) are running, queue the request by
* calling ncl_asyncio(); otherwise just call ncl_doio() to do the
* request.
*/
static int
nfs_strategy(struct vop_strategy_args *ap)
{
struct buf *bp;
struct vnode *vp;
struct ucred *cr;
bp = ap->a_bp;
vp = ap->a_vp;
KASSERT(bp->b_vp == vp, ("missing b_getvp"));
KASSERT(!(bp->b_flags & B_DONE),
("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
BUF_ASSERT_HELD(bp);
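/*
 * Translate the logical block number into DEV_BSIZE units if
 * it has not been mapped yet.
 */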
if (vp->v_type == VREG && bp->b_blkno == bp->b_lblkno)
bp->b_blkno = bp->b_lblkno * (vp->v_bufobj.bo_bsize /
DEV_BSIZE);
if (bp->b_iocmd == BIO_READ)
cr = bp->b_rcred;
else
cr = bp->b_wcred;
/*
* If the op is asynchronous and an i/o daemon is waiting,
* queue the request, wake the daemon up and wait for completion;
* otherwise just do it ourselves.
*/
if ((bp->b_flags & B_ASYNC) == 0 ||
ncl_asyncio(VFSTONFS(vp->v_mount), bp, NOCRED, curthread))
(void) ncl_doio(vp, bp, cr, curthread, 1);
return (0);
}
/*
* fsync vnode op. Just call ncl_flush() with commit == 1.
*/
/* ARGSUSED */
static int
nfs_fsync(struct vop_fsync_args *ap)
{
if (ap->a_vp->v_type != VREG) {
/*
* For NFS, metadata is changed synchronously on the server,
* so there is nothing to flush. Also, ncl_flush() clears
* the NMODIFIED flag and that shouldn't be done here for
* directories.
*/
return (0);
}
return (ncl_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1, 0));
}
/*
* Flush all the blocks associated with a vnode.
* Walk through the buffer pool and push any dirty pages
* associated with the vnode.
* If the called_from_renewthread argument is TRUE, it has been called
* from the NFSv4 renew thread and, as such, cannot block indefinitely
* waiting for a buffer write to complete.
*/
int
ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
int commit, int called_from_renewthread)
{
struct nfsnode *np = VTONFS(vp);
struct buf *bp;
int i;
struct buf *nbp;
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
int passone = 1, trycnt = 0;
u_quad_t off, endoff, toff;
struct ucred* wcred = NULL;
struct buf **bvec = NULL;
struct bufobj *bo;
#ifndef NFS_COMMITBVECSIZ
#define NFS_COMMITBVECSIZ 20
#endif
struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
- u_int bvecsize = 0, bveccount;
+ int bvecsize = 0, bveccount;
if (called_from_renewthread != 0)
slptimeo = hz;
if (nmp->nm_flag & NFSMNT_INT)
slpflag = PCATCH;
if (!commit)
passone = 0;
bo = &vp->v_bufobj;
/*
* A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
* server, but has not been committed to stable storage on the server
* yet. On the first pass, the byte range is worked out and the commit
* rpc is done. On the second pass, ncl_writebp() is called to do the
* job.
*/
again:
off = (u_quad_t)-1;
endoff = 0;
bvecpos = 0;
if (NFS_ISV34(vp) && commit) {
if (bvec != NULL && bvec != bvec_on_stack)
free(bvec, M_TEMP);
/*
* Count up how many buffers are waiting for a commit.
*/
bveccount = 0;
BO_LOCK(bo);
TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
if (!BUF_ISLOCKED(bp) &&
(bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
== (B_DELWRI | B_NEEDCOMMIT))
bveccount++;
}
/*
* Allocate space to remember the list of bufs to commit. It is
* important to use M_NOWAIT here to avoid a race with nfs_write.
* If we can't get memory (for whatever reason), we will end up
* committing the buffers one-by-one in the loop below.
*/
if (bveccount > NFS_COMMITBVECSIZ) {
/*
* Release the vnode interlock to avoid a lock
* order reversal.
*/
BO_UNLOCK(bo);
bvec = (struct buf **)
malloc(bveccount * sizeof(struct buf *),
M_TEMP, M_NOWAIT);
BO_LOCK(bo);
if (bvec == NULL) {
bvec = bvec_on_stack;
bvecsize = NFS_COMMITBVECSIZ;
} else
bvecsize = bveccount;
} else {
bvec = bvec_on_stack;
bvecsize = NFS_COMMITBVECSIZ;
}
TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
if (bvecpos >= bvecsize)
break;
if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
nbp = TAILQ_NEXT(bp, b_bobufs);
continue;
}
if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
(B_DELWRI | B_NEEDCOMMIT)) {
BUF_UNLOCK(bp);
nbp = TAILQ_NEXT(bp, b_bobufs);
continue;
}
BO_UNLOCK(bo);
bremfree(bp);
/*
* Work out if all buffers are using the same cred
* so we can deal with them all with one commit.
*
* NOTE: we are not clearing B_DONE here, so we have
* to do it later on in this routine if we intend to
* initiate I/O on the bp.
*
* Note: to avoid loopback deadlocks, we do not
* assign b_runningbufspace.
*/
if (wcred == NULL)
wcred = bp->b_wcred;
else if (wcred != bp->b_wcred)
wcred = NOCRED;
vfs_busy_pages(bp, 1);
BO_LOCK(bo);
/*
* bp is protected by being locked, but nbp is not
* and vfs_busy_pages() may sleep. We have to
* recalculate nbp.
*/
nbp = TAILQ_NEXT(bp, b_bobufs);
/*
* A list of these buffers is kept so that the
* second loop knows which buffers have actually
* been committed. This is necessary, since there
* may be a race between the commit rpc and new
* uncommitted writes on the file.
*/
bvec[bvecpos++] = bp;
toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
bp->b_dirtyoff;
if (toff < off)
off = toff;
toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
if (toff > endoff)
endoff = toff;
}
BO_UNLOCK(bo);
}
if (bvecpos > 0) {
/*
* Commit data on the server, as required.
* If all bufs are using the same wcred, then use that with
* one call for all of them, otherwise commit each one
* separately.
*/
if (wcred != NOCRED)
retv = ncl_commit(vp, off, (int)(endoff - off),
wcred, td);
else {
retv = 0;
for (i = 0; i < bvecpos; i++) {
off_t off, size;
bp = bvec[i];
off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
bp->b_dirtyoff;
size = (u_quad_t)(bp->b_dirtyend
- bp->b_dirtyoff);
retv = ncl_commit(vp, off, (int)size,
bp->b_wcred, td);
if (retv) break;
}
}
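/*
 * A changed write verifier means the server has rebooted and
 * lost uncommitted data, so all uncommitted writes on this
 * mount must be redone.
 */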
if (retv == NFSERR_STALEWRITEVERF)
ncl_clearcommit(vp->v_mount);
/*
* Now, either mark the blocks I/O done or mark the
* blocks dirty, depending on whether the commit
* succeeded.
*/
for (i = 0; i < bvecpos; i++) {
bp = bvec[i];
bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
if (retv) {
/*
* Error, leave B_DELWRI intact
*/
vfs_unbusy_pages(bp);
brelse(bp);
} else {
/*
* Success, remove B_DELWRI ( bundirty() ).
*
* b_dirtyoff/b_dirtyend seem to be NFS
* specific. We should probably move that
* into bundirty(). XXX
*/
bufobj_wref(bo);
bp->b_flags |= B_ASYNC;
bundirty(bp);
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_dirtyoff = bp->b_dirtyend = 0;
bufdone(bp);
}
}
}
/*
* Start/do any write(s) that are required.
*/
loop:
BO_LOCK(bo);
TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
if (waitfor != MNT_WAIT || passone)
continue;
error = BUF_TIMELOCK(bp,
LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
BO_LOCKPTR(bo), "nfsfsync", slpflag, slptimeo);
if (error == 0) {
BUF_UNLOCK(bp);
goto loop;
}
if (error == ENOLCK) {
error = 0;
goto loop;
}
if (called_from_renewthread != 0) {
/*
* Return EIO so the flush will be retried
* later.
*/
error = EIO;
goto done;
}
if (newnfs_sigintr(nmp, td)) {
error = EINTR;
goto done;
}
if (slpflag == PCATCH) {
slpflag = 0;
slptimeo = 2 * hz;
}
goto loop;
}
if ((bp->b_flags & B_DELWRI) == 0)
panic("nfs_fsync: not dirty");
if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
BUF_UNLOCK(bp);
continue;
}
BO_UNLOCK(bo);
bremfree(bp);
bp->b_flags |= B_ASYNC;
bwrite(bp);
if (newnfs_sigintr(nmp, td)) {
error = EINTR;
goto done;
}
goto loop;
}
if (passone) {
passone = 0;
BO_UNLOCK(bo);
goto again;
}
if (waitfor == MNT_WAIT) {
while (bo->bo_numoutput) {
error = bufobj_wwait(bo, slpflag, slptimeo);
if (error) {
BO_UNLOCK(bo);
if (called_from_renewthread != 0) {
/*
* Return EIO so that the flush will be
* retried later.
*/
error = EIO;
goto done;
}
error = newnfs_sigintr(nmp, td);
if (error)
goto done;
if (slpflag == PCATCH) {
slpflag = 0;
slptimeo = 2 * hz;
}
BO_LOCK(bo);
}
}
if (bo->bo_dirty.bv_cnt != 0 && commit) {
BO_UNLOCK(bo);
goto loop;
}
/*
* Wait for all the async IO requests to drain
*/
BO_UNLOCK(bo);
mtx_lock(&np->n_mtx);
while (np->n_directio_asyncwr > 0) {
np->n_flag |= NFSYNCWAIT;
error = newnfs_msleep(td, &np->n_directio_asyncwr,
&np->n_mtx, slpflag | (PRIBIO + 1),
"nfsfsync", 0);
if (error) {
if (newnfs_sigintr(nmp, td)) {
mtx_unlock(&np->n_mtx);
error = EINTR;
goto done;
}
}
}
mtx_unlock(&np->n_mtx);
} else
BO_UNLOCK(bo);
if (NFSHASPNFS(nmp)) {
nfscl_layoutcommit(vp, td);
/*
* Invalidate the attribute cache, since writes to a DS
* won't update the size attribute.
*/
mtx_lock(&np->n_mtx);
np->n_attrstamp = 0;
} else
mtx_lock(&np->n_mtx);
if (np->n_flag & NWRITEERR) {
error = np->n_error;
np->n_flag &= ~NWRITEERR;
}
if (commit && bo->bo_dirty.bv_cnt == 0 &&
bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
np->n_flag &= ~NMODIFIED;
mtx_unlock(&np->n_mtx);
done:
if (bvec != NULL && bvec != bvec_on_stack)
free(bvec, M_TEMP);
if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
(bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
np->n_directio_asyncwr != 0)) {
if (trycnt++ < 5) {
/* try, try again... */
passone = 1;
wcred = NULL;
bvec = NULL;
bvecsize = 0;
goto again;
}
vn_printf(vp, "ncl_flush failed");
error = called_from_renewthread != 0 ? EIO : EBUSY;
}
return (error);
}
/*
* NFS advisory byte-level locks.
*/
static int
nfs_advlock(struct vop_advlock_args *ap)
{
struct vnode *vp = ap->a_vp;
struct ucred *cred;
struct nfsnode *np = VTONFS(ap->a_vp);
struct proc *p = (struct proc *)ap->a_id;
struct thread *td = curthread; /* XXX */
struct vattr va;
int ret, error = EOPNOTSUPP;
u_quad_t size;
if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) {
if (vp->v_type != VREG)
return (EINVAL);
if ((ap->a_flags & F_POSIX) != 0)
cred = p->p_ucred;
else
cred = td->td_ucred;
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_iflag & VI_DOOMED) {
NFSVOPUNLOCK(vp, 0);
return (EBADF);
}
/*
* If this is unlocking a write locked region, flush and
* commit them before unlocking. This is required by
* RFC3530 Sec. 9.3.2.
*/
if (ap->a_op == F_UNLCK &&
nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id,
ap->a_flags))
(void) ncl_flush(vp, MNT_WAIT, td, 1, 0);
/*
* Loop around doing the lock op, since a blocking lock
* must wait for the lock op to succeed.
*/
do {
ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags);
if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
ap->a_op == F_SETLK) {
NFSVOPUNLOCK(vp, 0);
error = nfs_catnap(PZERO | PCATCH, ret,
"ncladvl");
if (error)
return (EINTR);
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_iflag & VI_DOOMED) {
NFSVOPUNLOCK(vp, 0);
return (EBADF);
}
}
} while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
ap->a_op == F_SETLK);
if (ret == NFSERR_DENIED) {
NFSVOPUNLOCK(vp, 0);
return (EAGAIN);
} else if (ret == EINVAL || ret == EBADF || ret == EINTR) {
NFSVOPUNLOCK(vp, 0);
return (ret);
} else if (ret != 0) {
NFSVOPUNLOCK(vp, 0);
return (EACCES);
}
/*
* Now, if we just got a lock, invalidate data in the buffer
* cache, as required, so that the coherency conforms with
* RFC3530 Sec. 9.3.2.
*/
if (ap->a_op == F_SETLK) {
if ((np->n_flag & NMODIFIED) == 0) {
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
ret = VOP_GETATTR(vp, &va, cred);
}
if ((np->n_flag & NMODIFIED) || ret ||
np->n_change != va.va_filerev) {
(void) ncl_vinvalbuf(vp, V_SAVE, td, 1);
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
ret = VOP_GETATTR(vp, &va, cred);
if (!ret) {
np->n_mtime = va.va_mtime;
np->n_change = va.va_filerev;
}
}
/* Mark that a file lock has been acquired. */
mtx_lock(&np->n_mtx);
np->n_flag |= NHASBEENLOCKED;
mtx_unlock(&np->n_mtx);
}
NFSVOPUNLOCK(vp, 0);
return (0);
} else if (!NFS_ISV4(vp)) {
error = NFSVOPLOCK(vp, LK_SHARED);
if (error)
return (error);
if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
size = VTONFS(vp)->n_size;
NFSVOPUNLOCK(vp, 0);
error = lf_advlock(ap, &(vp->v_lockf), size);
} else {
if (nfs_advlock_p != NULL)
error = nfs_advlock_p(ap);
else {
NFSVOPUNLOCK(vp, 0);
error = ENOLCK;
}
}
if (error == 0 && ap->a_op == F_SETLK) {
error = NFSVOPLOCK(vp, LK_SHARED);
if (error == 0) {
/* Mark that a file lock has been acquired. */
mtx_lock(&np->n_mtx);
np->n_flag |= NHASBEENLOCKED;
mtx_unlock(&np->n_mtx);
NFSVOPUNLOCK(vp, 0);
}
}
}
return (error);
}
/*
* NFS advisory byte-level locks.
*/
static int
nfs_advlockasync(struct vop_advlockasync_args *ap)
{
struct vnode *vp = ap->a_vp;
u_quad_t size;
int error;
if (NFS_ISV4(vp))
return (EOPNOTSUPP);
error = NFSVOPLOCK(vp, LK_SHARED);
if (error)
return (error);
if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
size = VTONFS(vp)->n_size;
NFSVOPUNLOCK(vp, 0);
error = lf_advlockasync(ap, &(vp->v_lockf), size);
} else {
NFSVOPUNLOCK(vp, 0);
error = EOPNOTSUPP;
}
return (error);
}
/*
* Print out the contents of an nfsnode.
*/
static int
nfs_print(struct vop_print_args *ap)
{
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
printf("\tfileid %jd fsid 0x%jx", (uintmax_t)np->n_vattr.na_fileid,
(uintmax_t)np->n_vattr.na_fsid);
if (vp->v_type == VFIFO)
fifo_printinfo(vp);
printf("\n");
return (0);
}
/*
* This is the "real" nfs::bwrite(struct buf*).
* We set B_CACHE if this is a VMIO buffer.
*/
int
ncl_writebp(struct buf *bp, int force __unused, struct thread *td)
{
int oldflags, rtval;
BUF_ASSERT_HELD(bp);
if (bp->b_flags & B_INVAL) {
brelse(bp);
return (0);
}
oldflags = bp->b_flags;
bp->b_flags |= B_CACHE;
/*
* Undirty the bp. We will redirty it later if the I/O fails.
*/
bundirty(bp);
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_iocmd = BIO_WRITE;
bufobj_wref(bp->b_bufobj);
curthread->td_ru.ru_oublock++;
/*
* Note: to avoid loopback deadlocks, we do not
* assign b_runningbufspace.
*/
vfs_busy_pages(bp, 1);
BUF_KERNPROC(bp);
bp->b_iooffset = dbtob(bp->b_blkno);
bstrategy(bp);
if ((oldflags & B_ASYNC) != 0)
return (0);
rtval = bufwait(bp);
if (oldflags & B_DELWRI)
reassignbuf(bp);
brelse(bp);
return (rtval);
}
/*
* nfs special file access vnode op.
* Essentially just get vattr and then imitate iaccess() since the device is
* local to the client.
*/
static int
nfsspec_access(struct vop_access_args *ap)
{
struct vattr *vap;
struct ucred *cred = ap->a_cred;
struct vnode *vp = ap->a_vp;
accmode_t accmode = ap->a_accmode;
struct vattr vattr;
int error;
/*
* Disallow write attempts on filesystems mounted read-only,
* unless the file is a socket, fifo, or a block or character
* device resident on the filesystem.
*/
if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
switch (vp->v_type) {
case VREG:
case VDIR:
case VLNK:
return (EROFS);
default:
break;
}
}
vap = &vattr;
error = VOP_GETATTR(vp, vap, cred);
if (error)
goto out;
error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
accmode, cred, NULL);
out:
return (error);
}
/*
* Read wrapper for fifos.
*/
static int
nfsfifo_read(struct vop_read_args *ap)
{
struct nfsnode *np = VTONFS(ap->a_vp);
int error;
/*
* Set access flag.
*/
mtx_lock(&np->n_mtx);
np->n_flag |= NACC;
vfs_timestamp(&np->n_atim);
mtx_unlock(&np->n_mtx);
error = fifo_specops.vop_read(ap);
return (error);
}
/*
* Write wrapper for fifos.
*/
static int
nfsfifo_write(struct vop_write_args *ap)
{
struct nfsnode *np = VTONFS(ap->a_vp);
/*
* Set update flag.
*/
mtx_lock(&np->n_mtx);
np->n_flag |= NUPD;
vfs_timestamp(&np->n_mtim);
mtx_unlock(&np->n_mtx);
return (fifo_specops.vop_write(ap));
}
/*
* Close wrapper for fifos.
*
* Update the times on the nfsnode then do fifo close.
*/
static int
nfsfifo_close(struct vop_close_args *ap)
{
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
struct vattr vattr;
struct timespec ts;
mtx_lock(&np->n_mtx);
if (np->n_flag & (NACC | NUPD)) {
vfs_timestamp(&ts);
if (np->n_flag & NACC)
np->n_atim = ts;
if (np->n_flag & NUPD)
np->n_mtim = ts;
np->n_flag |= NCHG;
if (vrefcnt(vp) == 1 &&
(vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
VATTR_NULL(&vattr);
if (np->n_flag & NACC)
vattr.va_atime = np->n_atim;
if (np->n_flag & NUPD)
vattr.va_mtime = np->n_mtim;
mtx_unlock(&np->n_mtx);
(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
goto out;
}
}
mtx_unlock(&np->n_mtx);
out:
return (fifo_specops.vop_close(ap));
}
/*
* Just call ncl_writebp() with the force argument set to 1.
*
* NOTE: B_DONE may or may not be set in a_bp on call.
*/
static int
nfs_bwrite(struct buf *bp)
{
return (ncl_writebp(bp, 1, curthread));
}
struct buf_ops buf_ops_newnfs = {
.bop_name = "buf_ops_nfs",
.bop_write = nfs_bwrite,
.bop_strategy = bufstrategy,
.bop_sync = bufsync,
.bop_bdflush = bufbdflush,
};
static int
nfs_getacl(struct vop_getacl_args *ap)
{
int error;
if (ap->a_type != ACL_TYPE_NFS4)
return (EOPNOTSUPP);
error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
NULL);
if (error > NFSERR_STALE) {
(void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
error = EPERM;
}
return (error);
}
static int
nfs_setacl(struct vop_setacl_args *ap)
{
int error;
if (ap->a_type != ACL_TYPE_NFS4)
return (EOPNOTSUPP);
error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
NULL);
if (error > NFSERR_STALE) {
(void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
error = EPERM;
}
return (error);
}
static int
nfs_set_text(struct vop_set_text_args *ap)
{
struct vnode *vp = ap->a_vp;
struct nfsnode *np;
/*
* If the text file has been mmap'd, flush any dirty pages to the
* buffer cache and then...
* Make sure all writes are pushed to the NFS server. If this is not
* done, the modify time of the file can change while the text
* file is being executed. This will cause the process that is
* executing the text file to be terminated.
*/
if (vp->v_object != NULL) {
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(vp->v_object);
}
/* Now, flush the buffer cache. */
ncl_flush(vp, MNT_WAIT, curthread, 0, 0);
/* And, finally, make sure that n_mtime is up to date. */
np = VTONFS(vp);
mtx_lock(&np->n_mtx);
np->n_mtime = np->n_vattr.na_mtime;
mtx_unlock(&np->n_mtx);
vp->v_vflag |= VV_TEXT;
return (0);
}
/*
* Return POSIX pathconf information applicable to nfs filesystems.
*/
static int
nfs_pathconf(struct vop_pathconf_args *ap)
{
struct nfsv3_pathconf pc;
struct nfsvattr nfsva;
struct vnode *vp = ap->a_vp;
struct thread *td = curthread;
int attrflag, error;
if ((NFS_ISV34(vp) && (ap->a_name == _PC_LINK_MAX ||
ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED ||
ap->a_name == _PC_NO_TRUNC)) ||
(NFS_ISV4(vp) && ap->a_name == _PC_ACL_NFS4)) {
/*
* Since only the above 4 a_names are returned by the NFSv3
* Pathconf RPC, there is no point in doing it for others.
* For NFSv4, the Pathconf RPC (actually a Getattr Op.) can
* be used for _PC_ACL_NFS4 as well.
*/
error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva,
&attrflag, NULL);
if (attrflag != 0)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
1);
if (error != 0)
return (error);
} else {
/*
* For NFSv2 (or NFSv3 when not one of the above 4 a_names),
* just fake them.
*/
pc.pc_linkmax = NFS_LINK_MAX;
pc.pc_namemax = NFS_MAXNAMLEN;
pc.pc_notrunc = 1;
pc.pc_chownrestricted = 1;
pc.pc_caseinsensitive = 0;
pc.pc_casepreserving = 1;
error = 0;
}
switch (ap->a_name) {
case _PC_LINK_MAX:
#ifdef _LP64
*ap->a_retval = pc.pc_linkmax;
#else
*ap->a_retval = MIN(LONG_MAX, pc.pc_linkmax);
#endif
break;
case _PC_NAME_MAX:
*ap->a_retval = pc.pc_namemax;
break;
case _PC_PIPE_BUF:
if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO)
*ap->a_retval = PIPE_BUF;
else
error = EINVAL;
break;
case _PC_CHOWN_RESTRICTED:
*ap->a_retval = pc.pc_chownrestricted;
break;
case _PC_NO_TRUNC:
*ap->a_retval = pc.pc_notrunc;
break;
case _PC_ACL_EXTENDED:
*ap->a_retval = 0;
break;
case _PC_ACL_NFS4:
if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 &&
NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL))
*ap->a_retval = 1;
else
*ap->a_retval = 0;
break;
case _PC_ACL_PATH_MAX:
if (NFS_ISV4(vp))
*ap->a_retval = ACL_MAX_ENTRIES;
else
*ap->a_retval = 3;
break;
case _PC_MAC_PRESENT:
*ap->a_retval = 0;
break;
case _PC_PRIO_IO:
*ap->a_retval = 0;
break;
case _PC_SYNC_IO:
*ap->a_retval = 0;
break;
case _PC_ALLOC_SIZE_MIN:
*ap->a_retval = vp->v_mount->mnt_stat.f_bsize;
break;
case _PC_FILESIZEBITS:
if (NFS_ISV34(vp))
*ap->a_retval = 64;
else
*ap->a_retval = 32;
break;
case _PC_REC_INCR_XFER_SIZE:
*ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
break;
case _PC_REC_MAX_XFER_SIZE:
*ap->a_retval = -1; /* means ``unlimited'' */
break;
case _PC_REC_MIN_XFER_SIZE:
*ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
break;
case _PC_REC_XFER_ALIGN:
*ap->a_retval = PAGE_SIZE;
break;
case _PC_SYMLINK_MAX:
*ap->a_retval = NFS_MAXPATHLEN;
break;
default:
error = vop_stdpathconf(ap);
break;
}
return (error);
}
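/*
 * Editor's illustrative sketch (not from the FreeBSD sources): querying the
 * limits that nfs_pathconf() above reports.  pathconf(2) returns -1 with
 * errno left unchanged when a value is unlimited, so errno must be cleared
 * before the call.  The mount point name is hypothetical.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
static void
show_nfs_pathconf(const char *path)
{
	long v;
	errno = 0;
	v = pathconf(path, _PC_LINK_MAX);	/* NFSv3/v4: from the Pathconf RPC */
	if (v == -1 && errno == 0)
		printf("%s: LINK_MAX is unlimited\n", path);
	else
		printf("%s: LINK_MAX = %ld\n", path, v);
	errno = 0;
	v = pathconf(path, _PC_NAME_MAX);	/* NFSv2: faked as NFS_MAXNAMLEN */
	printf("%s: NAME_MAX = %ld\n", path, v);
}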
Index: head/sys/geom/uzip/g_uzip_zlib.c
===================================================================
--- head/sys/geom/uzip/g_uzip_zlib.c (revision 328217)
+++ head/sys/geom/uzip/g_uzip_zlib.c (revision 328218)
@@ -1,145 +1,145 @@
/*-
* Copyright (c) 2004 Max Khon
* Copyright (c) 2014 Juniper Networks, Inc.
* Copyright (c) 2006-2016 Maxim Sobolev <sobomax@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/zlib.h>
#include <geom/uzip/g_uzip.h>
#include <geom/uzip/g_uzip_dapi.h>
#include <geom/uzip/g_uzip_zlib.h>
struct g_uzip_zlib {
uint32_t blksz;
struct g_uzip_dapi pub;
/* Zlib decoder structs */
z_stream zs;
};
static void *z_alloc(void *, u_int, u_int);
static void z_free(void *, void *);
static int g_uzip_zlib_rewind(struct g_uzip_dapi *, const char *);
static void
g_uzip_zlib_free(struct g_uzip_dapi *zpp)
{
struct g_uzip_zlib *zp;
zp = (struct g_uzip_zlib *)zpp->pvt;
inflateEnd(&zp->zs);
free(zp, M_GEOM_UZIP);
}
static int
g_uzip_zlib_decompress(struct g_uzip_dapi *zpp, const char *gp_name, void *ibp,
size_t ilen, void *obp)
{
int err;
struct g_uzip_zlib *zp;
zp = (struct g_uzip_zlib *)zpp->pvt;
zp->zs.next_in = ibp;
zp->zs.avail_in = ilen;
zp->zs.next_out = obp;
zp->zs.avail_out = zp->blksz;
err = (inflate(&zp->zs, Z_FINISH) != Z_STREAM_END) ? 1 : 0;
if (err != 0) {
printf("%s: UZIP(zlib) inflate() failed\n", gp_name);
}
return (err);
}
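/*
 * Editor's illustrative sketch (not part of the driver): the same
 * single-shot contract expressed in userland terms.  The whole compressed
 * cluster is handed over at once and the output buffer is sized for a full
 * block, so anything short of a complete stream is treated as an error.
 * Uses stock zlib's uncompress() wrapper; names are hypothetical.
 */
#include <zlib.h>
static int
uzip_style_decompress(unsigned char *obuf, unsigned long blksz,
    const unsigned char *ibuf, unsigned long ilen)
{
	unsigned long olen = blksz;
	/* Z_OK here implies the stream ended within the output buffer. */
	if (uncompress(obuf, &olen, ibuf, ilen) != Z_OK)
		return (1);	/* mirror the driver's 0/1 convention */
	return (0);
}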
static int
g_uzip_zlib_rewind(struct g_uzip_dapi *zpp, const char *gp_name)
{
int err;
struct g_uzip_zlib *zp;
zp = (struct g_uzip_zlib *)zpp->pvt;
err = 0;
if (inflateReset(&zp->zs) != Z_OK) {
printf("%s: UZIP(zlib) decoder reset failed\n", gp_name);
err = 1;
}
return (err);
}
static int
z_compressBound(int len)
{
return (len + (len >> 12) + (len >> 14) + 11);
}
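/*
 * Editor's worked example: the bound above follows zlib's compressBound()
 * growth terms (roughly len/4096 + len/16384 plus a small constant), so for
 * a 64 KiB cluster the scratch buffer is only 31 bytes larger than the
 * block itself.
 */
_Static_assert(65536 + (65536 >> 12) + (65536 >> 14) + 11 == 65567,
    "z_compressBound(65536) == 65567");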
struct g_uzip_dapi *
g_uzip_zlib_ctor(uint32_t blksz)
{
struct g_uzip_zlib *zp;
zp = malloc(sizeof(struct g_uzip_zlib), M_GEOM_UZIP, M_WAITOK);
zp->zs.zalloc = z_alloc;
zp->zs.zfree = z_free;
if (inflateInit(&zp->zs) != Z_OK) {
goto e1;
}
zp->blksz = blksz;
zp->pub.max_blen = z_compressBound(blksz);
zp->pub.decompress = &g_uzip_zlib_decompress;
zp->pub.free = &g_uzip_zlib_free;
zp->pub.rewind = &g_uzip_zlib_rewind;
zp->pub.pvt = (void *)zp;
return (&zp->pub);
e1:
free(zp, M_GEOM_UZIP);
return (NULL);
}
static void *
z_alloc(void *nil, u_int type, u_int size)
{
void *ptr;
- ptr = mallocarray(type, size, M_GEOM_UZIP, M_NOWAIT);
+ ptr = malloc(type * size, M_GEOM_UZIP, M_NOWAIT);
return (ptr);
}
static void
z_free(void *nil, void *ptr)
{
free(ptr, M_GEOM_UZIP);
}
Index: head/sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c
===================================================================
--- head/sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c (revision 328217)
+++ head/sys/gnu/dev/bwn/phy_n/if_bwn_phy_n_core.c (revision 328218)
@@ -1,6834 +1,6833 @@
/*
Broadcom B43 wireless driver
IEEE 802.11n PHY data tables
Copyright (c) 2008 Michael Buesch <m@bues.ch>
Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* The Broadcom Wireless LAN controller driver.
*/
#include "opt_wlan.h"
#include "opt_bwn.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/firmware.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/bwn/if_bwnreg.h>
#include <dev/bwn/if_bwnvar.h>
#include <dev/bwn/if_bwn_misc.h>
#include <dev/bwn/if_bwn_util.h>
#include <dev/bwn/if_bwn_debug.h>
#include <dev/bwn/if_bwn_phy_common.h>
#include <dev/bwn/if_bwn_chipid.h>
#include <dev/bwn/if_bwn_cordic.h>
#include <gnu/dev/bwn/phy_n/if_bwn_phy_n_regs.h>
#include <gnu/dev/bwn/phy_n/if_bwn_phy_n_ppr.h>
#include <gnu/dev/bwn/phy_n/if_bwn_phy_n_tables.h>
#include <gnu/dev/bwn/phy_n/if_bwn_radio_2055.h>
#include <gnu/dev/bwn/phy_n/if_bwn_radio_2056.h>
#include <gnu/dev/bwn/phy_n/if_bwn_radio_2057.h>
#include <gnu/dev/bwn/phy_n/if_bwn_phy_n_core.h>
struct bwn_nphy_txgains {
uint16_t tx_lpf[2];
uint16_t txgm[2];
uint16_t pga[2];
uint16_t pad[2];
uint16_t ipa[2];
};
struct bwn_nphy_iqcal_params {
uint16_t tx_lpf;
uint16_t txgm;
uint16_t pga;
uint16_t pad;
uint16_t ipa;
uint16_t cal_gain;
uint16_t ncorr[5];
};
struct bwn_nphy_iq_est {
int32_t iq0_prod;
uint32_t i0_pwr;
uint32_t q0_pwr;
int32_t iq1_prod;
uint32_t i1_pwr;
uint32_t q1_pwr;
};
enum bwn_nphy_rf_sequence {
BWN_RFSEQ_RX2TX,
BWN_RFSEQ_TX2RX,
BWN_RFSEQ_RESET2RX,
BWN_RFSEQ_UPDATE_GAINH,
BWN_RFSEQ_UPDATE_GAINL,
BWN_RFSEQ_UPDATE_GAINU,
};
enum n_rf_ctl_over_cmd {
N_RF_CTL_OVER_CMD_RXRF_PU = 0,
N_RF_CTL_OVER_CMD_RX_PU = 1,
N_RF_CTL_OVER_CMD_TX_PU = 2,
N_RF_CTL_OVER_CMD_RX_GAIN = 3,
N_RF_CTL_OVER_CMD_TX_GAIN = 4,
};
enum n_intc_override {
N_INTC_OVERRIDE_OFF = 0,
N_INTC_OVERRIDE_TRSW = 1,
N_INTC_OVERRIDE_PA = 2,
N_INTC_OVERRIDE_EXT_LNA_PU = 3,
N_INTC_OVERRIDE_EXT_LNA_GAIN = 4,
};
enum n_rssi_type {
N_RSSI_W1 = 0,
N_RSSI_W2,
N_RSSI_NB,
N_RSSI_IQ,
N_RSSI_TSSI_2G,
N_RSSI_TSSI_5G,
N_RSSI_TBD,
};
enum n_rail_type {
N_RAIL_I = 0,
N_RAIL_Q = 1,
};
static inline bool bwn_nphy_ipa(struct bwn_mac *mac)
{
bwn_band_t band = bwn_current_band(mac);
return ((mac->mac_phy.phy_n->ipa2g_on && band == BWN_BAND_2G) ||
(mac->mac_phy.phy_n->ipa5g_on && band == BWN_BAND_5G));
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
static uint8_t bwn_nphy_get_rx_core_state(struct bwn_mac *mac)
{
return (BWN_PHY_READ(mac, BWN_NPHY_RFSEQCA) & BWN_NPHY_RFSEQCA_RXEN) >>
BWN_NPHY_RFSEQCA_RXEN_SHIFT;
}
/**************************************************
* RF (just without bwn_nphy_rf_ctl_intc_override)
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
static void bwn_nphy_force_rf_sequence(struct bwn_mac *mac,
enum bwn_nphy_rf_sequence seq)
{
static const uint16_t trigger[] = {
[BWN_RFSEQ_RX2TX] = BWN_NPHY_RFSEQTR_RX2TX,
[BWN_RFSEQ_TX2RX] = BWN_NPHY_RFSEQTR_TX2RX,
[BWN_RFSEQ_RESET2RX] = BWN_NPHY_RFSEQTR_RST2RX,
[BWN_RFSEQ_UPDATE_GAINH] = BWN_NPHY_RFSEQTR_UPGH,
[BWN_RFSEQ_UPDATE_GAINL] = BWN_NPHY_RFSEQTR_UPGL,
[BWN_RFSEQ_UPDATE_GAINU] = BWN_NPHY_RFSEQTR_UPGU,
};
int i;
uint16_t seq_mode = BWN_PHY_READ(mac, BWN_NPHY_RFSEQMODE);
if (seq >= nitems(trigger)) {
BWN_WARNPRINTF(mac->mac_sc, "%s: seq %d > max", __func__, seq);
}
BWN_PHY_SET(mac, BWN_NPHY_RFSEQMODE,
BWN_NPHY_RFSEQMODE_CAOVER | BWN_NPHY_RFSEQMODE_TROVER);
BWN_PHY_SET(mac, BWN_NPHY_RFSEQTR, trigger[seq]);
for (i = 0; i < 200; i++) {
if (!(BWN_PHY_READ(mac, BWN_NPHY_RFSEQST) & trigger[seq]))
goto ok;
DELAY(1000);
}
BWN_ERRPRINTF(mac->mac_sc, "RF sequence status timeout\n");
ok:
BWN_PHY_WRITE(mac, BWN_NPHY_RFSEQMODE, seq_mode);
}
static void bwn_nphy_rf_ctl_override_rev19(struct bwn_mac *mac, uint16_t field,
uint16_t value, uint8_t core, bool off,
uint8_t override_id)
{
/* TODO */
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
static void bwn_nphy_rf_ctl_override_rev7(struct bwn_mac *mac, uint16_t field,
uint16_t value, uint8_t core, bool off,
uint8_t override)
{
struct bwn_phy *phy = &mac->mac_phy;
const struct bwn_nphy_rf_control_override_rev7 *e;
uint16_t en_addrs[3][2] = {
{ 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
};
uint16_t en_addr;
uint16_t en_mask = field;
uint16_t val_addr;
uint8_t i;
if (phy->rev >= 19 || phy->rev < 3) {
BWN_WARNPRINTF(mac->mac_sc, "%s: phy rev %d out of range\n",
__func__,
phy->rev);
return;
}
/* Remember: we can get NULL! */
e = bwn_nphy_get_rf_ctl_over_rev7(mac, field, override);
for (i = 0; i < 2; i++) {
if (override >= nitems(en_addrs)) {
BWN_ERRPRINTF(mac->mac_sc, "Invalid override value %d\n", override);
return;
}
en_addr = en_addrs[override][i];
if (e)
val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
if (off) {
BWN_PHY_MASK(mac, en_addr, ~en_mask);
if (e) /* Do it safer, better than wl */
BWN_PHY_MASK(mac, val_addr, ~e->val_mask);
} else {
if (!core || (core & (1 << i))) {
BWN_PHY_SET(mac, en_addr, en_mask);
if (e)
BWN_PHY_SETMASK(mac, val_addr, ~e->val_mask, (value << e->val_shift));
}
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */
static void bwn_nphy_rf_ctl_override_one_to_many(struct bwn_mac *mac,
enum n_rf_ctl_over_cmd cmd,
uint16_t value, uint8_t core, bool off)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t tmp;
if (phy->rev < 7) {
BWN_ERRPRINTF(mac->mac_sc, "%s: phy rev %d out of range\n",
__func__,
phy->rev);
}
switch (cmd) {
case N_RF_CTL_OVER_CMD_RXRF_PU:
bwn_nphy_rf_ctl_override_rev7(mac, 0x20, value, core, off, 1);
bwn_nphy_rf_ctl_override_rev7(mac, 0x10, value, core, off, 1);
bwn_nphy_rf_ctl_override_rev7(mac, 0x08, value, core, off, 1);
break;
case N_RF_CTL_OVER_CMD_RX_PU:
bwn_nphy_rf_ctl_override_rev7(mac, 0x4, value, core, off, 1);
bwn_nphy_rf_ctl_override_rev7(mac, 0x2, value, core, off, 1);
bwn_nphy_rf_ctl_override_rev7(mac, 0x1, value, core, off, 1);
bwn_nphy_rf_ctl_override_rev7(mac, 0x2, value, core, off, 2);
bwn_nphy_rf_ctl_override_rev7(mac, 0x0800, 0, core, off, 1);
break;
case N_RF_CTL_OVER_CMD_TX_PU:
bwn_nphy_rf_ctl_override_rev7(mac, 0x4, value, core, off, 0);
bwn_nphy_rf_ctl_override_rev7(mac, 0x2, value, core, off, 1);
bwn_nphy_rf_ctl_override_rev7(mac, 0x1, value, core, off, 2);
bwn_nphy_rf_ctl_override_rev7(mac, 0x0800, 1, core, off, 1);
break;
case N_RF_CTL_OVER_CMD_RX_GAIN:
tmp = value & 0xFF;
bwn_nphy_rf_ctl_override_rev7(mac, 0x0800, tmp, core, off, 0);
tmp = value >> 8;
bwn_nphy_rf_ctl_override_rev7(mac, 0x6000, tmp, core, off, 0);
break;
case N_RF_CTL_OVER_CMD_TX_GAIN:
tmp = value & 0x7FFF;
bwn_nphy_rf_ctl_override_rev7(mac, 0x1000, tmp, core, off, 0);
tmp = value >> 14;
bwn_nphy_rf_ctl_override_rev7(mac, 0x4000, tmp, core, off, 0);
break;
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
static void bwn_nphy_rf_ctl_override(struct bwn_mac *mac, uint16_t field,
uint16_t value, uint8_t core, bool off)
{
int i;
uint8_t index = fls(field);
uint8_t addr, en_addr, val_addr;
/* we expect only one bit set */
if (field & (~(1 << (index - 1)))) {
BWN_ERRPRINTF(mac->mac_sc, "%s: field 0x%04x has >1 bit set\n",
__func__,
field);
}
if (mac->mac_phy.rev >= 3) {
const struct bwn_nphy_rf_control_override_rev3 *rf_ctrl;
for (i = 0; i < 2; i++) {
if (index == 0 || index == 16) {
BWN_ERRPRINTF(mac->mac_sc,
"Unsupported RF Ctrl Override call\n");
return;
}
rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
en_addr = BWN_PHY_N((i == 0) ?
rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
val_addr = BWN_PHY_N((i == 0) ?
rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
if (off) {
BWN_PHY_MASK(mac, en_addr, ~(field));
BWN_PHY_MASK(mac, val_addr,
~(rf_ctrl->val_mask));
} else {
if (core == 0 || ((1 << i) & core)) {
BWN_PHY_SET(mac, en_addr, field);
BWN_PHY_SETMASK(mac, val_addr,
~(rf_ctrl->val_mask),
(value << rf_ctrl->val_shift));
}
}
}
} else {
const struct bwn_nphy_rf_control_override_rev2 *rf_ctrl;
if (off) {
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_OVER, ~(field));
value = 0;
} else {
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_OVER, field);
}
for (i = 0; i < 2; i++) {
if (index <= 1 || index == 16) {
BWN_ERRPRINTF(mac->mac_sc,
"Unsupported RF Ctrl Override call\n");
return;
}
if (index == 2 || index == 10 ||
(index >= 13 && index <= 15)) {
core = 1;
}
rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
addr = BWN_PHY_N((i == 0) ?
rf_ctrl->addr0 : rf_ctrl->addr1);
if ((1 << i) & core)
BWN_PHY_SETMASK(mac, addr, ~(rf_ctrl->bmask),
(value << rf_ctrl->shift));
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_OVER, 0x1);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_START);
DELAY(1);
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_OVER, 0xFFFE);
}
}
}
static void bwn_nphy_rf_ctl_intc_override_rev7(struct bwn_mac *mac,
enum n_intc_override intc_override,
uint16_t value, uint8_t core_sel)
{
uint16_t reg, tmp, tmp2, val;
int core;
/* TODO: What about rev19+? Revs 3+ and 7+ are a bit similar */
for (core = 0; core < 2; core++) {
if ((core_sel == 1 && core != 0) ||
(core_sel == 2 && core != 1))
continue;
reg = (core == 0) ? BWN_NPHY_RFCTL_INTC1 : BWN_NPHY_RFCTL_INTC2;
switch (intc_override) {
case N_INTC_OVERRIDE_OFF:
BWN_PHY_WRITE(mac, reg, 0);
BWN_PHY_MASK(mac, 0x2ff, ~0x2000);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
break;
case N_INTC_OVERRIDE_TRSW:
BWN_PHY_SETMASK(mac, reg, ~0xC0, value << 6);
BWN_PHY_SET(mac, reg, 0x400);
BWN_PHY_MASK(mac, 0x2ff, ~0xC000 & 0xFFFF);
BWN_PHY_SET(mac, 0x2ff, 0x2000);
BWN_PHY_SET(mac, 0x2ff, 0x0001);
break;
case N_INTC_OVERRIDE_PA:
tmp = 0x0030;
if (bwn_current_band(mac) == BWN_BAND_5G)
val = value << 5;
else
val = value << 4;
BWN_PHY_SETMASK(mac, reg, ~tmp, val);
BWN_PHY_SET(mac, reg, 0x1000);
break;
case N_INTC_OVERRIDE_EXT_LNA_PU:
if (bwn_current_band(mac) == BWN_BAND_5G) {
tmp = 0x0001;
tmp2 = 0x0004;
val = value;
} else {
tmp = 0x0004;
tmp2 = 0x0001;
val = value << 2;
}
BWN_PHY_SETMASK(mac, reg, ~tmp, val);
BWN_PHY_MASK(mac, reg, ~tmp2);
break;
case N_INTC_OVERRIDE_EXT_LNA_GAIN:
if (bwn_current_band(mac) == BWN_BAND_5G) {
tmp = 0x0002;
tmp2 = 0x0008;
val = value << 1;
} else {
tmp = 0x0008;
tmp2 = 0x0002;
val = value << 3;
}
BWN_PHY_SETMASK(mac, reg, ~tmp, val);
BWN_PHY_MASK(mac, reg, ~tmp2);
break;
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
static void bwn_nphy_rf_ctl_intc_override(struct bwn_mac *mac,
enum n_intc_override intc_override,
uint16_t value, uint8_t core)
{
uint8_t i, j;
uint16_t reg, tmp, val;
if (mac->mac_phy.rev >= 7) {
bwn_nphy_rf_ctl_intc_override_rev7(mac, intc_override, value,
core);
return;
}
if (mac->mac_phy.rev < 3) {
BWN_ERRPRINTF(mac->mac_sc, "%s: phy rev %d out of range\n",
__func__,
mac->mac_phy.rev);
}
for (i = 0; i < 2; i++) {
if ((core == 1 && i == 1) || (core == 2 && !i))
continue;
reg = (i == 0) ?
BWN_NPHY_RFCTL_INTC1 : BWN_NPHY_RFCTL_INTC2;
BWN_PHY_SET(mac, reg, 0x400);
switch (intc_override) {
case N_INTC_OVERRIDE_OFF:
BWN_PHY_WRITE(mac, reg, 0);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
break;
case N_INTC_OVERRIDE_TRSW:
if (!i) {
BWN_PHY_SETMASK(mac, BWN_NPHY_RFCTL_INTC1,
0xFC3F, (value << 6));
BWN_PHY_SETMASK(mac, BWN_NPHY_TXF_40CO_B1S1,
0xFFFE, 1);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_START);
for (j = 0; j < 100; j++) {
if (!(BWN_PHY_READ(mac, BWN_NPHY_RFCTL_CMD) & BWN_NPHY_RFCTL_CMD_START)) {
j = 0;
break;
}
DELAY(10);
}
if (j)
BWN_ERRPRINTF(mac->mac_sc,
"intc override timeout\n");
BWN_PHY_MASK(mac, BWN_NPHY_TXF_40CO_B1S1,
0xFFFE);
} else {
BWN_PHY_SETMASK(mac, BWN_NPHY_RFCTL_INTC2,
0xFC3F, (value << 6));
BWN_PHY_SETMASK(mac, BWN_NPHY_RFCTL_OVER,
0xFFFE, 1);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_RXTX);
for (j = 0; j < 100; j++) {
if (!(BWN_PHY_READ(mac, BWN_NPHY_RFCTL_CMD) & BWN_NPHY_RFCTL_CMD_RXTX)) {
j = 0;
break;
}
DELAY(10);
}
if (j)
BWN_ERRPRINTF(mac->mac_sc,
"intc override timeout\n");
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_OVER,
0xFFFE);
}
break;
case N_INTC_OVERRIDE_PA:
if (bwn_current_band(mac) == BWN_BAND_5G) {
tmp = 0x0020;
val = value << 5;
} else {
tmp = 0x0010;
val = value << 4;
}
BWN_PHY_SETMASK(mac, reg, ~tmp, val);
break;
case N_INTC_OVERRIDE_EXT_LNA_PU:
if (bwn_current_band(mac) == BWN_BAND_5G) {
tmp = 0x0001;
val = value;
} else {
tmp = 0x0004;
val = value << 2;
}
BWN_PHY_SETMASK(mac, reg, ~tmp, val);
break;
case N_INTC_OVERRIDE_EXT_LNA_GAIN:
if (bwn_current_band(mac) == BWN_BAND_5G) {
tmp = 0x0002;
val = value << 1;
} else {
tmp = 0x0008;
val = value << 3;
}
BWN_PHY_SETMASK(mac, reg, ~tmp, val);
break;
}
}
}
/**************************************************
* Various PHY ops
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
static void bwn_nphy_write_clip_detection(struct bwn_mac *mac,
const uint16_t *clip_st)
{
BWN_PHY_WRITE(mac, BWN_NPHY_C1_CLIP1THRES, clip_st[0]);
BWN_PHY_WRITE(mac, BWN_NPHY_C2_CLIP1THRES, clip_st[1]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
static void bwn_nphy_read_clip_detection(struct bwn_mac *mac, uint16_t *clip_st)
{
clip_st[0] = BWN_PHY_READ(mac, BWN_NPHY_C1_CLIP1THRES);
clip_st[1] = BWN_PHY_READ(mac, BWN_NPHY_C2_CLIP1THRES);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
static uint16_t bwn_nphy_classifier(struct bwn_mac *mac, uint16_t mask, uint16_t val)
{
struct bwn_softc *sc = mac->mac_sc;
uint16_t tmp;
if (siba_get_revid(sc->sc_dev) == 16)
bwn_mac_suspend(mac);
tmp = BWN_PHY_READ(mac, BWN_NPHY_CLASSCTL);
tmp &= (BWN_NPHY_CLASSCTL_CCKEN | BWN_NPHY_CLASSCTL_OFDMEN |
BWN_NPHY_CLASSCTL_WAITEDEN);
tmp &= ~mask;
tmp |= (val & mask);
BWN_PHY_SETMASK(mac, BWN_NPHY_CLASSCTL, 0xFFF8, tmp);
if (siba_get_revid(sc->sc_dev) == 16)
bwn_mac_enable(mac);
return tmp;
}
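/*
 * Editor's usage sketch (hypothetical helpers, not in the driver): a zero
 * mask makes the call a pure read of the three enable bits, while a
 * non-zero mask rewrites only the masked bits and returns the resulting
 * setting.  This is exactly how bwn_nphy_stay_in_carrier_search() below
 * saves and later restores the classifier state.
 */
static inline uint16_t
bwn_nphy_classifier_save(struct bwn_mac *mac)
{
	return (bwn_nphy_classifier(mac, 0, 0));	/* read, change nothing */
}
static inline void
bwn_nphy_classifier_restore(struct bwn_mac *mac, uint16_t saved)
{
	(void)bwn_nphy_classifier(mac, 0x7, saved);	/* rewrite all three bits */
}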
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
static void bwn_nphy_reset_cca(struct bwn_mac *mac)
{
uint16_t bbcfg;
bwn_phy_force_clock(mac, 1);
bbcfg = BWN_PHY_READ(mac, BWN_NPHY_BBCFG);
BWN_PHY_WRITE(mac, BWN_NPHY_BBCFG, bbcfg | BWN_NPHY_BBCFG_RSTCCA);
DELAY(1);
BWN_PHY_WRITE(mac, BWN_NPHY_BBCFG, bbcfg & ~BWN_NPHY_BBCFG_RSTCCA);
bwn_phy_force_clock(mac, 0);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
static void bwn_nphy_stay_in_carrier_search(struct bwn_mac *mac, bool enable)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = phy->phy_n;
if (enable) {
static const uint16_t clip[] = { 0xFFFF, 0xFFFF };
if (nphy->deaf_count++ == 0) {
nphy->classifier_state = bwn_nphy_classifier(mac, 0, 0);
bwn_nphy_classifier(mac, 0x7,
BWN_NPHY_CLASSCTL_WAITEDEN);
bwn_nphy_read_clip_detection(mac, nphy->clip_state);
bwn_nphy_write_clip_detection(mac, clip);
}
bwn_nphy_reset_cca(mac);
} else {
if (--nphy->deaf_count == 0) {
bwn_nphy_classifier(mac, 0x7, nphy->classifier_state);
bwn_nphy_write_clip_detection(mac, nphy->clip_state);
}
}
}
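/*
 * Editor's usage sketch (hypothetical, not in the driver): carrier-search
 * suppression is reference counted through nphy->deaf_count, so calibration
 * helpers simply bracket their register pokes; the classifier and clip
 * thresholds are saved on the first enable and restored on the last
 * disable, as bwn_nphy_set_rf_sequence() below does when hang_avoid is set.
 */
static void
bwn_nphy_example_cal_step(struct bwn_mac *mac)
{
	bwn_nphy_stay_in_carrier_search(mac, true);
	/* ... touch RF/PHY state that would otherwise trip the classifier ... */
	bwn_nphy_stay_in_carrier_search(mac, false);
}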
/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
static uint16_t bwn_nphy_read_lpf_ctl(struct bwn_mac *mac, uint16_t offset)
{
if (!offset)
offset = bwn_is_40mhz(mac) ? 0x159 : 0x154;
return bwn_ntab_read(mac, BWN_NTAB16(7, offset)) & 0x7;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
static void bwn_nphy_adjust_lna_gain_table(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t i;
int16_t tmp;
uint16_t data[4];
int16_t gain[2];
uint16_t minmax[2];
static const uint16_t lna_gain[4] = { -2, 10, 19, 25 };
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
if (nphy->gain_boost) {
if (bwn_current_band(mac) == BWN_BAND_2G) {
gain[0] = 6;
gain[1] = 6;
} else {
tmp = 40370 - 315 * bwn_get_chan(mac);
gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
tmp = 23242 - 224 * bwn_get_chan(mac);
gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
}
} else {
gain[0] = 0;
gain[1] = 0;
}
for (i = 0; i < 2; i++) {
if (nphy->elna_gain_config) {
data[0] = 19 + gain[i];
data[1] = 25 + gain[i];
data[2] = 25 + gain[i];
data[3] = 25 + gain[i];
} else {
data[0] = lna_gain[0] + gain[i];
data[1] = lna_gain[1] + gain[i];
data[2] = lna_gain[2] + gain[i];
data[3] = lna_gain[3] + gain[i];
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(i, 8), 4, data);
minmax[i] = 23 + gain[i];
}
BWN_PHY_SETMASK(mac, BWN_NPHY_C1_MINMAX_GAIN, ~BWN_NPHY_C1_MINGAIN,
minmax[0] << BWN_NPHY_C1_MINGAIN_SHIFT);
BWN_PHY_SETMASK(mac, BWN_NPHY_C2_MINMAX_GAIN, ~BWN_NPHY_C2_MINGAIN,
minmax[1] << BWN_NPHY_C2_MINGAIN_SHIFT);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
}
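/*
 * Editor's worked example: the (tmp >> 13) + ((tmp >> 12) & 1) idiom above
 * rounds tmp/8192 to the nearest integer.  For 5 GHz channel 36,
 * tmp = 40370 - 315 * 36 = 29030 and 29030 / 8192 ~ 3.54, so gain[0] is 4.
 */
_Static_assert((29030 >> 13) + ((29030 >> 12) & 1) == 4,
    "rounded divide of 29030 by 8192 is 4");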
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
static void bwn_nphy_set_rf_sequence(struct bwn_mac *mac, uint8_t cmd,
uint8_t *events, uint8_t *delays, uint8_t length)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t i;
uint8_t end = (mac->mac_phy.rev >= 3) ? 0x1F : 0x0F;
uint16_t offset1 = cmd << 4;
uint16_t offset2 = offset1 + 0x80;
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, true);
bwn_ntab_write_bulk(mac, BWN_NTAB8(7, offset1), length, events);
bwn_ntab_write_bulk(mac, BWN_NTAB8(7, offset2), length, delays);
for (i = length; i < 16; i++) {
bwn_ntab_write(mac, BWN_NTAB8(7, offset1 + i), end);
bwn_ntab_write(mac, BWN_NTAB8(7, offset2 + i), 1);
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, false);
}
/**************************************************
* Radio 0x2057
**************************************************/
static void bwn_radio_2057_chantab_upload(struct bwn_mac *mac,
const struct bwn_nphy_chantabent_rev7 *e_r7,
const struct bwn_nphy_chantabent_rev7_2g *e_r7_2g)
{
if (e_r7_2g) {
BWN_RF_WRITE(mac, R2057_VCOCAL_COUNTVAL0, e_r7_2g->radio_vcocal_countval0);
BWN_RF_WRITE(mac, R2057_VCOCAL_COUNTVAL1, e_r7_2g->radio_vcocal_countval1);
BWN_RF_WRITE(mac, R2057_RFPLL_REFMASTER_SPAREXTALSIZE, e_r7_2g->radio_rfpll_refmaster_sparextalsize);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_R1, e_r7_2g->radio_rfpll_loopfilter_r1);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C2, e_r7_2g->radio_rfpll_loopfilter_c2);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C1, e_r7_2g->radio_rfpll_loopfilter_c1);
BWN_RF_WRITE(mac, R2057_CP_KPD_IDAC, e_r7_2g->radio_cp_kpd_idac);
BWN_RF_WRITE(mac, R2057_RFPLL_MMD0, e_r7_2g->radio_rfpll_mmd0);
BWN_RF_WRITE(mac, R2057_RFPLL_MMD1, e_r7_2g->radio_rfpll_mmd1);
BWN_RF_WRITE(mac, R2057_VCOBUF_TUNE, e_r7_2g->radio_vcobuf_tune);
BWN_RF_WRITE(mac, R2057_LOGEN_MX2G_TUNE, e_r7_2g->radio_logen_mx2g_tune);
BWN_RF_WRITE(mac, R2057_LOGEN_INDBUF2G_TUNE, e_r7_2g->radio_logen_indbuf2g_tune);
BWN_RF_WRITE(mac, R2057_TXMIX2G_TUNE_BOOST_PU_CORE0, e_r7_2g->radio_txmix2g_tune_boost_pu_core0);
BWN_RF_WRITE(mac, R2057_PAD2G_TUNE_PUS_CORE0, e_r7_2g->radio_pad2g_tune_pus_core0);
BWN_RF_WRITE(mac, R2057_LNA2G_TUNE_CORE0, e_r7_2g->radio_lna2g_tune_core0);
BWN_RF_WRITE(mac, R2057_TXMIX2G_TUNE_BOOST_PU_CORE1, e_r7_2g->radio_txmix2g_tune_boost_pu_core1);
BWN_RF_WRITE(mac, R2057_PAD2G_TUNE_PUS_CORE1, e_r7_2g->radio_pad2g_tune_pus_core1);
BWN_RF_WRITE(mac, R2057_LNA2G_TUNE_CORE1, e_r7_2g->radio_lna2g_tune_core1);
} else {
BWN_RF_WRITE(mac, R2057_VCOCAL_COUNTVAL0, e_r7->radio_vcocal_countval0);
BWN_RF_WRITE(mac, R2057_VCOCAL_COUNTVAL1, e_r7->radio_vcocal_countval1);
BWN_RF_WRITE(mac, R2057_RFPLL_REFMASTER_SPAREXTALSIZE, e_r7->radio_rfpll_refmaster_sparextalsize);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_R1, e_r7->radio_rfpll_loopfilter_r1);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C2, e_r7->radio_rfpll_loopfilter_c2);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C1, e_r7->radio_rfpll_loopfilter_c1);
BWN_RF_WRITE(mac, R2057_CP_KPD_IDAC, e_r7->radio_cp_kpd_idac);
BWN_RF_WRITE(mac, R2057_RFPLL_MMD0, e_r7->radio_rfpll_mmd0);
BWN_RF_WRITE(mac, R2057_RFPLL_MMD1, e_r7->radio_rfpll_mmd1);
BWN_RF_WRITE(mac, R2057_VCOBUF_TUNE, e_r7->radio_vcobuf_tune);
BWN_RF_WRITE(mac, R2057_LOGEN_MX2G_TUNE, e_r7->radio_logen_mx2g_tune);
BWN_RF_WRITE(mac, R2057_LOGEN_MX5G_TUNE, e_r7->radio_logen_mx5g_tune);
BWN_RF_WRITE(mac, R2057_LOGEN_INDBUF2G_TUNE, e_r7->radio_logen_indbuf2g_tune);
BWN_RF_WRITE(mac, R2057_LOGEN_INDBUF5G_TUNE, e_r7->radio_logen_indbuf5g_tune);
BWN_RF_WRITE(mac, R2057_TXMIX2G_TUNE_BOOST_PU_CORE0, e_r7->radio_txmix2g_tune_boost_pu_core0);
BWN_RF_WRITE(mac, R2057_PAD2G_TUNE_PUS_CORE0, e_r7->radio_pad2g_tune_pus_core0);
BWN_RF_WRITE(mac, R2057_PGA_BOOST_TUNE_CORE0, e_r7->radio_pga_boost_tune_core0);
BWN_RF_WRITE(mac, R2057_TXMIX5G_BOOST_TUNE_CORE0, e_r7->radio_txmix5g_boost_tune_core0);
BWN_RF_WRITE(mac, R2057_PAD5G_TUNE_MISC_PUS_CORE0, e_r7->radio_pad5g_tune_misc_pus_core0);
BWN_RF_WRITE(mac, R2057_LNA2G_TUNE_CORE0, e_r7->radio_lna2g_tune_core0);
BWN_RF_WRITE(mac, R2057_LNA5G_TUNE_CORE0, e_r7->radio_lna5g_tune_core0);
BWN_RF_WRITE(mac, R2057_TXMIX2G_TUNE_BOOST_PU_CORE1, e_r7->radio_txmix2g_tune_boost_pu_core1);
BWN_RF_WRITE(mac, R2057_PAD2G_TUNE_PUS_CORE1, e_r7->radio_pad2g_tune_pus_core1);
BWN_RF_WRITE(mac, R2057_PGA_BOOST_TUNE_CORE1, e_r7->radio_pga_boost_tune_core1);
BWN_RF_WRITE(mac, R2057_TXMIX5G_BOOST_TUNE_CORE1, e_r7->radio_txmix5g_boost_tune_core1);
BWN_RF_WRITE(mac, R2057_PAD5G_TUNE_MISC_PUS_CORE1, e_r7->radio_pad5g_tune_misc_pus_core1);
BWN_RF_WRITE(mac, R2057_LNA2G_TUNE_CORE1, e_r7->radio_lna2g_tune_core1);
BWN_RF_WRITE(mac, R2057_LNA5G_TUNE_CORE1, e_r7->radio_lna5g_tune_core1);
}
}
static void bwn_radio_2057_setup(struct bwn_mac *mac,
const struct bwn_nphy_chantabent_rev7 *tabent_r7,
const struct bwn_nphy_chantabent_rev7_2g *tabent_r7_2g)
{
struct bwn_phy *phy = &mac->mac_phy;
bwn_radio_2057_chantab_upload(mac, tabent_r7, tabent_r7_2g);
switch (phy->rf_rev) {
case 0 ... 4:
case 6:
if (bwn_current_band(mac) == BWN_BAND_2G) {
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_R1, 0x3f);
BWN_RF_WRITE(mac, R2057_CP_KPD_IDAC, 0x3f);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C1, 0x8);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C2, 0x8);
} else {
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_R1, 0x1f);
BWN_RF_WRITE(mac, R2057_CP_KPD_IDAC, 0x3f);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C1, 0x8);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C2, 0x8);
}
break;
case 9: /* e.g. PHY rev 16 */
BWN_RF_WRITE(mac, R2057_LOGEN_PTAT_RESETS, 0x20);
BWN_RF_WRITE(mac, R2057_VCOBUF_IDACS, 0x18);
if (bwn_current_band(mac) == BWN_BAND_5G) {
BWN_RF_WRITE(mac, R2057_LOGEN_PTAT_RESETS, 0x38);
BWN_RF_WRITE(mac, R2057_VCOBUF_IDACS, 0x0f);
if (bwn_is_40mhz(mac)) {
/* TODO */
} else {
BWN_RF_WRITE(mac,
R2057_PAD_BIAS_FILTER_BWS_CORE0,
0x3c);
BWN_RF_WRITE(mac,
R2057_PAD_BIAS_FILTER_BWS_CORE1,
0x3c);
}
}
break;
case 14: /* 2 GHz only */
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_R1, 0x1b);
BWN_RF_WRITE(mac, R2057_CP_KPD_IDAC, 0x3f);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C1, 0x1f);
BWN_RF_WRITE(mac, R2057_RFPLL_LOOPFILTER_C2, 0x1f);
break;
}
if (bwn_current_band(mac) == BWN_BAND_2G) {
uint16_t txmix2g_tune_boost_pu = 0;
uint16_t pad2g_tune_pus = 0;
if (bwn_nphy_ipa(mac)) {
switch (phy->rf_rev) {
case 9:
txmix2g_tune_boost_pu = 0x0041;
/* TODO */
break;
case 14:
txmix2g_tune_boost_pu = 0x21;
pad2g_tune_pus = 0x23;
break;
}
}
if (txmix2g_tune_boost_pu)
BWN_RF_WRITE(mac, R2057_TXMIX2G_TUNE_BOOST_PU_CORE0,
txmix2g_tune_boost_pu);
if (pad2g_tune_pus)
BWN_RF_WRITE(mac, R2057_PAD2G_TUNE_PUS_CORE0,
pad2g_tune_pus);
if (txmix2g_tune_boost_pu)
BWN_RF_WRITE(mac, R2057_TXMIX2G_TUNE_BOOST_PU_CORE1,
txmix2g_tune_boost_pu);
if (pad2g_tune_pus)
BWN_RF_WRITE(mac, R2057_PAD2G_TUNE_PUS_CORE1,
pad2g_tune_pus);
}
/* 50..100 */
DELAY(100);
/* VCO calibration */
BWN_RF_MASK(mac, R2057_RFPLL_MISC_EN, ~0x01);
BWN_RF_MASK(mac, R2057_RFPLL_MISC_CAL_RESETN, ~0x04);
BWN_RF_SET(mac, R2057_RFPLL_MISC_CAL_RESETN, 0x4);
BWN_RF_SET(mac, R2057_RFPLL_MISC_EN, 0x01);
/* 300..600 */
DELAY(600);
}
/* Calibrate resistors in LPF of PLL?
* http://bcm-v4.sipsolutions.net/PHY/radio205x_rcal
*/
static uint8_t bwn_radio_2057_rcal(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t saved_regs_phy[12];
uint16_t saved_regs_phy_rf[6];
uint16_t saved_regs_radio[2] = { };
static const uint16_t phy_to_store[] = {
BWN_NPHY_RFCTL_RSSIO1, BWN_NPHY_RFCTL_RSSIO2,
BWN_NPHY_RFCTL_LUT_TRSW_LO1, BWN_NPHY_RFCTL_LUT_TRSW_LO2,
BWN_NPHY_RFCTL_RXG1, BWN_NPHY_RFCTL_RXG2,
BWN_NPHY_RFCTL_TXG1, BWN_NPHY_RFCTL_TXG2,
BWN_NPHY_REV7_RF_CTL_MISC_REG3, BWN_NPHY_REV7_RF_CTL_MISC_REG4,
BWN_NPHY_REV7_RF_CTL_MISC_REG5, BWN_NPHY_REV7_RF_CTL_MISC_REG6,
};
static const uint16_t phy_to_store_rf[] = {
BWN_NPHY_REV3_RFCTL_OVER0, BWN_NPHY_REV3_RFCTL_OVER1,
BWN_NPHY_REV7_RF_CTL_OVER3, BWN_NPHY_REV7_RF_CTL_OVER4,
BWN_NPHY_REV7_RF_CTL_OVER5, BWN_NPHY_REV7_RF_CTL_OVER6,
};
uint16_t tmp;
int i;
/* Save */
for (i = 0; i < nitems(phy_to_store); i++)
saved_regs_phy[i] = BWN_PHY_READ(mac, phy_to_store[i]);
for (i = 0; i < nitems(phy_to_store_rf); i++)
saved_regs_phy_rf[i] = BWN_PHY_READ(mac, phy_to_store_rf[i]);
/* Set */
for (i = 0; i < nitems(phy_to_store); i++)
BWN_PHY_WRITE(mac, phy_to_store[i], 0);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_RFCTL_OVER0, 0x07ff);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_RFCTL_OVER1, 0x07ff);
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER3, 0x07ff);
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER4, 0x07ff);
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER5, 0x007f);
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER6, 0x007f);
switch (phy->rf_rev) {
case 5:
BWN_PHY_MASK(mac, BWN_NPHY_REV7_RF_CTL_OVER3, ~0x2);
DELAY(10);
BWN_RF_SET(mac, R2057_IQTEST_SEL_PU, 0x1);
BWN_RF_SETMASK(mac, R2057v7_IQTEST_SEL_PU2, ~0x2, 0x1);
break;
case 9:
BWN_PHY_SET(mac, BWN_NPHY_REV7_RF_CTL_OVER3, 0x2);
BWN_PHY_SET(mac, BWN_NPHY_REV7_RF_CTL_MISC_REG3, 0x2);
saved_regs_radio[0] = BWN_RF_READ(mac, R2057_IQTEST_SEL_PU);
BWN_RF_WRITE(mac, R2057_IQTEST_SEL_PU, 0x11);
break;
case 14:
saved_regs_radio[0] = BWN_RF_READ(mac, R2057_IQTEST_SEL_PU);
saved_regs_radio[1] = BWN_RF_READ(mac, R2057v7_IQTEST_SEL_PU2);
BWN_PHY_SET(mac, BWN_NPHY_REV7_RF_CTL_MISC_REG3, 0x2);
BWN_PHY_SET(mac, BWN_NPHY_REV7_RF_CTL_OVER3, 0x2);
BWN_RF_WRITE(mac, R2057v7_IQTEST_SEL_PU2, 0x2);
BWN_RF_WRITE(mac, R2057_IQTEST_SEL_PU, 0x1);
break;
}
/* Enable */
BWN_RF_SET(mac, R2057_RCAL_CONFIG, 0x1);
DELAY(10);
/* Start */
BWN_RF_SET(mac, R2057_RCAL_CONFIG, 0x2);
/* 100..200 */
DELAY(200);
/* Stop */
BWN_RF_MASK(mac, R2057_RCAL_CONFIG, ~0x2);
/* Wait and check for result */
if (!bwn_radio_wait_value(mac, R2057_RCAL_STATUS, 1, 1, 100, 1000000)) {
BWN_ERRPRINTF(mac->mac_sc, "Radio 0x2057 rcal timeout\n");
return 0;
}
tmp = BWN_RF_READ(mac, R2057_RCAL_STATUS) & 0x3E;
/* Disable */
BWN_RF_MASK(mac, R2057_RCAL_CONFIG, ~0x1);
/* Restore */
for (i = 0; i < nitems(phy_to_store_rf); i++)
BWN_PHY_WRITE(mac, phy_to_store_rf[i], saved_regs_phy_rf[i]);
for (i = 0; i < nitems(phy_to_store); i++)
BWN_PHY_WRITE(mac, phy_to_store[i], saved_regs_phy[i]);
switch (phy->rf_rev) {
case 0 ... 4:
case 6:
BWN_RF_SETMASK(mac, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
BWN_RF_SETMASK(mac, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
tmp << 2);
break;
case 5:
BWN_RF_MASK(mac, R2057_IPA2G_CASCONV_CORE0, ~0x1);
BWN_RF_MASK(mac, R2057v7_IQTEST_SEL_PU2, ~0x2);
break;
case 9:
BWN_RF_WRITE(mac, R2057_IQTEST_SEL_PU, saved_regs_radio[0]);
break;
case 14:
BWN_RF_WRITE(mac, R2057_IQTEST_SEL_PU, saved_regs_radio[0]);
BWN_RF_WRITE(mac, R2057v7_IQTEST_SEL_PU2, saved_regs_radio[1]);
break;
}
return tmp & 0x3e;
}
/* Calibrate the internal RC oscillator?
* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal
*/
static uint16_t bwn_radio_2057_rccal(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
bool special = (phy->rf_rev == 3 || phy->rf_rev == 4 ||
phy->rf_rev == 6);
uint16_t tmp;
/* Setup cal */
if (special) {
BWN_RF_WRITE(mac, R2057_RCCAL_MASTER, 0x61);
BWN_RF_WRITE(mac, R2057_RCCAL_TRC0, 0xC0);
} else {
BWN_RF_WRITE(mac, R2057v7_RCCAL_MASTER, 0x61);
BWN_RF_WRITE(mac, R2057_RCCAL_TRC0, 0xE9);
}
BWN_RF_WRITE(mac, R2057_RCCAL_X1, 0x6E);
/* Start, wait, stop */
BWN_RF_WRITE(mac, R2057_RCCAL_START_R1_Q1_P1, 0x55);
if (!bwn_radio_wait_value(mac, R2057_RCCAL_DONE_OSCCAP, 2, 2, 500,
5000000))
BWN_DBGPRINTF(mac, "Radio 0x2057 rccal timeout\n");
/* 35..70 */
DELAY(70);
BWN_RF_WRITE(mac, R2057_RCCAL_START_R1_Q1_P1, 0x15);
/* 70..140 */
DELAY(140);
/* Setup cal */
if (special) {
BWN_RF_WRITE(mac, R2057_RCCAL_MASTER, 0x69);
BWN_RF_WRITE(mac, R2057_RCCAL_TRC0, 0xB0);
} else {
BWN_RF_WRITE(mac, R2057v7_RCCAL_MASTER, 0x69);
BWN_RF_WRITE(mac, R2057_RCCAL_TRC0, 0xD5);
}
BWN_RF_WRITE(mac, R2057_RCCAL_X1, 0x6E);
/* Start, wait, stop */
/* 35..70 */
DELAY(70);
BWN_RF_WRITE(mac, R2057_RCCAL_START_R1_Q1_P1, 0x55);
/* 70..140 */
DELAY(140);
if (!bwn_radio_wait_value(mac, R2057_RCCAL_DONE_OSCCAP, 2, 2, 500,
5000000))
BWN_DBGPRINTF(mac, "Radio 0x2057 rccal timeout\n");
/* 35..70 */
DELAY(70);
BWN_RF_WRITE(mac, R2057_RCCAL_START_R1_Q1_P1, 0x15);
/* 70..140 */
DELAY(140);
/* Setup cal */
if (special) {
BWN_RF_WRITE(mac, R2057_RCCAL_MASTER, 0x73);
BWN_RF_WRITE(mac, R2057_RCCAL_X1, 0x28);
BWN_RF_WRITE(mac, R2057_RCCAL_TRC0, 0xB0);
} else {
BWN_RF_WRITE(mac, R2057v7_RCCAL_MASTER, 0x73);
BWN_RF_WRITE(mac, R2057_RCCAL_X1, 0x6E);
BWN_RF_WRITE(mac, R2057_RCCAL_TRC0, 0x99);
}
/* Start, wait, stop */
/* 35..70 */
DELAY(70);
BWN_RF_WRITE(mac, R2057_RCCAL_START_R1_Q1_P1, 0x55);
/* 70..140 */
DELAY(140);
if (!bwn_radio_wait_value(mac, R2057_RCCAL_DONE_OSCCAP, 2, 2, 500,
5000000)) {
BWN_ERRPRINTF(mac->mac_sc, "Radio 0x2057 rcal timeout\n");
return 0;
}
tmp = BWN_RF_READ(mac, R2057_RCCAL_DONE_OSCCAP);
/* 35..70 */
DELAY(70);
BWN_RF_WRITE(mac, R2057_RCCAL_START_R1_Q1_P1, 0x15);
/* 70..140 */
DELAY(140);
if (special)
BWN_RF_MASK(mac, R2057_RCCAL_MASTER, ~0x1);
else
BWN_RF_MASK(mac, R2057v7_RCCAL_MASTER, ~0x1);
return tmp;
}
static void bwn_radio_2057_init_pre(struct bwn_mac *mac)
{
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD, ~BWN_NPHY_RFCTL_CMD_CHIP0PU);
/* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD, BWN_NPHY_RFCTL_CMD_OEPORFORCE);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD, ~BWN_NPHY_RFCTL_CMD_OEPORFORCE);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD, BWN_NPHY_RFCTL_CMD_CHIP0PU);
}
static void bwn_radio_2057_init_post(struct bwn_mac *mac)
{
BWN_RF_SET(mac, R2057_XTALPUOVR_PINCTRL, 0x1);
if (0) /* FIXME: Is this BCM43217 specific? */
BWN_RF_SET(mac, R2057_XTALPUOVR_PINCTRL, 0x2);
BWN_RF_SET(mac, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
BWN_RF_SET(mac, R2057_XTAL_CONFIG2, 0x80);
DELAY(2000);
BWN_RF_MASK(mac, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
BWN_RF_MASK(mac, R2057_XTAL_CONFIG2, ~0x80);
if (mac->mac_phy.phy_do_full_init) {
bwn_radio_2057_rcal(mac);
bwn_radio_2057_rccal(mac);
}
BWN_RF_MASK(mac, R2057_RFPLL_MASTER, ~0x8);
}
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
static void bwn_radio_2057_init(struct bwn_mac *mac)
{
bwn_radio_2057_init_pre(mac);
r2057_upload_inittabs(mac);
bwn_radio_2057_init_post(mac);
}
/**************************************************
* Radio 0x2056
**************************************************/
static void bwn_chantab_radio_2056_upload(struct bwn_mac *mac,
const struct bwn_nphy_channeltab_entry_rev3 *e)
{
BWN_RF_WRITE(mac, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1);
BWN_RF_WRITE(mac, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2);
BWN_RF_WRITE(mac, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv);
BWN_RF_WRITE(mac, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2);
BWN_RF_WRITE(mac, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER1,
e->radio_syn_pll_loopfilter1);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER2,
e->radio_syn_pll_loopfilter2);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER3,
e->radio_syn_pll_loopfilter3);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER4,
e->radio_syn_pll_loopfilter4);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER5,
e->radio_syn_pll_loopfilter5);
BWN_RF_WRITE(mac, B2056_SYN_RESERVED_ADDR27,
e->radio_syn_reserved_addr27);
BWN_RF_WRITE(mac, B2056_SYN_RESERVED_ADDR28,
e->radio_syn_reserved_addr28);
BWN_RF_WRITE(mac, B2056_SYN_RESERVED_ADDR29,
e->radio_syn_reserved_addr29);
BWN_RF_WRITE(mac, B2056_SYN_LOGEN_VCOBUF1,
e->radio_syn_logen_vcobuf1);
BWN_RF_WRITE(mac, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2);
BWN_RF_WRITE(mac, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3);
BWN_RF_WRITE(mac, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_LNAA_TUNE,
e->radio_rx0_lnaa_tune);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_LNAG_TUNE,
e->radio_rx0_lnag_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE,
e->radio_tx0_intpaa_boost_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_INTPAG_BOOST_TUNE,
e->radio_tx0_intpag_boost_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_PADA_BOOST_TUNE,
e->radio_tx0_pada_boost_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_PADG_BOOST_TUNE,
e->radio_tx0_padg_boost_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_PGAA_BOOST_TUNE,
e->radio_tx0_pgaa_boost_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_PGAG_BOOST_TUNE,
e->radio_tx0_pgag_boost_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_MIXA_BOOST_TUNE,
e->radio_tx0_mixa_boost_tune);
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_MIXG_BOOST_TUNE,
e->radio_tx0_mixg_boost_tune);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_LNAA_TUNE,
e->radio_rx1_lnaa_tune);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_LNAG_TUNE,
e->radio_rx1_lnag_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE,
e->radio_tx1_intpaa_boost_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_INTPAG_BOOST_TUNE,
e->radio_tx1_intpag_boost_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_PADA_BOOST_TUNE,
e->radio_tx1_pada_boost_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_PADG_BOOST_TUNE,
e->radio_tx1_padg_boost_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_PGAA_BOOST_TUNE,
e->radio_tx1_pgaa_boost_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_PGAG_BOOST_TUNE,
e->radio_tx1_pgag_boost_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_MIXA_BOOST_TUNE,
e->radio_tx1_mixa_boost_tune);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_MIXG_BOOST_TUNE,
e->radio_tx1_mixg_boost_tune);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */
static void bwn_radio_2056_setup(struct bwn_mac *mac,
const struct bwn_nphy_channeltab_entry_rev3 *e)
{
struct bwn_softc *sc = mac->mac_sc;
bwn_band_t band = bwn_current_band(mac);
uint16_t offset;
uint8_t i;
uint16_t bias, cbias;
uint16_t pag_boost, padg_boost, pgag_boost, mixg_boost;
uint16_t paa_boost, pada_boost, pgaa_boost, mixa_boost;
bool is_pkg_fab_smic;
DPRINTF(mac->mac_sc, BWN_DEBUG_RF, "%s: called\n", __func__);
if (mac->mac_phy.rev < 3) {
BWN_ERRPRINTF(mac->mac_sc, "%s: phy rev %d out of range\n",
__func__,
mac->mac_phy.rev);
}
is_pkg_fab_smic =
((siba_get_chipid(sc->sc_dev) == BCMA_CHIP_ID_BCM43224 ||
siba_get_chipid(sc->sc_dev) == BCMA_CHIP_ID_BCM43225 ||
siba_get_chipid(sc->sc_dev) == BCMA_CHIP_ID_BCM43421) &&
siba_get_chippkg(sc->sc_dev) == BCMA_PKG_ID_BCM43224_FAB_SMIC);
bwn_chantab_radio_2056_upload(mac, e);
b2056_upload_syn_pll_cp2(mac, band == BWN_BAND_5G);
if (siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_GPLL_WAR &&
bwn_current_band(mac) == BWN_BAND_2G) {
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
if (siba_get_chipid(sc->sc_dev) == BCMA_CHIP_ID_BCM4716 ||
siba_get_chipid(sc->sc_dev) == BCMA_CHIP_ID_BCM47162) {
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER4, 0x14);
BWN_RF_WRITE(mac, B2056_SYN_PLL_CP2, 0);
} else {
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
BWN_RF_WRITE(mac, B2056_SYN_PLL_CP2, 0x14);
}
}
if (siba_sprom_get_bf2_hi(sc->sc_dev) & BWN_BFH2_GPLL_WAR2 &&
bwn_current_band(mac) == BWN_BAND_2G) {
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
BWN_RF_WRITE(mac, B2056_SYN_PLL_CP2, 0x20);
}
if (siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_APLL_WAR &&
bwn_current_band(mac) == BWN_BAND_5G) {
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
BWN_RF_WRITE(mac, B2056_SYN_PLL_LOOPFILTER4, 0x05);
BWN_RF_WRITE(mac, B2056_SYN_PLL_CP2, 0x0C);
}
if (mac->mac_phy.phy_n->ipa2g_on && band == BWN_BAND_2G) {
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
if (mac->mac_phy.rev >= 5) {
BWN_RF_WRITE(mac,
offset | B2056_TX_PADG_IDAC, 0xcc);
if (siba_get_chipid(sc->sc_dev) == BCMA_CHIP_ID_BCM4716 ||
siba_get_chipid(sc->sc_dev) == BCMA_CHIP_ID_BCM47162) {
bias = 0x40;
cbias = 0x45;
pag_boost = 0x5;
pgag_boost = 0x33;
mixg_boost = 0x55;
} else {
bias = 0x25;
cbias = 0x20;
if (is_pkg_fab_smic) {
bias = 0x2a;
cbias = 0x38;
}
pag_boost = 0x4;
pgag_boost = 0x03;
mixg_boost = 0x65;
}
padg_boost = 0x77;
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAG_IMAIN_STAT,
bias);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAG_IAUX_STAT,
bias);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAG_CASCBIAS,
cbias);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAG_BOOST_TUNE,
pag_boost);
BWN_RF_WRITE(mac,
offset | B2056_TX_PGAG_BOOST_TUNE,
pgag_boost);
BWN_RF_WRITE(mac,
offset | B2056_TX_PADG_BOOST_TUNE,
padg_boost);
BWN_RF_WRITE(mac,
offset | B2056_TX_MIXG_BOOST_TUNE,
mixg_boost);
} else {
bias = bwn_is_40mhz(mac) ? 0x40 : 0x20;
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAG_IMAIN_STAT,
bias);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAG_IAUX_STAT,
bias);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAG_CASCBIAS,
0x30);
}
BWN_RF_WRITE(mac, offset | B2056_TX_PA_SPARE1, 0xee);
}
} else if (mac->mac_phy.phy_n->ipa5g_on && band == BWN_BAND_5G) {
uint16_t freq = bwn_get_centre_freq(mac);
/* XXX 5g low/med/high? */
if (freq < 5100) {
paa_boost = 0xA;
pada_boost = 0x77;
pgaa_boost = 0xF;
mixa_boost = 0xF;
} else if (freq < 5340) {
paa_boost = 0x8;
pada_boost = 0x77;
pgaa_boost = 0xFB;
mixa_boost = 0xF;
} else if (freq < 5650) {
paa_boost = 0x0;
pada_boost = 0x77;
pgaa_boost = 0xB;
mixa_boost = 0xF;
} else {
paa_boost = 0x0;
pada_boost = 0x77;
if (freq != 5825)
pgaa_boost = -(freq - 18) / 36 + 168;
else
pgaa_boost = 6;
mixa_boost = 0xF;
}
cbias = is_pkg_fab_smic ? 0x35 : 0x30;
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
BWN_RF_WRITE(mac,
offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
BWN_RF_WRITE(mac,
offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
BWN_RF_WRITE(mac,
offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
BWN_RF_WRITE(mac,
offset | B2056_TX_TXSPARE1, 0x30);
BWN_RF_WRITE(mac,
offset | B2056_TX_PA_SPARE2, 0xee);
BWN_RF_WRITE(mac,
offset | B2056_TX_PADA_CASCBIAS, 0x03);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
BWN_RF_WRITE(mac,
offset | B2056_TX_INTPAA_CASCBIAS, cbias);
}
}
DELAY(50);
/* VCO calibration */
BWN_RF_WRITE(mac, B2056_SYN_PLL_VCOCAL12, 0x00);
BWN_RF_WRITE(mac, B2056_TX_INTPAA_PA_MISC, 0x38);
BWN_RF_WRITE(mac, B2056_TX_INTPAA_PA_MISC, 0x18);
BWN_RF_WRITE(mac, B2056_TX_INTPAA_PA_MISC, 0x38);
BWN_RF_WRITE(mac, B2056_TX_INTPAA_PA_MISC, 0x39);
DELAY(300);
}
static uint8_t bwn_radio_2056_rcal(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t mast2, tmp;
if (phy->rev != 3)
return 0;
DPRINTF(mac->mac_sc, BWN_DEBUG_RF, "%s: called\n", __func__);
mast2 = BWN_RF_READ(mac, B2056_SYN_PLL_MAST2);
BWN_RF_WRITE(mac, B2056_SYN_PLL_MAST2, mast2 | 0x7);
DELAY(10);
BWN_RF_WRITE(mac, B2056_SYN_RCAL_MASTER, 0x01);
DELAY(10);
BWN_RF_WRITE(mac, B2056_SYN_RCAL_MASTER, 0x09);
if (!bwn_radio_wait_value(mac, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
1000000)) {
BWN_ERRPRINTF(mac->mac_sc, "Radio recalibration timeout\n");
return 0;
}
BWN_RF_WRITE(mac, B2056_SYN_RCAL_MASTER, 0x01);
tmp = BWN_RF_READ(mac, B2056_SYN_RCAL_CODE_OUT);
BWN_RF_WRITE(mac, B2056_SYN_RCAL_MASTER, 0x00);
BWN_RF_WRITE(mac, B2056_SYN_PLL_MAST2, mast2);
return tmp & 0x1f;
}
static void bwn_radio_init2056_pre(struct bwn_mac *mac)
{
DPRINTF(mac->mac_sc, BWN_DEBUG_RF, "%s: called\n", __func__);
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD,
~BWN_NPHY_RFCTL_CMD_CHIP0PU);
/* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_OEPORFORCE);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
~BWN_NPHY_RFCTL_CMD_OEPORFORCE);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_CHIP0PU);
}
static void bwn_radio_init2056_post(struct bwn_mac *mac)
{
DPRINTF(mac->mac_sc, BWN_DEBUG_RF, "%s: called\n", __func__);
BWN_RF_SET(mac, B2056_SYN_COM_CTRL, 0xB);
BWN_RF_SET(mac, B2056_SYN_COM_PU, 0x2);
BWN_RF_SET(mac, B2056_SYN_COM_RESET, 0x2);
DELAY(1000);
BWN_RF_MASK(mac, B2056_SYN_COM_RESET, ~0x2);
BWN_RF_MASK(mac, B2056_SYN_PLL_MAST2, ~0xFC);
BWN_RF_MASK(mac, B2056_SYN_RCCAL_CTRL0, ~0x1);
if (mac->mac_phy.phy_do_full_init)
bwn_radio_2056_rcal(mac);
}
/*
* Initialize a Broadcom 2056 N-radio
* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
*/
static void bwn_radio_init2056(struct bwn_mac *mac)
{
DPRINTF(mac->mac_sc, BWN_DEBUG_RF, "%s: called\n", __func__);
bwn_radio_init2056_pre(mac);
b2056_upload_inittabs(mac, 0, 0);
bwn_radio_init2056_post(mac);
}
/**************************************************
* Radio 0x2055
**************************************************/
static void bwn_chantab_radio_upload(struct bwn_mac *mac,
const struct bwn_nphy_channeltab_entry_rev2 *e)
{
BWN_RF_WRITE(mac, B2055_PLL_REF, e->radio_pll_ref);
BWN_RF_WRITE(mac, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
BWN_RF_WRITE(mac, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
BWN_RF_WRITE(mac, B2055_VCO_CAPTAIL, e->radio_vco_captail);
BWN_READ_4(mac, BWN_MACCTL); /* flush writes */
BWN_RF_WRITE(mac, B2055_VCO_CAL1, e->radio_vco_cal1);
BWN_RF_WRITE(mac, B2055_VCO_CAL2, e->radio_vco_cal2);
BWN_RF_WRITE(mac, B2055_PLL_LFC1, e->radio_pll_lfc1);
BWN_RF_WRITE(mac, B2055_PLL_LFR1, e->radio_pll_lfr1);
BWN_READ_4(mac, BWN_MACCTL); /* flush writes */
BWN_RF_WRITE(mac, B2055_PLL_LFC2, e->radio_pll_lfc2);
BWN_RF_WRITE(mac, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
BWN_RF_WRITE(mac, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
BWN_RF_WRITE(mac, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
BWN_READ_4(mac, BWN_MACCTL); /* flush writes */
BWN_RF_WRITE(mac, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
BWN_RF_WRITE(mac, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
BWN_RF_WRITE(mac, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
BWN_RF_WRITE(mac, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
BWN_READ_4(mac, BWN_MACCTL); /* flush writes */
BWN_RF_WRITE(mac, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
BWN_RF_WRITE(mac, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
BWN_RF_WRITE(mac, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
BWN_RF_WRITE(mac, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
BWN_READ_4(mac, BWN_MACCTL); /* flush writes */
BWN_RF_WRITE(mac, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
BWN_RF_WRITE(mac, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
static void bwn_radio_2055_setup(struct bwn_mac *mac,
const struct bwn_nphy_channeltab_entry_rev2 *e)
{
if (mac->mac_phy.rev >= 3) {
BWN_ERRPRINTF(mac->mac_sc, "%s: phy rev %d out of range\n",
__func__,
mac->mac_phy.rev);
}
DPRINTF(mac->mac_sc, BWN_DEBUG_RF, "%s: called\n", __func__);
bwn_chantab_radio_upload(mac, e);
DELAY(50);
BWN_RF_WRITE(mac, B2055_VCO_CAL10, 0x05);
BWN_RF_WRITE(mac, B2055_VCO_CAL10, 0x45);
BWN_READ_4(mac, BWN_MACCTL); /* flush writes */
BWN_RF_WRITE(mac, B2055_VCO_CAL10, 0x65);
DELAY(300);
}
static void bwn_radio_init2055_pre(struct bwn_mac *mac)
{
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD,
~BWN_NPHY_RFCTL_CMD_PORFORCE);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_CHIP0PU |
BWN_NPHY_RFCTL_CMD_OEPORFORCE);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_PORFORCE);
}
static void bwn_radio_init2055_post(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
bool workaround = false;
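/* Whether the RX BB register workaround applies: device revisions below 4 key off the board vendor/subdevice/board rev, later ones off the BFL2_RXBB_INT_REG_DIS boardflag. */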
if (siba_get_revid(sc->sc_dev) < 4)
workaround =
(siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM)
&& (siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4321)
&& (siba_sprom_get_brev(sc->sc_dev) >= 0x41);
else
workaround =
!(siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_RXBB_INT_REG_DIS);
BWN_RF_MASK(mac, B2055_MASTER1, 0xFFF3);
if (workaround) {
BWN_RF_MASK(mac, B2055_C1_RX_BB_REG, 0x7F);
BWN_RF_MASK(mac, B2055_C2_RX_BB_REG, 0x7F);
}
BWN_RF_SETMASK(mac, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C);
BWN_RF_WRITE(mac, B2055_CAL_MISC, 0x3C);
BWN_RF_MASK(mac, B2055_CAL_MISC, 0xFFBE);
BWN_RF_SET(mac, B2055_CAL_LPOCTL, 0x80);
BWN_RF_SET(mac, B2055_CAL_MISC, 0x1);
DELAY(1000);
BWN_RF_SET(mac, B2055_CAL_MISC, 0x40);
if (!bwn_radio_wait_value(mac, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
BWN_ERRPRINTF(mac->mac_sc, "radio post init timeout\n");
BWN_RF_MASK(mac, B2055_CAL_LPOCTL, 0xFF7F);
bwn_switch_channel(mac, bwn_get_chan(mac));
BWN_RF_WRITE(mac, B2055_C1_RX_BB_LPF, 0x9);
BWN_RF_WRITE(mac, B2055_C2_RX_BB_LPF, 0x9);
BWN_RF_WRITE(mac, B2055_C1_RX_BB_MIDACHP, 0x83);
BWN_RF_WRITE(mac, B2055_C2_RX_BB_MIDACHP, 0x83);
BWN_RF_SETMASK(mac, B2055_C1_LNA_GAINBST, 0xFFF8, 0x6);
BWN_RF_SETMASK(mac, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6);
if (!nphy->gain_boost) {
BWN_RF_SET(mac, B2055_C1_RX_RFSPC1, 0x2);
BWN_RF_SET(mac, B2055_C2_RX_RFSPC1, 0x2);
} else {
BWN_RF_MASK(mac, B2055_C1_RX_RFSPC1, 0xFFFD);
BWN_RF_MASK(mac, B2055_C2_RX_RFSPC1, 0xFFFD);
}
DELAY(2);
}
/*
* Initialize a Broadcom 2055 N-radio
* http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
*/
static void bwn_radio_init2055(struct bwn_mac *mac)
{
bwn_radio_init2055_pre(mac);
if (mac->mac_status < BWN_MAC_STATUS_INITED) {
/* Follow wl, not specs. Do not force uploading all regs */
b2055_upload_inittab(mac, 0, 0);
} else {
bool ghz5 = bwn_current_band(mac) == BWN_BAND_5G;
b2055_upload_inittab(mac, ghz5, 0);
}
bwn_radio_init2055_post(mac);
}
/**************************************************
* Samples
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
static int bwn_nphy_load_samples(struct bwn_mac *mac,
struct bwn_c32 *samples, uint16_t len) {
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t i;
uint32_t *data;
- data = mallocarray(len, sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
+ data = malloc(len * sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
if (!data) {
BWN_ERRPRINTF(mac->mac_sc, "allocation for samples loading failed\n");
return -ENOMEM;
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
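/* Pack each sample into a 32-bit table word; note that "& 0x3FF << 10" parses as "& (0x3FF << 10)", so the I component is masked rather than shifted, while Q lands in bits 0..9. */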
for (i = 0; i < len; i++) {
data[i] = (samples[i].i & 0x3FF << 10);
data[i] |= samples[i].q & 0x3FF;
}
bwn_ntab_write_bulk(mac, BWN_NTAB32(17, 0), len, data);
free(data, M_DEVBUF);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
return 0;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
static uint16_t bwn_nphy_gen_load_samples(struct bwn_mac *mac, uint32_t freq, uint16_t max,
bool test)
{
int i;
uint16_t bw, len, rot, angle;
struct bwn_c32 *samples;
bw = bwn_is_40mhz(mac) ? 40 : 20;
len = bw << 3;
if (test) {
if (BWN_PHY_READ(mac, BWN_NPHY_BBCFG) & BWN_NPHY_BBCFG_RSTRX)
bw = 82;
else
bw = 80;
if (bwn_is_40mhz(mac))
bw <<= 1;
len = bw << 1;
}
- samples = mallocarray(len, sizeof(struct bwn_c32), M_DEVBUF,
- M_NOWAIT | M_ZERO);
+ samples = malloc(len * sizeof(struct bwn_c32), M_DEVBUF, M_NOWAIT | M_ZERO);
if (!samples) {
BWN_ERRPRINTF(mac->mac_sc, "allocation for samples generation failed\n");
return 0;
}
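/* Fixed-point phase increment per sample for the requested tone; the CORDIC below converts the running angle into I/Q values scaled by max. */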
rot = (((freq * 36) / bw) << 16) / 100;
angle = 0;
for (i = 0; i < len; i++) {
samples[i] = bwn_cordic(angle);
angle += rot;
samples[i].q = CORDIC_CONVERT(samples[i].q * max);
samples[i].i = CORDIC_CONVERT(samples[i].i * max);
}
i = bwn_nphy_load_samples(mac, samples, len);
free(samples, M_DEVBUF);
return (i < 0) ? 0 : len;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
static void bwn_nphy_run_samples(struct bwn_mac *mac, uint16_t samps, uint16_t loops,
uint16_t wait, bool iqmode, bool dac_test,
bool modify_bbmult)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
int i;
uint16_t seq_mode;
uint32_t tmp;
bwn_nphy_stay_in_carrier_search(mac, true);
if (phy->rev >= 7) {
bool lpf_bw3, lpf_bw4;
lpf_bw3 = BWN_PHY_READ(mac, BWN_NPHY_REV7_RF_CTL_OVER3) & 0x80;
lpf_bw4 = BWN_PHY_READ(mac, BWN_NPHY_REV7_RF_CTL_OVER4) & 0x80;
if (lpf_bw3 || lpf_bw4) {
/* TODO */
} else {
uint16_t value = bwn_nphy_read_lpf_ctl(mac, 0);
if (phy->rev >= 19)
bwn_nphy_rf_ctl_override_rev19(mac, 0x80, value,
0, false, 1);
else
bwn_nphy_rf_ctl_override_rev7(mac, 0x80, value,
0, false, 1);
nphy->lpf_bw_overrode_for_sample_play = true;
}
}
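/* Save the current baseband multiplier once (bit 31 marks the cache valid) and optionally substitute a value suited to sample playback. */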
if ((nphy->bb_mult_save & 0x80000000) == 0) {
tmp = bwn_ntab_read(mac, BWN_NTAB16(15, 87));
nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
}
if (modify_bbmult) {
tmp = !bwn_is_40mhz(mac) ? 0x6464 : 0x4747;
bwn_ntab_write(mac, BWN_NTAB16(15, 87), tmp);
}
BWN_PHY_WRITE(mac, BWN_NPHY_SAMP_DEPCNT, (samps - 1));
if (loops != 0xFFFF)
BWN_PHY_WRITE(mac, BWN_NPHY_SAMP_LOOPCNT, (loops - 1));
else
BWN_PHY_WRITE(mac, BWN_NPHY_SAMP_LOOPCNT, loops);
BWN_PHY_WRITE(mac, BWN_NPHY_SAMP_WAITCNT, wait);
seq_mode = BWN_PHY_READ(mac, BWN_NPHY_RFSEQMODE);
BWN_PHY_SET(mac, BWN_NPHY_RFSEQMODE, BWN_NPHY_RFSEQMODE_CAOVER);
if (iqmode) {
BWN_PHY_MASK(mac, BWN_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
BWN_PHY_SET(mac, BWN_NPHY_IQLOCAL_CMDGCTL, 0x8000);
} else {
tmp = dac_test ? 5 : 1;
BWN_PHY_WRITE(mac, BWN_NPHY_SAMP_CMD, tmp);
}
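/* Poll the RF sequencer for up to ~1ms; i remains nonzero if it never went idle. */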
for (i = 0; i < 100; i++) {
if (!(BWN_PHY_READ(mac, BWN_NPHY_RFSEQST) & 1)) {
i = 0;
break;
}
DELAY(10);
}
if (i)
BWN_ERRPRINTF(mac->mac_sc, "run samples timeout\n");
BWN_PHY_WRITE(mac, BWN_NPHY_RFSEQMODE, seq_mode);
bwn_nphy_stay_in_carrier_search(mac, false);
}
/**************************************************
* RSSI
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
static void bwn_nphy_scale_offset_rssi(struct bwn_mac *mac, uint16_t scale,
int8_t offset, uint8_t core,
enum n_rail_type rail,
enum n_rssi_type rssi_type)
{
uint16_t tmp;
bool core1or5 = (core == 1) || (core == 5);
bool core2or5 = (core == 2) || (core == 5);
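/* Clamp the signed offset to 6 bits, then pack scale (bits 8..13) and offset (bits 0..5) into a single register value. */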
offset = bwn_clamp_val(offset, -32, 31);
tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
switch (rssi_type) {
case N_RSSI_NB:
if (core1or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_RSSI_Z, tmp);
if (core1or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
if (core2or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_RSSI_Z, tmp);
if (core2or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
break;
case N_RSSI_W1:
if (core1or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_RSSI_X, tmp);
if (core1or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_RSSI_X, tmp);
if (core2or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_RSSI_X, tmp);
if (core2or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_RSSI_X, tmp);
break;
case N_RSSI_W2:
if (core1or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_RSSI_Y, tmp);
if (core1or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
if (core2or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_RSSI_Y, tmp);
if (core2or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
break;
case N_RSSI_TBD:
if (core1or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_TBD, tmp);
if (core1or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_TBD, tmp);
if (core2or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_TBD, tmp);
if (core2or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_TBD, tmp);
break;
case N_RSSI_IQ:
if (core1or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_PWRDET, tmp);
if (core1or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_PWRDET, tmp);
if (core2or5 && rail == N_RAIL_I)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_PWRDET, tmp);
if (core2or5 && rail == N_RAIL_Q)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_PWRDET, tmp);
break;
case N_RSSI_TSSI_2G:
if (core1or5)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_TSSI, tmp);
if (core2or5)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_TSSI, tmp);
break;
case N_RSSI_TSSI_5G:
if (core1or5)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_TSSI, tmp);
if (core2or5)
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_TSSI, tmp);
break;
}
}
static void bwn_nphy_rssi_select_rev19(struct bwn_mac *mac, uint8_t code,
enum n_rssi_type rssi_type)
{
/* TODO */
}
static void bwn_nphy_rev3_rssi_select(struct bwn_mac *mac, uint8_t code,
enum n_rssi_type rssi_type)
{
uint8_t i;
uint16_t reg, val;
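/* code 0 clears the RSSI-related overrides on both cores; code 1 or 2 programs them for the matching core only. */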
if (code == 0) {
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER1, 0xFDFF);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER, 0xFDFF);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C1, 0xFCFF);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C2, 0xFCFF);
BWN_PHY_MASK(mac, BWN_NPHY_TXF_40CO_B1S0, 0xFFDF);
BWN_PHY_MASK(mac, BWN_NPHY_TXF_40CO_B32S1, 0xFFDF);
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3);
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3);
} else {
for (i = 0; i < 2; i++) {
if ((code == 1 && i == 1) || (code == 2 && !i))
continue;
reg = (i == 0) ?
BWN_NPHY_AFECTL_OVER1 : BWN_NPHY_AFECTL_OVER;
BWN_PHY_SETMASK(mac, reg, 0xFDFF, 0x0200);
if (rssi_type == N_RSSI_W1 ||
rssi_type == N_RSSI_W2 ||
rssi_type == N_RSSI_NB) {
reg = (i == 0) ?
BWN_NPHY_AFECTL_C1 :
BWN_NPHY_AFECTL_C2;
BWN_PHY_SETMASK(mac, reg, 0xFCFF, 0);
reg = (i == 0) ?
BWN_NPHY_RFCTL_LUT_TRSW_UP1 :
BWN_NPHY_RFCTL_LUT_TRSW_UP2;
BWN_PHY_SETMASK(mac, reg, 0xFFC3, 0);
if (rssi_type == N_RSSI_W1)
val = (bwn_current_band(mac) == BWN_BAND_5G) ? 4 : 8;
else if (rssi_type == N_RSSI_W2)
val = 16;
else
val = 32;
BWN_PHY_SET(mac, reg, val);
reg = (i == 0) ?
BWN_NPHY_TXF_40CO_B1S0 :
BWN_NPHY_TXF_40CO_B32S1;
BWN_PHY_SET(mac, reg, 0x0020);
} else {
if (rssi_type == N_RSSI_TBD)
val = 0x0100;
else if (rssi_type == N_RSSI_IQ)
val = 0x0200;
else
val = 0x0300;
reg = (i == 0) ?
BWN_NPHY_AFECTL_C1 :
BWN_NPHY_AFECTL_C2;
BWN_PHY_SETMASK(mac, reg, 0xFCFF, val);
BWN_PHY_SETMASK(mac, reg, 0xF3FF, val << 2);
if (rssi_type != N_RSSI_IQ &&
rssi_type != N_RSSI_TBD) {
bwn_band_t band =
bwn_current_band(mac);
if (mac->mac_phy.rev < 7) {
if (bwn_nphy_ipa(mac))
val = (band == BWN_BAND_5G) ? 0xC : 0xE;
else
val = 0x11;
reg = (i == 0) ? B2056_TX0 : B2056_TX1;
reg |= B2056_TX_TX_SSI_MUX;
BWN_RF_WRITE(mac, reg, val);
}
reg = (i == 0) ?
BWN_NPHY_AFECTL_OVER1 :
BWN_NPHY_AFECTL_OVER;
BWN_PHY_SET(mac, reg, 0x0200);
}
}
}
}
}
static void bwn_nphy_rev2_rssi_select(struct bwn_mac *mac, uint8_t code,
enum n_rssi_type rssi_type)
{
uint16_t val;
bool rssi_w1_w2_nb = false;
switch (rssi_type) {
case N_RSSI_W1:
case N_RSSI_W2:
case N_RSSI_NB:
val = 0;
rssi_w1_w2_nb = true;
break;
case N_RSSI_TBD:
val = 1;
break;
case N_RSSI_IQ:
val = 2;
break;
default:
val = 3;
}
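/* Replicate the 2-bit mux selection into bits 12..13 and 14..15 of the AFE control words. */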
val = (val << 12) | (val << 14);
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C1, 0x0FFF, val);
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C2, 0x0FFF, val);
if (rssi_w1_w2_nb) {
BWN_PHY_SETMASK(mac, BWN_NPHY_RFCTL_RSSIO1, 0xFFCF,
(rssi_type + 1) << 4);
BWN_PHY_SETMASK(mac, BWN_NPHY_RFCTL_RSSIO2, 0xFFCF,
(rssi_type + 1) << 4);
}
if (code == 0) {
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER, ~0x3000);
if (rssi_w1_w2_nb) {
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD,
~(BWN_NPHY_RFCTL_CMD_RXEN |
BWN_NPHY_RFCTL_CMD_CORESEL));
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_OVER,
~(0x1 << 12 |
0x1 << 5 |
0x1 << 1 |
0x1));
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD,
~BWN_NPHY_RFCTL_CMD_START);
DELAY(20);
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_OVER, ~0x1);
}
} else {
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x3000);
if (rssi_w1_w2_nb) {
BWN_PHY_SETMASK(mac, BWN_NPHY_RFCTL_CMD,
~(BWN_NPHY_RFCTL_CMD_RXEN |
BWN_NPHY_RFCTL_CMD_CORESEL),
(BWN_NPHY_RFCTL_CMD_RXEN |
code << BWN_NPHY_RFCTL_CMD_CORESEL_SHIFT));
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_OVER,
(0x1 << 12 |
0x1 << 5 |
0x1 << 1 |
0x1));
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_CMD_START);
DELAY(20);
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_OVER, ~0x1);
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
static void bwn_nphy_rssi_select(struct bwn_mac *mac, uint8_t code,
enum n_rssi_type type)
{
if (mac->mac_phy.rev >= 19)
bwn_nphy_rssi_select_rev19(mac, code, type);
else if (mac->mac_phy.rev >= 3)
bwn_nphy_rev3_rssi_select(mac, code, type);
else
bwn_nphy_rev2_rssi_select(mac, code, type);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
static void bwn_nphy_set_rssi_2055_vcm(struct bwn_mac *mac,
enum n_rssi_type rssi_type, uint8_t *buf)
{
int i;
for (i = 0; i < 2; i++) {
if (rssi_type == N_RSSI_NB) {
if (i == 0) {
BWN_RF_SETMASK(mac, B2055_C1_B0NB_RSSIVCM,
0xFC, buf[0]);
BWN_RF_SETMASK(mac, B2055_C1_RX_BB_RSSICTL5,
0xFC, buf[1]);
} else {
BWN_RF_SETMASK(mac, B2055_C2_B0NB_RSSIVCM,
0xFC, buf[2 * i]);
BWN_RF_SETMASK(mac, B2055_C2_RX_BB_RSSICTL5,
0xFC, buf[2 * i + 1]);
}
} else {
if (i == 0)
BWN_RF_SETMASK(mac, B2055_C1_RX_BB_RSSICTL5,
0xF3, buf[0] << 2);
else
BWN_RF_SETMASK(mac, B2055_C2_RX_BB_RSSICTL5,
0xF3, buf[2 * i + 1] << 2);
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
static int bwn_nphy_poll_rssi(struct bwn_mac *mac, enum n_rssi_type rssi_type,
int32_t *buf, uint8_t nsamp)
{
int i;
int out;
uint16_t save_regs_phy[9];
uint16_t s[2];
/* TODO: rev7+ is treated like rev3+, what about rev19+? */
if (mac->mac_phy.rev >= 3) {
save_regs_phy[0] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C1);
save_regs_phy[1] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C2);
save_regs_phy[2] = BWN_PHY_READ(mac,
BWN_NPHY_RFCTL_LUT_TRSW_UP1);
save_regs_phy[3] = BWN_PHY_READ(mac,
BWN_NPHY_RFCTL_LUT_TRSW_UP2);
save_regs_phy[4] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER1);
save_regs_phy[5] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER);
save_regs_phy[6] = BWN_PHY_READ(mac, BWN_NPHY_TXF_40CO_B1S0);
save_regs_phy[7] = BWN_PHY_READ(mac, BWN_NPHY_TXF_40CO_B32S1);
save_regs_phy[8] = 0;
} else {
save_regs_phy[0] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C1);
save_regs_phy[1] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C2);
save_regs_phy[2] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER);
save_regs_phy[3] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_CMD);
save_regs_phy[4] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_OVER);
save_regs_phy[5] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_RSSIO1);
save_regs_phy[6] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_RSSIO2);
save_regs_phy[7] = 0;
save_regs_phy[8] = 0;
}
bwn_nphy_rssi_select(mac, 5, rssi_type);
if (mac->mac_phy.rev < 2) {
save_regs_phy[8] = BWN_PHY_READ(mac, BWN_NPHY_GPIO_SEL);
BWN_PHY_WRITE(mac, BWN_NPHY_GPIO_SEL, 5);
}
for (i = 0; i < 4; i++)
buf[i] = 0;
for (i = 0; i < nsamp; i++) {
if (mac->mac_phy.rev < 2) {
s[0] = BWN_PHY_READ(mac, BWN_NPHY_GPIO_LOOUT);
s[1] = BWN_PHY_READ(mac, BWN_NPHY_GPIO_HIOUT);
} else {
s[0] = BWN_PHY_READ(mac, BWN_NPHY_RSSI1);
s[1] = BWN_PHY_READ(mac, BWN_NPHY_RSSI2);
}
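/* Each status word carries two 6-bit samples; the shift pair sign-extends them before accumulating per core/rail. */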
buf[0] += ((int8_t)((s[0] & 0x3F) << 2)) >> 2;
buf[1] += ((int8_t)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
buf[2] += ((int8_t)((s[1] & 0x3F) << 2)) >> 2;
buf[3] += ((int8_t)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
}
out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
(buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);
if (mac->mac_phy.rev < 2)
BWN_PHY_WRITE(mac, BWN_NPHY_GPIO_SEL, save_regs_phy[8]);
if (mac->mac_phy.rev >= 3) {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C1, save_regs_phy[0]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C2, save_regs_phy[1]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP1,
save_regs_phy[2]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP2,
save_regs_phy[3]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER1, save_regs_phy[4]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, save_regs_phy[5]);
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C1, save_regs_phy[0]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C2, save_regs_phy[1]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, save_regs_phy[2]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_CMD, save_regs_phy[3]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_OVER, save_regs_phy[4]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_RSSIO1, save_regs_phy[5]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_RSSIO2, save_regs_phy[6]);
}
return out;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
static void bwn_nphy_rev3_rssi_cal(struct bwn_mac *mac)
{
//struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t saved_regs_phy_rfctl[2];
uint16_t saved_regs_phy[22];
uint16_t regs_to_store_rev3[] = {
BWN_NPHY_AFECTL_OVER1, BWN_NPHY_AFECTL_OVER,
BWN_NPHY_AFECTL_C1, BWN_NPHY_AFECTL_C2,
BWN_NPHY_TXF_40CO_B1S1, BWN_NPHY_RFCTL_OVER,
BWN_NPHY_TXF_40CO_B1S0, BWN_NPHY_TXF_40CO_B32S1,
BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_LUT_TRSW_UP1, BWN_NPHY_RFCTL_LUT_TRSW_UP2,
BWN_NPHY_RFCTL_RSSIO1, BWN_NPHY_RFCTL_RSSIO2
};
uint16_t regs_to_store_rev7[] = {
BWN_NPHY_AFECTL_OVER1, BWN_NPHY_AFECTL_OVER,
BWN_NPHY_AFECTL_C1, BWN_NPHY_AFECTL_C2,
BWN_NPHY_TXF_40CO_B1S1, BWN_NPHY_RFCTL_OVER,
BWN_NPHY_REV7_RF_CTL_OVER3, BWN_NPHY_REV7_RF_CTL_OVER4,
BWN_NPHY_REV7_RF_CTL_OVER5, BWN_NPHY_REV7_RF_CTL_OVER6,
0x2ff,
BWN_NPHY_TXF_40CO_B1S0, BWN_NPHY_TXF_40CO_B32S1,
BWN_NPHY_RFCTL_CMD,
BWN_NPHY_RFCTL_LUT_TRSW_UP1, BWN_NPHY_RFCTL_LUT_TRSW_UP2,
BWN_NPHY_REV7_RF_CTL_MISC_REG3, BWN_NPHY_REV7_RF_CTL_MISC_REG4,
BWN_NPHY_REV7_RF_CTL_MISC_REG5, BWN_NPHY_REV7_RF_CTL_MISC_REG6,
BWN_NPHY_RFCTL_RSSIO1, BWN_NPHY_RFCTL_RSSIO2
};
uint16_t *regs_to_store;
int regs_amount;
uint16_t class;
uint16_t clip_state[2];
uint16_t clip_off[2] = { 0xFFFF, 0xFFFF };
uint8_t vcm_final = 0;
int32_t offset[4];
int32_t results[8][4] = { };
int32_t results_min[4] = { };
int32_t poll_results[4] = { };
uint16_t *rssical_radio_regs = NULL;
uint16_t *rssical_phy_regs = NULL;
uint16_t r; /* routing */
uint8_t rx_core_state;
int core, i, j, vcm;
if (mac->mac_phy.rev >= 7) {
regs_to_store = regs_to_store_rev7;
regs_amount = nitems(regs_to_store_rev7);
} else {
regs_to_store = regs_to_store_rev3;
regs_amount = nitems(regs_to_store_rev3);
}
KASSERT((regs_amount <= nitems(saved_regs_phy)),
("%s: reg_amount (%d) too large\n",
__func__,
regs_amount));
class = bwn_nphy_classifier(mac, 0, 0);
bwn_nphy_classifier(mac, 7, 4);
bwn_nphy_read_clip_detection(mac, clip_state);
bwn_nphy_write_clip_detection(mac, clip_off);
saved_regs_phy_rfctl[0] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC1);
saved_regs_phy_rfctl[1] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC2);
for (i = 0; i < regs_amount; i++)
saved_regs_phy[i] = BWN_PHY_READ(mac, regs_to_store[i]);
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_OFF, 0, 7);
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_TRSW, 1, 7);
if (mac->mac_phy.rev >= 7) {
bwn_nphy_rf_ctl_override_one_to_many(mac,
N_RF_CTL_OVER_CMD_RXRF_PU,
0, 0, false);
bwn_nphy_rf_ctl_override_one_to_many(mac,
N_RF_CTL_OVER_CMD_RX_PU,
1, 0, false);
bwn_nphy_rf_ctl_override_rev7(mac, 0x80, 1, 0, false, 0);
bwn_nphy_rf_ctl_override_rev7(mac, 0x40, 1, 0, false, 0);
if (bwn_current_band(mac) == BWN_BAND_5G) {
bwn_nphy_rf_ctl_override_rev7(mac, 0x20, 0, 0, false,
0);
bwn_nphy_rf_ctl_override_rev7(mac, 0x10, 1, 0, false,
0);
} else {
bwn_nphy_rf_ctl_override_rev7(mac, 0x10, 0, 0, false,
0);
bwn_nphy_rf_ctl_override_rev7(mac, 0x20, 1, 0, false,
0);
}
} else {
bwn_nphy_rf_ctl_override(mac, 0x1, 0, 0, false);
bwn_nphy_rf_ctl_override(mac, 0x2, 1, 0, false);
bwn_nphy_rf_ctl_override(mac, 0x80, 1, 0, false);
bwn_nphy_rf_ctl_override(mac, 0x40, 1, 0, false);
if (bwn_current_band(mac) == BWN_BAND_5G) {
bwn_nphy_rf_ctl_override(mac, 0x20, 0, 0, false);
bwn_nphy_rf_ctl_override(mac, 0x10, 1, 0, false);
} else {
bwn_nphy_rf_ctl_override(mac, 0x10, 0, 0, false);
bwn_nphy_rf_ctl_override(mac, 0x20, 1, 0, false);
}
}
rx_core_state = bwn_nphy_get_rx_core_state(mac);
for (core = 0; core < 2; core++) {
if (!(rx_core_state & (1 << core)))
continue;
r = core ? B2056_RX1 : B2056_RX0;
bwn_nphy_scale_offset_rssi(mac, 0, 0, core + 1, N_RAIL_I,
N_RSSI_NB);
bwn_nphy_scale_offset_rssi(mac, 0, 0, core + 1, N_RAIL_Q,
N_RSSI_NB);
/* Grab RSSI results for every possible VCM */
for (vcm = 0; vcm < 8; vcm++) {
if (mac->mac_phy.rev >= 7)
BWN_RF_SETMASK(mac,
core ? R2057_NB_MASTER_CORE1 :
R2057_NB_MASTER_CORE0,
~R2057_VCM_MASK, vcm);
else
BWN_RF_SETMASK(mac, r | B2056_RX_RSSI_MISC,
0xE3, vcm << 2);
bwn_nphy_poll_rssi(mac, N_RSSI_NB, results[vcm], 8);
}
/* Find out which VCM got the best results */
for (i = 0; i < 4; i += 2) {
int32_t currd;
int32_t mind = 0x100000;
int32_t minpoll = 249;
uint8_t minvcm = 0;
if (2 * core != i)
continue;
for (vcm = 0; vcm < 8; vcm++) {
currd = results[vcm][i] * results[vcm][i] +
results[vcm][i + 1] * results[vcm][i];
if (currd < mind) {
mind = currd;
minvcm = vcm;
}
if (results[vcm][i] < minpoll)
minpoll = results[vcm][i];
}
vcm_final = minvcm;
results_min[i] = minpoll;
}
/* Select the best VCM */
if (mac->mac_phy.rev >= 7)
BWN_RF_SETMASK(mac,
core ? R2057_NB_MASTER_CORE1 :
R2057_NB_MASTER_CORE0,
~R2057_VCM_MASK, vcm);
else
BWN_RF_SETMASK(mac, r | B2056_RX_RSSI_MISC,
0xE3, vcm_final << 2);
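/* Convert the poll results for the chosen VCM into per-rail offsets, rounded to the nearest 1/8. */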
for (i = 0; i < 4; i++) {
if (core != i / 2)
continue;
offset[i] = -results[vcm_final][i];
if (offset[i] < 0)
offset[i] = -((abs(offset[i]) + 4) / 8);
else
offset[i] = (offset[i] + 4) / 8;
if (results_min[i] == 248)
offset[i] = -32;
bwn_nphy_scale_offset_rssi(mac, 0, offset[i],
(i / 2 == 0) ? 1 : 2,
(i % 2 == 0) ? N_RAIL_I : N_RAIL_Q,
N_RSSI_NB);
}
}
for (core = 0; core < 2; core++) {
if (!(rx_core_state & (1 << core)))
continue;
for (i = 0; i < 2; i++) {
bwn_nphy_scale_offset_rssi(mac, 0, 0, core + 1,
N_RAIL_I, i);
bwn_nphy_scale_offset_rssi(mac, 0, 0, core + 1,
N_RAIL_Q, i);
bwn_nphy_poll_rssi(mac, i, poll_results, 8);
for (j = 0; j < 4; j++) {
if (j / 2 == core) {
offset[j] = 232 - poll_results[j];
if (offset[j] < 0)
offset[j] = -(abs(offset[j] + 4) / 8);
else
offset[j] = (offset[j] + 4) / 8;
bwn_nphy_scale_offset_rssi(mac, 0,
offset[2 * core], core + 1, j % 2, i);
}
}
}
}
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, saved_regs_phy_rfctl[0]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, saved_regs_phy_rfctl[1]);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
BWN_PHY_SET(mac, BWN_NPHY_TXF_40CO_B1S1, 0x1);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD, BWN_NPHY_RFCTL_CMD_START);
BWN_PHY_MASK(mac, BWN_NPHY_TXF_40CO_B1S1, ~0x1);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_OVER, 0x1);
BWN_PHY_SET(mac, BWN_NPHY_RFCTL_CMD, BWN_NPHY_RFCTL_CMD_RXTX);
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_OVER, ~0x1);
for (i = 0; i < regs_amount; i++)
BWN_PHY_WRITE(mac, regs_to_store[i], saved_regs_phy[i]);
/* Store for future configuration */
if (bwn_current_band(mac) == BWN_BAND_2G) {
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
} else {
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
}
if (mac->mac_phy.rev >= 7) {
rssical_radio_regs[0] = BWN_RF_READ(mac,
R2057_NB_MASTER_CORE0);
rssical_radio_regs[1] = BWN_RF_READ(mac,
R2057_NB_MASTER_CORE1);
} else {
rssical_radio_regs[0] = BWN_RF_READ(mac, B2056_RX0 |
B2056_RX_RSSI_MISC);
rssical_radio_regs[1] = BWN_RF_READ(mac, B2056_RX1 |
B2056_RX_RSSI_MISC);
}
rssical_phy_regs[0] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_0I_RSSI_Z);
rssical_phy_regs[1] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_0Q_RSSI_Z);
rssical_phy_regs[2] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_1I_RSSI_Z);
rssical_phy_regs[3] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_1Q_RSSI_Z);
rssical_phy_regs[4] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_0I_RSSI_X);
rssical_phy_regs[5] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_0Q_RSSI_X);
rssical_phy_regs[6] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_1I_RSSI_X);
rssical_phy_regs[7] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_1Q_RSSI_X);
rssical_phy_regs[8] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_0I_RSSI_Y);
rssical_phy_regs[9] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_0Q_RSSI_Y);
rssical_phy_regs[10] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_1I_RSSI_Y);
rssical_phy_regs[11] = BWN_PHY_READ(mac, BWN_NPHY_RSSIMC_1Q_RSSI_Y);
/* Remember which channel this configuration was stored for */
if (bwn_current_band(mac) == BWN_BAND_2G)
nphy->rssical_chanspec_2G.center_freq = bwn_get_centre_freq(mac);
else
nphy->rssical_chanspec_5G.center_freq = bwn_get_centre_freq(mac);
/* End of calibration, restore configuration */
bwn_nphy_classifier(mac, 7, class);
bwn_nphy_write_clip_detection(mac, clip_state);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
static void bwn_nphy_rev2_rssi_cal(struct bwn_mac *mac, enum n_rssi_type type)
{
int i, j, vcm;
uint8_t state[4];
uint8_t code, val;
uint16_t class, override;
uint8_t regs_save_radio[2];
uint16_t regs_save_phy[2];
int32_t offset[4];
uint8_t core;
uint8_t rail;
uint16_t clip_state[2];
uint16_t clip_off[2] = { 0xFFFF, 0xFFFF };
int32_t results_min[4] = { };
uint8_t vcm_final[4] = { };
int32_t results[4][4] = { };
int32_t miniq[4][2] = { };
if (type == N_RSSI_NB) {
code = 0;
val = 6;
} else if (type == N_RSSI_W1 || type == N_RSSI_W2) {
code = 25;
val = 4;
} else {
BWN_ERRPRINTF(mac->mac_sc, "%s: RSSI type %d invalid\n",
__func__,
type);
return;
}
class = bwn_nphy_classifier(mac, 0, 0);
bwn_nphy_classifier(mac, 7, 4);
bwn_nphy_read_clip_detection(mac, clip_state);
bwn_nphy_write_clip_detection(mac, clip_off);
if (bwn_current_band(mac) == BWN_BAND_5G)
override = 0x140;
else
override = 0x110;
regs_save_phy[0] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC1);
regs_save_radio[0] = BWN_RF_READ(mac, B2055_C1_PD_RXTX);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, override);
BWN_RF_WRITE(mac, B2055_C1_PD_RXTX, val);
regs_save_phy[1] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC2);
regs_save_radio[1] = BWN_RF_READ(mac, B2055_C2_PD_RXTX);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, override);
BWN_RF_WRITE(mac, B2055_C2_PD_RXTX, val);
state[0] = BWN_RF_READ(mac, B2055_C1_PD_RSSIMISC) & 0x07;
state[1] = BWN_RF_READ(mac, B2055_C2_PD_RSSIMISC) & 0x07;
BWN_RF_MASK(mac, B2055_C1_PD_RSSIMISC, 0xF8);
BWN_RF_MASK(mac, B2055_C2_PD_RSSIMISC, 0xF8);
state[2] = BWN_RF_READ(mac, B2055_C1_SP_RSSI) & 0x07;
state[3] = BWN_RF_READ(mac, B2055_C2_SP_RSSI) & 0x07;
bwn_nphy_rssi_select(mac, 5, type);
bwn_nphy_scale_offset_rssi(mac, 0, 0, 5, N_RAIL_I, type);
bwn_nphy_scale_offset_rssi(mac, 0, 0, 5, N_RAIL_Q, type);
for (vcm = 0; vcm < 4; vcm++) {
uint8_t tmp[4];
for (j = 0; j < 4; j++)
tmp[j] = vcm;
if (type != N_RSSI_W2)
bwn_nphy_set_rssi_2055_vcm(mac, type, tmp);
bwn_nphy_poll_rssi(mac, type, results[vcm], 8);
if (type == N_RSSI_W1 || type == N_RSSI_W2)
for (j = 0; j < 2; j++)
miniq[vcm][j] = min(results[vcm][2 * j],
results[vcm][2 * j + 1]);
}
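/* For each rail, pick the VCM whose result lands closest to the target of code * 8. */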
for (i = 0; i < 4; i++) {
int32_t mind = 0x100000;
uint8_t minvcm = 0;
int32_t minpoll = 249;
int32_t currd;
for (vcm = 0; vcm < 4; vcm++) {
if (type == N_RSSI_NB)
currd = abs(results[vcm][i] - code * 8);
else
currd = abs(miniq[vcm][i / 2] - code * 8);
if (currd < mind) {
mind = currd;
minvcm = vcm;
}
if (results[vcm][i] < minpoll)
minpoll = results[vcm][i];
}
results_min[i] = minpoll;
vcm_final[i] = minvcm;
}
if (type != N_RSSI_W2)
bwn_nphy_set_rssi_2055_vcm(mac, type, vcm_final);
for (i = 0; i < 4; i++) {
offset[i] = (code * 8) - results[vcm_final[i]][i];
if (offset[i] < 0)
offset[i] = -((abs(offset[i]) + 4) / 8);
else
offset[i] = (offset[i] + 4) / 8;
if (results_min[i] == 248)
offset[i] = code - 32;
core = (i / 2) ? 2 : 1;
rail = (i % 2) ? N_RAIL_Q : N_RAIL_I;
bwn_nphy_scale_offset_rssi(mac, 0, offset[i], core, rail,
type);
}
BWN_RF_SETMASK(mac, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
BWN_RF_SETMASK(mac, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
switch (state[2]) {
case 1:
bwn_nphy_rssi_select(mac, 1, N_RSSI_NB);
break;
case 4:
bwn_nphy_rssi_select(mac, 1, N_RSSI_W1);
break;
case 2:
bwn_nphy_rssi_select(mac, 1, N_RSSI_W2);
break;
default:
bwn_nphy_rssi_select(mac, 1, N_RSSI_W2);
break;
}
switch (state[3]) {
case 1:
bwn_nphy_rssi_select(mac, 2, N_RSSI_NB);
break;
case 4:
bwn_nphy_rssi_select(mac, 2, N_RSSI_W1);
break;
default:
bwn_nphy_rssi_select(mac, 2, N_RSSI_W2);
break;
}
bwn_nphy_rssi_select(mac, 0, type);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, regs_save_phy[0]);
BWN_RF_WRITE(mac, B2055_C1_PD_RXTX, regs_save_radio[0]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, regs_save_phy[1]);
BWN_RF_WRITE(mac, B2055_C2_PD_RXTX, regs_save_radio[1]);
bwn_nphy_classifier(mac, 7, class);
bwn_nphy_write_clip_detection(mac, clip_state);
/* The specs don't mention a reset here, but it makes the wl and b43
 register dumps identical; wl really does seem to perform it */
bwn_nphy_reset_cca(mac);
}
/*
* RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
*/
static void bwn_nphy_rssi_cal(struct bwn_mac *mac)
{
if (mac->mac_phy.rev >= 19) {
/* TODO */
} else if (mac->mac_phy.rev >= 3) {
bwn_nphy_rev3_rssi_cal(mac);
} else {
bwn_nphy_rev2_rssi_cal(mac, N_RSSI_NB);
bwn_nphy_rev2_rssi_cal(mac, N_RSSI_W1);
bwn_nphy_rev2_rssi_cal(mac, N_RSSI_W2);
}
}
/**************************************************
* Workarounds
**************************************************/
static void bwn_nphy_gain_ctl_workarounds_rev19(struct bwn_mac *mac)
{
/* TODO */
}
static void bwn_nphy_gain_ctl_workarounds_rev7(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
switch (phy->rev) {
/* TODO */
}
}
static void bwn_nphy_gain_ctl_workarounds_rev3(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
bool ghz5;
bool ext_lna;
uint16_t rssi_gain;
struct bwn_nphy_gain_ctl_workaround_entry *e;
uint8_t lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
uint8_t lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
/* Prepare values */
ghz5 = BWN_PHY_READ(mac, BWN_NPHY_BANDCTL)
& BWN_NPHY_BANDCTL_5GHZ;
ext_lna = ghz5 ? siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_EXTLNA_5GHZ :
siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA;
e = bwn_nphy_get_gain_ctl_workaround_ent(mac, ghz5, ext_lna);
if (ghz5 && mac->mac_phy.rev >= 5)
rssi_gain = 0x90;
else
rssi_gain = 0x50;
BWN_PHY_SET(mac, BWN_NPHY_RXCTL, 0x0040);
/* Set Clip 2 detect */
BWN_PHY_SET(mac, BWN_NPHY_C1_CGAINI, BWN_NPHY_C1_CGAINI_CL2DETECT);
BWN_PHY_SET(mac, BWN_NPHY_C2_CGAINI, BWN_NPHY_C2_CGAINI_CL2DETECT);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
0x17);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
0x17);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_RSSI_GAIN,
rssi_gain);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_RSSI_GAIN,
rssi_gain);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
0x17);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
0x17);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
bwn_ntab_write_bulk(mac, BWN_NTAB8(0, 8), 4, e->lna1_gain);
bwn_ntab_write_bulk(mac, BWN_NTAB8(1, 8), 4, e->lna1_gain);
bwn_ntab_write_bulk(mac, BWN_NTAB8(0, 16), 4, e->lna2_gain);
bwn_ntab_write_bulk(mac, BWN_NTAB8(1, 16), 4, e->lna2_gain);
bwn_ntab_write_bulk(mac, BWN_NTAB8(0, 32), 10, e->gain_db);
bwn_ntab_write_bulk(mac, BWN_NTAB8(1, 32), 10, e->gain_db);
bwn_ntab_write_bulk(mac, BWN_NTAB8(2, 32), 10, e->gain_bits);
bwn_ntab_write_bulk(mac, BWN_NTAB8(3, 32), 10, e->gain_bits);
bwn_ntab_write_bulk(mac, BWN_NTAB8(0, 0x40), 6, lpf_gain);
bwn_ntab_write_bulk(mac, BWN_NTAB8(1, 0x40), 6, lpf_gain);
bwn_ntab_write_bulk(mac, BWN_NTAB8(2, 0x40), 6, lpf_bits);
bwn_ntab_write_bulk(mac, BWN_NTAB8(3, 0x40), 6, lpf_bits);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C1_INITGAIN_A, e->init_gain);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C2_INITGAIN_A, e->init_gain);
bwn_ntab_write_bulk(mac, BWN_NTAB16(7, 0x106), 2,
e->rfseq_init);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C1_CLIP_HIGAIN_A, e->cliphi_gain);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C2_CLIP_HIGAIN_A, e->cliphi_gain);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C1_CLIP_MEDGAIN_A, e->clipmd_gain);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C2_CLIP_MEDGAIN_A, e->clipmd_gain);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C1_CLIP_LOGAIN_A, e->cliplo_gain);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C2_CLIP_LOGAIN_A, e->cliplo_gain);
BWN_PHY_SETMASK(mac, BWN_NPHY_CRSMINPOWER0, 0xFF00, e->crsmin);
BWN_PHY_SETMASK(mac, BWN_NPHY_CRSMINPOWERL0, 0xFF00, e->crsminl);
BWN_PHY_SETMASK(mac, BWN_NPHY_CRSMINPOWERU0, 0xFF00, e->crsminu);
BWN_PHY_WRITE(mac, BWN_NPHY_C1_NBCLIPTHRES, e->nbclip);
BWN_PHY_WRITE(mac, BWN_NPHY_C2_NBCLIPTHRES, e->nbclip);
BWN_PHY_SETMASK(mac, BWN_NPHY_C1_CLIPWBTHRES,
~BWN_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
BWN_PHY_SETMASK(mac, BWN_NPHY_C2_CLIPWBTHRES,
~BWN_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
BWN_PHY_WRITE(mac, BWN_NPHY_CCK_SHIFTB_REF, 0x809C);
}
static void bwn_nphy_gain_ctl_workarounds_rev1_2(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t i, j;
uint8_t code;
uint16_t tmp;
uint8_t rfseq_events[3] = { 6, 8, 7 };
uint8_t rfseq_delays[3] = { 10, 30, 1 };
/* Set Clip 2 detect */
BWN_PHY_SET(mac, BWN_NPHY_C1_CGAINI, BWN_NPHY_C1_CGAINI_CL2DETECT);
BWN_PHY_SET(mac, BWN_NPHY_C2_CGAINI, BWN_NPHY_C2_CGAINI_CL2DETECT);
/* Set narrowband clip threshold */
BWN_PHY_WRITE(mac, BWN_NPHY_C1_NBCLIPTHRES, 0x84);
BWN_PHY_WRITE(mac, BWN_NPHY_C2_NBCLIPTHRES, 0x84);
if (!bwn_is_40mhz(mac)) {
/* Set dwell lengths */
BWN_PHY_WRITE(mac, BWN_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
BWN_PHY_WRITE(mac, BWN_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
BWN_PHY_WRITE(mac, BWN_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
BWN_PHY_WRITE(mac, BWN_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
}
/* Set wideband clip 2 threshold */
BWN_PHY_SETMASK(mac, BWN_NPHY_C1_CLIPWBTHRES,
~BWN_NPHY_C1_CLIPWBTHRES_CLIP2, 21);
BWN_PHY_SETMASK(mac, BWN_NPHY_C2_CLIPWBTHRES,
~BWN_NPHY_C2_CLIPWBTHRES_CLIP2, 21);
if (!bwn_is_40mhz(mac)) {
BWN_PHY_SETMASK(mac, BWN_NPHY_C1_CGAINI,
~BWN_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
BWN_PHY_SETMASK(mac, BWN_NPHY_C2_CGAINI,
~BWN_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
BWN_PHY_SETMASK(mac, BWN_NPHY_C1_CCK_CGAINI,
~BWN_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
BWN_PHY_SETMASK(mac, BWN_NPHY_C2_CCK_CGAINI,
~BWN_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
}
BWN_PHY_WRITE(mac, BWN_NPHY_CCK_SHIFTB_REF, 0x809C);
if (nphy->gain_boost) {
if (bwn_current_band(mac) == BWN_BAND_2G &&
bwn_is_40mhz(mac))
code = 4;
else
code = 5;
} else {
code = bwn_is_40mhz(mac) ? 6 : 7;
}
/* Set HPVGA2 index */
BWN_PHY_SETMASK(mac, BWN_NPHY_C1_INITGAIN, ~BWN_NPHY_C1_INITGAIN_HPVGA2,
code << BWN_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
BWN_PHY_SETMASK(mac, BWN_NPHY_C2_INITGAIN, ~BWN_NPHY_C2_INITGAIN_HPVGA2,
code << BWN_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR, 0x1D06);
/* the specs call for 2 loops, but wl does 4 */
for (i = 0; i < 4; i++)
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, (code << 8 | 0x7C));
bwn_nphy_adjust_lna_gain_table(mac);
if (nphy->elna_gain_config) {
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR, 0x0808);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x0);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x1);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x1);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x1);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR, 0x0C08);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x0);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x1);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x1);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0x1);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR, 0x1D06);
/* the specs call for 2 loops, but wl does 4 */
for (i = 0; i < 4; i++)
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO,
(code << 8 | 0x74));
}
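/* On PHY rev 2, fill four table ranges with a linear ramp (step 3 for the first two tables, step 1 for the rest). */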
if (mac->mac_phy.rev == 2) {
for (i = 0; i < 4; i++) {
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR,
(0x0400 * i) + 0x0020);
for (j = 0; j < 21; j++) {
tmp = j * (i < 2 ? 3 : 1);
BWN_PHY_WRITE(mac,
BWN_NPHY_TABLE_DATALO, tmp);
}
}
}
bwn_nphy_set_rf_sequence(mac, 5, rfseq_events, rfseq_delays, 3);
BWN_PHY_SETMASK(mac, BWN_NPHY_OVER_DGAIN1,
~BWN_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
0x5A << BWN_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
if (bwn_current_band(mac) == BWN_BAND_2G)
BWN_PHY_SETMASK(mac, BWN_PHY_N(0xC5D), 0xFF80, 4);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void bwn_nphy_gain_ctl_workarounds(struct bwn_mac *mac)
{
if (mac->mac_phy.rev >= 19)
bwn_nphy_gain_ctl_workarounds_rev19(mac);
else if (mac->mac_phy.rev >= 7)
bwn_nphy_gain_ctl_workarounds_rev7(mac);
else if (mac->mac_phy.rev >= 3)
bwn_nphy_gain_ctl_workarounds_rev3(mac);
else
bwn_nphy_gain_ctl_workarounds_rev1_2(mac);
}
static void bwn_nphy_workarounds_rev7plus(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
/* TX to RX */
uint8_t tx2rx_events[7] = { 4, 3, 5, 2, 1, 8, 31, };
uint8_t tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1, };
/* RX to TX */
uint8_t rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F };
uint8_t rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
static const uint16_t ntab7_15e_16e[] = { 0, 0x10f, 0x10f };
uint8_t ntab7_138_146[] = { 0x11, 0x11 };
uint8_t ntab7_133[] = { 0x77, 0x11, 0x11 };
uint16_t lpf_ofdm_20mhz[2], lpf_ofdm_40mhz[2], lpf_11b[2];
uint16_t bcap_val;
int16_t bcap_val_11b[2], bcap_val_11n_20[2], bcap_val_11n_40[2];
uint16_t scap_val;
int16_t scap_val_11b[2], scap_val_11n_20[2], scap_val_11n_40[2];
bool rccal_ovrd = false;
uint16_t bias, conv, filt;
uint32_t noise_tbl[2];
uint32_t tmp32;
uint8_t core;
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A0, 0x0125);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A1, 0x01b3);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A2, 0x0105);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B0, 0x016e);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B1, 0x00cd);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B2, 0x0020);
if (phy->rev == 7) {
BWN_PHY_SET(mac, BWN_NPHY_FINERX2_CGC, 0x10);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN0, 0xFF80, 0x0020);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN0, 0x80FF, 0x2700);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN1, 0xFF80, 0x002E);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN1, 0x80FF, 0x3300);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN2, 0xFF80, 0x0037);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN3, 0xFF80, 0x003C);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN4, 0xFF80, 0x003E);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN5, 0xFF80, 0x0040);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN5, 0x80FF, 0x4000);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN6, 0xFF80, 0x0040);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN6, 0x80FF, 0x4000);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN7, 0xFF80, 0x0040);
BWN_PHY_SETMASK(mac, BWN_NPHY_FREQGAIN7, 0x80FF, 0x4000);
}
if (phy->rev >= 16) {
BWN_PHY_WRITE(mac, BWN_NPHY_FORCEFRONT0, 0x7ff);
BWN_PHY_WRITE(mac, BWN_NPHY_FORCEFRONT1, 0x7ff);
} else if (phy->rev <= 8) {
BWN_PHY_WRITE(mac, BWN_NPHY_FORCEFRONT0, 0x1B0);
BWN_PHY_WRITE(mac, BWN_NPHY_FORCEFRONT1, 0x1B0);
}
if (phy->rev >= 16)
BWN_PHY_SETMASK(mac, BWN_NPHY_TXTAILCNT, ~0xFF, 0xa0);
else if (phy->rev >= 8)
BWN_PHY_SETMASK(mac, BWN_NPHY_TXTAILCNT, ~0xFF, 0x72);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x00), 2);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x10), 2);
tmp32 = bwn_ntab_read(mac, BWN_NTAB32(30, 0));
tmp32 &= 0xffffff;
bwn_ntab_write(mac, BWN_NTAB32(30, 0), tmp32);
bwn_ntab_write_bulk(mac, BWN_NTAB16(7, 0x15d), 3, ntab7_15e_16e);
bwn_ntab_write_bulk(mac, BWN_NTAB16(7, 0x16d), 3, ntab7_15e_16e);
bwn_nphy_set_rf_sequence(mac, 1, tx2rx_events, tx2rx_delays,
nitems(tx2rx_events));
if (bwn_nphy_ipa(mac))
bwn_nphy_set_rf_sequence(mac, 0, rx2tx_events_ipa,
rx2tx_delays_ipa, nitems(rx2tx_events_ipa));
BWN_PHY_SETMASK(mac, BWN_NPHY_EPS_OVERRIDEI_0, 0x3FFF, 0x4000);
BWN_PHY_SETMASK(mac, BWN_NPHY_EPS_OVERRIDEI_1, 0x3FFF, 0x4000);
for (core = 0; core < 2; core++) {
lpf_ofdm_20mhz[core] = bwn_nphy_read_lpf_ctl(mac, 0x154 + core * 0x10);
lpf_ofdm_40mhz[core] = bwn_nphy_read_lpf_ctl(mac, 0x159 + core * 0x10);
lpf_11b[core] = bwn_nphy_read_lpf_ctl(mac, 0x152 + core * 0x10);
}
bcap_val = BWN_RF_READ(mac, R2057_RCCAL_BCAP_VAL);
scap_val = BWN_RF_READ(mac, R2057_RCCAL_SCAP_VAL);
if (bwn_nphy_ipa(mac)) {
bool ghz2 = bwn_current_band(mac) == BWN_BAND_2G;
switch (phy->rf_rev) {
case 5:
/* For now, check for radio version 0 via the PHY revision */
if (phy->rev == 8 && bwn_is_40mhz(mac)) {
for (core = 0; core < 2; core++) {
scap_val_11b[core] = scap_val;
bcap_val_11b[core] = bcap_val;
scap_val_11n_20[core] = scap_val;
bcap_val_11n_20[core] = bcap_val;
scap_val_11n_40[core] = 0xc;
bcap_val_11n_40[core] = 0xc;
}
rccal_ovrd = true;
}
if (phy->rev == 9) {
/* TODO: Radio version 1 (e.g. BCM5357B0) */
}
break;
case 7:
case 8:
for (core = 0; core < 2; core++) {
scap_val_11b[core] = scap_val;
bcap_val_11b[core] = bcap_val;
lpf_ofdm_20mhz[core] = 4;
lpf_11b[core] = 1;
if (bwn_current_band(mac) == BWN_BAND_2G) {
scap_val_11n_20[core] = 0xc;
bcap_val_11n_20[core] = 0xc;
scap_val_11n_40[core] = 0xa;
bcap_val_11n_40[core] = 0xa;
} else {
scap_val_11n_20[core] = 0x14;
bcap_val_11n_20[core] = 0x14;
scap_val_11n_40[core] = 0xf;
bcap_val_11n_40[core] = 0xf;
}
}
rccal_ovrd = true;
break;
case 9:
for (core = 0; core < 2; core++) {
bcap_val_11b[core] = bcap_val;
scap_val_11b[core] = scap_val;
lpf_11b[core] = 1;
if (ghz2) {
bcap_val_11n_20[core] = bcap_val + 13;
scap_val_11n_20[core] = scap_val + 15;
} else {
bcap_val_11n_20[core] = bcap_val + 14;
scap_val_11n_20[core] = scap_val + 15;
}
lpf_ofdm_20mhz[core] = 4;
if (ghz2) {
bcap_val_11n_40[core] = bcap_val - 7;
scap_val_11n_40[core] = scap_val - 5;
} else {
bcap_val_11n_40[core] = bcap_val + 2;
scap_val_11n_40[core] = scap_val + 4;
}
lpf_ofdm_40mhz[core] = 4;
}
rccal_ovrd = true;
break;
case 14:
for (core = 0; core < 2; core++) {
bcap_val_11b[core] = bcap_val;
scap_val_11b[core] = scap_val;
lpf_11b[core] = 1;
}
bcap_val_11n_20[0] = bcap_val + 20;
scap_val_11n_20[0] = scap_val + 20;
lpf_ofdm_20mhz[0] = 3;
bcap_val_11n_20[1] = bcap_val + 16;
scap_val_11n_20[1] = scap_val + 16;
lpf_ofdm_20mhz[1] = 3;
bcap_val_11n_40[0] = bcap_val + 20;
scap_val_11n_40[0] = scap_val + 20;
lpf_ofdm_40mhz[0] = 4;
bcap_val_11n_40[1] = bcap_val + 10;
scap_val_11n_40[1] = scap_val + 10;
lpf_ofdm_40mhz[1] = 4;
rccal_ovrd = true;
break;
}
} else {
if (phy->rf_rev == 5) {
for (core = 0; core < 2; core++) {
lpf_ofdm_20mhz[core] = 1;
lpf_ofdm_40mhz[core] = 3;
scap_val_11b[core] = scap_val;
bcap_val_11b[core] = bcap_val;
scap_val_11n_20[core] = 0x11;
scap_val_11n_40[core] = 0x11;
bcap_val_11n_20[core] = 0x13;
bcap_val_11n_40[core] = 0x13;
}
rccal_ovrd = true;
}
}
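/* With the RC-cal override in effect, build the RX->TX LUT words: bit 13 extra, bcap in bits 8..12, scap in bits 3..7, LPF code in the low bits. */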
if (rccal_ovrd) {
uint16_t rx2tx_lut_20_11b[2], rx2tx_lut_20_11n[2], rx2tx_lut_40_11n[2];
uint8_t rx2tx_lut_extra = 1;
for (core = 0; core < 2; core++) {
bcap_val_11b[core] = bwn_clamp_val(bcap_val_11b[core], 0, 0x1f);
scap_val_11b[core] = bwn_clamp_val(scap_val_11b[core], 0, 0x1f);
bcap_val_11n_20[core] = bwn_clamp_val(bcap_val_11n_20[core], 0, 0x1f);
scap_val_11n_20[core] = bwn_clamp_val(scap_val_11n_20[core], 0, 0x1f);
bcap_val_11n_40[core] = bwn_clamp_val(bcap_val_11n_40[core], 0, 0x1f);
scap_val_11n_40[core] = bwn_clamp_val(scap_val_11n_40[core], 0, 0x1f);
rx2tx_lut_20_11b[core] = (rx2tx_lut_extra << 13) |
(bcap_val_11b[core] << 8) |
(scap_val_11b[core] << 3) |
lpf_11b[core];
rx2tx_lut_20_11n[core] = (rx2tx_lut_extra << 13) |
(bcap_val_11n_20[core] << 8) |
(scap_val_11n_20[core] << 3) |
lpf_ofdm_20mhz[core];
rx2tx_lut_40_11n[core] = (rx2tx_lut_extra << 13) |
(bcap_val_11n_40[core] << 8) |
(scap_val_11n_40[core] << 3) |
lpf_ofdm_40mhz[core];
}
for (core = 0; core < 2; core++) {
bwn_ntab_write(mac, BWN_NTAB16(7, 0x152 + core * 16),
rx2tx_lut_20_11b[core]);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x153 + core * 16),
rx2tx_lut_20_11n[core]);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x154 + core * 16),
rx2tx_lut_20_11n[core]);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x155 + core * 16),
rx2tx_lut_40_11n[core]);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x156 + core * 16),
rx2tx_lut_40_11n[core]);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x157 + core * 16),
rx2tx_lut_40_11n[core]);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x158 + core * 16),
rx2tx_lut_40_11n[core]);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x159 + core * 16),
rx2tx_lut_40_11n[core]);
}
}
BWN_PHY_WRITE(mac, 0x32F, 0x3);
if (phy->rf_rev == 4 || phy->rf_rev == 6)
bwn_nphy_rf_ctl_override_rev7(mac, 4, 1, 3, false, 0);
if (phy->rf_rev == 3 || phy->rf_rev == 4 || phy->rf_rev == 6) {
if (siba_sprom_get_rev(sc->sc_dev) &&
siba_sprom_get_bf2_hi(sc->sc_dev) & BWN_BFH2_IPALVLSHIFT_3P3) {
BWN_RF_WRITE(mac, 0x5, 0x05);
BWN_RF_WRITE(mac, 0x6, 0x30);
BWN_RF_WRITE(mac, 0x7, 0x00);
BWN_RF_SET(mac, 0x4f, 0x1);
BWN_RF_SET(mac, 0xd4, 0x1);
bias = 0x1f;
conv = 0x6f;
filt = 0xaa;
} else {
bias = 0x2b;
conv = 0x7f;
filt = 0xee;
}
if (bwn_current_band(mac) == BWN_BAND_2G) {
for (core = 0; core < 2; core++) {
if (core == 0) {
BWN_RF_WRITE(mac, 0x5F, bias);
BWN_RF_WRITE(mac, 0x64, conv);
BWN_RF_WRITE(mac, 0x66, filt);
} else {
BWN_RF_WRITE(mac, 0xE8, bias);
BWN_RF_WRITE(mac, 0xE9, conv);
BWN_RF_WRITE(mac, 0xEB, filt);
}
}
}
}
if (bwn_nphy_ipa(mac)) {
if (bwn_current_band(mac) == BWN_BAND_2G) {
if (phy->rf_rev == 3 || phy->rf_rev == 4 ||
phy->rf_rev == 6) {
for (core = 0; core < 2; core++) {
if (core == 0)
BWN_RF_WRITE(mac, 0x51,
0x7f);
else
BWN_RF_WRITE(mac, 0xd6,
0x7f);
}
}
switch (phy->rf_rev) {
case 3:
for (core = 0; core < 2; core++) {
if (core == 0) {
BWN_RF_WRITE(mac, 0x64,
0x13);
BWN_RF_WRITE(mac, 0x5F,
0x1F);
BWN_RF_WRITE(mac, 0x66,
0xEE);
BWN_RF_WRITE(mac, 0x59,
0x8A);
BWN_RF_WRITE(mac, 0x80,
0x3E);
} else {
BWN_RF_WRITE(mac, 0x69,
0x13);
BWN_RF_WRITE(mac, 0xE8,
0x1F);
BWN_RF_WRITE(mac, 0xEB,
0xEE);
BWN_RF_WRITE(mac, 0xDE,
0x8A);
BWN_RF_WRITE(mac, 0x105,
0x3E);
}
}
break;
case 7:
case 8:
if (!bwn_is_40mhz(mac)) {
BWN_RF_WRITE(mac, 0x5F, 0x14);
BWN_RF_WRITE(mac, 0xE8, 0x12);
} else {
BWN_RF_WRITE(mac, 0x5F, 0x16);
BWN_RF_WRITE(mac, 0xE8, 0x16);
}
break;
case 14:
for (core = 0; core < 2; core++) {
int o = core ? 0x85 : 0;
BWN_RF_WRITE(mac, o + R2057_IPA2G_CASCONV_CORE0, 0x13);
BWN_RF_WRITE(mac, o + R2057_TXMIX2G_TUNE_BOOST_PU_CORE0, 0x21);
BWN_RF_WRITE(mac, o + R2057_IPA2G_BIAS_FILTER_CORE0, 0xff);
BWN_RF_WRITE(mac, o + R2057_PAD2G_IDACS_CORE0, 0x88);
BWN_RF_WRITE(mac, o + R2057_PAD2G_TUNE_PUS_CORE0, 0x23);
BWN_RF_WRITE(mac, o + R2057_IPA2G_IMAIN_CORE0, 0x16);
BWN_RF_WRITE(mac, o + R2057_PAD_BIAS_FILTER_BWS_CORE0, 0x3e);
BWN_RF_WRITE(mac, o + R2057_BACKUP1_CORE0, 0x10);
}
break;
}
} else {
uint16_t freq = bwn_get_centre_freq(mac);
if ((freq >= 5180 && freq <= 5230) ||
(freq >= 5745 && freq <= 5805)) {
BWN_RF_WRITE(mac, 0x7D, 0xFF);
BWN_RF_WRITE(mac, 0xFE, 0xFF);
}
}
} else {
if (phy->rf_rev != 5) {
for (core = 0; core < 2; core++) {
if (core == 0) {
BWN_RF_WRITE(mac, 0x5c, 0x61);
BWN_RF_WRITE(mac, 0x51, 0x70);
} else {
BWN_RF_WRITE(mac, 0xe1, 0x61);
BWN_RF_WRITE(mac, 0xd6, 0x70);
}
}
}
}
if (phy->rf_rev == 4) {
bwn_ntab_write(mac, BWN_NTAB16(8, 0x05), 0x20);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x15), 0x20);
for (core = 0; core < 2; core++) {
if (core == 0) {
BWN_RF_WRITE(mac, 0x1a1, 0x00);
BWN_RF_WRITE(mac, 0x1a2, 0x3f);
BWN_RF_WRITE(mac, 0x1a6, 0x3f);
} else {
BWN_RF_WRITE(mac, 0x1a7, 0x00);
BWN_RF_WRITE(mac, 0x1ab, 0x3f);
BWN_RF_WRITE(mac, 0x1ac, 0x3f);
}
}
} else {
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_C1, 0x4);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER1, 0x4);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_C2, 0x4);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x4);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C1, ~0x1);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER1, 0x1);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C2, ~0x1);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x1);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x05), 0);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x15), 0);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C1, ~0x4);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER1, ~0x4);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C2, ~0x4);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER, ~0x4);
}
BWN_PHY_WRITE(mac, BWN_NPHY_ENDROP_TLEN, 0x2);
bwn_ntab_write(mac, BWN_NTAB32(16, 0x100), 20);
bwn_ntab_write_bulk(mac, BWN_NTAB8(7, 0x138), 2, ntab7_138_146);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x141), 0x77);
bwn_ntab_write_bulk(mac, BWN_NTAB8(7, 0x133), 3, ntab7_133);
bwn_ntab_write_bulk(mac, BWN_NTAB8(7, 0x146), 2, ntab7_138_146);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x123), 0x77);
bwn_ntab_write(mac, BWN_NTAB16(7, 0x12A), 0x77);
bwn_ntab_read_bulk(mac, BWN_NTAB32(16, 0x02), 1, noise_tbl);
noise_tbl[1] = bwn_is_40mhz(mac) ? 0x14D : 0x18D;
bwn_ntab_write_bulk(mac, BWN_NTAB32(16, 0x02), 2, noise_tbl);
bwn_ntab_read_bulk(mac, BWN_NTAB32(16, 0x7E), 1, noise_tbl);
noise_tbl[1] = bwn_is_40mhz(mac) ? 0x14D : 0x18D;
bwn_ntab_write_bulk(mac, BWN_NTAB32(16, 0x7E), 2, noise_tbl);
bwn_nphy_gain_ctl_workarounds(mac);
/* TODO
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x08), 4,
aux_adc_vmid_rev7_core0);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x18), 4,
aux_adc_vmid_rev7_core1);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x0C), 4,
aux_adc_gain_rev7);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x1C), 4,
aux_adc_gain_rev7);
*/
}
static void bwn_nphy_workarounds_rev3plus(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
/* TX to RX */
uint8_t tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
uint8_t tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
/* RX to TX */
uint8_t rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F };
uint8_t rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
uint8_t rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
uint8_t rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
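/* Vmid and gain rows written into PHY table 8 below, indexed by the SPROM power-detector range; rows 4 and 5 share the last entry. */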
uint16_t vmids[5][4] = {
{ 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
{ 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
{ 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
{ 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
{ 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
};
uint16_t gains[5][4] = {
{ 0x02, 0x02, 0x02, 0x00, }, /* 0 */
{ 0x02, 0x02, 0x02, 0x02, }, /* 1 */
{ 0x02, 0x02, 0x02, 0x04, }, /* 2 */
{ 0x02, 0x02, 0x02, 0x00, }, /* 3 */
{ 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
};
uint16_t *vmid, *gain;
uint8_t pdet_range;
uint16_t tmp16;
uint32_t tmp32;
BWN_PHY_WRITE(mac, BWN_NPHY_FORCEFRONT0, 0x1f8);
BWN_PHY_WRITE(mac, BWN_NPHY_FORCEFRONT1, 0x1f8);
tmp32 = bwn_ntab_read(mac, BWN_NTAB32(30, 0));
tmp32 &= 0xffffff;
bwn_ntab_write(mac, BWN_NTAB32(30, 0), tmp32);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A0, 0x0125);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A1, 0x01B3);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A2, 0x0105);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B0, 0x016E);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B1, 0x00CD);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B2, 0x0020);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C1_CLIP_LOGAIN_B, 0x000C);
BWN_PHY_WRITE(mac, BWN_NPHY_REV3_C2_CLIP_LOGAIN_B, 0x000C);
/* TX to RX */
bwn_nphy_set_rf_sequence(mac, 1, tx2rx_events, tx2rx_delays,
nitems(tx2rx_events));
/* RX to TX */
if (bwn_nphy_ipa(mac))
bwn_nphy_set_rf_sequence(mac, 0, rx2tx_events_ipa,
rx2tx_delays_ipa, nitems(rx2tx_events_ipa));
if (nphy->hw_phyrxchain != 3 &&
nphy->hw_phyrxchain != nphy->hw_phytxchain) {
if (bwn_nphy_ipa(mac)) {
rx2tx_delays[5] = 59;
rx2tx_delays[6] = 1;
rx2tx_events[7] = 0x1F;
}
bwn_nphy_set_rf_sequence(mac, 0, rx2tx_events, rx2tx_delays,
nitems(rx2tx_events));
}
tmp16 = (bwn_current_band(mac) == BWN_BAND_2G) ?
0x2 : 0x9C40;
BWN_PHY_WRITE(mac, BWN_NPHY_ENDROP_TLEN, tmp16);
BWN_PHY_SETMASK(mac, BWN_NPHY_SGILTRNOFFSET, 0xF0FF, 0x0700);
if (!bwn_is_40mhz(mac)) {
bwn_ntab_write(mac, BWN_NTAB32(16, 3), 0x18D);
bwn_ntab_write(mac, BWN_NTAB32(16, 127), 0x18D);
} else {
bwn_ntab_write(mac, BWN_NTAB32(16, 3), 0x14D);
bwn_ntab_write(mac, BWN_NTAB32(16, 127), 0x14D);
}
bwn_nphy_gain_ctl_workarounds(mac);
bwn_ntab_write(mac, BWN_NTAB16(8, 0), 2);
bwn_ntab_write(mac, BWN_NTAB16(8, 16), 2);
if (bwn_current_band(mac) == BWN_BAND_2G)
pdet_range = siba_sprom_get_fem_2ghz_pdet_range(sc->sc_dev);
else
pdet_range = siba_sprom_get_fem_5ghz_pdet_range(sc->sc_dev);
/* clamp with min(); pdet_range values 4 and 5 share the last table row */
vmid = vmids[min(pdet_range, 4)];
gain = gains[min(pdet_range, 4)];
switch (pdet_range) {
case 3:
if (!(mac->mac_phy.rev >= 4 &&
bwn_current_band(mac) == BWN_BAND_2G))
break;
/* FALL THROUGH */
case 0:
case 1:
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x08), 4, vmid);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x18), 4, vmid);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x0c), 4, gain);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x1c), 4, gain);
break;
case 2:
if (mac->mac_phy.rev >= 6) {
if (bwn_current_band(mac) == BWN_BAND_2G)
vmid[3] = 0x94;
else
vmid[3] = 0x8e;
gain[3] = 3;
} else if (mac->mac_phy.rev == 5) {
vmid[3] = 0x84;
gain[3] = 2;
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x08), 4, vmid);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x18), 4, vmid);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x0c), 4, gain);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x1c), 4, gain);
break;
case 4:
case 5:
if (bwn_current_band(mac) != BWN_BAND_2G) {
if (pdet_range == 4) {
vmid[3] = 0x8e;
tmp16 = 0x96;
gain[3] = 0x2;
} else {
vmid[3] = 0x89;
tmp16 = 0x89;
gain[3] = 0;
}
} else {
if (pdet_range == 4) {
vmid[3] = 0x89;
tmp16 = 0x8b;
gain[3] = 0x2;
} else {
vmid[3] = 0x74;
tmp16 = 0x70;
gain[3] = 0;
}
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x08), 4, vmid);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x0c), 4, gain);
vmid[3] = tmp16;
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x18), 4, vmid);
bwn_ntab_write_bulk(mac, BWN_NTAB16(8, 0x1c), 4, gain);
break;
}
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
BWN_RF_WRITE(mac, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
BWN_RF_WRITE(mac, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
/* N PHY WAR: TX Chain Update, with hw_phytxchain as the argument */
if ((siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_APLL_WAR &&
bwn_current_band(mac) == BWN_BAND_5G) ||
(siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_GPLL_WAR &&
bwn_current_band(mac) == BWN_BAND_2G))
tmp32 = 0x00088888;
else
tmp32 = 0x88888888;
bwn_ntab_write(mac, BWN_NTAB32(30, 1), tmp32);
bwn_ntab_write(mac, BWN_NTAB32(30, 2), tmp32);
bwn_ntab_write(mac, BWN_NTAB32(30, 3), tmp32);
if (mac->mac_phy.rev == 4 &&
bwn_current_band(mac) == BWN_BAND_5G) {
BWN_RF_WRITE(mac, B2056_TX0 | B2056_TX_GMBB_IDAC,
0x70);
BWN_RF_WRITE(mac, B2056_TX1 | B2056_TX_GMBB_IDAC,
0x70);
}
/* Dropped probably-always-true condition */
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20LDEASSERTTHRESH0, 0x0381);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20LDEASSERTTHRESH1, 0x0381);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20UASSERTTHRESH0, 0x042b);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20UASSERTTHRESH1, 0x042b);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20UDEASSERTTHRESH0, 0x0381);
BWN_PHY_WRITE(mac, BWN_NPHY_ED_CRS20UDEASSERTTHRESH1, 0x0381);
if (mac->mac_phy.rev >= 6 && siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_SINGLEANT_CCK)
; /* TODO: 0x0080000000000000 HF */
}
static void bwn_nphy_workarounds_rev1_2(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = phy->phy_n;
uint8_t events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
uint8_t delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
uint8_t events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
uint8_t delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
if (siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_SKWRKFEM_BRD ||
siba_get_pci_subdevice(sc->sc_dev)== BCMA_BOARD_TYPE_BCM943224M93) {
delays1[0] = 0x1;
delays1[5] = 0x14;
}
if (bwn_current_band(mac) == BWN_BAND_5G &&
nphy->band5g_pwrgain) {
BWN_RF_MASK(mac, B2055_C1_TX_RF_SPARE, ~0x8);
BWN_RF_MASK(mac, B2055_C2_TX_RF_SPARE, ~0x8);
} else {
BWN_RF_SET(mac, B2055_C1_TX_RF_SPARE, 0x8);
BWN_RF_SET(mac, B2055_C2_TX_RF_SPARE, 0x8);
}
bwn_ntab_write(mac, BWN_NTAB16(8, 0x00), 0x000A);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x10), 0x000A);
if (mac->mac_phy.rev < 3) {
bwn_ntab_write(mac, BWN_NTAB16(8, 0x02), 0xCDAA);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x12), 0xCDAA);
}
if (mac->mac_phy.rev < 2) {
bwn_ntab_write(mac, BWN_NTAB16(8, 0x08), 0x0000);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x18), 0x0000);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x07), 0x7AAB);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x17), 0x7AAB);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x06), 0x0800);
bwn_ntab_write(mac, BWN_NTAB16(8, 0x16), 0x0800);
}
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
bwn_nphy_set_rf_sequence(mac, 0, events1, delays1, 7);
bwn_nphy_set_rf_sequence(mac, 1, events2, delays2, 7);
bwn_nphy_gain_ctl_workarounds(mac);
if (mac->mac_phy.rev < 2) {
if (BWN_PHY_READ(mac, BWN_NPHY_RXCTL) & 0x2)
bwn_hf_write(mac, bwn_hf_read(mac) |
BWN_HF_MLADVW);
} else if (mac->mac_phy.rev == 2) {
BWN_PHY_WRITE(mac, BWN_NPHY_CRSCHECK2, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_CRSCHECK3, 0);
}
if (mac->mac_phy.rev < 2)
BWN_PHY_MASK(mac, BWN_NPHY_SCRAM_SIGCTL,
~BWN_NPHY_SCRAM_SIGCTL_SCM);
/* Set phase track alpha and beta */
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A0, 0x125);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A1, 0x1B3);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_A2, 0x105);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B0, 0x16E);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B1, 0xCD);
BWN_PHY_WRITE(mac, BWN_NPHY_PHASETR_B2, 0x20);
if (mac->mac_phy.rev < 3) {
BWN_PHY_MASK(mac, BWN_NPHY_PIL_DW1,
~BWN_NPHY_PIL_DW_64QAM & 0xFFFF);
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_20CO_S2B1, 0xB5);
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_20CO_S2B2, 0xA4);
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_20CO_S2B3, 0x00);
}
if (mac->mac_phy.rev == 2)
BWN_PHY_SET(mac, BWN_NPHY_FINERX2_CGC,
BWN_NPHY_FINERX2_CGC_DECGC);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
static void bwn_nphy_workarounds(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = phy->phy_n;
if (bwn_current_band(mac) == BWN_BAND_5G)
bwn_nphy_classifier(mac, 1, 0);
else
bwn_nphy_classifier(mac, 1, 1);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
BWN_PHY_SET(mac, BWN_NPHY_IQFLIP,
BWN_NPHY_IQFLIP_ADC1 | BWN_NPHY_IQFLIP_ADC2);
/* TODO: rev19+ */
if (mac->mac_phy.rev >= 7)
bwn_nphy_workarounds_rev7plus(mac);
else if (mac->mac_phy.rev >= 3)
bwn_nphy_workarounds_rev3plus(mac);
else
bwn_nphy_workarounds_rev1_2(mac);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
}
/**************************************************
* Tx/Rx common
**************************************************/
/*
* Transmits a known value for LO calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
*/
static int bwn_nphy_tx_tone(struct bwn_mac *mac, uint32_t freq, uint16_t max_val,
bool iqmode, bool dac_test, bool modify_bbmult)
{
uint16_t samp = bwn_nphy_gen_load_samples(mac, freq, max_val, dac_test);
if (samp == 0)
return -1;
bwn_nphy_run_samples(mac, samp, 0xFFFF, 0, iqmode, dac_test,
modify_bbmult);
return 0;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
static void bwn_nphy_update_txrx_chain(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
bool override = false;
uint16_t chain = 0x33;
if (nphy->txrx_chain == 0) {
chain = 0x11;
override = true;
} else if (nphy->txrx_chain == 1) {
chain = 0x22;
override = true;
}
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA,
~(BWN_NPHY_RFSEQCA_TXEN | BWN_NPHY_RFSEQCA_RXEN),
chain);
if (override)
BWN_PHY_SET(mac, BWN_NPHY_RFSEQMODE,
BWN_NPHY_RFSEQMODE_CAOVER);
else
BWN_PHY_MASK(mac, BWN_NPHY_RFSEQMODE,
~BWN_NPHY_RFSEQMODE_CAOVER);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
static void bwn_nphy_stop_playback(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t tmp;
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
tmp = BWN_PHY_READ(mac, BWN_NPHY_SAMP_STAT);
if (tmp & 0x1)
BWN_PHY_SET(mac, BWN_NPHY_SAMP_CMD, BWN_NPHY_SAMP_CMD_STOP);
else if (tmp & 0x2)
BWN_PHY_MASK(mac, BWN_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
BWN_PHY_MASK(mac, BWN_NPHY_SAMP_CMD, ~0x0004);
if (nphy->bb_mult_save & 0x80000000) {
tmp = nphy->bb_mult_save & 0xFFFF;
bwn_ntab_write(mac, BWN_NTAB16(15, 87), tmp);
nphy->bb_mult_save = 0;
}
if (phy->rev >= 7 && nphy->lpf_bw_overrode_for_sample_play) {
if (phy->rev >= 19)
bwn_nphy_rf_ctl_override_rev19(mac, 0x80, 0, 0, true,
1);
else
bwn_nphy_rf_ctl_override_rev7(mac, 0x80, 0, 0, true, 1);
nphy->lpf_bw_overrode_for_sample_play = false;
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
static void bwn_nphy_iq_cal_gain_params(struct bwn_mac *mac, uint16_t core,
struct bwn_nphy_txgains target,
struct bwn_nphy_iqcal_params *params)
{
struct bwn_phy *phy = &mac->mac_phy;
int i, j, indx;
uint16_t gain;
if (mac->mac_phy.rev >= 3) {
params->tx_lpf = target.tx_lpf[core]; /* Rev 7+ */
params->txgm = target.txgm[core];
params->pga = target.pga[core];
params->pad = target.pad[core];
params->ipa = target.ipa[core];
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
    (params->pad << 3) | (params->ipa) | (params->tx_lpf << 15);
} else {
params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
    (params->pad << 4) | (params->ipa);
}
for (j = 0; j < 5; j++)
params->ncorr[j] = 0x79;
} else {
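/*
 * Pre-rev3: look up the composite gain in tbl_iqcal_gainparams for the
 * current band; if no row matches, fall back to the last entry.
 */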
gain = (target.pad[core]) | (target.pga[core] << 4) |
(target.txgm[core] << 8);
indx = (bwn_current_band(mac) == BWN_BAND_5G) ?
1 : 0;
for (i = 0; i < 9; i++)
if (tbl_iqcal_gainparams[indx][i][0] == gain)
break;
i = min(i, 8);
params->txgm = tbl_iqcal_gainparams[indx][i][1];
params->pga = tbl_iqcal_gainparams[indx][i][2];
params->pad = tbl_iqcal_gainparams[indx][i][3];
params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
(params->pad << 2);
for (j = 0; j < 4; j++)
params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
}
}
/**************************************************
* Tx and Rx
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
static void bwn_nphy_tx_power_ctrl(struct bwn_mac *mac, bool enable)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t i;
uint16_t bmask, val, tmp;
bwn_band_t band = bwn_current_band(mac);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
nphy->txpwrctrl = enable;
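/*
 * Disabling: save the running power indices (rev 3+), zero the 84
 * adjusted-power entries in tables 26/27 and clear the hardware
 * power-control bits.  Enabling: reload the adjusted power table,
 * re-arm the control command and restore any saved indices.
 */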
if (!enable) {
if (mac->mac_phy.rev >= 3 &&
(BWN_PHY_READ(mac, BWN_NPHY_TXPCTL_CMD) &
(BWN_NPHY_TXPCTL_CMD_COEFF |
BWN_NPHY_TXPCTL_CMD_HWPCTLEN |
BWN_NPHY_TXPCTL_CMD_PCTLEN))) {
/* Disabling an enabled TX pwr ctl; save its state */
nphy->tx_pwr_idx[0] = BWN_PHY_READ(mac,
BWN_NPHY_C1_TXPCTL_STAT) & 0x7f;
nphy->tx_pwr_idx[1] = BWN_PHY_READ(mac,
BWN_NPHY_C2_TXPCTL_STAT) & 0x7f;
}
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR, 0x6840);
for (i = 0; i < 84; i++)
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR, 0x6C40);
for (i = 0; i < 84; i++)
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO, 0);
tmp = BWN_NPHY_TXPCTL_CMD_COEFF | BWN_NPHY_TXPCTL_CMD_HWPCTLEN;
if (mac->mac_phy.rev >= 3)
tmp |= BWN_NPHY_TXPCTL_CMD_PCTLEN;
BWN_PHY_MASK(mac, BWN_NPHY_TXPCTL_CMD, ~tmp);
if (mac->mac_phy.rev >= 3) {
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER1, 0x0100);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x0100);
} else {
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x4000);
}
if (mac->mac_phy.rev == 2)
BWN_PHY_SETMASK(mac, BWN_NPHY_BPHY_CTL3,
~BWN_NPHY_BPHY_CTL3_SCALE, 0x53);
else if (mac->mac_phy.rev < 2)
BWN_PHY_SETMASK(mac, BWN_NPHY_BPHY_CTL3,
~BWN_NPHY_BPHY_CTL3_SCALE, 0x5A);
if (mac->mac_phy.rev < 2 && bwn_is_40mhz(mac))
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_TSSI_RESET_PSM_WORKAROUN);
} else {
bwn_ntab_write_bulk(mac, BWN_NTAB16(26, 64), 84,
nphy->adj_pwr_tbl);
bwn_ntab_write_bulk(mac, BWN_NTAB16(27, 64), 84,
nphy->adj_pwr_tbl);
bmask = BWN_NPHY_TXPCTL_CMD_COEFF |
BWN_NPHY_TXPCTL_CMD_HWPCTLEN;
/* wl does useless check for "enable" param here */
val = BWN_NPHY_TXPCTL_CMD_COEFF | BWN_NPHY_TXPCTL_CMD_HWPCTLEN;
if (mac->mac_phy.rev >= 3) {
bmask |= BWN_NPHY_TXPCTL_CMD_PCTLEN;
if (val)
val |= BWN_NPHY_TXPCTL_CMD_PCTLEN;
}
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_CMD, ~(bmask), val);
if (band == BWN_BAND_5G) {
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_CMD,
~BWN_NPHY_TXPCTL_CMD_INIT,
0x32);
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_INIT,
~BWN_NPHY_TXPCTL_INIT_PIDXI1,
0x32);
} else {
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_CMD,
~BWN_NPHY_TXPCTL_CMD_INIT,
0x64);
if (phy->rev > 1)
BWN_PHY_SETMASK(mac,
BWN_NPHY_TXPCTL_INIT,
~BWN_NPHY_TXPCTL_INIT_PIDXI1,
0x64);
}
}
if (mac->mac_phy.rev >= 3) {
if (nphy->tx_pwr_idx[0] != 128 &&
nphy->tx_pwr_idx[1] != 128) {
/* Recover TX pwr ctl state */
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_CMD,
~BWN_NPHY_TXPCTL_CMD_INIT,
nphy->tx_pwr_idx[0]);
if (mac->mac_phy.rev > 1)
BWN_PHY_SETMASK(mac,
BWN_NPHY_TXPCTL_INIT,
~0xff, nphy->tx_pwr_idx[1]);
}
}
if (phy->rev >= 7) {
/* TODO */
}
if (mac->mac_phy.rev >= 3) {
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER1, ~0x100);
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER, ~0x100);
} else {
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_OVER, ~0x4000);
}
if (mac->mac_phy.rev == 2)
BWN_PHY_SETMASK(mac, BWN_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
else if (mac->mac_phy.rev < 2)
BWN_PHY_SETMASK(mac, BWN_NPHY_BPHY_CTL3, ~0xFF, 0x40);
if (mac->mac_phy.rev < 2 && bwn_is_40mhz(mac))
bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_TSSI_RESET_PSM_WORKAROUN);
if (bwn_nphy_ipa(mac)) {
BWN_PHY_MASK(mac, BWN_NPHY_PAPD_EN0, ~0x4);
BWN_PHY_MASK(mac, BWN_NPHY_PAPD_EN1, ~0x4);
}
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
static void bwn_nphy_tx_power_fix(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t txpi[2], bbmult, i;
uint16_t tmp, radio_gain, dac_gain;
uint16_t freq = bwn_get_centre_freq(mac);
uint32_t txgain;
/* uint32_t gaintbl; rev3+ */
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
/* TODO: rev19+ */
if (mac->mac_phy.rev >= 7) {
txpi[0] = txpi[1] = 30;
} else if (mac->mac_phy.rev >= 3) {
txpi[0] = 40;
txpi[1] = 40;
} else if (siba_sprom_get_rev(sc->sc_dev) < 4) {
txpi[0] = 72;
txpi[1] = 72;
} else {
if (bwn_current_band(mac) == BWN_BAND_2G) {
txpi[0] = siba_sprom_get_txpid_2g_0(sc->sc_dev);
txpi[1] = siba_sprom_get_txpid_2g_1(sc->sc_dev);
} else if (freq >= 4900 && freq < 5100) {
txpi[0] = siba_sprom_get_txpid_5gl_0(sc->sc_dev);
txpi[1] = siba_sprom_get_txpid_5gl_1(sc->sc_dev);
} else if (freq >= 5100 && freq < 5500) {
txpi[0] = siba_sprom_get_txpid_5g_0(sc->sc_dev);
txpi[1] = siba_sprom_get_txpid_5g_1(sc->sc_dev);
} else if (freq >= 5500) {
txpi[0] = siba_sprom_get_txpid_5gh_0(sc->sc_dev);
txpi[1] = siba_sprom_get_txpid_5gh_1(sc->sc_dev);
} else {
txpi[0] = 91;
txpi[1] = 91;
}
}
if (mac->mac_phy.rev < 7 &&
(txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
txpi[0] = txpi[1] = 91;
/*
for (i = 0; i < 2; i++) {
nphy->txpwrindex[i].index_internal = txpi[i];
nphy->txpwrindex[i].index_internal_save = txpi[i];
}
*/
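/*
 * Program each core with the gain selected by its TX power index:
 * split the gain-table entry into radio gain, DAC gain and bbmult and
 * write them to the AFE registers and N-PHY tables 7 and 15.
 */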
for (i = 0; i < 2; i++) {
const uint32_t *table = bwn_nphy_get_tx_gain_table(mac);
if (!table)
break;
txgain = *(table + txpi[i]);
if (mac->mac_phy.rev >= 3)
radio_gain = (txgain >> 16) & 0x1FFFF;
else
radio_gain = (txgain >> 16) & 0x1FFF;
if (mac->mac_phy.rev >= 7)
dac_gain = (txgain >> 8) & 0x7;
else
dac_gain = (txgain >> 8) & 0x3F;
bbmult = txgain & 0xFF;
if (mac->mac_phy.rev >= 3) {
if (i == 0)
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER1, 0x0100);
else
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x0100);
} else {
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x4000);
}
if (i == 0)
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_DACGAIN1, dac_gain);
else
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_DACGAIN2, dac_gain);
bwn_ntab_write(mac, BWN_NTAB16(0x7, 0x110 + i), radio_gain);
tmp = bwn_ntab_read(mac, BWN_NTAB16(0xF, 0x57));
if (i == 0)
tmp = (tmp & 0x00FF) | (bbmult << 8);
else
tmp = (tmp & 0xFF00) | bbmult;
bwn_ntab_write(mac, BWN_NTAB16(0xF, 0x57), tmp);
if (bwn_nphy_ipa(mac)) {
uint32_t tmp32;
uint16_t reg = (i == 0) ?
BWN_NPHY_PAPD_EN0 : BWN_NPHY_PAPD_EN1;
tmp32 = bwn_ntab_read(mac, BWN_NTAB32(26 + i,
576 + txpi[i]));
BWN_PHY_SETMASK(mac, reg, 0xE00F, (uint32_t) tmp32 << 4);
BWN_PHY_SET(mac, reg, 0x4);
}
}
BWN_PHY_MASK(mac, BWN_NPHY_BPHY_CTL2, ~BWN_NPHY_BPHY_CTL2_LUT);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
}
static void bwn_nphy_ipa_internal_tssi_setup(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
uint8_t core;
uint16_t r; /* routing */
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
for (core = 0; core < 2; core++) {
r = core ? 0x190 : 0x170;
if (bwn_current_band(mac) == BWN_BAND_2G) {
BWN_RF_WRITE(mac, r + 0x5, 0x5);
BWN_RF_WRITE(mac, r + 0x9, 0xE);
if (phy->rev != 5)
BWN_RF_WRITE(mac, r + 0xA, 0);
if (phy->rev != 7)
BWN_RF_WRITE(mac, r + 0xB, 1);
else
BWN_RF_WRITE(mac, r + 0xB, 0x31);
} else {
BWN_RF_WRITE(mac, r + 0x5, 0x9);
BWN_RF_WRITE(mac, r + 0x9, 0xC);
BWN_RF_WRITE(mac, r + 0xB, 0x0);
if (phy->rev != 5)
BWN_RF_WRITE(mac, r + 0xA, 1);
else
BWN_RF_WRITE(mac, r + 0xA, 0x31);
}
BWN_RF_WRITE(mac, r + 0x6, 0);
BWN_RF_WRITE(mac, r + 0x7, 0);
BWN_RF_WRITE(mac, r + 0x8, 3);
BWN_RF_WRITE(mac, r + 0xC, 0);
}
} else {
if (bwn_current_band(mac) == BWN_BAND_2G)
BWN_RF_WRITE(mac, B2056_SYN_RESERVED_ADDR31, 0x128);
else
BWN_RF_WRITE(mac, B2056_SYN_RESERVED_ADDR31, 0x80);
BWN_RF_WRITE(mac, B2056_SYN_RESERVED_ADDR30, 0);
BWN_RF_WRITE(mac, B2056_SYN_GPIO_MASTER1, 0x29);
for (core = 0; core < 2; core++) {
r = core ? B2056_TX1 : B2056_TX0;
BWN_RF_WRITE(mac, r | B2056_TX_IQCAL_VCM_HG, 0);
BWN_RF_WRITE(mac, r | B2056_TX_IQCAL_IDAC, 0);
BWN_RF_WRITE(mac, r | B2056_TX_TSSI_VCM, 3);
BWN_RF_WRITE(mac, r | B2056_TX_TX_AMP_DET, 0);
BWN_RF_WRITE(mac, r | B2056_TX_TSSI_MISC1, 8);
BWN_RF_WRITE(mac, r | B2056_TX_TSSI_MISC2, 0);
BWN_RF_WRITE(mac, r | B2056_TX_TSSI_MISC3, 0);
if (bwn_current_band(mac) == BWN_BAND_2G) {
BWN_RF_WRITE(mac, r | B2056_TX_TX_SSI_MASTER,
0x5);
if (phy->rev != 5)
BWN_RF_WRITE(mac, r | B2056_TX_TSSIA,
0x00);
if (phy->rev >= 5)
BWN_RF_WRITE(mac, r | B2056_TX_TSSIG,
0x31);
else
BWN_RF_WRITE(mac, r | B2056_TX_TSSIG,
0x11);
BWN_RF_WRITE(mac, r | B2056_TX_TX_SSI_MUX,
0xE);
} else {
BWN_RF_WRITE(mac, r | B2056_TX_TX_SSI_MASTER,
0x9);
BWN_RF_WRITE(mac, r | B2056_TX_TSSIA, 0x31);
BWN_RF_WRITE(mac, r | B2056_TX_TSSIG, 0x0);
BWN_RF_WRITE(mac, r | B2056_TX_TX_SSI_MUX,
0xC);
}
}
}
}
/*
* Stop the radio and transmit a known signal, then check the received signal
* strength to get the TSSI (Transmit Signal Strength Indicator).
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
*/
static void bwn_nphy_tx_power_ctl_idle_tssi(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint32_t tmp;
int32_t rssi[4] = { };
if (bwn_is_chan_passive(mac))
return;
if (bwn_nphy_ipa(mac))
bwn_nphy_ipa_internal_tssi_setup(mac);
if (phy->rev >= 19)
bwn_nphy_rf_ctl_override_rev19(mac, 0x1000, 0, 3, false, 0);
else if (phy->rev >= 7)
bwn_nphy_rf_ctl_override_rev7(mac, 0x1000, 0, 3, false, 0);
else if (phy->rev >= 3)
bwn_nphy_rf_ctl_override(mac, 0x2000, 0, 3, false);
bwn_nphy_stop_playback(mac);
bwn_nphy_tx_tone(mac, 4000, 0, false, false, false);
DELAY(20);
tmp = bwn_nphy_poll_rssi(mac, N_RSSI_TSSI_2G, rssi, 1);
bwn_nphy_stop_playback(mac);
bwn_nphy_rssi_select(mac, 0, N_RSSI_W1);
if (phy->rev >= 19)
bwn_nphy_rf_ctl_override_rev19(mac, 0x1000, 0, 3, true, 0);
else if (phy->rev >= 7)
bwn_nphy_rf_ctl_override_rev7(mac, 0x1000, 0, 3, true, 0);
else if (phy->rev >= 3)
bwn_nphy_rf_ctl_override(mac, 0x2000, 0, 3, true);
if (phy->rev >= 19) {
/* TODO */
return;
} else if (phy->rev >= 3) {
nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
} else {
nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF;
nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF;
}
nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF;
nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
}
/* http://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */
static void bwn_nphy_tx_prepare_adjusted_power_table(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t idx, delta;
uint8_t i, stf_mode;
/* Array adj_pwr_tbl corresponds to the hardware table. It consists of
* 21 groups, each containing 4 entries.
*
* The first group has entries for CCK modulation.
* The remaining groups have one entry per STF mode (SISO, CDD, STBC, SDM).
*
* Group 0 is for CCK
* Groups 1..4 use BPSK (group per coding rate)
* Groups 5..8 use QPSK (group per coding rate)
* Groups 9..12 use 16-QAM (group per coding rate)
* Groups 13..16 use 64-QAM (group per coding rate)
* Groups 17..20 are unknown
*/
for (i = 0; i < 4; i++)
nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
for (stf_mode = 0; stf_mode < 4; stf_mode++) {
delta = 0;
switch (stf_mode) {
case 0:
if (bwn_is_40mhz(mac) && mac->mac_phy.rev >= 5) {
idx = 68;
} else {
delta = 1;
idx = bwn_is_40mhz(mac) ? 52 : 4;
}
break;
case 1:
idx = bwn_is_40mhz(mac) ? 76 : 28;
break;
case 2:
idx = bwn_is_40mhz(mac) ? 84 : 36;
break;
case 3:
idx = bwn_is_40mhz(mac) ? 92 : 44;
break;
}
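/*
 * Fill the 20 OFDM groups for this STF mode; idx walks the flat
 * tx_power_offset[] array and advances at the coding-rate boundaries
 * handled below.
 */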
for (i = 0; i < 20; i++) {
nphy->adj_pwr_tbl[4 + 4 * i + stf_mode] =
nphy->tx_power_offset[idx];
if (i == 0)
idx += delta;
if (i == 14)
idx += 1 - delta;
if (i == 3 || i == 4 || i == 7 || i == 8 || i == 11 ||
i == 13)
idx += 1;
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
static void bwn_nphy_tx_power_ctl_setup(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
struct siba_sprom_core_pwr_info core_pwr_info[4];
int n;
int16_t a1[2], b0[2], b1[2];
uint8_t idle[2];
uint8_t ppr_max;
int8_t target[2];
int32_t num, den, pwr;
uint32_t regval[64];
uint16_t freq = bwn_get_centre_freq(mac);
uint16_t tmp;
uint16_t r; /* routing */
uint8_t i, c;
for (n = 0; n < 4; n++) {
bzero(&core_pwr_info[n], sizeof(core_pwr_info[n]));
if (siba_sprom_get_core_power_info(sc->sc_dev, n,
&core_pwr_info[n]) != 0) {
BWN_ERRPRINTF(mac->mac_sc,
"%s: failed to get core_pwr_info for core %d\n",
__func__,
n);
}
}
if (siba_get_revid(sc->sc_dev) == 11 || siba_get_revid(sc->sc_dev) == 12) {
BWN_WRITE_SETMASK4(mac, BWN_MACCTL, ~0, 0x200000);
BWN_READ_4(mac, BWN_MACCTL);
DELAY(1);
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, true);
BWN_PHY_SET(mac, BWN_NPHY_TSSIMODE, BWN_NPHY_TSSIMODE_EN);
if (mac->mac_phy.rev >= 3)
BWN_PHY_MASK(mac, BWN_NPHY_TXPCTL_CMD,
~BWN_NPHY_TXPCTL_CMD_PCTLEN & 0xFFFF);
else
BWN_PHY_SET(mac, BWN_NPHY_TXPCTL_CMD,
BWN_NPHY_TXPCTL_CMD_PCTLEN);
if (siba_get_revid(sc->sc_dev) == 11 || siba_get_revid(sc->sc_dev) == 12)
BWN_WRITE_SETMASK4(mac, BWN_MACCTL, ~0x200000, 0);
/*
* XXX TODO: see if the bands below map to 5g-lo, 5g-mid, 5g-hi in
* any way.
*/
if (siba_sprom_get_rev(sc->sc_dev) < 4) {
idle[0] = nphy->pwr_ctl_info[0].idle_tssi_2g;
idle[1] = nphy->pwr_ctl_info[1].idle_tssi_2g;
target[0] = target[1] = 52;
a1[0] = a1[1] = -424;
b0[0] = b0[1] = 5612;
b1[0] = b1[1] = -1393;
} else {
if (bwn_current_band(mac) == BWN_BAND_2G) {
for (c = 0; c < 2; c++) {
idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
target[c] = core_pwr_info[c].maxpwr_2g;
a1[c] = core_pwr_info[c].pa_2g[0];
b0[c] = core_pwr_info[c].pa_2g[1];
b1[c] = core_pwr_info[c].pa_2g[2];
}
} else if (freq >= 4900 && freq < 5100) {
for (c = 0; c < 2; c++) {
idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
target[c] = core_pwr_info[c].maxpwr_5gl;
a1[c] = core_pwr_info[c].pa_5gl[0];
b0[c] = core_pwr_info[c].pa_5gl[1];
b1[c] = core_pwr_info[c].pa_5gl[2];
}
} else if (freq >= 5100 && freq < 5500) {
for (c = 0; c < 2; c++) {
idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
target[c] = core_pwr_info[c].maxpwr_5g;
a1[c] = core_pwr_info[c].pa_5g[0];
b0[c] = core_pwr_info[c].pa_5g[1];
b1[c] = core_pwr_info[c].pa_5g[2];
}
} else if (freq >= 5500) {
for (c = 0; c < 2; c++) {
idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
target[c] = core_pwr_info[c].maxpwr_5gh;
a1[c] = core_pwr_info[c].pa_5gh[0];
b0[c] = core_pwr_info[c].pa_5gh[1];
b1[c] = core_pwr_info[c].pa_5gh[2];
}
} else {
idle[0] = nphy->pwr_ctl_info[0].idle_tssi_5g;
idle[1] = nphy->pwr_ctl_info[1].idle_tssi_5g;
target[0] = target[1] = 52;
a1[0] = a1[1] = -424;
b0[0] = b0[1] = 5612;
b1[0] = b1[1] = -1393;
}
}
ppr_max = bwn_ppr_get_max(mac, &nphy->tx_pwr_max_ppr);
if (ppr_max) {
target[0] = ppr_max;
target[1] = ppr_max;
}
if (mac->mac_phy.rev >= 3) {
if (siba_sprom_get_fem_2ghz_tssipos(sc->sc_dev))
BWN_PHY_SET(mac, BWN_NPHY_TXPCTL_ITSSI, 0x4000);
if (mac->mac_phy.rev >= 7) {
for (c = 0; c < 2; c++) {
r = c ? 0x190 : 0x170;
if (bwn_nphy_ipa(mac))
BWN_RF_WRITE(mac, r + 0x9, (bwn_current_band(mac) == BWN_BAND_2G) ? 0xE : 0xC);
}
} else {
if (bwn_nphy_ipa(mac)) {
tmp = (bwn_current_band(mac) == BWN_BAND_5G) ? 0xC : 0xE;
BWN_RF_WRITE(mac,
B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
BWN_RF_WRITE(mac,
B2056_TX1 | B2056_TX_TX_SSI_MUX, tmp);
} else {
BWN_RF_WRITE(mac,
B2056_TX0 | B2056_TX_TX_SSI_MUX, 0x11);
BWN_RF_WRITE(mac,
B2056_TX1 | B2056_TX_TX_SSI_MUX, 0x11);
}
}
}
if (siba_get_revid(sc->sc_dev) == 11 || siba_get_revid(sc->sc_dev) == 12) {
BWN_WRITE_SETMASK4(mac, BWN_MACCTL, ~0, 0x200000);
BWN_READ_4(mac, BWN_MACCTL);
DELAY(1);
}
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_CMD,
~BWN_NPHY_TXPCTL_CMD_INIT, 0x19);
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_INIT,
~BWN_NPHY_TXPCTL_INIT_PIDXI1, 0x19);
} else {
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_CMD,
~BWN_NPHY_TXPCTL_CMD_INIT, 0x40);
if (mac->mac_phy.rev > 1)
BWN_PHY_SETMASK(mac, BWN_NPHY_TXPCTL_INIT,
~BWN_NPHY_TXPCTL_INIT_PIDXI1, 0x40);
}
if (siba_get_revid(sc->sc_dev) == 11 || siba_get_revid(sc->sc_dev) == 12)
BWN_WRITE_SETMASK4(mac, BWN_MACCTL, ~0x200000, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_TXPCTL_N,
0xF0 << BWN_NPHY_TXPCTL_N_TSSID_SHIFT |
3 << BWN_NPHY_TXPCTL_N_NPTIL2_SHIFT);
BWN_PHY_WRITE(mac, BWN_NPHY_TXPCTL_ITSSI,
idle[0] << BWN_NPHY_TXPCTL_ITSSI_0_SHIFT |
idle[1] << BWN_NPHY_TXPCTL_ITSSI_1_SHIFT |
BWN_NPHY_TXPCTL_ITSSI_BINF);
BWN_PHY_WRITE(mac, BWN_NPHY_TXPCTL_TPWR,
target[0] << BWN_NPHY_TXPCTL_TPWR_0_SHIFT |
target[1] << BWN_NPHY_TXPCTL_TPWR_1_SHIFT);
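/*
 * Build the 64-entry estimated-power table for each core from the PA
 * coefficients a1/b0/b1 and upload it to N-PHY tables 26 and 27.
 */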
for (c = 0; c < 2; c++) {
for (i = 0; i < 64; i++) {
num = 8 * (16 * b0[c] + b1[c] * i);
den = 32768 + a1[c] * i;
pwr = max((4 * num + den / 2) / den, -8);
if (mac->mac_phy.rev < 3 && (i <= (31 - idle[c] + 1)))
pwr = max(pwr, target[c] + 1);
regval[i] = pwr;
}
bwn_ntab_write_bulk(mac, BWN_NTAB32(26 + c, 0), 64, regval);
}
bwn_nphy_tx_prepare_adjusted_power_table(mac);
bwn_ntab_write_bulk(mac, BWN_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
bwn_ntab_write_bulk(mac, BWN_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, false);
}
static void bwn_nphy_tx_gain_table_upload(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
const uint32_t *table = NULL;
uint32_t rfpwr_offset;
uint8_t pga_gain, pad_gain;
int i;
const int16_t *rf_pwr_offset_table = NULL;
table = bwn_nphy_get_tx_gain_table(mac);
if (!table)
return;
bwn_ntab_write_bulk(mac, BWN_NTAB32(26, 192), 128, table);
bwn_ntab_write_bulk(mac, BWN_NTAB32(27, 192), 128, table);
if (phy->rev < 3)
return;
#if 0
nphy->gmval = (table[0] >> 16) & 0x7000;
#endif
if (phy->rev >= 19) {
return;
} else if (phy->rev >= 7) {
rf_pwr_offset_table = bwn_ntab_get_rf_pwr_offset_table(mac);
if (!rf_pwr_offset_table)
return;
/* TODO: Enable this once we have gains configured */
return;
}
for (i = 0; i < 128; i++) {
if (phy->rev >= 19) {
/* TODO */
return;
} else if (phy->rev >= 7) {
pga_gain = (table[i] >> 24) & 0xf;
pad_gain = (table[i] >> 19) & 0x1f;
if (bwn_current_band(mac) == BWN_BAND_2G)
rfpwr_offset = rf_pwr_offset_table[pad_gain];
else
rfpwr_offset = rf_pwr_offset_table[pga_gain];
} else {
pga_gain = (table[i] >> 24) & 0xF;
if (bwn_current_band(mac) == BWN_BAND_2G)
rfpwr_offset = bwn_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
else
rfpwr_offset = 0; /* FIXME */
}
bwn_ntab_write(mac, BWN_NTAB32(26, 576 + i), rfpwr_offset);
bwn_ntab_write(mac, BWN_NTAB32(27, 576 + i), rfpwr_offset);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
static void bwn_nphy_pa_override(struct bwn_mac *mac, bool enable)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
bwn_band_t band;
uint16_t tmp;
if (!enable) {
nphy->rfctrl_intc1_save = BWN_PHY_READ(mac,
BWN_NPHY_RFCTL_INTC1);
nphy->rfctrl_intc2_save = BWN_PHY_READ(mac,
BWN_NPHY_RFCTL_INTC2);
band = bwn_current_band(mac);
if (mac->mac_phy.rev >= 7) {
tmp = 0x1480;
} else if (mac->mac_phy.rev >= 3) {
if (band == BWN_BAND_5G)
tmp = 0x600;
else
tmp = 0x480;
} else {
if (band == BWN_BAND_5G)
tmp = 0x180;
else
tmp = 0x120;
}
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, tmp);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, tmp);
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1,
nphy->rfctrl_intc1_save);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2,
nphy->rfctrl_intc2_save);
}
}
/*
* TX low-pass filter bandwidth setup
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw
*/
static void bwn_nphy_tx_lpf_bw(struct bwn_mac *mac)
{
uint16_t tmp;
if (mac->mac_phy.rev < 3 || mac->mac_phy.rev >= 7)
return;
if (bwn_nphy_ipa(mac))
tmp = bwn_is_40mhz(mac) ? 5 : 4;
else
tmp = bwn_is_40mhz(mac) ? 3 : 1;
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B32S2,
(tmp << 9) | (tmp << 6) | (tmp << 3) | tmp);
if (bwn_nphy_ipa(mac)) {
tmp = bwn_is_40mhz(mac) ? 4 : 1;
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B1S2,
(tmp << 9) | (tmp << 6) | (tmp << 3) | tmp);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
static void bwn_nphy_rx_iq_est(struct bwn_mac *mac, struct bwn_nphy_iq_est *est,
uint16_t samps, uint8_t time, bool wait)
{
int i;
uint16_t tmp;
BWN_PHY_WRITE(mac, BWN_NPHY_IQEST_SAMCNT, samps);
BWN_PHY_SETMASK(mac, BWN_NPHY_IQEST_WT, ~BWN_NPHY_IQEST_WT_VAL, time);
if (wait)
BWN_PHY_SET(mac, BWN_NPHY_IQEST_CMD, BWN_NPHY_IQEST_CMD_MODE);
else
BWN_PHY_MASK(mac, BWN_NPHY_IQEST_CMD, ~BWN_NPHY_IQEST_CMD_MODE);
BWN_PHY_SET(mac, BWN_NPHY_IQEST_CMD, BWN_NPHY_IQEST_CMD_START);
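/* Poll for completion for up to 1000 * 10us; zero the result on timeout. */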
for (i = 1000; i; i--) {
tmp = BWN_PHY_READ(mac, BWN_NPHY_IQEST_CMD);
if (!(tmp & BWN_NPHY_IQEST_CMD_START)) {
est->i0_pwr = (BWN_PHY_READ(mac, BWN_NPHY_IQEST_IPACC_HI0) << 16) |
BWN_PHY_READ(mac, BWN_NPHY_IQEST_IPACC_LO0);
est->q0_pwr = (BWN_PHY_READ(mac, BWN_NPHY_IQEST_QPACC_HI0) << 16) |
BWN_PHY_READ(mac, BWN_NPHY_IQEST_QPACC_LO0);
est->iq0_prod = (BWN_PHY_READ(mac, BWN_NPHY_IQEST_IQACC_HI0) << 16) |
BWN_PHY_READ(mac, BWN_NPHY_IQEST_IQACC_LO0);
est->i1_pwr = (BWN_PHY_READ(mac, BWN_NPHY_IQEST_IPACC_HI1) << 16) |
BWN_PHY_READ(mac, BWN_NPHY_IQEST_IPACC_LO1);
est->q1_pwr = (BWN_PHY_READ(mac, BWN_NPHY_IQEST_QPACC_HI1) << 16) |
BWN_PHY_READ(mac, BWN_NPHY_IQEST_QPACC_LO1);
est->iq1_prod = (BWN_PHY_READ(mac, BWN_NPHY_IQEST_IQACC_HI1) << 16) |
BWN_PHY_READ(mac, BWN_NPHY_IQEST_IQACC_LO1);
return;
}
DELAY(10);
}
memset(est, 0, sizeof(*est));
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
static void bwn_nphy_rx_iq_coeffs(struct bwn_mac *mac, bool write,
struct bwn_phy_n_iq_comp *pcomp)
{
if (write) {
BWN_PHY_WRITE(mac, BWN_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
BWN_PHY_WRITE(mac, BWN_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
BWN_PHY_WRITE(mac, BWN_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
BWN_PHY_WRITE(mac, BWN_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
} else {
pcomp->a0 = BWN_PHY_READ(mac, BWN_NPHY_C1_RXIQ_COMPA0);
pcomp->b0 = BWN_PHY_READ(mac, BWN_NPHY_C1_RXIQ_COMPB0);
pcomp->a1 = BWN_PHY_READ(mac, BWN_NPHY_C2_RXIQ_COMPA1);
pcomp->b1 = BWN_PHY_READ(mac, BWN_NPHY_C2_RXIQ_COMPB1);
}
}
#if 0
/* Ready but not used anywhere */
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
static void bwn_nphy_rx_cal_phy_cleanup(struct bwn_mac *mac, uint8_t core)
{
uint16_t *regs = mac->mac_phy.phy_n->tx_rx_cal_phy_saveregs;
BWN_PHY_WRITE(mac, BWN_NPHY_RFSEQCA, regs[0]);
if (core == 0) {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C1, regs[1]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER1, regs[2]);
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C2, regs[1]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, regs[2]);
}
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, regs[3]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, regs[4]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_RSSIO1, regs[5]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_RSSIO2, regs[6]);
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B1S1, regs[7]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_OVER, regs[8]);
BWN_PHY_WRITE(mac, BWN_NPHY_PAPD_EN0, regs[9]);
BWN_PHY_WRITE(mac, BWN_NPHY_PAPD_EN1, regs[10]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
static void bwn_nphy_rx_cal_phy_setup(struct bwn_mac *mac, uint8_t core)
{
uint8_t rxval, txval;
uint16_t *regs = mac->mac_phy.phy_n->tx_rx_cal_phy_saveregs;
regs[0] = BWN_PHY_READ(mac, BWN_NPHY_RFSEQCA);
if (core == 0) {
regs[1] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C1);
regs[2] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER1);
} else {
regs[1] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C2);
regs[2] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER);
}
regs[3] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC1);
regs[4] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC2);
regs[5] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_RSSIO1);
regs[6] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_RSSIO2);
regs[7] = BWN_PHY_READ(mac, BWN_NPHY_TXF_40CO_B1S1);
regs[8] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_OVER);
regs[9] = BWN_PHY_READ(mac, BWN_NPHY_PAPD_EN0);
regs[10] = BWN_PHY_READ(mac, BWN_NPHY_PAPD_EN1);
BWN_PHY_MASK(mac, BWN_NPHY_PAPD_EN0, ~0x0001);
BWN_PHY_MASK(mac, BWN_NPHY_PAPD_EN1, ~0x0001);
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA,
~BWN_NPHY_RFSEQCA_RXDIS & 0xFFFF,
((1 - core) << BWN_NPHY_RFSEQCA_RXDIS_SHIFT));
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA, ~BWN_NPHY_RFSEQCA_TXEN,
((1 - core) << BWN_NPHY_RFSEQCA_TXEN_SHIFT));
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA, ~BWN_NPHY_RFSEQCA_RXEN,
(core << BWN_NPHY_RFSEQCA_RXEN_SHIFT));
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA, ~BWN_NPHY_RFSEQCA_TXDIS,
(core << BWN_NPHY_RFSEQCA_TXDIS_SHIFT));
if (core == 0) {
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C1, ~0x0007);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER1, 0x0007);
} else {
BWN_PHY_MASK(mac, BWN_NPHY_AFECTL_C2, ~0x0007);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x0007);
}
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_PA, 0, 3);
bwn_nphy_rf_ctl_override(mac, 8, 0, 3, false);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RX2TX);
if (core == 0) {
rxval = 1;
txval = 8;
} else {
rxval = 4;
txval = 2;
}
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_TRSW, rxval,
core + 1);
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_TRSW, txval,
2 - core);
}
#endif
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
static void bwn_nphy_calc_rx_iq_comp(struct bwn_mac *mac, uint8_t mask)
{
int i;
int32_t iq;
uint32_t ii;
uint32_t qq;
int iq_nbits, qq_nbits;
int arsh, brsh;
uint16_t tmp, a, b;
struct bwn_nphy_iq_est est;
struct bwn_phy_n_iq_comp old;
struct bwn_phy_n_iq_comp new = { };
bool error = false;
if (mask == 0)
return;
bwn_nphy_rx_iq_coeffs(mac, false, &old);
bwn_nphy_rx_iq_coeffs(mac, true, &new);
bwn_nphy_rx_iq_est(mac, &est, 0x4000, 32, false);
new = old;
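/*
 * For each core selected by the mask, derive the compensation
 * coefficients a and b from the measured I/Q powers and their cross
 * product using fixed-point shifts; on any arithmetic error keep the
 * previous coefficients.
 */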
for (i = 0; i < 2; i++) {
if (i == 0 && (mask & 1)) {
iq = est.iq0_prod;
ii = est.i0_pwr;
qq = est.q0_pwr;
} else if (i == 1 && (mask & 2)) {
iq = est.iq1_prod;
ii = est.i1_pwr;
qq = est.q1_pwr;
} else {
continue;
}
if (ii + qq < 2) {
error = true;
break;
}
iq_nbits = fls(abs(iq));
qq_nbits = fls(qq);
arsh = iq_nbits - 20;
if (arsh >= 0) {
a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
tmp = ii >> arsh;
} else {
a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
tmp = ii << -arsh;
}
if (tmp == 0) {
error = true;
break;
}
a /= tmp;
brsh = qq_nbits - 11;
if (brsh >= 0) {
b = (qq << (31 - qq_nbits));
tmp = ii >> brsh;
} else {
b = (qq << (31 - qq_nbits));
tmp = ii << -brsh;
}
if (tmp == 0) {
error = true;
break;
}
b = bwn_sqrt(mac, b / tmp - a * a) - (1 << 10);
if (i == 0 && (mask & 0x1)) {
if (mac->mac_phy.rev >= 3) {
new.a0 = a & 0x3FF;
new.b0 = b & 0x3FF;
} else {
new.a0 = b & 0x3FF;
new.b0 = a & 0x3FF;
}
} else if (i == 1 && (mask & 0x2)) {
if (mac->mac_phy.rev >= 3) {
new.a1 = a & 0x3FF;
new.b1 = b & 0x3FF;
} else {
new.a1 = b & 0x3FF;
new.b1 = a & 0x3FF;
}
}
}
if (error)
new = old;
bwn_nphy_rx_iq_coeffs(mac, true, &new);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
static void bwn_nphy_tx_iq_workaround(struct bwn_mac *mac)
{
uint16_t array[4];
bwn_ntab_read_bulk(mac, BWN_NTAB16(0xF, 0x50), 4, array);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHM_SH_NPHY_TXIQW0, array[0]);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHM_SH_NPHY_TXIQW1, array[1]);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHM_SH_NPHY_TXIQW2, array[2]);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHM_SH_NPHY_TXIQW3, array[3]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
static void bwn_nphy_spur_workaround(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t channel = bwn_get_chan(mac);
int tone[2] = { 57, 58 };
uint32_t noise[2] = { 0x3FF, 0x3FF };
if (mac->mac_phy.rev < 3) {
BWN_ERRPRINTF(mac->mac_sc, "%s: phy rev %d out of range\n",
__func__,
mac->mac_phy.rev);
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
if (nphy->gband_spurwar_en) {
/* TODO: N PHY Adjust Analog Pfbw (7) */
if (channel == 11 && bwn_is_40mhz(mac))
; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
else
; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
/* TODO: N PHY Adjust CRS Min Power (0x1E) */
}
if (nphy->aband_spurwar_en) {
if (channel == 54) {
tone[0] = 0x20;
noise[0] = 0x25F;
} else if (channel == 38 || channel == 102 || channel == 118) {
if (0 /* FIXME */) {
tone[0] = 0x20;
noise[0] = 0x21F;
} else {
tone[0] = 0;
noise[0] = 0;
}
} else if (channel == 134) {
tone[0] = 0x20;
noise[0] = 0x21F;
} else if (channel == 151) {
tone[0] = 0x10;
noise[0] = 0x23F;
} else if (channel == 153 || channel == 161) {
tone[0] = 0x30;
noise[0] = 0x23F;
} else {
tone[0] = 0;
noise[0] = 0;
}
if (!tone[0] && !noise[0])
; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
else
; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
static void bwn_nphy_tx_pwr_ctrl_coef_setup(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
int i, j;
uint32_t tmp;
uint32_t cur_real, cur_imag, real_part, imag_part;
uint16_t buffer[7];
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, true);
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 80), 7, buffer);
for (i = 0; i < 2; i++) {
tmp = ((buffer[i * 2] & 0x3FF) << 10) |
(buffer[i * 2 + 1] & 0x3FF);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR,
(((i + 26) << 10) | 320));
for (j = 0; j < 128; j++) {
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATAHI,
((tmp >> 16) & 0xFFFF));
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO,
(tmp & 0xFFFF));
}
}
for (i = 0; i < 2; i++) {
tmp = buffer[5 + i];
real_part = (tmp >> 8) & 0xFF;
imag_part = (tmp & 0xFF);
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_ADDR,
(((i + 26) << 10) | 448));
if (mac->mac_phy.rev >= 3) {
cur_real = real_part;
cur_imag = imag_part;
tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
}
for (j = 0; j < 128; j++) {
if (mac->mac_phy.rev < 3) {
cur_real = (real_part * loscale[j] + 128) >> 8;
cur_imag = (imag_part * loscale[j] + 128) >> 8;
tmp = ((cur_real & 0xFF) << 8) |
(cur_imag & 0xFF);
}
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATAHI,
((tmp >> 16) & 0xFFFF));
BWN_PHY_WRITE(mac, BWN_NPHY_TABLE_DATALO,
(tmp & 0xFFFF));
}
}
if (mac->mac_phy.rev >= 3) {
bwn_shm_write_2(mac, BWN_SHARED,
BWN_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
bwn_shm_write_2(mac, BWN_SHARED,
BWN_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
}
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, false);
}
/*
* Restore RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
*/
static void bwn_nphy_restore_rssi_cal(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t *rssical_radio_regs = NULL;
uint16_t *rssical_phy_regs = NULL;
if (bwn_current_band(mac) == BWN_BAND_2G) {
if (!nphy->rssical_chanspec_2G.center_freq)
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
} else {
if (!nphy->rssical_chanspec_5G.center_freq)
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
}
if (mac->mac_phy.rev >= 19) {
/* TODO */
} else if (mac->mac_phy.rev >= 7) {
BWN_RF_SETMASK(mac, R2057_NB_MASTER_CORE0, ~R2057_VCM_MASK,
rssical_radio_regs[0]);
BWN_RF_SETMASK(mac, R2057_NB_MASTER_CORE1, ~R2057_VCM_MASK,
rssical_radio_regs[1]);
} else {
BWN_RF_SETMASK(mac, B2056_RX0 | B2056_RX_RSSI_MISC, 0xE3,
rssical_radio_regs[0]);
BWN_RF_SETMASK(mac, B2056_RX1 | B2056_RX_RSSI_MISC, 0xE3,
rssical_radio_regs[1]);
}
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
BWN_PHY_WRITE(mac, BWN_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
}
static void bwn_nphy_tx_cal_radio_setup_rev19(struct bwn_mac *mac)
{
/* TODO */
}
static void bwn_nphy_tx_cal_radio_setup_rev7(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t *save = nphy->tx_rx_cal_radio_saveregs;
int core, off;
uint16_t r, tmp;
for (core = 0; core < 2; core++) {
r = core ? 0x20 : 0;
off = core * 11;
save[off + 0] = BWN_RF_READ(mac, r + R2057_TX0_TX_SSI_MASTER);
save[off + 1] = BWN_RF_READ(mac, r + R2057_TX0_IQCAL_VCM_HG);
save[off + 2] = BWN_RF_READ(mac, r + R2057_TX0_IQCAL_IDAC);
save[off + 3] = BWN_RF_READ(mac, r + R2057_TX0_TSSI_VCM);
save[off + 4] = 0;
save[off + 5] = BWN_RF_READ(mac, r + R2057_TX0_TX_SSI_MUX);
if (phy->rf_rev != 5)
save[off + 6] = BWN_RF_READ(mac, r + R2057_TX0_TSSIA);
save[off + 7] = BWN_RF_READ(mac, r + R2057_TX0_TSSIG);
save[off + 8] = BWN_RF_READ(mac, r + R2057_TX0_TSSI_MISC1);
if (bwn_current_band(mac) == BWN_BAND_5G) {
BWN_RF_WRITE(mac, r + R2057_TX0_TX_SSI_MASTER, 0xA);
BWN_RF_WRITE(mac, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
BWN_RF_WRITE(mac, r + R2057_TX0_IQCAL_IDAC, 0x55);
BWN_RF_WRITE(mac, r + R2057_TX0_TSSI_VCM, 0);
BWN_RF_WRITE(mac, r + R2057_TX0_TSSIG, 0);
if (nphy->use_int_tx_iq_lo_cal) {
BWN_RF_WRITE(mac, r + R2057_TX0_TX_SSI_MUX, 0x4);
tmp = true ? 0x31 : 0x21; /* TODO */
BWN_RF_WRITE(mac, r + R2057_TX0_TSSIA, tmp);
}
BWN_RF_WRITE(mac, r + R2057_TX0_TSSI_MISC1, 0x00);
} else {
BWN_RF_WRITE(mac, r + R2057_TX0_TX_SSI_MASTER, 0x6);
BWN_RF_WRITE(mac, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
BWN_RF_WRITE(mac, r + R2057_TX0_IQCAL_IDAC, 0x55);
BWN_RF_WRITE(mac, r + R2057_TX0_TSSI_VCM, 0);
if (phy->rf_rev != 5)
BWN_RF_WRITE(mac, r + R2057_TX0_TSSIA, 0);
if (nphy->use_int_tx_iq_lo_cal) {
BWN_RF_WRITE(mac, r + R2057_TX0_TX_SSI_MUX, 0x6);
tmp = true ? 0x31 : 0x21; /* TODO */
BWN_RF_WRITE(mac, r + R2057_TX0_TSSIG, tmp);
}
BWN_RF_WRITE(mac, r + R2057_TX0_TSSI_MISC1, 0);
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
static void bwn_nphy_tx_cal_radio_setup(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t *save = nphy->tx_rx_cal_radio_saveregs;
uint16_t tmp;
uint8_t offset, i;
if (phy->rev >= 19) {
bwn_nphy_tx_cal_radio_setup_rev19(mac);
} else if (phy->rev >= 7) {
bwn_nphy_tx_cal_radio_setup_rev7(mac);
} else if (phy->rev >= 3) {
for (i = 0; i < 2; i++) {
tmp = (i == 0) ? 0x2000 : 0x3000;
offset = i * 11;
save[offset + 0] = BWN_RF_READ(mac, B2055_CAL_RVARCTL);
save[offset + 1] = BWN_RF_READ(mac, B2055_CAL_LPOCTL);
save[offset + 2] = BWN_RF_READ(mac, B2055_CAL_TS);
save[offset + 3] = BWN_RF_READ(mac, B2055_CAL_RCCALRTS);
save[offset + 4] = BWN_RF_READ(mac, B2055_CAL_RCALRTS);
save[offset + 5] = BWN_RF_READ(mac, B2055_PADDRV);
save[offset + 6] = BWN_RF_READ(mac, B2055_XOCTL1);
save[offset + 7] = BWN_RF_READ(mac, B2055_XOCTL2);
save[offset + 8] = BWN_RF_READ(mac, B2055_XOREGUL);
save[offset + 9] = BWN_RF_READ(mac, B2055_XOMISC);
save[offset + 10] = BWN_RF_READ(mac, B2055_PLL_LFC1);
if (bwn_current_band(mac) == BWN_BAND_5G) {
BWN_RF_WRITE(mac, tmp | B2055_CAL_RVARCTL, 0x0A);
BWN_RF_WRITE(mac, tmp | B2055_CAL_LPOCTL, 0x40);
BWN_RF_WRITE(mac, tmp | B2055_CAL_TS, 0x55);
BWN_RF_WRITE(mac, tmp | B2055_CAL_RCCALRTS, 0);
BWN_RF_WRITE(mac, tmp | B2055_CAL_RCALRTS, 0);
if (nphy->ipa5g_on) {
BWN_RF_WRITE(mac, tmp | B2055_PADDRV, 4);
BWN_RF_WRITE(mac, tmp | B2055_XOCTL1, 1);
} else {
BWN_RF_WRITE(mac, tmp | B2055_PADDRV, 0);
BWN_RF_WRITE(mac, tmp | B2055_XOCTL1, 0x2F);
}
BWN_RF_WRITE(mac, tmp | B2055_XOCTL2, 0);
} else {
BWN_RF_WRITE(mac, tmp | B2055_CAL_RVARCTL, 0x06);
BWN_RF_WRITE(mac, tmp | B2055_CAL_LPOCTL, 0x40);
BWN_RF_WRITE(mac, tmp | B2055_CAL_TS, 0x55);
BWN_RF_WRITE(mac, tmp | B2055_CAL_RCCALRTS, 0);
BWN_RF_WRITE(mac, tmp | B2055_CAL_RCALRTS, 0);
BWN_RF_WRITE(mac, tmp | B2055_XOCTL1, 0);
if (nphy->ipa2g_on) {
BWN_RF_WRITE(mac, tmp | B2055_PADDRV, 6);
BWN_RF_WRITE(mac, tmp | B2055_XOCTL2,
(mac->mac_phy.rev < 5) ? 0x11 : 0x01);
} else {
BWN_RF_WRITE(mac, tmp | B2055_PADDRV, 0);
BWN_RF_WRITE(mac, tmp | B2055_XOCTL2, 0);
}
}
BWN_RF_WRITE(mac, tmp | B2055_XOREGUL, 0);
BWN_RF_WRITE(mac, tmp | B2055_XOMISC, 0);
BWN_RF_WRITE(mac, tmp | B2055_PLL_LFC1, 0);
}
} else {
save[0] = BWN_RF_READ(mac, B2055_C1_TX_RF_IQCAL1);
BWN_RF_WRITE(mac, B2055_C1_TX_RF_IQCAL1, 0x29);
save[1] = BWN_RF_READ(mac, B2055_C1_TX_RF_IQCAL2);
BWN_RF_WRITE(mac, B2055_C1_TX_RF_IQCAL2, 0x54);
save[2] = BWN_RF_READ(mac, B2055_C2_TX_RF_IQCAL1);
BWN_RF_WRITE(mac, B2055_C2_TX_RF_IQCAL1, 0x29);
save[3] = BWN_RF_READ(mac, B2055_C2_TX_RF_IQCAL2);
BWN_RF_WRITE(mac, B2055_C2_TX_RF_IQCAL2, 0x54);
save[3] = BWN_RF_READ(mac, B2055_C1_PWRDET_RXTX);
save[4] = BWN_RF_READ(mac, B2055_C2_PWRDET_RXTX);
if (!(BWN_PHY_READ(mac, BWN_NPHY_BANDCTL) &
BWN_NPHY_BANDCTL_5GHZ)) {
BWN_RF_WRITE(mac, B2055_C1_PWRDET_RXTX, 0x04);
BWN_RF_WRITE(mac, B2055_C2_PWRDET_RXTX, 0x04);
} else {
BWN_RF_WRITE(mac, B2055_C1_PWRDET_RXTX, 0x20);
BWN_RF_WRITE(mac, B2055_C2_PWRDET_RXTX, 0x20);
}
if (mac->mac_phy.rev < 2) {
BWN_RF_SET(mac, B2055_C1_TX_BB_MXGM, 0x20);
BWN_RF_SET(mac, B2055_C2_TX_BB_MXGM, 0x20);
} else {
BWN_RF_MASK(mac, B2055_C1_TX_BB_MXGM, ~0x20);
BWN_RF_MASK(mac, B2055_C2_TX_BB_MXGM, ~0x20);
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
static void bwn_nphy_update_tx_cal_ladder(struct bwn_mac *mac, uint16_t core)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
int i;
uint16_t scale, entry;
uint16_t tmp = nphy->txcal_bbmult;
if (core == 0)
tmp >>= 8;
tmp &= 0xff;
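/*
 * Scale the LO and IQ calibration ladders by this core's bbmult
 * fraction and write them into N-PHY table 15.
 */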
for (i = 0; i < 18; i++) {
scale = (ladder_lo[i].percent * tmp) / 100;
entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
bwn_ntab_write(mac, BWN_NTAB16(15, i), entry);
scale = (ladder_iq[i].percent * tmp) / 100;
entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
bwn_ntab_write(mac, BWN_NTAB16(15, i + 32), entry);
}
}
static void bwn_nphy_pa_set_tx_dig_filter(struct bwn_mac *mac, uint16_t offset,
const int16_t *filter)
{
int i;
offset = BWN_PHY_N(offset);
for (i = 0; i < 15; i++, offset++)
BWN_PHY_WRITE(mac, offset, filter[i]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
static void bwn_nphy_ext_pa_set_tx_dig_filters(struct bwn_mac *mac)
{
bwn_nphy_pa_set_tx_dig_filter(mac, 0x2C5,
tbl_tx_filter_coef_rev4[2]);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
static void bwn_nphy_int_pa_set_tx_dig_filters(struct bwn_mac *mac)
{
/* BWN_NPHY_TXF_20CO_S0A1, BWN_NPHY_TXF_40CO_S0A1, unknown */
static const uint16_t offset[] = { 0x186, 0x195, 0x2C5 };
static const int16_t dig_filter_phy_rev16[] = {
-375, 136, -407, 208, -1527,
956, 93, 186, 93, 230,
-44, 230, 201, -191, 201,
};
int i;
for (i = 0; i < 3; i++)
bwn_nphy_pa_set_tx_dig_filter(mac, offset[i],
tbl_tx_filter_coef_rev4[i]);
/* Verified with BCM43227 and BCM43228 */
if (mac->mac_phy.rev == 16)
bwn_nphy_pa_set_tx_dig_filter(mac, 0x186, dig_filter_phy_rev16);
/* Verified with BCM43131 and BCM43217 */
if (mac->mac_phy.rev == 17) {
bwn_nphy_pa_set_tx_dig_filter(mac, 0x186, dig_filter_phy_rev16);
bwn_nphy_pa_set_tx_dig_filter(mac, 0x195,
tbl_tx_filter_coef_rev4[1]);
}
if (bwn_is_40mhz(mac)) {
bwn_nphy_pa_set_tx_dig_filter(mac, 0x186,
tbl_tx_filter_coef_rev4[3]);
} else {
if (bwn_current_band(mac) == BWN_BAND_5G)
bwn_nphy_pa_set_tx_dig_filter(mac, 0x186,
tbl_tx_filter_coef_rev4[5]);
if (bwn_get_chan(mac) == 14)
bwn_nphy_pa_set_tx_dig_filter(mac, 0x186,
tbl_tx_filter_coef_rev4[6]);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
static struct bwn_nphy_txgains bwn_nphy_get_tx_gains(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t curr_gain[2];
struct bwn_nphy_txgains target;
const uint32_t *table = NULL;
if (!nphy->txpwrctrl) {
int i;
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, true);
bwn_ntab_read_bulk(mac, BWN_NTAB16(7, 0x110), 2, curr_gain);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, false);
for (i = 0; i < 2; ++i) {
if (mac->mac_phy.rev >= 7) {
target.ipa[i] = curr_gain[i] & 0x0007;
target.pad[i] = (curr_gain[i] & 0x00F8) >> 3;
target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
target.tx_lpf[i] = (curr_gain[i] & 0x8000) >> 15;
} else if (mac->mac_phy.rev >= 3) {
target.ipa[i] = curr_gain[i] & 0x000F;
target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
} else {
target.ipa[i] = curr_gain[i] & 0x0003;
target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
}
}
} else {
int i;
uint16_t index[2];
index[0] = (BWN_PHY_READ(mac, BWN_NPHY_C1_TXPCTL_STAT) &
BWN_NPHY_TXPCTL_STAT_BIDX) >>
BWN_NPHY_TXPCTL_STAT_BIDX_SHIFT;
index[1] = (BWN_PHY_READ(mac, BWN_NPHY_C2_TXPCTL_STAT) &
BWN_NPHY_TXPCTL_STAT_BIDX) >>
BWN_NPHY_TXPCTL_STAT_BIDX_SHIFT;
for (i = 0; i < 2; ++i) {
table = bwn_nphy_get_tx_gain_table(mac);
if (!table)
break;
if (mac->mac_phy.rev >= 7) {
target.ipa[i] = (table[index[i]] >> 16) & 0x7;
target.pad[i] = (table[index[i]] >> 19) & 0x1F;
target.pga[i] = (table[index[i]] >> 24) & 0xF;
target.txgm[i] = (table[index[i]] >> 28) & 0x7;
target.tx_lpf[i] = (table[index[i]] >> 31) & 0x1;
} else if (mac->mac_phy.rev >= 3) {
target.ipa[i] = (table[index[i]] >> 16) & 0xF;
target.pad[i] = (table[index[i]] >> 20) & 0xF;
target.pga[i] = (table[index[i]] >> 24) & 0xF;
target.txgm[i] = (table[index[i]] >> 28) & 0xF;
} else {
target.ipa[i] = (table[index[i]] >> 16) & 0x3;
target.pad[i] = (table[index[i]] >> 18) & 0x3;
target.pga[i] = (table[index[i]] >> 20) & 0x7;
target.txgm[i] = (table[index[i]] >> 23) & 0x7;
}
}
}
return target;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
static void bwn_nphy_tx_cal_phy_cleanup(struct bwn_mac *mac)
{
uint16_t *regs = mac->mac_phy.phy_n->tx_rx_cal_phy_saveregs;
if (mac->mac_phy.rev >= 3) {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C1, regs[0]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C2, regs[1]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER1, regs[2]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, regs[3]);
BWN_PHY_WRITE(mac, BWN_NPHY_BBCFG, regs[4]);
bwn_ntab_write(mac, BWN_NTAB16(8, 3), regs[5]);
bwn_ntab_write(mac, BWN_NTAB16(8, 19), regs[6]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, regs[7]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, regs[8]);
BWN_PHY_WRITE(mac, BWN_NPHY_PAPD_EN0, regs[9]);
BWN_PHY_WRITE(mac, BWN_NPHY_PAPD_EN1, regs[10]);
bwn_nphy_reset_cca(mac);
} else {
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, regs[2]);
bwn_ntab_write(mac, BWN_NTAB16(8, 2), regs[3]);
bwn_ntab_write(mac, BWN_NTAB16(8, 18), regs[4]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, regs[5]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, regs[6]);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
static void bwn_nphy_tx_cal_phy_setup(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t *regs = mac->mac_phy.phy_n->tx_rx_cal_phy_saveregs;
uint16_t tmp;
regs[0] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C1);
regs[1] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_C2);
if (mac->mac_phy.rev >= 3) {
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);
tmp = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER1);
regs[2] = tmp;
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER1, tmp | 0x0600);
tmp = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER);
regs[3] = tmp;
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, tmp | 0x0600);
regs[4] = BWN_PHY_READ(mac, BWN_NPHY_BBCFG);
BWN_PHY_MASK(mac, BWN_NPHY_BBCFG,
~BWN_NPHY_BBCFG_RSTRX & 0xFFFF);
tmp = bwn_ntab_read(mac, BWN_NTAB16(8, 3));
regs[5] = tmp;
bwn_ntab_write(mac, BWN_NTAB16(8, 3), 0);
tmp = bwn_ntab_read(mac, BWN_NTAB16(8, 19));
regs[6] = tmp;
bwn_ntab_write(mac, BWN_NTAB16(8, 19), 0);
regs[7] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC1);
regs[8] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC2);
if (!nphy->use_int_tx_iq_lo_cal)
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_PA,
1, 3);
else
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_PA,
0, 3);
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_TRSW, 2, 1);
bwn_nphy_rf_ctl_intc_override(mac, N_INTC_OVERRIDE_TRSW, 8, 2);
regs[9] = BWN_PHY_READ(mac, BWN_NPHY_PAPD_EN0);
regs[10] = BWN_PHY_READ(mac, BWN_NPHY_PAPD_EN1);
BWN_PHY_MASK(mac, BWN_NPHY_PAPD_EN0, ~0x0001);
BWN_PHY_MASK(mac, BWN_NPHY_PAPD_EN1, ~0x0001);
tmp = bwn_nphy_read_lpf_ctl(mac, 0);
if (phy->rev >= 19)
bwn_nphy_rf_ctl_override_rev19(mac, 0x80, tmp, 0, false,
1);
else if (phy->rev >= 7)
bwn_nphy_rf_ctl_override_rev7(mac, 0x80, tmp, 0, false,
1);
if (nphy->use_int_tx_iq_lo_cal && true /* FIXME */) {
if (phy->rev >= 19) {
bwn_nphy_rf_ctl_override_rev19(mac, 0x8, 0, 0x3,
false, 0);
} else if (phy->rev >= 8) {
bwn_nphy_rf_ctl_override_rev7(mac, 0x8, 0, 0x3,
false, 0);
} else if (phy->rev == 7) {
BWN_RF_SETMASK(mac, R2057_OVR_REG0, 1 << 4, 1 << 4);
if (bwn_current_band(mac) == BWN_BAND_2G) {
BWN_RF_SETMASK(mac, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0);
BWN_RF_SETMASK(mac, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0);
} else {
BWN_RF_SETMASK(mac, R2057_IPA5G_CASCOFFV_PU_CORE0, ~1, 0);
BWN_RF_SETMASK(mac, R2057_IPA5G_CASCOFFV_PU_CORE1, ~1, 0);
}
}
}
} else {
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
BWN_PHY_SETMASK(mac, BWN_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
tmp = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER);
regs[2] = tmp;
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, tmp | 0x3000);
tmp = bwn_ntab_read(mac, BWN_NTAB16(8, 2));
regs[3] = tmp;
tmp |= 0x2000;
bwn_ntab_write(mac, BWN_NTAB16(8, 2), tmp);
tmp = bwn_ntab_read(mac, BWN_NTAB16(8, 18));
regs[4] = tmp;
tmp |= 0x2000;
bwn_ntab_write(mac, BWN_NTAB16(8, 18), tmp);
regs[5] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC1);
regs[6] = BWN_PHY_READ(mac, BWN_NPHY_RFCTL_INTC2);
if (bwn_current_band(mac) == BWN_BAND_5G)
tmp = 0x0180;
else
tmp = 0x0120;
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, tmp);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, tmp);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
static void bwn_nphy_save_cal(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
struct bwn_phy_n_iq_comp *rxcal_coeffs = NULL;
uint16_t *txcal_radio_regs = NULL;
struct bwn_chanspec *iqcal_chanspec;
uint16_t *table = NULL;
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 1);
if (bwn_current_band(mac) == BWN_BAND_2G) {
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
iqcal_chanspec = &nphy->iqcal_chanspec_2G;
table = nphy->cal_cache.txcal_coeffs_2G;
} else {
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
iqcal_chanspec = &nphy->iqcal_chanspec_5G;
table = nphy->cal_cache.txcal_coeffs_5G;
}
bwn_nphy_rx_iq_coeffs(mac, false, rxcal_coeffs);
/* TODO use some definitions */
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
txcal_radio_regs[0] = BWN_RF_READ(mac,
R2057_TX0_LOFT_FINE_I);
txcal_radio_regs[1] = BWN_RF_READ(mac,
R2057_TX0_LOFT_FINE_Q);
txcal_radio_regs[4] = BWN_RF_READ(mac,
R2057_TX0_LOFT_COARSE_I);
txcal_radio_regs[5] = BWN_RF_READ(mac,
R2057_TX0_LOFT_COARSE_Q);
txcal_radio_regs[2] = BWN_RF_READ(mac,
R2057_TX1_LOFT_FINE_I);
txcal_radio_regs[3] = BWN_RF_READ(mac,
R2057_TX1_LOFT_FINE_Q);
txcal_radio_regs[6] = BWN_RF_READ(mac,
R2057_TX1_LOFT_COARSE_I);
txcal_radio_regs[7] = BWN_RF_READ(mac,
R2057_TX1_LOFT_COARSE_Q);
} else if (phy->rev >= 3) {
txcal_radio_regs[0] = BWN_RF_READ(mac, 0x2021);
txcal_radio_regs[1] = BWN_RF_READ(mac, 0x2022);
txcal_radio_regs[2] = BWN_RF_READ(mac, 0x3021);
txcal_radio_regs[3] = BWN_RF_READ(mac, 0x3022);
txcal_radio_regs[4] = BWN_RF_READ(mac, 0x2023);
txcal_radio_regs[5] = BWN_RF_READ(mac, 0x2024);
txcal_radio_regs[6] = BWN_RF_READ(mac, 0x3023);
txcal_radio_regs[7] = BWN_RF_READ(mac, 0x3024);
} else {
txcal_radio_regs[0] = BWN_RF_READ(mac, 0x8B);
txcal_radio_regs[1] = BWN_RF_READ(mac, 0xBA);
txcal_radio_regs[2] = BWN_RF_READ(mac, 0x8D);
txcal_radio_regs[3] = BWN_RF_READ(mac, 0xBC);
}
iqcal_chanspec->center_freq = bwn_get_centre_freq(mac);
iqcal_chanspec->channel_type = bwn_get_chan_type(mac, NULL);
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 80), 8, table);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, 0);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
static void bwn_nphy_restore_cal(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint16_t coef[4];
uint16_t *loft = NULL;
uint16_t *table = NULL;
int i;
uint16_t *txcal_radio_regs = NULL;
struct bwn_phy_n_iq_comp *rxcal_coeffs = NULL;
if (bwn_current_band(mac) == BWN_BAND_2G) {
if (!nphy->iqcal_chanspec_2G.center_freq)
return;
table = nphy->cal_cache.txcal_coeffs_2G;
loft = &nphy->cal_cache.txcal_coeffs_2G[5];
} else {
if (!nphy->iqcal_chanspec_5G.center_freq)
return;
table = nphy->cal_cache.txcal_coeffs_5G;
loft = &nphy->cal_cache.txcal_coeffs_5G[5];
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 80), 4, table);
for (i = 0; i < 4; i++) {
if (mac->mac_phy.rev >= 3)
table[i] = coef[i];
else
coef[i] = 0;
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 88), 4, coef);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 85), 2, loft);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 93), 2, loft);
if (mac->mac_phy.rev < 2)
bwn_nphy_tx_iq_workaround(mac);
if (bwn_current_band(mac) == BWN_BAND_2G) {
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
} else {
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
}
/* TODO use some definitions */
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
BWN_RF_WRITE(mac, R2057_TX0_LOFT_FINE_I,
txcal_radio_regs[0]);
BWN_RF_WRITE(mac, R2057_TX0_LOFT_FINE_Q,
txcal_radio_regs[1]);
BWN_RF_WRITE(mac, R2057_TX0_LOFT_COARSE_I,
txcal_radio_regs[4]);
BWN_RF_WRITE(mac, R2057_TX0_LOFT_COARSE_Q,
txcal_radio_regs[5]);
BWN_RF_WRITE(mac, R2057_TX1_LOFT_FINE_I,
txcal_radio_regs[2]);
BWN_RF_WRITE(mac, R2057_TX1_LOFT_FINE_Q,
txcal_radio_regs[3]);
BWN_RF_WRITE(mac, R2057_TX1_LOFT_COARSE_I,
txcal_radio_regs[6]);
BWN_RF_WRITE(mac, R2057_TX1_LOFT_COARSE_Q,
txcal_radio_regs[7]);
} else if (phy->rev >= 3) {
BWN_RF_WRITE(mac, 0x2021, txcal_radio_regs[0]);
BWN_RF_WRITE(mac, 0x2022, txcal_radio_regs[1]);
BWN_RF_WRITE(mac, 0x3021, txcal_radio_regs[2]);
BWN_RF_WRITE(mac, 0x3022, txcal_radio_regs[3]);
BWN_RF_WRITE(mac, 0x2023, txcal_radio_regs[4]);
BWN_RF_WRITE(mac, 0x2024, txcal_radio_regs[5]);
BWN_RF_WRITE(mac, 0x3023, txcal_radio_regs[6]);
BWN_RF_WRITE(mac, 0x3024, txcal_radio_regs[7]);
} else {
BWN_RF_WRITE(mac, 0x8B, txcal_radio_regs[0]);
BWN_RF_WRITE(mac, 0xBA, txcal_radio_regs[1]);
BWN_RF_WRITE(mac, 0x8D, txcal_radio_regs[2]);
BWN_RF_WRITE(mac, 0xBC, txcal_radio_regs[3]);
}
bwn_nphy_rx_iq_coeffs(mac, true, rxcal_coeffs);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
static int bwn_nphy_cal_tx_iq_lo(struct bwn_mac *mac,
struct bwn_nphy_txgains target,
bool full, bool mphase)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
int i;
int error = 0;
int freq;
bool avoid = false;
uint8_t length;
uint16_t tmp, core, type, count, max, numb, last = 0, cmd;
const uint16_t *table;
bool phy6or5x;
uint16_t buffer[11];
uint16_t diq_start = 0;
uint16_t save[2];
uint16_t gain[2];
struct bwn_nphy_iqcal_params params[2];
bool updated[2] = { };
bwn_nphy_stay_in_carrier_search(mac, true);
if (mac->mac_phy.rev >= 4) {
avoid = nphy->hang_avoid;
nphy->hang_avoid = false;
}
bwn_ntab_read_bulk(mac, BWN_NTAB16(7, 0x110), 2, save);
for (i = 0; i < 2; i++) {
bwn_nphy_iq_cal_gain_params(mac, i, target, &params[i]);
gain[i] = params[i].cal_gain;
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(7, 0x110), 2, gain);
bwn_nphy_tx_cal_radio_setup(mac);
bwn_nphy_tx_cal_phy_setup(mac);
phy6or5x = mac->mac_phy.rev >= 6 ||
(mac->mac_phy.rev == 5 && nphy->ipa2g_on &&
bwn_current_band(mac) == BWN_BAND_2G);
if (phy6or5x) {
if (bwn_is_40mhz(mac)) {
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 0), 18,
tbl_tx_iqlo_cal_loft_ladder_40);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 32), 18,
tbl_tx_iqlo_cal_iqimb_ladder_40);
} else {
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 0), 18,
tbl_tx_iqlo_cal_loft_ladder_20);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 32), 18,
tbl_tx_iqlo_cal_iqimb_ladder_20);
}
}
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
BWN_PHY_WRITE(mac, BWN_NPHY_IQLOCAL_CMDGCTL, 0x8AD9);
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
}
if (!bwn_is_40mhz(mac))
freq = 2500;
else
freq = 5000;
if (nphy->mphase_cal_phase_id > 2)
bwn_nphy_run_samples(mac, (bwn_is_40mhz(mac) ? 40 : 20) * 8,
0xFFFF, 0, true, false, false);
else
error = bwn_nphy_tx_tone(mac, freq, 250, true, false, false);
if (error == 0) {
if (nphy->mphase_cal_phase_id > 2) {
table = nphy->mphase_txcal_bestcoeffs;
length = 11;
if (mac->mac_phy.rev < 3)
length -= 2;
} else {
if (!full && nphy->txiqlocal_coeffsvalid) {
table = nphy->txiqlocal_bestc;
length = 11;
if (mac->mac_phy.rev < 3)
length -= 2;
} else {
full = true;
if (mac->mac_phy.rev >= 3) {
table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
length = BWN_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
} else {
table = tbl_tx_iqlo_cal_startcoefs;
length = BWN_NTAB_TX_IQLO_CAL_STARTCOEFS;
}
}
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 64), length, table);
if (full) {
if (mac->mac_phy.rev >= 3)
max = BWN_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
else
max = BWN_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
} else {
if (mac->mac_phy.rev >= 3)
max = BWN_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
else
max = BWN_NTAB_TX_IQLO_CAL_CMDS_RECAL;
}
if (mphase) {
count = nphy->mphase_txcal_cmdidx;
numb = min(max,
(uint16_t)(count + nphy->mphase_txcal_numcmds));
} else {
count = 0;
numb = max;
}
for (; count < numb; count++) {
if (full) {
if (mac->mac_phy.rev >= 3)
cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
else
cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
} else {
if (mac->mac_phy.rev >= 3)
cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
else
cmd = tbl_tx_iqlo_cal_cmds_recal[count];
}
core = (cmd & 0x3000) >> 12;
type = (cmd & 0x0F00) >> 8;
if (phy6or5x && updated[core] == 0) {
bwn_nphy_update_tx_cal_ladder(mac, core);
updated[core] = true;
}
tmp = (params[core].ncorr[type] << 8) | 0x66;
BWN_PHY_WRITE(mac, BWN_NPHY_IQLOCAL_CMDNNUM, tmp);
if (type == 1 || type == 3 || type == 4) {
buffer[0] = bwn_ntab_read(mac,
BWN_NTAB16(15, 69 + core));
diq_start = buffer[0];
buffer[0] = 0;
bwn_ntab_write(mac, BWN_NTAB16(15, 69 + core),
0);
}
BWN_PHY_WRITE(mac, BWN_NPHY_IQLOCAL_CMD, cmd);
for (i = 0; i < 2000; i++) {
tmp = BWN_PHY_READ(mac, BWN_NPHY_IQLOCAL_CMD);
if (tmp & 0xC000)
break;
DELAY(10);
}
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 96), length,
buffer);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 64), length,
buffer);
if (type == 1 || type == 3 || type == 4)
buffer[0] = diq_start;
}
if (mphase)
nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
last = (mac->mac_phy.rev < 3) ? 6 : 7;
if (!mphase || nphy->mphase_cal_phase_id == last) {
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 96), 4, buffer);
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 80), 4, buffer);
if (mac->mac_phy.rev < 3) {
buffer[0] = 0;
buffer[1] = 0;
buffer[2] = 0;
buffer[3] = 0;
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 88), 4,
buffer);
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 101), 2,
buffer);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 85), 2,
buffer);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 93), 2,
buffer);
length = 11;
if (mac->mac_phy.rev < 3)
length -= 2;
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 96), length,
nphy->txiqlocal_bestc);
nphy->txiqlocal_coeffsvalid = true;
nphy->txiqlocal_chanspec.center_freq =
bwn_get_centre_freq(mac);
nphy->txiqlocal_chanspec.channel_type = bwn_get_chan_type(mac, NULL);
} else {
length = 11;
if (mac->mac_phy.rev < 3)
length -= 2;
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 96), length,
nphy->mphase_txcal_bestcoeffs);
}
bwn_nphy_stop_playback(mac);
BWN_PHY_WRITE(mac, BWN_NPHY_IQLOCAL_CMDGCTL, 0);
}
bwn_nphy_tx_cal_phy_cleanup(mac);
bwn_ntab_write_bulk(mac, BWN_NTAB16(7, 0x110), 2, save);
if (mac->mac_phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
bwn_nphy_tx_iq_workaround(mac);
if (mac->mac_phy.rev >= 4)
nphy->hang_avoid = avoid;
bwn_nphy_stay_in_carrier_search(mac, false);
return error;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
static void bwn_nphy_reapply_tx_cal_coeffs(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
uint8_t i;
uint16_t buffer[7];
bool equal = true;
if (!nphy->txiqlocal_coeffsvalid ||
nphy->txiqlocal_chanspec.center_freq != bwn_get_centre_freq(mac) ||
nphy->txiqlocal_chanspec.channel_type != bwn_get_chan_type(mac, NULL))
return;
bwn_ntab_read_bulk(mac, BWN_NTAB16(15, 80), 7, buffer);
for (i = 0; i < 4; i++) {
if (buffer[i] != nphy->txiqlocal_bestc[i]) {
equal = false;
break;
}
}
if (!equal) {
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 80), 4,
nphy->txiqlocal_bestc);
for (i = 0; i < 4; i++)
buffer[i] = 0;
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 88), 4,
buffer);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 85), 2,
&nphy->txiqlocal_bestc[5]);
bwn_ntab_write_bulk(mac, BWN_NTAB16(15, 93), 2,
&nphy->txiqlocal_bestc[5]);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
static int bwn_nphy_rev2_cal_rx_iq(struct bwn_mac *mac,
struct bwn_nphy_txgains target, uint8_t type, bool debug)
{
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
int i, j, index;
uint8_t rfctl[2];
uint8_t afectl_core;
uint16_t tmp[6];
uint16_t cur_hpf1, cur_hpf2, cur_lna;
uint32_t real, imag;
bwn_band_t band;
uint8_t use;
uint16_t cur_hpf;
uint16_t lna[3] = { 3, 3, 1 };
uint16_t hpf1[3] = { 7, 2, 0 };
uint16_t hpf2[3] = { 2, 0, 0 };
uint32_t power[3] = { };
uint16_t gain_save[2];
uint16_t cal_gain[2];
struct bwn_nphy_iqcal_params cal_params[2];
struct bwn_nphy_iq_est est;
int ret = 0;
bool playtone = true;
int desired = 13;
bwn_nphy_stay_in_carrier_search(mac, 1);
if (mac->mac_phy.rev < 2)
bwn_nphy_reapply_tx_cal_coeffs(mac);
bwn_ntab_read_bulk(mac, BWN_NTAB16(7, 0x110), 2, gain_save);
for (i = 0; i < 2; i++) {
bwn_nphy_iq_cal_gain_params(mac, i, target, &cal_params[i]);
cal_gain[i] = cal_params[i].cal_gain;
}
bwn_ntab_write_bulk(mac, BWN_NTAB16(7, 0x110), 2, cal_gain);
for (i = 0; i < 2; i++) {
if (i == 0) {
rfctl[0] = BWN_NPHY_RFCTL_INTC1;
rfctl[1] = BWN_NPHY_RFCTL_INTC2;
afectl_core = BWN_NPHY_AFECTL_C1;
} else {
rfctl[0] = BWN_NPHY_RFCTL_INTC2;
rfctl[1] = BWN_NPHY_RFCTL_INTC1;
afectl_core = BWN_NPHY_AFECTL_C2;
}
tmp[1] = BWN_PHY_READ(mac, BWN_NPHY_RFSEQCA);
tmp[2] = BWN_PHY_READ(mac, afectl_core);
tmp[3] = BWN_PHY_READ(mac, BWN_NPHY_AFECTL_OVER);
tmp[4] = BWN_PHY_READ(mac, rfctl[0]);
tmp[5] = BWN_PHY_READ(mac, rfctl[1]);
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA,
~BWN_NPHY_RFSEQCA_RXDIS & 0xFFFF,
((1 - i) << BWN_NPHY_RFSEQCA_RXDIS_SHIFT));
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA, ~BWN_NPHY_RFSEQCA_TXEN,
(1 - i));
BWN_PHY_SET(mac, afectl_core, 0x0006);
BWN_PHY_SET(mac, BWN_NPHY_AFECTL_OVER, 0x0006);
band = bwn_current_band(mac);
if (nphy->rxcalparams & 0xFF000000) {
if (band == BWN_BAND_5G)
BWN_PHY_WRITE(mac, rfctl[0], 0x140);
else
BWN_PHY_WRITE(mac, rfctl[0], 0x110);
} else {
if (band == BWN_BAND_5G)
BWN_PHY_WRITE(mac, rfctl[0], 0x180);
else
BWN_PHY_WRITE(mac, rfctl[0], 0x120);
}
if (band == BWN_BAND_5G)
BWN_PHY_WRITE(mac, rfctl[1], 0x148);
else
BWN_PHY_WRITE(mac, rfctl[1], 0x114);
if (nphy->rxcalparams & 0x10000) {
BWN_RF_SETMASK(mac, B2055_C1_GENSPARE2, 0xFC,
(i + 1));
BWN_RF_SETMASK(mac, B2055_C2_GENSPARE2, 0xFC,
(2 - i));
}
for (j = 0; j < 4; j++) {
if (j < 3) {
cur_lna = lna[j];
cur_hpf1 = hpf1[j];
cur_hpf2 = hpf2[j];
} else {
if (power[1] > 10000) {
use = 1;
cur_hpf = cur_hpf1;
index = 2;
} else {
if (power[0] > 10000) {
use = 1;
cur_hpf = cur_hpf1;
index = 1;
} else {
index = 0;
use = 2;
cur_hpf = cur_hpf2;
}
}
cur_lna = lna[index];
cur_hpf1 = hpf1[index];
cur_hpf2 = hpf2[index];
cur_hpf += desired - bwn_hweight32(power[index]);
cur_hpf = bwn_clamp_val(cur_hpf, 0, 10);
if (use == 1)
cur_hpf1 = cur_hpf;
else
cur_hpf2 = cur_hpf;
}
tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
(cur_lna << 2));
bwn_nphy_rf_ctl_override(mac, 0x400, tmp[0], 3,
false);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
bwn_nphy_stop_playback(mac);
if (playtone) {
ret = bwn_nphy_tx_tone(mac, 4000,
(nphy->rxcalparams & 0xFFFF),
false, false, true);
playtone = false;
} else {
bwn_nphy_run_samples(mac, 160, 0xFFFF, 0, false,
false, true);
}
if (ret == 0) {
if (j < 3) {
bwn_nphy_rx_iq_est(mac, &est, 1024, 32,
false);
if (i == 0) {
real = est.i0_pwr;
imag = est.q0_pwr;
} else {
real = est.i1_pwr;
imag = est.q1_pwr;
}
power[i] = ((real + imag) / 1024) + 1;
} else {
bwn_nphy_calc_rx_iq_comp(mac, 1 << i);
}
bwn_nphy_stop_playback(mac);
}
if (ret != 0)
break;
}
BWN_RF_MASK(mac, B2055_C1_GENSPARE2, 0xFC);
BWN_RF_MASK(mac, B2055_C2_GENSPARE2, 0xFC);
BWN_PHY_WRITE(mac, rfctl[1], tmp[5]);
BWN_PHY_WRITE(mac, rfctl[0], tmp[4]);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, tmp[3]);
BWN_PHY_WRITE(mac, afectl_core, tmp[2]);
BWN_PHY_WRITE(mac, BWN_NPHY_RFSEQCA, tmp[1]);
if (ret != 0)
break;
}
bwn_nphy_rf_ctl_override(mac, 0x400, 0, 3, true);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
bwn_ntab_write_bulk(mac, BWN_NTAB16(7, 0x110), 2, gain_save);
bwn_nphy_stay_in_carrier_search(mac, 0);
return ret;
}
static int bwn_nphy_rev3_cal_rx_iq(struct bwn_mac *mac,
struct bwn_nphy_txgains target, uint8_t type, bool debug)
{
return -1;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
static int bwn_nphy_cal_rx_iq(struct bwn_mac *mac,
struct bwn_nphy_txgains target, uint8_t type, bool debug)
{
if (mac->mac_phy.rev >= 7)
type = 0;
if (mac->mac_phy.rev >= 3)
return bwn_nphy_rev3_cal_rx_iq(mac, target, type, debug);
else
return bwn_nphy_rev2_cal_rx_iq(mac, target, type, debug);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
static void bwn_nphy_set_rx_core_state(struct bwn_mac *mac, uint8_t mask)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = phy->phy_n;
/* uint16_t buf[16]; it's rev3+ */
nphy->phyrxchain = mask;
if (0 /* FIXME clk */)
return;
bwn_mac_suspend(mac);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, true);
BWN_PHY_SETMASK(mac, BWN_NPHY_RFSEQCA, ~BWN_NPHY_RFSEQCA_RXEN,
(mask & 0x3) << BWN_NPHY_RFSEQCA_RXEN_SHIFT);
if ((mask & 0x3) != 0x3) {
BWN_PHY_WRITE(mac, BWN_NPHY_HPANT_SWTHRES, 1);
if (mac->mac_phy.rev >= 3) {
/* TODO */
}
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_HPANT_SWTHRES, 0x1E);
if (mac->mac_phy.rev >= 3) {
/* TODO */
}
}
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
if (nphy->hang_avoid)
bwn_nphy_stay_in_carrier_search(mac, false);
bwn_mac_enable(mac);
}
bwn_txpwr_result_t
bwn_nphy_op_recalc_txpower(struct bwn_mac *mac, bool ignore_tssi)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
struct ieee80211_channel *channel = bwn_get_channel(mac);
struct bwn_softc *sc = mac->mac_sc;
struct bwn_ppr *ppr = &nphy->tx_pwr_max_ppr;
uint8_t max; /* qdBm */
bool tx_pwr_state;
if (nphy->tx_pwr_last_recalc_freq == bwn_get_centre_freq(mac) &&
nphy->tx_pwr_last_recalc_limit == phy->txpower)
return BWN_TXPWR_RES_DONE;
/* Make sure we have a clean PPR */
bwn_ppr_clear(mac, ppr);
/* HW limitations */
bwn_ppr_load_max_from_sprom(mac, ppr, BWN_PHY_BAND_2G);
/* XXX TODO: other bands? */
/* Regulatory & user settings */
max = INT_TO_Q52(bwn_get_chan_power(mac, channel));
/* uint8_t */
if (phy->txpower)
max = min(max, INT_TO_Q52(phy->txpower));
bwn_ppr_apply_max(mac, ppr, max);
DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT_POWER,
"Calculated TX power: " Q52_FMT "\n",
Q52_ARG(bwn_ppr_get_max(mac, ppr)));
/* TODO: Enable this once we get gains working */
#if 0
/* Some extra gains */
hw_gain = 6; /* N-PHY specific */
if (bwn_current_band(mac) == BWN_BAND_2G)
hw_gain += sprom->antenna_gain.a0;
else
hw_gain += sprom->antenna_gain.a1;
bwn_ppr_add(mac, ppr, -hw_gain);
#endif
/* Make sure we didn't go too low */
bwn_ppr_apply_min(mac, ppr, INT_TO_Q52(8));
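/*
 * Illustrative note, not from the original source: the power math above is
 * done in "qdBm" (quarter-dBm, Q5.2 fixed point), assuming INT_TO_Q52()
 * simply shifts a whole-dBm value left by two bits.  Under that assumption a
 * 20 dBm regulatory limit becomes 80 qdBm, and the INT_TO_Q52(8) floor above
 * corresponds to 8 dBm == 32 qdBm.
 */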
/* Apply */
tx_pwr_state = nphy->txpwrctrl;
bwn_mac_suspend(mac);
bwn_nphy_tx_power_ctl_setup(mac);
if (siba_get_revid(sc->sc_dev) == 11 || siba_get_revid(sc->sc_dev) == 12) {
BWN_WRITE_SETMASK4(mac, BWN_MACCTL, ~0, BWN_MACCTL_PHY_LOCK);
BWN_READ_4(mac, BWN_MACCTL);
DELAY(1);
}
bwn_nphy_tx_power_ctrl(mac, nphy->txpwrctrl);
if (siba_get_revid(sc->sc_dev) == 11 || siba_get_revid(sc->sc_dev) == 12)
BWN_WRITE_SETMASK4(mac, BWN_MACCTL, ~BWN_MACCTL_PHY_LOCK, 0);
bwn_mac_enable(mac);
nphy->tx_pwr_last_recalc_freq = bwn_get_centre_freq(mac);
nphy->tx_pwr_last_recalc_limit = phy->txpower;
return BWN_TXPWR_RES_DONE;
}
/**************************************************
* N-PHY init
**************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
static void bwn_nphy_update_mimo_config(struct bwn_mac *mac, int32_t preamble)
{
uint16_t mimocfg = BWN_PHY_READ(mac, BWN_NPHY_MIMOCFG);
mimocfg |= BWN_NPHY_MIMOCFG_AUTO;
if (preamble == 1)
mimocfg |= BWN_NPHY_MIMOCFG_GFMIX;
else
mimocfg &= ~BWN_NPHY_MIMOCFG_GFMIX;
BWN_PHY_WRITE(mac, BWN_NPHY_MIMOCFG, mimocfg);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
static void bwn_nphy_bphy_init(struct bwn_mac *mac)
{
unsigned int i;
uint16_t val;
val = 0x1E1F;
for (i = 0; i < 16; i++) {
BWN_PHY_WRITE(mac, BWN_PHY_N_BMODE(0x88 + i), val);
val -= 0x202;
}
val = 0x3E3F;
for (i = 0; i < 16; i++) {
BWN_PHY_WRITE(mac, BWN_PHY_N_BMODE(0x98 + i), val);
val -= 0x202;
}
BWN_PHY_WRITE(mac, BWN_PHY_N_BMODE(0x38), 0x668);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
static void bwn_nphy_superswitch_init(struct bwn_mac *mac, bool init)
{
struct bwn_softc *sc = mac->mac_sc;
if (mac->mac_phy.rev >= 7)
return;
if (mac->mac_phy.rev >= 3) {
if (!init)
return;
if (0 /* FIXME */) {
bwn_ntab_write(mac, BWN_NTAB16(9, 2), 0x211);
bwn_ntab_write(mac, BWN_NTAB16(9, 3), 0x222);
bwn_ntab_write(mac, BWN_NTAB16(9, 8), 0x144);
bwn_ntab_write(mac, BWN_NTAB16(9, 12), 0x188);
}
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_GPIO_LOOEN, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_GPIO_HIOEN, 0);
siba_gpio_set(sc->sc_dev, 0xfc00);
BWN_WRITE_SETMASK4(mac, BWN_MACCTL, ~BWN_MACCTL_GPOUT_MASK, 0);
BWN_WRITE_SETMASK2(mac, BWN_GPIO_MASK, ~0, 0xFC00);
BWN_WRITE_SETMASK2(mac, BWN_GPIO_CONTROL, (~0xFC00 & 0xFFFF),
0);
if (init) {
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */
static int bwn_phy_initn(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = phy->phy_n;
uint8_t tx_pwr_state;
struct bwn_nphy_txgains target;
uint16_t tmp;
bwn_band_t tmp2;
bool do_rssi_cal;
uint16_t clip[2];
bool do_cal = false;
if ((mac->mac_phy.rev >= 3) &&
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) &&
(bwn_current_band(mac) == BWN_BAND_2G)) {
siba_cc_set32(sc->sc_dev, SIBA_CC_CHIPCTL, 0x40);
}
nphy->use_int_tx_iq_lo_cal = bwn_nphy_ipa(mac) ||
phy->rev >= 7 ||
(phy->rev >= 5 &&
siba_sprom_get_bf2_hi(sc->sc_dev) & BWN_BFH2_INTERNDET_TXIQCAL);
nphy->deaf_count = 0;
bwn_nphy_tables_init(mac);
nphy->crsminpwr_adjusted = false;
nphy->noisevars_adjusted = false;
/* Clear all overrides */
if (mac->mac_phy.rev >= 3) {
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B1S1, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_OVER, 0);
if (phy->rev >= 7) {
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER3, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER4, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER5, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_REV7_RF_CTL_OVER6, 0);
}
if (phy->rev >= 19) {
/* TODO */
}
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B1S0, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_TXF_40CO_B32S1, 0);
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_OVER, 0);
}
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC1, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC2, 0);
if (mac->mac_phy.rev < 6) {
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC3, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_RFCTL_INTC4, 0);
}
BWN_PHY_MASK(mac, BWN_NPHY_RFSEQMODE,
~(BWN_NPHY_RFSEQMODE_CAOVER |
BWN_NPHY_RFSEQMODE_TROVER));
if (mac->mac_phy.rev >= 3)
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER1, 0);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, 0);
if (mac->mac_phy.rev <= 2) {
tmp = (mac->mac_phy.rev == 2) ? 0x3B : 0x40;
BWN_PHY_SETMASK(mac, BWN_NPHY_BPHY_CTL3,
~BWN_NPHY_BPHY_CTL3_SCALE,
tmp << BWN_NPHY_BPHY_CTL3_SCALE_SHIFT);
}
BWN_PHY_WRITE(mac, BWN_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
BWN_PHY_WRITE(mac, BWN_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
if (siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_SKWRKFEM_BRD ||
(siba_get_pci_subvendor(sc->sc_dev) == PCI_VENDOR_APPLE &&
siba_get_pci_subdevice(sc->sc_dev) == BCMA_BOARD_TYPE_BCM943224M93))
BWN_PHY_WRITE(mac, BWN_NPHY_TXREALFD, 0xA0);
else
BWN_PHY_WRITE(mac, BWN_NPHY_TXREALFD, 0xB8);
BWN_PHY_WRITE(mac, BWN_NPHY_MIMO_CRSTXEXT, 0xC8);
BWN_PHY_WRITE(mac, BWN_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
BWN_PHY_WRITE(mac, BWN_NPHY_TXRIFS_FRDEL, 0x30);
if (phy->rev < 8)
bwn_nphy_update_mimo_config(mac, nphy->preamble_override);
bwn_nphy_update_txrx_chain(mac);
if (phy->rev < 2) {
BWN_PHY_WRITE(mac, BWN_NPHY_DUP40_GFBL, 0xAA8);
BWN_PHY_WRITE(mac, BWN_NPHY_DUP40_BL, 0x9A4);
}
tmp2 = bwn_current_band(mac);
if (bwn_nphy_ipa(mac)) {
BWN_PHY_SET(mac, BWN_NPHY_PAPD_EN0, 0x1);
BWN_PHY_SETMASK(mac, BWN_NPHY_EPS_TABLE_ADJ0, 0x007F,
nphy->papd_epsilon_offset[0] << 7);
BWN_PHY_SET(mac, BWN_NPHY_PAPD_EN1, 0x1);
BWN_PHY_SETMASK(mac, BWN_NPHY_EPS_TABLE_ADJ1, 0x007F,
nphy->papd_epsilon_offset[1] << 7);
bwn_nphy_int_pa_set_tx_dig_filters(mac);
} else if (phy->rev >= 5) {
bwn_nphy_ext_pa_set_tx_dig_filters(mac);
}
bwn_nphy_workarounds(mac);
/* Reset CCA; in the init code this differs a little from the standard way */
bwn_phy_force_clock(mac, 1);
tmp = BWN_PHY_READ(mac, BWN_NPHY_BBCFG);
BWN_PHY_WRITE(mac, BWN_NPHY_BBCFG, tmp | BWN_NPHY_BBCFG_RSTCCA);
BWN_PHY_WRITE(mac, BWN_NPHY_BBCFG, tmp & ~BWN_NPHY_BBCFG_RSTCCA);
bwn_phy_force_clock(mac, 0);
bwn_mac_phy_clock_set(mac, true);
if (phy->rev < 7) {
bwn_nphy_pa_override(mac, false);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RX2TX);
bwn_nphy_force_rf_sequence(mac, BWN_RFSEQ_RESET2RX);
bwn_nphy_pa_override(mac, true);
}
bwn_nphy_classifier(mac, 0, 0);
bwn_nphy_read_clip_detection(mac, clip);
if (bwn_current_band(mac) == BWN_BAND_2G)
bwn_nphy_bphy_init(mac);
tx_pwr_state = nphy->txpwrctrl;
bwn_nphy_tx_power_ctrl(mac, false);
bwn_nphy_tx_power_fix(mac);
bwn_nphy_tx_power_ctl_idle_tssi(mac);
bwn_nphy_tx_power_ctl_setup(mac);
bwn_nphy_tx_gain_table_upload(mac);
if (nphy->phyrxchain != 3)
bwn_nphy_set_rx_core_state(mac, nphy->phyrxchain);
if (nphy->mphase_cal_phase_id > 0)
;/* TODO PHY Periodic Calibration Multi-Phase Restart */
do_rssi_cal = false;
if (phy->rev >= 3) {
if (bwn_current_band(mac) == BWN_BAND_2G)
do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq;
else
do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq;
if (do_rssi_cal)
bwn_nphy_rssi_cal(mac);
else
bwn_nphy_restore_rssi_cal(mac);
} else {
bwn_nphy_rssi_cal(mac);
}
if (!((nphy->measure_hold & 0x6) != 0)) {
if (bwn_current_band(mac) == BWN_BAND_2G)
do_cal = !nphy->iqcal_chanspec_2G.center_freq;
else
do_cal = !nphy->iqcal_chanspec_5G.center_freq;
if (nphy->mute)
do_cal = false;
if (do_cal) {
target = bwn_nphy_get_tx_gains(mac);
if (nphy->antsel_type == 2)
bwn_nphy_superswitch_init(mac, true);
if (nphy->perical != 2) {
bwn_nphy_rssi_cal(mac);
if (phy->rev >= 3) {
nphy->cal_orig_pwr_idx[0] =
nphy->txpwrindex[0].index_internal;
nphy->cal_orig_pwr_idx[1] =
nphy->txpwrindex[1].index_internal;
/* TODO N PHY Pre Calibrate TX Gain */
target = bwn_nphy_get_tx_gains(mac);
}
if (!bwn_nphy_cal_tx_iq_lo(mac, target, true, false))
if (bwn_nphy_cal_rx_iq(mac, target, 2, 0) == 0)
bwn_nphy_save_cal(mac);
} else if (nphy->mphase_cal_phase_id == 0)
;/* N PHY Periodic Calibration with arg 3 */
} else {
bwn_nphy_restore_cal(mac);
}
}
bwn_nphy_tx_pwr_ctrl_coef_setup(mac);
bwn_nphy_tx_power_ctrl(mac, tx_pwr_state);
BWN_PHY_WRITE(mac, BWN_NPHY_TXMACIF_HOLDOFF, 0x0015);
BWN_PHY_WRITE(mac, BWN_NPHY_TXMACDELAY, 0x0320);
if (phy->rev >= 3 && phy->rev <= 6)
BWN_PHY_WRITE(mac, BWN_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
bwn_nphy_tx_lpf_bw(mac);
if (phy->rev >= 3)
bwn_nphy_spur_workaround(mac);
return 0;
}
/**************************************************
* Channel switching ops.
**************************************************/
static void bwn_chantab_phy_upload(struct bwn_mac *mac,
const struct bwn_phy_n_sfo_cfg *e)
{
BWN_PHY_WRITE(mac, BWN_NPHY_BW1A, e->phy_bw1a);
BWN_PHY_WRITE(mac, BWN_NPHY_BW2, e->phy_bw2);
BWN_PHY_WRITE(mac, BWN_NPHY_BW3, e->phy_bw3);
BWN_PHY_WRITE(mac, BWN_NPHY_BW4, e->phy_bw4);
BWN_PHY_WRITE(mac, BWN_NPHY_BW5, e->phy_bw5);
BWN_PHY_WRITE(mac, BWN_NPHY_BW6, e->phy_bw6);
}
/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
static void bwn_nphy_pmu_spur_avoid(struct bwn_mac *mac, bool avoid)
{
struct bwn_softc *sc = mac->mac_sc;
DPRINTF(sc, BWN_DEBUG_RESET, "%s: spuravoid %d\n", __func__, avoid);
siba_pmu_spuravoid_pllupdate(sc->sc_dev, avoid);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void bwn_nphy_channel_setup(struct bwn_mac *mac,
const struct bwn_phy_n_sfo_cfg *e,
struct ieee80211_channel *new_channel)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = mac->mac_phy.phy_n;
int ch = new_channel->ic_ieee;
uint16_t tmp16;
if (bwn_channel_band(mac, new_channel) == BWN_BAND_5G) {
DPRINTF(sc, BWN_DEBUG_RESET, "%s: BAND_5G; chan=%d\n", __func__, ch);
/* Switch to 2 GHz for a moment to access BWN_PHY_B_BBCFG */
BWN_PHY_MASK(mac, BWN_NPHY_BANDCTL, ~BWN_NPHY_BANDCTL_5GHZ);
tmp16 = BWN_READ_2(mac, BWN_PSM_PHY_HDR);
BWN_WRITE_2(mac, BWN_PSM_PHY_HDR, tmp16 | 4);
/* Put BPHY in the reset */
BWN_PHY_SET(mac, BWN_PHY_B_BBCFG,
BWN_PHY_B_BBCFG_RSTCCA | BWN_PHY_B_BBCFG_RSTRX);
BWN_WRITE_2(mac, BWN_PSM_PHY_HDR, tmp16);
BWN_PHY_SET(mac, BWN_NPHY_BANDCTL, BWN_NPHY_BANDCTL_5GHZ);
} else if (bwn_channel_band(mac, new_channel) == BWN_BAND_2G) {
DPRINTF(sc, BWN_DEBUG_RESET, "%s: BAND_2G; chan=%d\n", __func__, ch);
BWN_PHY_MASK(mac, BWN_NPHY_BANDCTL, ~BWN_NPHY_BANDCTL_5GHZ);
tmp16 = BWN_READ_2(mac, BWN_PSM_PHY_HDR);
BWN_WRITE_2(mac, BWN_PSM_PHY_HDR, tmp16 | 4);
/* Take BPHY out of the reset */
BWN_PHY_MASK(mac, BWN_PHY_B_BBCFG,
(uint16_t)~(BWN_PHY_B_BBCFG_RSTCCA | BWN_PHY_B_BBCFG_RSTRX));
BWN_WRITE_2(mac, BWN_PSM_PHY_HDR, tmp16);
} else {
BWN_ERRPRINTF(mac->mac_sc, "%s: unknown band?\n", __func__);
}
bwn_chantab_phy_upload(mac, e);
if (new_channel->ic_ieee == 14) {
bwn_nphy_classifier(mac, 2, 0);
BWN_PHY_SET(mac, BWN_PHY_B_TEST, 0x0800);
} else {
bwn_nphy_classifier(mac, 2, 2);
if (bwn_channel_band(mac, new_channel) == BWN_BAND_2G)
BWN_PHY_MASK(mac, BWN_PHY_B_TEST, ~0x840);
}
if (!nphy->txpwrctrl)
bwn_nphy_tx_power_fix(mac);
if (mac->mac_phy.rev < 3)
bwn_nphy_adjust_lna_gain_table(mac);
bwn_nphy_tx_lpf_bw(mac);
if (mac->mac_phy.rev >= 3 &&
mac->mac_phy.phy_n->spur_avoid != BWN_SPUR_AVOID_DISABLE) {
uint8_t spuravoid = 0;
if (mac->mac_phy.phy_n->spur_avoid == BWN_SPUR_AVOID_FORCE) {
spuravoid = 1;
} else if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 18) {
/* TODO */
} else if (phy->rev >= 17) {
/* TODO: Off for channels 1-11, but check 12-14! */
} else if (phy->rev >= 16) {
/* TODO: Off for 2 GHz, but check 5 GHz! */
} else if (phy->rev >= 7) {
if (!bwn_is_40mhz(mac)) { /* 20MHz */
if (ch == 13 || ch == 14 || ch == 153)
spuravoid = 1;
} else { /* 40 MHz */
if (ch == 54)
spuravoid = 1;
}
} else {
if (!bwn_is_40mhz(mac)) { /* 20MHz */
if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
spuravoid = 1;
} else { /* 40MHz */
if (nphy->aband_spurwar_en &&
(ch == 38 || ch == 102 || ch == 118))
spuravoid = siba_get_chipid(sc->sc_dev) == 0x4716;
}
}
bwn_nphy_pmu_spur_avoid(mac, spuravoid);
bwn_mac_switch_freq(mac, spuravoid);
if (mac->mac_phy.rev == 3 || mac->mac_phy.rev == 4)
bwn_wireless_core_phy_pll_reset(mac);
if (spuravoid)
BWN_PHY_SET(mac, BWN_NPHY_BBCFG, BWN_NPHY_BBCFG_RSTRX);
else
BWN_PHY_MASK(mac, BWN_NPHY_BBCFG,
~BWN_NPHY_BBCFG_RSTRX & 0xFFFF);
bwn_nphy_reset_cca(mac);
/* wl sets useless phy_isspuravoid here */
}
BWN_PHY_WRITE(mac, BWN_NPHY_NDATAT_DUP40, 0x3830);
if (phy->rev >= 3)
bwn_nphy_spur_workaround(mac);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
static int bwn_nphy_set_channel(struct bwn_mac *mac,
struct ieee80211_channel *channel,
bwn_chan_type_t channel_type)
{
struct bwn_phy *phy = &mac->mac_phy;
const struct bwn_nphy_channeltab_entry_rev2 *tabent_r2 = NULL;
const struct bwn_nphy_channeltab_entry_rev3 *tabent_r3 = NULL;
const struct bwn_nphy_chantabent_rev7 *tabent_r7 = NULL;
const struct bwn_nphy_chantabent_rev7_2g *tabent_r7_2g = NULL;
uint8_t tmp;
if (phy->rev >= 19) {
return -ESRCH;
/* TODO */
} else if (phy->rev >= 7) {
r2057_get_chantabent_rev7(mac, bwn_get_chan_centre_freq(mac, channel),
&tabent_r7, &tabent_r7_2g);
if (!tabent_r7 && !tabent_r7_2g)
return -ESRCH;
} else if (phy->rev >= 3) {
tabent_r3 = bwn_nphy_get_chantabent_rev3(mac,
bwn_get_chan_centre_freq(mac, channel));
if (!tabent_r3)
return -ESRCH;
} else {
tabent_r2 = bwn_nphy_get_chantabent_rev2(mac,
channel->ic_ieee);
if (!tabent_r2)
return -ESRCH;
}
/* Channel is set later in common code, but we need to set it on our
own to let this function's subcalls work properly. */
#if 0
phy->channel = channel->ic_ieee;
#endif
#if 0
if (bwn_channel_type_is_40mhz(phy->channel_type) !=
bwn_channel_type_is_40mhz(channel_type))
; /* TODO: BMAC BW Set (channel_type) */
#endif
if (channel_type == BWN_CHAN_TYPE_40_HT_U) {
BWN_PHY_SET(mac, BWN_NPHY_RXCTL, BWN_NPHY_RXCTL_BSELU20);
if (phy->rev >= 7)
BWN_PHY_SET(mac, 0x310, 0x8000);
} else if (channel_type == BWN_CHAN_TYPE_40_HT_D) {
BWN_PHY_MASK(mac, BWN_NPHY_RXCTL, ~BWN_NPHY_RXCTL_BSELU20);
if (phy->rev >= 7)
BWN_PHY_MASK(mac, 0x310, (uint16_t)~0x8000);
}
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
const struct bwn_phy_n_sfo_cfg *phy_regs = tabent_r7 ?
&(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
if (phy->rf_rev <= 4 || phy->rf_rev == 6) {
tmp = (bwn_channel_band(mac, channel) == BWN_BAND_5G) ? 2 : 0;
BWN_RF_SETMASK(mac, R2057_TIA_CONFIG_CORE0, ~2, tmp);
BWN_RF_SETMASK(mac, R2057_TIA_CONFIG_CORE1, ~2, tmp);
}
bwn_radio_2057_setup(mac, tabent_r7, tabent_r7_2g);
bwn_nphy_channel_setup(mac, phy_regs, channel);
} else if (phy->rev >= 3) {
tmp = (bwn_channel_band(mac, channel) == BWN_BAND_5G) ? 4 : 0;
BWN_RF_SETMASK(mac, 0x08, 0xFFFB, tmp);
bwn_radio_2056_setup(mac, tabent_r3);
bwn_nphy_channel_setup(mac, &(tabent_r3->phy_regs), channel);
} else {
tmp = (bwn_channel_band(mac, channel) == BWN_BAND_5G) ? 0x0020 : 0x0050;
BWN_RF_SETMASK(mac, B2055_MASTER1, 0xFF8F, tmp);
bwn_radio_2055_setup(mac, tabent_r2);
bwn_nphy_channel_setup(mac, &(tabent_r2->phy_regs), channel);
}
return 0;
}
/**************************************************
* Basic PHY ops.
**************************************************/
int
bwn_nphy_op_allocate(struct bwn_mac *mac)
{
struct bwn_phy_n *nphy;
nphy = malloc(sizeof(*nphy), M_DEVBUF, M_ZERO | M_NOWAIT);
if (!nphy)
return -ENOMEM;
mac->mac_phy.phy_n = nphy;
return 0;
}
void
bwn_nphy_op_prepare_structs(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = phy->phy_n;
memset(nphy, 0, sizeof(*nphy));
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
nphy->spur_avoid = (phy->rev >= 3) ?
BWN_SPUR_AVOID_AUTO : BWN_SPUR_AVOID_DISABLE;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* something other than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid bwn_nphy_set_rx_core_state like wl */
nphy->perical = 2; /* avoid additional rssi cal on init (like wl) */
/* A value of 128 marks the disabled-by-default state of TX pwr ctl.  The max
 * valid value is 0x7f == 127, and we check for 128 when restoring TX pwr ctl. */
nphy->tx_pwr_idx[0] = 128;
nphy->tx_pwr_idx[1] = 128;
/* Hardware TX power control and 5GHz power gain */
nphy->txpwrctrl = false;
nphy->pwg_gain_5ghz = false;
if (mac->mac_phy.rev >= 3 ||
(siba_get_pci_subvendor(sc->sc_dev) == PCI_VENDOR_APPLE &&
(siba_get_revid(sc->sc_dev) == 11 || siba_get_revid(sc->sc_dev) == 12))) {
nphy->txpwrctrl = true;
nphy->pwg_gain_5ghz = true;
} else if (siba_sprom_get_rev(sc->sc_dev) >= 4) {
if (mac->mac_phy.rev >= 2 &&
(siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_TXPWRCTRL_EN)) {
nphy->txpwrctrl = true;
if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) {
if ((siba_get_pci_device(sc->sc_dev) == 0x4328) ||
(siba_get_pci_device(sc->sc_dev) == 0x432a))
nphy->pwg_gain_5ghz = true;
}
} else if (siba_sprom_get_bf2_lo(sc->sc_dev) & BWN_BFL2_5G_PWRGAIN) {
nphy->pwg_gain_5ghz = true;
}
}
if (mac->mac_phy.rev >= 3) {
nphy->ipa2g_on = siba_sprom_get_fem_2ghz_extpa_gain(sc->sc_dev) == 2;
nphy->ipa5g_on = siba_sprom_get_fem_5ghz_extpa_gain(sc->sc_dev) == 2;
}
}
void
bwn_nphy_op_free(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_n *nphy = phy->phy_n;
free(nphy, M_DEVBUF);
phy->phy_n = NULL;
}
int
bwn_nphy_op_init(struct bwn_mac *mac)
{
return bwn_phy_initn(mac);
}
static inline void check_phyreg(struct bwn_mac *mac, uint16_t offset)
{
#ifdef BWN_DEBUG
if ((offset & BWN_PHYROUTE_MASK) == BWN_PHYROUTE_OFDM_GPHY) {
/* OFDM registers are only available on A/G-PHYs */
BWN_ERRPRINTF(mac->mac_sc, "Invalid OFDM PHY access at "
"0x%04X on N-PHY\n", offset);
}
if ((offset & BWN_PHYROUTE_MASK) == BWN_PHYROUTE_EXT_GPHY) {
/* Ext-G registers are only available on G-PHYs */
BWN_ERRPRINTF(mac->mac_sc, "Invalid EXT-G PHY access at "
"0x%04X on N-PHY\n", offset);
}
#endif /* BWN_DEBUG */
}
void
bwn_nphy_op_maskset(struct bwn_mac *mac, uint16_t reg, uint16_t mask,
uint16_t set)
{
check_phyreg(mac, reg);
BWN_WRITE_2_F(mac, BWN_PHYCTL, reg);
BWN_WRITE_SETMASK2(mac, BWN_PHYDATA, mask, set);
}
#if 0
uint16_t
bwn_nphy_op_radio_read(struct bwn_mac *mac, uint16_t reg)
{
/* Register 1 is a 32-bit register. */
if (mac->mac_phy.rev < 7 && reg == 1) {
BWN_ERRPRINTF(mac->mac_sc, "%s: bad reg access\n", __func__);
}
if (mac->mac_phy.rev >= 7)
reg |= 0x200; /* Radio 0x2057 */
else
reg |= 0x100;
BWN_WRITE_2_F(mac, BWN_RFCTL, reg);
return BWN_READ_2(mac, BWN_RFDATALO);
}
#endif
#if 0
void
bwn_nphy_op_radio_write(struct bwn_mac *mac, uint16_t reg, uint16_t value)
{
/* Register 1 is a 32-bit register. */
if (mac->mac_phy.rev < 7 && reg == 1) {
BWN_ERRPRINTF(mac->mac_sc, "%s: bad reg access\n", __func__);
}
BWN_WRITE_2_F(mac, BWN_RFCTL, reg);
BWN_WRITE_2(mac, BWN_RFDATALO, value);
}
#endif
/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
void
bwn_nphy_op_software_rfkill(struct bwn_mac *mac, bool active)
{
struct bwn_phy *phy = &mac->mac_phy;
if (BWN_READ_4(mac, BWN_MACCTL) & BWN_MACCTL_ON)
BWN_ERRPRINTF(mac->mac_sc, "MAC not suspended\n");
DPRINTF(mac->mac_sc, BWN_DEBUG_RESET | BWN_DEBUG_PHY,
"%s: called; rev=%d, rf_on=%d, active=%d\n", __func__,
phy->rev, mac->mac_phy.rf_on, active);
/*
 * XXX TODO: don't bother doing RF programming if it's
 * already done.  However, bwn(4) currently sets rf_on in the
 * PHY setup and leaves it on after startup, which prevents
 * the code below from initializing the 2056/2057 radios.
 */
if (active) {
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
// if (!mac->mac_phy.rf_on)
bwn_radio_2057_init(mac);
bwn_switch_channel(mac, bwn_get_chan(mac));
} else if (phy->rev >= 3) {
// if (!mac->mac_phy.rf_on)
bwn_radio_init2056(mac);
bwn_switch_channel(mac, bwn_get_chan(mac));
} else {
bwn_radio_init2055(mac);
}
} else {
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 8) {
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD,
~BWN_NPHY_RFCTL_CMD_CHIP0PU);
} else if (phy->rev >= 7) {
/* Nothing needed */
} else if (phy->rev >= 3) {
BWN_PHY_MASK(mac, BWN_NPHY_RFCTL_CMD,
~BWN_NPHY_RFCTL_CMD_CHIP0PU);
BWN_RF_MASK(mac, 0x09, ~0x2);
BWN_RF_WRITE(mac, 0x204D, 0);
BWN_RF_WRITE(mac, 0x2053, 0);
BWN_RF_WRITE(mac, 0x2058, 0);
BWN_RF_WRITE(mac, 0x205E, 0);
BWN_RF_MASK(mac, 0x2062, ~0xF0);
BWN_RF_WRITE(mac, 0x2064, 0);
BWN_RF_WRITE(mac, 0x304D, 0);
BWN_RF_WRITE(mac, 0x3053, 0);
BWN_RF_WRITE(mac, 0x3058, 0);
BWN_RF_WRITE(mac, 0x305E, 0);
BWN_RF_MASK(mac, 0x3062, ~0xF0);
BWN_RF_WRITE(mac, 0x3064, 0);
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
void
bwn_nphy_op_switch_analog(struct bwn_mac *mac, bool on)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t override = on ? 0x0 : 0x7FFF;
uint16_t core = on ? 0xD : 0x00FD;
if (phy->rev >= 19) {
/* TODO */
device_printf(mac->mac_sc->sc_dev, "%s: TODO\n", __func__);
} else if (phy->rev >= 3) {
if (on) {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C1, core);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER1, override);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C2, core);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, override);
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER1, override);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C1, core);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, override);
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_C2, core);
}
} else {
BWN_PHY_WRITE(mac, BWN_NPHY_AFECTL_OVER, override);
}
}
int
bwn_nphy_op_switch_channel(struct bwn_mac *mac, unsigned int new_channel)
{
struct ieee80211_channel *channel = bwn_get_channel(mac);
bwn_chan_type_t channel_type = bwn_get_chan_type(mac, NULL);
if (bwn_current_band(mac) == BWN_BAND_2G) {
if ((new_channel < 1) || (new_channel > 14))
return -EINVAL;
} else {
if (new_channel > 200)
return -EINVAL;
}
return bwn_nphy_set_channel(mac, channel, channel_type);
}
#if 0
unsigned int
bwn_nphy_op_get_default_chan(struct bwn_mac *mac)
{
if (bwn_current_band(mac) == BWN_BAND_2G)
return 1;
return 36;
}
#endif
Index: head/sys/i386/i386/bpf_jit_machdep.c
===================================================================
--- head/sys/i386/i386/bpf_jit_machdep.c (revision 328217)
+++ head/sys/i386/i386/bpf_jit_machdep.c (revision 328218)
@@ -1,694 +1,694 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2002-2003 NetGroup, Politecnico di Torino (Italy)
* Copyright (C) 2005-2017 Jung-uk Kim <jkim@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Politecnico di Torino nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifdef _KERNEL
#include "opt_bpf.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <net/if.h>
#else
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/param.h>
#endif
#include <sys/types.h>
#include <net/bpf.h>
#include <net/bpf_jitter.h>
#include <i386/i386/bpf_jit_machdep.h>
/*
* Emit routine to update the jump table.
*/
static void
emit_length(bpf_bin_stream *stream, __unused u_int value, u_int len)
{
if (stream->refs != NULL)
(stream->refs)[stream->bpf_pc] += len;
stream->cur_ip += len;
}
/*
* Emit routine to output the actual binary code.
*/
static void
emit_code(bpf_bin_stream *stream, u_int value, u_int len)
{
switch (len) {
case 1:
stream->ibuf[stream->cur_ip] = (u_char)value;
stream->cur_ip++;
break;
case 2:
*((u_short *)(void *)(stream->ibuf + stream->cur_ip)) =
(u_short)value;
stream->cur_ip += 2;
break;
case 4:
*((u_int *)(void *)(stream->ibuf + stream->cur_ip)) = value;
stream->cur_ip += 4;
break;
}
return;
}
/*
* Scan the filter program and find possible optimization.
*/
static int
bpf_jit_optimize(struct bpf_insn *prog, u_int nins)
{
int flags;
u_int i;
/* Do we return immediately? */
if (BPF_CLASS(prog[0].code) == BPF_RET)
return (BPF_JIT_FRET);
for (flags = 0, i = 0; i < nins; i++) {
switch (prog[i].code) {
case BPF_LD|BPF_W|BPF_ABS:
case BPF_LD|BPF_H|BPF_ABS:
case BPF_LD|BPF_B|BPF_ABS:
case BPF_LD|BPF_W|BPF_IND:
case BPF_LD|BPF_H|BPF_IND:
case BPF_LD|BPF_B|BPF_IND:
case BPF_LDX|BPF_MSH|BPF_B:
flags |= BPF_JIT_FPKT;
break;
case BPF_LD|BPF_MEM:
case BPF_LDX|BPF_MEM:
case BPF_ST:
case BPF_STX:
flags |= BPF_JIT_FMEM;
break;
case BPF_JMP|BPF_JA:
case BPF_JMP|BPF_JGT|BPF_K:
case BPF_JMP|BPF_JGE|BPF_K:
case BPF_JMP|BPF_JEQ|BPF_K:
case BPF_JMP|BPF_JSET|BPF_K:
case BPF_JMP|BPF_JGT|BPF_X:
case BPF_JMP|BPF_JGE|BPF_X:
case BPF_JMP|BPF_JEQ|BPF_X:
case BPF_JMP|BPF_JSET|BPF_X:
flags |= BPF_JIT_FJMP;
break;
case BPF_ALU|BPF_DIV|BPF_K:
case BPF_ALU|BPF_MOD|BPF_K:
flags |= BPF_JIT_FADK;
break;
}
if (flags == BPF_JIT_FLAG_ALL)
break;
}
return (flags);
}
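/*
 * Illustrative note (not part of the original source): the BPF_JIT_FRET
 * fast path above fires for filters whose very first instruction is a
 * return, e.g. the classic "accept everything" program consisting of the
 * single instruction BPF_STMT(BPF_RET|BPF_K, (u_int)-1).  For such filters
 * bpf_jit_compile() below only emits the return sequence (essentially a
 * mov of the constant into EAX followed by a ret), since no packet, memory
 * or jump handling is needed.
 */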
/*
* Function that does the real stuff.
*/
bpf_filter_func
bpf_jit_compile(struct bpf_insn *prog, u_int nins, size_t *size)
{
bpf_bin_stream stream;
struct bpf_insn *ins;
int flags, fret, fpkt, fmem, fjmp, fadk;
int save_esp;
u_int i, pass;
/*
* NOTE: Do not modify the name of this variable, as it's used by
* the macros to emit code.
*/
emit_func emitm;
flags = bpf_jit_optimize(prog, nins);
fret = (flags & BPF_JIT_FRET) != 0;
fpkt = (flags & BPF_JIT_FPKT) != 0;
fmem = (flags & BPF_JIT_FMEM) != 0;
fjmp = (flags & BPF_JIT_FJMP) != 0;
fadk = (flags & BPF_JIT_FADK) != 0;
save_esp = (fpkt || fmem || fadk); /* Stack is used. */
if (fret)
nins = 1;
memset(&stream, 0, sizeof(stream));
/* Allocate the reference table for the jumps. */
if (fjmp) {
#ifdef _KERNEL
- stream.refs = mallocarray(nins + 1, sizeof(u_int), M_BPFJIT,
+ stream.refs = malloc((nins + 1) * sizeof(u_int), M_BPFJIT,
M_NOWAIT | M_ZERO);
#else
stream.refs = calloc(nins + 1, sizeof(u_int));
#endif
if (stream.refs == NULL)
return (NULL);
}
/*
* The first pass will emit the lengths of the instructions
* to create the reference table.
*/
emitm = emit_length;
for (pass = 0; pass < 2; pass++) {
ins = prog;
/* Create the procedure header. */
if (save_esp) {
PUSH(EBP);
MOVrd(ESP, EBP);
}
if (fmem)
SUBib(BPF_MEMWORDS * sizeof(uint32_t), ESP);
if (save_esp)
PUSH(ESI);
if (fpkt) {
PUSH(EDI);
PUSH(EBX);
MOVodd(8, EBP, EBX);
MOVodd(16, EBP, EDI);
}
for (i = 0; i < nins; i++) {
stream.bpf_pc++;
switch (ins->code) {
default:
#ifdef _KERNEL
return (NULL);
#else
abort();
#endif
case BPF_RET|BPF_K:
MOVid(ins->k, EAX);
if (save_esp) {
if (fpkt) {
POP(EBX);
POP(EDI);
}
POP(ESI);
LEAVE();
}
RET();
break;
case BPF_RET|BPF_A:
if (save_esp) {
if (fpkt) {
POP(EBX);
POP(EDI);
}
POP(ESI);
LEAVE();
}
RET();
break;
case BPF_LD|BPF_W|BPF_ABS:
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
JAb(12);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int32_t), ECX);
JAEb(7);
ZEROrd(EAX);
POP(EBX);
POP(EDI);
POP(ESI);
LEAVE();
RET();
MOVobd(EBX, ESI, EAX);
BSWAP(EAX);
break;
case BPF_LD|BPF_H|BPF_ABS:
ZEROrd(EAX);
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
JAb(12);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int16_t), ECX);
JAEb(5);
POP(EBX);
POP(EDI);
POP(ESI);
LEAVE();
RET();
MOVobw(EBX, ESI, AX);
SWAP_AX();
break;
case BPF_LD|BPF_B|BPF_ABS:
ZEROrd(EAX);
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
JBb(5);
POP(EBX);
POP(EDI);
POP(ESI);
LEAVE();
RET();
MOVobb(EBX, ESI, AL);
break;
case BPF_LD|BPF_W|BPF_LEN:
if (save_esp)
MOVodd(12, EBP, EAX);
else {
MOVrd(ESP, ECX);
MOVodd(12, ECX, EAX);
}
break;
case BPF_LDX|BPF_W|BPF_LEN:
if (save_esp)
MOVodd(12, EBP, EDX);
else {
MOVrd(ESP, ECX);
MOVodd(12, ECX, EDX);
}
break;
case BPF_LD|BPF_W|BPF_IND:
CMPrd(EDI, EDX);
JAb(27);
MOVid(ins->k, ESI);
MOVrd(EDI, ECX);
SUBrd(EDX, ECX);
CMPrd(ESI, ECX);
JBb(14);
ADDrd(EDX, ESI);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int32_t), ECX);
JAEb(7);
ZEROrd(EAX);
POP(EBX);
POP(EDI);
POP(ESI);
LEAVE();
RET();
MOVobd(EBX, ESI, EAX);
BSWAP(EAX);
break;
case BPF_LD|BPF_H|BPF_IND:
ZEROrd(EAX);
CMPrd(EDI, EDX);
JAb(27);
MOVid(ins->k, ESI);
MOVrd(EDI, ECX);
SUBrd(EDX, ECX);
CMPrd(ESI, ECX);
JBb(14);
ADDrd(EDX, ESI);
MOVrd(EDI, ECX);
SUBrd(ESI, ECX);
CMPid(sizeof(int16_t), ECX);
JAEb(5);
POP(EBX);
POP(EDI);
POP(ESI);
LEAVE();
RET();
MOVobw(EBX, ESI, AX);
SWAP_AX();
break;
case BPF_LD|BPF_B|BPF_IND:
ZEROrd(EAX);
CMPrd(EDI, EDX);
JAEb(13);
MOVid(ins->k, ESI);
MOVrd(EDI, ECX);
SUBrd(EDX, ECX);
CMPrd(ESI, ECX);
JAb(5);
POP(EBX);
POP(EDI);
POP(ESI);
LEAVE();
RET();
ADDrd(EDX, ESI);
MOVobb(EBX, ESI, AL);
break;
case BPF_LDX|BPF_MSH|BPF_B:
MOVid(ins->k, ESI);
CMPrd(EDI, ESI);
JBb(7);
ZEROrd(EAX);
POP(EBX);
POP(EDI);
POP(ESI);
LEAVE();
RET();
ZEROrd(EDX);
MOVobb(EBX, ESI, DL);
ANDib(0x0f, DL);
SHLib(2, EDX);
break;
case BPF_LD|BPF_IMM:
MOVid(ins->k, EAX);
break;
case BPF_LDX|BPF_IMM:
MOVid(ins->k, EDX);
break;
case BPF_LD|BPF_MEM:
MOVrd(EBP, ECX);
MOVid(((int)ins->k - BPF_MEMWORDS) *
sizeof(uint32_t), ESI);
MOVobd(ECX, ESI, EAX);
break;
case BPF_LDX|BPF_MEM:
MOVrd(EBP, ECX);
MOVid(((int)ins->k - BPF_MEMWORDS) *
sizeof(uint32_t), ESI);
MOVobd(ECX, ESI, EDX);
break;
case BPF_ST:
/*
* XXX this command and the following could
* be optimized if the previous instruction
* was already of this type
*/
MOVrd(EBP, ECX);
MOVid(((int)ins->k - BPF_MEMWORDS) *
sizeof(uint32_t), ESI);
MOVomd(EAX, ECX, ESI);
break;
case BPF_STX:
MOVrd(EBP, ECX);
MOVid(((int)ins->k - BPF_MEMWORDS) *
sizeof(uint32_t), ESI);
MOVomd(EDX, ECX, ESI);
break;
case BPF_JMP|BPF_JA:
JUMP(ins->k);
break;
case BPF_JMP|BPF_JGT|BPF_K:
case BPF_JMP|BPF_JGE|BPF_K:
case BPF_JMP|BPF_JEQ|BPF_K:
case BPF_JMP|BPF_JSET|BPF_K:
case BPF_JMP|BPF_JGT|BPF_X:
case BPF_JMP|BPF_JGE|BPF_X:
case BPF_JMP|BPF_JEQ|BPF_X:
case BPF_JMP|BPF_JSET|BPF_X:
if (ins->jt == ins->jf) {
JUMP(ins->jt);
break;
}
switch (ins->code) {
case BPF_JMP|BPF_JGT|BPF_K:
CMPid(ins->k, EAX);
JCC(JA, JBE);
break;
case BPF_JMP|BPF_JGE|BPF_K:
CMPid(ins->k, EAX);
JCC(JAE, JB);
break;
case BPF_JMP|BPF_JEQ|BPF_K:
CMPid(ins->k, EAX);
JCC(JE, JNE);
break;
case BPF_JMP|BPF_JSET|BPF_K:
TESTid(ins->k, EAX);
JCC(JNE, JE);
break;
case BPF_JMP|BPF_JGT|BPF_X:
CMPrd(EDX, EAX);
JCC(JA, JBE);
break;
case BPF_JMP|BPF_JGE|BPF_X:
CMPrd(EDX, EAX);
JCC(JAE, JB);
break;
case BPF_JMP|BPF_JEQ|BPF_X:
CMPrd(EDX, EAX);
JCC(JE, JNE);
break;
case BPF_JMP|BPF_JSET|BPF_X:
TESTrd(EDX, EAX);
JCC(JNE, JE);
break;
}
break;
case BPF_ALU|BPF_ADD|BPF_X:
ADDrd(EDX, EAX);
break;
case BPF_ALU|BPF_SUB|BPF_X:
SUBrd(EDX, EAX);
break;
case BPF_ALU|BPF_MUL|BPF_X:
MOVrd(EDX, ECX);
MULrd(EDX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_DIV|BPF_X:
case BPF_ALU|BPF_MOD|BPF_X:
TESTrd(EDX, EDX);
if (save_esp) {
if (fpkt) {
JNEb(7);
ZEROrd(EAX);
POP(EBX);
POP(EDI);
} else {
JNEb(5);
ZEROrd(EAX);
}
POP(ESI);
LEAVE();
} else {
JNEb(3);
ZEROrd(EAX);
}
RET();
MOVrd(EDX, ECX);
ZEROrd(EDX);
DIVrd(ECX);
if (BPF_OP(ins->code) == BPF_MOD)
MOVrd(EDX, EAX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_AND|BPF_X:
ANDrd(EDX, EAX);
break;
case BPF_ALU|BPF_OR|BPF_X:
ORrd(EDX, EAX);
break;
case BPF_ALU|BPF_XOR|BPF_X:
XORrd(EDX, EAX);
break;
case BPF_ALU|BPF_LSH|BPF_X:
MOVrd(EDX, ECX);
SHL_CLrb(EAX);
break;
case BPF_ALU|BPF_RSH|BPF_X:
MOVrd(EDX, ECX);
SHR_CLrb(EAX);
break;
case BPF_ALU|BPF_ADD|BPF_K:
ADD_EAXi(ins->k);
break;
case BPF_ALU|BPF_SUB|BPF_K:
SUB_EAXi(ins->k);
break;
case BPF_ALU|BPF_MUL|BPF_K:
MOVrd(EDX, ECX);
MOVid(ins->k, EDX);
MULrd(EDX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_DIV|BPF_K:
case BPF_ALU|BPF_MOD|BPF_K:
MOVrd(EDX, ECX);
ZEROrd(EDX);
MOVid(ins->k, ESI);
DIVrd(ESI);
if (BPF_OP(ins->code) == BPF_MOD)
MOVrd(EDX, EAX);
MOVrd(ECX, EDX);
break;
case BPF_ALU|BPF_AND|BPF_K:
ANDid(ins->k, EAX);
break;
case BPF_ALU|BPF_OR|BPF_K:
ORid(ins->k, EAX);
break;
case BPF_ALU|BPF_XOR|BPF_K:
XORid(ins->k, EAX);
break;
case BPF_ALU|BPF_LSH|BPF_K:
SHLib((ins->k) & 0xff, EAX);
break;
case BPF_ALU|BPF_RSH|BPF_K:
SHRib((ins->k) & 0xff, EAX);
break;
case BPF_ALU|BPF_NEG:
NEGd(EAX);
break;
case BPF_MISC|BPF_TAX:
MOVrd(EAX, EDX);
break;
case BPF_MISC|BPF_TXA:
MOVrd(EDX, EAX);
break;
}
ins++;
}
if (pass > 0)
continue;
*size = stream.cur_ip;
#ifdef _KERNEL
stream.ibuf = malloc(*size, M_BPFJIT, M_NOWAIT);
if (stream.ibuf == NULL)
break;
#else
stream.ibuf = mmap(NULL, *size, PROT_READ | PROT_WRITE,
MAP_ANON, -1, 0);
if (stream.ibuf == MAP_FAILED) {
stream.ibuf = NULL;
break;
}
#endif
/*
* Modify the reference table to contain the offsets and
* not the lengths of the instructions.
*/
if (fjmp)
for (i = 1; i < nins + 1; i++)
stream.refs[i] += stream.refs[i - 1];
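/*
 * Worked example with hypothetical lengths: if the first pass recorded
 * per-instruction code lengths of 5, 3, 7 and 2 bytes in refs[1..4]
 * (refs[0] stays 0), the prefix sum above turns refs[] into
 * { 0, 5, 8, 15, 17 }, so refs[i] becomes the byte offset at which the
 * code for BPF instruction i begins and the second pass can resolve
 * jump targets from it.
 */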
/* Reset the counters. */
stream.cur_ip = 0;
stream.bpf_pc = 0;
/* The second pass creates the actual code. */
emitm = emit_code;
}
/*
* The reference table is needed only during compilation,
* now we can free it.
*/
if (fjmp)
#ifdef _KERNEL
free(stream.refs, M_BPFJIT);
#else
free(stream.refs);
#endif
#ifndef _KERNEL
if (stream.ibuf != NULL &&
mprotect(stream.ibuf, *size, PROT_READ | PROT_EXEC) != 0) {
munmap(stream.ibuf, *size);
stream.ibuf = NULL;
}
#endif
return ((bpf_filter_func)(void *)stream.ibuf);
}
void
bpf_jit_free(void *func, size_t size)
{
#ifdef _KERNEL
free(func, M_BPFJIT);
#else
munmap(func, size);
#endif
}
Index: head/sys/i386/i386/k6_mem.c
===================================================================
--- head/sys/i386/i386/k6_mem.c (revision 328217)
+++ head/sys/i386/i386/k6_mem.c (revision 328218)
@@ -1,191 +1,191 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1999 Brian Fundakowski Feldman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
/*
* A K6-2 MTRR is defined as the highest 15 bits having the address, the next
* 15 having the mask, the 1st bit being "write-combining" and the 0th bit
* being "uncacheable".
*
* Address Mask WC UC
* | XXXXXXXXXXXXXXX | XXXXXXXXXXXXXXX | X | X |
*
* There are two of these in the 64-bit UWCCR.
*/
#define UWCCR 0xc0000085
#define K6_REG_GET(reg, addr, mask, wc, uc) do { \
addr = (reg) & 0xfffe0000; \
mask = ((reg) & 0x1fffc) >> 2; \
wc = ((reg) & 0x2) >> 1; \
uc = (reg) & 0x1; \
} while (0)
#define K6_REG_MAKE(addr, mask, wc, uc) \
((addr) | ((mask) << 2) | ((wc) << 1) | uc)
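/*
 * Worked example with hypothetical values: encoding a 128 KB (131072-byte)
 * write-combining range based at 0x000e0000.  For such a descriptor,
 * k6_mrmake() below computes len = 0x7fff, wc = 1, uc = 0, so
 *
 *	K6_REG_MAKE(0x000e0000, 0x7fff, 1, 0) == 0x000ffffe
 *
 * i.e. the base address in bits 31-17, the mask in bits 16-2, WC in bit 1
 * and UC in bit 0, matching the layout described above.
 */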
static void k6_mrinit(struct mem_range_softc *sc);
static int k6_mrset(struct mem_range_softc *, struct mem_range_desc *,
int *);
static __inline int k6_mrmake(struct mem_range_desc *, u_int32_t *);
static void k6_mem_drvinit(void *);
static struct mem_range_ops k6_mrops =
{
k6_mrinit,
k6_mrset,
NULL,
NULL
};
static __inline int
k6_mrmake(struct mem_range_desc *desc, u_int32_t *mtrr)
{
u_int32_t len = 0, wc, uc;
int bit;
if (desc->mr_base &~ 0xfffe0000)
return (EINVAL);
if (desc->mr_len < 131072 || !powerof2(desc->mr_len))
return (EINVAL);
if (desc->mr_flags &~ (MDF_WRITECOMBINE|MDF_UNCACHEABLE|MDF_FORCE))
return (EOPNOTSUPP);
for (bit = ffs(desc->mr_len >> 17) - 1; bit < 15; bit++)
len |= 1 << bit;
wc = (desc->mr_flags & MDF_WRITECOMBINE) ? 1 : 0;
uc = (desc->mr_flags & MDF_UNCACHEABLE) ? 1 : 0;
*mtrr = K6_REG_MAKE(desc->mr_base, len, wc, uc);
return (0);
}
static void
k6_mrinit(struct mem_range_softc *sc)
{
u_int64_t reg;
u_int32_t addr, mask, wc, uc;
int d;
sc->mr_cap = 0;
sc->mr_ndesc = 2; /* XXX (BFF) For now, we only have one msr for this */
- sc->mr_desc = mallocarray(sc->mr_ndesc, sizeof(struct mem_range_desc),
+ sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc),
M_MEMDESC, M_NOWAIT | M_ZERO);
if (sc->mr_desc == NULL)
panic("k6_mrinit: malloc returns NULL");
reg = rdmsr(UWCCR);
for (d = 0; d < sc->mr_ndesc; d++) {
u_int32_t one = (reg & (0xffffffff << (32 * d))) >> (32 * d);
K6_REG_GET(one, addr, mask, wc, uc);
sc->mr_desc[d].mr_base = addr;
sc->mr_desc[d].mr_len = ffs(mask) << 17;
if (wc)
sc->mr_desc[d].mr_flags |= MDF_WRITECOMBINE;
if (uc)
sc->mr_desc[d].mr_flags |= MDF_UNCACHEABLE;
}
printf("K6-family MTRR support enabled (%d registers)\n", sc->mr_ndesc);
}
static int
k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
{
u_int64_t reg;
u_int32_t mtrr;
int error, d;
switch (*arg) {
case MEMRANGE_SET_UPDATE:
error = k6_mrmake(desc, &mtrr);
if (error)
return (error);
for (d = 0; d < sc->mr_ndesc; d++) {
if (!sc->mr_desc[d].mr_len) {
sc->mr_desc[d] = *desc;
goto out;
}
if (sc->mr_desc[d].mr_base == desc->mr_base &&
sc->mr_desc[d].mr_len == desc->mr_len)
return (EEXIST);
}
return (ENOSPC);
case MEMRANGE_SET_REMOVE:
mtrr = 0;
for (d = 0; d < sc->mr_ndesc; d++)
if (sc->mr_desc[d].mr_base == desc->mr_base &&
sc->mr_desc[d].mr_len == desc->mr_len) {
bzero(&sc->mr_desc[d], sizeof(sc->mr_desc[d]));
goto out;
}
return (ENOENT);
default:
return (EOPNOTSUPP);
}
out:
disable_intr();
wbinvd();
reg = rdmsr(UWCCR);
reg &= ~(0xffffffff << (32 * d));
reg |= mtrr << (32 * d);
wrmsr(UWCCR, reg);
wbinvd();
enable_intr();
return (0);
}
static void
k6_mem_drvinit(void *unused)
{
if (cpu_vendor_id != CPU_VENDOR_AMD)
return;
if ((cpu_id & 0xf00) != 0x500)
return;
if ((cpu_id & 0xf0) < 0x80 ||
((cpu_id & 0xf0) == 0x80 && (cpu_id & 0xf) <= 0x7))
return;
mem_range_softc.mr_op = &k6_mrops;
}
SYSINIT(k6memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, k6_mem_drvinit, NULL);
Index: head/sys/kern/init_main.c
===================================================================
--- head/sys/kern/init_main.c (revision 328217)
+++ head/sys/kern/init_main.c (revision 328218)
@@ -1,878 +1,878 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1995 Terrence R. Lambert
* All rights reserved.
*
* Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)init_main.c 8.9 (Berkeley) 1/21/94
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_init_path.h"
#include "opt_verbose_sysinit.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/exec.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/loginclass.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/sysent.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#include <sys/unistd.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <machine/cpu.h>
#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/copyright.h>
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
void mi_startup(void); /* Should be elsewhere */
/* Components of the first process -- never freed. */
static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
struct thread0_storage thread0_st __aligned(32);
struct vmspace vmspace0;
struct proc *initproc;
#ifndef BOOTHOWTO
#define BOOTHOWTO 0
#endif
int boothowto = BOOTHOWTO; /* initialized so that it can be patched */
SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0,
"Boot control flags, passed from loader");
#ifndef BOOTVERBOSE
#define BOOTVERBOSE 0
#endif
int bootverbose = BOOTVERBOSE;
SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW, &bootverbose, 0,
"Control the output of verbose kernel messages");
#ifdef INVARIANTS
FEATURE(invariants, "Kernel compiled with INVARIANTS, may affect performance");
#endif
/*
* This ensures that there is at least one entry so that the sysinit_set
* symbol is not undefined. A subsystem ID of SI_SUB_DUMMY is never
* executed.
*/
SYSINIT(placeholder, SI_SUB_DUMMY, SI_ORDER_ANY, NULL, NULL);
/*
* The sysinit table itself. Items are checked off as they are run.
* If we want to register new sysinit types, add them to newsysinit.
*/
SET_DECLARE(sysinit_set, struct sysinit);
struct sysinit **sysinit, **sysinit_end;
struct sysinit **newsysinit, **newsysinit_end;
EVENTHANDLER_LIST_DECLARE(process_init);
EVENTHANDLER_LIST_DECLARE(thread_init);
EVENTHANDLER_LIST_DECLARE(process_ctor);
EVENTHANDLER_LIST_DECLARE(thread_ctor);
/*
* Merge a new sysinit set into the current set, reallocating it if
* necessary. This can only be called after malloc is running.
*/
void
sysinit_add(struct sysinit **set, struct sysinit **set_end)
{
struct sysinit **newset;
struct sysinit **sipp;
struct sysinit **xipp;
int count;
count = set_end - set;
if (newsysinit)
count += newsysinit_end - newsysinit;
else
count += sysinit_end - sysinit;
- newset = mallocarray(count, sizeof(*sipp), M_TEMP, M_NOWAIT);
+ newset = malloc(count * sizeof(*sipp), M_TEMP, M_NOWAIT);
if (newset == NULL)
panic("cannot malloc for sysinit");
xipp = newset;
if (newsysinit)
for (sipp = newsysinit; sipp < newsysinit_end; sipp++)
*xipp++ = *sipp;
else
for (sipp = sysinit; sipp < sysinit_end; sipp++)
*xipp++ = *sipp;
for (sipp = set; sipp < set_end; sipp++)
*xipp++ = *sipp;
if (newsysinit)
free(newsysinit, M_TEMP);
newsysinit = newset;
newsysinit_end = newset + count;
}
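/*
 * For context on the hunk above: mallocarray(9) differs from malloc(9)
 * only in that it rejects a multiplication that would overflow before
 * allocating. A minimal sketch of that check (not the actual
 * kern_malloc.c implementation, which may panic instead of failing):
 *
 *     void *
 *     mallocarray_sketch(size_t nmemb, size_t size,
 *         struct malloc_type *type, int flags)
 *     {
 *         if (size != 0 && nmemb > SIZE_MAX / size)
 *             return (NULL);
 *         return (malloc(nmemb * size, type, flags));
 *     }
 *
 * Here count is bounded by the number of registered sysinit entries,
 * so the plain multiply is not expected to overflow.
 */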
#if defined (DDB) && defined(VERBOSE_SYSINIT)
static const char *
symbol_name(vm_offset_t va, db_strategy_t strategy)
{
const char *name;
c_db_sym_t sym;
db_expr_t offset;
if (va == 0)
return (NULL);
sym = db_search_symbol(va, strategy, &offset);
if (offset != 0)
return (NULL);
db_symbol_values(sym, &name, NULL);
return (name);
}
#endif
/*
* System startup; initialize the world, create process 0, mount root
* filesystem, and fork to create init and pagedaemon. Most of the
* hard work is done in the lower-level initialization routines including
* startup(), which does memory initialization and autoconfiguration.
*
* This allows simple addition of new kernel subsystems that require
* boot time initialization. It also allows substitution of a subsystem
* (for instance, a scheduler, kernel profiler, or VM system) by an
* object module. Finally, it allows for optional "kernel threads".
*/
void
mi_startup(void)
{
struct sysinit **sipp; /* system initialization*/
struct sysinit **xipp; /* interior loop of sort*/
struct sysinit *save; /* bubble*/
#if defined(VERBOSE_SYSINIT)
int last;
int verbose;
#endif
TSENTER();
if (boothowto & RB_VERBOSE)
bootverbose++;
if (sysinit == NULL) {
sysinit = SET_BEGIN(sysinit_set);
sysinit_end = SET_LIMIT(sysinit_set);
}
restart:
/*
* Perform a bubble sort of the system initialization objects by
* their subsystem (primary key) and order (secondary key).
*/
for (sipp = sysinit; sipp < sysinit_end; sipp++) {
for (xipp = sipp + 1; xipp < sysinit_end; xipp++) {
if ((*sipp)->subsystem < (*xipp)->subsystem ||
((*sipp)->subsystem == (*xipp)->subsystem &&
(*sipp)->order <= (*xipp)->order))
continue; /* skip*/
save = *sipp;
*sipp = *xipp;
*xipp = save;
}
}
#if defined(VERBOSE_SYSINIT)
last = SI_SUB_COPYRIGHT;
verbose = 0;
#if !defined(DDB)
printf("VERBOSE_SYSINIT: DDB not enabled, symbol lookups disabled.\n");
#endif
#endif
/*
* Traverse the (now) ordered list of system initialization tasks.
* Perform each task, and continue on to the next task.
*/
for (sipp = sysinit; sipp < sysinit_end; sipp++) {
if ((*sipp)->subsystem == SI_SUB_DUMMY)
continue; /* skip dummy task(s)*/
if ((*sipp)->subsystem == SI_SUB_DONE)
continue;
#if defined(VERBOSE_SYSINIT)
if ((*sipp)->subsystem > last) {
verbose = 1;
last = (*sipp)->subsystem;
printf("subsystem %x\n", last);
}
if (verbose) {
#if defined(DDB)
const char *func, *data;
func = symbol_name((vm_offset_t)(*sipp)->func,
DB_STGY_PROC);
data = symbol_name((vm_offset_t)(*sipp)->udata,
DB_STGY_ANY);
if (func != NULL && data != NULL)
printf(" %s(&%s)... ", func, data);
else if (func != NULL)
printf(" %s(%p)... ", func, (*sipp)->udata);
else
#endif
printf(" %p(%p)... ", (*sipp)->func,
(*sipp)->udata);
}
#endif
/* Call function */
(*((*sipp)->func))((*sipp)->udata);
#if defined(VERBOSE_SYSINIT)
if (verbose)
printf("done.\n");
#endif
/* Check off the one we've just done */
(*sipp)->subsystem = SI_SUB_DONE;
/* Check if we've installed more sysinit items via KLD */
if (newsysinit != NULL) {
if (sysinit != SET_BEGIN(sysinit_set))
free(sysinit, M_TEMP);
sysinit = newsysinit;
sysinit_end = newsysinit_end;
newsysinit = NULL;
newsysinit_end = NULL;
goto restart;
}
}
TSEXIT(); /* Here so we don't overlap with start_init. */
mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
mtx_unlock(&Giant);
/*
* Now hand over this thread to swapper.
*/
swapper();
/* NOTREACHED*/
}
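/*
 * Usage sketch (hypothetical names): a subsystem hooks into the ordered
 * startup sequence above by declaring a SYSINIT(); mi_startup() runs it
 * after every entry with an earlier (subsystem, order) key.
 *
 *     static void
 *     example_init(void *arg __unused)
 *     {
 *         printf("example subsystem initialized\n");
 *     }
 *     SYSINIT(example, SI_SUB_KLD, SI_ORDER_ANY, example_init, NULL);
 */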
static void
print_caddr_t(void *data)
{
printf("%s", (char *)data);
}
static void
print_version(void *data __unused)
{
int len;
/* Strip a trailing newline from version. */
len = strlen(version);
while (len > 0 && version[len - 1] == '\n')
len--;
printf("%.*s %s\n", len, version, machine);
printf("%s\n", compiler_version);
}
SYSINIT(announce, SI_SUB_COPYRIGHT, SI_ORDER_FIRST, print_caddr_t,
copyright);
SYSINIT(trademark, SI_SUB_COPYRIGHT, SI_ORDER_SECOND, print_caddr_t,
trademark);
SYSINIT(version, SI_SUB_COPYRIGHT, SI_ORDER_THIRD, print_version, NULL);
#ifdef WITNESS
static char wit_warn[] =
"WARNING: WITNESS option enabled, expect reduced performance.\n";
SYSINIT(witwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 1,
print_caddr_t, wit_warn);
SYSINIT(witwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1,
print_caddr_t, wit_warn);
#endif
#ifdef DIAGNOSTIC
static char diag_warn[] =
"WARNING: DIAGNOSTIC option enabled, expect reduced performance.\n";
SYSINIT(diagwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 2,
print_caddr_t, diag_warn);
SYSINIT(diagwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 2,
print_caddr_t, diag_warn);
#endif
static int
null_fetch_syscall_args(struct thread *td __unused)
{
panic("null_fetch_syscall_args");
}
static void
null_set_syscall_retval(struct thread *td __unused, int error __unused)
{
panic("null_set_syscall_retval");
}
struct sysentvec null_sysvec = {
.sv_size = 0,
.sv_table = NULL,
.sv_mask = 0,
.sv_errsize = 0,
.sv_errtbl = NULL,
.sv_transtrap = NULL,
.sv_fixup = NULL,
.sv_sendsig = NULL,
.sv_sigcode = NULL,
.sv_szsigcode = NULL,
.sv_name = "null",
.sv_coredump = NULL,
.sv_imgact_try = NULL,
.sv_minsigstksz = 0,
.sv_pagesize = PAGE_SIZE,
.sv_minuser = VM_MIN_ADDRESS,
.sv_maxuser = VM_MAXUSER_ADDRESS,
.sv_usrstack = USRSTACK,
.sv_psstrings = PS_STRINGS,
.sv_stackprot = VM_PROT_ALL,
.sv_copyout_strings = NULL,
.sv_setregs = NULL,
.sv_fixlimit = NULL,
.sv_maxssiz = NULL,
.sv_flags = 0,
.sv_set_syscall_retval = null_set_syscall_retval,
.sv_fetch_syscall_args = null_fetch_syscall_args,
.sv_syscallnames = NULL,
.sv_schedtail = NULL,
.sv_thread_detach = NULL,
.sv_trap = NULL,
};
/*
* The two following SYSINIT's are proc0 specific glue code. I am not
* convinced that they cannot be safely combined, but their order of
* operation has been kept the same as in the original init_main.c
* for now.
*/
/* ARGSUSED*/
static void
proc0_init(void *dummy __unused)
{
struct proc *p;
struct thread *td;
struct ucred *newcred;
struct uidinfo tmpuinfo;
struct loginclass tmplc = {
.lc_name = "",
};
vm_paddr_t pageablemem;
int i;
GIANT_REQUIRED;
p = &proc0;
td = &thread0;
/*
* Initialize magic number and osrel.
*/
p->p_magic = P_MAGIC;
p->p_osrel = osreldate;
/*
* Initialize thread and process structures.
*/
procinit(); /* set up proc zone */
threadinit(); /* set up UMA zones */
/*
* Initialise scheduler resources.
* Add scheduler specific parts to proc, thread as needed.
*/
schedinit(); /* scheduler gets its house in order */
/*
* Create process 0 (the swapper).
*/
LIST_INSERT_HEAD(&allproc, p, p_list);
LIST_INSERT_HEAD(PIDHASH(0), p, p_hash);
mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
p->p_pgrp = &pgrp0;
LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
LIST_INIT(&pgrp0.pg_members);
LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist);
pgrp0.pg_session = &session0;
mtx_init(&session0.s_mtx, "session", NULL, MTX_DEF);
refcount_init(&session0.s_count, 1);
session0.s_leader = p;
p->p_sysent = &null_sysvec;
p->p_flag = P_SYSTEM | P_INMEM | P_KPROC;
p->p_flag2 = 0;
p->p_state = PRS_NORMAL;
p->p_klist = knlist_alloc(&p->p_mtx);
STAILQ_INIT(&p->p_ktr);
p->p_nice = NZERO;
/* pid_max cannot be greater than PID_MAX */
td->td_tid = PID_MAX + 1;
LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
td->td_state = TDS_RUNNING;
td->td_pri_class = PRI_TIMESHARE;
td->td_user_pri = PUSER;
td->td_base_user_pri = PUSER;
td->td_lend_user_pri = PRI_MAX;
td->td_priority = PVM;
td->td_base_pri = PVM;
td->td_oncpu = curcpu;
td->td_flags = TDF_INMEM;
td->td_pflags = TDP_KTHREAD;
td->td_cpuset = cpuset_thread0();
td->td_domain.dr_policy = td->td_cpuset->cs_domain;
prison0_init();
p->p_peers = 0;
p->p_leader = p;
p->p_reaper = p;
LIST_INIT(&p->p_reaplist);
strncpy(p->p_comm, "kernel", sizeof (p->p_comm));
strncpy(td->td_name, "swapper", sizeof (td->td_name));
callout_init_mtx(&p->p_itcallout, &p->p_mtx, 0);
callout_init_mtx(&p->p_limco, &p->p_mtx, 0);
callout_init(&td->td_slpcallout, 1);
/* Create credentials. */
newcred = crget();
newcred->cr_ngroups = 1; /* group 0 */
/* A hack to prevent uifind from tripping over NULL pointers. */
curthread->td_ucred = newcred;
tmpuinfo.ui_uid = 1;
newcred->cr_uidinfo = newcred->cr_ruidinfo = &tmpuinfo;
newcred->cr_uidinfo = uifind(0);
newcred->cr_ruidinfo = uifind(0);
newcred->cr_loginclass = &tmplc;
newcred->cr_loginclass = loginclass_find("default");
/* End hack. Creds get properly set later with thread_cow_get_proc(). */
curthread->td_ucred = NULL;
newcred->cr_prison = &prison0;
proc_set_cred_init(p, newcred);
#ifdef AUDIT
audit_cred_kproc0(newcred);
#endif
#ifdef MAC
mac_cred_create_swapper(newcred);
#endif
/* Create sigacts. */
p->p_sigacts = sigacts_alloc();
/* Initialize signal state for process 0. */
siginit(&proc0);
/* Create the file descriptor table. */
p->p_fd = fdinit(NULL, false);
p->p_fdtol = NULL;
/* Create the limits structures. */
p->p_limit = lim_alloc();
for (i = 0; i < RLIM_NLIMITS; i++)
p->p_limit->pl_rlimit[i].rlim_cur =
p->p_limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
p->p_limit->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
p->p_limit->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
/* Cast to avoid overflow on i386/PAE. */
pageablemem = ptoa((vm_paddr_t)vm_cnt.v_free_count);
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = pageablemem;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = pageablemem / 3;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = pageablemem;
p->p_cpulimit = RLIM_INFINITY;
PROC_LOCK(p);
thread_cow_get_proc(td, p);
PROC_UNLOCK(p);
/* Initialize resource accounting structures. */
racct_create(&p->p_racct);
p->p_stats = pstats_alloc();
/* Allocate a prototype map so we have something to fork. */
p->p_vmspace = &vmspace0;
vmspace0.vm_refcnt = 1;
pmap_pinit0(vmspace_pmap(&vmspace0));
/*
* proc0 is not expected to enter usermode, so there is no special
* handling for sv_minuser here, unlike what is done in exec_new_vmspace().
*/
vm_map_init(&vmspace0.vm_map, vmspace_pmap(&vmspace0),
p->p_sysent->sv_minuser, p->p_sysent->sv_maxuser);
/*
* Call the init and ctor for the new thread and proc. We wait
* to do this until all other structures are fairly sane.
*/
EVENTHANDLER_DIRECT_INVOKE(process_init, p);
EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
EVENTHANDLER_DIRECT_INVOKE(process_ctor, p);
EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
/*
* Charge root for one process.
*/
(void)chgproccnt(p->p_ucred->cr_ruidinfo, 1, 0);
PROC_LOCK(p);
racct_add_force(p, RACCT_NPROC, 1);
PROC_UNLOCK(p);
}
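/*
 * Example of the RLIMIT_MEMLOCK default computed above (numbers assumed
 * purely for illustration): on a machine with 8 GiB of free pages,
 * pageablemem is about 8 GiB, so proc0 starts with
 *
 *     RLIMIT_MEMLOCK: soft = 8 GiB / 3 (about 2.7 GiB), hard = 8 GiB
 *     RLIMIT_RSS:     soft = hard = 8 GiB
 *
 * and descendants inherit these limits across fork unless changed.
 */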
SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST, proc0_init, NULL);
/* ARGSUSED*/
static void
proc0_post(void *dummy __unused)
{
struct timespec ts;
struct proc *p;
struct rusage ru;
struct thread *td;
/*
* Now we can look at the time, having had a chance to verify the
* time from the filesystem. Pretend that proc0 started now.
*/
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
microuptime(&p->p_stats->p_start);
PROC_STATLOCK(p);
rufetch(p, &ru); /* Clears thread stats */
PROC_STATUNLOCK(p);
p->p_rux.rux_runtime = 0;
p->p_rux.rux_uticks = 0;
p->p_rux.rux_sticks = 0;
p->p_rux.rux_iticks = 0;
FOREACH_THREAD_IN_PROC(p, td) {
td->td_runtime = 0;
}
}
sx_sunlock(&allproc_lock);
PCPU_SET(switchtime, cpu_ticks());
PCPU_SET(switchticks, ticks);
/*
* Give the ``random'' number generator a thump.
*/
nanotime(&ts);
srandom(ts.tv_sec ^ ts.tv_nsec);
}
SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL);
static void
random_init(void *dummy __unused)
{
/*
* After CPU has been started we have some randomness on most
* platforms via get_cyclecount(). For platforms that don't,
* we will reseed random(9) in proc0_post() as well.
*/
srandom(get_cyclecount());
}
SYSINIT(random, SI_SUB_RANDOM, SI_ORDER_FIRST, random_init, NULL);
/*
***************************************************************************
****
**** The following SYSINIT's and glue code should be moved to the
**** respective files on a per subsystem basis.
****
***************************************************************************
*/
/*
* List of paths to try when searching for "init".
*/
static char init_path[MAXPATHLEN] =
#ifdef INIT_PATH
__XSTRING(INIT_PATH);
#else
"/sbin/init:/sbin/oinit:/sbin/init.bak:/rescue/init";
#endif
SYSCTL_STRING(_kern, OID_AUTO, init_path, CTLFLAG_RD, init_path, 0,
"Path used to search the init process");
/*
* Shutdown timeout of init(8).
* Unused within kernel, but used to control init(8), hence do not remove.
*/
#ifndef INIT_SHUTDOWN_TIMEOUT
#define INIT_SHUTDOWN_TIMEOUT 120
#endif
static int init_shutdown_timeout = INIT_SHUTDOWN_TIMEOUT;
SYSCTL_INT(_kern, OID_AUTO, init_shutdown_timeout,
CTLFLAG_RW, &init_shutdown_timeout, 0, "Shutdown timeout of init(8). "
"Unused within kernel, but used to control init(8)");
/*
* Start the initial user process; try exec'ing each pathname in init_path.
* The program is invoked with one argument containing the boot flags.
*/
static void
start_init(void *dummy)
{
vm_offset_t addr;
struct execve_args args;
int options, error;
char *var, *path, *next, *s;
char *ucp, **uap, *arg0, *arg1;
struct thread *td;
struct proc *p;
mtx_lock(&Giant);
GIANT_REQUIRED;
TSENTER(); /* Here so we don't overlap with mi_startup. */
td = curthread;
p = td->td_proc;
vfs_mountroot();
/* Wipe GELI passphrase from the environment. */
kern_unsetenv("kern.geom.eli.passphrase");
/*
* Need just enough stack to hold the faked-up "execve()" arguments.
*/
addr = p->p_sysent->sv_usrstack - PAGE_SIZE;
if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE, 0,
VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0)
panic("init: couldn't allocate argument space");
p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
p->p_vmspace->vm_ssize = 1;
if ((var = kern_getenv("init_path")) != NULL) {
strlcpy(init_path, var, sizeof(init_path));
freeenv(var);
}
for (path = init_path; *path != '\0'; path = next) {
while (*path == ':')
path++;
if (*path == '\0')
break;
for (next = path; *next != '\0' && *next != ':'; next++)
/* nothing */ ;
if (bootverbose)
printf("start_init: trying %.*s\n", (int)(next - path),
path);
/*
* Move out the boot flag argument.
*/
options = 0;
ucp = (char *)p->p_sysent->sv_usrstack;
(void)subyte(--ucp, 0); /* trailing zero */
if (boothowto & RB_SINGLE) {
(void)subyte(--ucp, 's');
options = 1;
}
#ifdef notyet
if (boothowto & RB_FASTBOOT) {
(void)subyte(--ucp, 'f');
options = 1;
}
#endif
#ifdef BOOTCDROM
(void)subyte(--ucp, 'C');
options = 1;
#endif
if (options == 0)
(void)subyte(--ucp, '-');
(void)subyte(--ucp, '-'); /* leading hyphen */
arg1 = ucp;
/*
* Move out the file name (also arg 0).
*/
(void)subyte(--ucp, 0);
for (s = next - 1; s >= path; s--)
(void)subyte(--ucp, *s);
arg0 = ucp;
/*
* Move out the arg pointers.
*/
uap = (char **)rounddown2((intptr_t)ucp, sizeof(intptr_t));
(void)suword((caddr_t)--uap, (long)0); /* terminator */
(void)suword((caddr_t)--uap, (long)(intptr_t)arg1);
(void)suword((caddr_t)--uap, (long)(intptr_t)arg0);
/*
* Point at the arguments.
*/
args.fname = arg0;
args.argv = uap;
args.envv = NULL;
/*
* Now try to exec the program. If we can't for any reason
* other than it doesn't exist, complain.
*
* Otherwise, return via fork_trampoline() all the way
* to user mode as init!
*/
if ((error = sys_execve(td, &args)) == EJUSTRETURN) {
mtx_unlock(&Giant);
TSEXIT();
return;
}
if (error != ENOENT)
printf("exec %.*s: error %d\n", (int)(next - path),
path, error);
}
printf("init: not found in path %s\n", init_path);
panic("no init");
}
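/*
 * For illustration (assuming a single-user boot that finds /sbin/init):
 * the loop above fakes up an execve() roughly equivalent to
 *
 *     char *argv[] = { "/sbin/init", "-s", NULL };
 *     execve("/sbin/init", argv, NULL);
 *
 * except that the strings and the argv vector are written into the page
 * reserved at the top of init's user stack with subyte()/suword().
 * A default multi-user boot passes "--" instead of "-s".
 */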
/*
* Like kproc_create(), but runs in its own address space.
* We do this early to reserve pid 1.
*
* Note special case - do not make it runnable yet. Other work
* in progress will change this more.
*/
static void
create_init(const void *udata __unused)
{
struct fork_req fr;
struct ucred *newcred, *oldcred;
struct thread *td;
int error;
bzero(&fr, sizeof(fr));
fr.fr_flags = RFFDG | RFPROC | RFSTOPPED;
fr.fr_procp = &initproc;
error = fork1(&thread0, &fr);
if (error)
panic("cannot fork init: %d\n", error);
KASSERT(initproc->p_pid == 1, ("create_init: initproc->p_pid != 1"));
/* divorce init's credentials from the kernel's */
newcred = crget();
sx_xlock(&proctree_lock);
PROC_LOCK(initproc);
initproc->p_flag |= P_SYSTEM | P_INMEM;
initproc->p_treeflag |= P_TREE_REAPER;
LIST_INSERT_HEAD(&initproc->p_reaplist, &proc0, p_reapsibling);
oldcred = initproc->p_ucred;
crcopy(newcred, oldcred);
#ifdef MAC
mac_cred_create_init(newcred);
#endif
#ifdef AUDIT
audit_cred_proc1(newcred);
#endif
proc_set_cred(initproc, newcred);
td = FIRST_THREAD_IN_PROC(initproc);
crfree(td->td_ucred);
td->td_ucred = crhold(initproc->p_ucred);
PROC_UNLOCK(initproc);
sx_xunlock(&proctree_lock);
crfree(oldcred);
cpu_fork_kthread_handler(FIRST_THREAD_IN_PROC(initproc),
start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL);
/*
* Make it runnable now.
*/
static void
kick_init(const void *udata __unused)
{
struct thread *td;
td = FIRST_THREAD_IN_PROC(initproc);
thread_lock(td);
TD_SET_CAN_RUN(td);
sched_add(td, SRQ_BORING);
thread_unlock(td);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_MIDDLE, kick_init, NULL);
Index: head/sys/kern/kern_cpu.c
===================================================================
--- head/sys/kern/kern_cpu.c (revision 328217)
+++ head/sys/kern/kern_cpu.c (revision 328218)
@@ -1,1069 +1,1069 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2004-2007 Nate Lawson (SDG)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>
#include "cpufreq_if.h"
/*
* Common CPU frequency glue code. Drivers for specific hardware can
* attach this interface to allow users to get/set the CPU frequency.
*/
/*
* Number of levels we can handle. Levels are synthesized from settings,
* so for M settings and N drivers, there may be M*N levels.
*/
#define CF_MAX_LEVELS 64
struct cf_saved_freq {
struct cf_level level;
int priority;
SLIST_ENTRY(cf_saved_freq) link;
};
struct cpufreq_softc {
struct sx lock;
struct cf_level curr_level;
int curr_priority;
SLIST_HEAD(, cf_saved_freq) saved_freq;
struct cf_level_lst all_levels;
int all_count;
int max_mhz;
device_t dev;
struct sysctl_ctx_list sysctl_ctx;
struct task startup_task;
struct cf_level *levels_buf;
};
struct cf_setting_array {
struct cf_setting sets[MAX_SETTINGS];
int count;
TAILQ_ENTRY(cf_setting_array) link;
};
TAILQ_HEAD(cf_setting_lst, cf_setting_array);
#define CF_MTX_INIT(x) sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x) sx_xlock((x))
#define CF_MTX_UNLOCK(x) sx_xunlock((x))
#define CF_MTX_ASSERT(x) sx_assert((x), SX_XLOCKED)
#define CF_DEBUG(msg...) do { \
if (cf_verbose) \
printf("cpufreq: " msg); \
} while (0)
static int cpufreq_attach(device_t dev);
static void cpufreq_startup_task(void *ctx, int pending);
static int cpufreq_detach(device_t dev);
static int cf_set_method(device_t dev, const struct cf_level *level,
int priority);
static int cf_get_method(device_t dev, struct cf_level *level);
static int cf_levels_method(device_t dev, struct cf_level *levels,
int *count);
static int cpufreq_insert_abs(struct cpufreq_softc *sc,
struct cf_setting *sets, int count);
static int cpufreq_expand_set(struct cpufreq_softc *sc,
struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
struct cf_level *dup, struct cf_setting *set);
static int cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);
static device_method_t cpufreq_methods[] = {
DEVMETHOD(device_probe, bus_generic_probe),
DEVMETHOD(device_attach, cpufreq_attach),
DEVMETHOD(device_detach, cpufreq_detach),
DEVMETHOD(cpufreq_set, cf_set_method),
DEVMETHOD(cpufreq_get, cf_get_method),
DEVMETHOD(cpufreq_levels, cf_levels_method),
{0, 0}
};
static driver_t cpufreq_driver = {
"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);
static int cf_lowest_freq;
static int cf_verbose;
static SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL,
"cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RWTUN, &cf_lowest_freq, 1,
"Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RWTUN, &cf_verbose, 1,
"Print verbose debugging messages");
static int
cpufreq_attach(device_t dev)
{
struct cpufreq_softc *sc;
struct pcpu *pc;
device_t parent;
uint64_t rate;
int numdevs;
CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
sc = device_get_softc(dev);
parent = device_get_parent(dev);
sc->dev = dev;
sysctl_ctx_init(&sc->sysctl_ctx);
TAILQ_INIT(&sc->all_levels);
CF_MTX_INIT(&sc->lock);
sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
SLIST_INIT(&sc->saved_freq);
/* Try to get nominal CPU freq to use it as maximum later if needed */
sc->max_mhz = cpu_get_nominal_mhz(dev);
/* If that fails, try to measure the current rate */
if (sc->max_mhz <= 0) {
pc = cpu_get_pcpu(dev);
if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
sc->max_mhz = rate / 1000000;
else
sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
}
/*
* Only initialize one set of sysctls for all CPUs. In the future,
* if multiple CPUs can have different settings, we can move these
* sysctls to be under every CPU instead of just the first one.
*/
numdevs = devclass_get_count(cpufreq_dc);
if (numdevs > 1)
return (0);
CF_DEBUG("initializing one-time data for %s\n",
device_get_nameunit(dev));
sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
M_DEVBUF, M_WAITOK);
SYSCTL_ADD_PROC(&sc->sysctl_ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
cpufreq_curr_sysctl, "I", "Current CPU frequency");
SYSCTL_ADD_PROC(&sc->sysctl_ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
cpufreq_levels_sysctl, "A", "CPU frequency levels");
/*
* Queue a one-shot broadcast that levels have changed.
* It will run once the system has completed booting.
*/
TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
taskqueue_enqueue(taskqueue_thread, &sc->startup_task);
return (0);
}
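/*
 * Hedged userland sketch of how the "freq" handler registered above is
 * typically consumed (the dev.cpu.0.freq name follows from attachment
 * under the first cpu device):
 *
 *     #include <sys/types.h>
 *     #include <sys/sysctl.h>
 *     #include <stdio.h>
 *
 *     int freq;
 *     size_t len = sizeof(freq);
 *
 *     if (sysctlbyname("dev.cpu.0.freq", &freq, &len, NULL, 0) == 0)
 *         printf("current level: %d MHz\n", freq);
 */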
/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{
cpufreq_settings_changed((device_t)ctx);
}
static int
cpufreq_detach(device_t dev)
{
struct cpufreq_softc *sc;
struct cf_saved_freq *saved_freq;
int numdevs;
CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
sc = device_get_softc(dev);
sysctl_ctx_free(&sc->sysctl_ctx);
while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
SLIST_REMOVE_HEAD(&sc->saved_freq, link);
free(saved_freq, M_TEMP);
}
/* Only clean up these resources when the last device is detaching. */
numdevs = devclass_get_count(cpufreq_dc);
if (numdevs == 1) {
CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
free(sc->levels_buf, M_DEVBUF);
}
return (0);
}
static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
struct cpufreq_softc *sc;
const struct cf_setting *set;
struct cf_saved_freq *saved_freq, *curr_freq;
struct pcpu *pc;
int error, i;
sc = device_get_softc(dev);
error = 0;
set = NULL;
saved_freq = NULL;
/* We are going to change levels so notify the pre-change handler. */
EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
if (error != 0) {
EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
return (error);
}
CF_MTX_LOCK(&sc->lock);
#ifdef SMP
#ifdef EARLY_AP_STARTUP
MPASS(mp_ncpus == 1 || smp_started);
#else
/*
* If still booting and secondary CPUs not started yet, don't allow
* changing the frequency until they're online. This is because we
* can't switch to them using sched_bind() and thus we'd only be
* switching the main CPU. XXXTODO: Need to think more about how to
* handle having different CPUs at different frequencies.
*/
if (mp_ncpus > 1 && !smp_started) {
device_printf(dev, "rejecting change, SMP not started yet\n");
error = ENXIO;
goto out;
}
#endif
#endif /* SMP */
/*
* If the requested level has a lower priority, don't allow
* the new level right now.
*/
if (priority < sc->curr_priority) {
CF_DEBUG("ignoring, requested prio %d less than curr prio %d\n", priority,
sc->curr_priority);
error = EPERM;
goto out;
}
/*
* If the caller didn't specify a level and one is saved, prepare to
* restore the saved level. If none has been saved, return an error.
*/
if (level == NULL) {
saved_freq = SLIST_FIRST(&sc->saved_freq);
if (saved_freq == NULL) {
CF_DEBUG("NULL level, no saved level\n");
error = ENXIO;
goto out;
}
level = &saved_freq->level;
priority = saved_freq->priority;
CF_DEBUG("restoring saved level, freq %d prio %d\n",
level->total_set.freq, priority);
}
/* Reject levels that are below our specified threshold. */
if (level->total_set.freq < cf_lowest_freq) {
CF_DEBUG("rejecting freq %d, less than %d limit\n",
level->total_set.freq, cf_lowest_freq);
error = EINVAL;
goto out;
}
/* If already at this level, just return. */
if (sc->curr_level.total_set.freq == level->total_set.freq) {
CF_DEBUG("skipping freq %d, same as current level %d\n",
level->total_set.freq, sc->curr_level.total_set.freq);
goto skip;
}
/* First, set the absolute frequency via its driver. */
set = &level->abs_set;
if (set->dev) {
if (!device_is_attached(set->dev)) {
error = ENXIO;
goto out;
}
/* Bind to the target CPU before switching. */
pc = cpu_get_pcpu(set->dev);
thread_lock(curthread);
sched_bind(curthread, pc->pc_cpuid);
thread_unlock(curthread);
CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
device_get_nameunit(set->dev), PCPU_GET(cpuid));
error = CPUFREQ_DRV_SET(set->dev, set);
thread_lock(curthread);
sched_unbind(curthread);
thread_unlock(curthread);
if (error) {
goto out;
}
}
/* Next, set any/all relative frequencies via their drivers. */
for (i = 0; i < level->rel_count; i++) {
set = &level->rel_set[i];
if (!device_is_attached(set->dev)) {
error = ENXIO;
goto out;
}
/* Bind to the target CPU before switching. */
pc = cpu_get_pcpu(set->dev);
thread_lock(curthread);
sched_bind(curthread, pc->pc_cpuid);
thread_unlock(curthread);
CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
device_get_nameunit(set->dev), PCPU_GET(cpuid));
error = CPUFREQ_DRV_SET(set->dev, set);
thread_lock(curthread);
sched_unbind(curthread);
thread_unlock(curthread);
if (error) {
/* XXX Back out any successful setting? */
goto out;
}
}
skip:
/*
* Before recording the current level, check if we're going to a
* higher priority. If so, save the previous level and priority.
*/
if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
priority > sc->curr_priority) {
CF_DEBUG("saving level, freq %d prio %d\n",
sc->curr_level.total_set.freq, sc->curr_priority);
curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
if (curr_freq == NULL) {
error = ENOMEM;
goto out;
}
curr_freq->level = sc->curr_level;
curr_freq->priority = sc->curr_priority;
SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
}
sc->curr_level = *level;
sc->curr_priority = priority;
/* If we were restoring a saved state, reset it to "unused". */
if (saved_freq != NULL) {
CF_DEBUG("resetting saved level\n");
sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
SLIST_REMOVE_HEAD(&sc->saved_freq, link);
free(saved_freq, M_TEMP);
}
out:
CF_MTX_UNLOCK(&sc->lock);
/*
* We changed levels (or attempted to) so notify the post-change
* handler of new frequency or error.
*/
EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
if (error && set)
device_printf(set->dev, "set freq failed, err %d\n", error);
return (error);
}
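/*
 * Priority sketch (illustrative, not taken from a specific driver): a
 * kernel consumer that temporarily caps the frequency does roughly
 *
 *     CPUFREQ_SET(cf_dev, &low_level, CPUFREQ_PRIO_KERN);
 *     ...
 *     CPUFREQ_SET(cf_dev, NULL, CPUFREQ_PRIO_KERN);
 *
 * The first call saves any lower-priority (e.g. user) level on the
 * saved_freq list; the second call, with a NULL level, restores it via
 * the path above.
 */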
static int
cf_get_method(device_t dev, struct cf_level *level)
{
struct cpufreq_softc *sc;
struct cf_level *levels;
struct cf_setting *curr_set, set;
struct pcpu *pc;
device_t *devs;
int bdiff, count, diff, error, i, n, numdevs;
uint64_t rate;
sc = device_get_softc(dev);
error = 0;
levels = NULL;
/* If we already know the current frequency, we're done. */
CF_MTX_LOCK(&sc->lock);
curr_set = &sc->curr_level.total_set;
if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
CF_DEBUG("get returning known freq %d\n", curr_set->freq);
goto out;
}
CF_MTX_UNLOCK(&sc->lock);
/*
* We need to figure out the current level. Loop through every
* driver, getting the current setting. Then, attempt to get a best
* match of settings against each level.
*/
count = CF_MAX_LEVELS;
- levels = mallocarray(count, sizeof(*levels), M_TEMP, M_NOWAIT);
+ levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
if (levels == NULL)
return (ENOMEM);
error = CPUFREQ_LEVELS(sc->dev, levels, &count);
if (error) {
if (error == E2BIG)
printf("cpufreq: need to increase CF_MAX_LEVELS\n");
free(levels, M_TEMP);
return (error);
}
error = device_get_children(device_get_parent(dev), &devs, &numdevs);
if (error) {
free(levels, M_TEMP);
return (error);
}
/*
* Reacquire the lock and search for the given level.
*
* XXX Note: this is not quite right since we really need to go
* through each level and compare both absolute and relative
* settings for each driver in the system before making a match.
* The estimation code below catches this case though.
*/
CF_MTX_LOCK(&sc->lock);
for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
if (!device_is_attached(devs[n]))
continue;
if (CPUFREQ_DRV_GET(devs[n], &set) != 0)
continue;
for (i = 0; i < count; i++) {
if (set.freq == levels[i].total_set.freq) {
sc->curr_level = levels[i];
break;
}
}
}
free(devs, M_TEMP);
if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
goto out;
}
/*
* We couldn't find an exact match, so attempt to estimate and then
* match against a level.
*/
pc = cpu_get_pcpu(dev);
if (pc == NULL) {
error = ENXIO;
goto out;
}
cpu_est_clockrate(pc->pc_cpuid, &rate);
rate /= 1000000;
bdiff = 1 << 30;
for (i = 0; i < count; i++) {
diff = abs(levels[i].total_set.freq - rate);
if (diff < bdiff) {
bdiff = diff;
sc->curr_level = levels[i];
}
}
CF_DEBUG("get estimated freq %d\n", curr_set->freq);
out:
if (error == 0)
*level = sc->curr_level;
CF_MTX_UNLOCK(&sc->lock);
if (levels)
free(levels, M_TEMP);
return (error);
}
static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
struct cf_setting_array *set_arr;
struct cf_setting_lst rel_sets;
struct cpufreq_softc *sc;
struct cf_level *lev;
struct cf_setting *sets;
struct pcpu *pc;
device_t *devs;
int error, i, numdevs, set_count, type;
uint64_t rate;
if (levels == NULL || count == NULL)
return (EINVAL);
TAILQ_INIT(&rel_sets);
sc = device_get_softc(dev);
error = device_get_children(device_get_parent(dev), &devs, &numdevs);
if (error)
return (error);
sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
if (sets == NULL) {
free(devs, M_TEMP);
return (ENOMEM);
}
/* Get settings from all cpufreq drivers. */
CF_MTX_LOCK(&sc->lock);
for (i = 0; i < numdevs; i++) {
/* Skip devices that aren't ready. */
if (!device_is_attached(devs[i]))
continue;
/*
* Get settings, skipping drivers that offer no settings or
* provide settings for informational purposes only.
*/
error = CPUFREQ_DRV_TYPE(devs[i], &type);
if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
if (error == 0) {
CF_DEBUG("skipping info-only driver %s\n",
device_get_nameunit(devs[i]));
}
continue;
}
set_count = MAX_SETTINGS;
error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
if (error || set_count == 0)
continue;
/* Add the settings to our absolute/relative lists. */
switch (type & CPUFREQ_TYPE_MASK) {
case CPUFREQ_TYPE_ABSOLUTE:
error = cpufreq_insert_abs(sc, sets, set_count);
break;
case CPUFREQ_TYPE_RELATIVE:
CF_DEBUG("adding %d relative settings\n", set_count);
set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
if (set_arr == NULL) {
error = ENOMEM;
goto out;
}
bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
set_arr->count = set_count;
TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
break;
default:
error = EINVAL;
}
if (error)
goto out;
}
/*
* If there are no absolute levels, create a fake one at 100%. We
* then cache the clockrate for later use as our base frequency.
*/
if (TAILQ_EMPTY(&sc->all_levels)) {
if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
sc->max_mhz = cpu_get_nominal_mhz(dev);
/*
* If the CPU can't report a rate for 100%, hope
* the CPU is running at its nominal rate right now,
* and use that instead.
*/
if (sc->max_mhz <= 0) {
pc = cpu_get_pcpu(dev);
cpu_est_clockrate(pc->pc_cpuid, &rate);
sc->max_mhz = rate / 1000000;
}
}
memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
sets[0].freq = sc->max_mhz;
sets[0].dev = NULL;
error = cpufreq_insert_abs(sc, sets, 1);
if (error)
goto out;
}
/* Create a combined list of absolute + relative levels. */
TAILQ_FOREACH(set_arr, &rel_sets, link)
cpufreq_expand_set(sc, set_arr);
/* If the caller doesn't have enough space, return the actual count. */
if (sc->all_count > *count) {
*count = sc->all_count;
error = E2BIG;
goto out;
}
/* Finally, output the list of levels. */
i = 0;
TAILQ_FOREACH(lev, &sc->all_levels, link) {
/* Skip levels that have a frequency that is too low. */
if (lev->total_set.freq < cf_lowest_freq) {
sc->all_count--;
continue;
}
levels[i] = *lev;
i++;
}
*count = sc->all_count;
error = 0;
out:
/* Clear all levels since we regenerate them each time. */
while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
TAILQ_REMOVE(&sc->all_levels, lev, link);
free(lev, M_TEMP);
}
sc->all_count = 0;
CF_MTX_UNLOCK(&sc->lock);
while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
TAILQ_REMOVE(&rel_sets, set_arr, link);
free(set_arr, M_TEMP);
}
free(devs, M_TEMP);
free(sets, M_TEMP);
return (error);
}
/*
* Create levels for an array of absolute settings and insert them in
* sorted order in the specified list.
*/
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
int count)
{
struct cf_level_lst *list;
struct cf_level *level, *search;
int i;
CF_MTX_ASSERT(&sc->lock);
list = &sc->all_levels;
for (i = 0; i < count; i++) {
level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
if (level == NULL)
return (ENOMEM);
level->abs_set = sets[i];
level->total_set = sets[i];
level->total_set.dev = NULL;
sc->all_count++;
if (TAILQ_EMPTY(list)) {
CF_DEBUG("adding abs setting %d at head\n",
sets[i].freq);
TAILQ_INSERT_HEAD(list, level, link);
continue;
}
TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
if (sets[i].freq <= search->total_set.freq) {
CF_DEBUG("adding abs setting %d after %d\n",
sets[i].freq, search->total_set.freq);
TAILQ_INSERT_AFTER(list, search, level, link);
break;
}
}
}
return (0);
}
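/*
 * Ordering example (frequencies assumed for illustration): inserting
 * absolute settings of 2000, 1000 and then 1500 MHz through the loop
 * above leaves all_levels, head to tail, as
 *
 *     2000 MHz -> 1500 MHz -> 1000 MHz
 *
 * i.e. the list stays sorted from highest to lowest frequency.
 */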
/*
* Expand a group of relative settings, creating derived levels from them.
*/
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
struct cf_level *fill, *search;
struct cf_setting *set;
int i;
CF_MTX_ASSERT(&sc->lock);
/*
* Walk the set of all existing levels in reverse. This is so we
* create derived states from the lowest absolute settings first
* and discard duplicates created from higher absolute settings.
* For instance, a level of 50 MHz derived from 100 MHz + 50% is
* preferable to 200 MHz + 25% because absolute settings are more
* efficient since they often change the voltage as well.
*/
TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
/* Add each setting to the level, duplicating if necessary. */
for (i = 0; i < set_arr->count; i++) {
set = &set_arr->sets[i];
/*
* If this setting is less than 100%, split the level
* into two and add this setting to the new level.
*/
fill = search;
if (set->freq < 10000) {
fill = cpufreq_dup_set(sc, search, set);
/*
* The new level was a duplicate of an existing
* level or its absolute setting is too high
* so we freed it. For example, we discard a
* derived level of 1000 MHz/25% if a level
* of 500 MHz/100% already exists.
*/
if (fill == NULL)
break;
}
/* Add this setting to the existing or new level. */
KASSERT(fill->rel_count < MAX_SETTINGS,
("cpufreq: too many relative drivers (%d)",
MAX_SETTINGS));
fill->rel_set[fill->rel_count] = *set;
fill->rel_count++;
CF_DEBUG(
"expand set added rel setting %d%% to %d level\n",
set->freq / 100, fill->total_set.freq);
}
}
return (0);
}
static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
struct cf_setting *set)
{
struct cf_level_lst *list;
struct cf_level *fill, *itr;
struct cf_setting *fill_set, *itr_set;
int i;
CF_MTX_ASSERT(&sc->lock);
/*
* Create a new level, copy it from the old one, and update the
* total frequency and power by the percentage specified in the
* relative setting.
*/
fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
if (fill == NULL)
return (NULL);
*fill = *dup;
fill_set = &fill->total_set;
fill_set->freq =
((uint64_t)fill_set->freq * set->freq) / 10000;
if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
fill_set->power = ((uint64_t)fill_set->power * set->freq)
/ 10000;
}
if (set->lat != CPUFREQ_VAL_UNKNOWN) {
if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
fill_set->lat += set->lat;
else
fill_set->lat = set->lat;
}
CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);
/*
* If we copied an old level that we already modified (say, at 100%),
* we need to remove that setting before adding this one. Since we
* process each setting array in order, we know any settings for this
* driver will be found at the end.
*/
for (i = fill->rel_count; i != 0; i--) {
if (fill->rel_set[i - 1].dev != set->dev)
break;
CF_DEBUG("removed last relative driver: %s\n",
device_get_nameunit(set->dev));
fill->rel_count--;
}
/*
* Insert the new level in sorted order. If it is a duplicate of an
* existing level (1) or has an absolute setting higher than the
* existing level (2), do not add it. We can do this since any such
* level is guaranteed to use less power. For example (1), a level with
* one absolute setting of 800 MHz uses less power than one composed
* of an absolute setting of 1600 MHz and a relative setting at 50%.
* Also for example (2), a level of 800 MHz/75% is preferable to
* 1600 MHz/25% even though the latter has a lower total frequency.
*/
list = &sc->all_levels;
KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
itr_set = &itr->total_set;
if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
CF_DEBUG("dup set rejecting %d (dupe)\n",
fill_set->freq);
itr = NULL;
break;
} else if (fill_set->freq < itr_set->freq) {
if (fill->abs_set.freq <= itr->abs_set.freq) {
CF_DEBUG(
"dup done, inserting new level %d after %d\n",
fill_set->freq, itr_set->freq);
TAILQ_INSERT_AFTER(list, itr, fill, link);
sc->all_count++;
} else {
CF_DEBUG("dup set rejecting %d (abs too big)\n",
fill_set->freq);
itr = NULL;
}
break;
}
}
/* We didn't find a good place for this new level so free it. */
if (itr == NULL) {
CF_DEBUG("dup set freeing new level %d (not optimal)\n",
fill_set->freq);
free(fill, M_TEMP);
fill = NULL;
}
return (fill);
}
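/*
 * Worked example of the scaling above (relative settings are given in
 * hundredths of a percent, so 10000 means 100%): duplicating a
 * 2000 MHz absolute level with a 7500 (75%) relative setting yields
 *
 *     fill_set->freq = (2000 * 7500) / 10000 = 1500 MHz
 *
 * and the power estimate, when known, is scaled by the same ratio.
 */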
static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
struct cpufreq_softc *sc;
struct cf_level *levels;
int best, count, diff, bdiff, devcount, error, freq, i, n;
device_t *devs;
devs = NULL;
sc = oidp->oid_arg1;
levels = sc->levels_buf;
error = CPUFREQ_GET(sc->dev, &levels[0]);
if (error)
goto out;
freq = levels[0].total_set.freq;
error = sysctl_handle_int(oidp, &freq, 0, req);
if (error != 0 || req->newptr == NULL)
goto out;
/*
* While we only call cpufreq_get() on one device (assuming all
* CPUs have equal levels), we call cpufreq_set() on all CPUs.
* This is needed for some MP systems.
*/
error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
if (error)
goto out;
for (n = 0; n < devcount; n++) {
count = CF_MAX_LEVELS;
error = CPUFREQ_LEVELS(devs[n], levels, &count);
if (error) {
if (error == E2BIG)
printf(
"cpufreq: need to increase CF_MAX_LEVELS\n");
break;
}
best = 0;
bdiff = 1 << 30;
for (i = 0; i < count; i++) {
diff = abs(levels[i].total_set.freq - freq);
if (diff < bdiff) {
bdiff = diff;
best = i;
}
}
error = CPUFREQ_SET(devs[n], &levels[best], CPUFREQ_PRIO_USER);
}
out:
if (devs)
free(devs, M_TEMP);
return (error);
}
static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
struct cpufreq_softc *sc;
struct cf_level *levels;
struct cf_setting *set;
struct sbuf sb;
int count, error, i;
sc = oidp->oid_arg1;
sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
/* Get settings from the device and generate the output string. */
count = CF_MAX_LEVELS;
levels = sc->levels_buf;
if (levels == NULL) {
sbuf_delete(&sb);
return (ENOMEM);
}
error = CPUFREQ_LEVELS(sc->dev, levels, &count);
if (error) {
if (error == E2BIG)
printf("cpufreq: need to increase CF_MAX_LEVELS\n");
goto out;
}
if (count) {
for (i = 0; i < count; i++) {
set = &levels[i].total_set;
sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
}
} else
sbuf_cpy(&sb, "0");
sbuf_trim(&sb);
sbuf_finish(&sb);
error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
out:
sbuf_delete(&sb);
return (error);
}
static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
device_t dev;
struct cf_setting *sets;
struct sbuf sb;
int error, i, set_count;
dev = oidp->oid_arg1;
sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
/* Get settings from the device and generate the output string. */
set_count = MAX_SETTINGS;
- sets = mallocarray(set_count, sizeof(*sets), M_TEMP, M_NOWAIT);
+ sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
if (sets == NULL) {
sbuf_delete(&sb);
return (ENOMEM);
}
error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
if (error)
goto out;
if (set_count) {
for (i = 0; i < set_count; i++)
sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
} else
sbuf_cpy(&sb, "0");
sbuf_trim(&sb);
sbuf_finish(&sb);
error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
out:
free(sets, M_TEMP);
sbuf_delete(&sb);
return (error);
}
int
cpufreq_register(device_t dev)
{
struct cpufreq_softc *sc;
device_t cf_dev, cpu_dev;
/* Add a sysctl to get each driver's settings separately. */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
cpufreq_settings_sysctl, "A", "CPU frequency driver settings");
/*
* Add only one cpufreq device to each CPU. Currently, all CPUs
* must offer the same levels and be switched at the same time.
*/
cpu_dev = device_get_parent(dev);
if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
sc = device_get_softc(cf_dev);
sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
return (0);
}
/* Add the child device and possibly sysctls. */
cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
if (cf_dev == NULL)
return (ENOMEM);
device_quiet(cf_dev);
return (device_probe_and_attach(cf_dev));
}
int
cpufreq_unregister(device_t dev)
{
device_t cf_dev, *devs;
int cfcount, devcount, error, i, type;
/*
* If this is the last cpufreq child device, remove the control
* device as well. We identify cpufreq children by calling a method
* they support.
*/
error = device_get_children(device_get_parent(dev), &devs, &devcount);
if (error)
return (error);
cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
if (cf_dev == NULL) {
device_printf(dev,
"warning: cpufreq_unregister called with no cpufreq device active\n");
free(devs, M_TEMP);
return (0);
}
cfcount = 0;
for (i = 0; i < devcount; i++) {
if (!device_is_attached(devs[i]))
continue;
if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
cfcount++;
}
if (cfcount <= 1)
device_delete_child(device_get_parent(cf_dev), cf_dev);
free(devs, M_TEMP);
return (0);
}
int
cpufreq_settings_changed(device_t dev)
{
EVENTHANDLER_INVOKE(cpufreq_levels_changed,
device_get_unit(device_get_parent(dev)));
return (0);
}
Index: head/sys/kern/kern_ctf.c
===================================================================
--- head/sys/kern/kern_ctf.c (revision 328217)
+++ head/sys/kern/kern_ctf.c (revision 328218)
@@ -1,326 +1,326 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2008 John Birrell <jb@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Note this file is included by both link_elf.c and link_elf_obj.c.
*
* The CTF header structure definition can't be used here because it's
* (annoyingly) covered by the CDDL. We will just use a few bytes from
* it as an integer array where we 'know' what they mean.
*/
#define CTF_HDR_SIZE 36
#define CTF_HDR_STRTAB_U32 7
#define CTF_HDR_STRLEN_U32 8
#ifdef DDB_CTF
static void *
z_alloc(void *nil, u_int items, u_int size)
{
void *ptr;
- ptr = mallocarray(items, size, M_TEMP, M_NOWAIT);
+ ptr = malloc(items * size, M_TEMP, M_NOWAIT);
return ptr;
}
static void
z_free(void *nil, void *ptr)
{
free(ptr, M_TEMP);
}
#endif
static int
link_elf_ctf_get(linker_file_t lf, linker_ctf_t *lc)
{
#ifdef DDB_CTF
Elf_Ehdr *hdr = NULL;
Elf_Shdr *shdr = NULL;
caddr_t ctftab = NULL;
caddr_t raw = NULL;
caddr_t shstrtab = NULL;
elf_file_t ef = (elf_file_t) lf;
int flags;
int i;
int nbytes;
size_t sz;
struct nameidata nd;
struct thread *td = curthread;
uint8_t ctf_hdr[CTF_HDR_SIZE];
#endif
int error = 0;
if (lf == NULL || lc == NULL)
return (EINVAL);
/* Set the defaults for no CTF present. That's not a crime! */
bzero(lc, sizeof(*lc));
#ifdef DDB_CTF
/*
* First check if we've tried to load CTF data previously and the
* CTF ELF section wasn't found. We flag that condition by setting
* ctfcnt to -1. See below.
*/
if (ef->ctfcnt < 0)
return (EFTYPE);
/* Now check if we've already loaded the CTF data. */
if (ef->ctfcnt > 0) {
/* We only need to load once. */
lc->ctftab = ef->ctftab;
lc->ctfcnt = ef->ctfcnt;
lc->symtab = ef->ddbsymtab;
lc->strtab = ef->ddbstrtab;
lc->strcnt = ef->ddbstrcnt;
lc->nsym = ef->ddbsymcnt;
lc->ctfoffp = (uint32_t **) &ef->ctfoff;
lc->typoffp = (uint32_t **) &ef->typoff;
lc->typlenp = &ef->typlen;
return (0);
}
/*
* We need to try reading the CTF data. Flag no CTF data present
* by default and if we actually succeed in reading it, we'll
* update ctfcnt to the number of bytes read.
*/
ef->ctfcnt = -1;
NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, lf->pathname, td);
flags = FREAD;
error = vn_open(&nd, &flags, 0, NULL);
if (error)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
/* Allocate memory for the ELF header. */
hdr = malloc(sizeof(*hdr), M_LINKER, M_WAITOK);
/* Read the ELF header. */
if ((error = vn_rdwr(UIO_READ, nd.ni_vp, hdr, sizeof(*hdr),
0, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, NULL,
td)) != 0)
goto out;
/* Sanity check. */
if (!IS_ELF(*hdr)) {
error = ENOEXEC;
goto out;
}
nbytes = hdr->e_shnum * hdr->e_shentsize;
if (nbytes == 0 || hdr->e_shoff == 0 ||
hdr->e_shentsize != sizeof(Elf_Shdr)) {
error = ENOEXEC;
goto out;
}
/* Allocate memory for all the section headers */
shdr = malloc(nbytes, M_LINKER, M_WAITOK);
/* Read all the section headers */
if ((error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t)shdr, nbytes,
hdr->e_shoff, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED,
NULL, td)) != 0)
goto out;
/*
* We need to search for the CTF section by name, so if the
* section names aren't present, then we can't locate the
* .SUNW_ctf section containing the CTF data.
*/
if (hdr->e_shstrndx == 0 || shdr[hdr->e_shstrndx].sh_type != SHT_STRTAB) {
printf("%s(%d): module %s e_shstrndx is %d, sh_type is %d\n",
__func__, __LINE__, lf->pathname, hdr->e_shstrndx,
shdr[hdr->e_shstrndx].sh_type);
error = EFTYPE;
goto out;
}
/* Allocate memory to buffer the section header strings. */
shstrtab = malloc(shdr[hdr->e_shstrndx].sh_size, M_LINKER, M_WAITOK);
/* Read the section header strings. */
if ((error = vn_rdwr(UIO_READ, nd.ni_vp, shstrtab,
shdr[hdr->e_shstrndx].sh_size, shdr[hdr->e_shstrndx].sh_offset,
UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, NULL, td)) != 0)
goto out;
/* Search for the section containing the CTF data. */
for (i = 0; i < hdr->e_shnum; i++)
if (strcmp(".SUNW_ctf", shstrtab + shdr[i].sh_name) == 0)
break;
/* Check if the CTF section wasn't found. */
if (i >= hdr->e_shnum) {
printf("%s(%d): module %s has no .SUNW_ctf section\n",
__func__, __LINE__, lf->pathname);
error = EFTYPE;
goto out;
}
/* Read the CTF header. */
if ((error = vn_rdwr(UIO_READ, nd.ni_vp, ctf_hdr, sizeof(ctf_hdr),
shdr[i].sh_offset, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
NOCRED, NULL, td)) != 0)
goto out;
/* Check the CTF magic number. (XXX check for big endian!) */
if (ctf_hdr[0] != 0xf1 || ctf_hdr[1] != 0xcf) {
printf("%s(%d): module %s has invalid format\n",
__func__, __LINE__, lf->pathname);
error = EFTYPE;
goto out;
}
/* Check if version 2. */
if (ctf_hdr[2] != 2) {
printf("%s(%d): module %s CTF format version is %d "
"(2 expected)\n",
__func__, __LINE__, lf->pathname, ctf_hdr[2]);
error = EFTYPE;
goto out;
}
/* Check if the data is compressed. */
if ((ctf_hdr[3] & 0x1) != 0) {
uint32_t *u32 = (uint32_t *) ctf_hdr;
/*
* The last two fields in the CTF header are the offset
* from the end of the header to the start of the string
* data and the length of that string data. Use this
* information to determine the size of the decompressed
* CTF data buffer required.
*/
sz = u32[CTF_HDR_STRTAB_U32] + u32[CTF_HDR_STRLEN_U32] +
sizeof(ctf_hdr);
/*
* Allocate memory for the compressed CTF data, including
* the header (which isn't compressed).
*/
raw = malloc(shdr[i].sh_size, M_LINKER, M_WAITOK);
} else {
/*
* The CTF data is not compressed, so the ELF section
* size is the same as the buffer size required.
*/
sz = shdr[i].sh_size;
}
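/*
 * Sizing example for the sz computed above (values assumed purely for
 * illustration): if u32[CTF_HDR_STRTAB_U32] == 0x4000 and
 * u32[CTF_HDR_STRLEN_U32] == 0x800, then
 *
 *     sz = 0x4000 + 0x800 + sizeof(ctf_hdr) = 0x4824 bytes
 *
 * Since the string table is the last thing in the CTF body, its offset
 * plus its length plus the (uncompressed) header bounds the whole
 * decompressed section.
 */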
/*
* Allocate memory to buffer the CTF data in its decompressed
* form.
*/
ctftab = malloc(sz, M_LINKER, M_WAITOK);
/*
* Read the CTF data into the raw buffer if compressed, or
* directly into the CTF buffer otherwise.
*/
if ((error = vn_rdwr(UIO_READ, nd.ni_vp, raw == NULL ? ctftab : raw,
shdr[i].sh_size, shdr[i].sh_offset, UIO_SYSSPACE, IO_NODELOCKED,
td->td_ucred, NOCRED, NULL, td)) != 0)
goto out;
/* Check if decompression is required. */
if (raw != NULL) {
z_stream zs;
int ret;
/*
* The header isn't compressed, so copy that into the
* CTF buffer first.
*/
bcopy(ctf_hdr, ctftab, sizeof(ctf_hdr));
/* Initialise the zlib structure. */
bzero(&zs, sizeof(zs));
zs.zalloc = z_alloc;
zs.zfree = z_free;
if (inflateInit(&zs) != Z_OK) {
error = EIO;
goto out;
}
zs.avail_in = shdr[i].sh_size - sizeof(ctf_hdr);
zs.next_in = ((uint8_t *) raw) + sizeof(ctf_hdr);
zs.avail_out = sz - sizeof(ctf_hdr);
zs.next_out = ((uint8_t *) ctftab) + sizeof(ctf_hdr);
ret = inflate(&zs, Z_FINISH);
inflateEnd(&zs);
if (ret != Z_STREAM_END) {
printf("%s(%d): zlib inflate returned %d\n", __func__, __LINE__, ret);
error = EIO;
goto out;
}
}
/* Got the CTF data! */
ef->ctftab = ctftab;
ef->ctfcnt = shdr[i].sh_size;
/* We'll retain the memory allocated for the CTF data. */
ctftab = NULL;
/* Let the caller use the CTF data read. */
lc->ctftab = ef->ctftab;
lc->ctfcnt = ef->ctfcnt;
lc->symtab = ef->ddbsymtab;
lc->strtab = ef->ddbstrtab;
lc->strcnt = ef->ddbstrcnt;
lc->nsym = ef->ddbsymcnt;
lc->ctfoffp = (uint32_t **) &ef->ctfoff;
lc->typoffp = (uint32_t **) &ef->typoff;
lc->typlenp = &ef->typlen;
out:
VOP_UNLOCK(nd.ni_vp, 0);
vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
if (hdr != NULL)
free(hdr, M_LINKER);
if (shdr != NULL)
free(shdr, M_LINKER);
if (shstrtab != NULL)
free(shstrtab, M_LINKER);
if (ctftab != NULL)
free(ctftab, M_LINKER);
if (raw != NULL)
free(raw, M_LINKER);
#else
error = EOPNOTSUPP;
#endif
return (error);
}
Index: head/sys/kern/kern_pmc.c
===================================================================
--- head/sys/kern/kern_pmc.c (revision 328217)
+++ head/sys/kern/kern_pmc.c (revision 328218)
@@ -1,347 +1,346 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2003-2008 Joseph Koshy
* Copyright (c) 2007 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by A. Joseph Koshy under
* sponsorship from the FreeBSD Foundation and Google, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include <sys/types.h>
#include <sys/ctype.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef HWPMC_HOOKS
FEATURE(hwpmc_hooks, "Kernel support for HW PMC");
#define PMC_KERNEL_VERSION PMC_VERSION
#else
#define PMC_KERNEL_VERSION 0
#endif
MALLOC_DECLARE(M_PMCHOOKS);
MALLOC_DEFINE(M_PMCHOOKS, "pmchooks", "Memory space for PMC hooks");
const int pmc_kernel_version = PMC_KERNEL_VERSION;
/* Hook variable. */
int __read_mostly (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
/* Interrupt handler */
int __read_mostly (*pmc_intr)(int cpu, struct trapframe *tf) = NULL;
/* Bitmask of CPUs requiring servicing at hardclock time */
volatile cpuset_t pmc_cpumask;
/*
* A global count of SS mode PMCs. When non-zero, this means that
* we have processes that are sampling the system as a whole.
*/
volatile int pmc_ss_count;
/*
* Since PMC(4) may not be loaded in the current kernel, the
* convention followed is that a non-NULL value of 'pmc_hook' implies
* the presence of this kernel module.
*
* This requires us to protect 'pmc_hook' with a
* shared (sx) lock -- thus making the process of calling into PMC(4)
* somewhat more expensive than a simple 'if' check and indirect call.
*/
struct sx pmc_sx;
/*
* PMC Soft per cpu trapframe.
*/
struct trapframe pmc_tf[MAXCPU];
/*
* PMC Soft uses a global table to store registered events.
*/
SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");
static int pmc_softevents = 16;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, softevents, CTLFLAG_RDTUN,
&pmc_softevents, 0, "maximum number of soft events");
struct mtx pmc_softs_mtx;
int pmc_softs_count;
struct pmc_soft **pmc_softs;
MTX_SYSINIT(pmc_soft_mtx, &pmc_softs_mtx, "pmc-softs", MTX_SPIN);
static void
pmc_init_sx(void)
{
sx_init_flags(&pmc_sx, "pmc-sx", SX_NOWITNESS);
}
SYSINIT(pmcsx, SI_SUB_LOCK, SI_ORDER_MIDDLE, pmc_init_sx, NULL);
/*
* Helper functions.
*/
/*
* A note on the CPU numbering scheme used by the hwpmc(4) driver.
*
* CPUs are denoted using numbers in the range 0..[pmc_cpu_max()-1].
* CPUs could be numbered "sparsely" in this range; the predicate
* `pmc_cpu_is_present()' is used to test whether a given CPU is
* physically present.
*
* Further, a CPU that is physically present may be administratively
* disabled or otherwise unavailable for use by hwpmc(4). The
* `pmc_cpu_is_active()' predicate tests for CPU usability. An
* "active" CPU participates in thread scheduling and can field
* interrupts raised by PMC hardware.
*
* On systems with hyperthreaded CPUs, multiple logical CPUs may share
* PMC hardware resources. For such processors one logical CPU is
* denoted as the primary owner of the in-CPU PMC resources. The
* pmc_cpu_is_primary() predicate is used to distinguish this primary
* CPU from the others.
*/
int
pmc_cpu_is_active(int cpu)
{
#ifdef SMP
return (pmc_cpu_is_present(cpu) &&
!CPU_ISSET(cpu, &hlt_cpus_mask));
#else
return (1);
#endif
}
/* Deprecated. */
int
pmc_cpu_is_disabled(int cpu)
{
return (!pmc_cpu_is_active(cpu));
}
int
pmc_cpu_is_present(int cpu)
{
#ifdef SMP
return (!CPU_ABSENT(cpu));
#else
return (1);
#endif
}
int
pmc_cpu_is_primary(int cpu)
{
#ifdef SMP
return (!CPU_ISSET(cpu, &logical_cpus_mask));
#else
return (1);
#endif
}
/*
* Return the maximum CPU number supported by the system. The return
* value is used for scaling internal data structures and for runtime
* checks.
*/
unsigned int
pmc_cpu_max(void)
{
#ifdef SMP
return (mp_maxid+1);
#else
return (1);
#endif
}
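/*
 * The predicates above are meant to be combined.  A sketch, with a
 * hypothetical helper name, of counting the CPUs hwpmc(4) could actually
 * use: present in the (possibly sparse) numbering, not halted, and the
 * primary sibling on hyperthreaded parts.
 */
static int
hypothetical_usable_pmc_cpus(void)
{
	int cpu, n;

	n = 0;
	for (cpu = 0; cpu < (int)pmc_cpu_max(); cpu++) {
		if (!pmc_cpu_is_present(cpu))	/* hole in sparse numbering */
			continue;
		if (!pmc_cpu_is_active(cpu))	/* administratively halted */
			continue;
		if (!pmc_cpu_is_primary(cpu))	/* HT sibling shares PMCs */
			continue;
		n++;
	}
	return (n);
}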
#ifdef INVARIANTS
/*
* Return the count of CPUs in the `active' state in the system.
*/
int
pmc_cpu_max_active(void)
{
#ifdef SMP
/*
* When support for CPU hot-plugging is added to the kernel,
* this function would change to return the current number
* of "active" CPUs.
*/
return (mp_ncpus);
#else
return (1);
#endif
}
#endif
/*
* Cleanup event name:
* - remove duplicate '_'
* - all uppercase
*/
static void
pmc_soft_namecleanup(char *name)
{
char *p, *q;
p = q = name;
for ( ; *p == '_' ; p++)
;
for ( ; *p ; p++) {
if (*p == '_' && (*(p + 1) == '_' || *(p + 1) == '\0'))
continue;
else
*q++ = toupper(*p);
}
*q = '\0';
}
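/*
 * A standalone userland replica of the normalisation above, to make the
 * effect concrete (the helper name and sample strings are invented):
 *   "__soft__page_fault_" -> "SOFT_PAGE_FAULT"
 *   "clock_stat"          -> "CLOCK_STAT"
 */
#include <ctype.h>

static void
demo_namecleanup(char *name)
{
	char *p, *q;

	p = q = name;
	while (*p == '_')		/* drop leading underscores */
		p++;
	for (; *p; p++) {
		/* drop an underscore that is doubled or trailing */
		if (*p == '_' && (*(p + 1) == '_' || *(p + 1) == '\0'))
			continue;
		*q++ = toupper((unsigned char)*p);
	}
	*q = '\0';
}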
void
pmc_soft_ev_register(struct pmc_soft *ps)
{
static int warned = 0;
int n;
ps->ps_running = 0;
ps->ps_ev.pm_ev_code = 0; /* invalid */
pmc_soft_namecleanup(ps->ps_ev.pm_ev_name);
mtx_lock_spin(&pmc_softs_mtx);
if (pmc_softs_count >= pmc_softevents) {
/*
* XXX Reusing event codes can race: a newly allocated
* event may be mistaken for the old one it replaces.
*/
for (n = 0; n < pmc_softevents; n++)
if (pmc_softs[n] == NULL)
break;
if (n == pmc_softevents) {
mtx_unlock_spin(&pmc_softs_mtx);
if (!warned) {
printf("hwpmc: too many soft events, "
"increase kern.hwpmc.softevents tunable\n");
warned = 1;
}
return;
}
ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + n;
pmc_softs[n] = ps;
} else {
ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + pmc_softs_count;
pmc_softs[pmc_softs_count++] = ps;
}
mtx_unlock_spin(&pmc_softs_mtx);
}
void
pmc_soft_ev_deregister(struct pmc_soft *ps)
{
KASSERT(ps != NULL, ("pmc_soft_deregister: called with NULL"));
mtx_lock_spin(&pmc_softs_mtx);
if (ps->ps_ev.pm_ev_code != 0 &&
(ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST) < pmc_softevents) {
KASSERT((int)ps->ps_ev.pm_ev_code >= PMC_EV_SOFT_FIRST &&
(int)ps->ps_ev.pm_ev_code <= PMC_EV_SOFT_LAST,
("pmc_soft_deregister: invalid event value"));
pmc_softs[ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST] = NULL;
}
mtx_unlock_spin(&pmc_softs_mtx);
}
struct pmc_soft *
pmc_soft_ev_acquire(enum pmc_event ev)
{
struct pmc_soft *ps;
if (ev == 0 || (ev - PMC_EV_SOFT_FIRST) >= pmc_softevents)
return NULL;
KASSERT((int)ev >= PMC_EV_SOFT_FIRST &&
(int)ev <= PMC_EV_SOFT_LAST,
("event out of range"));
mtx_lock_spin(&pmc_softs_mtx);
ps = pmc_softs[ev - PMC_EV_SOFT_FIRST];
if (ps == NULL)
mtx_unlock_spin(&pmc_softs_mtx);
return ps;
}
void
pmc_soft_ev_release(struct pmc_soft *ps)
{
mtx_unlock_spin(&pmc_softs_mtx);
}
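/*
 * Sketch of the acquire/release pattern from a hypothetical caller.
 * pmc_soft_ev_acquire() returns with pmc_softs_mtx held when it finds
 * the event, so a non-NULL return must always be paired with
 * pmc_soft_ev_release(); a NULL return means the lock is not held.
 */
static void
hypothetical_soft_sample(enum pmc_event ev)
{
	struct pmc_soft *ps;

	ps = pmc_soft_ev_acquire(ev);
	if (ps == NULL)
		return;			/* unknown or deregistered event */
	if (ps->ps_running) {
		/* hand the sample to the hwpmc(4) sampling path here */
	}
	pmc_soft_ev_release(ps);	/* drops pmc_softs_mtx */
}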
/*
* Initialise hwpmc.
*/
static void
init_hwpmc(void *dummy __unused)
{
if (pmc_softevents <= 0 ||
pmc_softevents > PMC_EV_DYN_COUNT) {
(void) printf("hwpmc: tunable \"softevents\"=%d out of "
"range.\n", pmc_softevents);
pmc_softevents = PMC_EV_DYN_COUNT;
}
- pmc_softs = mallocarray(pmc_softevents, sizeof(struct pmc_soft *),
- M_PMCHOOKS, M_NOWAIT|M_ZERO);
+ pmc_softs = malloc(pmc_softevents * sizeof(struct pmc_soft *), M_PMCHOOKS, M_NOWAIT|M_ZERO);
KASSERT(pmc_softs != NULL, ("cannot allocate soft events table"));
}
SYSINIT(hwpmc, SI_SUB_KDTRACE, SI_ORDER_FIRST, init_hwpmc, NULL);
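/*
 * The hunk above trades between an array-aware allocator and a plain
 * malloc of pmc_softevents * sizeof(struct pmc_soft *).  What the array
 * form buys is a multiplication-overflow check before the allocation; a
 * minimal userland sketch of that check (hypothetical helper name):
 */
#include <stdint.h>
#include <stdlib.h>

static void *
checked_array_alloc(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);		/* nmemb * size would wrap around */
	return (malloc(nmemb * size));
}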
Index: head/sys/kern/subr_bus.c
===================================================================
--- head/sys/kern/subr_bus.c (revision 328217)
+++ head/sys/kern/subr_bus.c (revision 328218)
@@ -1,5634 +1,5634 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1997,1998,2003 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_bus.h"
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/filio.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/queue.h>
#include <machine/bus.h>
#include <sys/random.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/cpuset.h>
#include <net/vnet.h>
#include <machine/cpu.h>
#include <machine/stdarg.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <ddb/ddb.h>
SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW, NULL, NULL);
SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW, NULL, NULL);
/*
* Used to attach drivers to devclasses.
*/
typedef struct driverlink *driverlink_t;
struct driverlink {
kobj_class_t driver;
TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */
int pass;
TAILQ_ENTRY(driverlink) passlink;
};
/*
* Forward declarations
*/
typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t;
typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
typedef TAILQ_HEAD(device_list, device) device_list_t;
struct devclass {
TAILQ_ENTRY(devclass) link;
devclass_t parent; /* parent in devclass hierarchy */
driver_list_t drivers; /* bus devclasses store drivers for bus */
char *name;
device_t *devices; /* array of devices indexed by unit */
int maxunit; /* size of devices array */
int flags;
#define DC_HAS_CHILDREN 1
struct sysctl_ctx_list sysctl_ctx;
struct sysctl_oid *sysctl_tree;
};
/**
* @brief Implementation of device.
*/
struct device {
/*
* A device is a kernel object. The first field must be the
* current ops table for the object.
*/
KOBJ_FIELDS;
/*
* Device hierarchy.
*/
TAILQ_ENTRY(device) link; /**< list of devices in parent */
TAILQ_ENTRY(device) devlink; /**< global device list membership */
device_t parent; /**< parent of this device */
device_list_t children; /**< list of child devices */
/*
* Details of this device.
*/
driver_t *driver; /**< current driver */
devclass_t devclass; /**< current device class */
int unit; /**< current unit number */
char* nameunit; /**< name+unit e.g. foodev0 */
char* desc; /**< driver specific description */
int busy; /**< count of calls to device_busy() */
device_state_t state; /**< current device state */
uint32_t devflags; /**< api level flags for device_get_flags() */
u_int flags; /**< internal device flags */
u_int order; /**< order from device_add_child_ordered() */
void *ivars; /**< instance variables */
void *softc; /**< current driver's variables */
struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */
struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */
};
static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures");
static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc");
EVENTHANDLER_LIST_DEFINE(device_attach);
EVENTHANDLER_LIST_DEFINE(device_detach);
EVENTHANDLER_LIST_DEFINE(dev_lookup);
static void devctl2_init(void);
#define DRIVERNAME(d) ((d)? d->name : "no driver")
#define DEVCLANAME(d) ((d)? d->name : "no devclass")
#ifdef BUS_DEBUG
static int bus_debug = 1;
SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0,
"Bus debug level");
#define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");}
#define DEVICENAME(d) ((d)? device_get_name(d): "no device")
/**
* Produce the indenting, indent*2 spaces plus a '.' ahead of that to
* prevent syslog from deleting initial spaces
*/
#define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJ<indent; iJ++) printf(" "); printf p ; } while (0)
static void print_device_short(device_t dev, int indent);
static void print_device(device_t dev, int indent);
void print_device_tree_short(device_t dev, int indent);
void print_device_tree(device_t dev, int indent);
static void print_driver_short(driver_t *driver, int indent);
static void print_driver(driver_t *driver, int indent);
static void print_driver_list(driver_list_t drivers, int indent);
static void print_devclass_short(devclass_t dc, int indent);
static void print_devclass(devclass_t dc, int indent);
void print_devclass_list_short(void);
void print_devclass_list(void);
#else
/* Make the compiler ignore the function calls */
#define PDEBUG(a) /* nop */
#define DEVICENAME(d) /* nop */
#define print_device_short(d,i) /* nop */
#define print_device(d,i) /* nop */
#define print_device_tree_short(d,i) /* nop */
#define print_device_tree(d,i) /* nop */
#define print_driver_short(d,i) /* nop */
#define print_driver(d,i) /* nop */
#define print_driver_list(d,i) /* nop */
#define print_devclass_short(d,i) /* nop */
#define print_devclass(d,i) /* nop */
#define print_devclass_list_short() /* nop */
#define print_devclass_list() /* nop */
#endif
/*
* dev sysctl tree
*/
enum {
DEVCLASS_SYSCTL_PARENT,
};
static int
devclass_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
devclass_t dc = (devclass_t)arg1;
const char *value;
switch (arg2) {
case DEVCLASS_SYSCTL_PARENT:
value = dc->parent ? dc->parent->name : "";
break;
default:
return (EINVAL);
}
return (SYSCTL_OUT_STR(req, value));
}
static void
devclass_sysctl_init(devclass_t dc)
{
if (dc->sysctl_tree != NULL)
return;
sysctl_ctx_init(&dc->sysctl_ctx);
dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name,
CTLFLAG_RD, NULL, "");
SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree),
OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD,
dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A",
"parent class");
}
enum {
DEVICE_SYSCTL_DESC,
DEVICE_SYSCTL_DRIVER,
DEVICE_SYSCTL_LOCATION,
DEVICE_SYSCTL_PNPINFO,
DEVICE_SYSCTL_PARENT,
};
static int
device_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
device_t dev = (device_t)arg1;
const char *value;
char *buf;
int error;
buf = NULL;
switch (arg2) {
case DEVICE_SYSCTL_DESC:
value = dev->desc ? dev->desc : "";
break;
case DEVICE_SYSCTL_DRIVER:
value = dev->driver ? dev->driver->name : "";
break;
case DEVICE_SYSCTL_LOCATION:
value = buf = malloc(1024, M_BUS, M_WAITOK | M_ZERO);
bus_child_location_str(dev, buf, 1024);
break;
case DEVICE_SYSCTL_PNPINFO:
value = buf = malloc(1024, M_BUS, M_WAITOK | M_ZERO);
bus_child_pnpinfo_str(dev, buf, 1024);
break;
case DEVICE_SYSCTL_PARENT:
value = dev->parent ? dev->parent->nameunit : "";
break;
default:
return (EINVAL);
}
error = SYSCTL_OUT_STR(req, value);
if (buf != NULL)
free(buf, M_BUS);
return (error);
}
static void
device_sysctl_init(device_t dev)
{
devclass_t dc = dev->devclass;
int domain;
if (dev->sysctl_tree != NULL)
return;
devclass_sysctl_init(dc);
sysctl_ctx_init(&dev->sysctl_ctx);
dev->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&dev->sysctl_ctx,
SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO,
dev->nameunit + strlen(dc->name),
CTLFLAG_RD, NULL, "", "device_index");
SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD,
dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A",
"device description");
SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD,
dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A",
"device driver name");
SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD,
dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A",
"device location relative to parent");
SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD,
dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A",
"device identification");
SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD,
dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A",
"parent device");
if (bus_get_domain(dev, &domain) == 0)
SYSCTL_ADD_INT(&dev->sysctl_ctx,
SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain",
CTLFLAG_RD, NULL, domain, "NUMA domain");
}
static void
device_sysctl_update(device_t dev)
{
devclass_t dc = dev->devclass;
if (dev->sysctl_tree == NULL)
return;
sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name));
}
static void
device_sysctl_fini(device_t dev)
{
if (dev->sysctl_tree == NULL)
return;
sysctl_ctx_free(&dev->sysctl_ctx);
dev->sysctl_tree = NULL;
}
/*
* /dev/devctl implementation
*/
/*
* This design allows only one reader for /dev/devctl. This is not desirable
* in the long run, but will get a lot of hair out of this implementation.
* Maybe we should make this device a clonable device.
*
* Also note: we specifically do not attach a device to the device_t tree
* to avoid potential chicken and egg problems. One could argue that all
* of this belongs to the root node. One could also further argue that the
* sysctl interface that we have might more properly be an ioctl
* interface, but at this stage of the game, I'm not inclined to rock that
* boat.
*
* I'm also not sure that the SIGIO support is done correctly or not, as
* I copied it from a driver that had SIGIO support that likely hasn't been
* tested since 3.4 or 2.2.8!
*/
/* Deprecated way to adjust queue length */
static int sysctl_devctl_disable(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
CTLFLAG_MPSAFE, NULL, 0, sysctl_devctl_disable, "I",
"devctl disable -- deprecated");
#define DEVCTL_DEFAULT_QUEUE_LEN 1000
static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS);
static int devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN;
SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_queue, CTLTYPE_INT | CTLFLAG_RWTUN |
CTLFLAG_MPSAFE, NULL, 0, sysctl_devctl_queue, "I", "devctl queue length");
static d_open_t devopen;
static d_close_t devclose;
static d_read_t devread;
static d_ioctl_t devioctl;
static d_poll_t devpoll;
static d_kqfilter_t devkqfilter;
static struct cdevsw dev_cdevsw = {
.d_version = D_VERSION,
.d_open = devopen,
.d_close = devclose,
.d_read = devread,
.d_ioctl = devioctl,
.d_poll = devpoll,
.d_kqfilter = devkqfilter,
.d_name = "devctl",
};
struct dev_event_info
{
char *dei_data;
TAILQ_ENTRY(dev_event_info) dei_link;
};
TAILQ_HEAD(devq, dev_event_info);
static struct dev_softc
{
int inuse;
int nonblock;
int queued;
int async;
struct mtx mtx;
struct cv cv;
struct selinfo sel;
struct devq devq;
struct sigio *sigio;
} devsoftc;
static void filt_devctl_detach(struct knote *kn);
static int filt_devctl_read(struct knote *kn, long hint);
struct filterops devctl_rfiltops = {
.f_isfd = 1,
.f_detach = filt_devctl_detach,
.f_event = filt_devctl_read,
};
static struct cdev *devctl_dev;
static void
devinit(void)
{
devctl_dev = make_dev_credf(MAKEDEV_ETERNAL, &dev_cdevsw, 0, NULL,
UID_ROOT, GID_WHEEL, 0600, "devctl");
mtx_init(&devsoftc.mtx, "dev mtx", "devd", MTX_DEF);
cv_init(&devsoftc.cv, "dev cv");
TAILQ_INIT(&devsoftc.devq);
knlist_init_mtx(&devsoftc.sel.si_note, &devsoftc.mtx);
devctl2_init();
}
static int
devopen(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
mtx_lock(&devsoftc.mtx);
if (devsoftc.inuse) {
mtx_unlock(&devsoftc.mtx);
return (EBUSY);
}
/* move to init */
devsoftc.inuse = 1;
mtx_unlock(&devsoftc.mtx);
return (0);
}
static int
devclose(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
mtx_lock(&devsoftc.mtx);
devsoftc.inuse = 0;
devsoftc.nonblock = 0;
devsoftc.async = 0;
cv_broadcast(&devsoftc.cv);
funsetown(&devsoftc.sigio);
mtx_unlock(&devsoftc.mtx);
return (0);
}
/*
* The read channel for this device is used to report changes to
* userland in realtime. We are required to free the data as well as
* the n1 object because we allocate them separately. Also note that
* we return one record at a time. If you try to read this device a
* character at a time, you will lose the rest of the data. Listening
* programs are expected to cope.
*/
static int
devread(struct cdev *dev, struct uio *uio, int ioflag)
{
struct dev_event_info *n1;
int rv;
mtx_lock(&devsoftc.mtx);
while (TAILQ_EMPTY(&devsoftc.devq)) {
if (devsoftc.nonblock) {
mtx_unlock(&devsoftc.mtx);
return (EAGAIN);
}
rv = cv_wait_sig(&devsoftc.cv, &devsoftc.mtx);
if (rv) {
/*
* Need to translate ERESTART to EINTR here? -- jake
*/
mtx_unlock(&devsoftc.mtx);
return (rv);
}
}
n1 = TAILQ_FIRST(&devsoftc.devq);
TAILQ_REMOVE(&devsoftc.devq, n1, dei_link);
devsoftc.queued--;
mtx_unlock(&devsoftc.mtx);
rv = uiomove(n1->dei_data, strlen(n1->dei_data), uio);
free(n1->dei_data, M_BUS);
free(n1, M_BUS);
return (rv);
}
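/*
 * A minimal userland consumer of /dev/devctl, illustrating the
 * one-record-per-read() contract described above.  Only one process may
 * hold the device open (devopen() returns EBUSY otherwise), so this
 * sketch assumes devd(8) is not running.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[1024];
	ssize_t n;
	int fd;

	fd = open("/dev/devctl", O_RDONLY);
	if (fd == -1) {
		perror("/dev/devctl");
		return (1);
	}
	for (;;) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		fputs(buf, stdout);	/* each record already ends in '\n' */
	}
	close(fd);
	return (0);
}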
static int
devioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
switch (cmd) {
case FIONBIO:
if (*(int*)data)
devsoftc.nonblock = 1;
else
devsoftc.nonblock = 0;
return (0);
case FIOASYNC:
if (*(int*)data)
devsoftc.async = 1;
else
devsoftc.async = 0;
return (0);
case FIOSETOWN:
return fsetown(*(int *)data, &devsoftc.sigio);
case FIOGETOWN:
*(int *)data = fgetown(&devsoftc.sigio);
return (0);
/* (un)Support for other fcntl() calls. */
case FIOCLEX:
case FIONCLEX:
case FIONREAD:
default:
break;
}
return (ENOTTY);
}
static int
devpoll(struct cdev *dev, int events, struct thread *td)
{
int revents = 0;
mtx_lock(&devsoftc.mtx);
if (events & (POLLIN | POLLRDNORM)) {
if (!TAILQ_EMPTY(&devsoftc.devq))
revents = events & (POLLIN | POLLRDNORM);
else
selrecord(td, &devsoftc.sel);
}
mtx_unlock(&devsoftc.mtx);
return (revents);
}
static int
devkqfilter(struct cdev *dev, struct knote *kn)
{
int error;
if (kn->kn_filter == EVFILT_READ) {
kn->kn_fop = &devctl_rfiltops;
knlist_add(&devsoftc.sel.si_note, kn, 0);
error = 0;
} else
error = EINVAL;
return (error);
}
static void
filt_devctl_detach(struct knote *kn)
{
knlist_remove(&devsoftc.sel.si_note, kn, 0);
}
static int
filt_devctl_read(struct knote *kn, long hint)
{
kn->kn_data = devsoftc.queued;
return (kn->kn_data != 0);
}
/**
* @brief Return whether the userland process is running
*/
boolean_t
devctl_process_running(void)
{
return (devsoftc.inuse == 1);
}
/**
* @brief Queue data to be read from the devctl device
*
* Generic interface to queue data to the devctl device. It is
* assumed that @p data is properly formatted. It is further assumed
* that @p data is allocated using the M_BUS malloc type.
*/
void
devctl_queue_data_f(char *data, int flags)
{
struct dev_event_info *n1 = NULL, *n2 = NULL;
if (strlen(data) == 0)
goto out;
if (devctl_queue_length == 0)
goto out;
n1 = malloc(sizeof(*n1), M_BUS, flags);
if (n1 == NULL)
goto out;
n1->dei_data = data;
mtx_lock(&devsoftc.mtx);
if (devctl_queue_length == 0) {
mtx_unlock(&devsoftc.mtx);
free(n1->dei_data, M_BUS);
free(n1, M_BUS);
return;
}
/* Leave at least one spot in the queue... */
while (devsoftc.queued > devctl_queue_length - 1) {
n2 = TAILQ_FIRST(&devsoftc.devq);
TAILQ_REMOVE(&devsoftc.devq, n2, dei_link);
free(n2->dei_data, M_BUS);
free(n2, M_BUS);
devsoftc.queued--;
}
TAILQ_INSERT_TAIL(&devsoftc.devq, n1, dei_link);
devsoftc.queued++;
cv_broadcast(&devsoftc.cv);
KNOTE_LOCKED(&devsoftc.sel.si_note, 0);
mtx_unlock(&devsoftc.mtx);
selwakeup(&devsoftc.sel);
if (devsoftc.async && devsoftc.sigio != NULL)
pgsigio(&devsoftc.sigio, SIGIO, 0);
return;
out:
/*
* We have to free data on all error paths since the caller
* assumes it will be free'd when this item is dequeued.
*/
free(data, M_BUS);
return;
}
void
devctl_queue_data(char *data)
{
devctl_queue_data_f(data, M_NOWAIT);
}
/**
* @brief Send a 'notification' to userland, using standard ways
*/
void
devctl_notify_f(const char *system, const char *subsystem, const char *type,
const char *data, int flags)
{
int len = 0;
char *msg;
if (system == NULL)
return; /* BOGUS! Must specify system. */
if (subsystem == NULL)
return; /* BOGUS! Must specify subsystem. */
if (type == NULL)
return; /* BOGUS! Must specify type. */
len += strlen(" system=") + strlen(system);
len += strlen(" subsystem=") + strlen(subsystem);
len += strlen(" type=") + strlen(type);
/* add in the data message plus newline. */
if (data != NULL)
len += strlen(data);
len += 3; /* '!', '\n', and NUL */
msg = malloc(len, M_BUS, flags);
if (msg == NULL)
return; /* Drop it on the floor */
if (data != NULL)
snprintf(msg, len, "!system=%s subsystem=%s type=%s %s\n",
system, subsystem, type, data);
else
snprintf(msg, len, "!system=%s subsystem=%s type=%s\n",
system, subsystem, type);
devctl_queue_data_f(msg, flags);
}
void
devctl_notify(const char *system, const char *subsystem, const char *type,
const char *data)
{
devctl_notify_f(system, subsystem, type, data, M_NOWAIT);
}
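/*
 * Sketch of a hypothetical driver queueing a notification; the strings
 * are invented.  Given the format string above, the record read back
 * from /dev/devctl would be
 *   !system=ACPI subsystem=Thermal type=temperature _zone=tz0 value=80C
 */
static void
hypothetical_report_temperature(void)
{
	devctl_notify("ACPI", "Thermal", "temperature",
	    "_zone=tz0 value=80C");
}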
/*
* Common routine that tries to make sending messages as easy as possible.
* We allocate memory for the data, copy strings into that, but do not
* free it unless there's an error. The dequeue part of the driver should
* free the data. We don't send data when the device is disabled. We do
* send data, even when we have no listeners, because we wish to avoid
* races relating to startup and restart of listening applications.
*
* devaddq is designed to string together the type of event, with the
* object of that event, plus the plug and play info and location info
* for that event. This is likely most useful for devices, but less
* useful for other consumers of this interface. Those should use
* the devctl_queue_data() interface instead.
*/
static void
devaddq(const char *type, const char *what, device_t dev)
{
char *data = NULL;
char *loc = NULL;
char *pnp = NULL;
const char *parstr;
if (!devctl_queue_length)/* Rare race, but lost races safely discard */
return;
data = malloc(1024, M_BUS, M_NOWAIT);
if (data == NULL)
goto bad;
/* get the bus specific location of this device */
loc = malloc(1024, M_BUS, M_NOWAIT);
if (loc == NULL)
goto bad;
*loc = '\0';
bus_child_location_str(dev, loc, 1024);
/* Get the bus specific pnp info of this device */
pnp = malloc(1024, M_BUS, M_NOWAIT);
if (pnp == NULL)
goto bad;
*pnp = '\0';
bus_child_pnpinfo_str(dev, pnp, 1024);
/* Get the parent of this device, or / if high enough in the tree. */
if (device_get_parent(dev) == NULL)
parstr = "."; /* Or '/' ? */
else
parstr = device_get_nameunit(device_get_parent(dev));
/* String it all together. */
snprintf(data, 1024, "%s%s at %s %s on %s\n", type, what, loc, pnp,
parstr);
free(loc, M_BUS);
free(pnp, M_BUS);
devctl_queue_data(data);
return;
bad:
free(pnp, M_BUS);
free(loc, M_BUS);
free(data, M_BUS);
return;
}
/*
* A device was added to the tree. We are called just after it successfully
* attaches (that is, probe and attach success for this device). No call
* is made if a device is merely parented into the tree. See devnomatch
* if probe fails. If attach fails, no notification is sent (but maybe
* we should have a different message for this).
*/
static void
devadded(device_t dev)
{
devaddq("+", device_get_nameunit(dev), dev);
}
/*
* A device was removed from the tree. We are called just before this
* happens.
*/
static void
devremoved(device_t dev)
{
devaddq("-", device_get_nameunit(dev), dev);
}
/*
* Called when there's no match for this device. This is only called
* the first time that no match happens, so we don't keep getting this
* message. Should that prove to be undesirable, we can change it.
* This is called when all drivers that can attach to a given bus
* decline to accept this device. Other errors may not be detected.
*/
static void
devnomatch(device_t dev)
{
devaddq("?", "", dev);
}
static int
sysctl_devctl_disable(SYSCTL_HANDLER_ARGS)
{
struct dev_event_info *n1;
int dis, error;
dis = (devctl_queue_length == 0);
error = sysctl_handle_int(oidp, &dis, 0, req);
if (error || !req->newptr)
return (error);
if (mtx_initialized(&devsoftc.mtx))
mtx_lock(&devsoftc.mtx);
if (dis) {
while (!TAILQ_EMPTY(&devsoftc.devq)) {
n1 = TAILQ_FIRST(&devsoftc.devq);
TAILQ_REMOVE(&devsoftc.devq, n1, dei_link);
free(n1->dei_data, M_BUS);
free(n1, M_BUS);
}
devsoftc.queued = 0;
devctl_queue_length = 0;
} else {
devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN;
}
if (mtx_initialized(&devsoftc.mtx))
mtx_unlock(&devsoftc.mtx);
return (0);
}
static int
sysctl_devctl_queue(SYSCTL_HANDLER_ARGS)
{
struct dev_event_info *n1;
int q, error;
q = devctl_queue_length;
error = sysctl_handle_int(oidp, &q, 0, req);
if (error || !req->newptr)
return (error);
if (q < 0)
return (EINVAL);
if (mtx_initialized(&devsoftc.mtx))
mtx_lock(&devsoftc.mtx);
devctl_queue_length = q;
while (devsoftc.queued > devctl_queue_length) {
n1 = TAILQ_FIRST(&devsoftc.devq);
TAILQ_REMOVE(&devsoftc.devq, n1, dei_link);
free(n1->dei_data, M_BUS);
free(n1, M_BUS);
devsoftc.queued--;
}
if (mtx_initialized(&devsoftc.mtx))
mtx_unlock(&devsoftc.mtx);
return (0);
}
/**
* @brief safely quotes strings that might have double quotes in them.
*
* The devctl protocol relies on quoted strings having matching quotes.
* This routine quotes any internal quotes so the resulting string
* is safe to pass to snprintf to construct, for example pnp info strings.
* Strings are always terminated with a NUL, but may be truncated if longer
* than @p len bytes after quotes.
*
* @param dst Buffer to hold the string. Must be at least @p len bytes long
* @param src Original buffer.
* @param len Length of buffer pointed to by @p dst, including trailing NUL
*/
void
devctl_safe_quote(char *dst, const char *src, size_t len)
{
char *walker = dst, *ep = dst + len - 1;
if (len == 0)
return;
while (src != NULL && walker < ep)
{
if (*src == '"' || *src == '\\') {
if (ep - walker < 2)
break;
*walker++ = '\\';
}
*walker++ = *src++;
}
*walker = '\0';
}
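/*
 * Sketch of the intended use: quote an untrusted string before it is
 * embedded between double quotes in a pnpinfo or location string.  The
 * helper and buffer names here are hypothetical.
 */
static void
hypothetical_format_desc(char *buf, size_t buflen, const char *untrusted)
{
	char quoted[256];

	devctl_safe_quote(quoted, untrusted, sizeof(quoted));
	snprintf(buf, buflen, "desc=\"%s\"", quoted);
}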
/* End of /dev/devctl code */
static TAILQ_HEAD(,device) bus_data_devices;
static int bus_data_generation = 1;
static kobj_method_t null_methods[] = {
KOBJMETHOD_END
};
DEFINE_CLASS(null, null_methods, 0);
/*
* Bus pass implementation
*/
static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes);
int bus_current_pass = BUS_PASS_ROOT;
/**
* @internal
* @brief Register the pass level of a new driver attachment
*
* Register a new driver attachment's pass level. If no driver
* attachment with the same pass level has been added, then @p new
* will be added to the global passes list.
*
* @param new the new driver attachment
*/
static void
driver_register_pass(struct driverlink *new)
{
struct driverlink *dl;
/* We only consider pass numbers during boot. */
if (bus_current_pass == BUS_PASS_DEFAULT)
return;
/*
* Walk the passes list. If we already know about this pass
* then there is nothing to do. If we don't, then insert this
* driver link into the list.
*/
TAILQ_FOREACH(dl, &passes, passlink) {
if (dl->pass < new->pass)
continue;
if (dl->pass == new->pass)
return;
TAILQ_INSERT_BEFORE(dl, new, passlink);
return;
}
TAILQ_INSERT_TAIL(&passes, new, passlink);
}
/**
* @brief Raise the current bus pass
*
* Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS()
* method on the root bus to kick off a new device tree scan for each
* new pass level that has at least one driver.
*/
void
bus_set_pass(int pass)
{
struct driverlink *dl;
if (bus_current_pass > pass)
panic("Attempt to lower bus pass level");
TAILQ_FOREACH(dl, &passes, passlink) {
/* Skip pass values below the current pass level. */
if (dl->pass <= bus_current_pass)
continue;
/*
* Bail once we hit a driver with a pass level that is
* too high.
*/
if (dl->pass > pass)
break;
/*
* Raise the pass level to the next level and rescan
* the tree.
*/
bus_current_pass = dl->pass;
BUS_NEW_PASS(root_bus);
}
/*
* If there isn't a driver registered for the requested pass,
* then bus_current_pass might still be less than 'pass'. Set
* it to 'pass' in that case.
*/
if (bus_current_pass < pass)
bus_current_pass = pass;
KASSERT(bus_current_pass == pass, ("Failed to update bus pass level"));
}
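/*
 * How a driver opts into an earlier pass, as a hedged sketch: a
 * hypothetical "foo" driver on simplebus whose driver_t and devclass_t
 * objects are assumed to be defined elsewhere in the same file.  The
 * trailing argument is the pass level recorded by driver_register_pass()
 * above.
 */
EARLY_DRIVER_MODULE(foo, simplebus, foo_driver, foo_devclass, NULL, NULL,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);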
/*
* Devclass implementation
*/
static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses);
/**
* @internal
* @brief Find or create a device class
*
* If a device class with the name @p classname exists, return it,
* otherwise if @p create is non-zero create and return a new device
* class.
*
* If @p parentname is non-NULL, the parent of the devclass is set to
* the devclass of that name.
*
* @param classname the devclass name to find or create
* @param parentname the parent devclass name or @c NULL
* @param create non-zero to create a devclass
*/
static devclass_t
devclass_find_internal(const char *classname, const char *parentname,
int create)
{
devclass_t dc;
PDEBUG(("looking for %s", classname));
if (!classname)
return (NULL);
TAILQ_FOREACH(dc, &devclasses, link) {
if (!strcmp(dc->name, classname))
break;
}
if (create && !dc) {
PDEBUG(("creating %s", classname));
dc = malloc(sizeof(struct devclass) + strlen(classname) + 1,
M_BUS, M_NOWAIT | M_ZERO);
if (!dc)
return (NULL);
dc->parent = NULL;
dc->name = (char*) (dc + 1);
strcpy(dc->name, classname);
TAILQ_INIT(&dc->drivers);
TAILQ_INSERT_TAIL(&devclasses, dc, link);
bus_data_generation_update();
}
/*
* If a parent class is specified, then set that as our parent so
* that this devclass will support drivers for the parent class as
* well. If the parent class has the same name don't do this though
* as it creates a cycle that can trigger an infinite loop in
* device_probe_child() if a device exists for which there is no
* suitable driver.
*/
if (parentname && dc && !dc->parent &&
strcmp(classname, parentname) != 0) {
dc->parent = devclass_find_internal(parentname, NULL, TRUE);
dc->parent->flags |= DC_HAS_CHILDREN;
}
return (dc);
}
/**
* @brief Create a device class
*
* If a device class with the name @p classname exists, return it,
* otherwise create and return a new device class.
*
* @param classname the devclass name to find or create
*/
devclass_t
devclass_create(const char *classname)
{
return (devclass_find_internal(classname, NULL, TRUE));
}
/**
* @brief Find a device class
*
* If a device class with the name @p classname exists, return it,
* otherwise return @c NULL.
*
* @param classname the devclass name to find
*/
devclass_t
devclass_find(const char *classname)
{
return (devclass_find_internal(classname, NULL, FALSE));
}
/**
* @brief Register that a device driver has been added to a devclass
*
* Register that a device driver has been added to a devclass. This
* is called by devclass_add_driver to accomplish the recursive
* notification of all the children classes of dc, as well as dc.
* Each layer will have BUS_DRIVER_ADDED() called for all instances of
* the devclass.
*
* We do a full search here of the devclass list at each iteration
* level to save storing children-lists in the devclass structure. If
* we ever move beyond a few dozen devices doing this, we may need to
* reevaluate...
*
* @param dc the devclass to edit
* @param driver the driver that was just added
*/
static void
devclass_driver_added(devclass_t dc, driver_t *driver)
{
devclass_t parent;
int i;
/*
* Call BUS_DRIVER_ADDED for any existing buses in this class.
*/
for (i = 0; i < dc->maxunit; i++)
if (dc->devices[i] && device_is_attached(dc->devices[i]))
BUS_DRIVER_ADDED(dc->devices[i], driver);
/*
* Walk through the children classes. Since we only keep a
* single parent pointer around, we walk the entire list of
* devclasses looking for children. We set the
* DC_HAS_CHILDREN flag when a child devclass is created on
* the parent, so we only walk the list for those devclasses
* that have children.
*/
if (!(dc->flags & DC_HAS_CHILDREN))
return;
parent = dc;
TAILQ_FOREACH(dc, &devclasses, link) {
if (dc->parent == parent)
devclass_driver_added(dc, driver);
}
}
/**
* @brief Add a device driver to a device class
*
* Add a device driver to a devclass. This is normally called
* automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of
* all devices in the devclass will be called to allow them to attempt
* to re-probe any unmatched children.
*
* @param dc the devclass to edit
* @param driver the driver to register
*/
int
devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp)
{
driverlink_t dl;
const char *parentname;
PDEBUG(("%s", DRIVERNAME(driver)));
/* Don't allow invalid pass values. */
if (pass <= BUS_PASS_ROOT)
return (EINVAL);
dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO);
if (!dl)
return (ENOMEM);
/*
* Compile the driver's methods. Also increase the reference count
* so that the class doesn't get freed when the last instance
* goes. This means we can safely use static methods and avoids a
* double-free in devclass_delete_driver.
*/
kobj_class_compile((kobj_class_t) driver);
/*
* If the driver has any base classes, make the
* devclass inherit from the devclass of the driver's
* first base class. This will allow the system to
* search for drivers in both devclasses for children
* of a device using this driver.
*/
if (driver->baseclasses)
parentname = driver->baseclasses[0]->name;
else
parentname = NULL;
*dcp = devclass_find_internal(driver->name, parentname, TRUE);
dl->driver = driver;
TAILQ_INSERT_TAIL(&dc->drivers, dl, link);
driver->refs++; /* XXX: kobj_mtx */
dl->pass = pass;
driver_register_pass(dl);
devclass_driver_added(dc, driver);
bus_data_generation_update();
return (0);
}
/**
* @brief Register that a device driver has been deleted from a devclass
*
* Register that a device driver has been removed from a devclass.
* This is called by devclass_delete_driver to accomplish the
* recursive notification of all the children classes of busclass, as
* well as busclass. Each layer will attempt to detach the driver
* from any devices that are children of the bus's devclass. The function
* will return an error if a device fails to detach.
*
* We do a full search here of the devclass list at each iteration
* level to save storing children-lists in the devclass structure. If
* we ever move beyond a few dozen devices doing this, we may need to
* reevaluate...
*
* @param busclass the devclass of the parent bus
* @param dc the devclass of the driver being deleted
* @param driver the driver being deleted
*/
static int
devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver)
{
devclass_t parent;
device_t dev;
int error, i;
/*
* Disassociate from any devices. We iterate through all the
* devices in the devclass of the driver and detach any which are
* using the driver and which have a parent in the devclass which
* we are deleting from.
*
* Note that since a driver can be in multiple devclasses, we
* should not detach devices which are not children of devices in
* the affected devclass.
*/
for (i = 0; i < dc->maxunit; i++) {
if (dc->devices[i]) {
dev = dc->devices[i];
if (dev->driver == driver && dev->parent &&
dev->parent->devclass == busclass) {
if ((error = device_detach(dev)) != 0)
return (error);
BUS_PROBE_NOMATCH(dev->parent, dev);
devnomatch(dev);
dev->flags |= DF_DONENOMATCH;
}
}
}
/*
* Walk through the children classes. Since we only keep a
* single parent pointer around, we walk the entire list of
* devclasses looking for children. We set the
* DC_HAS_CHILDREN flag when a child devclass is created on
* the parent, so we only walk the list for those devclasses
* that have children.
*/
if (!(busclass->flags & DC_HAS_CHILDREN))
return (0);
parent = busclass;
TAILQ_FOREACH(busclass, &devclasses, link) {
if (busclass->parent == parent) {
error = devclass_driver_deleted(busclass, dc, driver);
if (error)
return (error);
}
}
return (0);
}
/**
* @brief Delete a device driver from a device class
*
* Delete a device driver from a devclass. This is normally called
* automatically by DRIVER_MODULE().
*
* If the driver is currently attached to any devices,
* devclass_delete_driver() will first attempt to detach from each
* device. If one of the detach calls fails, the driver will not be
* deleted.
*
* @param dc the devclass to edit
* @param driver the driver to unregister
*/
int
devclass_delete_driver(devclass_t busclass, driver_t *driver)
{
devclass_t dc = devclass_find(driver->name);
driverlink_t dl;
int error;
PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass)));
if (!dc)
return (0);
/*
* Find the link structure in the bus' list of drivers.
*/
TAILQ_FOREACH(dl, &busclass->drivers, link) {
if (dl->driver == driver)
break;
}
if (!dl) {
PDEBUG(("%s not found in %s list", driver->name,
busclass->name));
return (ENOENT);
}
error = devclass_driver_deleted(busclass, dc, driver);
if (error != 0)
return (error);
TAILQ_REMOVE(&busclass->drivers, dl, link);
free(dl, M_BUS);
/* XXX: kobj_mtx */
driver->refs--;
if (driver->refs == 0)
kobj_class_free((kobj_class_t) driver);
bus_data_generation_update();
return (0);
}
/**
* @brief Quiesces a set of device drivers from a device class
*
* Quiesce a device driver from a devclass. This is normally called
* automatically by DRIVER_MODULE().
*
* If the driver is currently attached to any devices,
* devclass_quiesce_driver() will first attempt to quiesce each
* device.
*
* @param dc the devclass to edit
* @param driver the driver to unregister
*/
static int
devclass_quiesce_driver(devclass_t busclass, driver_t *driver)
{
devclass_t dc = devclass_find(driver->name);
driverlink_t dl;
device_t dev;
int i;
int error;
PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass)));
if (!dc)
return (0);
/*
* Find the link structure in the bus' list of drivers.
*/
TAILQ_FOREACH(dl, &busclass->drivers, link) {
if (dl->driver == driver)
break;
}
if (!dl) {
PDEBUG(("%s not found in %s list", driver->name,
busclass->name));
return (ENOENT);
}
/*
* Quiesce all devices. We iterate through all the devices in
* the devclass of the driver and quiesce any which are using
* the driver and which have a parent in the devclass which we
* are quiescing.
*
* Note that since a driver can be in multiple devclasses, we
* should not quiesce devices which are not children of
* devices in the affected devclass.
*/
for (i = 0; i < dc->maxunit; i++) {
if (dc->devices[i]) {
dev = dc->devices[i];
if (dev->driver == driver && dev->parent &&
dev->parent->devclass == busclass) {
if ((error = device_quiesce(dev)) != 0)
return (error);
}
}
}
return (0);
}
/**
* @internal
*/
static driverlink_t
devclass_find_driver_internal(devclass_t dc, const char *classname)
{
driverlink_t dl;
PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc)));
TAILQ_FOREACH(dl, &dc->drivers, link) {
if (!strcmp(dl->driver->name, classname))
return (dl);
}
PDEBUG(("not found"));
return (NULL);
}
/**
* @brief Return the name of the devclass
*/
const char *
devclass_get_name(devclass_t dc)
{
return (dc->name);
}
/**
* @brief Find a device given a unit number
*
* @param dc the devclass to search
* @param unit the unit number to search for
*
* @returns the device with the given unit number or @c
* NULL if there is no such device
*/
device_t
devclass_get_device(devclass_t dc, int unit)
{
if (dc == NULL || unit < 0 || unit >= dc->maxunit)
return (NULL);
return (dc->devices[unit]);
}
/**
* @brief Find the softc field of a device given a unit number
*
* @param dc the devclass to search
* @param unit the unit number to search for
*
* @returns the softc field of the device with the given
* unit number or @c NULL if there is no such
* device
*/
void *
devclass_get_softc(devclass_t dc, int unit)
{
device_t dev;
dev = devclass_get_device(dc, unit);
if (!dev)
return (NULL);
return (device_get_softc(dev));
}
/**
* @brief Get a list of devices in the devclass
*
* An array containing a list of all the devices in the given devclass
* is allocated and returned in @p *devlistp. The number of devices
* in the array is returned in @p *devcountp. The caller should free
* the array using @c free(p, M_TEMP), even if @p *devcountp is 0.
*
* @param dc the devclass to examine
* @param devlistp points at location for array pointer return
* value
* @param devcountp points at location for array size return value
*
* @retval 0 success
* @retval ENOMEM the array allocation failed
*/
int
devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp)
{
int count, i;
device_t *list;
count = devclass_get_count(dc);
- list = mallocarray(count, sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO);
+ list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO);
if (!list)
return (ENOMEM);
count = 0;
for (i = 0; i < dc->maxunit; i++) {
if (dc->devices[i]) {
list[count] = dc->devices[i];
count++;
}
}
*devlistp = list;
*devcountp = count;
return (0);
}
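/*
 * Typical caller, sketched with a hypothetical "foo" devclass: take the
 * snapshot, iterate it, then free the array with M_TEMP as documented
 * above.
 */
static void
hypothetical_foreach_foo(void)
{
	devclass_t dc;
	device_t *devs;
	int count, i;

	dc = devclass_find("foo");
	if (dc == NULL || devclass_get_devices(dc, &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_printf(devs[i], "unit %d\n", device_get_unit(devs[i]));
	free(devs, M_TEMP);
}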
/**
* @brief Get a list of drivers in the devclass
*
* An array containing a list of pointers to all the drivers in the
* given devclass is allocated and returned in @p *listp. The number
* of drivers in the array is returned in @p *countp. The caller should
* free the array using @c free(p, M_TEMP).
*
* @param dc the devclass to examine
* @param listp gives location for array pointer return value
* @param countp gives location for number of array elements
* return value
*
* @retval 0 success
* @retval ENOMEM the array allocation failed
*/
int
devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp)
{
driverlink_t dl;
driver_t **list;
int count;
count = 0;
TAILQ_FOREACH(dl, &dc->drivers, link)
count++;
list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT);
if (list == NULL)
return (ENOMEM);
count = 0;
TAILQ_FOREACH(dl, &dc->drivers, link) {
list[count] = dl->driver;
count++;
}
*listp = list;
*countp = count;
return (0);
}
/**
* @brief Get the number of devices in a devclass
*
* @param dc the devclass to examine
*/
int
devclass_get_count(devclass_t dc)
{
int count, i;
count = 0;
for (i = 0; i < dc->maxunit; i++)
if (dc->devices[i])
count++;
return (count);
}
/**
* @brief Get the maximum unit number used in a devclass
*
* Note that this is one greater than the highest currently-allocated
* unit. If a null devclass_t is passed in, -1 is returned to indicate
* that not even the devclass has been allocated yet.
*
* @param dc the devclass to examine
*/
int
devclass_get_maxunit(devclass_t dc)
{
if (dc == NULL)
return (-1);
return (dc->maxunit);
}
/**
* @brief Find a free unit number in a devclass
*
* This function searches for the first unused unit number greater
* than or equal to @p unit.
*
* @param dc the devclass to examine
* @param unit the first unit number to check
*/
int
devclass_find_free_unit(devclass_t dc, int unit)
{
if (dc == NULL)
return (unit);
while (unit < dc->maxunit && dc->devices[unit] != NULL)
unit++;
return (unit);
}
/**
* @brief Set the parent of a devclass
*
* The parent class is normally initialised automatically by
* DRIVER_MODULE().
*
* @param dc the devclass to edit
* @param pdc the new parent devclass
*/
void
devclass_set_parent(devclass_t dc, devclass_t pdc)
{
dc->parent = pdc;
}
/**
* @brief Get the parent of a devclass
*
* @param dc the devclass to examine
*/
devclass_t
devclass_get_parent(devclass_t dc)
{
return (dc->parent);
}
struct sysctl_ctx_list *
devclass_get_sysctl_ctx(devclass_t dc)
{
return (&dc->sysctl_ctx);
}
struct sysctl_oid *
devclass_get_sysctl_tree(devclass_t dc)
{
return (dc->sysctl_tree);
}
/**
* @internal
* @brief Allocate a unit number
*
* On entry, @p *unitp is the desired unit number (or @c -1 if any
* will do). The allocated unit number is returned in @p *unitp.
* @param dc the devclass to allocate from
* @param unitp points at the location for the allocated unit
* number
*
* @retval 0 success
* @retval EEXIST the requested unit number is already allocated
* @retval ENOMEM memory allocation failure
*/
static int
devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp)
{
const char *s;
int unit = *unitp;
PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc)));
/* Ask the parent bus if it wants to wire this device. */
if (unit == -1)
BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name,
&unit);
/* If we were given a wired unit number, check for existing device */
/* XXX imp XXX */
if (unit != -1) {
if (unit >= 0 && unit < dc->maxunit &&
dc->devices[unit] != NULL) {
if (bootverbose)
printf("%s: %s%d already exists; skipping it\n",
dc->name, dc->name, *unitp);
return (EEXIST);
}
} else {
/* Unwired device, find the next available slot for it */
unit = 0;
for (unit = 0;; unit++) {
/* If there is an "at" hint for a unit then skip it. */
if (resource_string_value(dc->name, unit, "at", &s) ==
0)
continue;
/* If this device slot is already in use, skip it. */
if (unit < dc->maxunit && dc->devices[unit] != NULL)
continue;
break;
}
}
/*
* We've selected a unit beyond the length of the table, so let's
* extend the table to make room for all units up to and including
* this one.
*/
if (unit >= dc->maxunit) {
device_t *newlist, *oldlist;
int newsize;
oldlist = dc->devices;
newsize = roundup((unit + 1), MINALLOCSIZE / sizeof(device_t));
- newlist = mallocarray(newsize, sizeof(device_t), M_BUS, M_NOWAIT);
+ newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT);
if (!newlist)
return (ENOMEM);
if (oldlist != NULL)
bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit);
bzero(newlist + dc->maxunit,
sizeof(device_t) * (newsize - dc->maxunit));
dc->devices = newlist;
dc->maxunit = newsize;
if (oldlist != NULL)
free(oldlist, M_BUS);
}
PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc)));
*unitp = unit;
return (0);
}
/**
* @internal
* @brief Add a device to a devclass
*
* A unit number is allocated for the device (using the device's
* preferred unit number if any) and the device is registered in the
* devclass. This allows the device to be looked up by its unit
* number, e.g. by decoding a dev_t minor number.
*
* @param dc the devclass to add to
* @param dev the device to add
*
* @retval 0 success
* @retval EEXIST the requested unit number is already allocated
* @retval ENOMEM memory allocation failure
*/
static int
devclass_add_device(devclass_t dc, device_t dev)
{
int buflen, error;
PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc)));
buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX);
if (buflen < 0)
return (ENOMEM);
dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO);
if (!dev->nameunit)
return (ENOMEM);
if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) {
free(dev->nameunit, M_BUS);
dev->nameunit = NULL;
return (error);
}
dc->devices[dev->unit] = dev;
dev->devclass = dc;
snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit);
return (0);
}
/**
* @internal
* @brief Delete a device from a devclass
*
* The device is removed from the devclass's device list and its unit
* number is freed.
* @param dc the devclass to delete from
* @param dev the device to delete
*
* @retval 0 success
*/
static int
devclass_delete_device(devclass_t dc, device_t dev)
{
if (!dc || !dev)
return (0);
PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc)));
if (dev->devclass != dc || dc->devices[dev->unit] != dev)
panic("devclass_delete_device: inconsistent device class");
dc->devices[dev->unit] = NULL;
if (dev->flags & DF_WILDCARD)
dev->unit = -1;
dev->devclass = NULL;
free(dev->nameunit, M_BUS);
dev->nameunit = NULL;
return (0);
}
/**
* @internal
* @brief Make a new device and add it as a child of @p parent
*
* @param parent the parent of the new device
* @param name the devclass name of the new device or @c NULL
* to leave the devclass unspecified
* @param unit the unit number of the new device or @c -1 to
* leave the unit number unspecified
*
* @returns the new device
*/
static device_t
make_device(device_t parent, const char *name, int unit)
{
device_t dev;
devclass_t dc;
PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit));
if (name) {
dc = devclass_find_internal(name, NULL, TRUE);
if (!dc) {
printf("make_device: can't find device class %s\n",
name);
return (NULL);
}
} else {
dc = NULL;
}
dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO);
if (!dev)
return (NULL);
dev->parent = parent;
TAILQ_INIT(&dev->children);
kobj_init((kobj_t) dev, &null_class);
dev->driver = NULL;
dev->devclass = NULL;
dev->unit = unit;
dev->nameunit = NULL;
dev->desc = NULL;
dev->busy = 0;
dev->devflags = 0;
dev->flags = DF_ENABLED;
dev->order = 0;
if (unit == -1)
dev->flags |= DF_WILDCARD;
if (name) {
dev->flags |= DF_FIXEDCLASS;
if (devclass_add_device(dc, dev)) {
kobj_delete((kobj_t) dev, M_BUS);
return (NULL);
}
}
dev->ivars = NULL;
dev->softc = NULL;
dev->state = DS_NOTPRESENT;
TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink);
bus_data_generation_update();
return (dev);
}
/**
* @internal
* @brief Print a description of a device.
*/
static int
device_print_child(device_t dev, device_t child)
{
int retval = 0;
if (device_is_alive(child))
retval += BUS_PRINT_CHILD(dev, child);
else
retval += device_printf(child, " not found\n");
return (retval);
}
/**
* @brief Create a new device
*
* This creates a new device and adds it as a child of an existing
* parent device. The new device will be added after the last existing
* child with order zero.
*
* @param dev the device which will be the parent of the
* new child device
* @param name devclass name for new device or @c NULL if not
* specified
* @param unit unit number for new device or @c -1 if not
* specified
*
* @returns the new device
*/
device_t
device_add_child(device_t dev, const char *name, int unit)
{
return (device_add_child_ordered(dev, 0, name, unit));
}
/**
* @brief Create a new device
*
* This creates a new device and adds it as a child of an existing
* parent device. The new device will be added after the last existing
* child with the same order.
*
* @param dev the device which will be the parent of the
* new child device
* @param order a value which is used to partially sort the
* children of @p dev - devices created using
* lower values of @p order appear first in @p
* dev's list of children
* @param name devclass name for new device or @c NULL if not
* specified
* @param unit unit number for new device or @c -1 if not
* specified
*
* @returns the new device
*/
device_t
device_add_child_ordered(device_t dev, u_int order, const char *name, int unit)
{
device_t child;
device_t place;
PDEBUG(("%s at %s with order %u as unit %d",
name, DEVICENAME(dev), order, unit));
KASSERT(name != NULL || unit == -1,
("child device with wildcard name and specific unit number"));
child = make_device(dev, name, unit);
if (child == NULL)
return (child);
child->order = order;
TAILQ_FOREACH(place, &dev->children, link) {
if (place->order > order)
break;
}
if (place) {
/*
* The device 'place' is the first device whose order is
* greater than the new child.
*/
TAILQ_INSERT_BEFORE(place, child, link);
} else {
/*
* The new child's order is greater or equal to the order of
* any existing device. Add the child to the tail of the list.
*/
TAILQ_INSERT_TAIL(&dev->children, child, link);
}
bus_data_generation_update();
return (child);
}
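/*
 * Sketch of a hypothetical bus attach routine relying on the ordering:
 * the lower-order child sits ahead of the wildcard one, so it is probed
 * first when bus_generic_attach() walks the children in list order.
 */
static int
hypothetical_bus_attach(device_t dev)
{
	device_add_child_ordered(dev, 0, "bridge", -1);	/* sorts first */
	device_add_child_ordered(dev, 10, NULL, -1);	/* wildcard child */
	return (bus_generic_attach(dev));
}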
/**
* @brief Delete a device
*
* This function deletes a device along with all of its children. If
* the device currently has a driver attached to it, the device is
* detached first using device_detach().
*
* @param dev the parent device
* @param child the device to delete
*
* @retval 0 success
* @retval non-zero a unit error code describing the error
*/
int
device_delete_child(device_t dev, device_t child)
{
int error;
device_t grandchild;
PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev)));
/* detach parent before deleting children, if any */
if ((error = device_detach(child)) != 0)
return (error);
/* remove children second */
while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) {
error = device_delete_child(child, grandchild);
if (error)
return (error);
}
if (child->devclass)
devclass_delete_device(child->devclass, child);
if (child->parent)
BUS_CHILD_DELETED(dev, child);
TAILQ_REMOVE(&dev->children, child, link);
TAILQ_REMOVE(&bus_data_devices, child, devlink);
kobj_delete((kobj_t) child, M_BUS);
bus_data_generation_update();
return (0);
}
/**
* @brief Delete all children devices of the given device, if any.
*
* This function deletes all children devices of the given device, if
* any, using the device_delete_child() function for each device it
* finds. If a child device cannot be deleted, this function will
* return an error code.
*
* @param dev the parent device
*
* @retval 0 success
* @retval non-zero a device would not detach
*/
int
device_delete_children(device_t dev)
{
device_t child;
int error;
PDEBUG(("Deleting all children of %s", DEVICENAME(dev)));
error = 0;
while ((child = TAILQ_FIRST(&dev->children)) != NULL) {
error = device_delete_child(dev, child);
if (error) {
PDEBUG(("Failed deleting %s", DEVICENAME(child)));
break;
}
}
return (error);
}
/**
* @brief Find a device given a unit number
*
* This is similar to devclass_get_devices() but only searches for
* devices which have @p dev as a parent.
*
* @param dev the parent device to search
* @param unit the unit number to search for. If the unit is -1,
* return the first child of @p dev which has name
* @p classname (that is, the one with the lowest unit.)
*
* @returns the device with the given unit number or @c
* NULL if there is no such device
*/
device_t
device_find_child(device_t dev, const char *classname, int unit)
{
devclass_t dc;
device_t child;
dc = devclass_find(classname);
if (!dc)
return (NULL);
if (unit != -1) {
child = devclass_get_device(dc, unit);
if (child && child->parent == dev)
return (child);
} else {
for (unit = 0; unit < devclass_get_maxunit(dc); unit++) {
child = devclass_get_device(dc, unit);
if (child && child->parent == dev)
return (child);
}
}
return (NULL);
}
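/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The "acme" devclass name is hypothetical.]  DEVICE_IDENTIFY()
 * routines commonly use device_find_child() with a wildcard unit to avoid
 * adding a duplicate child when the identify pass runs more than once.
 */
static void
acme_identify(driver_t *driver, device_t parent)
{
	if (device_find_child(parent, "acme", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "acme", -1) == NULL)
		device_printf(parent, "could not add acme child\n");
}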
/**
* @internal
*/
static driverlink_t
first_matching_driver(devclass_t dc, device_t dev)
{
if (dev->devclass)
return (devclass_find_driver_internal(dc, dev->devclass->name));
return (TAILQ_FIRST(&dc->drivers));
}
/**
* @internal
*/
static driverlink_t
next_matching_driver(devclass_t dc, device_t dev, driverlink_t last)
{
if (dev->devclass) {
driverlink_t dl;
for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link))
if (!strcmp(dev->devclass->name, dl->driver->name))
return (dl);
return (NULL);
}
return (TAILQ_NEXT(last, link));
}
/**
* @internal
*/
int
device_probe_child(device_t dev, device_t child)
{
devclass_t dc;
driverlink_t best = NULL;
driverlink_t dl;
int result, pri = 0;
int hasclass = (child->devclass != NULL);
GIANT_REQUIRED;
dc = dev->devclass;
if (!dc)
panic("device_probe_child: parent device has no devclass");
/*
* If the state is already probed, then return. However, don't
* return if we can rebid this object.
*/
if (child->state == DS_ALIVE && (child->flags & DF_REBID) == 0)
return (0);
for (; dc; dc = dc->parent) {
for (dl = first_matching_driver(dc, child);
dl;
dl = next_matching_driver(dc, child, dl)) {
/* If this driver's pass is too high, then ignore it. */
if (dl->pass > bus_current_pass)
continue;
PDEBUG(("Trying %s", DRIVERNAME(dl->driver)));
result = device_set_driver(child, dl->driver);
if (result == ENOMEM)
return (result);
else if (result != 0)
continue;
if (!hasclass) {
if (device_set_devclass(child,
dl->driver->name) != 0) {
char const * devname =
device_get_name(child);
if (devname == NULL)
devname = "(unknown)";
printf("driver bug: Unable to set "
"devclass (class: %s "
"devname: %s)\n",
dl->driver->name,
devname);
(void)device_set_driver(child, NULL);
continue;
}
}
/* Fetch any flags for the device before probing. */
resource_int_value(dl->driver->name, child->unit,
"flags", &child->devflags);
result = DEVICE_PROBE(child);
/* Reset flags and devclass before the next probe. */
child->devflags = 0;
if (!hasclass)
(void)device_set_devclass(child, NULL);
/*
* If the driver returns SUCCESS, there can be
* no higher match for this device.
*/
if (result == 0) {
best = dl;
pri = 0;
break;
}
/*
* Reset DF_QUIET in case this driver doesn't
* end up as the best driver.
*/
device_verbose(child);
/*
* Probes that return BUS_PROBE_NOWILDCARD or lower
* only match on devices whose driver was explicitly
* specified.
*/
if (result <= BUS_PROBE_NOWILDCARD &&
!(child->flags & DF_FIXEDCLASS)) {
result = ENXIO;
}
/*
* The driver returned an error so it
* certainly doesn't match.
*/
if (result > 0) {
(void)device_set_driver(child, NULL);
continue;
}
/*
* The driver returned a priority lower than SUCCESS;
* remember the best matching driver.  Initialise the
* value of pri on the first match.
*/
if (best == NULL || result > pri) {
best = dl;
pri = result;
continue;
}
}
/*
* If we have an unambiguous match in this devclass,
* don't look in the parent.
*/
if (best && pri == 0)
break;
}
/*
* If we found a driver, change state and initialise the devclass.
*/
/* XXX What happens if we rebid and got no best? */
if (best) {
/*
* If this device was attached, and we were asked to
* rescan, and it is a different driver, then we have
* to detach the old driver and reattach this new one.
* Note, we don't have to check for DF_REBID here
* because if the state is > DS_ALIVE, we know DF_REBID
* must be set.
*
* This assumes that all DF_REBID drivers can have
* their probe routine called at any time and that
* they are idempotent as well as completely benign in
* normal operations.
*
* We also have to make sure that the detach
* succeeded, otherwise we fail the operation (or
* maybe it should just fail silently? I'm torn).
*/
if (child->state > DS_ALIVE && best->driver != child->driver)
if ((result = device_detach(dev)) != 0)
return (result);
/* Set the winning driver, devclass, and flags. */
if (!child->devclass) {
result = device_set_devclass(child, best->driver->name);
if (result != 0)
return (result);
}
result = device_set_driver(child, best->driver);
if (result != 0)
return (result);
resource_int_value(best->driver->name, child->unit,
"flags", &child->devflags);
if (pri < 0) {
/*
* A bit bogus. Call the probe method again to make
* sure that we have the right description.
*/
DEVICE_PROBE(child);
#if 0
child->flags |= DF_REBID;
#endif
} else
child->flags &= ~DF_REBID;
child->state = DS_ALIVE;
bus_data_generation_update();
return (0);
}
return (ENXIO);
}
/**
* @brief Return the parent of a device
*/
device_t
device_get_parent(device_t dev)
{
return (dev->parent);
}
/**
* @brief Get a list of children of a device
*
* An array containing a list of all the children of the given device
* is allocated and returned in @p *devlistp. The number of devices
* in the array is returned in @p *devcountp. The caller should free
* the array using @c free(p, M_TEMP).
*
* @param dev the device to examine
* @param devlistp points at location for array pointer return
* value
* @param devcountp points at location for array size return value
*
* @retval 0 success
* @retval ENOMEM the array allocation failed
*/
int
device_get_children(device_t dev, device_t **devlistp, int *devcountp)
{
int count;
device_t child;
device_t *list;
count = 0;
TAILQ_FOREACH(child, &dev->children, link) {
count++;
}
if (count == 0) {
*devlistp = NULL;
*devcountp = 0;
return (0);
}
- list = mallocarray(count, sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO);
+ list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO);
if (!list)
return (ENOMEM);
count = 0;
TAILQ_FOREACH(child, &dev->children, link) {
list[count] = child;
count++;
}
*devlistp = list;
*devcountp = count;
return (0);
}
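/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.]  Typical device_get_children() usage: walk the returned array and
 * release it with free(..., M_TEMP), as required by the comment above.
 */
static void
acme_report_children(device_t bus)
{
	device_t *children;
	int i, nchildren;

	if (device_get_children(bus, &children, &nchildren) != 0)
		return;
	for (i = 0; i < nchildren; i++)
		if (device_is_attached(children[i]))
			device_printf(children[i], "attached\n");
	free(children, M_TEMP);
}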
/**
* @brief Return the current driver for the device or @c NULL if there
* is no driver currently attached
*/
driver_t *
device_get_driver(device_t dev)
{
return (dev->driver);
}
/**
* @brief Return the current devclass for the device or @c NULL if
* there is none.
*/
devclass_t
device_get_devclass(device_t dev)
{
return (dev->devclass);
}
/**
* @brief Return the name of the device's devclass or @c NULL if there
* is none.
*/
const char *
device_get_name(device_t dev)
{
if (dev != NULL && dev->devclass)
return (devclass_get_name(dev->devclass));
return (NULL);
}
/**
* @brief Return a string containing the device's devclass name
* followed by an ascii representation of the device's unit number
* (e.g. @c "foo2").
*/
const char *
device_get_nameunit(device_t dev)
{
return (dev->nameunit);
}
/**
* @brief Return the device's unit number.
*/
int
device_get_unit(device_t dev)
{
return (dev->unit);
}
/**
* @brief Return the device's description string
*/
const char *
device_get_desc(device_t dev)
{
return (dev->desc);
}
/**
* @brief Return the device's flags
*/
uint32_t
device_get_flags(device_t dev)
{
return (dev->devflags);
}
struct sysctl_ctx_list *
device_get_sysctl_ctx(device_t dev)
{
return (&dev->sysctl_ctx);
}
struct sysctl_oid *
device_get_sysctl_tree(device_t dev)
{
return (dev->sysctl_tree);
}
/**
* @brief Print the name of the device followed by a colon and a space
*
* @returns the number of characters printed
*/
int
device_print_prettyname(device_t dev)
{
const char *name = device_get_name(dev);
if (name == NULL)
return (printf("unknown: "));
return (printf("%s%d: ", name, device_get_unit(dev)));
}
/**
* @brief Print the name of the device followed by a colon, a space
* and the result of calling vprintf() with the value of @p fmt and
* the following arguments.
*
* @returns the number of characters printed
*/
int
device_printf(device_t dev, const char * fmt, ...)
{
va_list ap;
int retval;
retval = device_print_prettyname(dev);
va_start(ap, fmt);
retval += vprintf(fmt, ap);
va_end(ap);
return (retval);
}
/**
* @internal
*/
static void
device_set_desc_internal(device_t dev, const char* desc, int copy)
{
if (dev->desc && (dev->flags & DF_DESCMALLOCED)) {
free(dev->desc, M_BUS);
dev->flags &= ~DF_DESCMALLOCED;
dev->desc = NULL;
}
if (copy && desc) {
dev->desc = malloc(strlen(desc) + 1, M_BUS, M_NOWAIT);
if (dev->desc) {
strcpy(dev->desc, desc);
dev->flags |= DF_DESCMALLOCED;
}
} else {
/* Avoid a -Wcast-qual warning */
dev->desc = (char *)(uintptr_t) desc;
}
bus_data_generation_update();
}
/**
* @brief Set the device's description
*
* The value of @c desc should be a string constant that will not
* change (at least until the description is changed in a subsequent
* call to device_set_desc() or device_set_desc_copy()).
*/
void
device_set_desc(device_t dev, const char* desc)
{
device_set_desc_internal(dev, desc, FALSE);
}
/**
* @brief Set the device's description
*
* The string pointed to by @c desc is copied. Use this function if
* the device description is generated (e.g. with sprintf()).
*/
void
device_set_desc_copy(device_t dev, const char* desc)
{
device_set_desc_internal(dev, desc, TRUE);
}
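/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The "acme" probe routine is hypothetical.]  A probe routine that
 * builds its description at run time must use device_set_desc_copy();
 * device_set_desc() would keep a pointer to the on-stack buffer.
 */
static int
acme_probe(device_t dev)
{
	char desc[64];

	snprintf(desc, sizeof(desc), "ACME controller (unit %d)",
	    device_get_unit(dev));
	device_set_desc_copy(dev, desc);
	return (BUS_PROBE_DEFAULT);
}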
/**
* @brief Set the device's flags
*/
void
device_set_flags(device_t dev, uint32_t flags)
{
dev->devflags = flags;
}
/**
* @brief Return the device's softc field
*
* The softc is allocated and zeroed when a driver is attached, based
* on the size field of the driver.
*/
void *
device_get_softc(device_t dev)
{
return (dev->softc);
}
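/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The "acme_softc" layout is hypothetical.]  Because the softc is
 * allocated and zeroed to the driver_t's size field at attach time, drivers
 * simply fetch it with device_get_softc():
 */
struct acme_softc {
	device_t	sc_dev;
	uint32_t	sc_flags;
};

static int
acme_attach(device_t dev)
{
	struct acme_softc *sc = device_get_softc(dev);

	sc->sc_dev = dev;
	sc->sc_flags = device_get_flags(dev);
	return (0);
}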
/**
* @brief Set the device's softc field
*
* Most drivers do not need to use this since the softc is allocated
* automatically when the driver is attached.
*/
void
device_set_softc(device_t dev, void *softc)
{
if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC))
free(dev->softc, M_BUS_SC);
dev->softc = softc;
if (dev->softc)
dev->flags |= DF_EXTERNALSOFTC;
else
dev->flags &= ~DF_EXTERNALSOFTC;
}
/**
* @brief Free claimed softc
*
* Most drivers do not need to use this since the softc is freed
* automatically when the driver is detached.
*/
void
device_free_softc(void *softc)
{
free(softc, M_BUS_SC);
}
/**
* @brief Claim softc
*
* This function can be used to let the driver free the automatically
* allocated softc using "device_free_softc()". This function is
* useful when the driver is refcounting the softc and the softc
* cannot be freed when the "device_detach" method is called.
*/
void
device_claim_softc(device_t dev)
{
if (dev->softc)
dev->flags |= DF_EXTERNALSOFTC;
else
dev->flags &= ~DF_EXTERNALSOFTC;
}
/**
* @brief Get the device's ivars field
*
* The ivars field is used by the parent device to store per-device
* state (e.g. the physical location of the device or a list of
* resources).
*/
void *
device_get_ivars(device_t dev)
{
KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)"));
return (dev->ivars);
}
/**
* @brief Set the device's ivars field
*/
void
device_set_ivars(device_t dev, void * ivars)
{
KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)"));
dev->ivars = ivars;
}
/**
* @brief Return the device's state
*/
device_state_t
device_get_state(device_t dev)
{
return (dev->state);
}
/**
* @brief Set the DF_ENABLED flag for the device
*/
void
device_enable(device_t dev)
{
dev->flags |= DF_ENABLED;
}
/**
* @brief Clear the DF_ENABLED flag for the device
*/
void
device_disable(device_t dev)
{
dev->flags &= ~DF_ENABLED;
}
/**
* @brief Increment the busy counter for the device
*/
void
device_busy(device_t dev)
{
if (dev->state < DS_ATTACHING)
panic("device_busy: called for unattached device");
if (dev->busy == 0 && dev->parent)
device_busy(dev->parent);
dev->busy++;
if (dev->state == DS_ATTACHED)
dev->state = DS_BUSY;
}
/**
* @brief Decrement the busy counter for the device
*/
void
device_unbusy(device_t dev)
{
if (dev->busy != 0 && dev->state != DS_BUSY &&
dev->state != DS_ATTACHING)
panic("device_unbusy: called for non-busy device %s",
device_get_nameunit(dev));
dev->busy--;
if (dev->busy == 0) {
if (dev->parent)
device_unbusy(dev->parent);
if (dev->state == DS_BUSY)
dev->state = DS_ATTACHED;
}
}
/**
* @brief Set the DF_QUIET flag for the device
*/
void
device_quiet(device_t dev)
{
dev->flags |= DF_QUIET;
}
/**
* @brief Clear the DF_QUIET flag for the device
*/
void
device_verbose(device_t dev)
{
dev->flags &= ~DF_QUIET;
}
/**
* @brief Return non-zero if the DF_QUIET flag is set on the device
*/
int
device_is_quiet(device_t dev)
{
return ((dev->flags & DF_QUIET) != 0);
}
/**
* @brief Return non-zero if the DF_ENABLED flag is set on the device
*/
int
device_is_enabled(device_t dev)
{
return ((dev->flags & DF_ENABLED) != 0);
}
/**
* @brief Return non-zero if the device was successfully probed
*/
int
device_is_alive(device_t dev)
{
return (dev->state >= DS_ALIVE);
}
/**
* @brief Return non-zero if the device currently has a driver
* attached to it
*/
int
device_is_attached(device_t dev)
{
return (dev->state >= DS_ATTACHED);
}
/**
* @brief Return non-zero if the device is currently suspended.
*/
int
device_is_suspended(device_t dev)
{
return ((dev->flags & DF_SUSPENDED) != 0);
}
/**
* @brief Set the devclass of a device
* @see devclass_add_device().
*/
int
device_set_devclass(device_t dev, const char *classname)
{
devclass_t dc;
int error;
if (!classname) {
if (dev->devclass)
devclass_delete_device(dev->devclass, dev);
return (0);
}
if (dev->devclass) {
printf("device_set_devclass: device class already set\n");
return (EINVAL);
}
dc = devclass_find_internal(classname, NULL, TRUE);
if (!dc)
return (ENOMEM);
error = devclass_add_device(dc, dev);
bus_data_generation_update();
return (error);
}
/**
* @brief Set the devclass of a device and mark the devclass fixed.
* @see device_set_devclass()
*/
int
device_set_devclass_fixed(device_t dev, const char *classname)
{
int error;
if (classname == NULL)
return (EINVAL);
error = device_set_devclass(dev, classname);
if (error)
return (error);
dev->flags |= DF_FIXEDCLASS;
return (0);
}
/**
* @brief Set the driver of a device
*
* @retval 0 success
* @retval EBUSY the device already has a driver attached
* @retval ENOMEM a memory allocation failure occurred
*/
int
device_set_driver(device_t dev, driver_t *driver)
{
if (dev->state >= DS_ATTACHED)
return (EBUSY);
if (dev->driver == driver)
return (0);
if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) {
free(dev->softc, M_BUS_SC);
dev->softc = NULL;
}
device_set_desc(dev, NULL);
kobj_delete((kobj_t) dev, NULL);
dev->driver = driver;
if (driver) {
kobj_init((kobj_t) dev, (kobj_class_t) driver);
if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) {
dev->softc = malloc(driver->size, M_BUS_SC,
M_NOWAIT | M_ZERO);
if (!dev->softc) {
kobj_delete((kobj_t) dev, NULL);
kobj_init((kobj_t) dev, &null_class);
dev->driver = NULL;
return (ENOMEM);
}
}
} else {
kobj_init((kobj_t) dev, &null_class);
}
bus_data_generation_update();
return (0);
}
/**
* @brief Probe a device and return the status.
*
* This function is the core of the device autoconfiguration
* system. Its purpose is to select a suitable driver for a device and
* then call that driver to initialise the hardware appropriately. The
* driver is selected by calling the DEVICE_PROBE() method of a set of
* candidate drivers and then choosing the driver which returned the
* best value. This driver is then attached to the device using
* device_attach().
*
* The set of suitable drivers is taken from the list of drivers in
* the parent device's devclass. If the device was originally created
* with a specific class name (see device_add_child()), only drivers
* with that name are probed, otherwise all drivers in the devclass
* are probed. If no drivers return successful probe values in the
* parent devclass, the search continues in the parent of that
* devclass (see devclass_get_parent()) if any.
*
* @param dev the device to initialise
*
* @retval 0 success
* @retval ENXIO no driver was found
* @retval ENOMEM memory allocation failure
* @retval non-zero some other unix error code
* @retval -1 Device already attached
*/
int
device_probe(device_t dev)
{
int error;
GIANT_REQUIRED;
if (dev->state >= DS_ALIVE && (dev->flags & DF_REBID) == 0)
return (-1);
if (!(dev->flags & DF_ENABLED)) {
if (bootverbose && device_get_name(dev) != NULL) {
device_print_prettyname(dev);
printf("not probed (disabled)\n");
}
return (-1);
}
if ((error = device_probe_child(dev->parent, dev)) != 0) {
if (bus_current_pass == BUS_PASS_DEFAULT &&
!(dev->flags & DF_DONENOMATCH)) {
BUS_PROBE_NOMATCH(dev->parent, dev);
devnomatch(dev);
dev->flags |= DF_DONENOMATCH;
}
return (error);
}
return (0);
}
/**
* @brief Probe a device and attach a driver if possible
*
* This function calls device_probe() and, if the probe succeeds, attaches
* the device.
*/
int
device_probe_and_attach(device_t dev)
{
int error;
GIANT_REQUIRED;
error = device_probe(dev);
if (error == -1)
return (0);
else if (error != 0)
return (error);
CURVNET_SET_QUIET(vnet0);
error = device_attach(dev);
CURVNET_RESTORE();
return (error);
}
/**
* @brief Attach a device driver to a device
*
* This function is a wrapper around the DEVICE_ATTACH() driver
* method. In addition to calling DEVICE_ATTACH(), it initialises the
* device's sysctl tree, optionally prints a description of the device
* and queues a notification event for user-based device management
* services.
*
* Normally this function is only called internally from
* device_probe_and_attach().
*
* @param dev the device to initialise
*
* @retval 0 success
* @retval ENXIO no driver was found
* @retval ENOMEM memory allocation failure
* @retval non-zero some other unix error code
*/
int
device_attach(device_t dev)
{
uint64_t attachtime;
int error;
if (resource_disabled(dev->driver->name, dev->unit)) {
device_disable(dev);
if (bootverbose)
device_printf(dev, "disabled via hints entry\n");
return (ENXIO);
}
device_sysctl_init(dev);
if (!device_is_quiet(dev))
device_print_child(dev->parent, dev);
attachtime = get_cyclecount();
dev->state = DS_ATTACHING;
if ((error = DEVICE_ATTACH(dev)) != 0) {
printf("device_attach: %s%d attach returned %d\n",
dev->driver->name, dev->unit, error);
if (!(dev->flags & DF_FIXEDCLASS))
devclass_delete_device(dev->devclass, dev);
(void)device_set_driver(dev, NULL);
device_sysctl_fini(dev);
KASSERT(dev->busy == 0, ("attach failed but busy"));
dev->state = DS_NOTPRESENT;
return (error);
}
attachtime = get_cyclecount() - attachtime;
/*
* 4 bits per device is a reasonable value for desktop and server
* hardware with good get_cyclecount() implementations, but WILL
* need to be adjusted on other platforms.
*/
#define RANDOM_PROBE_BIT_GUESS 4
if (bootverbose)
printf("random: harvesting attach, %zu bytes (%d bits) from %s%d\n",
sizeof(attachtime), RANDOM_PROBE_BIT_GUESS,
dev->driver->name, dev->unit);
random_harvest_direct(&attachtime, sizeof(attachtime),
RANDOM_PROBE_BIT_GUESS, RANDOM_ATTACH);
device_sysctl_update(dev);
if (dev->busy)
dev->state = DS_BUSY;
else
dev->state = DS_ATTACHED;
dev->flags &= ~DF_DONENOMATCH;
EVENTHANDLER_DIRECT_INVOKE(device_attach, dev);
devadded(dev);
return (0);
}
/**
* @brief Detach a driver from a device
*
* This function is a wrapper around the DEVICE_DETACH() driver
* method. If the call to DEVICE_DETACH() succeeds, it calls
* BUS_CHILD_DETACHED() for the parent of @p dev, queues a
* notification event for user-based device management services and
* cleans up the device's sysctl tree.
*
* @param dev the device to un-initialise
*
* @retval 0 success
* @retval ENXIO no driver was found
* @retval ENOMEM memory allocation failure
* @retval non-zero some other unix error code
*/
int
device_detach(device_t dev)
{
int error;
GIANT_REQUIRED;
PDEBUG(("%s", DEVICENAME(dev)));
if (dev->state == DS_BUSY)
return (EBUSY);
if (dev->state != DS_ATTACHED)
return (0);
EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_BEGIN);
if ((error = DEVICE_DETACH(dev)) != 0) {
EVENTHANDLER_DIRECT_INVOKE(device_detach, dev,
EVHDEV_DETACH_FAILED);
return (error);
} else {
EVENTHANDLER_DIRECT_INVOKE(device_detach, dev,
EVHDEV_DETACH_COMPLETE);
}
devremoved(dev);
if (!device_is_quiet(dev))
device_printf(dev, "detached\n");
if (dev->parent)
BUS_CHILD_DETACHED(dev->parent, dev);
if (!(dev->flags & DF_FIXEDCLASS))
devclass_delete_device(dev->devclass, dev);
device_verbose(dev);
dev->state = DS_NOTPRESENT;
(void)device_set_driver(dev, NULL);
device_sysctl_fini(dev);
return (0);
}
/**
* @brief Tells a driver to quiesce itself.
*
* This function is a wrapper around the DEVICE_QUIESCE() driver
* method and simply returns its result.
*
* @param dev the device to quiesce
*
* @retval 0 success
* @retval ENXIO no driver was found
* @retval ENOMEM memory allocation failure
* @retval non-zero some other unix error code
*/
int
device_quiesce(device_t dev)
{
PDEBUG(("%s", DEVICENAME(dev)));
if (dev->state == DS_BUSY)
return (EBUSY);
if (dev->state != DS_ATTACHED)
return (0);
return (DEVICE_QUIESCE(dev));
}
/**
* @brief Notify a device of system shutdown
*
* This function calls the DEVICE_SHUTDOWN() driver method if the
* device currently has an attached driver.
*
* @returns the value returned by DEVICE_SHUTDOWN()
*/
int
device_shutdown(device_t dev)
{
if (dev->state < DS_ATTACHED)
return (0);
return (DEVICE_SHUTDOWN(dev));
}
/**
* @brief Set the unit number of a device
*
* This function can be used to override the unit number used for a
* device (e.g. to wire a device to a pre-configured unit number).
*/
int
device_set_unit(device_t dev, int unit)
{
devclass_t dc;
int err;
dc = device_get_devclass(dev);
if (unit < dc->maxunit && dc->devices[unit])
return (EBUSY);
err = devclass_delete_device(dc, dev);
if (err)
return (err);
dev->unit = unit;
err = devclass_add_device(dc, dev);
if (err)
return (err);
bus_data_generation_update();
return (0);
}
/*======================================*/
/*
* Some useful method implementations to make life easier for bus drivers.
*/
void
resource_init_map_request_impl(struct resource_map_request *args, size_t sz)
{
bzero(args, sz);
args->size = sz;
args->memattr = VM_MEMATTR_UNCACHEABLE;
}
/**
* @brief Initialise a resource list.
*
* @param rl the resource list to initialise
*/
void
resource_list_init(struct resource_list *rl)
{
STAILQ_INIT(rl);
}
/**
* @brief Reclaim memory used by a resource list.
*
* This function frees the memory for all resource entries on the list
* (if any).
*
* @param rl the resource list to free
*/
void
resource_list_free(struct resource_list *rl)
{
struct resource_list_entry *rle;
while ((rle = STAILQ_FIRST(rl)) != NULL) {
if (rle->res)
panic("resource_list_free: resource entry is busy");
STAILQ_REMOVE_HEAD(rl, link);
free(rle, M_BUS);
}
}
/**
* @brief Add a resource entry.
*
* This function adds a resource entry using the given @p type, @p
* start, @p end and @p count values. A rid value is chosen by
* searching sequentially for the first unused rid starting at zero.
*
* @param rl the resource list to edit
* @param type the resource entry type (e.g. SYS_RES_MEMORY)
* @param start the start address of the resource
* @param end the end address of the resource
* @param count XXX end-start+1
*/
int
resource_list_add_next(struct resource_list *rl, int type, rman_res_t start,
rman_res_t end, rman_res_t count)
{
int rid;
rid = 0;
while (resource_list_find(rl, type, rid) != NULL)
rid++;
resource_list_add(rl, type, rid, start, end, count);
return (rid);
}
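/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The address range and IRQ number are made up.]  A bus typically
 * populates a child's resource list while enumerating hardware, using fixed
 * rids for decoded windows and resource_list_add_next() when the rid does
 * not matter.
 */
static void
acme_set_child_resources(struct resource_list *rl)
{
	/* 4 KB register window at a fixed rid. */
	resource_list_add(rl, SYS_RES_MEMORY, 0, 0xfe000000UL,
	    0xfe000fffUL, 0x1000);
	/* One interrupt line; let the next free rid be chosen. */
	resource_list_add_next(rl, SYS_RES_IRQ, 10, 10, 1);
}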
/**
* @brief Add or modify a resource entry.
*
* If an existing entry exists with the same type and rid, it will be
* modified using the given values of @p start, @p end and @p
* count. If no entry exists, a new one will be created using the
* given values. The resource list entry that matches is then returned.
*
* @param rl the resource list to edit
* @param type the resource entry type (e.g. SYS_RES_MEMORY)
* @param rid the resource identifier
* @param start the start address of the resource
* @param end the end address of the resource
* @param count XXX end-start+1
*/
struct resource_list_entry *
resource_list_add(struct resource_list *rl, int type, int rid,
rman_res_t start, rman_res_t end, rman_res_t count)
{
struct resource_list_entry *rle;
rle = resource_list_find(rl, type, rid);
if (!rle) {
rle = malloc(sizeof(struct resource_list_entry), M_BUS,
M_NOWAIT);
if (!rle)
panic("resource_list_add: can't record entry");
STAILQ_INSERT_TAIL(rl, rle, link);
rle->type = type;
rle->rid = rid;
rle->res = NULL;
rle->flags = 0;
}
if (rle->res)
panic("resource_list_add: resource entry is busy");
rle->start = start;
rle->end = end;
rle->count = count;
return (rle);
}
/**
* @brief Determine if a resource entry is busy.
*
* Returns true if a resource entry is busy meaning that it has an
* associated resource that is not an unallocated "reserved" resource.
*
* @param rl the resource list to search
* @param type the resource entry type (e.g. SYS_RES_MEMORY)
* @param rid the resource identifier
*
* @returns Non-zero if the entry is busy, zero otherwise.
*/
int
resource_list_busy(struct resource_list *rl, int type, int rid)
{
struct resource_list_entry *rle;
rle = resource_list_find(rl, type, rid);
if (rle == NULL || rle->res == NULL)
return (0);
if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) {
KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE),
("reserved resource is active"));
return (0);
}
return (1);
}
/**
* @brief Determine if a resource entry is reserved.
*
* Returns true if a resource entry is reserved meaning that it has an
* associated "reserved" resource. The resource can either be
* allocated or unallocated.
*
* @param rl the resource list to search
* @param type the resource entry type (e.g. SYS_RES_MEMORY)
* @param rid the resource identifier
*
* @returns Non-zero if the entry is reserved, zero otherwise.
*/
int
resource_list_reserved(struct resource_list *rl, int type, int rid)
{
struct resource_list_entry *rle;
rle = resource_list_find(rl, type, rid);
if (rle != NULL && rle->flags & RLE_RESERVED)
return (1);
return (0);
}
/**
* @brief Find a resource entry by type and rid.
*
* @param rl the resource list to search
* @param type the resource entry type (e.g. SYS_RES_MEMORY)
* @param rid the resource identifier
*
* @returns the resource entry pointer or NULL if there is no such
* entry.
*/
struct resource_list_entry *
resource_list_find(struct resource_list *rl, int type, int rid)
{
struct resource_list_entry *rle;
STAILQ_FOREACH(rle, rl, link) {
if (rle->type == type && rle->rid == rid)
return (rle);
}
return (NULL);
}
/**
* @brief Delete a resource entry.
*
* @param rl the resource list to edit
* @param type the resource entry type (e.g. SYS_RES_MEMORY)
* @param rid the resource identifier
*/
void
resource_list_delete(struct resource_list *rl, int type, int rid)
{
struct resource_list_entry *rle = resource_list_find(rl, type, rid);
if (rle) {
if (rle->res != NULL)
panic("resource_list_delete: resource has not been released");
STAILQ_REMOVE(rl, rle, resource_list_entry, link);
free(rle, M_BUS);
}
}
/**
* @brief Allocate a reserved resource
*
* This can be used by buses to force the allocation of resources
* that are always active in the system even if they are not allocated
* by a driver (e.g. PCI BARs). This function is usually called when
* adding a new child to the bus. The resource is allocated from the
* parent bus when it is reserved. The resource list entry is marked
* with RLE_RESERVED to note that it is a reserved resource.
*
* Subsequent attempts to allocate the resource with
* resource_list_alloc() will succeed the first time and will set
* RLE_ALLOCATED to note that it has been allocated. When a reserved
* resource that has been allocated is released with
* resource_list_release(), the RLE_ALLOCATED flag is cleared, but
* the actual resource remains allocated. The resource can be released to
* the parent bus by calling resource_list_unreserve().
*
* @param rl the resource list to allocate from
* @param bus the parent device of @p child
* @param child the device for which the resource is being reserved
* @param type the type of resource to allocate
* @param rid a pointer to the resource identifier
* @param start hint at the start of the resource range - pass
* @c 0 for any start address
* @param end hint at the end of the resource range - pass
* @c ~0 for any end address
* @param count hint at the size of range required - pass @c 1
* for any size
* @param flags any extra flags to control the resource
* allocation - see @c RF_XXX flags in
* <sys/rman.h> for details
*
* @returns the resource which was allocated or @c NULL if no
* resource could be allocated
*/
struct resource *
resource_list_reserve(struct resource_list *rl, device_t bus, device_t child,
int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
struct resource_list_entry *rle = NULL;
int passthrough = (device_get_parent(child) != bus);
struct resource *r;
if (passthrough)
panic(
"resource_list_reserve() should only be called for direct children");
if (flags & RF_ACTIVE)
panic(
"resource_list_reserve() should only reserve inactive resources");
r = resource_list_alloc(rl, bus, child, type, rid, start, end, count,
flags);
if (r != NULL) {
rle = resource_list_find(rl, type, *rid);
rle->flags |= RLE_RESERVED;
}
return (r);
}
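/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The rid and range are hypothetical.]  A bus can pin a decoded
 * window for a direct child at enumeration time so it stays allocated from
 * the parent even while no driver is attached.  Note that RF_ACTIVE must not
 * be passed here.
 */
static void
acme_reserve_window(struct resource_list *rl, device_t bus, device_t child)
{
	int rid = 0;

	if (resource_list_reserve(rl, bus, child, SYS_RES_MEMORY, &rid,
	    0xfe000000UL, 0xfe000fffUL, 0x1000, 0) == NULL)
		device_printf(bus, "failed to reserve window for %s\n",
		    device_get_nameunit(child));
}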
/**
* @brief Helper function for implementing BUS_ALLOC_RESOURCE()
*
* Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list
* and passing the allocation up to the parent of @p bus. This assumes
* that the first entry of @c device_get_ivars(child) is a struct
* resource_list. This also handles 'passthrough' allocations where a
* child is a remote descendant of bus by passing the allocation up to
* the parent of bus.
*
* Typically, a bus driver would store a list of child resources
* somewhere in the child device's ivars (see device_get_ivars()) and
* its implementation of BUS_ALLOC_RESOURCE() would find that list and
* then call resource_list_alloc() to perform the allocation.
*
* @param rl the resource list to allocate from
* @param bus the parent device of @p child
* @param child the device which is requesting an allocation
* @param type the type of resource to allocate
* @param rid a pointer to the resource identifier
* @param start hint at the start of the resource range - pass
* @c 0 for any start address
* @param end hint at the end of the resource range - pass
* @c ~0 for any end address
* @param count hint at the size of range required - pass @c 1
* for any size
* @param flags any extra flags to control the resource
* allocation - see @c RF_XXX flags in
* <sys/rman.h> for details
*
* @returns the resource which was allocated or @c NULL if no
* resource could be allocated
*/
struct resource *
resource_list_alloc(struct resource_list *rl, device_t bus, device_t child,
int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
struct resource_list_entry *rle = NULL;
int passthrough = (device_get_parent(child) != bus);
int isdefault = RMAN_IS_DEFAULT_RANGE(start, end);
if (passthrough) {
return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
type, rid, start, end, count, flags));
}
rle = resource_list_find(rl, type, *rid);
if (!rle)
return (NULL); /* no resource of that type/rid */
if (rle->res) {
if (rle->flags & RLE_RESERVED) {
if (rle->flags & RLE_ALLOCATED)
return (NULL);
if ((flags & RF_ACTIVE) &&
bus_activate_resource(child, type, *rid,
rle->res) != 0)
return (NULL);
rle->flags |= RLE_ALLOCATED;
return (rle->res);
}
device_printf(bus,
"resource entry %#x type %d for child %s is busy\n", *rid,
type, device_get_nameunit(child));
return (NULL);
}
if (isdefault) {
start = rle->start;
count = ulmax(count, rle->count);
end = ulmax(rle->end, start + count - 1);
}
rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
type, rid, start, end, count, flags);
/*
* Record the new range.
*/
if (rle->res) {
rle->start = rman_get_start(rle->res);
rle->end = rman_get_end(rle->res);
rle->count = count;
}
return (rle->res);
}
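/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The "acme_devinfo" layout is hypothetical.]  The
 * BUS_ALLOC_RESOURCE() shape described above, assuming the bus keeps a
 * struct resource_list at the start of each child's ivars.
 */
struct acme_devinfo {
	struct resource_list	adi_rl;	/* first member, per convention */
};

static struct resource *
acme_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct acme_devinfo *adi = device_get_ivars(child);

	return (resource_list_alloc(&adi->adi_rl, bus, child, type, rid,
	    start, end, count, flags));
}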
/**
* @brief Helper function for implementing BUS_RELEASE_RESOURCE()
*
* Implement BUS_RELEASE_RESOURCE() using a resource list. Normally
* used with resource_list_alloc().
*
* @param rl the resource list which was allocated from
* @param bus the parent device of @p child
* @param child the device which is requesting a release
* @param type the type of resource to release
* @param rid the resource identifier
* @param res the resource to release
*
* @retval 0 success
* @retval non-zero a standard unix error code indicating what
* error condition prevented the operation
*/
int
resource_list_release(struct resource_list *rl, device_t bus, device_t child,
int type, int rid, struct resource *res)
{
struct resource_list_entry *rle = NULL;
int passthrough = (device_get_parent(child) != bus);
int error;
if (passthrough) {
return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child,
type, rid, res));
}
rle = resource_list_find(rl, type, rid);
if (!rle)
panic("resource_list_release: can't find resource");
if (!rle->res)
panic("resource_list_release: resource entry is not busy");
if (rle->flags & RLE_RESERVED) {
if (rle->flags & RLE_ALLOCATED) {
if (rman_get_flags(res) & RF_ACTIVE) {
error = bus_deactivate_resource(child, type,
rid, res);
if (error)
return (error);
}
rle->flags &= ~RLE_ALLOCATED;
return (0);
}
return (EINVAL);
}
error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child,
type, rid, res);
if (error)
return (error);
rle->res = NULL;
return (0);
}
/**
* @brief Release all active resources of a given type
*
* Release all active resources of a specified type. This is intended
* to be used to cleanup resources leaked by a driver after detach or
* a failed attach.
*
* @param rl the resource list which was allocated from
* @param bus the parent device of @p child
* @param child the device whose active resources are being released
* @param type the type of resources to release
*
* @retval 0 success
* @retval EBUSY at least one resource was active
*/
int
resource_list_release_active(struct resource_list *rl, device_t bus,
device_t child, int type)
{
struct resource_list_entry *rle;
int error, retval;
retval = 0;
STAILQ_FOREACH(rle, rl, link) {
if (rle->type != type)
continue;
if (rle->res == NULL)
continue;
if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) ==
RLE_RESERVED)
continue;
retval = EBUSY;
error = resource_list_release(rl, bus, child, type,
rman_get_rid(rle->res), rle->res);
if (error != 0)
device_printf(bus,
"Failed to release active resource: %d\n", error);
}
return (retval);
}
/**
* @brief Fully release a reserved resource
*
* Fully releases a resource reserved via resource_list_reserve().
*
* @param rl the resource list which was allocated from
* @param bus the parent device of @p child
* @param child the device whose reserved resource is being released
* @param type the type of resource to release
* @param rid the resource identifier
* @param res the resource to release
*
* @retval 0 success
* @retval non-zero a standard unix error code indicating what
* error condition prevented the operation
*/
int
resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child,
int type, int rid)
{
struct resource_list_entry *rle = NULL;
int passthrough = (device_get_parent(child) != bus);
if (passthrough)
panic(
"resource_list_unreserve() should only be called for direct children");
rle = resource_list_find(rl, type, rid);
if (!rle)
panic("resource_list_unreserve: can't find resource");
if (!(rle->flags & RLE_RESERVED))
return (EINVAL);
if (rle->flags & RLE_ALLOCATED)
return (EBUSY);
rle->flags &= ~RLE_RESERVED;
return (resource_list_release(rl, bus, child, type, rid, rle->res));
}
/**
* @brief Print a description of resources in a resource list
*
* Print all resources of a specified type, for use in BUS_PRINT_CHILD().
* The name is printed if at least one resource of the given type is available.
* The format is used to print resource start and end.
*
* @param rl the resource list to print
* @param name the name of @p type, e.g. @c "memory"
* @param type the type of resource entry to print
* @param format printf(9) format string to print resource
* start and end values
*
* @returns the number of characters printed
*/
int
resource_list_print_type(struct resource_list *rl, const char *name, int type,
const char *format)
{
struct resource_list_entry *rle;
int printed, retval;
printed = 0;
retval = 0;
/* Yes, this is kinda cheating */
STAILQ_FOREACH(rle, rl, link) {
if (rle->type == type) {
if (printed == 0)
retval += printf(" %s ", name);
else
retval += printf(",");
printed++;
retval += printf(format, rle->start);
if (rle->count > 1) {
retval += printf("-");
retval += printf(format, rle->start +
rle->count - 1);
}
}
}
return (retval);
}
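/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  Assumes the bus implements BUS_GET_RESOURCE_LIST().]  A bus's
 * BUS_PRINT_CHILD() method usually combines the header/footer helpers below
 * with resource_list_print_type(); "%#jx" and "%jd" match rman_res_t.
 */
static int
acme_print_child(device_t bus, device_t child)
{
	struct resource_list *rl = BUS_GET_RESOURCE_LIST(bus, child);
	int retval = 0;

	retval += bus_print_child_header(bus, child);
	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
	retval += bus_print_child_footer(bus, child);
	return (retval);
}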
/**
* @brief Releases all the resources in a list.
*
* @param rl The resource list to purge.
*
* @returns nothing
*/
void
resource_list_purge(struct resource_list *rl)
{
struct resource_list_entry *rle;
while ((rle = STAILQ_FIRST(rl)) != NULL) {
if (rle->res)
bus_release_resource(rman_get_device(rle->res),
rle->type, rle->rid, rle->res);
STAILQ_REMOVE_HEAD(rl, link);
free(rle, M_BUS);
}
}
device_t
bus_generic_add_child(device_t dev, u_int order, const char *name, int unit)
{
return (device_add_child_ordered(dev, order, name, unit));
}
/**
* @brief Helper function for implementing DEVICE_PROBE()
*
* This function can be used to help implement the DEVICE_PROBE() for
* a bus (i.e. a device which has other devices attached to it). It
* calls the DEVICE_IDENTIFY() method of each driver in the device's
* devclass.
*/
int
bus_generic_probe(device_t dev)
{
devclass_t dc = dev->devclass;
driverlink_t dl;
TAILQ_FOREACH(dl, &dc->drivers, link) {
/*
* If this driver's pass is too high, then ignore it.
* For most drivers in the default pass, this will
* never be true. For early-pass drivers they will
* only call the identify routines of eligible drivers
* when this routine is called. Drivers for later
* passes should have their identify routines called
* on early-pass buses during BUS_NEW_PASS().
*/
if (dl->pass > bus_current_pass)
continue;
DEVICE_IDENTIFY(dl->driver, dev);
}
return (0);
}
/**
* @brief Helper function for implementing DEVICE_ATTACH()
*
* This function can be used to help implement the DEVICE_ATTACH() for
* a bus. It calls device_probe_and_attach() for each of the device's
* children.
*/
int
bus_generic_attach(device_t dev)
{
device_t child;
TAILQ_FOREACH(child, &dev->children, link) {
device_probe_and_attach(child);
}
return (0);
}
/**
* @brief Helper function for implementing DEVICE_DETACH()
*
* This function can be used to help implement the DEVICE_DETACH() for
* a bus. It calls device_detach() for each of the device's
* children.
*/
int
bus_generic_detach(device_t dev)
{
device_t child;
int error;
if (dev->state != DS_ATTACHED)
return (EBUSY);
TAILQ_FOREACH(child, &dev->children, link) {
if ((error = device_detach(child)) != 0)
return (error);
}
return (0);
}
/**
* @brief Helper function for implementing DEVICE_SHUTDOWN()
*
* This function can be used to help implement the DEVICE_SHUTDOWN()
* for a bus. It calls device_shutdown() for each of the device's
* children.
*/
int
bus_generic_shutdown(device_t dev)
{
device_t child;
TAILQ_FOREACH(child, &dev->children, link) {
device_shutdown(child);
}
return (0);
}
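/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The "examplebus" names are hypothetical.]  Many simple buses plug
 * the generic helpers above straight into their method table and only supply
 * a probe routine of their own.
 */
static int
examplebus_probe(device_t dev)
{
	device_set_desc(dev, "Example bus");
	return (BUS_PROBE_NOWILDCARD);
}

static device_method_t examplebus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		examplebus_probe),
	DEVMETHOD(device_attach,	bus_generic_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	DEVMETHOD_END
};

static driver_t examplebus_driver = {
	"examplebus",
	examplebus_methods,
	0
};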
/**
* @brief Default function for suspending a child device.
*
* This function is to be used by a bus's BUS_SUSPEND_CHILD().
*/
int
bus_generic_suspend_child(device_t dev, device_t child)
{
int error;
error = DEVICE_SUSPEND(child);
if (error == 0)
child->flags |= DF_SUSPENDED;
return (error);
}
/**
* @brief Default function for resuming a child device.
*
* This function is to be used by a bus's BUS_RESUME_CHILD().
*/
int
bus_generic_resume_child(device_t dev, device_t child)
{
DEVICE_RESUME(child);
child->flags &= ~DF_SUSPENDED;
return (0);
}
/**
* @brief Helper function for implementing DEVICE_SUSPEND()
*
* This function can be used to help implement the DEVICE_SUSPEND()
* for a bus. It calls DEVICE_SUSPEND() for each of the device's
* children. If any call to DEVICE_SUSPEND() fails, the suspend
* operation is aborted and any devices which were suspended are
* resumed immediately by calling their DEVICE_RESUME() methods.
*/
int
bus_generic_suspend(device_t dev)
{
int error;
device_t child, child2;
TAILQ_FOREACH(child, &dev->children, link) {
error = BUS_SUSPEND_CHILD(dev, child);
if (error) {
for (child2 = TAILQ_FIRST(&dev->children);
child2 && child2 != child;
child2 = TAILQ_NEXT(child2, link))
BUS_RESUME_CHILD(dev, child2);
return (error);
}
}
return (0);
}
/**
* @brief Helper function for implementing DEVICE_RESUME()
*
* This function can be used to help implement the DEVICE_RESUME() for
* a bus. It calls DEVICE_RESUME() on each of the device's children.
*/
int
bus_generic_resume(device_t dev)
{
device_t child;
TAILQ_FOREACH(child, &dev->children, link) {
BUS_RESUME_CHILD(dev, child);
/* if resume fails, there's nothing we can usefully do... */
}
return (0);
}
/**
* @brief Helper function for implementing BUS_PRINT_CHILD().
*
* This function prints the first part of the ascii representation of
* @p child, including its name, unit and description (if any - see
* device_set_desc()).
*
* @returns the number of characters printed
*/
int
bus_print_child_header(device_t dev, device_t child)
{
int retval = 0;
if (device_get_desc(child)) {
retval += device_printf(child, "<%s>", device_get_desc(child));
} else {
retval += printf("%s", device_get_nameunit(child));
}
return (retval);
}
/**
* @brief Helper function for implementing BUS_PRINT_CHILD().
*
* This function prints the last part of the ascii representation of
* @p child, which consists of the string @c " on " followed by the
* name and unit of the @p dev.
*
* @returns the number of characters printed
*/
int
bus_print_child_footer(device_t dev, device_t child)
{
return (printf(" on %s\n", device_get_nameunit(dev)));
}
/**
* @brief Helper function for implementing BUS_PRINT_CHILD().
*
* This function prints out the VM domain for the given device.
*
* @returns the number of characters printed
*/
int
bus_print_child_domain(device_t dev, device_t child)
{
int domain;
/* No domain? Don't print anything */
if (BUS_GET_DOMAIN(dev, child, &domain) != 0)
return (0);
return (printf(" numa-domain %d", domain));
}
/**
* @brief Helper function for implementing BUS_PRINT_CHILD().
*
* This function simply calls bus_print_child_header() followed by
* bus_print_child_footer().
*
* @returns the number of characters printed
*/
int
bus_generic_print_child(device_t dev, device_t child)
{
int retval = 0;
retval += bus_print_child_header(dev, child);
retval += bus_print_child_domain(dev, child);
retval += bus_print_child_footer(dev, child);
return (retval);
}
/**
* @brief Stub function for implementing BUS_READ_IVAR().
*
* @returns ENOENT
*/
int
bus_generic_read_ivar(device_t dev, device_t child, int index,
uintptr_t * result)
{
return (ENOENT);
}
/**
* @brief Stub function for implementing BUS_WRITE_IVAR().
*
* @returns ENOENT
*/
int
bus_generic_write_ivar(device_t dev, device_t child, int index,
uintptr_t value)
{
return (ENOENT);
}
/**
* @brief Stub function for implementing BUS_GET_RESOURCE_LIST().
*
* @returns NULL
*/
struct resource_list *
bus_generic_get_resource_list(device_t dev, device_t child)
{
return (NULL);
}
/**
* @brief Helper function for implementing BUS_DRIVER_ADDED().
*
* This implementation of BUS_DRIVER_ADDED() simply calls the driver's
* DEVICE_IDENTIFY() method to allow it to add new children to the bus
* and then calls device_probe_and_attach() for each unattached child.
*/
void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
device_t child;
DEVICE_IDENTIFY(driver, dev);
TAILQ_FOREACH(child, &dev->children, link) {
if (child->state == DS_NOTPRESENT ||
(child->flags & DF_REBID))
device_probe_and_attach(child);
}
}
/**
* @brief Helper function for implementing BUS_NEW_PASS().
*
* This implementation of BUS_NEW_PASS() first calls the identify
* routines for any drivers that probe at the current pass. Then it
* walks the list of devices for this bus. If a device is already
* attached, then it calls BUS_NEW_PASS() on that device. If the
* device is not already attached, it attempts to attach a driver to
* it.
*/
void
bus_generic_new_pass(device_t dev)
{
driverlink_t dl;
devclass_t dc;
device_t child;
dc = dev->devclass;
TAILQ_FOREACH(dl, &dc->drivers, link) {
if (dl->pass == bus_current_pass)
DEVICE_IDENTIFY(dl->driver, dev);
}
TAILQ_FOREACH(child, &dev->children, link) {
if (child->state >= DS_ATTACHED)
BUS_NEW_PASS(child);
else if (child->state == DS_NOTPRESENT)
device_probe_and_attach(child);
}
}
/**
* @brief Helper function for implementing BUS_SETUP_INTR().
*
* This simple implementation of BUS_SETUP_INTR() simply calls the
* BUS_SETUP_INTR() method of the parent of @p dev.
*/
int
bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq,
int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
void **cookiep)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_SETUP_INTR(dev->parent, child, irq, flags,
filter, intr, arg, cookiep));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_TEARDOWN_INTR().
*
* This simple implementation of BUS_TEARDOWN_INTR() simply calls the
* BUS_TEARDOWN_INTR() method of the parent of @p dev.
*/
int
bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq,
void *cookie)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_ADJUST_RESOURCE().
*
* This simple implementation of BUS_ADJUST_RESOURCE() simply calls the
* BUS_ADJUST_RESOURCE() method of the parent of @p dev.
*/
int
bus_generic_adjust_resource(device_t dev, device_t child, int type,
struct resource *r, rman_res_t start, rman_res_t end)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_ADJUST_RESOURCE(dev->parent, child, type, r, start,
end));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_ALLOC_RESOURCE().
*
* This simple implementation of BUS_ALLOC_RESOURCE() simply calls the
* BUS_ALLOC_RESOURCE() method of the parent of @p dev.
*/
struct resource *
bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid,
start, end, count, flags));
return (NULL);
}
/**
* @brief Helper function for implementing BUS_RELEASE_RESOURCE().
*
* This simple implementation of BUS_RELEASE_RESOURCE() simply calls the
* BUS_RELEASE_RESOURCE() method of the parent of @p dev.
*/
int
bus_generic_release_resource(device_t dev, device_t child, int type, int rid,
struct resource *r)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid,
r));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_ACTIVATE_RESOURCE().
*
* This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the
* BUS_ACTIVATE_RESOURCE() method of the parent of @p dev.
*/
int
bus_generic_activate_resource(device_t dev, device_t child, int type, int rid,
struct resource *r)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_ACTIVATE_RESOURCE(dev->parent, child, type, rid,
r));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE().
*
* This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the
* BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev.
*/
int
bus_generic_deactivate_resource(device_t dev, device_t child, int type,
int rid, struct resource *r)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, type, rid,
r));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_MAP_RESOURCE().
*
* This simple implementation of BUS_MAP_RESOURCE() simply calls the
* BUS_MAP_RESOURCE() method of the parent of @p dev.
*/
int
bus_generic_map_resource(device_t dev, device_t child, int type,
struct resource *r, struct resource_map_request *args,
struct resource_map *map)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_MAP_RESOURCE(dev->parent, child, type, r, args,
map));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_UNMAP_RESOURCE().
*
* This simple implementation of BUS_UNMAP_RESOURCE() simply calls the
* BUS_UNMAP_RESOURCE() method of the parent of @p dev.
*/
int
bus_generic_unmap_resource(device_t dev, device_t child, int type,
struct resource *r, struct resource_map *map)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_UNMAP_RESOURCE(dev->parent, child, type, r, map));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_BIND_INTR().
*
* This simple implementation of BUS_BIND_INTR() simply calls the
* BUS_BIND_INTR() method of the parent of @p dev.
*/
int
bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq,
int cpu)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_BIND_INTR(dev->parent, child, irq, cpu));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_CONFIG_INTR().
*
* This simple implementation of BUS_CONFIG_INTR() simply calls the
* BUS_CONFIG_INTR() method of the parent of @p dev.
*/
int
bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig,
enum intr_polarity pol)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_DESCRIBE_INTR().
*
* This simple implementation of BUS_DESCRIBE_INTR() simply calls the
* BUS_DESCRIBE_INTR() method of the parent of @p dev.
*/
int
bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq,
void *cookie, const char *descr)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent)
return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie,
descr));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_GET_CPUS().
*
* This simple implementation of BUS_GET_CPUS() simply calls the
* BUS_GET_CPUS() method of the parent of @p dev.
*/
int
bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op,
size_t setsize, cpuset_t *cpuset)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent != NULL)
return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset));
return (EINVAL);
}
/**
* @brief Helper function for implementing BUS_GET_DMA_TAG().
*
* This simple implementation of BUS_GET_DMA_TAG() simply calls the
* BUS_GET_DMA_TAG() method of the parent of @p dev.
*/
bus_dma_tag_t
bus_generic_get_dma_tag(device_t dev, device_t child)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent != NULL)
return (BUS_GET_DMA_TAG(dev->parent, child));
return (NULL);
}
/**
* @brief Helper function for implementing BUS_GET_BUS_TAG().
*
* This simple implementation of BUS_GET_BUS_TAG() simply calls the
* BUS_GET_BUS_TAG() method of the parent of @p dev.
*/
bus_space_tag_t
bus_generic_get_bus_tag(device_t dev, device_t child)
{
/* Propagate up the bus hierarchy until someone handles it. */
if (dev->parent != NULL)
return (BUS_GET_BUS_TAG(dev->parent, child));
return ((bus_space_tag_t)0);
}
/**
* @brief Helper function for implementing BUS_GET_RESOURCE().
*
* This implementation of BUS_GET_RESOURCE() uses the
* resource_list_find() function to do most of the work. It calls
* BUS_GET_RESOURCE_LIST() to find a suitable resource list to
* search.
*/
int
bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid,
rman_res_t *startp, rman_res_t *countp)
{
struct resource_list * rl = NULL;
struct resource_list_entry * rle = NULL;
rl = BUS_GET_RESOURCE_LIST(dev, child);
if (!rl)
return (EINVAL);
rle = resource_list_find(rl, type, rid);
if (!rle)
return (ENOENT);
if (startp)
*startp = rle->start;
if (countp)
*countp = rle->count;
return (0);
}
/**
* @brief Helper function for implementing BUS_SET_RESOURCE().
*
* This implementation of BUS_SET_RESOURCE() uses the
* resource_list_add() function to do most of the work. It calls
* BUS_GET_RESOURCE_LIST() to find a suitable resource list to
* edit.
*/
int
bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid,
rman_res_t start, rman_res_t count)
{
struct resource_list * rl = NULL;
rl = BUS_GET_RESOURCE_LIST(dev, child);
if (!rl)
return (EINVAL);
resource_list_add(rl, type, rid, start, (start + count - 1), count);
return (0);
}
/**
* @brief Helper function for implementing BUS_DELETE_RESOURCE().
*
* This implementation of BUS_DELETE_RESOURCE() uses the
* resource_list_delete() function to do most of the work. It calls
* BUS_GET_RESOURCE_LIST() to find a suitable resource list to
* edit.
*/
void
bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid)
{
struct resource_list * rl = NULL;
rl = BUS_GET_RESOURCE_LIST(dev, child);
if (!rl)
return;
resource_list_delete(rl, type, rid);
return;
}
/**
* @brief Helper function for implementing BUS_RELEASE_RESOURCE().
*
* This implementation of BUS_RELEASE_RESOURCE() uses the
* resource_list_release() function to do most of the work. It calls
* BUS_GET_RESOURCE_LIST() to find a suitable resource list.
*/
int
bus_generic_rl_release_resource(device_t dev, device_t child, int type,
int rid, struct resource *r)
{
struct resource_list * rl = NULL;
if (device_get_parent(child) != dev)
return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
type, rid, r));
rl = BUS_GET_RESOURCE_LIST(dev, child);
if (!rl)
return (EINVAL);
return (resource_list_release(rl, dev, child, type, rid, r));
}
/**
* @brief Helper function for implementing BUS_ALLOC_RESOURCE().
*
* This implementation of BUS_ALLOC_RESOURCE() uses the
* resource_list_alloc() function to do most of the work. It calls
* BUS_GET_RESOURCE_LIST() to find a suitable resource list.
*/
struct resource *
bus_generic_rl_alloc_resource(device_t dev, device_t child, int type,
int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
struct resource_list * rl = NULL;
if (device_get_parent(child) != dev)
return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
type, rid, start, end, count, flags));
rl = BUS_GET_RESOURCE_LIST(dev, child);
if (!rl)
return (NULL);
return (resource_list_alloc(rl, dev, child, type, rid,
start, end, count, flags));
}
/**
* @brief Helper function for implementing BUS_CHILD_PRESENT().
*
* This simple implementation of BUS_CHILD_PRESENT() simply calls the
* BUS_CHILD_PRESENT() method of the parent of @p dev.
*/
int
bus_generic_child_present(device_t dev, device_t child)
{
return (BUS_CHILD_PRESENT(device_get_parent(dev), dev));
}
int
bus_generic_get_domain(device_t dev, device_t child, int *domain)
{
if (dev->parent)
return (BUS_GET_DOMAIN(dev->parent, dev, domain));
return (ENOENT);
}
/**
* @brief Helper function for implementing BUS_RESCAN().
*
* This null implementation of BUS_RESCAN() always fails to indicate
* the bus does not support rescanning.
*/
int
bus_null_rescan(device_t dev)
{
return (ENXIO);
}
/*
* Some convenience functions to make it easier for drivers to use the
* resource-management functions. All these really do is hide the
* indirection through the parent's method table, making for slightly
* less-wordy code. In the future, it might make sense for this code
* to maintain some sort of a list of resources allocated by each device.
*/
int
bus_alloc_resources(device_t dev, struct resource_spec *rs,
struct resource **res)
{
int i;
for (i = 0; rs[i].type != -1; i++)
res[i] = NULL;
for (i = 0; rs[i].type != -1; i++) {
res[i] = bus_alloc_resource_any(dev,
rs[i].type, &rs[i].rid, rs[i].flags);
if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) {
bus_release_resources(dev, rs, res);
return (ENXIO);
}
}
return (0);
}
void
bus_release_resources(device_t dev, const struct resource_spec *rs,
struct resource **res)
{
int i;
for (i = 0; rs[i].type != -1; i++)
if (res[i] != NULL) {
bus_release_resource(
dev, rs[i].type, rs[i].rid, res[i]);
res[i] = NULL;
}
}
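/*
 * [Illustrative sketch added for review purposes; not part of the original
 * source.  The "acme_res_spec" table is hypothetical.]  The usual pattern
 * for the two helpers above: a terminated resource_spec table plus a
 * matching array of resource pointers (one entry per non-terminator spec).
 */
static struct resource_spec acme_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },		/* registers */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

static int
acme_attach_resources(device_t dev, struct resource **res)
{
	/* res[] must have one slot for each entry in acme_res_spec[]. */
	if (bus_alloc_resources(dev, acme_res_spec, res) != 0) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}
	return (0);
}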
/**
* @brief Wrapper function for BUS_ALLOC_RESOURCE().
*
* This function simply calls the BUS_ALLOC_RESOURCE() method of the
* parent of @p dev.
*/
struct resource *
bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start,
rman_res_t end, rman_res_t count, u_int flags)
{
struct resource *res;
if (dev->parent == NULL)
return (NULL);
res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end,
count, flags);
return (res);
}
/**
* @brief Wrapper function for BUS_ADJUST_RESOURCE().
*
* This function simply calls the BUS_ADJUST_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_adjust_resource(device_t dev, int type, struct resource *r, rman_res_t start,
rman_res_t end)
{
if (dev->parent == NULL)
return (EINVAL);
return (BUS_ADJUST_RESOURCE(dev->parent, dev, type, r, start, end));
}
/**
* @brief Wrapper function for BUS_ACTIVATE_RESOURCE().
*
* This function simply calls the BUS_ACTIVATE_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_activate_resource(device_t dev, int type, int rid, struct resource *r)
{
if (dev->parent == NULL)
return (EINVAL);
return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, type, rid, r));
}
/**
* @brief Wrapper function for BUS_DEACTIVATE_RESOURCE().
*
* This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r)
{
if (dev->parent == NULL)
return (EINVAL);
return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, type, rid, r));
}
/**
* @brief Wrapper function for BUS_MAP_RESOURCE().
*
* This function simply calls the BUS_MAP_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_map_resource(device_t dev, int type, struct resource *r,
struct resource_map_request *args, struct resource_map *map)
{
if (dev->parent == NULL)
return (EINVAL);
return (BUS_MAP_RESOURCE(dev->parent, dev, type, r, args, map));
}
/**
* @brief Wrapper function for BUS_UNMAP_RESOURCE().
*
* This function simply calls the BUS_UNMAP_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_unmap_resource(device_t dev, int type, struct resource *r,
struct resource_map *map)
{
if (dev->parent == NULL)
return (EINVAL);
return (BUS_UNMAP_RESOURCE(dev->parent, dev, type, r, map));
}
/**
* @brief Wrapper function for BUS_RELEASE_RESOURCE().
*
* This function simply calls the BUS_RELEASE_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_release_resource(device_t dev, int type, int rid, struct resource *r)
{
int rv;
if (dev->parent == NULL)
return (EINVAL);
rv = BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r);
return (rv);
}
/**
* @brief Wrapper function for BUS_SETUP_INTR().
*
* This function simply calls the BUS_SETUP_INTR() method of the
* parent of @p dev.
*/
int
bus_setup_intr(device_t dev, struct resource *r, int flags,
driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep)
{
int error;
if (dev->parent == NULL)
return (EINVAL);
error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler,
arg, cookiep);
if (error != 0)
return (error);
if (handler != NULL && !(flags & INTR_MPSAFE))
device_printf(dev, "[GIANT-LOCKED]\n");
return (0);
}
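/**
 * A minimal sketch of the usual setup/teardown pairing (hypothetical "foo"
 * driver; irq_res, foo_intr and sc are assumptions for illustration):
 *
 * @code
 *	// In foo_attach(), after allocating a SYS_RES_IRQ resource irq_res:
 *	error = bus_setup_intr(dev, irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
 *	    NULL, foo_intr, sc, &sc->intrhand);
 *	if (error != 0)
 *		return (error);
 *
 *	// In foo_detach():
 *	bus_teardown_intr(dev, irq_res, sc->intrhand);
 * @endcode
 */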
/**
* @brief Wrapper function for BUS_TEARDOWN_INTR().
*
* This function simply calls the BUS_TEARDOWN_INTR() method of the
* parent of @p dev.
*/
int
bus_teardown_intr(device_t dev, struct resource *r, void *cookie)
{
if (dev->parent == NULL)
return (EINVAL);
return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie));
}
/**
* @brief Wrapper function for BUS_BIND_INTR().
*
* This function simply calls the BUS_BIND_INTR() method of the
* parent of @p dev.
*/
int
bus_bind_intr(device_t dev, struct resource *r, int cpu)
{
if (dev->parent == NULL)
return (EINVAL);
return (BUS_BIND_INTR(dev->parent, dev, r, cpu));
}
/**
* @brief Wrapper function for BUS_DESCRIBE_INTR().
*
* This function first formats the requested description into a
* temporary buffer and then calls the BUS_DESCRIBE_INTR() method of
* the parent of @p dev.
*/
int
bus_describe_intr(device_t dev, struct resource *irq, void *cookie,
const char *fmt, ...)
{
va_list ap;
char descr[MAXCOMLEN + 1];
if (dev->parent == NULL)
return (EINVAL);
va_start(ap, fmt);
vsnprintf(descr, sizeof(descr), fmt, ap);
va_end(ap);
return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr));
}
/**
* @brief Wrapper function for BUS_SET_RESOURCE().
*
* This function simply calls the BUS_SET_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_set_resource(device_t dev, int type, int rid,
rman_res_t start, rman_res_t count)
{
return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid,
start, count));
}
/**
* @brief Wrapper function for BUS_GET_RESOURCE().
*
* This function simply calls the BUS_GET_RESOURCE() method of the
* parent of @p dev.
*/
int
bus_get_resource(device_t dev, int type, int rid,
rman_res_t *startp, rman_res_t *countp)
{
return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid,
startp, countp));
}
/**
* @brief Wrapper function for BUS_GET_RESOURCE().
*
* This function simply calls the BUS_GET_RESOURCE() method of the
* parent of @p dev and returns the start value.
*/
rman_res_t
bus_get_resource_start(device_t dev, int type, int rid)
{
rman_res_t start;
rman_res_t count;
int error;
error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid,
&start, &count);
if (error)
return (0);
return (start);
}
/**
* @brief Wrapper function for BUS_GET_RESOURCE().
*
* This function simply calls the BUS_GET_RESOURCE() method of the
* parent of @p dev and returns the count value.
*/
rman_res_t
bus_get_resource_count(device_t dev, int type, int rid)
{
rman_res_t start;
rman_res_t count;
int error;
error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid,
&start, &count);
if (error)
return (0);
return (count);
}
/**
* @brief Wrapper function for BUS_DELETE_RESOURCE().
*
* This function simply calls the BUS_DELETE_RESOURCE() method of the
* parent of @p dev.
*/
void
bus_delete_resource(device_t dev, int type, int rid)
{
BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid);
}
/**
* @brief Wrapper function for BUS_CHILD_PRESENT().
*
* This function simply calls the BUS_CHILD_PRESENT() method of the
 * parent of @p child.
*/
int
bus_child_present(device_t child)
{
return (BUS_CHILD_PRESENT(device_get_parent(child), child));
}
/**
* @brief Wrapper function for BUS_CHILD_PNPINFO_STR().
*
* This function simply calls the BUS_CHILD_PNPINFO_STR() method of the
 * parent of @p child.
*/
int
bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen)
{
device_t parent;
parent = device_get_parent(child);
if (parent == NULL) {
*buf = '\0';
return (0);
}
return (BUS_CHILD_PNPINFO_STR(parent, child, buf, buflen));
}
/**
* @brief Wrapper function for BUS_CHILD_LOCATION_STR().
*
* This function simply calls the BUS_CHILD_LOCATION_STR() method of the
 * parent of @p child.
*/
int
bus_child_location_str(device_t child, char *buf, size_t buflen)
{
device_t parent;
parent = device_get_parent(child);
if (parent == NULL) {
*buf = '\0';
return (0);
}
return (BUS_CHILD_LOCATION_STR(parent, child, buf, buflen));
}
/**
* @brief Wrapper function for BUS_GET_CPUS().
*
* This function simply calls the BUS_GET_CPUS() method of the
* parent of @p dev.
*/
int
bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset)
{
device_t parent;
parent = device_get_parent(dev);
if (parent == NULL)
return (EINVAL);
return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset));
}
/**
* @brief Wrapper function for BUS_GET_DMA_TAG().
*
* This function simply calls the BUS_GET_DMA_TAG() method of the
* parent of @p dev.
*/
bus_dma_tag_t
bus_get_dma_tag(device_t dev)
{
device_t parent;
parent = device_get_parent(dev);
if (parent == NULL)
return (NULL);
return (BUS_GET_DMA_TAG(parent, dev));
}
/**
* @brief Wrapper function for BUS_GET_BUS_TAG().
*
* This function simply calls the BUS_GET_BUS_TAG() method of the
* parent of @p dev.
*/
bus_space_tag_t
bus_get_bus_tag(device_t dev)
{
device_t parent;
parent = device_get_parent(dev);
if (parent == NULL)
return ((bus_space_tag_t)0);
return (BUS_GET_BUS_TAG(parent, dev));
}
/**
* @brief Wrapper function for BUS_GET_DOMAIN().
*
* This function simply calls the BUS_GET_DOMAIN() method of the
* parent of @p dev.
*/
int
bus_get_domain(device_t dev, int *domain)
{
return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain));
}
/* Resume all devices and then notify userland that we're up again. */
static int
root_resume(device_t dev)
{
int error;
error = bus_generic_resume(dev);
if (error == 0)
devctl_notify("kern", "power", "resume", NULL);
return (error);
}
static int
root_print_child(device_t dev, device_t child)
{
int retval = 0;
retval += bus_print_child_header(dev, child);
retval += printf("\n");
return (retval);
}
static int
root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
{
/*
* If an interrupt mapping gets to here something bad has happened.
*/
panic("root_setup_intr");
}
/*
* If we get here, assume that the device is permanent and really is
* present in the system. Removable bus drivers are expected to intercept
 * this call long before it gets here.  We return -1 so that drivers that
 * really care can distinguish this default from an errno value returned
 * higher up the food chain.
*/
static int
root_child_present(device_t dev, device_t child)
{
return (-1);
}
static int
root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
cpuset_t *cpuset)
{
switch (op) {
case INTR_CPUS:
/* Default to returning the set of all CPUs. */
if (setsize != sizeof(cpuset_t))
return (EINVAL);
*cpuset = all_cpus;
return (0);
default:
return (EINVAL);
}
}
static kobj_method_t root_methods[] = {
/* Device interface */
KOBJMETHOD(device_shutdown, bus_generic_shutdown),
KOBJMETHOD(device_suspend, bus_generic_suspend),
KOBJMETHOD(device_resume, root_resume),
/* Bus interface */
KOBJMETHOD(bus_print_child, root_print_child),
KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar),
KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar),
KOBJMETHOD(bus_setup_intr, root_setup_intr),
KOBJMETHOD(bus_child_present, root_child_present),
KOBJMETHOD(bus_get_cpus, root_get_cpus),
KOBJMETHOD_END
};
static driver_t root_driver = {
"root",
root_methods,
1, /* no softc */
};
device_t root_bus;
devclass_t root_devclass;
static int
root_bus_module_handler(module_t mod, int what, void* arg)
{
switch (what) {
case MOD_LOAD:
TAILQ_INIT(&bus_data_devices);
kobj_class_compile((kobj_class_t) &root_driver);
root_bus = make_device(NULL, "root", 0);
root_bus->desc = "System root bus";
kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver);
root_bus->driver = &root_driver;
root_bus->state = DS_ATTACHED;
root_devclass = devclass_find_internal("root", NULL, FALSE);
devinit();
return (0);
case MOD_SHUTDOWN:
device_shutdown(root_bus);
return (0);
default:
return (EOPNOTSUPP);
}
return (0);
}
static moduledata_t root_bus_mod = {
"rootbus",
root_bus_module_handler,
NULL
};
DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
/**
* @brief Automatically configure devices
*
* This function begins the autoconfiguration process by calling
* device_probe_and_attach() for each child of the @c root0 device.
*/
void
root_bus_configure(void)
{
PDEBUG(("."));
/* Eventually this will be split up, but this is sufficient for now. */
bus_set_pass(BUS_PASS_DEFAULT);
}
/**
* @brief Module handler for registering device drivers
*
* This module handler is used to automatically register device
* drivers when modules are loaded. If @p what is MOD_LOAD, it calls
* devclass_add_driver() for the driver described by the
 * driver_module_data structure pointed to by @p arg.
*/
int
driver_module_handler(module_t mod, int what, void *arg)
{
struct driver_module_data *dmd;
devclass_t bus_devclass;
kobj_class_t driver;
int error, pass;
dmd = (struct driver_module_data *)arg;
bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE);
error = 0;
switch (what) {
case MOD_LOAD:
if (dmd->dmd_chainevh)
error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg);
pass = dmd->dmd_pass;
driver = dmd->dmd_driver;
PDEBUG(("Loading module: driver %s on bus %s (pass %d)",
DRIVERNAME(driver), dmd->dmd_busname, pass));
error = devclass_add_driver(bus_devclass, driver, pass,
dmd->dmd_devclass);
break;
case MOD_UNLOAD:
PDEBUG(("Unloading module: driver %s from bus %s",
DRIVERNAME(dmd->dmd_driver),
dmd->dmd_busname));
error = devclass_delete_driver(bus_devclass,
dmd->dmd_driver);
if (!error && dmd->dmd_chainevh)
error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg);
break;
case MOD_QUIESCE:
PDEBUG(("Quiesce module: driver %s from bus %s",
DRIVERNAME(dmd->dmd_driver),
dmd->dmd_busname));
error = devclass_quiesce_driver(bus_devclass,
dmd->dmd_driver);
if (!error && dmd->dmd_chainevh)
error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg);
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
/**
* @brief Enumerate all hinted devices for this bus.
*
* Walks through the hints for this bus and calls the bus_hinted_child
 * routine for each one it finds.  It searches first for the specific
 * bus that's being probed for hinted children (e.g. isa0), and then for
 * generic children (e.g. isa).
*
* @param dev bus device to enumerate
*/
void
bus_enumerate_hinted_children(device_t bus)
{
int i;
const char *dname, *busname;
int dunit;
/*
* enumerate all devices on the specific bus
*/
busname = device_get_nameunit(bus);
i = 0;
while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0)
BUS_HINTED_CHILD(bus, dname, dunit);
/*
* and all the generic ones.
*/
busname = device_get_name(bus);
i = 0;
while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0)
BUS_HINTED_CHILD(bus, dname, dunit);
}
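/*
 * Example hints matched by bus_enumerate_hinted_children() (hypothetical
 * "foo" driver, /boot/device.hints syntax):
 *
 *	hint.foo.0.at="isa0"	# found by the nameunit pass ("isa0")
 *	hint.foo.1.at="isa"	# found by the generic name pass ("isa")
 *
 * Each match results in a BUS_HINTED_CHILD(bus, "foo", unit) call.
 */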
#ifdef BUS_DEBUG
/* the _short versions avoid iteration by not calling anything that prints
* more than oneliners. I love oneliners.
*/
static void
print_device_short(device_t dev, int indent)
{
if (!dev)
return;
indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n",
dev->unit, dev->desc,
(dev->parent? "":"no "),
(TAILQ_EMPTY(&dev->children)? "no ":""),
(dev->flags&DF_ENABLED? "enabled,":"disabled,"),
(dev->flags&DF_FIXEDCLASS? "fixed,":""),
(dev->flags&DF_WILDCARD? "wildcard,":""),
(dev->flags&DF_DESCMALLOCED? "descmalloced,":""),
(dev->flags&DF_REBID? "rebiddable,":""),
(dev->ivars? "":"no "),
(dev->softc? "":"no "),
dev->busy));
}
static void
print_device(device_t dev, int indent)
{
if (!dev)
return;
print_device_short(dev, indent);
indentprintf(("Parent:\n"));
print_device_short(dev->parent, indent+1);
indentprintf(("Driver:\n"));
print_driver_short(dev->driver, indent+1);
indentprintf(("Devclass:\n"));
print_devclass_short(dev->devclass, indent+1);
}
void
print_device_tree_short(device_t dev, int indent)
/* print the device and all its children (indented) */
{
device_t child;
if (!dev)
return;
print_device_short(dev, indent);
TAILQ_FOREACH(child, &dev->children, link) {
print_device_tree_short(child, indent+1);
}
}
void
print_device_tree(device_t dev, int indent)
/* print the device and all its children (indented) */
{
device_t child;
if (!dev)
return;
print_device(dev, indent);
TAILQ_FOREACH(child, &dev->children, link) {
print_device_tree(child, indent+1);
}
}
static void
print_driver_short(driver_t *driver, int indent)
{
if (!driver)
return;
indentprintf(("driver %s: softc size = %zd\n",
driver->name, driver->size));
}
static void
print_driver(driver_t *driver, int indent)
{
if (!driver)
return;
print_driver_short(driver, indent);
}
static void
print_driver_list(driver_list_t drivers, int indent)
{
driverlink_t driver;
TAILQ_FOREACH(driver, &drivers, link) {
print_driver(driver->driver, indent);
}
}
static void
print_devclass_short(devclass_t dc, int indent)
{
	if (!dc)
return;
indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit));
}
static void
print_devclass(devclass_t dc, int indent)
{
int i;
	if (!dc)
return;
print_devclass_short(dc, indent);
indentprintf(("Drivers:\n"));
print_driver_list(dc->drivers, indent+1);
indentprintf(("Devices:\n"));
for (i = 0; i < dc->maxunit; i++)
if (dc->devices[i])
print_device(dc->devices[i], indent+1);
}
void
print_devclass_list_short(void)
{
devclass_t dc;
printf("Short listing of devclasses, drivers & devices:\n");
TAILQ_FOREACH(dc, &devclasses, link) {
print_devclass_short(dc, 0);
}
}
void
print_devclass_list(void)
{
devclass_t dc;
printf("Full listing of devclasses, drivers & devices:\n");
TAILQ_FOREACH(dc, &devclasses, link) {
print_devclass(dc, 0);
}
}
#endif
/*
* User-space access to the device tree.
*
* We implement a small set of nodes:
*
* hw.bus Single integer read method to obtain the
* current generation count.
* hw.bus.devices Reads the entire device tree in flat space.
* hw.bus.rman Resource manager interface
*
* We might like to add the ability to scan devclasses and/or drivers to
* determine what else is currently loaded/available.
*/
static int
sysctl_bus(SYSCTL_HANDLER_ARGS)
{
struct u_businfo ubus;
ubus.ub_version = BUS_USER_VERSION;
ubus.ub_generation = bus_data_generation;
return (SYSCTL_OUT(req, &ubus, sizeof(ubus)));
}
SYSCTL_NODE(_hw_bus, OID_AUTO, info, CTLFLAG_RW, sysctl_bus,
"bus-related data");
static int
sysctl_devices(SYSCTL_HANDLER_ARGS)
{
int *name = (int *)arg1;
u_int namelen = arg2;
int index;
device_t dev;
struct u_device udev; /* XXX this is a bit big */
int error;
if (namelen != 2)
return (EINVAL);
if (bus_data_generation_check(name[0]))
return (EINVAL);
index = name[1];
/*
* Scan the list of devices, looking for the requested index.
*/
TAILQ_FOREACH(dev, &bus_data_devices, devlink) {
if (index-- == 0)
break;
}
if (dev == NULL)
return (ENOENT);
/*
* Populate the return array.
*/
bzero(&udev, sizeof(udev));
udev.dv_handle = (uintptr_t)dev;
udev.dv_parent = (uintptr_t)dev->parent;
if (dev->nameunit != NULL)
strlcpy(udev.dv_name, dev->nameunit, sizeof(udev.dv_name));
if (dev->desc != NULL)
strlcpy(udev.dv_desc, dev->desc, sizeof(udev.dv_desc));
if (dev->driver != NULL && dev->driver->name != NULL)
strlcpy(udev.dv_drivername, dev->driver->name,
sizeof(udev.dv_drivername));
bus_child_pnpinfo_str(dev, udev.dv_pnpinfo, sizeof(udev.dv_pnpinfo));
bus_child_location_str(dev, udev.dv_location, sizeof(udev.dv_location));
udev.dv_devflags = dev->devflags;
udev.dv_flags = dev->flags;
udev.dv_state = dev->state;
error = SYSCTL_OUT(req, &udev, sizeof(udev));
return (error);
}
SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD, sysctl_devices,
"system device tree");
int
bus_data_generation_check(int generation)
{
if (generation != bus_data_generation)
return (1);
/* XXX generate optimised lists here? */
return (0);
}
void
bus_data_generation_update(void)
{
bus_data_generation++;
}
int
bus_free_resource(device_t dev, int type, struct resource *r)
{
if (r == NULL)
return (0);
return (bus_release_resource(dev, type, rman_get_rid(r), r));
}
device_t
device_lookup_by_name(const char *name)
{
device_t dev;
TAILQ_FOREACH(dev, &bus_data_devices, devlink) {
if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0)
return (dev);
}
return (NULL);
}
/*
* /dev/devctl2 implementation. The existing /dev/devctl device has
* implicit semantics on open, so it could not be reused for this.
* Another option would be to call this /dev/bus?
*/
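/*
 * Userland sketch of driving this interface (roughly what devctl(8) and
 * devctl(3) do under the hood; error handling omitted, "foo0" is a
 * hypothetical device name):
 *
 *	struct devreq req;
 *	int fd;
 *
 *	fd = open("/dev/devctl2", O_RDONLY);
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.dr_name, "foo0", sizeof(req.dr_name));
 *	ioctl(fd, DEV_DISABLE, &req);
 *	close(fd);
 */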
static int
find_device(struct devreq *req, device_t *devp)
{
device_t dev;
/*
* First, ensure that the name is nul terminated.
*/
if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL)
return (EINVAL);
/*
* Second, try to find an attached device whose name matches
* 'name'.
*/
dev = device_lookup_by_name(req->dr_name);
if (dev != NULL) {
*devp = dev;
return (0);
}
/* Finally, give device enumerators a chance. */
dev = NULL;
EVENTHANDLER_DIRECT_INVOKE(dev_lookup, req->dr_name, &dev);
if (dev == NULL)
return (ENOENT);
*devp = dev;
return (0);
}
static bool
driver_exists(device_t bus, const char *driver)
{
devclass_t dc;
for (dc = bus->devclass; dc != NULL; dc = dc->parent) {
if (devclass_find_driver_internal(dc, driver) != NULL)
return (true);
}
return (false);
}
static int
devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
struct thread *td)
{
struct devreq *req;
device_t dev;
int error, old;
/* Locate the device to control. */
mtx_lock(&Giant);
req = (struct devreq *)data;
switch (cmd) {
case DEV_ATTACH:
case DEV_DETACH:
case DEV_ENABLE:
case DEV_DISABLE:
case DEV_SUSPEND:
case DEV_RESUME:
case DEV_SET_DRIVER:
case DEV_CLEAR_DRIVER:
case DEV_RESCAN:
case DEV_DELETE:
error = priv_check(td, PRIV_DRIVER);
if (error == 0)
error = find_device(req, &dev);
break;
default:
error = ENOTTY;
break;
}
if (error) {
mtx_unlock(&Giant);
return (error);
}
/* Perform the requested operation. */
switch (cmd) {
case DEV_ATTACH:
if (device_is_attached(dev) && (dev->flags & DF_REBID) == 0)
error = EBUSY;
else if (!device_is_enabled(dev))
error = ENXIO;
else
error = device_probe_and_attach(dev);
break;
case DEV_DETACH:
if (!device_is_attached(dev)) {
error = ENXIO;
break;
}
if (!(req->dr_flags & DEVF_FORCE_DETACH)) {
error = device_quiesce(dev);
if (error)
break;
}
error = device_detach(dev);
break;
case DEV_ENABLE:
if (device_is_enabled(dev)) {
error = EBUSY;
break;
}
/*
* If the device has been probed but not attached (e.g.
* when it has been disabled by a loader hint), just
* attach the device rather than doing a full probe.
*/
device_enable(dev);
if (device_is_alive(dev)) {
/*
* If the device was disabled via a hint, clear
* the hint.
*/
if (resource_disabled(dev->driver->name, dev->unit))
resource_unset_value(dev->driver->name,
dev->unit, "disabled");
error = device_attach(dev);
} else
error = device_probe_and_attach(dev);
break;
case DEV_DISABLE:
if (!device_is_enabled(dev)) {
error = ENXIO;
break;
}
if (!(req->dr_flags & DEVF_FORCE_DETACH)) {
error = device_quiesce(dev);
if (error)
break;
}
/*
* Force DF_FIXEDCLASS on around detach to preserve
* the existing name.
*/
old = dev->flags;
dev->flags |= DF_FIXEDCLASS;
error = device_detach(dev);
if (!(old & DF_FIXEDCLASS))
dev->flags &= ~DF_FIXEDCLASS;
if (error == 0)
device_disable(dev);
break;
case DEV_SUSPEND:
if (device_is_suspended(dev)) {
error = EBUSY;
break;
}
if (device_get_parent(dev) == NULL) {
error = EINVAL;
break;
}
error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev);
break;
case DEV_RESUME:
if (!device_is_suspended(dev)) {
error = EINVAL;
break;
}
if (device_get_parent(dev) == NULL) {
error = EINVAL;
break;
}
error = BUS_RESUME_CHILD(device_get_parent(dev), dev);
break;
case DEV_SET_DRIVER: {
devclass_t dc;
char driver[128];
error = copyinstr(req->dr_data, driver, sizeof(driver), NULL);
if (error)
break;
if (driver[0] == '\0') {
error = EINVAL;
break;
}
if (dev->devclass != NULL &&
strcmp(driver, dev->devclass->name) == 0)
/* XXX: Could possibly force DF_FIXEDCLASS on? */
break;
/*
* Scan drivers for this device's bus looking for at
* least one matching driver.
*/
if (dev->parent == NULL) {
error = EINVAL;
break;
}
if (!driver_exists(dev->parent, driver)) {
error = ENOENT;
break;
}
dc = devclass_create(driver);
if (dc == NULL) {
error = ENOMEM;
break;
}
/* Detach device if necessary. */
if (device_is_attached(dev)) {
if (req->dr_flags & DEVF_SET_DRIVER_DETACH)
error = device_detach(dev);
else
error = EBUSY;
if (error)
break;
}
/* Clear any previously-fixed device class and unit. */
if (dev->flags & DF_FIXEDCLASS)
devclass_delete_device(dev->devclass, dev);
dev->flags |= DF_WILDCARD;
dev->unit = -1;
/* Force the new device class. */
error = devclass_add_device(dc, dev);
if (error)
break;
dev->flags |= DF_FIXEDCLASS;
error = device_probe_and_attach(dev);
break;
}
case DEV_CLEAR_DRIVER:
if (!(dev->flags & DF_FIXEDCLASS)) {
error = 0;
break;
}
if (device_is_attached(dev)) {
if (req->dr_flags & DEVF_CLEAR_DRIVER_DETACH)
error = device_detach(dev);
else
error = EBUSY;
if (error)
break;
}
dev->flags &= ~DF_FIXEDCLASS;
dev->flags |= DF_WILDCARD;
devclass_delete_device(dev->devclass, dev);
error = device_probe_and_attach(dev);
break;
case DEV_RESCAN:
if (!device_is_attached(dev)) {
error = ENXIO;
break;
}
error = BUS_RESCAN(dev);
break;
case DEV_DELETE: {
device_t parent;
parent = device_get_parent(dev);
if (parent == NULL) {
error = EINVAL;
break;
}
if (!(req->dr_flags & DEVF_FORCE_DELETE)) {
if (bus_child_present(dev) != 0) {
error = EBUSY;
break;
}
}
error = device_delete_child(parent, dev);
break;
}
}
mtx_unlock(&Giant);
return (error);
}
static struct cdevsw devctl2_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = devctl2_ioctl,
.d_name = "devctl2",
};
static void
devctl2_init(void)
{
make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL,
UID_ROOT, GID_WHEEL, 0600, "devctl2");
}
#ifdef DDB
DB_SHOW_COMMAND(device, db_show_device)
{
device_t dev;
if (!have_addr)
return;
dev = (device_t)addr;
db_printf("name: %s\n", device_get_nameunit(dev));
db_printf(" driver: %s\n", DRIVERNAME(dev->driver));
db_printf(" class: %s\n", DEVCLANAME(dev->devclass));
db_printf(" addr: %p\n", dev);
db_printf(" parent: %p\n", dev->parent);
db_printf(" softc: %p\n", dev->softc);
db_printf(" ivars: %p\n", dev->ivars);
}
DB_SHOW_ALL_COMMAND(devices, db_show_all_devices)
{
device_t dev;
TAILQ_FOREACH(dev, &bus_data_devices, devlink) {
db_show_device((db_expr_t)dev, true, count, modif);
}
}
#endif
Index: head/sys/kern/subr_taskqueue.c
===================================================================
--- head/sys/kern/subr_taskqueue.c (revision 328217)
+++ head/sys/kern/subr_taskqueue.c (revision 328218)
@@ -1,846 +1,846 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2000 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void *taskqueue_giant_ih;
static void *taskqueue_ih;
static void taskqueue_fast_enqueue(void *);
static void taskqueue_swi_enqueue(void *);
static void taskqueue_swi_giant_enqueue(void *);
struct taskqueue_busy {
struct task *tb_running;
TAILQ_ENTRY(taskqueue_busy) tb_link;
};
struct task * const TB_DRAIN_WAITER = (struct task *)0x1;
struct taskqueue {
STAILQ_HEAD(, task) tq_queue;
taskqueue_enqueue_fn tq_enqueue;
void *tq_context;
char *tq_name;
TAILQ_HEAD(, taskqueue_busy) tq_active;
struct mtx tq_mutex;
struct thread **tq_threads;
int tq_tcount;
int tq_spin;
int tq_flags;
int tq_callouts;
taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};
#define TQ_FLAGS_ACTIVE (1 << 0)
#define TQ_FLAGS_BLOCKED (1 << 1)
#define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2)
#define DT_CALLOUT_ARMED (1 << 0)
#define DT_DRAIN_IN_PROGRESS (1 << 1)
#define TQ_LOCK(tq) \
do { \
if ((tq)->tq_spin) \
mtx_lock_spin(&(tq)->tq_mutex); \
else \
mtx_lock(&(tq)->tq_mutex); \
} while (0)
#define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED)
#define TQ_UNLOCK(tq) \
do { \
if ((tq)->tq_spin) \
mtx_unlock_spin(&(tq)->tq_mutex); \
else \
mtx_unlock(&(tq)->tq_mutex); \
} while (0)
#define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
int priority, task_fn_t func, void *context)
{
TASK_INIT(&timeout_task->t, priority, func, context);
callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
CALLOUT_RETURNUNLOCKED);
timeout_task->q = queue;
timeout_task->f = 0;
}
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
int t)
{
if (tq->tq_spin)
return (msleep_spin(p, m, wm, t));
return (msleep(p, m, pri, wm, t));
}
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
taskqueue_enqueue_fn enqueue, void *context,
int mtxflags, const char *mtxname __unused)
{
struct taskqueue *queue;
char *tq_name;
tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
if (tq_name == NULL)
return (NULL);
queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
if (queue == NULL) {
free(tq_name, M_TASKQUEUE);
return (NULL);
}
snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");
STAILQ_INIT(&queue->tq_queue);
TAILQ_INIT(&queue->tq_active);
queue->tq_enqueue = enqueue;
queue->tq_context = context;
queue->tq_name = tq_name;
queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
queue->tq_flags |= TQ_FLAGS_ACTIVE;
if (enqueue == taskqueue_fast_enqueue ||
enqueue == taskqueue_swi_enqueue ||
enqueue == taskqueue_swi_giant_enqueue ||
enqueue == taskqueue_thread_enqueue)
queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);
return (queue);
}
struct taskqueue *
taskqueue_create(const char *name, int mflags,
taskqueue_enqueue_fn enqueue, void *context)
{
return _taskqueue_create(name, mflags, enqueue, context,
MTX_DEF, name);
}
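/*
 * A minimal usage sketch (hypothetical "foo" driver; foo_task_fn, sc and
 * the PI_NET priority are assumptions): create a threaded taskqueue, start
 * one worker thread and enqueue a task.  Note that taskqueue_thread_enqueue
 * expects its context to be a pointer to the taskqueue pointer itself.
 *
 *	TASK_INIT(&sc->foo_task, 0, foo_task_fn, sc);
 *	sc->foo_tq = taskqueue_create("foo_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->foo_tq);
 *	taskqueue_start_threads(&sc->foo_tq, 1, PI_NET, "%s taskq",
 *	    device_get_nameunit(dev));
 *	taskqueue_enqueue(sc->foo_tq, &sc->foo_task);
 */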
void
taskqueue_set_callback(struct taskqueue *queue,
enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
void *context)
{
KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
(cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
("Callback type %d not valid, must be %d-%d", cb_type,
TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
KASSERT((queue->tq_callbacks[cb_type] == NULL),
("Re-initialization of taskqueue callback?"));
queue->tq_callbacks[cb_type] = callback;
queue->tq_cb_contexts[cb_type] = context;
}
/*
* Signal a taskqueue thread to terminate.
*/
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{
while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
wakeup(tq);
TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
}
}
void
taskqueue_free(struct taskqueue *queue)
{
TQ_LOCK(queue);
queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
taskqueue_terminate(queue->tq_threads, queue);
KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
mtx_destroy(&queue->tq_mutex);
free(queue->tq_threads, M_TASKQUEUE);
free(queue->tq_name, M_TASKQUEUE);
free(queue, M_TASKQUEUE);
}
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
struct task *ins;
struct task *prev;
KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
/*
* Count multiple enqueues.
*/
if (task->ta_pending) {
if (task->ta_pending < USHRT_MAX)
task->ta_pending++;
TQ_UNLOCK(queue);
return (0);
}
/*
* Optimise the case when all tasks have the same priority.
*/
prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
if (!prev || prev->ta_priority >= task->ta_priority) {
STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
} else {
prev = NULL;
for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
prev = ins, ins = STAILQ_NEXT(ins, ta_link))
if (ins->ta_priority < task->ta_priority)
break;
if (prev)
STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
else
STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
}
task->ta_pending = 1;
if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
TQ_UNLOCK(queue);
if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
queue->tq_enqueue(queue->tq_context);
if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
TQ_UNLOCK(queue);
/* Return with lock released. */
return (0);
}
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
int res;
TQ_LOCK(queue);
res = taskqueue_enqueue_locked(queue, task);
/* The lock is released inside. */
return (res);
}
static void
taskqueue_timeout_func(void *arg)
{
struct taskqueue *queue;
struct timeout_task *timeout_task;
timeout_task = arg;
queue = timeout_task->q;
KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
timeout_task->f &= ~DT_CALLOUT_ARMED;
queue->tq_callouts--;
taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
/* The lock is released inside. */
}
int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
int res;
TQ_LOCK(queue);
KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
("Migrated queue"));
KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
timeout_task->q = queue;
res = timeout_task->t.ta_pending;
if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
/* Do nothing */
TQ_UNLOCK(queue);
res = -1;
} else if (sbt == 0) {
taskqueue_enqueue_locked(queue, &timeout_task->t);
/* The lock is released inside. */
} else {
if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
res++;
} else {
queue->tq_callouts++;
timeout_task->f |= DT_CALLOUT_ARMED;
if (sbt < 0)
sbt = -sbt; /* Ignore overflow. */
}
if (sbt > 0) {
callout_reset_sbt(&timeout_task->c, sbt, pr,
taskqueue_timeout_func, timeout_task, flags);
}
TQ_UNLOCK(queue);
}
return (res);
}
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
struct timeout_task *ttask, int ticks)
{
return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
0, 0));
}
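/*
 * Sketch of a one-shot timeout task (hypothetical foo_tick and sc): run
 * foo_tick() on sc->foo_tq roughly one second from now, and drain it
 * before teardown.
 *
 *	TIMEOUT_TASK_INIT(sc->foo_tq, &sc->foo_tt, 0, foo_tick, sc);
 *	taskqueue_enqueue_timeout(sc->foo_tq, &sc->foo_tt, hz);
 *	...
 *	taskqueue_drain_timeout(sc->foo_tq, &sc->foo_tt);
 */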
static void
taskqueue_task_nop_fn(void *context, int pending)
{
}
/*
* Block until all currently queued tasks in this taskqueue
* have begun execution. Tasks queued during execution of
* this function are ignored.
*/
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
struct task t_barrier;
if (STAILQ_EMPTY(&queue->tq_queue))
return;
/*
* Enqueue our barrier after all current tasks, but with
* the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we cannot use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway), so we just insert it at the tail while we have the
	 * queue lock.
*/
TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
t_barrier.ta_pending = 1;
/*
* Once the barrier has executed, all previously queued tasks
* have completed or are currently executing.
*/
while (t_barrier.ta_pending != 0)
TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}
/*
* Block until all currently executing tasks for this taskqueue
* complete. Tasks that begin execution during the execution
* of this function are ignored.
*/
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
struct taskqueue_busy tb_marker, *tb_first;
if (TAILQ_EMPTY(&queue->tq_active))
return;
	/* Block taskqueue_terminate(). */
queue->tq_callouts++;
/*
* Wait for all currently executing taskqueue threads
* to go idle.
*/
tb_marker.tb_running = TB_DRAIN_WAITER;
TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);
/*
* Wakeup any other drain waiter that happened to queue up
* without any intervening active thread.
*/
tb_first = TAILQ_FIRST(&queue->tq_active);
if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
wakeup(tb_first);
/* Release taskqueue_terminate(). */
queue->tq_callouts--;
if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
wakeup_one(queue->tq_threads);
}
void
taskqueue_block(struct taskqueue *queue)
{
TQ_LOCK(queue);
queue->tq_flags |= TQ_FLAGS_BLOCKED;
TQ_UNLOCK(queue);
}
void
taskqueue_unblock(struct taskqueue *queue)
{
TQ_LOCK(queue);
queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
if (!STAILQ_EMPTY(&queue->tq_queue))
queue->tq_enqueue(queue->tq_context);
TQ_UNLOCK(queue);
}
static void
taskqueue_run_locked(struct taskqueue *queue)
{
struct taskqueue_busy tb;
struct taskqueue_busy *tb_first;
struct task *task;
int pending;
KASSERT(queue != NULL, ("tq is NULL"));
TQ_ASSERT_LOCKED(queue);
tb.tb_running = NULL;
while (STAILQ_FIRST(&queue->tq_queue)) {
TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);
/*
* Carefully remove the first task from the queue and
* zero its pending count.
*/
task = STAILQ_FIRST(&queue->tq_queue);
KASSERT(task != NULL, ("task is NULL"));
STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
pending = task->ta_pending;
task->ta_pending = 0;
tb.tb_running = task;
TQ_UNLOCK(queue);
KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
task->ta_func(task->ta_context, pending);
TQ_LOCK(queue);
tb.tb_running = NULL;
wakeup(task);
TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
tb_first = TAILQ_FIRST(&queue->tq_active);
if (tb_first != NULL &&
tb_first->tb_running == TB_DRAIN_WAITER)
wakeup(tb_first);
}
}
void
taskqueue_run(struct taskqueue *queue)
{
TQ_LOCK(queue);
taskqueue_run_locked(queue);
TQ_UNLOCK(queue);
}
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
struct taskqueue_busy *tb;
TQ_ASSERT_LOCKED(queue);
TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
if (tb->tb_running == task)
return (1);
}
return (0);
}
/*
 * Only use this function in single-threaded contexts.  It returns
 * non-zero if the given task is either pending or running.  Otherwise,
 * the task is idle and can be queued again or freed.
*/
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
int retval;
TQ_LOCK(queue);
retval = task->ta_pending > 0 || task_is_running(queue, task);
TQ_UNLOCK(queue);
return (retval);
}
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
u_int *pendp)
{
if (task->ta_pending > 0)
STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
if (pendp != NULL)
*pendp = task->ta_pending;
task->ta_pending = 0;
return (task_is_running(queue, task) ? EBUSY : 0);
}
int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
int error;
TQ_LOCK(queue);
error = taskqueue_cancel_locked(queue, task, pendp);
TQ_UNLOCK(queue);
return (error);
}
int
taskqueue_cancel_timeout(struct taskqueue *queue,
struct timeout_task *timeout_task, u_int *pendp)
{
u_int pending, pending1;
int error;
TQ_LOCK(queue);
pending = !!(callout_stop(&timeout_task->c) > 0);
error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
timeout_task->f &= ~DT_CALLOUT_ARMED;
queue->tq_callouts--;
}
TQ_UNLOCK(queue);
if (pendp != NULL)
*pendp = pending + pending1;
return (error);
}
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
if (!queue->tq_spin)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
TQ_LOCK(queue);
while (task->ta_pending != 0 || task_is_running(queue, task))
TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
TQ_UNLOCK(queue);
}
void
taskqueue_drain_all(struct taskqueue *queue)
{
if (!queue->tq_spin)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
TQ_LOCK(queue);
taskqueue_drain_tq_queue(queue);
taskqueue_drain_tq_active(queue);
TQ_UNLOCK(queue);
}
void
taskqueue_drain_timeout(struct taskqueue *queue,
struct timeout_task *timeout_task)
{
/*
* Set flag to prevent timer from re-starting during drain:
*/
TQ_LOCK(queue);
KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
("Drain already in progress"));
timeout_task->f |= DT_DRAIN_IN_PROGRESS;
TQ_UNLOCK(queue);
callout_drain(&timeout_task->c);
taskqueue_drain(queue, &timeout_task->t);
/*
* Clear flag to allow timer to re-start:
*/
TQ_LOCK(queue);
timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
TQ_UNLOCK(queue);
}
static void
taskqueue_swi_enqueue(void *context)
{
swi_sched(taskqueue_ih, 0);
}
static void
taskqueue_swi_run(void *dummy)
{
taskqueue_run(taskqueue_swi);
}
static void
taskqueue_swi_giant_enqueue(void *context)
{
swi_sched(taskqueue_giant_ih, 0);
}
static void
taskqueue_swi_giant_run(void *dummy)
{
taskqueue_run(taskqueue_swi_giant);
}
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
cpuset_t *mask, const char *name, va_list ap)
{
char ktname[MAXCOMLEN + 1];
struct thread *td;
struct taskqueue *tq;
int i, error;
if (count <= 0)
return (EINVAL);
vsnprintf(ktname, sizeof(ktname), name, ap);
tq = *tqp;
- tq->tq_threads = mallocarray(count, sizeof(struct thread *),
- M_TASKQUEUE, M_NOWAIT | M_ZERO);
+ tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
+ M_NOWAIT | M_ZERO);
if (tq->tq_threads == NULL) {
printf("%s: no memory for %s threads\n", __func__, ktname);
return (ENOMEM);
}
for (i = 0; i < count; i++) {
if (count == 1)
error = kthread_add(taskqueue_thread_loop, tqp, NULL,
&tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
else
error = kthread_add(taskqueue_thread_loop, tqp, NULL,
&tq->tq_threads[i], RFSTOPPED, 0,
"%s_%d", ktname, i);
if (error) {
/* should be ok to continue, taskqueue_free will dtrt */
printf("%s: kthread_add(%s): error %d", __func__,
ktname, error);
tq->tq_threads[i] = NULL; /* paranoid */
} else
tq->tq_tcount++;
}
if (tq->tq_tcount == 0) {
free(tq->tq_threads, M_TASKQUEUE);
tq->tq_threads = NULL;
return (ENOMEM);
}
for (i = 0; i < count; i++) {
if (tq->tq_threads[i] == NULL)
continue;
td = tq->tq_threads[i];
if (mask) {
error = cpuset_setthread(td->td_tid, mask);
/*
* Failing to pin is rarely an actual fatal error;
* it'll just affect performance.
*/
if (error)
printf("%s: curthread=%llu: can't pin; "
"error=%d\n",
__func__,
(unsigned long long) td->td_tid,
error);
}
thread_lock(td);
sched_prio(td, pri);
sched_add(td, SRQ_BORING);
thread_unlock(td);
}
return (0);
}
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
const char *name, ...)
{
va_list ap;
int error;
va_start(ap, name);
error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
va_end(ap);
return (error);
}
int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
cpuset_t *mask, const char *name, ...)
{
va_list ap;
int error;
va_start(ap, name);
error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
va_end(ap);
return (error);
}
static inline void
taskqueue_run_callback(struct taskqueue *tq,
enum taskqueue_callback_type cb_type)
{
taskqueue_callback_fn tq_callback;
TQ_ASSERT_UNLOCKED(tq);
tq_callback = tq->tq_callbacks[cb_type];
if (tq_callback != NULL)
tq_callback(tq->tq_cb_contexts[cb_type]);
}
void
taskqueue_thread_loop(void *arg)
{
struct taskqueue **tqp, *tq;
tqp = arg;
tq = *tqp;
taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
TQ_LOCK(tq);
while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
/* XXX ? */
taskqueue_run_locked(tq);
/*
* Because taskqueue_run() can drop tq_mutex, we need to
* check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
* meantime, which means we missed a wakeup.
*/
if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
break;
TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
}
taskqueue_run_locked(tq);
/*
* This thread is on its way out, so just drop the lock temporarily
* in order to call the shutdown callback. This allows the callback
* to look at the taskqueue, even just before it dies.
*/
TQ_UNLOCK(tq);
taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
TQ_LOCK(tq);
/* rendezvous with thread that asked us to terminate */
tq->tq_tcount--;
wakeup_one(tq->tq_threads);
TQ_UNLOCK(tq);
kthread_exit();
}
void
taskqueue_thread_enqueue(void *context)
{
struct taskqueue **tqp, *tq;
tqp = context;
tq = *tqp;
wakeup_one(tq);
}
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
INTR_MPSAFE, &taskqueue_ih));
TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));
TASKQUEUE_DEFINE_THREAD(thread);
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
taskqueue_enqueue_fn enqueue, void *context)
{
return _taskqueue_create(name, mflags, enqueue, context,
MTX_SPIN, "fast_taskqueue");
}
static void *taskqueue_fast_ih;
static void
taskqueue_fast_enqueue(void *context)
{
swi_sched(taskqueue_fast_ih, 0);
}
static void
taskqueue_fast_run(void *dummy)
{
taskqueue_run(taskqueue_fast);
}
TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
int i, j, ret = 0;
for (i = 0, j = 0; ; i++) {
if (queue->tq_threads[i] == NULL)
continue;
if (queue->tq_threads[i] == td) {
ret = 1;
break;
}
if (++j >= queue->tq_tcount)
break;
}
return (ret);
}
Index: head/sys/kern/subr_vmem.c
===================================================================
--- head/sys/kern/subr_vmem.c (revision 328217)
+++ head/sys/kern/subr_vmem.c (revision 328218)
@@ -1,1608 +1,1608 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
* Copyright (c) 2013 EMC Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* From:
* $NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
* $NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
*/
/*
* reference:
* - Magazines and Vmem: Extending the Slab Allocator
* to Many CPUs and Arbitrary Resources
* http://www.usenix.org/event/usenix01/bonwick.html
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include "opt_vm.h"
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#define VMEM_OPTORDER 5
#define VMEM_OPTVALUE (1 << VMEM_OPTORDER)
#define VMEM_MAXORDER \
(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
#define VMEM_HASHSIZE_MIN 16
#define VMEM_HASHSIZE_MAX 131072
#define VMEM_QCACHE_IDX_MAX 16
#define VMEM_FITMASK (M_BESTFIT | M_FIRSTFIT)
#define VMEM_FLAGS \
(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)
#define BT_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
#define QC_NAME_MAX 16
/*
* Data structures private to vmem.
*/
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
typedef struct vmem_btag bt_t;
TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);
struct qcache {
uma_zone_t qc_cache;
vmem_t *qc_vmem;
vmem_size_t qc_size;
char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
#define VMEM_NAME_MAX 16
/* vmem arena */
struct vmem {
struct mtx_padalign vm_lock;
struct cv vm_cv;
char vm_name[VMEM_NAME_MAX+1];
LIST_ENTRY(vmem) vm_alllist;
struct vmem_hashlist vm_hash0[VMEM_HASHSIZE_MIN];
struct vmem_freelist vm_freelist[VMEM_MAXORDER];
struct vmem_seglist vm_seglist;
struct vmem_hashlist *vm_hashlist;
vmem_size_t vm_hashsize;
/* Constant after init */
vmem_size_t vm_qcache_max;
vmem_size_t vm_quantum_mask;
vmem_size_t vm_import_quantum;
int vm_quantum_shift;
/* Written on alloc/free */
LIST_HEAD(, vmem_btag) vm_freetags;
int vm_nfreetags;
int vm_nbusytag;
vmem_size_t vm_inuse;
vmem_size_t vm_size;
vmem_size_t vm_limit;
/* Used on import. */
vmem_import_t *vm_importfn;
vmem_release_t *vm_releasefn;
void *vm_arg;
/* Space exhaustion callback. */
vmem_reclaim_t *vm_reclaimfn;
/* quantum cache */
qcache_t vm_qcache[VMEM_QCACHE_IDX_MAX];
};
/* boundary tag */
struct vmem_btag {
TAILQ_ENTRY(vmem_btag) bt_seglist;
union {
LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
} bt_u;
#define bt_hashlist bt_u.u_hashlist
#define bt_freelist bt_u.u_freelist
vmem_addr_t bt_start;
vmem_size_t bt_size;
int bt_type;
};
#define BT_TYPE_SPAN 1 /* Allocated from importfn */
#define BT_TYPE_SPAN_STATIC 2 /* vmem_add() or create. */
#define BT_TYPE_FREE 3 /* Available space. */
#define BT_TYPE_BUSY 4 /* Used space. */
#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1)
#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
&enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif
static struct callout vmem_periodic_ch;
static int vmem_periodic_interval;
static struct task vmem_periodic_wk;
static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;
/* ---- misc */
#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
#define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock)
#define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock)
#define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock)
#define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock)
#define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED);
#define VMEM_ALIGNUP(addr, align) (-(-(addr) & -(align)))
#define VMEM_CROSS_P(addr1, addr2, boundary) \
((((addr1) ^ (addr2)) & -(boundary)) != 0)
#define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \
(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
/*
* Maximum number of boundary tags that may be required to satisfy an
* allocation. Two may be required to import. Another two may be
* required to clip edges.
*/
#define BT_MAXALLOC 4
/*
* Max free limits the number of locally cached boundary tags. We
* just want to avoid hitting the zone allocator for every call.
*/
#define BT_MAXFREE (BT_MAXALLOC * 8)
/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;
/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;
#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif
/*
* Fill the vmem's boundary tag cache. We guarantee that boundary tag
* allocation will not fail once bt_fill() passes. To do so we cache
* at least the maximum possible tag allocations in the arena.
*/
static int
bt_fill(vmem_t *vm, int flags)
{
bt_t *bt;
VMEM_ASSERT_LOCKED(vm);
/*
* Only allow the kernel arena and arenas derived from kernel arena to
* dip into reserve tags. They are where new tags come from.
*/
flags &= BT_FLAGS;
if (vm != kernel_arena && vm->vm_arg != kernel_arena)
flags &= ~M_USE_RESERVE;
/*
* Loop until we meet the reserve. To minimize the lock shuffle
* and prevent simultaneous fills we first try a NOWAIT regardless
* of the caller's flags. Specify M_NOVM so we don't recurse while
* holding a vmem lock.
*/
while (vm->vm_nfreetags < BT_MAXALLOC) {
bt = uma_zalloc(vmem_bt_zone,
(flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
if (bt == NULL) {
VMEM_UNLOCK(vm);
bt = uma_zalloc(vmem_bt_zone, flags);
VMEM_LOCK(vm);
if (bt == NULL && (flags & M_NOWAIT) != 0)
break;
}
LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
vm->vm_nfreetags++;
}
if (vm->vm_nfreetags < BT_MAXALLOC)
return ENOMEM;
return 0;
}
/*
* Pop a tag off of the freetag stack.
*/
static bt_t *
bt_alloc(vmem_t *vm)
{
bt_t *bt;
VMEM_ASSERT_LOCKED(vm);
bt = LIST_FIRST(&vm->vm_freetags);
MPASS(bt != NULL);
LIST_REMOVE(bt, bt_freelist);
vm->vm_nfreetags--;
return bt;
}
/*
* Trim the per-vmem free list. Returns with the lock released to
* avoid allocator recursions.
*/
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
LIST_HEAD(, vmem_btag) freetags;
bt_t *bt;
LIST_INIT(&freetags);
VMEM_ASSERT_LOCKED(vm);
while (vm->vm_nfreetags > freelimit) {
bt = LIST_FIRST(&vm->vm_freetags);
LIST_REMOVE(bt, bt_freelist);
vm->vm_nfreetags--;
LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
}
VMEM_UNLOCK(vm);
while ((bt = LIST_FIRST(&freetags)) != NULL) {
LIST_REMOVE(bt, bt_freelist);
uma_zfree(vmem_bt_zone, bt);
}
}
static inline void
bt_free(vmem_t *vm, bt_t *bt)
{
VMEM_ASSERT_LOCKED(vm);
MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
vm->vm_nfreetags++;
}
/*
* freelist[0] ... [1, 1]
* freelist[1] ... [2, 2]
* :
* freelist[29] ... [30, 30]
* freelist[30] ... [31, 31]
* freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
* :
* freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
* :
*/
static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
const vmem_size_t qsize = size >> vm->vm_quantum_shift;
const int idx = SIZE2ORDER(qsize);
MPASS(size != 0 && qsize != 0);
MPASS((size & vm->vm_quantum_mask) == 0);
MPASS(idx >= 0);
MPASS(idx < VMEM_MAXORDER);
return &vm->vm_freelist[idx];
}
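/*
 * Worked example, assuming VMEM_OPTORDER == 5 (VMEM_OPTVALUE == 32): a
 * quantum-scaled size of 20 maps exactly to freelist[19], while a size of
 * 100 maps to freelist[flsl(100) + 25] == freelist[32], whose range is
 * [64, 127].
 */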
/*
* bt_freehead_toalloc: return the freelist for the given size and allocation
* strategy.
*
 * For M_FIRSTFIT, return the list in which every block is large enough
 * for the requested size.  Otherwise, return the list which may contain
 * blocks large enough for the requested size.
*/
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
const vmem_size_t qsize = size >> vm->vm_quantum_shift;
int idx = SIZE2ORDER(qsize);
MPASS(size != 0 && qsize != 0);
MPASS((size & vm->vm_quantum_mask) == 0);
if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
idx++;
/* check too large request? */
}
MPASS(idx >= 0);
MPASS(idx < VMEM_MAXORDER);
return &vm->vm_freelist[idx];
}
/* ---- boundary tag hash */
static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
struct vmem_hashlist *list;
unsigned int hash;
hash = hash32_buf(&addr, sizeof(addr), 0);
list = &vm->vm_hashlist[hash % vm->vm_hashsize];
return list;
}
static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
struct vmem_hashlist *list;
bt_t *bt;
VMEM_ASSERT_LOCKED(vm);
list = bt_hashhead(vm, addr);
LIST_FOREACH(bt, list, bt_hashlist) {
if (bt->bt_start == addr) {
break;
}
}
return bt;
}
static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{
VMEM_ASSERT_LOCKED(vm);
MPASS(vm->vm_nbusytag > 0);
vm->vm_inuse -= bt->bt_size;
vm->vm_nbusytag--;
LIST_REMOVE(bt, bt_hashlist);
}
static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
struct vmem_hashlist *list;
VMEM_ASSERT_LOCKED(vm);
MPASS(bt->bt_type == BT_TYPE_BUSY);
list = bt_hashhead(vm, bt->bt_start);
LIST_INSERT_HEAD(list, bt, bt_hashlist);
vm->vm_nbusytag++;
vm->vm_inuse += bt->bt_size;
}
/* ---- boundary tag list */
static void
bt_remseg(vmem_t *vm, bt_t *bt)
{
TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
bt_free(vm, bt);
}
static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{
TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}
static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{
TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}
static void
bt_remfree(vmem_t *vm, bt_t *bt)
{
MPASS(bt->bt_type == BT_TYPE_FREE);
LIST_REMOVE(bt, bt_freelist);
}
static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
struct vmem_freelist *list;
list = bt_freehead_tofree(vm, bt->bt_size);
LIST_INSERT_HEAD(list, bt, bt_freelist);
}
/* ---- vmem internal functions */
/*
* Import from the arena into the quantum cache in UMA.
*/
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
qcache_t *qc;
vmem_addr_t addr;
int i;
qc = arg;
if ((flags & VMEM_FITMASK) == 0)
flags |= M_BESTFIT;
for (i = 0; i < cnt; i++) {
if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
break;
store[i] = (void *)addr;
/* Only guarantee one allocation. */
flags &= ~M_WAITOK;
flags |= M_NOWAIT;
}
return i;
}
/*
* Release memory from the UMA cache to the arena.
*/
static void
qc_release(void *arg, void **store, int cnt)
{
qcache_t *qc;
int i;
qc = arg;
for (i = 0; i < cnt; i++)
vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}
static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
qcache_t *qc;
vmem_size_t size;
int qcache_idx_max;
int i;
MPASS((qcache_max & vm->vm_quantum_mask) == 0);
qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
VMEM_QCACHE_IDX_MAX);
vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
for (i = 0; i < qcache_idx_max; i++) {
qc = &vm->vm_qcache[i];
size = (i + 1) << vm->vm_quantum_shift;
snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
vm->vm_name, size);
qc->qc_vmem = vm;
qc->qc_size = size;
qc->qc_cache = uma_zcache_create(qc->qc_name, size,
NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
UMA_ZONE_VM);
MPASS(qc->qc_cache);
}
}
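/*
 * Illustrative userspace sketch (not part of this file): how a request size
 * maps to a quantum cache.  A 4 KB quantum and a 16-entry cache cap are
 * assumed here; the index arithmetic mirrors what vmem_alloc() and
 * vmem_free() do further below.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned long quantum_shift = 12;		/* assumed 4 KB quantum */
	const unsigned long qcache_idx_max = 16;	/* assumed cache cap */
	unsigned long qcache_max = qcache_idx_max << quantum_shift;
	unsigned long sizes[] = { 1, 4096, 4097, 8192, 65536, 65537 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long size = sizes[i];

		if (size <= qcache_max)
			printf("size %lu -> qcache[%lu] (%lu-byte zone)\n",
			    size, (size - 1) >> quantum_shift,
			    (((size - 1) >> quantum_shift) + 1) << quantum_shift);
		else
			printf("size %lu -> vmem_xalloc() path\n", size);
	}
	return (0);
}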
static void
qc_destroy(vmem_t *vm)
{
int qcache_idx_max;
int i;
qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
for (i = 0; i < qcache_idx_max; i++)
uma_zdestroy(vm->vm_qcache[i].qc_cache);
}
static void
qc_drain(vmem_t *vm)
{
int qcache_idx_max;
int i;
qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
for (i = 0; i < qcache_idx_max; i++)
zone_drain(vm->vm_qcache[i].qc_cache);
}
#ifndef UMA_MD_SMALL_ALLOC
static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
/*
* vmem_bt_alloc: Allocate a new page of boundary tags.
*
* On architectures with uma_small_alloc there is no recursion; no address
* space need be allocated to allocate boundary tags. For the others, we
* must handle recursion. Boundary tags are necessary to allocate new
* boundary tags.
*
* UMA guarantees that enough tags are held in reserve to allocate a new
* page of kva. We dip into this reserve by specifying M_USE_RESERVE only
* when allocating the page to hold new boundary tags. In this way the
* reserve is automatically filled by the allocation that uses the reserve.
*
* We still have to guarantee that the new tags are allocated atomically since
* many threads may try concurrently. The bt_lock provides this guarantee.
* We convert WAITOK allocations to NOWAIT and then handle the blocking here
* on failure. It's ok to return NULL for a WAITOK allocation as UMA will
* loop again after checking to see if we lost the race to allocate.
*
* There is a small race between vmem_bt_alloc() returning the page and the
* zone lock being acquired to add the page to the zone. For WAITOK
* allocations we just pause briefly. NOWAIT may experience a transient
* failure. To alleviate this we permit a small number of simultaneous
* fills to proceed concurrently so NOWAIT is less likely to fail unless
* we are really out of KVA.
*/
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
int wait)
{
vmem_addr_t addr;
*pflag = UMA_SLAB_KERNEL;
/*
* Single thread boundary tag allocation so that the address space
* and memory are added in one atomic operation.
*/
mtx_lock(&vmem_bt_lock);
if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
VMEM_ADDR_MIN, VMEM_ADDR_MAX,
M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
if (kmem_back_domain(domain, kernel_object, addr, bytes,
M_NOWAIT | M_USE_RESERVE) == 0) {
mtx_unlock(&vmem_bt_lock);
return ((void *)addr);
}
vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
mtx_unlock(&vmem_bt_lock);
/*
* Out of memory, not address space. This may not even be
* possible due to M_USE_RESERVE page allocation.
*/
if (wait & M_WAITOK)
VM_WAIT;
return (NULL);
}
mtx_unlock(&vmem_bt_lock);
/*
* We're either out of address space or lost a fill race.
*/
if (wait & M_WAITOK)
pause("btalloc", 1);
return (NULL);
}
#endif
void
vmem_startup(void)
{
mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
vmem_zone = uma_zcreate("vmem",
sizeof(struct vmem), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM);
vmem_bt_zone = uma_zcreate("vmem btag",
sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
/*
* Reserve enough tags to allocate new tags. We allow multiple
* CPUs to attempt to allocate new tags concurrently to limit
* false restarts in UMA.
*/
uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}
/* ---- rehash */
static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
bt_t *bt;
int i;
struct vmem_hashlist *newhashlist;
struct vmem_hashlist *oldhashlist;
vmem_size_t oldhashsize;
MPASS(newhashsize > 0);
- newhashlist = mallocarray(newhashsize, sizeof(struct vmem_hashlist),
+ newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
M_VMEM, M_NOWAIT);
if (newhashlist == NULL)
return ENOMEM;
for (i = 0; i < newhashsize; i++) {
LIST_INIT(&newhashlist[i]);
}
VMEM_LOCK(vm);
oldhashlist = vm->vm_hashlist;
oldhashsize = vm->vm_hashsize;
vm->vm_hashlist = newhashlist;
vm->vm_hashsize = newhashsize;
if (oldhashlist == NULL) {
VMEM_UNLOCK(vm);
return 0;
}
for (i = 0; i < oldhashsize; i++) {
while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
bt_rembusy(vm, bt);
bt_insbusy(vm, bt);
}
}
VMEM_UNLOCK(vm);
if (oldhashlist != vm->vm_hash0) {
free(oldhashlist, M_VMEM);
}
return 0;
}
static void
vmem_periodic_kick(void *dummy)
{
taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}
static void
vmem_periodic(void *unused, int pending)
{
vmem_t *vm;
vmem_size_t desired;
vmem_size_t current;
mtx_lock(&vmem_list_lock);
LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
/* Convenient time to verify vmem state. */
if (enable_vmem_check == 1) {
VMEM_LOCK(vm);
vmem_check(vm);
VMEM_UNLOCK(vm);
}
#endif
desired = 1 << flsl(vm->vm_nbusytag);
desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
VMEM_HASHSIZE_MAX);
current = vm->vm_hashsize;
/* Grow in powers of two. Shrink less aggressively. */
if (desired >= current * 2 || desired * 4 <= current)
vmem_rehash(vm, desired);
/*
* Periodically wake up threads waiting for resources,
* so they can ask for reclamation again.
*/
VMEM_CONDVAR_BROADCAST(vm);
}
mtx_unlock(&vmem_list_lock);
callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
vmem_periodic_kick, NULL);
}
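/*
 * Userspace sketch of the rehash policy above (illustration only).
 * flsl_sketch() stands in for flsl(9), and the 16/131072 clamp values are
 * assumed placeholders for VMEM_HASHSIZE_MIN/MAX: the table grows to the
 * next power of two above the busy-tag count and shrinks only when it is
 * at least four times too large.
 */
#include <stdio.h>

static int
flsl_sketch(unsigned long x)
{
	int bit;

	for (bit = 0; x != 0; bit++)
		x >>= 1;
	return (bit);			/* 1-based index of the highest set bit */
}

int
main(void)
{
	unsigned long current = 16;
	unsigned long busy[] = { 10, 40, 200, 3 };
	size_t i;

	for (i = 0; i < sizeof(busy) / sizeof(busy[0]); i++) {
		unsigned long desired = 1UL << flsl_sketch(busy[i]);

		if (desired < 16)
			desired = 16;
		if (desired > 131072)
			desired = 131072;
		printf("nbusytag %lu: desired %lu -> %s\n", busy[i], desired,
		    (desired >= current * 2 || desired * 4 <= current) ?
		    "rehash" : "keep current size");
	}
	return (0);
}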
static void
vmem_start_callout(void *unused)
{
TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
vmem_periodic_interval = hz * 10;
callout_init(&vmem_periodic_ch, 1);
callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
bt_t *btspan;
bt_t *btfree;
MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
MPASS((size & vm->vm_quantum_mask) == 0);
btspan = bt_alloc(vm);
btspan->bt_type = type;
btspan->bt_start = addr;
btspan->bt_size = size;
bt_insseg_tail(vm, btspan);
btfree = bt_alloc(vm);
btfree->bt_type = BT_TYPE_FREE;
btfree->bt_start = addr;
btfree->bt_size = size;
bt_insseg(vm, btfree, btspan);
bt_insfree(vm, btfree);
vm->vm_size += size;
}
static void
vmem_destroy1(vmem_t *vm)
{
bt_t *bt;
/*
* Drain per-cpu quantum caches.
*/
qc_destroy(vm);
/*
* The vmem should now only contain empty segments.
*/
VMEM_LOCK(vm);
MPASS(vm->vm_nbusytag == 0);
while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
bt_remseg(vm, bt);
if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
free(vm->vm_hashlist, M_VMEM);
bt_freetrim(vm, 0);
VMEM_CONDVAR_DESTROY(vm);
VMEM_LOCK_DESTROY(vm);
uma_zfree(vmem_zone, vm);
}
static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
vmem_addr_t addr;
int error;
if (vm->vm_importfn == NULL)
return (EINVAL);
/*
* To make sure we get a span that meets the alignment we double it
* and add the size to the tail. This slightly overestimates.
*/
if (align != vm->vm_quantum_mask + 1)
size = (align * 2) + size;
size = roundup(size, vm->vm_import_quantum);
if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
return (ENOMEM);
/*
* Hide MAXALLOC tags so we're guaranteed to be able to add this
* span and the tag we want to allocate from it.
*/
MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
vm->vm_nfreetags -= BT_MAXALLOC;
VMEM_UNLOCK(vm);
error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
VMEM_LOCK(vm);
vm->vm_nfreetags += BT_MAXALLOC;
if (error)
return (ENOMEM);
vmem_add1(vm, addr, size, BT_TYPE_SPAN);
return 0;
}
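/*
 * Worked example of the import-size arithmetic above (userspace sketch,
 * hypothetical values).  A request with an alignment stricter than the
 * quantum is overestimated to 2 * align + size and then rounded up to the
 * arena's import quantum, so the imported span can always be clipped to an
 * aligned region.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long size = 24 * 1024;			/* request */
	unsigned long align = 64 * 1024;		/* stricter than quantum */
	unsigned long import_quantum = 1024 * 1024;
	unsigned long span;

	span = (align * 2) + size;			/* overestimate */
	span = ((span + import_quantum - 1) / import_quantum) *
	    import_quantum;				/* roundup() */
	printf("request %lu with align %lu -> import %lu bytes\n",
	    size, align, span);
	return (0);
}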
/*
* vmem_fit: check if a bt can satisfy the given restrictions.
*
* It is the caller's responsibility to ensure the region is big enough
* before calling us.
*/
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
vmem_addr_t start;
vmem_addr_t end;
MPASS(size > 0);
MPASS(bt->bt_size >= size); /* caller's responsibility */
/*
* XXX assumption: vmem_addr_t and vmem_size_t are
* unsigned integers of the same size.
*/
start = bt->bt_start;
if (start < minaddr) {
start = minaddr;
}
end = BT_END(bt);
if (end > maxaddr)
end = maxaddr;
if (start > end)
return (ENOMEM);
start = VMEM_ALIGNUP(start - phase, align) + phase;
if (start < bt->bt_start)
start += align;
if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
MPASS(align < nocross);
start = VMEM_ALIGNUP(start - phase, nocross) + phase;
}
if (start <= end && end - start >= size - 1) {
MPASS((start & (align - 1)) == phase);
MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
MPASS(minaddr <= start);
MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
MPASS(bt->bt_start <= start);
MPASS(BT_END(bt) - start >= size - 1);
*addrp = start;
return (0);
}
return (ENOMEM);
}
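/*
 * Userspace sketch of the fit arithmetic above (illustration only).
 * align_up() and crosses() are hypothetical stand-ins for the
 * VMEM_ALIGNUP/VMEM_CROSS_P macros; align must be a power of two and
 * nocross == 0 disables the boundary check, as in vmem_fit().
 */
#include <stdio.h>

typedef unsigned long addr_t;

static addr_t
align_up(addr_t addr, addr_t align)
{
	return ((addr + align - 1) & ~(align - 1));
}

static int
crosses(addr_t a1, addr_t a2, addr_t nocross)
{
	if (nocross == 0)
		return (0);
	return ((a1 & ~(nocross - 1)) != (a2 & ~(nocross - 1)));
}

int
main(void)
{
	/* A free segment [0x10300, 0x1ffff] and an aligned, phased request. */
	addr_t bt_start = 0x10300, bt_end = 0x1ffff;
	addr_t size = 0x1000, align = 0x1000, phase = 0x100, nocross = 0x8000;
	addr_t start;

	start = align_up(bt_start - phase, align) + phase;
	if (start < bt_start)
		start += align;
	if (crosses(start, start + size - 1, nocross))
		start = align_up(start - phase, nocross) + phase;
	if (start + size - 1 <= bt_end)
		printf("fits at 0x%lx\n", start);	/* prints 0x11100 */
	else
		printf("does not fit\n");
	return (0);
}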
/*
* vmem_clip: Trim the boundary tag edges to the requested start and size.
*/
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
bt_t *btnew;
bt_t *btprev;
VMEM_ASSERT_LOCKED(vm);
MPASS(bt->bt_type == BT_TYPE_FREE);
MPASS(bt->bt_size >= size);
bt_remfree(vm, bt);
if (bt->bt_start != start) {
btprev = bt_alloc(vm);
btprev->bt_type = BT_TYPE_FREE;
btprev->bt_start = bt->bt_start;
btprev->bt_size = start - bt->bt_start;
bt->bt_start = start;
bt->bt_size -= btprev->bt_size;
bt_insfree(vm, btprev);
bt_insseg(vm, btprev,
TAILQ_PREV(bt, vmem_seglist, bt_seglist));
}
MPASS(bt->bt_start == start);
if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
/* split */
btnew = bt_alloc(vm);
btnew->bt_type = BT_TYPE_BUSY;
btnew->bt_start = bt->bt_start;
btnew->bt_size = size;
bt->bt_start = bt->bt_start + size;
bt->bt_size -= size;
bt_insfree(vm, bt);
bt_insseg(vm, btnew,
TAILQ_PREV(bt, vmem_seglist, bt_seglist));
bt_insbusy(vm, btnew);
bt = btnew;
} else {
bt->bt_type = BT_TYPE_BUSY;
bt_insbusy(vm, bt);
}
MPASS(bt->bt_size >= size);
bt->bt_type = BT_TYPE_BUSY;
}
/* ---- vmem API */
void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{
VMEM_LOCK(vm);
vm->vm_importfn = importfn;
vm->vm_releasefn = releasefn;
vm->vm_arg = arg;
vm->vm_import_quantum = import_quantum;
VMEM_UNLOCK(vm);
}
void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{
VMEM_LOCK(vm);
vm->vm_limit = limit;
VMEM_UNLOCK(vm);
}
void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{
VMEM_LOCK(vm);
vm->vm_reclaimfn = reclaimfn;
VMEM_UNLOCK(vm);
}
/*
* vmem_init: Initializes vmem arena.
*/
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
int i;
MPASS(quantum > 0);
MPASS((quantum & (quantum - 1)) == 0);
bzero(vm, sizeof(*vm));
VMEM_CONDVAR_INIT(vm, name);
VMEM_LOCK_INIT(vm, name);
vm->vm_nfreetags = 0;
LIST_INIT(&vm->vm_freetags);
strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
vm->vm_quantum_mask = quantum - 1;
vm->vm_quantum_shift = flsl(quantum) - 1;
vm->vm_nbusytag = 0;
vm->vm_size = 0;
vm->vm_limit = 0;
vm->vm_inuse = 0;
qc_init(vm, qcache_max);
TAILQ_INIT(&vm->vm_seglist);
for (i = 0; i < VMEM_MAXORDER; i++) {
LIST_INIT(&vm->vm_freelist[i]);
}
memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
vm->vm_hashsize = VMEM_HASHSIZE_MIN;
vm->vm_hashlist = vm->vm_hash0;
if (size != 0) {
if (vmem_add(vm, base, size, flags) != 0) {
vmem_destroy1(vm);
return NULL;
}
}
mtx_lock(&vmem_list_lock);
LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
mtx_unlock(&vmem_list_lock);
return vm;
}
/*
* vmem_create: create an arena.
*/
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
vmem_t *vm;
vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
if (vm == NULL)
return (NULL);
if (vmem_init(vm, name, base, size, quantum, qcache_max,
flags) == NULL)
return (NULL);
return (vm);
}
void
vmem_destroy(vmem_t *vm)
{
mtx_lock(&vmem_list_lock);
LIST_REMOVE(vm, vm_alllist);
mtx_unlock(&vmem_list_lock);
vmem_destroy1(vm);
}
vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{
return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
/*
* vmem_alloc: allocate resource from the arena.
*/
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
const int strat __unused = flags & VMEM_FITMASK;
qcache_t *qc;
flags &= VMEM_FLAGS;
MPASS(size > 0);
MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
if ((flags & M_NOWAIT) == 0)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
if (size <= vm->vm_qcache_max) {
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
if (*addrp == 0)
return (ENOMEM);
return (0);
}
return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
flags, addrp);
}
int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
const vmem_size_t phase, const vmem_size_t nocross,
const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
vmem_addr_t *addrp)
{
const vmem_size_t size = vmem_roundup_size(vm, size0);
struct vmem_freelist *list;
struct vmem_freelist *first;
struct vmem_freelist *end;
vmem_size_t avail;
bt_t *bt;
int error;
int strat;
flags &= VMEM_FLAGS;
strat = flags & VMEM_FITMASK;
MPASS(size0 > 0);
MPASS(size > 0);
MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
if ((flags & M_NOWAIT) == 0)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
MPASS((align & vm->vm_quantum_mask) == 0);
MPASS((align & (align - 1)) == 0);
MPASS((phase & vm->vm_quantum_mask) == 0);
MPASS((nocross & vm->vm_quantum_mask) == 0);
MPASS((nocross & (nocross - 1)) == 0);
MPASS((align == 0 && phase == 0) || phase < align);
MPASS(nocross == 0 || nocross >= size);
MPASS(minaddr <= maxaddr);
MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
if (align == 0)
align = vm->vm_quantum_mask + 1;
*addrp = 0;
end = &vm->vm_freelist[VMEM_MAXORDER];
/*
* choose a free block from which we allocate.
*/
first = bt_freehead_toalloc(vm, size, strat);
VMEM_LOCK(vm);
for (;;) {
/*
* Make sure we have enough tags to complete the
* operation.
*/
if (vm->vm_nfreetags < BT_MAXALLOC &&
bt_fill(vm, flags) != 0) {
error = ENOMEM;
break;
}
/*
* Scan freelists looking for a tag that satisfies the
* allocation. If we're doing BESTFIT we may encounter
* sizes below the request. If we're doing FIRSTFIT we
* inspect only the first element from each list.
*/
for (list = first; list < end; list++) {
LIST_FOREACH(bt, list, bt_freelist) {
if (bt->bt_size >= size) {
error = vmem_fit(bt, size, align, phase,
nocross, minaddr, maxaddr, addrp);
if (error == 0) {
vmem_clip(vm, bt, *addrp, size);
goto out;
}
}
/* FIRST skips to the next list. */
if (strat == M_FIRSTFIT)
break;
}
}
/*
* Retry if the fast algorithm failed.
*/
if (strat == M_FIRSTFIT) {
strat = M_BESTFIT;
first = bt_freehead_toalloc(vm, size, strat);
continue;
}
/*
* XXX it is possible to fail to meet restrictions with the
* imported region. It is up to the user to specify the
* import quantum such that it can satisfy any allocation.
*/
if (vmem_import(vm, size, align, flags) == 0)
continue;
/*
* Try to free some space from the quantum cache or reclaim
* functions if available.
*/
if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
avail = vm->vm_size - vm->vm_inuse;
VMEM_UNLOCK(vm);
if (vm->vm_qcache_max != 0)
qc_drain(vm);
if (vm->vm_reclaimfn != NULL)
vm->vm_reclaimfn(vm, flags);
VMEM_LOCK(vm);
/* If we were successful retry even NOWAIT. */
if (vm->vm_size - vm->vm_inuse > avail)
continue;
}
if ((flags & M_NOWAIT) != 0) {
error = ENOMEM;
break;
}
VMEM_CONDVAR_WAIT(vm);
}
out:
VMEM_UNLOCK(vm);
if (error != 0 && (flags & M_NOWAIT) == 0)
panic("failed to allocate waiting allocation\n");
return (error);
}
/*
* vmem_free: free the resource to the arena.
*/
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
qcache_t *qc;
MPASS(size > 0);
if (size <= vm->vm_qcache_max) {
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
uma_zfree(qc->qc_cache, (void *)addr);
} else
vmem_xfree(vm, addr, size);
}
void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
bt_t *bt;
bt_t *t;
MPASS(size > 0);
VMEM_LOCK(vm);
bt = bt_lookupbusy(vm, addr);
MPASS(bt != NULL);
MPASS(bt->bt_start == addr);
MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
MPASS(bt->bt_type == BT_TYPE_BUSY);
bt_rembusy(vm, bt);
bt->bt_type = BT_TYPE_FREE;
/* coalesce */
t = TAILQ_NEXT(bt, bt_seglist);
if (t != NULL && t->bt_type == BT_TYPE_FREE) {
MPASS(BT_END(bt) < t->bt_start); /* YYY */
bt->bt_size += t->bt_size;
bt_remfree(vm, t);
bt_remseg(vm, t);
}
t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
if (t != NULL && t->bt_type == BT_TYPE_FREE) {
MPASS(BT_END(t) < bt->bt_start); /* YYY */
bt->bt_size += t->bt_size;
bt->bt_start = t->bt_start;
bt_remfree(vm, t);
bt_remseg(vm, t);
}
t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
MPASS(t != NULL);
MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
t->bt_size == bt->bt_size) {
vmem_addr_t spanaddr;
vmem_size_t spansize;
MPASS(t->bt_start == bt->bt_start);
spanaddr = bt->bt_start;
spansize = bt->bt_size;
bt_remseg(vm, bt);
bt_remseg(vm, t);
vm->vm_size -= spansize;
VMEM_CONDVAR_BROADCAST(vm);
bt_freetrim(vm, BT_MAXFREE);
(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
} else {
bt_insfree(vm, bt);
VMEM_CONDVAR_BROADCAST(vm);
bt_freetrim(vm, BT_MAXFREE);
}
}
/*
* vmem_add: add a span of memory [addr, addr + size) to the arena.
*/
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
int error;
error = 0;
flags &= VMEM_FLAGS;
VMEM_LOCK(vm);
if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
else
error = ENOMEM;
VMEM_UNLOCK(vm);
return (error);
}
/*
* vmem_size: report information about the arena's size.
*/
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
int i;
switch (typemask) {
case VMEM_ALLOC:
return vm->vm_inuse;
case VMEM_FREE:
return vm->vm_size - vm->vm_inuse;
case VMEM_FREE|VMEM_ALLOC:
return vm->vm_size;
case VMEM_MAXFREE:
VMEM_LOCK(vm);
for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
if (LIST_EMPTY(&vm->vm_freelist[i]))
continue;
VMEM_UNLOCK(vm);
return ((vmem_size_t)ORDER2SIZE(i) <<
vm->vm_quantum_shift);
}
VMEM_UNLOCK(vm);
return (0);
default:
panic("vmem_size");
}
}
/* ---- debug */
#if defined(DDB) || defined(DIAGNOSTIC)
static void bt_dump(const bt_t *, int (*)(const char *, ...)
__printflike(1, 2));
static const char *
bt_type_string(int type)
{
switch (type) {
case BT_TYPE_BUSY:
return "busy";
case BT_TYPE_FREE:
return "free";
case BT_TYPE_SPAN:
return "span";
case BT_TYPE_SPAN_STATIC:
return "static span";
default:
break;
}
return "BOGUS";
}
static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{
(*pr)("\t%p: %jx %jx, %d(%s)\n",
bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
bt->bt_type, bt_type_string(bt->bt_type));
}
static void
vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
{
const bt_t *bt;
int i;
(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
bt_dump(bt, pr);
}
for (i = 0; i < VMEM_MAXORDER; i++) {
const struct vmem_freelist *fl = &vm->vm_freelist[i];
if (LIST_EMPTY(fl)) {
continue;
}
(*pr)("freelist[%d]\n", i);
LIST_FOREACH(bt, fl, bt_freelist) {
bt_dump(bt, pr);
}
}
}
#endif /* defined(DDB) || defined(DIAGNOSTIC) */
#if defined(DDB)
#include <ddb/ddb.h>
static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
bt_t *bt;
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
if (BT_ISSPAN_P(bt)) {
continue;
}
if (bt->bt_start <= addr && addr <= BT_END(bt)) {
return bt;
}
}
return NULL;
}
void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
vmem_t *vm;
LIST_FOREACH(vm, &vmem_list, vm_alllist) {
bt_t *bt;
bt = vmem_whatis_lookup(vm, addr);
if (bt == NULL) {
continue;
}
(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
(void *)addr, (void *)bt->bt_start,
(vmem_size_t)(addr - bt->bt_start), vm->vm_name,
(bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
}
}
void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
const vmem_t *vm;
LIST_FOREACH(vm, &vmem_list, vm_alllist) {
vmem_dump(vm, pr);
}
}
void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
const vmem_t *vm = (const void *)addr;
vmem_dump(vm, pr);
}
DB_SHOW_COMMAND(vmemdump, vmemdump)
{
if (!have_addr) {
db_printf("usage: show vmemdump <addr>\n");
return;
}
vmem_dump((const vmem_t *)addr, db_printf);
}
DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
{
const vmem_t *vm;
LIST_FOREACH(vm, &vmem_list, vm_alllist)
vmem_dump(vm, db_printf);
}
DB_SHOW_COMMAND(vmem, vmem_summ)
{
const vmem_t *vm = (const void *)addr;
const bt_t *bt;
size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
int ord;
if (!have_addr) {
db_printf("usage: show vmem <addr>\n");
return;
}
db_printf("vmem %p '%s'\n", vm, vm->vm_name);
db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
db_printf("\tsize:\t%zu\n", vm->vm_size);
db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
memset(&ft, 0, sizeof(ft));
memset(&ut, 0, sizeof(ut));
memset(&fs, 0, sizeof(fs));
memset(&us, 0, sizeof(us));
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
if (bt->bt_type == BT_TYPE_BUSY) {
ut[ord]++;
us[ord] += bt->bt_size;
} else if (bt->bt_type == BT_TYPE_FREE) {
ft[ord]++;
fs[ord] += bt->bt_size;
}
}
db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
for (ord = 0; ord < VMEM_MAXORDER; ord++) {
if (ut[ord] == 0 && ft[ord] == 0)
continue;
db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
ORDER2SIZE(ord) << vm->vm_quantum_shift,
ut[ord], us[ord], ft[ord], fs[ord]);
}
}
DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
{
const vmem_t *vm;
LIST_FOREACH(vm, &vmem_list, vm_alllist)
vmem_summ((db_expr_t)vm, TRUE, count, modif);
}
#endif /* defined(DDB) */
#define vmem_printf printf
#if defined(DIAGNOSTIC)
static bool
vmem_check_sanity(vmem_t *vm)
{
const bt_t *bt, *bt2;
MPASS(vm != NULL);
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
if (bt->bt_start > BT_END(bt)) {
printf("corrupted tag\n");
bt_dump(bt, vmem_printf);
return false;
}
}
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
if (bt == bt2) {
continue;
}
if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
continue;
}
if (bt->bt_start <= BT_END(bt2) &&
bt2->bt_start <= BT_END(bt)) {
printf("overwrapped tags\n");
bt_dump(bt, vmem_printf);
bt_dump(bt2, vmem_printf);
return false;
}
}
}
return true;
}
static void
vmem_check(vmem_t *vm)
{
if (!vmem_check_sanity(vm)) {
panic("insanity vmem %p", vm);
}
}
#endif /* defined(DIAGNOSTIC) */
Index: head/sys/mips/mips/busdma_machdep.c
===================================================================
--- head/sys/mips/mips/busdma_machdep.c (revision 328217)
+++ head/sys/mips/mips/busdma_machdep.c (revision 328218)
@@ -1,1526 +1,1525 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2006 Oleksandr Tymoshenko
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* MIPS bus dma support routines
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>
#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
* On XBurst cores from Ingenic, cache-line writeback is local
* only, unless accompanied by invalidation. Invalidations force
* dirty line writeout and cause invalidation requests to be forwarded
* to other cores if they hold the cache line dirty.
*/
#if defined(SMP) && defined(CPU_XBURST)
#define BUS_DMA_FORCE_WBINV
#endif
struct bounce_zone;
struct bus_dma_tag {
bus_dma_tag_t parent;
bus_size_t alignment;
bus_addr_t boundary;
bus_addr_t lowaddr;
bus_addr_t highaddr;
bus_dma_filter_t *filter;
void *filterarg;
bus_size_t maxsize;
u_int nsegments;
bus_size_t maxsegsz;
int flags;
int ref_count;
int map_count;
bus_dma_lock_t *lockfunc;
void *lockfuncarg;
bus_dma_segment_t *segments;
struct bounce_zone *bounce_zone;
};
struct bounce_page {
vm_offset_t vaddr; /* kva of bounce buffer */
vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
struct sync_list {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
bus_size_t datacount; /* client data count */
};
int busdma_swi_pending;
struct bounce_zone {
STAILQ_ENTRY(bounce_zone) links;
STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
int total_bpages;
int free_bpages;
int reserved_bpages;
int active_bpages;
int total_bounced;
int total_deferred;
int map_count;
bus_size_t alignment;
bus_addr_t lowaddr;
char zoneid[8];
char lowaddrid[20];
struct sysctl_ctx_list sysctl_tree;
struct sysctl_oid *sysctl_tree_top;
};
static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
"Total bounce pages");
#define DMAMAP_UNCACHEABLE 0x08
#define DMAMAP_CACHE_ALIGNED 0x10
struct bus_dmamap {
struct bp_list bpages;
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
struct memdesc mem;
int flags;
void *origbuffer;
void *allocbuffer;
TAILQ_ENTRY(bus_dmamap) freelist;
STAILQ_ENTRY(bus_dmamap) links;
bus_dmamap_callback_t *callback;
void *callback_arg;
int sync_count;
struct sync_list *slist;
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_addr_t addr,
bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;
static uma_zone_t dmamap_zone; /* Cache of struct bus_dmamap items */
static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */
MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
/*
* This is the ctor function passed to uma_zcreate() for the pool of dma maps.
* It'll need platform-specific changes if this code is copied.
*/
static int
dmamap_ctor(void *mem, int size, void *arg, int flags)
{
bus_dmamap_t map;
bus_dma_tag_t dmat;
map = (bus_dmamap_t)mem;
dmat = (bus_dma_tag_t)arg;
dmat->map_count++;
map->dmat = dmat;
map->flags = 0;
map->slist = NULL;
map->allocbuffer = NULL;
map->sync_count = 0;
STAILQ_INIT(&map->bpages);
return (0);
}
/*
* This is the dtor function passed to uma_zcreate() for the pool of dma maps.
* It may need platform-specific changes if this code is copied.
*/
static void
dmamap_dtor(void *mem, int size, void *arg)
{
bus_dmamap_t map;
map = (bus_dmamap_t)mem;
map->dmat->map_count--;
}
static void
busdma_init(void *dummy)
{
/* Create a cache of maps for bus_dmamap_create(). */
dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
/* Create a cache of buffers in standard (cacheable) memory. */
standard_allocator = busdma_bufalloc_create("buffer",
mips_dcache_max_linesize, /* minimum_alignment */
NULL, /* uma_alloc func */
NULL, /* uma_free func */
0); /* uma_zcreate_flags */
/*
* Create a cache of buffers in uncacheable memory, to implement the
* BUS_DMA_COHERENT flag.
*/
coherent_allocator = busdma_bufalloc_create("coherent",
mips_dcache_max_linesize, /* minimum_alignment */
busdma_bufalloc_alloc_uncacheable,
busdma_bufalloc_free_uncacheable,
0); /* uma_zcreate_flags */
}
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
/*
* Return true if a match is made.
*
* To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
*
* If paddr is within the bounds of the dma tag then call the filter callback
* to check for a match; if there is no filter callback then assume a match.
*/
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
int retval;
retval = 0;
do {
if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
|| ((paddr & (dmat->alignment - 1)) != 0))
&& (dmat->filter == NULL
|| (*dmat->filter)(dmat->filterarg, paddr) != 0))
retval = 1;
dmat = dmat->parent;
} while (retval == 0 && dmat != NULL);
return (retval);
}
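/*
 * Worked example of the bounce test above (userspace sketch, hypothetical
 * values).  A physical address bounces when it lies inside the tag's
 * exclusion window (lowaddr, highaddr] or violates the alignment, unless a
 * filter callback returns zero to accept it; no filter is modelled here.
 */
#include <stdio.h>

struct tag_sketch {
	unsigned long lowaddr;		/* top of the directly reachable range */
	unsigned long highaddr;
	unsigned long alignment;	/* power of two */
};

static int
needs_bounce(const struct tag_sketch *t, unsigned long paddr)
{
	return ((paddr > t->lowaddr && paddr <= t->highaddr) ||
	    (paddr & (t->alignment - 1)) != 0);
}

int
main(void)
{
	struct tag_sketch t = { 0x00ffffff, ~0UL, 4 };	/* 16 MB ISA-style limit */
	unsigned long addrs[] = { 0x00100000, 0x01000000, 0x00100002 };
	size_t i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("paddr 0x%08lx: %s\n", addrs[i],
		    needs_bounce(&t, addrs[i]) ? "bounce" : "direct");
	return (0);
}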
/*
* Check to see if the specified page is in an allowed DMA range.
*/
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
int i;
for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
|| (lowaddr < phys_avail[i] &&
highaddr > phys_avail[i]))
return (1);
}
return (0);
}
/*
* Convenience function for manipulating driver locks from busdma (during
* busdma_swi, for example). Drivers that don't provide their own locks
* should specify &Giant to dmat->lockfuncarg. Drivers that use their own
* non-mutex locking scheme don't have to use this at all.
*/
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
struct mtx *dmtx;
dmtx = (struct mtx *)arg;
switch (op) {
case BUS_DMA_LOCK:
mtx_lock(dmtx);
break;
case BUS_DMA_UNLOCK:
mtx_unlock(dmtx);
break;
default:
panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
}
}
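/*
 * Hedged driver-side sketch (not from this file, assumes the usual
 * <sys/param.h>, <sys/bus.h> and <machine/bus.h> environment): a tag whose
 * deferred-load callbacks are serialized by Giant via busdma_lock_mutex().
 * The alignment, size limits and the example_dma_tag_create() name are
 * hypothetical.
 */
static int
example_dma_tag_create(device_t dev, bus_dma_tag_t *tagp)
{
	return (bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* inherit platform restrictions */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: bounce above this */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXPHYS, 1, MAXPHYS,	/* maxsize, nsegments, maxsegsz */
	    0,				/* flags */
	    busdma_lock_mutex, &Giant,	/* serialize deferred callbacks */
	    tagp));
}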
/*
* dflt_lock should never get called. It gets put into the dma tag when
* lockfunc == NULL, which is only valid if the maps that are associated
* with the tag are meant to never be deferred.
* XXX Should have a way to identify which driver is responsible here.
*/
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
panic("driver error: busdma dflt_lock called");
#else
printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
static __inline bus_dmamap_t
_busdma_alloc_dmamap(bus_dma_tag_t dmat)
{
struct sync_list *slist;
bus_dmamap_t map;
- slist = mallocarray(dmat->nsegments, sizeof(*slist), M_BUSDMA,
- M_NOWAIT);
+ slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
if (slist == NULL)
return (NULL);
map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
if (map != NULL)
map->slist = slist;
else
free(slist, M_BUSDMA);
return (map);
}
static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
free(map->slist, M_BUSDMA);
uma_zfree(dmamap_zone, map);
}
/*
* Allocate a device specific dma_tag.
*/
#define SEG_NB 1024
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
bus_addr_t boundary, bus_addr_t lowaddr,
bus_addr_t highaddr, bus_dma_filter_t *filter,
void *filterarg, bus_size_t maxsize, int nsegments,
bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat)
{
bus_dma_tag_t newtag;
int error = 0;
/* Return a NULL tag on failure */
*dmat = NULL;
if (!parent)
parent = mips_root_dma_tag;
newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
if (newtag == NULL) {
CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
__func__, newtag, 0, error);
return (ENOMEM);
}
newtag->parent = parent;
newtag->alignment = alignment;
newtag->boundary = boundary;
newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
newtag->filter = filter;
newtag->filterarg = filterarg;
newtag->maxsize = maxsize;
newtag->nsegments = nsegments;
newtag->maxsegsz = maxsegsz;
newtag->flags = flags;
if (cpuinfo.cache_coherent_dma)
newtag->flags |= BUS_DMA_COHERENT;
newtag->ref_count = 1; /* Count ourself */
newtag->map_count = 0;
if (lockfunc != NULL) {
newtag->lockfunc = lockfunc;
newtag->lockfuncarg = lockfuncarg;
} else {
newtag->lockfunc = dflt_lock;
newtag->lockfuncarg = NULL;
}
newtag->segments = NULL;
/*
* Take into account any restrictions imposed by our parent tag
*/
if (parent != NULL) {
newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
if (newtag->boundary == 0)
newtag->boundary = parent->boundary;
else if (parent->boundary != 0)
newtag->boundary =
MIN(parent->boundary, newtag->boundary);
if ((newtag->filter != NULL) ||
((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
newtag->flags |= BUS_DMA_COULD_BOUNCE;
if (newtag->filter == NULL) {
/*
* Short circuit looking at our parent directly
* since we have encapsulated all of its information
*/
newtag->filter = parent->filter;
newtag->filterarg = parent->filterarg;
newtag->parent = parent->parent;
}
if (newtag->parent != NULL)
atomic_add_int(&parent->ref_count, 1);
}
if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
|| newtag->alignment > 1)
newtag->flags |= BUS_DMA_COULD_BOUNCE;
if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
(flags & BUS_DMA_ALLOCNOW) != 0) {
struct bounce_zone *bz;
/* Must bounce */
if ((error = alloc_bounce_zone(newtag)) != 0) {
free(newtag, M_BUSDMA);
return (error);
}
bz = newtag->bounce_zone;
if (ptoa(bz->total_bpages) < maxsize) {
int pages;
pages = atop(maxsize) - bz->total_bpages;
/* Add pages to our bounce pool */
if (alloc_bounce_pages(newtag, pages) < pages)
error = ENOMEM;
}
/* Performed initial allocation */
newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
} else
newtag->bounce_zone = NULL;
if (error != 0)
free(newtag, M_BUSDMA);
else
*dmat = newtag;
CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
__func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
bus_dma_tag_t dmat_copy = dmat;
#endif
if (dmat != NULL) {
if (dmat->map_count != 0)
return (EBUSY);
while (dmat != NULL) {
bus_dma_tag_t parent;
parent = dmat->parent;
atomic_subtract_int(&dmat->ref_count, 1);
if (dmat->ref_count == 0) {
if (dmat->segments != NULL)
free(dmat->segments, M_BUSDMA);
free(dmat, M_BUSDMA);
/*
* Last reference count, so
* release our reference
* count on our parent.
*/
dmat = parent;
} else
dmat = NULL;
}
}
CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
return (0);
}
#include <sys/kdb.h>
/*
* Allocate a handle for mapping from kva/uva/physical
* address space into bus device space.
*/
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
bus_dmamap_t newmap;
int error = 0;
if (dmat->segments == NULL) {
- dmat->segments =
- (bus_dma_segment_t *)mallocarray(dmat->nsegments,
- sizeof(bus_dma_segment_t), M_BUSDMA, M_NOWAIT);
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
+ M_NOWAIT);
if (dmat->segments == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
return (ENOMEM);
}
}
newmap = _busdma_alloc_dmamap(dmat);
if (newmap == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
return (ENOMEM);
}
*mapp = newmap;
/*
* Bouncing might be required if the driver asks for an active
* exclusion region, a data alignment that is stricter than 1, and/or
* an active address boundary.
*/
if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
/* Must bounce */
struct bounce_zone *bz;
int maxpages;
if (dmat->bounce_zone == NULL) {
if ((error = alloc_bounce_zone(dmat)) != 0) {
_busdma_free_dmamap(newmap);
*mapp = NULL;
return (error);
}
}
bz = dmat->bounce_zone;
/* Initialize the new map */
STAILQ_INIT(&((*mapp)->bpages));
/*
* Attempt to add pages to our pool on a per-instance
* basis up to a sane limit.
*/
maxpages = MAX_BPAGES;
if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
|| (bz->map_count > 0 && bz->total_bpages < maxpages)) {
int pages;
pages = MAX(atop(dmat->maxsize), 1);
pages = MIN(maxpages - bz->total_bpages, pages);
pages = MAX(pages, 1);
if (alloc_bounce_pages(dmat, pages) < pages)
error = ENOMEM;
if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
if (error == 0)
dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
} else {
error = 0;
}
}
bz->map_count++;
}
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, error);
return (0);
}
/*
* Destroy a handle for mapping from kva/uva/physical
* address space into bus device space.
*/
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, EBUSY);
return (EBUSY);
}
if (dmat->bounce_zone)
dmat->bounce_zone->map_count--;
_busdma_free_dmamap(map);
CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
return (0);
}
/*
* Allocate a piece of memory that can be efficiently mapped into
* bus device space based on the constraints listed in the dma tag.
* A dmamap for use with dmamap_load is also allocated.
*/
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
bus_dmamap_t *mapp)
{
bus_dmamap_t newmap = NULL;
busdma_bufalloc_t ba;
struct busdma_bufzone *bufzone;
vm_memattr_t memattr;
void *vaddr;
int mflags;
if (flags & BUS_DMA_NOWAIT)
mflags = M_NOWAIT;
else
mflags = M_WAITOK;
if (dmat->segments == NULL) {
- dmat->segments =
- (bus_dma_segment_t *)mallocarray(dmat->nsegments,
- sizeof(bus_dma_segment_t), M_BUSDMA, mflags);
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
+ mflags);
if (dmat->segments == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
return (ENOMEM);
}
}
newmap = _busdma_alloc_dmamap(dmat);
if (newmap == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
return (ENOMEM);
}
/*
* If all the memory is coherent with DMA then we don't need to
* do anything special for a coherent mapping request.
*/
if (dmat->flags & BUS_DMA_COHERENT)
flags &= ~BUS_DMA_COHERENT;
if (flags & BUS_DMA_COHERENT) {
memattr = VM_MEMATTR_UNCACHEABLE;
ba = coherent_allocator;
newmap->flags |= DMAMAP_UNCACHEABLE;
} else {
memattr = VM_MEMATTR_DEFAULT;
ba = standard_allocator;
}
/* All buffers we allocate are cache-aligned. */
newmap->flags |= DMAMAP_CACHE_ALIGNED;
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
/*
* Try to find a bufzone in the allocator that holds a cache of buffers
* of the right size for this request. If the buffer is too big to be
* held in the allocator cache, this returns NULL.
*/
bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
/*
* Allocate the buffer from the uma(9) allocator if...
* - It's small enough to be in the allocator (bufzone not NULL).
* - The alignment constraint isn't larger than the allocation size
* (the allocator aligns buffers to their size boundaries).
* - There's no need to handle lowaddr/highaddr exclusion zones.
* else allocate non-contiguous pages if...
* - The number of pages needed (maxsize divided by the smaller of
* maxsegsz and PAGE_SIZE) doesn't exceed nsegments.
* - The alignment constraint isn't larger than a page boundary.
* - There are no boundary-crossing constraints.
* else allocate a block of contiguous pages because one or more of the
* constraints is something that only the contig allocator can fulfill.
*/
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
vaddr = uma_zalloc(bufzone->umazone, mflags);
} else if (dmat->nsegments >=
howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
dmat->alignment <= PAGE_SIZE &&
(dmat->boundary % PAGE_SIZE) == 0) {
vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, memattr);
} else {
vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
if (vaddr == NULL) {
_busdma_free_dmamap(newmap);
newmap = NULL;
} else {
newmap->sync_count = 0;
}
*vaddrp = vaddr;
*mapp = newmap;
return (vaddr == NULL ? ENOMEM : 0);
}
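/*
 * The three-way choice above, restated as a stand-alone userspace sketch
 * (illustration only).  The outcomes correspond to the uma(9) bufzone path,
 * kmem_alloc_attr() and kmem_alloc_contig(); the struct, thresholds and
 * example values are hypothetical.
 */
#include <stdio.h>

enum alloc_path { PATH_UMA, PATH_PAGES, PATH_CONTIG };

struct constraints {
	unsigned long maxsize, alignment, boundary;
	unsigned long maxsegsz, nsegments;
	int can_bounce;			/* lowaddr/highaddr exclusion active */
	unsigned long bufzone_size;	/* 0 if no bufzone holds maxsize */
};

static enum alloc_path
choose_path(const struct constraints *c, unsigned long page_size)
{
	unsigned long seg = c->maxsegsz < page_size ? c->maxsegsz : page_size;
	unsigned long pages = (c->maxsize + seg - 1) / seg;

	if (c->bufzone_size != 0 && c->alignment <= c->bufzone_size &&
	    !c->can_bounce)
		return (PATH_UMA);
	if (c->nsegments >= pages && c->alignment <= page_size &&
	    (c->boundary % page_size) == 0)
		return (PATH_PAGES);
	return (PATH_CONTIG);
}

int
main(void)
{
	struct constraints small = { 2048, 8, 0, 65536, 1, 0, 2048 };
	struct constraints sg = { 262144, 4096, 0, 65536, 64, 0, 0 };
	struct constraints strict = { 262144, 65536, 65536, 65536, 1, 0, 0 };
	const char *names[] = { "uma bufzone", "non-contiguous pages",
	    "contiguous pages" };

	printf("small buffer: %s\n", names[choose_path(&small, 4096)]);
	printf("scatter/gather capable: %s\n", names[choose_path(&sg, 4096)]);
	printf("strict constraints: %s\n", names[choose_path(&strict, 4096)]);
	return (0);
}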
/*
* Free a piece of memory and its allocated dmamap, which were allocated
* via bus_dmamem_alloc. Make the same choice for free/contigfree.
*/
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
struct busdma_bufzone *bufzone;
busdma_bufalloc_t ba;
if (map->flags & DMAMAP_UNCACHEABLE)
ba = coherent_allocator;
else
ba = standard_allocator;
free(map->slist, M_BUSDMA);
uma_zfree(dmamap_zone, map);
bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
uma_zfree(bufzone->umazone, vaddr);
else
kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
if (map->pagesneeded == 0) {
CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
dmat->lowaddr, dmat->boundary, dmat->alignment);
CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
map, map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->maxsegsz);
if (run_filter(dmat, curaddr) != 0) {
sgsize = MIN(sgsize, PAGE_SIZE);
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
vm_offset_t vaddr;
vm_offset_t vendaddr;
bus_addr_t paddr;
if (map->pagesneeded == 0) {
CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
dmat->lowaddr, dmat->boundary, dmat->alignment);
CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
map, map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
vaddr = (vm_offset_t)buf;
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {
bus_size_t sg_len;
KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
paddr = pmap_kextract(vaddr);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
run_filter(dmat, paddr) != 0) {
sg_len = roundup2(sg_len, dmat->alignment);
map->pagesneeded++;
}
vaddr += sg_len;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags)
{
/* Reserve Necessary Bounce Pages */
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Add a single contiguous physical range to the segment list.
*/
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
*/
seg = *segp;
if (seg >= 0 &&
curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
segs[seg].ds_len += sgsize;
} else {
if (++seg >= dmat->nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
*segp = seg;
return (sgsize);
}
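/*
 * Worked example of the boundary clamp above (userspace sketch, hypothetical
 * values): with a 64 KB boundary, a chunk starting at 0x1f000 may only run
 * up to the next 64 KB line, so an 8 KB piece is trimmed to 4 KB and the
 * remainder becomes the next segment.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long boundary = 0x10000;		/* 64 KB */
	unsigned long curaddr = 0x1f000;
	unsigned long sgsize = 0x2000;			/* 8 KB */
	unsigned long bmask = ~(boundary - 1);
	unsigned long baddr;

	if (boundary > 0) {
		baddr = (curaddr + boundary) & bmask;	/* next boundary line */
		if (sgsize > baddr - curaddr)
			sgsize = baddr - curaddr;
	}
	printf("segment at 0x%lx trimmed to 0x%lx bytes\n", curaddr, sgsize);
	return (0);
}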
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_addr_t curaddr;
bus_size_t sgsize;
int error;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->maxsegsz);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE);
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
if (buflen != 0) {
bus_dmamap_unload(dmat, map);
return (EFBIG); /* XXX better return value here? */
}
return (0);
}
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
bus_dma_segment_t *segs, int *segp)
{
return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
segs, segp));
}
/*
* Utility function to load a linear buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr;
struct sync_list *sl;
vm_offset_t vaddr = (vm_offset_t)buf;
int error = 0;
if (segs == NULL)
segs = dmat->segments;
if ((flags & BUS_DMA_LOAD_MBUF) != 0)
map->flags |= DMAMAP_CACHE_ALIGNED;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
"alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
while (buflen > 0) {
/*
* Get the physical address for this segment.
*
* XXX Don't support checking for coherent mappings
* XXX in user address space.
*/
KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
curaddr = pmap_kextract(vaddr);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
if (sgsize > dmat->maxsegsz)
sgsize = dmat->maxsegsz;
if (buflen < sgsize)
sgsize = buflen;
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
sgsize);
} else {
sl = &map->slist[map->sync_count - 1];
if (map->sync_count == 0 ||
vaddr != sl->vaddr + sl->datacount) {
if (++map->sync_count > dmat->nsegments)
goto cleanup;
sl++;
sl->vaddr = vaddr;
sl->datacount = sgsize;
sl->busaddr = curaddr;
} else
sl->datacount += sgsize;
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
vaddr += sgsize;
buflen -= sgsize;
}
cleanup:
/*
* Did we fit?
*/
if (buflen != 0) {
bus_dmamap_unload(dmat, map);
error = EFBIG; /* XXX better return value here? */
}
return (error);
}
void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
KASSERT(dmat != NULL, ("dmatag is NULL"));
KASSERT(map != NULL, ("dmamap is NULL"));
map->mem = *mem;
map->callback = callback;
map->callback_arg = callback_arg;
}
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
if (segs == NULL)
segs = dmat->segments;
return (segs);
}
/*
* Release the mapping held by map.
*/
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
}
map->sync_count = 0;
return;
}
static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, int aligned)
{
char tmp_cl[mips_dcache_max_linesize], tmp_clend[mips_dcache_max_linesize];
vm_offset_t buf_cl, buf_clend;
vm_size_t size_cl, size_clend;
int cache_linesize_mask = mips_dcache_max_linesize - 1;
/*
* dcache invalidation operates on cache line aligned addresses
* and could modify areas of memory that share the same cache line
* at the beginning and the end of the buffer. In order to
* prevent data loss we save these chunks in a temporary buffer
* before invalidation and restore them after it.
*
* If the aligned flag is set the buffer is either an mbuf or came from
* our allocator caches. In both cases they are always sized and
* aligned to cacheline boundaries, so we can skip preserving nearby
* data if a transfer appears to overlap cachelines. An mbuf in
* particular will usually appear to be overlapped because of offsetting
* within the buffer to align the L3 headers, but we know that the bytes
* preceding that offset are part of the same mbuf memory and are not
* unrelated adjacent data (and a rule of mbuf handling is that the cpu
* is not allowed to touch the mbuf while dma is in progress, including
* header fields).
*/
if (aligned) {
size_cl = 0;
size_clend = 0;
} else {
buf_cl = buf & ~cache_linesize_mask;
size_cl = buf & cache_linesize_mask;
buf_clend = buf + len;
size_clend = (mips_dcache_max_linesize -
(buf_clend & cache_linesize_mask)) & cache_linesize_mask;
}
switch (op) {
case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
case BUS_DMASYNC_POSTREAD:
/*
* Save buffers that might be modified by invalidation
*/
if (size_cl)
memcpy (tmp_cl, (void*)buf_cl, size_cl);
if (size_clend)
memcpy (tmp_clend, (void*)buf_clend, size_clend);
mips_dcache_inv_range(buf, len);
/*
* Restore them
*/
if (size_cl)
memcpy ((void*)buf_cl, tmp_cl, size_cl);
if (size_clend)
memcpy ((void*)buf_clend, tmp_clend, size_clend);
/*
* Copies above have brought corresponding memory
* cache lines back into dirty state. Write them back
* out and invalidate affected cache lines again if
* necessary.
*/
if (size_cl)
mips_dcache_wbinv_range(buf_cl, size_cl);
if (size_clend && (size_cl == 0 ||
buf_clend - buf_cl > mips_dcache_max_linesize))
mips_dcache_wbinv_range(buf_clend, size_clend);
break;
case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
mips_dcache_wbinv_range(buf, len);
break;
case BUS_DMASYNC_PREREAD:
/*
* Save buffers that might be modified by invalidation
*/
if (size_cl)
memcpy (tmp_cl, (void *)buf_cl, size_cl);
if (size_clend)
memcpy (tmp_clend, (void *)buf_clend, size_clend);
mips_dcache_inv_range(buf, len);
/*
* Restore them
*/
if (size_cl)
memcpy ((void *)buf_cl, tmp_cl, size_cl);
if (size_clend)
memcpy ((void *)buf_clend, tmp_clend, size_clend);
/*
* Copies above have brought corresponding memory
* cache lines back into dirty state. Write them back
* out and invalidate affected cache lines again if
* necessary.
*/
if (size_cl)
mips_dcache_wbinv_range(buf_cl, size_cl);
if (size_clend && (size_cl == 0 ||
buf_clend - buf_cl > mips_dcache_max_linesize))
mips_dcache_wbinv_range(buf_clend, size_clend);
break;
case BUS_DMASYNC_PREWRITE:
#ifdef BUS_DMA_FORCE_WBINV
mips_dcache_wbinv_range(buf, len);
#else
mips_dcache_wb_range(buf, len);
#endif
break;
}
}
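/*
 * Userspace sketch of the partial cache line bookkeeping above (illustration
 * only, 32-byte line size assumed): compute the leading and trailing
 * fragments that must be saved before dcache invalidation and restored
 * afterwards for an unaligned buffer.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned long linesize = 32;
	const unsigned long mask = linesize - 1;
	unsigned long buf = 0x80041234, len = 100;
	unsigned long buf_cl, size_cl, buf_clend, size_clend;

	buf_cl = buf & ~mask;			/* line holding the first byte */
	size_cl = buf & mask;			/* unrelated bytes before the buffer */
	buf_clend = buf + len;			/* first byte past the buffer */
	size_clend = (linesize - (buf_clend & mask)) & mask;

	printf("save %lu bytes at 0x%lx and %lu bytes at 0x%lx\n",
	    size_cl, buf_cl, size_clend, buf_clend);
	return (0);
}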
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
struct bounce_page *bpage;
STAILQ_FOREACH(bpage, &map->bpages, links) {
if (op & BUS_DMASYNC_PREWRITE) {
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache :
bpage->vaddr),
bpage->datacount);
else
physcopyout(bpage->dataaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache :
bpage->vaddr),
bpage->datacount);
if (bpage->vaddr_nocache == 0) {
#ifdef BUS_DMA_FORCE_WBINV
mips_dcache_wbinv_range(bpage->vaddr,
bpage->datacount);
#else
mips_dcache_wb_range(bpage->vaddr,
bpage->datacount);
#endif
}
dmat->bounce_zone->total_bounced++;
}
if (op & BUS_DMASYNC_POSTREAD) {
if (bpage->vaddr_nocache == 0) {
mips_dcache_inv_range(bpage->vaddr,
bpage->datacount);
}
if (bpage->datavaddr != 0)
bcopy((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
(void *)bpage->datavaddr, bpage->datacount);
else
physcopyin((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
bpage->dataaddr, bpage->datacount);
dmat->bounce_zone->total_bounced++;
}
}
}
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
struct sync_list *sl, *end;
int aligned;
if (op == BUS_DMASYNC_POSTWRITE)
return;
if (STAILQ_FIRST(&map->bpages))
_bus_dmamap_sync_bp(dmat, map, op);
if ((dmat->flags & BUS_DMA_COHERENT) ||
(map->flags & DMAMAP_UNCACHEABLE)) {
if (op & BUS_DMASYNC_PREWRITE)
mips_sync();
return;
}
aligned = (map->flags & DMAMAP_CACHE_ALIGNED) ? 1 : 0;
CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
if (map->sync_count) {
end = &map->slist[map->sync_count];
for (sl = &map->slist[0]; sl != end; sl++)
bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
aligned);
}
}
static void
init_bounce_pages(void *dummy __unused)
{
total_bpages = 0;
STAILQ_INIT(&bounce_zone_list);
STAILQ_INIT(&bounce_map_waitinglist);
STAILQ_INIT(&bounce_map_callbacklist);
mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
return (&bz->sysctl_tree);
}
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
return (bz->sysctl_tree_top);
}
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
struct bounce_zone *bz;
/* Check to see if we already have a suitable zone */
STAILQ_FOREACH(bz, &bounce_zone_list, links) {
if ((dmat->alignment <= bz->alignment)
&& (dmat->lowaddr >= bz->lowaddr)) {
dmat->bounce_zone = bz;
return (0);
}
}
if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
M_NOWAIT | M_ZERO)) == NULL)
return (ENOMEM);
STAILQ_INIT(&bz->bounce_page_list);
bz->free_bpages = 0;
bz->reserved_bpages = 0;
bz->active_bpages = 0;
bz->lowaddr = dmat->lowaddr;
bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
bz->map_count = 0;
snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
busdma_zonecount++;
snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
dmat->bounce_zone = bz;
sysctl_ctx_init(&bz->sysctl_tree);
bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
CTLFLAG_RD, 0, "");
if (bz->sysctl_tree_top == NULL) {
sysctl_ctx_free(&bz->sysctl_tree);
return (0); /* XXX error code? */
}
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
"Total bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
"Free bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
"Reserved bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
"Active bounce pages");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
"Total bounce requests");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
"Total bounce requests that were deferred");
SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"alignment", CTLFLAG_RD, &bz->alignment, "");
return (0);
}
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
struct bounce_zone *bz;
int count;
bz = dmat->bounce_zone;
count = 0;
while (numpages > 0) {
struct bounce_page *bpage;
bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
M_NOWAIT | M_ZERO);
if (bpage == NULL)
break;
bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
M_NOWAIT, 0ul,
bz->lowaddr,
PAGE_SIZE,
0);
if (bpage->vaddr == 0) {
free(bpage, M_BUSDMA);
break;
}
bpage->busaddr = pmap_kextract(bpage->vaddr);
bpage->vaddr_nocache =
(vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
mtx_lock(&bounce_lock);
STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
total_bpages++;
bz->total_bpages++;
bz->free_bpages++;
mtx_unlock(&bounce_lock);
count++;
numpages--;
}
return (count);
}
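/*
 * Reserve free bounce pages from the map's zone.  Returns the number of
 * pages the map still needs; if "commit" is zero and the request cannot be
 * satisfied in full, nothing is reserved at all.
 */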
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
struct bounce_zone *bz;
int pages;
mtx_assert(&bounce_lock, MA_OWNED);
bz = dmat->bounce_zone;
pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
return (map->pagesneeded - (map->pagesreserved + pages));
bz->free_bpages -= pages;
bz->reserved_bpages += pages;
map->pagesreserved += pages;
pages = map->pagesneeded - map->pagesreserved;
return (pages);
}
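/*
 * Assign a bounce page to the given map, recording the original virtual
 * and physical addresses and the transfer size so the data can be copied
 * at sync time.  Returns the bus address of the bounce page, optionally
 * offset to preserve the original page offset (BUS_DMA_KEEP_PG_OFFSET).
 */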
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
bz = dmat->bounce_zone;
if (map->pagesneeded == 0)
panic("add_bounce_page: map doesn't need any pages");
map->pagesneeded--;
if (map->pagesreserved == 0)
panic("add_bounce_page: map doesn't need any pages");
map->pagesreserved--;
mtx_lock(&bounce_lock);
bpage = STAILQ_FIRST(&bz->bounce_page_list);
if (bpage == NULL)
panic("add_bounce_page: free page list is empty");
STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
bz->reserved_bpages--;
bz->active_bpages++;
mtx_unlock(&bounce_lock);
if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
/* Page offset needs to be preserved. */
bpage->vaddr |= addr & PAGE_MASK;
bpage->busaddr |= addr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
}
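/*
 * Return a bounce page to its zone's free list and, if a map is waiting
 * for pages and can now be fully reserved, move it to the callback list
 * and schedule the busdma software interrupt to finish the deferred load.
 */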
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
struct bus_dmamap *map;
struct bounce_zone *bz;
bz = dmat->bounce_zone;
bpage->datavaddr = 0;
bpage->datacount = 0;
if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
/*
* Reset the bounce page to start at offset 0. Other uses
* of this bounce page may need to store a full page of
* data and/or assume it starts on a page boundary.
*/
bpage->vaddr &= ~PAGE_MASK;
bpage->busaddr &= ~PAGE_MASK;
}
mtx_lock(&bounce_lock);
STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
bz->free_bpages++;
bz->active_bpages--;
if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
busdma_swi_pending = 1;
bz->total_deferred++;
swi_sched(vm_ih, 0);
}
}
mtx_unlock(&bounce_lock);
}
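/*
 * Software interrupt handler: retry the deferred bus_dmamap_load_mem()
 * requests for maps that were queued while waiting for bounce pages.
 */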
void
busdma_swi(void)
{
bus_dma_tag_t dmat;
struct bus_dmamap *map;
mtx_lock(&bounce_lock);
while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
map->callback_arg, BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}
mtx_unlock(&bounce_lock);
}
Index: head/sys/mips/nlm/dev/sec/nlmrsa.c
===================================================================
--- head/sys/mips/nlm/dev/sec/nlmrsa.c (revision 328217)
+++ head/sys/mips/nlm/dev/sec/nlmrsa.c (revision 328218)
@@ -1,558 +1,557 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/random.h>
#include <sys/rman.h>
#include <sys/uio.h>
#include <sys/kobj.h>
#include <dev/pci/pcivar.h>
#include <opencrypto/cryptodev.h>
#include "cryptodev_if.h"
#include <vm/vm.h>
#include <vm/pmap.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/xlp.h>
#include <mips/nlm/hal/sys.h>
#include <mips/nlm/hal/fmn.h>
#include <mips/nlm/hal/nlmsaelib.h>
#include <mips/nlm/dev/sec/rsa_ucode.h>
#include <mips/nlm/hal/cop2.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/msgring.h>
#include <mips/nlm/dev/sec/nlmrsalib.h>
#ifdef NLM_RSA_DEBUG
static void print_krp_params(struct cryptkop *krp);
#endif
static int xlp_rsa_init(struct xlp_rsa_softc *sc, int node);
static int xlp_rsa_newsession(device_t, uint32_t *, struct cryptoini *);
static int xlp_rsa_freesession(device_t, uint64_t);
static int xlp_rsa_kprocess(device_t, struct cryptkop *, int);
static int xlp_get_rsa_opsize(struct xlp_rsa_command *cmd, unsigned int bits);
static void xlp_free_cmd_params(struct xlp_rsa_command *cmd);
static int xlp_rsa_inp2hwformat(uint8_t *src, uint8_t *dst,
uint32_t paramsize, uint8_t result);
static int xlp_rsa_probe(device_t);
static int xlp_rsa_attach(device_t);
static int xlp_rsa_detach(device_t);
static device_method_t xlp_rsa_methods[] = {
/* device interface */
DEVMETHOD(device_probe, xlp_rsa_probe),
DEVMETHOD(device_attach, xlp_rsa_attach),
DEVMETHOD(device_detach, xlp_rsa_detach),
/* bus interface */
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* crypto device methods */
DEVMETHOD(cryptodev_newsession, xlp_rsa_newsession),
DEVMETHOD(cryptodev_freesession, xlp_rsa_freesession),
DEVMETHOD(cryptodev_kprocess, xlp_rsa_kprocess),
DEVMETHOD_END
};
static driver_t xlp_rsa_driver = {
"nlmrsa",
xlp_rsa_methods,
sizeof(struct xlp_rsa_softc)
};
static devclass_t xlp_rsa_devclass;
DRIVER_MODULE(nlmrsa, pci, xlp_rsa_driver, xlp_rsa_devclass, 0, 0);
MODULE_DEPEND(nlmrsa, crypto, 1, 1, 1);
#ifdef NLM_RSA_DEBUG
static void
print_krp_params(struct cryptkop *krp)
{
int i;
printf("krp->krp_op :%d\n", krp->krp_op);
printf("krp->krp_status :%d\n", krp->krp_status);
printf("krp->krp_iparams:%d\n", krp->krp_iparams);
printf("krp->krp_oparams:%d\n", krp->krp_oparams);
for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
printf("krp->krp_param[%d].crp_p :0x%llx\n", i,
(unsigned long long)krp->krp_param[i].crp_p);
printf("krp->krp_param[%d].crp_nbits :%d\n", i,
krp->krp_param[i].crp_nbits);
printf("krp->krp_param[%d].crp_nbytes :%d\n", i,
howmany(krp->krp_param[i].crp_nbits, 8));
}
}
#endif
static int
xlp_rsa_init(struct xlp_rsa_softc *sc, int node)
{
struct xlp_rsa_command *cmd = NULL;
uint32_t fbvc, dstvc, endsel, regval;
struct nlm_fmn_msg m;
int err, ret, i;
uint64_t base;
/* Register interrupt handler for the RSA/ECC CMS messages */
if (register_msgring_handler(sc->rsaecc_vc_start,
sc->rsaecc_vc_end, nlm_xlprsaecc_msgring_handler, sc) != 0) {
err = -1;
printf("Couldn't register rsa/ecc msgring handler\n");
goto errout;
}
fbvc = nlm_cpuid() * 4 + XLPGE_FB_VC;
	/*
	 * Do the CMS credit initialization.  Currently the credit is
	 * configured to a default of 50 when the kernel comes up.
	 */
#if BYTE_ORDER == LITTLE_ENDIAN
for (i = 0; i < nitems(nlm_rsa_ucode_data); i++)
nlm_rsa_ucode_data[i] = htobe64(nlm_rsa_ucode_data[i]);
#endif
for (dstvc = sc->rsaecc_vc_start; dstvc <= sc->rsaecc_vc_end; dstvc++) {
cmd = malloc(sizeof(struct xlp_rsa_command), M_DEVBUF,
M_NOWAIT | M_ZERO);
KASSERT(cmd != NULL, ("%s:cmd is NULL\n", __func__));
cmd->rsasrc = contigmalloc(sizeof(nlm_rsa_ucode_data),
M_DEVBUF,
(M_WAITOK | M_ZERO),
0UL /* low address */, -1UL /* high address */,
XLP_L2L3_CACHELINE_SIZE /* alignment */,
0UL /* boundary */);
KASSERT(cmd->rsasrc != NULL,
("%s:cmd->rsasrc is NULL\n", __func__));
memcpy(cmd->rsasrc, nlm_rsa_ucode_data,
sizeof(nlm_rsa_ucode_data));
m.msg[0] = nlm_crypto_form_rsa_ecc_fmn_entry0(1, 0x70, 0,
vtophys(cmd->rsasrc));
m.msg[1] = nlm_crypto_form_rsa_ecc_fmn_entry1(0, 1, fbvc,
vtophys(cmd->rsasrc));
/* Software scratch pad */
m.msg[2] = (uintptr_t)cmd;
m.msg[3] = 0;
ret = nlm_fmn_msgsend(dstvc, 3, FMN_SWCODE_RSA, &m);
if (ret != 0) {
err = -1;
printf("%s: msgsnd failed (%x)\n", __func__, ret);
goto errout;
}
}
/* Configure so that all VCs send request to all RSA pipes */
base = nlm_get_rsa_regbase(node);
if (nlm_is_xlp3xx()) {
endsel = 1;
regval = 0xFFFF;
} else {
endsel = 3;
regval = 0x07FFFFFF;
}
for (i = 0; i < endsel; i++)
nlm_write_rsa_reg(base, RSA_ENG_SEL_0 + i, regval);
return (0);
errout:
xlp_free_cmd_params(cmd);
return (err);
}
/* This function is called from an interrupt handler */
void
nlm_xlprsaecc_msgring_handler(int vc, int size, int code, int src_id,
struct nlm_fmn_msg *msg, void *data)
{
struct xlp_rsa_command *cmd;
struct xlp_rsa_softc *sc;
struct crparam *outparam;
int ostart;
KASSERT(code == FMN_SWCODE_RSA,
("%s: bad code = %d, expected code = %d\n", __func__, code,
FMN_SWCODE_RSA));
sc = data;
KASSERT(src_id >= sc->rsaecc_vc_start && src_id <= sc->rsaecc_vc_end,
("%s: bad src_id = %d, expect %d - %d\n", __func__,
src_id, sc->rsaecc_vc_start, sc->rsaecc_vc_end));
cmd = (struct xlp_rsa_command *)(uintptr_t)msg->msg[1];
KASSERT(cmd != NULL, ("%s:cmd not received properly\n", __func__));
if (RSA_ERROR(msg->msg[0]) != 0) {
printf("%s: Message rcv msg0 %llx msg1 %llx err %x \n",
__func__, (unsigned long long)msg->msg[0],
(unsigned long long)msg->msg[1],
(int)RSA_ERROR(msg->msg[0]));
cmd->krp->krp_status = EBADMSG;
}
if (cmd->krp != NULL) {
ostart = cmd->krp->krp_iparams;
outparam = &cmd->krp->krp_param[ostart];
xlp_rsa_inp2hwformat(cmd->rsasrc + cmd->rsaopsize * ostart,
outparam->crp_p,
howmany(outparam->crp_nbits, 8),
1);
crypto_kdone(cmd->krp);
}
xlp_free_cmd_params(cmd);
}
static int
xlp_rsa_probe(device_t dev)
{
struct xlp_rsa_softc *sc;
if (pci_get_vendor(dev) == PCI_VENDOR_NETLOGIC &&
pci_get_device(dev) == PCI_DEVICE_ID_NLM_RSA) {
sc = device_get_softc(dev);
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
/*
* Attach an interface that successfully probed.
*/
static int
xlp_rsa_attach(device_t dev)
{
struct xlp_rsa_softc *sc = device_get_softc(dev);
uint64_t base;
int qstart, qnum;
int freq, node;
sc->sc_dev = dev;
node = nlm_get_device_node(pci_get_slot(dev));
freq = nlm_set_device_frequency(node, DFS_DEVICE_RSA, 250);
if (bootverbose)
device_printf(dev, "RSA Freq: %dMHz\n", freq);
if (pci_get_device(dev) == PCI_DEVICE_ID_NLM_RSA) {
device_set_desc(dev, "XLP RSA/ECC Accelerator");
if ((sc->sc_cid = crypto_get_driverid(dev,
CRYPTOCAP_F_HARDWARE)) < 0) {
printf("xlp_rsaecc-err:couldn't get the driver id\n");
goto error_exit;
}
if (crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0) != 0)
goto error_exit;
base = nlm_get_rsa_pcibase(node);
qstart = nlm_qidstart(base);
qnum = nlm_qnum(base);
sc->rsaecc_vc_start = qstart;
sc->rsaecc_vc_end = qstart + qnum - 1;
}
if (xlp_rsa_init(sc, node) != 0)
goto error_exit;
device_printf(dev, "RSA Initialization complete!\n");
return (0);
error_exit:
return (ENXIO);
}
/*
* Detach an interface that successfully probed.
*/
static int
xlp_rsa_detach(device_t dev)
{
return (0);
}
/*
* Allocate a new 'session' and return an encoded session id. 'sidp'
* contains our registration id, and should contain an encoded session
* id on successful allocation.
*/
static int
xlp_rsa_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
struct xlp_rsa_softc *sc = device_get_softc(dev);
struct xlp_rsa_session *ses = NULL;
int sesn;
if (sidp == NULL || cri == NULL || sc == NULL)
return (EINVAL);
if (sc->sc_sessions == NULL) {
ses = sc->sc_sessions = malloc(sizeof(struct xlp_rsa_session),
M_DEVBUF, M_NOWAIT);
if (ses == NULL)
return (ENOMEM);
sesn = 0;
sc->sc_nsessions = 1;
} else {
for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
if (!sc->sc_sessions[sesn].hs_used) {
ses = &sc->sc_sessions[sesn];
break;
}
}
if (ses == NULL) {
sesn = sc->sc_nsessions;
- ses = mallocarray(sesn + 1, sizeof(*ses),
+ ses = malloc((sesn + 1) * sizeof(*ses),
M_DEVBUF, M_NOWAIT);
if (ses == NULL)
return (ENOMEM);
bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
bzero(sc->sc_sessions, sesn * sizeof(*ses));
free(sc->sc_sessions, M_DEVBUF);
sc->sc_sessions = ses;
ses = &sc->sc_sessions[sesn];
sc->sc_nsessions++;
}
}
bzero(ses, sizeof(*ses));
ses->sessionid = sesn;
ses->hs_used = 1;
*sidp = XLP_RSA_SID(device_get_unit(sc->sc_dev), sesn);
return (0);
}
/*
* Deallocate a session.
* XXX this routine should run a zero'd mac/encrypt key into context ram.
* XXX to blow away any keys already stored there.
*/
static int
xlp_rsa_freesession(device_t dev, u_int64_t tid)
{
struct xlp_rsa_softc *sc = device_get_softc(dev);
int session;
u_int32_t sid = CRYPTO_SESID2LID(tid);
if (sc == NULL)
return (EINVAL);
session = XLP_RSA_SESSION(sid);
if (session >= sc->sc_nsessions)
return (EINVAL);
sc->sc_sessions[session].hs_used = 0;
return (0);
}
static void
xlp_free_cmd_params(struct xlp_rsa_command *cmd)
{
if (cmd == NULL)
return;
if (cmd->rsasrc != NULL) {
if (cmd->krp == NULL) /* Micro code load */
contigfree(cmd->rsasrc, sizeof(nlm_rsa_ucode_data),
M_DEVBUF);
else
free(cmd->rsasrc, M_DEVBUF);
}
free(cmd, M_DEVBUF);
}
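/*
 * Select the RSA engine operand type and size for a modulus of the given
 * bit length.  The hardware works on fixed operand sizes, e.g. a 1024-bit
 * modulus uses type 0x41 with a 128-byte operand and an 8192-bit modulus
 * uses type 0x44 with a 1024-byte operand.
 */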
static int
xlp_get_rsa_opsize(struct xlp_rsa_command *cmd, unsigned int bits)
{
if (bits == 0 || bits > 8192)
return (-1);
	/*
	 * The XLP hardware always expects a fixed operand size, with the
	 * unused bytes zeroed out in the input data.
	 */
if (bits <= 512) {
cmd->rsatype = 0x40;
cmd->rsaopsize = 64;
} else if (bits <= 1024) {
cmd->rsatype = 0x41;
cmd->rsaopsize = 128;
} else if (bits <= 2048) {
cmd->rsatype = 0x42;
cmd->rsaopsize = 256;
} else if (bits <= 4096) {
cmd->rsatype = 0x43;
cmd->rsaopsize = 512;
} else if (bits <= 8192) {
cmd->rsatype = 0x44;
cmd->rsaopsize = 1024;
}
return (0);
}
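/*
 * Repack a parameter between the crypto layer's byte order and the RSA
 * engine's 64-bit word format by reversing the bytes within each 8-byte
 * word; "result" selects the copy direction for any trailing partial word.
 */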
static int
xlp_rsa_inp2hwformat(uint8_t *src, uint8_t *dst, uint32_t paramsize,
uint8_t result)
{
uint32_t pdwords, pbytes;
int i, j, k;
pdwords = paramsize / 8;
pbytes = paramsize % 8;
for (i = 0, k = 0; i < pdwords; i++) {
/* copy dwords of inp/hw to hw/out format */
for (j = 7; j >= 0; j--, k++)
dst[i * 8 + j] = src[k];
}
if (pbytes) {
if (result == 0) {
/* copy rem bytes of input data to hw format */
for (j = 7; k < paramsize; j--, k++)
dst[i * 8 + j] = src[k];
} else {
/* copy rem bytes of hw data to exp output format */
for (j = 7; k < paramsize; j--, k++)
dst[k] = src[i * 8 + j];
}
}
return (0);
}
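/*
 * Build the two-entry FMN message describing the source operands and the
 * result location for a prepared command and post it to the RSA engine's
 * first virtual channel, using the current CPU's free-back VC for the reply.
 */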
static int
nlm_crypto_complete_rsa_request(struct xlp_rsa_softc *sc,
struct xlp_rsa_command *cmd)
{
unsigned int fbvc;
struct nlm_fmn_msg m;
int ret;
fbvc = nlm_cpuid() * 4 + XLPGE_FB_VC;
m.msg[0] = nlm_crypto_form_rsa_ecc_fmn_entry0(1, cmd->rsatype,
cmd->rsafn, vtophys(cmd->rsasrc));
m.msg[1] = nlm_crypto_form_rsa_ecc_fmn_entry1(0, 1, fbvc,
vtophys(cmd->rsasrc + cmd->rsaopsize * cmd->krp->krp_iparams));
/* Software scratch pad */
m.msg[2] = (uintptr_t)cmd;
m.msg[3] = 0;
/* Send the message to rsa engine vc */
ret = nlm_fmn_msgsend(sc->rsaecc_vc_start, 3, FMN_SWCODE_RSA, &m);
if (ret != 0) {
#ifdef NLM_SEC_DEBUG
printf("%s: msgsnd failed (%x)\n", __func__, ret);
#endif
return (ERESTART);
}
return (0);
}
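/*
 * Handle an asymmetric crypto request.  Only CRK_MOD_EXP with three input
 * parameters and one output parameter is supported; the inputs are repacked
 * into a contiguous hardware-format buffer and the request is posted to the
 * RSA engine.  Completion is reported asynchronously via
 * nlm_xlprsaecc_msgring_handler().
 */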
static int
xlp_rsa_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
struct xlp_rsa_softc *sc = device_get_softc(dev);
struct xlp_rsa_command *cmd;
struct crparam *kp;
int err, i;
if (krp == NULL || krp->krp_callback == NULL)
return (EINVAL);
cmd = malloc(sizeof(struct xlp_rsa_command), M_DEVBUF,
M_NOWAIT | M_ZERO);
KASSERT(cmd != NULL, ("%s:cmd is NULL\n", __func__));
cmd->krp = krp;
#ifdef NLM_RSA_DEBUG
print_krp_params(krp);
#endif
err = EOPNOTSUPP;
switch (krp->krp_op) {
case CRK_MOD_EXP:
if (krp->krp_iparams == 3 && krp->krp_oparams == 1)
break;
goto errout;
default:
device_printf(dev, "Op:%d not yet supported\n", krp->krp_op);
goto errout;
}
err = xlp_get_rsa_opsize(cmd,
krp->krp_param[krp->krp_iparams - 1].crp_nbits);
if (err != 0) {
err = EINVAL;
goto errout;
}
cmd->rsafn = 0; /* Mod Exp */
- cmd->rsasrc = mallocarray(
- krp->krp_iparams + krp->krp_oparams,
- cmd->rsaopsize,
+ cmd->rsasrc = malloc(
+ cmd->rsaopsize * (krp->krp_iparams + krp->krp_oparams),
M_DEVBUF,
M_NOWAIT | M_ZERO);
if (cmd->rsasrc == NULL) {
err = ENOMEM;
goto errout;
}
for (i = 0, kp = krp->krp_param; i < krp->krp_iparams; i++, kp++) {
KASSERT(kp->crp_nbits != 0,
("%s: parameter[%d]'s length is zero\n", __func__, i));
xlp_rsa_inp2hwformat(kp->crp_p,
cmd->rsasrc + i * cmd->rsaopsize,
howmany(kp->crp_nbits, 8), 0);
}
err = nlm_crypto_complete_rsa_request(sc, cmd);
if (err != 0)
goto errout;
return (0);
errout:
xlp_free_cmd_params(cmd);
krp->krp_status = err;
crypto_kdone(krp);
return (err);
}
Index: head/sys/net/if_vlan.c
===================================================================
--- head/sys/net/if_vlan.c (revision 328217)
+++ head/sys/net/if_vlan.c (revision 328218)
@@ -1,2059 +1,2059 @@
/*-
* Copyright 1998 Massachusetts Institute of Technology
* Copyright 2012 ADARA Networks, Inc.
* Copyright 2017 Dell EMC Isilon
*
* Portions of this software were developed by Robert N. M. Watson under
* contract to ADARA Networks, Inc.
*
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that both the above copyright notice and this
* permission notice appear in all copies, that both the above
* copyright notice and this permission notice appear in all
* supporting documentation, and that the name of M.I.T. not be used
* in advertising or publicity pertaining to distribution of the
* software without specific, written prior permission. M.I.T. makes
* no representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied
* warranty.
*
* THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
* ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
* SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
* This is sort of sneaky in the implementation, since
* we need to pretend to be enough of an Ethernet implementation
* to make arp work. The way we do this is by telling everyone
* that we are an Ethernet, and then catch the packets that
* ether_output() sends to us via if_transmit(), rewrite them for
* use by the real outgoing interface, and ask it to send them.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_vlan.h"
#include "opt_ratelimit.h"
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/vnet.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#define VLAN_DEF_HWIDTH 4
#define VLAN_IFFLAGS (IFF_BROADCAST | IFF_MULTICAST)
#define UP_AND_RUNNING(ifp) \
((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)
LIST_HEAD(ifvlanhead, ifvlan);
struct ifvlantrunk {
struct ifnet *parent; /* parent interface of this trunk */
struct rmlock lock;
#ifdef VLAN_ARRAY
#define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1)
struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
struct ifvlanhead *hash; /* dynamic hash-list table */
uint16_t hmask;
uint16_t hwidth;
#endif
int refcnt;
};
/*
* This macro provides a facility to iterate over every vlan on a trunk with
* the assumption that none will be added/removed during iteration.
*/
#ifdef VLAN_ARRAY
#define VLAN_FOREACH(_ifv, _trunk) \
size_t _i; \
for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \
if (((_ifv) = (_trunk)->vlans[_i]) != NULL)
#else /* VLAN_ARRAY */
#define VLAN_FOREACH(_ifv, _trunk) \
struct ifvlan *_next; \
size_t _i; \
for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
LIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */
/*
* This macro provides a facility to iterate over every vlan on a trunk while
* also modifying the number of vlans on the trunk. The iteration continues
* until some condition is met or there are no more vlans on the trunk.
*/
#ifdef VLAN_ARRAY
/* The VLAN_ARRAY case is simple -- just a for loop using the condition. */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
size_t _i; \
for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \
if (((_ifv) = (_trunk)->vlans[_i]))
#else /* VLAN_ARRAY */
/*
* The hash table case is more complicated. We allow for the hash table to be
* modified (i.e. vlans removed) while we are iterating over it. To allow for
* this we must restart the iteration every time we "touch" something during
* the iteration, since removal will resize the hash table and invalidate our
* current position. If acting on the touched element causes the trunk to be
* emptied, then iteration also stops.
*/
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
size_t _i; \
bool _touch = false; \
for (_i = 0; \
!(_cond) && _i < (1 << (_trunk)->hwidth); \
_i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
if (((_ifv) = LIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
(_touch = true))
#endif /* VLAN_ARRAY */
struct vlan_mc_entry {
struct sockaddr_dl mc_addr;
SLIST_ENTRY(vlan_mc_entry) mc_entries;
};
struct ifvlan {
struct ifvlantrunk *ifv_trunk;
struct ifnet *ifv_ifp;
#define TRUNK(ifv) ((ifv)->ifv_trunk)
#define PARENT(ifv) ((ifv)->ifv_trunk->parent)
void *ifv_cookie;
int ifv_pflags; /* special flags we have set on parent */
int ifv_capenable;
struct ifv_linkmib {
int ifvm_encaplen; /* encapsulation length */
int ifvm_mtufudge; /* MTU fudged by this much */
int ifvm_mintu; /* min transmission unit */
uint16_t ifvm_proto; /* encapsulation ethertype */
uint16_t ifvm_tag; /* tag to apply on packets leaving if */
uint16_t ifvm_vid; /* VLAN ID */
uint8_t ifvm_pcp; /* Priority Code Point (PCP). */
} ifv_mib;
struct task lladdr_task;
SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
LIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define ifv_proto ifv_mib.ifvm_proto
#define ifv_tag ifv_mib.ifvm_tag
#define ifv_vid ifv_mib.ifvm_vid
#define ifv_pcp ifv_mib.ifvm_pcp
#define ifv_encaplen ifv_mib.ifvm_encaplen
#define ifv_mtufudge ifv_mib.ifvm_mtufudge
#define ifv_mintu ifv_mib.ifvm_mintu
/* Special flags we should propagate to parent. */
static struct {
int flag;
int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
{IFF_PROMISC, ifpromisc},
{IFF_ALLMULTI, if_allmulti},
{0, NULL}
};
SYSCTL_DECL(_net_link);
static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0,
"IEEE 802.1Q VLAN");
static SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0,
"for consistency");
static VNET_DEFINE(int, soft_pad);
#define V_soft_pad VNET(soft_pad)
SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
&VNET_NAME(soft_pad), 0, "pad short frames before tagging");
/*
* For now, make preserving PCP via an mbuf tag optional, as it increases
* per-packet memory allocations and frees. In the future, it would be
* preferable to reuse ether_vtag for this, or similar.
*/
static int vlan_mtag_pcp = 0;
SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW, &vlan_mtag_pcp, 0,
"Retain VLAN PCP information as packets are passed up the stack");
static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");
static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;
/*
* if_vlan uses two module-level locks to allow concurrent modification of vlan
* interfaces and (mostly) allow for vlans to be destroyed while they are being
* used for tx/rx. To accomplish this in a way that has acceptable performance
* and cooperation with other parts of the network stack there is a
* non-sleepable rmlock(9) and an sx(9). Both locks are exclusively acquired
* when destroying a vlan interface, i.e. when the if_vlantrunk field of struct
* ifnet is de-allocated and NULL'd. Thus a reader holding either lock has a
* guarantee that the struct ifvlantrunk references a valid vlan trunk.
*
* The performance-sensitive paths that warrant using the rmlock(9) are
* vlan_transmit and vlan_input. Both have to check for the vlan interface's
* existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an rmlock(9) gives a measurable improvement in performance.
*
* The reason for having an sx(9) is mostly because there are still areas that
* must be sleepable and also have safe concurrent access to a vlan interface.
* Since the sx(9) exists, it is used by default in most paths unless sleeping
* is not permitted, or if it is not clear whether sleeping is permitted.
*
* Note that despite these protections, there is still an inherent race in the
* destruction of vlans since there's no guarantee that the ifnet hasn't been
* freed/reused when the tx/rx functions are called by the stack. This can only
* be fixed by addressing ifnet's lifetime issues.
*/
#define _VLAN_RM_ID ifv_rm_lock
#define _VLAN_SX_ID ifv_sx
static struct rmlock _VLAN_RM_ID;
static struct sx _VLAN_SX_ID;
#define VLAN_LOCKING_INIT() \
rm_init(&_VLAN_RM_ID, "vlan_rm"); \
sx_init(&_VLAN_SX_ID, "vlan_sx")
#define VLAN_LOCKING_DESTROY() \
rm_destroy(&_VLAN_RM_ID); \
sx_destroy(&_VLAN_SX_ID)
#define _VLAN_RM_TRACKER _vlan_rm_tracker
#define VLAN_RLOCK() rm_rlock(&_VLAN_RM_ID, \
&_VLAN_RM_TRACKER)
#define VLAN_RUNLOCK() rm_runlock(&_VLAN_RM_ID, \
&_VLAN_RM_TRACKER)
#define VLAN_WLOCK() rm_wlock(&_VLAN_RM_ID)
#define VLAN_WUNLOCK() rm_wunlock(&_VLAN_RM_ID)
#define VLAN_RLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_RLOCKED)
#define VLAN_WLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_WLOCKED)
#define VLAN_RWLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_LOCKED)
#define VLAN_LOCK_READER struct rm_priotracker _VLAN_RM_TRACKER
#define VLAN_SLOCK() sx_slock(&_VLAN_SX_ID)
#define VLAN_SUNLOCK() sx_sunlock(&_VLAN_SX_ID)
#define VLAN_XLOCK() sx_xlock(&_VLAN_SX_ID)
#define VLAN_XUNLOCK() sx_xunlock(&_VLAN_SX_ID)
#define VLAN_SLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define VLAN_XLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define VLAN_SXLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_LOCKED)
/*
* We also have a per-trunk rmlock(9), that is locked shared on packet
* processing and exclusive when configuration is changed. Note: This should
* only be acquired while there is a shared lock on either of the global locks
* via VLAN_SLOCK or VLAN_RLOCK. Thus, an exclusive lock on the global locks
* makes a call to TRUNK_RLOCK/TRUNK_WLOCK technically superfluous.
*/
#define _TRUNK_RM_TRACKER _trunk_rm_tracker
#define TRUNK_LOCK_INIT(trunk) rm_init(&(trunk)->lock, vlanname)
#define TRUNK_LOCK_DESTROY(trunk) rm_destroy(&(trunk)->lock)
#define TRUNK_RLOCK(trunk) rm_rlock(&(trunk)->lock, \
&_TRUNK_RM_TRACKER)
#define TRUNK_WLOCK(trunk) rm_wlock(&(trunk)->lock)
#define TRUNK_RUNLOCK(trunk) rm_runlock(&(trunk)->lock, \
&_TRUNK_RM_TRACKER)
#define TRUNK_WUNLOCK(trunk) rm_wunlock(&(trunk)->lock)
#define TRUNK_RLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_RLOCKED)
#define TRUNK_LOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_LOCKED)
#define TRUNK_WLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_WLOCKED)
#define TRUNK_LOCK_READER struct rm_priotracker _TRUNK_RM_TRACKER
/*
* The VLAN_ARRAY substitutes the dynamic hash with a static array
 * with 4096 entries.  In theory this can give a boost in processing;
 * in practice it does not, probably because the array is too big to
 * fit into the CPU cache.
*/
#ifndef VLAN_ARRAY
static void vlan_inithash(struct ifvlantrunk *trunk);
static void vlan_freehash(struct ifvlantrunk *trunk);
static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
uint16_t vid);
#endif
static void trunk_destroy(struct ifvlantrunk *trunk);
static void vlan_init(void *foo);
static void vlan_input(struct ifnet *ifp, struct mbuf *m);
static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#ifdef RATELIMIT
static int vlan_snd_tag_alloc(struct ifnet *,
union if_snd_tag_alloc_params *, struct m_snd_tag **);
#endif
static void vlan_qflush(struct ifnet *ifp);
static int vlan_setflag(struct ifnet *ifp, int flag, int status,
int (*func)(struct ifnet *, int));
static int vlan_setflags(struct ifnet *ifp, int status);
static int vlan_setmulti(struct ifnet *ifp);
static int vlan_transmit(struct ifnet *ifp, struct mbuf *m);
static void vlan_unconfig(struct ifnet *ifp);
static void vlan_unconfig_locked(struct ifnet *ifp, int departing);
static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static void vlan_link_state(struct ifnet *ifp);
static void vlan_capabilities(struct ifvlan *ifv);
static void vlan_trunk_capabilities(struct ifnet *ifp);
static struct ifnet *vlan_clone_match_ethervid(const char *, int *);
static int vlan_clone_match(struct if_clone *, const char *);
static int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static int vlan_clone_destroy(struct if_clone *, struct ifnet *);
static void vlan_ifdetach(void *arg, struct ifnet *ifp);
static void vlan_iflladdr(void *arg, struct ifnet *ifp);
static void vlan_lladdr_fn(void *arg, int pending);
static struct if_clone *vlan_cloner;
#ifdef VIMAGE
static VNET_DEFINE(struct if_clone *, vlan_cloner);
#define V_vlan_cloner VNET(vlan_cloner)
#endif
#ifndef VLAN_ARRAY
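/*
 * Fold a 12-bit VLAN ID into a hash bucket index.  With the default
 * 16-bucket table (hmask 0xf) this reduces to XORing the three nibbles
 * of the VID, e.g. VID 0x123 maps to bucket 1 ^ 2 ^ 3 = 0.
 */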
#define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))
static void
vlan_inithash(struct ifvlantrunk *trunk)
{
int i, n;
/*
* The trunk must not be locked here since we call malloc(M_WAITOK).
* It is OK in case this function is called before the trunk struct
* gets hooked up and becomes visible from other threads.
*/
KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
("%s: hash already initialized", __func__));
trunk->hwidth = VLAN_DEF_HWIDTH;
n = 1 << trunk->hwidth;
trunk->hmask = n - 1;
trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
for (i = 0; i < n; i++)
LIST_INIT(&trunk->hash[i]);
}
static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
int i;
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
for (i = 0; i < (1 << trunk->hwidth); i++)
KASSERT(LIST_EMPTY(&trunk->hash[i]),
("%s: hash table not empty", __func__));
#endif
free(trunk->hash, M_VLAN);
trunk->hash = NULL;
trunk->hwidth = trunk->hmask = 0;
}
static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
int i, b;
struct ifvlan *ifv2;
TRUNK_WLOCK_ASSERT(trunk);
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
b = 1 << trunk->hwidth;
i = HASH(ifv->ifv_vid, trunk->hmask);
LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
if (ifv->ifv_vid == ifv2->ifv_vid)
return (EEXIST);
	/*
	 * Grow the hash when the number of vlans exceeds half of the number
	 * of hash buckets squared.  This keeps the average linked-list
	 * length around buckets/2, e.g. with the default 16-bucket table
	 * the hash is grown once more than 128 vlans are configured.
	 */
if (trunk->refcnt > (b * b) / 2) {
vlan_growhash(trunk, 1);
i = HASH(ifv->ifv_vid, trunk->hmask);
}
LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
trunk->refcnt++;
return (0);
}
static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
int i, b;
struct ifvlan *ifv2;
TRUNK_WLOCK_ASSERT(trunk);
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
b = 1 << trunk->hwidth;
i = HASH(ifv->ifv_vid, trunk->hmask);
LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
if (ifv2 == ifv) {
trunk->refcnt--;
LIST_REMOVE(ifv2, ifv_list);
if (trunk->refcnt < (b * b) / 2)
vlan_growhash(trunk, -1);
return (0);
}
panic("%s: vlan not found\n", __func__);
return (ENOENT); /*NOTREACHED*/
}
/*
* Grow the hash larger or smaller if memory permits.
*/
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
struct ifvlan *ifv;
struct ifvlanhead *hash2;
int hwidth2, i, j, n, n2;
TRUNK_WLOCK_ASSERT(trunk);
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
if (howmuch == 0) {
/* Harmless yet obvious coding error */
printf("%s: howmuch is 0\n", __func__);
return;
}
hwidth2 = trunk->hwidth + howmuch;
n = 1 << trunk->hwidth;
n2 = 1 << hwidth2;
/* Do not shrink the table below the default */
if (hwidth2 < VLAN_DEF_HWIDTH)
return;
/* M_NOWAIT because we're called with trunk mutex held */
- hash2 = mallocarray(n2, sizeof(struct ifvlanhead), M_VLAN, M_NOWAIT);
+ hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
if (hash2 == NULL) {
printf("%s: out of memory -- hash size not changed\n",
__func__);
return; /* We can live with the old hash table */
}
for (j = 0; j < n2; j++)
LIST_INIT(&hash2[j]);
for (i = 0; i < n; i++)
while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
LIST_REMOVE(ifv, ifv_list);
j = HASH(ifv->ifv_vid, n2 - 1);
LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
}
free(trunk->hash, M_VLAN);
trunk->hash = hash2;
trunk->hwidth = hwidth2;
trunk->hmask = n2 - 1;
if (bootverbose)
if_printf(trunk->parent,
"VLAN hash table resized from %d to %d buckets\n", n, n2);
}
static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{
struct ifvlan *ifv;
TRUNK_RLOCK_ASSERT(trunk);
LIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list)
if (ifv->ifv_vid == vid)
return (ifv);
return (NULL);
}
#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
int i;
struct ifvlan *ifv;
for (i = 0; i < (1 << trunk->hwidth); i++) {
printf("%d: ", i);
LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
printf("%s ", ifv->ifv_ifp->if_xname);
printf("\n");
}
}
#endif /* 0 */
#else
static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{
	return (trunk->vlans[vid]);
}
static __inline int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
if (trunk->vlans[ifv->ifv_vid] != NULL)
		return (EEXIST);
trunk->vlans[ifv->ifv_vid] = ifv;
trunk->refcnt++;
return (0);
}
static __inline int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
trunk->vlans[ifv->ifv_vid] = NULL;
trunk->refcnt--;
return (0);
}
static __inline void
vlan_freehash(struct ifvlantrunk *trunk)
{
}
static __inline void
vlan_inithash(struct ifvlantrunk *trunk)
{
}
#endif /* !VLAN_ARRAY */
static void
trunk_destroy(struct ifvlantrunk *trunk)
{
VLAN_XLOCK_ASSERT();
VLAN_WLOCK_ASSERT();
vlan_freehash(trunk);
trunk->parent->if_vlantrunk = NULL;
TRUNK_LOCK_DESTROY(trunk);
if_rele(trunk->parent);
free(trunk, M_VLAN);
}
/*
* Program our multicast filter. What we're actually doing is
* programming the multicast filter of the parent. This has the
* side effect of causing the parent interface to receive multicast
* traffic that it doesn't really want, which ends up being discarded
* later by the upper protocol layers. Unfortunately, there's no way
* to avoid this: there really is only one physical interface.
*/
static int
vlan_setmulti(struct ifnet *ifp)
{
struct ifnet *ifp_p;
struct ifmultiaddr *ifma;
struct ifvlan *sc;
struct vlan_mc_entry *mc;
int error;
/*
* XXX This stupidly needs the rmlock to avoid sleeping while holding
* the in6_multi_mtx (see in6_mc_join_locked).
*/
VLAN_RWLOCK_ASSERT();
/* Find the parent. */
sc = ifp->if_softc;
TRUNK_WLOCK_ASSERT(TRUNK(sc));
ifp_p = PARENT(sc);
CURVNET_SET_QUIET(ifp_p->if_vnet);
/* First, remove any existing filter entries. */
while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
free(mc, M_VLAN);
}
/* Now program new ones. */
IF_ADDR_WLOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
if (mc == NULL) {
IF_ADDR_WUNLOCK(ifp);
return (ENOMEM);
}
bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
mc->mc_addr.sdl_index = ifp_p->if_index;
SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
}
IF_ADDR_WUNLOCK(ifp);
SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
NULL);
if (error)
return (error);
}
CURVNET_RESTORE();
return (0);
}
/*
* A handler for parent interface link layer address changes.
* If the parent interface link layer address is changed we
* should also change it on all children vlans.
*/
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
struct ifvlan *ifv;
struct ifnet *ifv_ifp;
struct ifvlantrunk *trunk;
struct sockaddr_dl *sdl;
VLAN_LOCK_READER;
/* Need the rmlock since this is run on taskqueue_swi. */
VLAN_RLOCK();
trunk = ifp->if_vlantrunk;
if (trunk == NULL) {
VLAN_RUNLOCK();
return;
}
/*
* OK, it's a trunk. Loop over and change all vlan's lladdrs on it.
* We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR
* ioctl calls on the parent garbling the lladdr of the child vlan.
*/
TRUNK_WLOCK(trunk);
VLAN_FOREACH(ifv, trunk) {
/*
		 * Copy the new lladdr into the ifv_ifp and enqueue a task
* to actually call if_setlladdr. if_setlladdr needs to
* be deferred to a taskqueue because it will call into
* the if_vlan ioctl path and try to acquire the global
* lock.
*/
ifv_ifp = ifv->ifv_ifp;
bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
ifp->if_addrlen);
sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
sdl->sdl_alen = ifp->if_addrlen;
taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
}
TRUNK_WUNLOCK(trunk);
VLAN_RUNLOCK();
}
/*
* A handler for network interface departure events.
* Track departure of trunks here so that we don't access invalid
* pointers or whatever if a trunk is ripped from under us, e.g.,
* by ejecting its hot-plug card. However, if an ifnet is simply
* being renamed, then there's no need to tear down the state.
*/
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
struct ifvlan *ifv;
struct ifvlantrunk *trunk;
/* If the ifnet is just being renamed, don't do anything. */
if (ifp->if_flags & IFF_RENAMING)
return;
VLAN_XLOCK();
trunk = ifp->if_vlantrunk;
if (trunk == NULL) {
VLAN_XUNLOCK();
return;
}
/*
* OK, it's a trunk. Loop over and detach all vlan's on it.
* Check trunk pointer after each vlan_unconfig() as it will
* free it and set to NULL after the last vlan was detached.
*/
VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
ifp->if_vlantrunk == NULL)
vlan_unconfig_locked(ifv->ifv_ifp, 1);
/* Trunk should have been destroyed in vlan_unconfig(). */
KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
VLAN_XUNLOCK();
}
/*
* Return the trunk device for a virtual interface.
*/
static struct ifnet *
vlan_trunkdev(struct ifnet *ifp)
{
struct ifvlan *ifv;
VLAN_LOCK_READER;
if (ifp->if_type != IFT_L2VLAN)
return (NULL);
/* Not clear if callers are sleepable, so acquire the rmlock. */
VLAN_RLOCK();
ifv = ifp->if_softc;
ifp = NULL;
if (ifv->ifv_trunk)
ifp = PARENT(ifv);
VLAN_RUNLOCK();
return (ifp);
}
/*
* Return the 12-bit VLAN VID for this interface, for use by external
* components such as Infiniband.
*
* XXXRW: Note that the function name here is historical; it should be named
* vlan_vid().
*/
static int
vlan_tag(struct ifnet *ifp, uint16_t *vidp)
{
struct ifvlan *ifv;
if (ifp->if_type != IFT_L2VLAN)
return (EINVAL);
ifv = ifp->if_softc;
*vidp = ifv->ifv_vid;
return (0);
}
/*
* Return a driver specific cookie for this interface. Synchronization
* with setcookie must be provided by the driver.
*/
static void *
vlan_cookie(struct ifnet *ifp)
{
struct ifvlan *ifv;
if (ifp->if_type != IFT_L2VLAN)
return (NULL);
ifv = ifp->if_softc;
return (ifv->ifv_cookie);
}
/*
* Store a cookie in our softc that drivers can use to store driver
* private per-instance data in.
*/
static int
vlan_setcookie(struct ifnet *ifp, void *cookie)
{
struct ifvlan *ifv;
if (ifp->if_type != IFT_L2VLAN)
return (EINVAL);
ifv = ifp->if_softc;
ifv->ifv_cookie = cookie;
return (0);
}
/*
* Return the vlan device present at the specific VID.
*/
static struct ifnet *
vlan_devat(struct ifnet *ifp, uint16_t vid)
{
struct ifvlantrunk *trunk;
struct ifvlan *ifv;
VLAN_LOCK_READER;
TRUNK_LOCK_READER;
/* Not clear if callers are sleepable, so acquire the rmlock. */
VLAN_RLOCK();
trunk = ifp->if_vlantrunk;
if (trunk == NULL) {
VLAN_RUNLOCK();
return (NULL);
}
ifp = NULL;
TRUNK_RLOCK(trunk);
ifv = vlan_gethash(trunk, vid);
if (ifv)
ifp = ifv->ifv_ifp;
TRUNK_RUNLOCK(trunk);
VLAN_RUNLOCK();
return (ifp);
}
/*
* Recalculate the cached VLAN tag exposed via the MIB.
*/
static void
vlan_tag_recalculate(struct ifvlan *ifv)
{
ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0);
}
/*
* VLAN support can be loaded as a module. The only place in the
* system that's intimately aware of this is ether_input. We hook
* into this code through vlan_input_p which is defined there and
* set here. No one else in the system should be aware of this so
* we use an explicit reference here.
*/
extern void (*vlan_input_p)(struct ifnet *, struct mbuf *);
/* For if_link_state_change() eyes only... */
extern void (*vlan_link_state_p)(struct ifnet *);
static int
vlan_modevent(module_t mod, int type, void *data)
{
switch (type) {
case MOD_LOAD:
ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
if (ifdetach_tag == NULL)
return (ENOMEM);
iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
if (iflladdr_tag == NULL)
return (ENOMEM);
VLAN_LOCKING_INIT();
vlan_input_p = vlan_input;
vlan_link_state_p = vlan_link_state;
vlan_trunk_cap_p = vlan_trunk_capabilities;
vlan_trunkdev_p = vlan_trunkdev;
vlan_cookie_p = vlan_cookie;
vlan_setcookie_p = vlan_setcookie;
vlan_tag_p = vlan_tag;
vlan_devat_p = vlan_devat;
#ifndef VIMAGE
vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
vlan_clone_create, vlan_clone_destroy);
#endif
if (bootverbose)
printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
"full-size arrays"
#else
"hash tables with chaining"
#endif
"\n");
break;
case MOD_UNLOAD:
#ifndef VIMAGE
if_clone_detach(vlan_cloner);
#endif
EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag);
vlan_input_p = NULL;
vlan_link_state_p = NULL;
vlan_trunk_cap_p = NULL;
vlan_trunkdev_p = NULL;
vlan_tag_p = NULL;
vlan_cookie_p = NULL;
vlan_setcookie_p = NULL;
vlan_devat_p = NULL;
VLAN_LOCKING_DESTROY();
if (bootverbose)
printf("vlan: unloaded\n");
break;
default:
return (EOPNOTSUPP);
}
return (0);
}
static moduledata_t vlan_mod = {
"if_vlan",
vlan_modevent,
0
};
DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);
#ifdef VIMAGE
static void
vnet_vlan_init(const void *unused __unused)
{
vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
vlan_clone_create, vlan_clone_destroy);
V_vlan_cloner = vlan_cloner;
}
VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
vnet_vlan_init, NULL);
static void
vnet_vlan_uninit(const void *unused __unused)
{
if_clone_detach(V_vlan_cloner);
}
VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
vnet_vlan_uninit, NULL);
#endif
/*
* Check for <etherif>.<vlan> style interface names.
*/
static struct ifnet *
vlan_clone_match_ethervid(const char *name, int *vidp)
{
char ifname[IFNAMSIZ];
char *cp;
struct ifnet *ifp;
int vid;
strlcpy(ifname, name, IFNAMSIZ);
if ((cp = strchr(ifname, '.')) == NULL)
return (NULL);
*cp = '\0';
if ((ifp = ifunit_ref(ifname)) == NULL)
return (NULL);
/* Parse VID. */
if (*++cp == '\0') {
if_rele(ifp);
return (NULL);
}
vid = 0;
	for (; *cp >= '0' && *cp <= '9'; cp++)
vid = (vid * 10) + (*cp - '0');
if (*cp != '\0') {
if_rele(ifp);
return (NULL);
}
if (vidp != NULL)
*vidp = vid;
return (ifp);
}
static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
const char *cp;
if (vlan_clone_match_ethervid(name, NULL) != NULL)
return (1);
if (strncmp(vlanname, name, strlen(vlanname)) != 0)
return (0);
for (cp = name + 4; *cp != '\0'; cp++) {
if (*cp < '0' || *cp > '9')
return (0);
}
return (1);
}
static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
char *dp;
int wildcard;
int unit;
int error;
int vid;
struct ifvlan *ifv;
struct ifnet *ifp;
struct ifnet *p;
struct ifaddr *ifa;
struct sockaddr_dl *sdl;
struct vlanreq vlr;
static const u_char eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */
/*
* There are 3 (ugh) ways to specify the cloned device:
* o pass a parameter block with the clone request.
* o specify parameters in the text of the clone device name
* o specify no parameters and get an unattached device that
* must be configured separately.
* The first technique is preferred; the latter two are
* supported for backwards compatibility.
*
* XXXRW: Note historic use of the word "tag" here. New ioctls may be
* called for.
*/
if (params) {
error = copyin(params, &vlr, sizeof(vlr));
if (error)
			return (error);
p = ifunit_ref(vlr.vlr_parent);
if (p == NULL)
return (ENXIO);
error = ifc_name2unit(name, &unit);
if (error != 0) {
if_rele(p);
return (error);
}
vid = vlr.vlr_tag;
wildcard = (unit < 0);
} else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) {
unit = -1;
wildcard = 0;
} else {
p = NULL;
error = ifc_name2unit(name, &unit);
if (error != 0)
return (error);
wildcard = (unit < 0);
}
error = ifc_alloc_unit(ifc, &unit);
if (error != 0) {
if (p != NULL)
if_rele(p);
return (error);
}
/* In the wildcard case, we need to update the name. */
if (wildcard) {
for (dp = name; *dp != '\0'; dp++);
if (snprintf(dp, len - (dp-name), "%d", unit) >
len - (dp-name) - 1) {
panic("%s: interface name too long", __func__);
}
}
ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
ifc_free_unit(ifc, unit);
free(ifv, M_VLAN);
if (p != NULL)
if_rele(p);
return (ENOSPC);
}
SLIST_INIT(&ifv->vlan_mc_listhead);
ifp->if_softc = ifv;
/*
* Set the name manually rather than using if_initname because
* we don't conform to the default naming convention for interfaces.
*/
strlcpy(ifp->if_xname, name, IFNAMSIZ);
ifp->if_dname = vlanname;
ifp->if_dunit = unit;
/* NB: flags are not set here */
ifp->if_linkmib = &ifv->ifv_mib;
ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
/* NB: mtu is not set here */
ifp->if_init = vlan_init;
ifp->if_transmit = vlan_transmit;
ifp->if_qflush = vlan_qflush;
ifp->if_ioctl = vlan_ioctl;
#ifdef RATELIMIT
ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
#endif
ifp->if_flags = VLAN_IFFLAGS;
ether_ifattach(ifp, eaddr);
/* Now undo some of the damage... */
ifp->if_baudrate = 0;
ifp->if_type = IFT_L2VLAN;
ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
ifa = ifp->if_addr;
sdl = (struct sockaddr_dl *)ifa->ifa_addr;
sdl->sdl_type = IFT_L2VLAN;
if (p != NULL) {
error = vlan_config(ifv, p, vid);
if_rele(p);
if (error != 0) {
/*
* Since we've partially failed, we need to back
* out all the way, otherwise userland could get
* confused. Thus, we destroy the interface.
*/
ether_ifdetach(ifp);
vlan_unconfig(ifp);
if_free(ifp);
ifc_free_unit(ifc, unit);
free(ifv, M_VLAN);
return (error);
}
}
return (0);
}
static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
struct ifvlan *ifv = ifp->if_softc;
int unit = ifp->if_dunit;
ether_ifdetach(ifp); /* first, remove it from system-wide lists */
vlan_unconfig(ifp); /* now it can be unconfigured and freed */
/*
* We should have the only reference to the ifv now, so we can now
* drain any remaining lladdr task before freeing the ifnet and the
* ifvlan.
*/
taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
if_free(ifp);
free(ifv, M_VLAN);
ifc_free_unit(ifc, unit);
return (0);
}
/*
* The ifp->if_init entry point for vlan(4) is a no-op.
*/
static void
vlan_init(void *foo __unused)
{
}
/*
* The if_transmit method for vlan(4) interface.
*/
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct ifvlan *ifv;
struct ifnet *p;
struct m_tag *mtag;
uint16_t tag;
int error, len, mcast;
VLAN_LOCK_READER;
VLAN_RLOCK();
ifv = ifp->if_softc;
if (TRUNK(ifv) == NULL) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
VLAN_RUNLOCK();
m_freem(m);
return (ENETDOWN);
}
p = PARENT(ifv);
len = m->m_pkthdr.len;
mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
BPF_MTAP(ifp, m);
/*
* Do not run parent's if_transmit() if the parent is not up,
* or parent's driver will cause a system crash.
*/
if (!UP_AND_RUNNING(p)) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
VLAN_RUNLOCK();
m_freem(m);
return (ENETDOWN);
}
/*
* Pad the frame to the minimum size allowed if told to.
* This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
* paragraph C.4.4.3.b. It can help to work around buggy
* bridges that violate paragraph C.4.4.3.a from the same
* document, i.e., fail to pad short frames after untagging.
* E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
* untagging it will produce a 62-byte frame, which is a runt
* and requires padding. There are VLAN-enabled network
* devices that just discard such runts instead or mishandle
* them somehow.
*/
if (V_soft_pad && p->if_type == IFT_ETHER) {
static char pad[8]; /* just zeros */
int n;
for (n = ETHERMIN + ETHER_HDR_LEN - m->m_pkthdr.len;
n > 0; n -= sizeof(pad))
if (!m_append(m, min(n, sizeof(pad)), pad))
break;
if (n > 0) {
if_printf(ifp, "cannot pad short frame\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
VLAN_RUNLOCK();
m_freem(m);
return (0);
}
}
/*
* If underlying interface can do VLAN tag insertion itself,
* just pass the packet along. However, we need some way to
* tell the interface where the packet came from so that it
* knows how to find the VLAN tag to use, so we attach a
* packet tag that holds it.
*/
if (vlan_mtag_pcp && (mtag = m_tag_locate(m, MTAG_8021Q,
MTAG_8021Q_PCP_OUT, NULL)) != NULL)
tag = EVL_MAKETAG(ifv->ifv_vid, *(uint8_t *)(mtag + 1), 0);
else
tag = ifv->ifv_tag;
if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
m->m_pkthdr.ether_vtag = tag;
m->m_flags |= M_VLANTAG;
} else {
m = ether_vlanencap(m, tag);
if (m == NULL) {
if_printf(ifp, "unable to prepend VLAN header\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
VLAN_RUNLOCK();
return (0);
}
}
/*
* Send it, precisely as ether_output() would have.
*/
error = (p->if_transmit)(p, m);
if (error == 0) {
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
} else
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
VLAN_RUNLOCK();
return (error);
}
/*
* The ifp->if_qflush entry point for vlan(4) is a no-op.
*/
static void
vlan_qflush(struct ifnet *ifp __unused)
{
}
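/*
 * Receive a frame from the parent (trunk) interface: recover the VLAN tag
 * either from the out-of-band M_VLANTAG mbuf flag or from an in-band 802.1q
 * header, look up the member vlan interface by VID, optionally record the
 * PCP in an mbuf tag, and pass the untagged frame to the vlan interface's
 * input routine.
 */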
static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
struct ifvlantrunk *trunk;
struct ifvlan *ifv;
VLAN_LOCK_READER;
TRUNK_LOCK_READER;
struct m_tag *mtag;
uint16_t vid, tag;
VLAN_RLOCK();
trunk = ifp->if_vlantrunk;
if (trunk == NULL) {
VLAN_RUNLOCK();
m_freem(m);
return;
}
if (m->m_flags & M_VLANTAG) {
/*
* Packet is tagged, but m contains a normal
* Ethernet frame; the tag is stored out-of-band.
*/
tag = m->m_pkthdr.ether_vtag;
m->m_flags &= ~M_VLANTAG;
} else {
struct ether_vlan_header *evl;
/*
* Packet is tagged in-band as specified by 802.1q.
*/
switch (ifp->if_type) {
case IFT_ETHER:
if (m->m_len < sizeof(*evl) &&
(m = m_pullup(m, sizeof(*evl))) == NULL) {
if_printf(ifp, "cannot pullup VLAN header\n");
VLAN_RUNLOCK();
return;
}
evl = mtod(m, struct ether_vlan_header *);
tag = ntohs(evl->evl_tag);
/*
* Remove the 802.1q header by copying the Ethernet
* addresses over it and adjusting the beginning of
* the data in the mbuf. The encapsulated Ethernet
* type field is already in place.
*/
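/*
* E.g. [dst(6) src(6) 0x8100(2) TCI(2) type(2)] becomes
* [dst(6) src(6) type(2)]: the 12 address bytes are slid forward
* by ETHER_VLAN_ENCAP_LEN (4) and m_adj() trims the now-stale
* leading 4 bytes.
*/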
bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
ETHER_HDR_LEN - ETHER_TYPE_LEN);
m_adj(m, ETHER_VLAN_ENCAP_LEN);
break;
default:
#ifdef INVARIANTS
panic("%s: %s has unsupported if_type %u",
__func__, ifp->if_xname, ifp->if_type);
#endif
if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
VLAN_RUNLOCK();
m_freem(m);
return;
}
}
vid = EVL_VLANOFTAG(tag);
TRUNK_RLOCK(trunk);
ifv = vlan_gethash(trunk, vid);
if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
TRUNK_RUNLOCK(trunk);
if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
VLAN_RUNLOCK();
m_freem(m);
return;
}
TRUNK_RUNLOCK(trunk);
if (vlan_mtag_pcp) {
/*
* While uncommon, it is possible that we will find an 802.1q
* packet encapsulated inside another packet that also had an
* 802.1q header, e.g. Ethernet tunneled over IPsec arriving
* over Ethernet. In that case, we replace the
* existing 802.1q PCP m_tag value.
*/
mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
if (mtag == NULL) {
mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN,
sizeof(uint8_t), M_NOWAIT);
if (mtag == NULL) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
VLAN_RUNLOCK();
m_freem(m);
return;
}
m_tag_prepend(m, mtag);
}
*(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag);
}
m->m_pkthdr.rcvif = ifv->ifv_ifp;
if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1);
VLAN_RUNLOCK();
/* Pass it back through the parent's input routine. */
(*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m);
}
static void
vlan_lladdr_fn(void *arg, int pending __unused)
{
struct ifvlan *ifv;
struct ifnet *ifp;
ifv = (struct ifvlan *)arg;
ifp = ifv->ifv_ifp;
/* The ifv_ifp already has the lladdr copied in. */
if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen);
}
static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
{
struct ifvlantrunk *trunk;
struct ifnet *ifp;
int error = 0;
/*
* We can handle non-ethernet hardware types as long as
* they handle the tagging and headers themselves.
*/
if (p->if_type != IFT_ETHER &&
(p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
return (EPROTONOSUPPORT);
if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
return (EPROTONOSUPPORT);
/*
* Don't let the caller set up a VLAN VID with
* anything except VLID bits.
* VID numbers 0x0 and 0xFFF are reserved.
*/
if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK))
return (EINVAL);
if (ifv->ifv_trunk)
return (EBUSY);
/* Acquire rmlock after the branch so we can M_WAITOK. */
VLAN_XLOCK();
if (p->if_vlantrunk == NULL) {
trunk = malloc(sizeof(struct ifvlantrunk),
M_VLAN, M_WAITOK | M_ZERO);
vlan_inithash(trunk);
TRUNK_LOCK_INIT(trunk);
VLAN_WLOCK();
TRUNK_WLOCK(trunk);
p->if_vlantrunk = trunk;
trunk->parent = p;
if_ref(trunk->parent);
} else {
VLAN_WLOCK();
trunk = p->if_vlantrunk;
TRUNK_WLOCK(trunk);
}
ifv->ifv_vid = vid; /* must set this before vlan_inshash() */
ifv->ifv_pcp = 0; /* Default: best effort delivery. */
vlan_tag_recalculate(ifv);
error = vlan_inshash(trunk, ifv);
if (error)
goto done;
ifv->ifv_proto = ETHERTYPE_VLAN;
ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
ifv->ifv_mintu = ETHERMIN;
ifv->ifv_pflags = 0;
ifv->ifv_capenable = -1;
/*
* If the parent supports the VLAN_MTU capability,
* i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
* use it.
*/
if (p->if_capenable & IFCAP_VLAN_MTU) {
/*
* No need to fudge the MTU since the parent can
* handle extended frames.
*/
ifv->ifv_mtufudge = 0;
} else {
/*
* Fudge the MTU by the encapsulation size. This
* makes us incompatible with strictly compliant
* 802.1Q implementations, but allows us to use
* the feature with other NetBSD implementations,
* which might still be useful.
*/
ifv->ifv_mtufudge = ifv->ifv_encaplen;
}
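/*
* E.g. with a 1500-byte parent MTU and no IFCAP_VLAN_MTU, the vlan
* interface ends up with an MTU of 1500 - ETHER_VLAN_ENCAP_LEN =
* 1496 bytes (see the if_mtu assignment below).
*/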
ifv->ifv_trunk = trunk;
ifp = ifv->ifv_ifp;
/*
* Initialize fields from our parent. This duplicates some
* work with ether_ifattach() but allows for non-ethernet
* interfaces to also work.
*/
ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
ifp->if_baudrate = p->if_baudrate;
ifp->if_output = p->if_output;
ifp->if_input = p->if_input;
ifp->if_resolvemulti = p->if_resolvemulti;
ifp->if_addrlen = p->if_addrlen;
ifp->if_broadcastaddr = p->if_broadcastaddr;
/*
* Copy only a selected subset of flags from the parent.
* Other flags are none of our business.
*/
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
ifp->if_flags &= ~VLAN_COPY_FLAGS;
ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS
ifp->if_link_state = p->if_link_state;
vlan_capabilities(ifv);
/*
* Set up our interface address to reflect the underlying
* physical interface's.
*/
bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen);
((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
p->if_addrlen;
/*
* Configure multicast addresses that may already be
* joined on the vlan device.
*/
(void)vlan_setmulti(ifp);
TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);
/* We are ready for operation now. */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
/* Update flags on the parent, if necessary. */
vlan_setflags(ifp, 1);
done:
/*
* We need to drop the non-sleepable rmlock so that the underlying
* devices can sleep in their vlan_config hooks.
*/
TRUNK_WUNLOCK(trunk);
VLAN_WUNLOCK();
if (error == 0)
EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
VLAN_XUNLOCK();
return (error);
}
static void
vlan_unconfig(struct ifnet *ifp)
{
VLAN_XLOCK();
vlan_unconfig_locked(ifp, 0);
VLAN_XUNLOCK();
}
static void
vlan_unconfig_locked(struct ifnet *ifp, int departing)
{
struct ifvlantrunk *trunk;
struct vlan_mc_entry *mc;
struct ifvlan *ifv;
struct ifnet *parent;
int error;
VLAN_XLOCK_ASSERT();
ifv = ifp->if_softc;
trunk = ifv->ifv_trunk;
parent = NULL;
if (trunk != NULL) {
/*
* Both vlan_transmit and vlan_input rely on the trunk fields
* being NULL to determine whether to bail, so we need to get
* an exclusive lock here to prevent them from using bad
* ifvlans.
*/
VLAN_WLOCK();
parent = trunk->parent;
/*
* Since the interface is being unconfigured, we need to
* empty the list of multicast groups that we may have joined
* while we were alive from the parent's list.
*/
while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
/*
* If the parent interface is being detached,
* all its multicast addresses have already
* been removed. Warn about errors if
* if_delmulti() does fail, but don't abort as
* all callers expect vlan destruction to
* succeed.
*/
if (!departing) {
error = if_delmulti(parent,
(struct sockaddr *)&mc->mc_addr);
if (error)
if_printf(ifp,
"Failed to delete multicast address from parent: %d\n",
error);
}
SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
free(mc, M_VLAN);
}
vlan_setflags(ifp, 0); /* clear special flags on parent */
/*
* The trunk lock isn't actually required here, but
* vlan_remhash expects it.
*/
TRUNK_WLOCK(trunk);
vlan_remhash(trunk, ifv);
TRUNK_WUNLOCK(trunk);
ifv->ifv_trunk = NULL;
/*
* Check if we were the last.
*/
if (trunk->refcnt == 0) {
parent->if_vlantrunk = NULL;
trunk_destroy(trunk);
}
VLAN_WUNLOCK();
}
/* Disconnect from parent. */
if (ifv->ifv_pflags)
if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
ifp->if_mtu = ETHERMTU;
ifp->if_link_state = LINK_STATE_UNKNOWN;
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/*
* Only dispatch an event if vlan was
* attached, otherwise there is nothing
* to cleanup anyway.
*/
if (parent != NULL)
EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid);
}
/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
int (*func)(struct ifnet *, int))
{
struct ifvlan *ifv;
int error;
VLAN_SXLOCK_ASSERT();
ifv = ifp->if_softc;
status = status ? (ifp->if_flags & flag) : 0;
/* Now "status" contains the flag value or 0 */
/*
* See if recorded parent's status is different from what
* we want it to be. If it is, flip it. We record parent's
* status in ifv_pflags so that we won't clear parent's flag
* we haven't set. In fact, we don't clear or set parent's
* flags directly, but get or release references to them.
* That's why we can be sure that recorded flags still are
* in accord with actual parent's flags.
*/
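/*
* For example, when IFF_PROMISC is propagated, the matching handler
* from the vlan_pflags table (defined elsewhere in this file)
* acquires a promiscuity reference on the parent; the bit recorded
* in ifv_pflags ensures that a later vlan_setflags(ifp, 0) releases
* that reference exactly once.
*/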
if (status != (ifv->ifv_pflags & flag)) {
error = (*func)(PARENT(ifv), status);
if (error)
return (error);
ifv->ifv_pflags &= ~flag;
ifv->ifv_pflags |= status;
}
return (0);
}
/*
* Handle IFF_* flags that require certain changes on the parent:
* if "status" is true, update parent's flags respective to our if_flags;
* if "status" is false, forcedly clear the flags set on parent.
*/
static int
vlan_setflags(struct ifnet *ifp, int status)
{
int error, i;
for (i = 0; vlan_pflags[i].flag; i++) {
error = vlan_setflag(ifp, vlan_pflags[i].flag,
status, vlan_pflags[i].func);
if (error)
return (error);
}
return (0);
}
/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp)
{
struct ifvlantrunk *trunk;
struct ifvlan *ifv;
VLAN_LOCK_READER;
/* Called from a taskqueue_swi task, so we cannot sleep. */
VLAN_RLOCK();
trunk = ifp->if_vlantrunk;
if (trunk == NULL) {
VLAN_RUNLOCK();
return;
}
TRUNK_WLOCK(trunk);
VLAN_FOREACH(ifv, trunk) {
ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
if_link_state_change(ifv->ifv_ifp,
trunk->parent->if_link_state);
}
TRUNK_WUNLOCK(trunk);
VLAN_RUNLOCK();
}
static void
vlan_capabilities(struct ifvlan *ifv)
{
struct ifnet *p;
struct ifnet *ifp;
struct ifnet_hw_tsomax hw_tsomax;
int cap = 0, ena = 0, mena;
u_long hwa = 0;
VLAN_SXLOCK_ASSERT();
TRUNK_WLOCK_ASSERT(TRUNK(ifv));
p = PARENT(ifv);
ifp = ifv->ifv_ifp;
/* Mask parent interface enabled capabilities disabled by user. */
mena = p->if_capenable & ifv->ifv_capenable;
/*
* If the parent interface can do checksum offloading
* on VLANs, then propagate its hardware-assisted
* checksumming flags. Also assert that checksum
* offloading requires hardware VLAN tagging.
*/
if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
p->if_capenable & IFCAP_VLAN_HWTAGGING) {
ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
if (ena & IFCAP_TXCSUM)
hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
CSUM_UDP | CSUM_SCTP);
if (ena & IFCAP_TXCSUM_IPV6)
hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
}
/*
* If the parent interface can do TSO on VLANs then
* propagate the hardware-assisted flag. TSO on VLANs
* does not necessarily require hardware VLAN tagging.
*/
memset(&hw_tsomax, 0, sizeof(hw_tsomax));
if_hw_tsomax_common(p, &hw_tsomax);
if_hw_tsomax_update(ifp, &hw_tsomax);
if (p->if_capabilities & IFCAP_VLAN_HWTSO)
cap |= p->if_capabilities & IFCAP_TSO;
if (p->if_capenable & IFCAP_VLAN_HWTSO) {
ena |= mena & IFCAP_TSO;
if (ena & IFCAP_TSO)
hwa |= p->if_hwassist & CSUM_TSO;
}
/*
* If the parent interface can do LRO and checksum offloading on
* VLANs, then guess that it can also do LRO on VLANs. A false
* positive here costs nothing, while a false negative may lead to
* some confusion.
*/
if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
cap |= p->if_capabilities & IFCAP_LRO;
if (p->if_capenable & IFCAP_VLAN_HWCSUM)
ena |= p->if_capenable & IFCAP_LRO;
/*
* If the parent interface can offload TCP connections over VLANs then
* propagate its TOE capability to the VLAN interface.
*
* All TOE drivers in the tree today can deal with VLANs. If this
* changes then IFCAP_VLAN_TOE should be promoted to a full capability
* with its own bit.
*/
#define IFCAP_VLAN_TOE IFCAP_TOE
if (p->if_capabilities & IFCAP_VLAN_TOE)
cap |= p->if_capabilities & IFCAP_TOE;
if (p->if_capenable & IFCAP_VLAN_TOE) {
TOEDEV(ifp) = TOEDEV(p);
ena |= mena & IFCAP_TOE;
}
/*
* If the parent interface supports dynamic link state, so does the
* VLAN interface.
*/
cap |= (p->if_capabilities & IFCAP_LINKSTATE);
ena |= (mena & IFCAP_LINKSTATE);
#ifdef RATELIMIT
/*
* If the parent interface supports ratelimiting, so does the
* VLAN interface.
*/
cap |= (p->if_capabilities & IFCAP_TXRTLMT);
ena |= (mena & IFCAP_TXRTLMT);
#endif
ifp->if_capabilities = cap;
ifp->if_capenable = ena;
ifp->if_hwassist = hwa;
}
static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
struct ifvlantrunk *trunk;
struct ifvlan *ifv;
VLAN_SLOCK();
trunk = ifp->if_vlantrunk;
if (trunk == NULL) {
VLAN_SUNLOCK();
return;
}
TRUNK_WLOCK(trunk);
VLAN_FOREACH(ifv, trunk) {
vlan_capabilities(ifv);
}
TRUNK_WUNLOCK(trunk);
VLAN_SUNLOCK();
}
static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct ifnet *p;
struct ifreq *ifr;
struct ifaddr *ifa;
struct ifvlan *ifv;
struct ifvlantrunk *trunk;
struct vlanreq vlr;
int error = 0;
VLAN_LOCK_READER;
ifr = (struct ifreq *)data;
ifa = (struct ifaddr *) data;
ifv = ifp->if_softc;
switch (cmd) {
case SIOCSIFADDR:
ifp->if_flags |= IFF_UP;
#ifdef INET
if (ifa->ifa_addr->sa_family == AF_INET)
arp_ifinit(ifp, ifa);
#endif
break;
case SIOCGIFADDR:
{
struct sockaddr *sa;
sa = (struct sockaddr *)&ifr->ifr_data;
bcopy(IF_LLADDR(ifp), sa->sa_data, ifp->if_addrlen);
}
break;
case SIOCGIFMEDIA:
VLAN_SLOCK();
if (TRUNK(ifv) != NULL) {
p = PARENT(ifv);
if_ref(p);
error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
if_rele(p);
/* Limit the result to the parent's current config. */
if (error == 0) {
struct ifmediareq *ifmr;
ifmr = (struct ifmediareq *)data;
if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
ifmr->ifm_count = 1;
error = copyout(&ifmr->ifm_current,
ifmr->ifm_ulist,
sizeof(int));
}
}
} else {
error = EINVAL;
}
VLAN_SUNLOCK();
break;
case SIOCSIFMEDIA:
error = EINVAL;
break;
case SIOCSIFMTU:
/*
* Set the interface MTU.
*/
VLAN_SLOCK();
trunk = TRUNK(ifv);
if (trunk != NULL) {
TRUNK_WLOCK(trunk);
if (ifr->ifr_mtu >
(PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
ifr->ifr_mtu <
(ifv->ifv_mintu - ifv->ifv_mtufudge))
error = EINVAL;
else
ifp->if_mtu = ifr->ifr_mtu;
TRUNK_WUNLOCK(trunk);
} else
error = EINVAL;
VLAN_SUNLOCK();
break;
case SIOCSETVLAN:
#ifdef VIMAGE
/*
* XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
* interface to be delegated to a jail without allowing the
* jail to change what underlying interface/VID it is
* associated with. We are not entirely convinced that this
* is the right way to accomplish that policy goal.
*/
if (ifp->if_vnet != ifp->if_home_vnet) {
error = EPERM;
break;
}
#endif
error = copyin(ifr->ifr_data, &vlr, sizeof(vlr));
if (error)
break;
if (vlr.vlr_parent[0] == '\0') {
vlan_unconfig(ifp);
break;
}
p = ifunit_ref(vlr.vlr_parent);
if (p == NULL) {
error = ENOENT;
break;
}
error = vlan_config(ifv, p, vlr.vlr_tag);
if_rele(p);
break;
case SIOCGETVLAN:
#ifdef VIMAGE
if (ifp->if_vnet != ifp->if_home_vnet) {
error = EPERM;
break;
}
#endif
bzero(&vlr, sizeof(vlr));
VLAN_SLOCK();
if (TRUNK(ifv) != NULL) {
strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
sizeof(vlr.vlr_parent));
vlr.vlr_tag = ifv->ifv_vid;
}
VLAN_SUNLOCK();
error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
break;
case SIOCSIFFLAGS:
/*
* We should propagate selected flags to the parent,
* e.g., promiscuous mode.
*/
VLAN_XLOCK();
if (TRUNK(ifv) != NULL)
error = vlan_setflags(ifp, 1);
VLAN_XUNLOCK();
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
/*
* If we don't have a parent, just remember the membership for
* when we do.
*
* XXX We need the rmlock here to avoid sleeping while
* holding in6_multi_mtx.
*/
VLAN_RLOCK();
trunk = TRUNK(ifv);
if (trunk != NULL) {
TRUNK_WLOCK(trunk);
error = vlan_setmulti(ifp);
TRUNK_WUNLOCK(trunk);
}
VLAN_RUNLOCK();
break;
case SIOCGVLANPCP:
#ifdef VIMAGE
if (ifp->if_vnet != ifp->if_home_vnet) {
error = EPERM;
break;
}
#endif
ifr->ifr_vlan_pcp = ifv->ifv_pcp;
break;
case SIOCSVLANPCP:
#ifdef VIMAGE
if (ifp->if_vnet != ifp->if_home_vnet) {
error = EPERM;
break;
}
#endif
error = priv_check(curthread, PRIV_NET_SETVLANPCP);
if (error)
break;
if (ifr->ifr_vlan_pcp > 7) {
error = EINVAL;
break;
}
ifv->ifv_pcp = ifr->ifr_vlan_pcp;
vlan_tag_recalculate(ifv);
break;
case SIOCSIFCAP:
VLAN_SLOCK();
ifv->ifv_capenable = ifr->ifr_reqcap;
trunk = TRUNK(ifv);
if (trunk != NULL) {
TRUNK_WLOCK(trunk);
vlan_capabilities(ifv);
TRUNK_WUNLOCK(trunk);
}
VLAN_SUNLOCK();
break;
default:
error = EINVAL;
break;
}
return (error);
}
#ifdef RATELIMIT
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
union if_snd_tag_alloc_params *params,
struct m_snd_tag **ppmt)
{
/* get trunk device */
ifp = vlan_trunkdev(ifp);
if (ifp == NULL || (ifp->if_capenable & IFCAP_TXRTLMT) == 0)
return (EOPNOTSUPP);
/* forward allocation request */
return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
}
#endif
Index: head/sys/net/iflib.c
===================================================================
--- head/sys/net/iflib.c (revision 328217)
+++ head/sys/net/iflib.c (revision 328218)
@@ -1,5967 +1,5964 @@
/*-
* Copyright (c) 2014-2017, Matthew Macy <mmacy@mattmacy.io>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Neither the name of Matthew Macy nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"
#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <net/iflib.h>
#include "ifdi_if.h"
#if defined(__i386__) || defined(__amd64__)
#include <sys/memdesc.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/busdma_dmar.h>
#endif
#include <sys/bitstring.h>
/*
* enable accounting of every mbuf as it comes in to and goes out of
* iflib's software descriptor references
*/
#define MEMORY_LOGGING 0
/*
* Enable mbuf vectors for compressing long mbuf chains
*/
/*
* NB:
* - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
* we prefetch needs to be determined by the time spent in m_free vis a vis
* the cost of a prefetch. This will of course vary based on the workload:
* - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
* is quite expensive, thus suggesting very little prefetch.
* - small packet forwarding which is just returning a single mbuf to
* UMA will typically be very fast vis a vis the cost of a memory
* access.
*/
/*
* File organization:
* - private structures
* - iflib private utility functions
* - ifnet functions
* - vlan registry and other exported functions
* - iflib public core functions
*
*
*/
static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;
struct iflib_ctx;
static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
typedef struct iflib_filter_info {
driver_filter_t *ifi_filter;
void *ifi_filter_arg;
struct grouptask *ifi_task;
void *ifi_ctx;
} *iflib_filter_info_t;
struct iflib_ctx {
KOBJ_FIELDS;
/*
* Pointer to hardware driver's softc
*/
void *ifc_softc;
device_t ifc_dev;
if_t ifc_ifp;
cpuset_t ifc_cpus;
if_shared_ctx_t ifc_sctx;
struct if_softc_ctx ifc_softc_ctx;
struct mtx ifc_mtx;
uint16_t ifc_nhwtxqs;
uint16_t ifc_nhwrxqs;
iflib_txq_t ifc_txqs;
iflib_rxq_t ifc_rxqs;
uint32_t ifc_if_flags;
uint32_t ifc_flags;
uint32_t ifc_max_fl_buf_size;
int ifc_in_detach;
int ifc_link_state;
int ifc_link_irq;
int ifc_watchdog_events;
struct cdev *ifc_led_dev;
struct resource *ifc_msix_mem;
struct if_irq ifc_legacy_irq;
struct grouptask ifc_admin_task;
struct grouptask ifc_vflr_task;
struct iflib_filter_info ifc_filter_info;
struct ifmedia ifc_media;
struct sysctl_oid *ifc_sysctl_node;
uint16_t ifc_sysctl_ntxqs;
uint16_t ifc_sysctl_nrxqs;
uint16_t ifc_sysctl_qs_eq_override;
uint16_t ifc_sysctl_rx_budget;
qidx_t ifc_sysctl_ntxds[8];
qidx_t ifc_sysctl_nrxds[8];
struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
eventhandler_tag ifc_vlan_attach_event;
eventhandler_tag ifc_vlan_detach_event;
uint8_t ifc_mac[ETHER_ADDR_LEN];
char ifc_mtx_name[16];
};
void *
iflib_get_softc(if_ctx_t ctx)
{
return (ctx->ifc_softc);
}
device_t
iflib_get_dev(if_ctx_t ctx)
{
return (ctx->ifc_dev);
}
if_t
iflib_get_ifp(if_ctx_t ctx)
{
return (ctx->ifc_ifp);
}
struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{
return (&ctx->ifc_media);
}
void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{
bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}
if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{
return (&ctx->ifc_softc_ctx);
}
if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{
return (ctx->ifc_sctx);
}
#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
#define RX_SW_DESC_MAP_CREATED (1 << 0)
#define TX_SW_DESC_MAP_CREATED (1 << 1)
#define RX_SW_DESC_INUSE (1 << 3)
#define TX_SW_DESC_MAPPED (1 << 4)
#define M_TOOBIG M_PROTO1
typedef struct iflib_sw_rx_desc_array {
bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
struct mbuf **ifsd_m; /* pkthdr mbufs */
caddr_t *ifsd_cl; /* direct cluster pointer for rx */
uint8_t *ifsd_flags;
} iflib_rxsd_array_t;
typedef struct iflib_sw_tx_desc_array {
bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
struct mbuf **ifsd_m; /* pkthdr mbufs */
uint8_t *ifsd_flags;
} if_txsd_vec_t;
/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS 128
/* bnxt supports 64 with hardware LRO enabled */
#define IFLIB_MAX_RX_SEGS 64
#define IFLIB_RX_COPY_THRESH 128
#define IFLIB_MAX_RX_REFRESH 32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC 16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ 16
#define IFLIB_QUEUE_IDLE 0
#define IFLIB_QUEUE_HUNG 1
#define IFLIB_QUEUE_WORKING 2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR 4
/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE 32
#define IFLIB_RESTART_BUDGET 8
#define IFC_LEGACY 0x001
#define IFC_QFLUSH 0x002
#define IFC_MULTISEG 0x004
#define IFC_DMAR 0x008
#define IFC_SC_ALLOCATED 0x010
#define IFC_INIT_DONE 0x020
#define IFC_PREFETCH 0x040
#define IFC_DO_RESET 0x080
#define IFC_CHECK_HUNG 0x100
#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
qidx_t ift_in_use;
qidx_t ift_cidx;
qidx_t ift_cidx_processed;
qidx_t ift_pidx;
uint8_t ift_gen;
uint8_t ift_br_offset;
uint16_t ift_npending;
uint16_t ift_db_pending;
uint16_t ift_rs_pending;
/* implicit pad */
uint8_t ift_txd_size[8];
uint64_t ift_processed;
uint64_t ift_cleaned;
uint64_t ift_cleaned_prev;
#if MEMORY_LOGGING
uint64_t ift_enqueued;
uint64_t ift_dequeued;
#endif
uint64_t ift_no_tx_dma_setup;
uint64_t ift_no_desc_avail;
uint64_t ift_mbuf_defrag_failed;
uint64_t ift_mbuf_defrag;
uint64_t ift_map_failed;
uint64_t ift_txd_encap_efbig;
uint64_t ift_pullups;
struct mtx ift_mtx;
struct mtx ift_db_mtx;
/* constant values */
if_ctx_t ift_ctx;
struct ifmp_ring *ift_br;
struct grouptask ift_task;
qidx_t ift_size;
uint16_t ift_id;
struct callout ift_timer;
if_txsd_vec_t ift_sds;
uint8_t ift_qstatus;
uint8_t ift_closed;
uint8_t ift_update_freq;
struct iflib_filter_info ift_filter_info;
bus_dma_tag_t ift_desc_tag;
bus_dma_tag_t ift_tso_desc_tag;
iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 16
char ift_mtx_name[MTX_NAME_LEN];
char ift_db_mtx_name[MTX_NAME_LEN];
bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);
struct iflib_fl {
qidx_t ifl_cidx;
qidx_t ifl_pidx;
qidx_t ifl_credits;
uint8_t ifl_gen;
uint8_t ifl_rxd_size;
#if MEMORY_LOGGING
uint64_t ifl_m_enqueued;
uint64_t ifl_m_dequeued;
uint64_t ifl_cl_enqueued;
uint64_t ifl_cl_dequeued;
#endif
/* implicit pad */
bitstr_t *ifl_rx_bitmap;
qidx_t ifl_fragidx;
/* constant */
qidx_t ifl_size;
uint16_t ifl_buf_size;
uint16_t ifl_cltype;
uma_zone_t ifl_zone;
iflib_rxsd_array_t ifl_sds;
iflib_rxq_t ifl_rxq;
uint8_t ifl_id;
bus_dma_tag_t ifl_desc_tag;
iflib_dma_info_t ifl_ifdi;
uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);
static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
qidx_t used;
if (pidx > cidx)
used = pidx - cidx;
else if (pidx < cidx)
used = size - cidx + pidx;
else if (gen == 0 && pidx == cidx)
used = 0;
else if (gen == 1 && pidx == cidx)
used = size;
else
panic("bad state");
return (used);
}
#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
#define IDXDIFF(head, tail, wrap) \
((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
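/*
* Worked example: with ift_size = 1024, cidx = 1000 and pidx = 10,
* get_inuse() reports 1024 - 1000 + 10 = 34 descriptors in flight
* and TXQ_AVAIL() reports 990. When pidx == cidx, the generation
* bit distinguishes an empty ring (gen == 0) from a full one
* (gen == 1).
*/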
struct iflib_rxq {
/* If there is a separate completion queue -
* these are the cq cidx and pidx. Otherwise
* these are unused.
*/
qidx_t ifr_size;
qidx_t ifr_cq_cidx;
qidx_t ifr_cq_pidx;
uint8_t ifr_cq_gen;
uint8_t ifr_fl_offset;
if_ctx_t ifr_ctx;
iflib_fl_t ifr_fl;
uint64_t ifr_rx_irq;
uint16_t ifr_id;
uint8_t ifr_lro_enabled;
uint8_t ifr_nfl;
uint8_t ifr_ntxqirq;
uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
struct lro_ctrl ifr_lc;
struct grouptask ifr_task;
struct iflib_filter_info ifr_filter_info;
iflib_dma_info_t ifr_ifdi;
/* dynamically allocate if any drivers need a value substantially larger than this */
struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);
typedef struct if_rxsd {
caddr_t *ifsd_cl;
struct mbuf **ifsd_m;
iflib_fl_t ifsd_fl;
qidx_t ifsd_cidx;
} *if_rxsd_t;
/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE 6
#define RXD_INFO_SIZE 5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE 11
#define RXD_INFO_SIZE 8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4)
typedef struct if_pkt_info_pad {
PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;
CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
static inline void
pkt_info_zero(if_pkt_info_t pi)
{
if_pkt_info_pad_t pi_pad;
pi_pad = (if_pkt_info_pad_t)pi;
pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}
static inline void
rxd_info_zero(if_rxd_info_t ri)
{
if_rxd_info_pad_t ri_pad;
int i;
ri_pad = (if_rxd_info_pad_t)ri;
for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
ri_pad->rxd_val[i] = 0;
ri_pad->rxd_val[i+1] = 0;
ri_pad->rxd_val[i+2] = 0;
ri_pad->rxd_val[i+3] = 0;
}
#ifdef __LP64__
ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
/*
* Only allow a single packet to take up at most 1/nth of the tx ring
*/
#define MAX_SINGLE_PACKET_FRACTION 12
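/*
* E.g. with a 1024-descriptor tx ring, the transmit path later in
* this file uses this to cap a single packet at roughly
* 1024 / 12 = 85 descriptors.
*/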
#define IF_BAD_DMA (bus_addr_t)-1
#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
#define CTX_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_mtx, _name, "iflib ctx lock", MTX_DEF)
#define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_mtx)
#define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_mtx)
#define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx)
#define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
/* Our boot-time initialization hook */
static int iflib_module_event_handler(module_t, int, void *);
static moduledata_t iflib_moduledata = {
"iflib",
iflib_module_event_handler,
NULL
};
DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);
MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);
TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif
static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
"iflib driver parameters");
/*
* XXX need to ensure that this can't accidentally cause the head to be moved backwards
*/
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
&iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
&iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput");
#if IFLIB_DEBUG_COUNTERS
static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;
SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
&iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
&iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
&iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
&iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
&iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
&iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
&iflib_fl_refills_large, 0, "# large refills");
static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;
static int iflib_txq_drain_encapfail;
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
&iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
&iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
&iflib_txq_drain_notready, 0, "# drain notready");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
&iflib_txq_drain_encapfail, 0, "# drain encap fails");
static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;
SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
&iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
&iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
&iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
&iflib_encap_txd_encap_fail, 0, "# driver encap failures");
static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_intr_link;
static int iflib_intr_msix;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_zero_len;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;
static int iflib_verbose_debug;
SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
&iflib_intr_link, 0, "# intr link calls");
SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
&iflib_intr_msix, 0, "# intr msix calls");
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
&iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
&iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
&iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
&iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
&iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
&iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
&iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
&iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
&iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
&iflib_verbose_debug, 0, "enable verbose debugging");
#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
iflib_txq_drain_flushing = iflib_txq_drain_oactive =
iflib_txq_drain_notready = iflib_txq_drain_encapfail =
iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}
#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif
#define IFLIB_DEBUG 0
static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_stop(if_ctx_t ctx);
static void iflib_if_init_locked(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif
#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
MODULE_DEPEND(iflib, netmap, 1, 1, 1);
static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
/*
* device-specific sysctl variables:
*
* iflib_crcstrip: 0: keep CRC in rx frames, 1: strip it (default).
* During regular operation the CRC is stripped, but on some
* hardware reception of frames whose length is not a multiple of
* 64 is slower, so using crcstrip=0 helps in benchmarks.
*
* iflib_rx_miss, iflib_rx_miss_bufs:
* count packets that might be missed due to lost interrupts.
*/
SYSCTL_DECL(_dev_netmap);
/*
* CRCs are stripped by default; set iflib_crcstrip to 0 to keep them.
*/
int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");
/*
* Register/unregister. We are already under netmap lock.
* Only called on the first register or the last unregister.
*/
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
struct ifnet *ifp = na->ifp;
if_ctx_t ctx = ifp->if_softc;
int status;
CTX_LOCK(ctx);
IFDI_INTR_DISABLE(ctx);
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if (!CTX_IS_VF(ctx))
IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
/* enable or disable flags and callbacks in na and ifp */
if (onoff) {
nm_set_native_flags(na);
} else {
nm_clear_native_flags(na);
}
iflib_stop(ctx);
iflib_init_locked(ctx);
IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
if (status)
nm_clear_native_flags(na);
CTX_UNLOCK(ctx);
return (status);
}
static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
struct netmap_adapter *na = kring->na;
u_int const lim = kring->nkr_num_slots - 1;
u_int head = kring->rhead;
struct netmap_ring *ring = kring->ring;
bus_dmamap_t *map;
struct if_rxd_update iru;
if_ctx_t ctx = rxq->ifr_ctx;
iflib_fl_t fl = &rxq->ifr_fl[0];
uint32_t refill_pidx, nic_i;
if (nm_i == head && __predict_true(!init))
return 0;
iru_init(&iru, rxq, 0 /* flid */);
map = fl->ifl_sds.ifsd_map;
refill_pidx = netmap_idx_k2n(kring, nm_i);
/*
* IMPORTANT: we must leave one free slot in the ring,
* so move head back by one unit
*/
head = nm_prev(head, lim);
while (nm_i != head) {
for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
struct netmap_slot *slot = &ring->slot[nm_i];
void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
uint32_t nic_i_dma = refill_pidx;
nic_i = netmap_idx_k2n(kring, nm_i);
MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
return netmap_ring_reinit(kring);
fl->ifl_vm_addrs[tmp_pidx] = addr;
if (__predict_false(init) && map) {
netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
} else if (map && (slot->flags & NS_BUF_CHANGED)) {
/* buffer has changed, reload map */
netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
}
slot->flags &= ~NS_BUF_CHANGED;
nm_i = nm_next(nm_i, lim);
fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
continue;
iru.iru_pidx = refill_pidx;
iru.iru_count = tmp_pidx+1;
ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
refill_pidx = nic_i;
if (map == NULL)
continue;
for (int n = 0; n < iru.iru_count; n++) {
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
BUS_DMASYNC_PREREAD);
/* XXX - change this to not use the netmap func */
nic_i_dma = nm_next(nic_i_dma, lim);
}
}
}
kring->nr_hwcur = head;
if (map)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
return (0);
}
/*
* Reconcile kernel and user view of the transmit ring.
*
* All information is in the kring.
* Userspace wants to send packets up to the one before kring->rhead,
* kernel knows kring->nr_hwcur is the first unsent packet.
*
* Here we push packets out (as many as possible), and possibly
* reclaim buffers from previously completed transmission.
*
* The caller (netmap) guarantees that there is only one instance
* running at any time. Any interference with other driver
* methods should be handled by the individual drivers.
*/
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
struct ifnet *ifp = na->ifp;
struct netmap_ring *ring = kring->ring;
u_int nm_i; /* index into the netmap ring */
u_int nic_i; /* index into the NIC ring */
u_int n;
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = kring->rhead;
struct if_pkt_info pi;
/*
* interrupts on every tx packet are expensive so request
* them every half ring, or where NS_REPORT is set
*/
u_int report_frequency = kring->nkr_num_slots >> 1;
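/*
* E.g. with a 1024-slot ring, IPI_TX_INTR is requested when nic_i is
* 0 or 512 (report_frequency), i.e. roughly twice per traversal of
* the ring, plus on any slot carrying NS_REPORT.
*/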
/* device-specific */
if_ctx_t ctx = ifp->if_softc;
iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
if (txq->ift_sds.ifsd_map)
bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/*
* First part: process new packets to send.
* nm_i is the current index in the netmap ring,
* nic_i is the corresponding index in the NIC ring.
*
* If we have packets to send (nm_i != head)
* iterate over the netmap ring, fetch length and update
* the corresponding slot in the NIC ring. Some drivers also
* need to update the buffer's physical address in the NIC slot
* even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
*
* The netmap_reload_map() call is especially expensive,
* even when (as in this case) the tag is 0, so only do it
* when the buffer has actually changed.
*
* If possible do not set the report/intr bit on all slots,
* but only a few times per ring or when NS_REPORT is set.
*
* Finally, on 10G and faster drivers, it might be useful
* to prefetch the next slot and txr entry.
*/
nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
pkt_info_zero(&pi);
pi.ipi_segs = txq->ift_segs;
pi.ipi_qsidx = kring->ring_id;
if (nm_i != head) { /* we have new packets to send */
nic_i = netmap_idx_k2n(kring, nm_i);
__builtin_prefetch(&ring->slot[nm_i]);
__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
if (txq->ift_sds.ifsd_map)
__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
for (n = 0; nm_i != head; n++) {
struct netmap_slot *slot = &ring->slot[nm_i];
u_int len = slot->len;
uint64_t paddr;
void *addr = PNMB(na, slot, &paddr);
int flags = (slot->flags & NS_REPORT ||
nic_i == 0 || nic_i == report_frequency) ?
IPI_TX_INTR : 0;
/* device-specific */
pi.ipi_len = len;
pi.ipi_segs[0].ds_addr = paddr;
pi.ipi_segs[0].ds_len = len;
pi.ipi_nsegs = 1;
pi.ipi_ndescs = 0;
pi.ipi_pidx = nic_i;
pi.ipi_flags = flags;
/* Fill the slot in the NIC ring. */
ctx->isc_txd_encap(ctx->ifc_softc, &pi);
/* prefetch for next round */
__builtin_prefetch(&ring->slot[nm_i + 1]);
__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
if (txq->ift_sds.ifsd_map) {
__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
NM_CHECK_ADDR_LEN(na, addr, len);
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
}
/* make sure changes to the buffer are synced */
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
BUS_DMASYNC_PREWRITE);
}
slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
kring->nr_hwcur = head;
/* synchronize the NIC ring */
if (txq->ift_sds.ifsd_map)
bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* (re)start the tx unit up to slot nic_i (excluded) */
ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
}
/*
* Second part: reclaim buffers for completed transmissions.
*/
if (iflib_tx_credits_update(ctx, txq)) {
/* some tx completed, increment avail */
nic_i = txq->ift_cidx_processed;
kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
}
return (0);
}
/*
* Reconcile kernel and user view of the receive ring.
* Same as for the txsync, this routine must be efficient.
* The caller guarantees a single invocation, but races against
* the rest of the driver should be handled here.
*
* On call, kring->rhead is the first packet that userspace wants
* to keep, and kring->rcur is the wakeup point.
* The kernel has previously reported packets up to kring->rtail.
*
* If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
* of whether or not we received an interrupt.
*/
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
struct netmap_ring *ring = kring->ring;
uint32_t nm_i; /* index into the netmap ring */
uint32_t nic_i; /* index into the NIC ring */
u_int i, n;
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = netmap_idx_n2k(kring, kring->rhead);
int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
struct if_rxd_info ri;
struct ifnet *ifp = na->ifp;
if_ctx_t ctx = ifp->if_softc;
iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
iflib_fl_t fl = rxq->ifr_fl;
if (head > lim)
return netmap_ring_reinit(kring);
/* XXX check sync modes */
for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
if (fl->ifl_sds.ifsd_map == NULL)
continue;
bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}
/*
* First part: import newly received packets.
*
* nm_i is the index of the next free slot in the netmap ring,
* nic_i is the index of the next received packet in the NIC ring,
* and they may differ in case if_init() has been called while
* in netmap mode. For the receive ring we have
*
* nic_i = rxr->next_check;
* nm_i = kring->nr_hwtail (previous)
* and
* nm_i == (nic_i + kring->nkr_hwofs) % ring_size
*
* rxr->next_check is set to 0 on a ring reinit
*/
if (netmap_no_pendintr || force_update) {
int crclen = iflib_crcstrip ? 0 : 4;
int error, avail;
uint16_t slot_flags = kring->nkr_slot_flags;
for (i = 0; i < rxq->ifr_nfl; i++) {
fl = &rxq->ifr_fl[i];
nic_i = fl->ifl_cidx;
nm_i = netmap_idx_n2k(kring, nic_i);
avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
for (n = 0; avail > 0; n++, avail--) {
rxd_info_zero(&ri);
ri.iri_frags = rxq->ifr_frags;
ri.iri_qsidx = kring->ring_id;
ri.iri_ifp = ctx->ifc_ifp;
ri.iri_cidx = nic_i;
error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
ring->slot[nm_i].flags = slot_flags;
if (fl->ifl_sds.ifsd_map)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
if (n) { /* update the state variables */
if (netmap_no_pendintr && !force_update) {
/* diagnostics */
iflib_rx_miss ++;
iflib_rx_miss_bufs += n;
}
fl->ifl_cidx = nic_i;
kring->nr_hwtail = netmap_idx_k2n(kring, nm_i);
}
kring->nr_kflags &= ~NKR_PENDINTR;
}
}
/*
* Second part: skip past packets that userspace has released.
* (kring->nr_hwcur to head excluded),
* and make the buffers available for reception.
* As usual nm_i is the index in the netmap ring,
* nic_i is the index in the NIC ring, and
* nm_i == (nic_i + kring->nkr_hwofs) % ring_size
*/
/* XXX not sure how this will work with multiple free lists */
nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
return (netmap_fl_refill(rxq, kring, nm_i, false));
}
static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
struct ifnet *ifp = na->ifp;
if_ctx_t ctx = ifp->if_softc;
CTX_LOCK(ctx);
if (onoff) {
IFDI_INTR_ENABLE(ctx);
} else {
IFDI_INTR_DISABLE(ctx);
}
CTX_UNLOCK(ctx);
}
static int
iflib_netmap_attach(if_ctx_t ctx)
{
struct netmap_adapter na;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
bzero(&na, sizeof(na));
na.ifp = ctx->ifc_ifp;
na.na_flags = NAF_BDG_MAYSLEEP;
MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
na.num_tx_desc = scctx->isc_ntxd[0];
na.num_rx_desc = scctx->isc_nrxd[0];
na.nm_txsync = iflib_netmap_txsync;
na.nm_rxsync = iflib_netmap_rxsync;
na.nm_register = iflib_netmap_register;
na.nm_intr = iflib_netmap_intr;
na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
return (netmap_attach(&na));
}
static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
struct netmap_adapter *na = NA(ctx->ifc_ifp);
struct netmap_slot *slot;
slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
if (slot == NULL)
return;
if (txq->ift_sds.ifsd_map == NULL)
return;
for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
/*
* In netmap mode, set the map for the packet buffer.
* NOTE: Some drivers (not this one) also need to set
* the physical buffer address in the NIC ring.
* netmap_idx_n2k() maps a nic index, i, into the corresponding
* netmap slot index, si
*/
int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i);
netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
}
}
static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
struct netmap_adapter *na = NA(ctx->ifc_ifp);
struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id];
struct netmap_slot *slot;
uint32_t nm_i;
slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
if (slot == NULL)
return;
nm_i = netmap_idx_n2k(kring, 0);
netmap_fl_refill(rxq, kring, nm_i, true);
}
#define iflib_netmap_detach(ifp) netmap_detach(ifp)
#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)
#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)
#endif
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif
static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
iflib_fl_t fl;
fl = &rxq->ifr_fl[flid];
iru->iru_paddrs = fl->ifl_bus_addrs;
iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
iru->iru_idxs = fl->ifl_rxd_idxs;
iru->iru_qsidx = rxq->ifr_id;
iru->iru_buf_size = fl->ifl_buf_size;
iru->iru_flidx = fl->ifl_id;
}
static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
if (err)
return;
*(bus_addr_t *) arg = segs[0].ds_addr;
}
int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
int err;
if_shared_ctx_t sctx = ctx->ifc_sctx;
device_t dev = ctx->ifc_dev;
KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
sctx->isc_q_align, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
size, /* maxsize */
1, /* nsegments */
size, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&dma->idi_tag);
if (err) {
device_printf(dev,
"%s: bus_dma_tag_create failed: %d\n",
__func__, err);
goto fail_0;
}
err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
if (err) {
device_printf(dev,
"%s: bus_dmamem_alloc(%ju) failed: %d\n",
__func__, (uintmax_t)size, err);
goto fail_1;
}
dma->idi_paddr = IF_BAD_DMA;
err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
if (err || dma->idi_paddr == IF_BAD_DMA) {
device_printf(dev,
"%s: bus_dmamap_load failed: %d\n",
__func__, err);
goto fail_2;
}
dma->idi_size = size;
return (0);
fail_2:
bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
bus_dma_tag_destroy(dma->idi_tag);
fail_0:
dma->idi_tag = NULL;
return (err);
}
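/*
* Minimal usage sketch (illustrative only): allocate a coherent
* descriptor area, then release it with iflib_dma_free() when done.
*
*	struct iflib_dma_info di;
*
*	if (iflib_dma_alloc(ctx, 4096, &di, BUS_DMA_NOWAIT) == 0) {
*		... use di.idi_vaddr / di.idi_paddr ...
*		iflib_dma_free(&di);
*	}
*/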
int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
int i, err;
iflib_dma_info_t *dmaiter;
dmaiter = dmalist;
for (i = 0; i < count; i++, dmaiter++) {
if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
break;
}
if (err)
iflib_dma_free_multi(dmalist, i);
return (err);
}
void
iflib_dma_free(iflib_dma_info_t dma)
{
if (dma->idi_tag == NULL)
return;
if (dma->idi_paddr != IF_BAD_DMA) {
bus_dmamap_sync(dma->idi_tag, dma->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->idi_tag, dma->idi_map);
dma->idi_paddr = IF_BAD_DMA;
}
if (dma->idi_vaddr != NULL) {
bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
dma->idi_vaddr = NULL;
}
bus_dma_tag_destroy(dma->idi_tag);
dma->idi_tag = NULL;
}
void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
int i;
iflib_dma_info_t *dmaiter = dmalist;
for (i = 0; i < count; i++, dmaiter++)
iflib_dma_free(*dmaiter);
}
#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
* We used to abuse the smp_started flag to decide if the queues have been
* fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
* That gave bad races, since the SYSINIT() runs strictly after smp_started
* is set. Run a SYSINIT() strictly after that to just set a usable
* completion flag.
*/
static int iflib_started;
static void
iflib_record_started(void *arg)
{
iflib_started = 1;
}
SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
iflib_record_started, NULL);
#endif
static int
iflib_fast_intr(void *arg)
{
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
if (!iflib_started)
return (FILTER_HANDLED);
DBG_COUNTER_INC(fast_intrs);
if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
return (FILTER_HANDLED);
GROUPTASK_ENQUEUE(gtask);
return (FILTER_HANDLED);
}
static int
iflib_fast_intr_rxtx(void *arg)
{
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
if_ctx_t ctx = rxq->ifr_ctx;
int i, cidx;
if (!iflib_started)
return (FILTER_HANDLED);
DBG_COUNTER_INC(fast_intrs);
if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
return (FILTER_HANDLED);
for (i = 0; i < rxq->ifr_ntxqirq; i++) {
qidx_t txqid = rxq->ifr_txqid[i];
if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
continue;
}
GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}
if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
cidx = rxq->ifr_cq_cidx;
else
cidx = rxq->ifr_fl[0].ifl_cidx;
if (iflib_rxd_avail(ctx, rxq, cidx, 1))
GROUPTASK_ENQUEUE(gtask);
else
IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
return (FILTER_HANDLED);
}
static int
iflib_fast_intr_ctx(void *arg)
{
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
if (!iflib_started)
return (FILTER_HANDLED);
DBG_COUNTER_INC(fast_intrs);
if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
return (FILTER_HANDLED);
GROUPTASK_ENQUEUE(gtask);
return (FILTER_HANDLED);
}
static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
driver_filter_t filter, driver_intr_t handler, void *arg,
char *name)
{
int rc, flags;
struct resource *res;
void *tag = NULL;
device_t dev = ctx->ifc_dev;
flags = RF_ACTIVE;
if (ctx->ifc_flags & IFC_LEGACY)
flags |= RF_SHAREABLE;
MPASS(rid < 512);
irq->ii_rid = rid;
res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
if (res == NULL) {
device_printf(dev,
"failed to allocate IRQ for rid %d, name %s.\n",
rid, name ? name : "unknown");
return (ENOMEM);
}
irq->ii_res = res;
KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
filter, handler, arg, &tag);
if (rc != 0) {
device_printf(dev,
"failed to setup interrupt for rid %d, name %s: %d\n",
rid, name ? name : "unknown", rc);
return (rc);
} else if (name)
bus_describe_intr(dev, res, tag, "%s", name);
irq->ii_tag = tag;
return (0);
}
/*********************************************************************
*
* Allocate memory for tx_buffer structures. The tx_buffer stores all
* the information needed to transmit a packet on the wire. This is
* called only once at attach; setup is done on every reset.
*
**********************************************************************/
static int
iflib_txsd_alloc(iflib_txq_t txq)
{
if_ctx_t ctx = txq->ift_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
int err, nsegments, ntsosegments;
nsegments = scctx->isc_tx_nsegments;
ntsosegments = scctx->isc_tx_tso_segments_max;
MPASS(scctx->isc_ntxd[0] > 0);
MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
MPASS(nsegments > 0);
MPASS(ntsosegments > 0);
/*
* Setup DMA descriptor areas.
*/
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sctx->isc_tx_maxsize, /* maxsize */
nsegments, /* nsegments */
sctx->isc_tx_maxsegsize, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txq->ift_desc_tag))) {
device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
(uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
goto fail;
}
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
scctx->isc_tx_tso_size_max, /* maxsize */
ntsosegments, /* nsegments */
scctx->isc_tx_tso_segsize_max, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txq->ift_tso_desc_tag))) {
device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
goto fail;
}
if (!(txq->ift_sds.ifsd_flags =
- (uint8_t *) mallocarray(scctx->isc_ntxd[txq->ift_br_offset],
- sizeof(uint8_t), M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (uint8_t *) malloc(sizeof(uint8_t) *
+ scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(txq->ift_sds.ifsd_m =
- (struct mbuf **) mallocarray(scctx->isc_ntxd[txq->ift_br_offset],
- sizeof(struct mbuf *), M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (struct mbuf **) malloc(sizeof(struct mbuf *) *
+ scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
if ((ctx->ifc_flags & IFC_DMAR) == 0)
return (0);
if (!(txq->ift_sds.ifsd_map =
- (bus_dmamap_t *) mallocarray(scctx->isc_ntxd[txq->ift_br_offset],
- sizeof(bus_dmamap_t), M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
err = ENOMEM;
goto fail;
}
for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
goto fail;
}
}
#endif
return (0);
fail:
/* We free everything; this handles the case where we fail in the middle. */
iflib_tx_structures_free(ctx);
return (err);
}
static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
bus_dmamap_t map;
map = NULL;
if (txq->ift_sds.ifsd_map != NULL)
map = txq->ift_sds.ifsd_map[i];
if (map != NULL) {
bus_dmamap_unload(txq->ift_desc_tag, map);
bus_dmamap_destroy(txq->ift_desc_tag, map);
txq->ift_sds.ifsd_map[i] = NULL;
}
}
static void
iflib_txq_destroy(iflib_txq_t txq)
{
if_ctx_t ctx = txq->ift_ctx;
for (int i = 0; i < txq->ift_size; i++)
iflib_txsd_destroy(ctx, txq, i);
if (txq->ift_sds.ifsd_map != NULL) {
free(txq->ift_sds.ifsd_map, M_IFLIB);
txq->ift_sds.ifsd_map = NULL;
}
if (txq->ift_sds.ifsd_m != NULL) {
free(txq->ift_sds.ifsd_m, M_IFLIB);
txq->ift_sds.ifsd_m = NULL;
}
if (txq->ift_sds.ifsd_flags != NULL) {
free(txq->ift_sds.ifsd_flags, M_IFLIB);
txq->ift_sds.ifsd_flags = NULL;
}
if (txq->ift_desc_tag != NULL) {
bus_dma_tag_destroy(txq->ift_desc_tag);
txq->ift_desc_tag = NULL;
}
if (txq->ift_tso_desc_tag != NULL) {
bus_dma_tag_destroy(txq->ift_tso_desc_tag);
txq->ift_tso_desc_tag = NULL;
}
}
static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
struct mbuf **mp;
mp = &txq->ift_sds.ifsd_m[i];
if (*mp == NULL)
return;
if (txq->ift_sds.ifsd_map != NULL) {
bus_dmamap_sync(txq->ift_desc_tag,
txq->ift_sds.ifsd_map[i],
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->ift_desc_tag,
txq->ift_sds.ifsd_map[i]);
}
m_free(*mp);
DBG_COUNTER_INC(tx_frees);
*mp = NULL;
}
static int
iflib_txq_setup(iflib_txq_t txq)
{
if_ctx_t ctx = txq->ift_ctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
iflib_dma_info_t di;
int i;
/* Set number of descriptors available */
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
/* XXX make configurable */
txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
/* Reset indices */
txq->ift_cidx_processed = 0;
txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
bzero((void *)di->idi_vaddr, di->idi_size);
IFDI_TXQ_SETUP(ctx, txq->ift_id);
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
bus_dmamap_sync(di->idi_tag, di->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one
* rx_buffer per received packet, the maximum number of rx_buffers
* that we'll need is equal to the number of receive descriptors
* that we've allocated.
*
**********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
iflib_fl_t fl;
int err;
MPASS(scctx->isc_nrxd[0] > 0);
MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
fl = rxq->ifr_fl;
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sctx->isc_rx_maxsize, /* maxsize */
sctx->isc_rx_nsegments, /* nsegments */
sctx->isc_rx_maxsegsize, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&fl->ifl_desc_tag);
if (err) {
device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
__func__, err);
goto fail;
}
if (!(fl->ifl_sds.ifsd_flags =
- (uint8_t *) mallocarray(scctx->isc_nrxd[rxq->ifr_fl_offset],
- sizeof(uint8_t), M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (uint8_t *) malloc(sizeof(uint8_t) *
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(fl->ifl_sds.ifsd_m =
- (struct mbuf **) mallocarray(scctx->isc_nrxd[rxq->ifr_fl_offset],
- sizeof(struct mbuf *), M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (struct mbuf **) malloc(sizeof(struct mbuf *) *
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(fl->ifl_sds.ifsd_cl =
- (caddr_t *) mallocarray(scctx->isc_nrxd[rxq->ifr_fl_offset],
- sizeof(caddr_t), M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (caddr_t *) malloc(sizeof(caddr_t) *
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
if ((ctx->ifc_flags & IFC_DMAR) == 0)
continue;
if (!(fl->ifl_sds.ifsd_map =
- (bus_dmamap_t *) mallocarray(scctx->isc_nrxd[rxq->ifr_fl_offset],
- sizeof(bus_dmamap_t), M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
err = ENOMEM;
goto fail;
}
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create RX buffer DMA map\n");
goto fail;
}
}
#endif
}
return (0);
fail:
iflib_rx_structures_free(ctx);
return (err);
}
/*
* Internal service routines
*/
struct rxq_refill_cb_arg {
int error;
bus_dma_segment_t seg;
int nseg;
};
static void
_rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
struct rxq_refill_cb_arg *cb_arg = arg;
cb_arg->error = error;
cb_arg->seg = segs[0];
cb_arg->nseg = nseg;
}
#ifdef ACPI_DMAR
#define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
#else
#define IS_DMAR(ctx) (0)
#endif
/**
* _iflib_fl_refill - refill an rxq free-buffer list
* @ctx: the iflib context
* @fl: the free list to refill
* @count: the number of new buffers to allocate
*
* (Re)populate an rxq free-buffer list with up to @count new packet buffers.
* The caller must ensure that @count does not exceed the list's capacity.
*/
static void
_iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
{
struct mbuf *m;
int idx, frag_idx = fl->ifl_fragidx;
int pidx = fl->ifl_pidx;
caddr_t cl, *sd_cl;
struct mbuf **sd_m;
uint8_t *sd_flags;
struct if_rxd_update iru;
bus_dmamap_t *sd_map;
int n, i = 0;
uint64_t bus_addr;
int err;
qidx_t credits;
sd_m = fl->ifl_sds.ifsd_m;
sd_map = fl->ifl_sds.ifsd_map;
sd_cl = fl->ifl_sds.ifsd_cl;
sd_flags = fl->ifl_sds.ifsd_flags;
idx = pidx;
credits = fl->ifl_credits;
n = count;
MPASS(n > 0);
MPASS(credits + n <= fl->ifl_size);
if (pidx < fl->ifl_cidx)
MPASS(pidx + n <= fl->ifl_cidx);
if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
MPASS(fl->ifl_gen == 0);
if (pidx > fl->ifl_cidx)
MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
DBG_COUNTER_INC(fl_refills);
if (n > 8)
DBG_COUNTER_INC(fl_refills_large);
iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
while (n--) {
/*
* We allocate an uninitialized mbuf + cluster; the mbuf is
* initialized after rx.
*
* If the cluster is still set then we know a minimum sized
* packet was received.
*/
bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx);
if ((frag_idx < 0) || (frag_idx >= fl->ifl_size))
bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
if ((cl = sd_cl[frag_idx]) == NULL) {
if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
break;
#if MEMORY_LOGGING
fl->ifl_cl_enqueued++;
#endif
}
if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
break;
}
#if MEMORY_LOGGING
fl->ifl_m_enqueued++;
#endif
DBG_COUNTER_INC(rx_allocs);
#if defined(__i386__) || defined(__amd64__)
if (!IS_DMAR(ctx)) {
bus_addr = pmap_kextract((vm_offset_t)cl);
} else
#endif
{
struct rxq_refill_cb_arg cb_arg;
iflib_rxq_t q;
cb_arg.error = 0;
q = fl->ifl_rxq;
MPASS(sd_map != NULL);
MPASS(sd_map[frag_idx] != NULL);
err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
BUS_DMASYNC_PREREAD);
if (err != 0 || cb_arg.error) {
/*
* !zone_pack ?
*/
if (fl->ifl_zone == zone_pack)
uma_zfree(fl->ifl_zone, cl);
m_free(m);
n = 0;
goto done;
}
bus_addr = cb_arg.seg.ds_addr;
}
bit_set(fl->ifl_rx_bitmap, frag_idx);
sd_flags[frag_idx] |= RX_SW_DESC_INUSE;
MPASS(sd_m[frag_idx] == NULL);
sd_cl[frag_idx] = cl;
sd_m[frag_idx] = m;
fl->ifl_rxd_idxs[i] = frag_idx;
fl->ifl_bus_addrs[i] = bus_addr;
fl->ifl_vm_addrs[i] = cl;
credits++;
i++;
MPASS(credits <= fl->ifl_size);
if (++idx == fl->ifl_size) {
fl->ifl_gen = 1;
idx = 0;
}
if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
iru.iru_pidx = pidx;
iru.iru_count = i;
ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
i = 0;
pidx = idx;
fl->ifl_pidx = idx;
fl->ifl_credits = credits;
}
}
done:
if (i) {
iru.iru_pidx = pidx;
iru.iru_count = i;
ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
fl->ifl_pidx = idx;
fl->ifl_credits = credits;
}
DBG_COUNTER_INC(rxd_flush);
if (fl->ifl_pidx == 0)
pidx = fl->ifl_size - 1;
else
pidx = fl->ifl_pidx - 1;
if (sd_map)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
fl->ifl_fragidx = frag_idx;
}
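/*
* Example (IFLIB_MAX_RX_REFRESH assumed to be 32 for illustration): a refill
* with count = 100 hands the driver four isc_rxd_refill() batches of 32, 32,
* 32 and 4 buffers (the last via the n == 0 check), followed by a single
* isc_rxd_flush() at the last written index; the "done:" label only needs to
* push a partial batch when an mbuf or cluster allocation fails mid-loop.
*/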
static __inline void
__iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
{
/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
#ifdef INVARIANTS
int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
#endif
MPASS(fl->ifl_credits <= fl->ifl_size);
MPASS(reclaimable == delta);
if (reclaimable > 0)
_iflib_fl_refill(ctx, fl, min(max, reclaimable));
}
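/*
* Example (numbers assumed): with ifl_size = 2048 and ifl_credits = 2000,
* reclaimable is 2048 - 2000 - 1 = 47, so at most min(max, 47) buffers are
* refilled; the "- 1" keeps pidx from ever catching up with cidx.
*/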
static void
iflib_fl_bufs_free(iflib_fl_t fl)
{
iflib_dma_info_t idi = fl->ifl_ifdi;
uint32_t i;
for (i = 0; i < fl->ifl_size; i++) {
struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i];
caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
if (*sd_flags & RX_SW_DESC_INUSE) {
if (fl->ifl_sds.ifsd_map != NULL) {
bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
}
if (*sd_m != NULL) {
m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
uma_zfree(zone_mbuf, *sd_m);
}
if (*sd_cl != NULL)
uma_zfree(fl->ifl_zone, *sd_cl);
*sd_flags = 0;
} else {
MPASS(*sd_cl == NULL);
MPASS(*sd_m == NULL);
}
#if MEMORY_LOGGING
fl->ifl_m_dequeued++;
fl->ifl_cl_dequeued++;
#endif
*sd_cl = NULL;
*sd_m = NULL;
}
#ifdef INVARIANTS
for (i = 0; i < fl->ifl_size; i++) {
MPASS(fl->ifl_sds.ifsd_flags[i] == 0);
MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
}
#endif
/*
* Reset free list values
*/
fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
bzero(idi->idi_vaddr, idi->idi_size);
}
/*********************************************************************
*
* Initialize a receive ring and its buffers.
*
**********************************************************************/
static int
iflib_fl_setup(iflib_fl_t fl)
{
iflib_rxq_t rxq = fl->ifl_rxq;
if_ctx_t ctx = rxq->ifr_ctx;
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
/*
** Free current RX buffer structs and their mbufs
*/
iflib_fl_bufs_free(fl);
/* Now replenish the mbufs */
MPASS(fl->ifl_credits == 0);
/*
* XXX don't set the max_frame_size to larger
* than the hardware can handle
*/
if (sctx->isc_max_frame_size <= 2048)
fl->ifl_buf_size = MCLBYTES;
#ifndef CONTIGMALLOC_WORKS
else
fl->ifl_buf_size = MJUMPAGESIZE;
#else
else if (sctx->isc_max_frame_size <= 4096)
fl->ifl_buf_size = MJUMPAGESIZE;
else if (sctx->isc_max_frame_size <= 9216)
fl->ifl_buf_size = MJUM9BYTES;
else
fl->ifl_buf_size = MJUM16BYTES;
#endif
if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
fl->ifl_zone = m_getzone(fl->ifl_buf_size);
/*
* Avoid pre-allocating zillions of clusters to an idle card,
* potentially speeding up attach.
*/
_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
if (min(128, fl->ifl_size) != fl->ifl_credits)
return (ENOBUFS);
/*
* handle failure
*/
MPASS(rxq != NULL);
MPASS(fl->ifl_ifdi != NULL);
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
/*********************************************************************
*
* Free receive ring data structures
*
**********************************************************************/
static void
iflib_rx_sds_free(iflib_rxq_t rxq)
{
iflib_fl_t fl;
int i;
if (rxq->ifr_fl != NULL) {
for (i = 0; i < rxq->ifr_nfl; i++) {
fl = &rxq->ifr_fl[i];
if (fl->ifl_desc_tag != NULL) {
bus_dma_tag_destroy(fl->ifl_desc_tag);
fl->ifl_desc_tag = NULL;
}
free(fl->ifl_sds.ifsd_m, M_IFLIB);
free(fl->ifl_sds.ifsd_cl, M_IFLIB);
/* XXX destroy maps first */
free(fl->ifl_sds.ifsd_map, M_IFLIB);
fl->ifl_sds.ifsd_m = NULL;
fl->ifl_sds.ifsd_cl = NULL;
fl->ifl_sds.ifsd_map = NULL;
}
free(rxq->ifr_fl, M_IFLIB);
rxq->ifr_fl = NULL;
rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
}
}
/*
 * Machine-independent (MI) logic
 *
 */
static void
iflib_timer(void *arg)
{
iflib_txq_t txq = arg;
if_ctx_t ctx = txq->ift_ctx;
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
return;
/*
** Check on the state of the TX queue(s); this
** can be done without the lock because it's RO
** and the HUNG state will be static if set.
*/
IFDI_TIMER(ctx, txq->ift_id);
if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
((txq->ift_cleaned_prev == txq->ift_cleaned) ||
(sctx->isc_pause_frames == 0)))
goto hung;
if (ifmp_ring_is_stalled(txq->ift_br))
txq->ift_qstatus = IFLIB_QUEUE_HUNG;
txq->ift_cleaned_prev = txq->ift_cleaned;
/* handle any laggards */
if (txq->ift_db_pending)
GROUPTASK_ENQUEUE(&txq->ift_task);
sctx->isc_pause_frames = 0;
if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
return;
hung:
CTX_LOCK(ctx);
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
IFDI_WATCHDOG_RESET(ctx);
ctx->ifc_watchdog_events++;
ctx->ifc_flags |= IFC_DO_RESET;
iflib_admin_intr_deferred(ctx);
CTX_UNLOCK(ctx);
}
static void
iflib_init_locked(if_ctx_t ctx)
{
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
if_t ifp = ctx->ifc_ifp;
iflib_fl_t fl;
iflib_txq_t txq;
iflib_rxq_t rxq;
int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
IFDI_INTR_DISABLE(ctx);
tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
/* Set hardware offload abilities */
if_clearhwassist(ifp);
if (if_getcapenable(ifp) & IFCAP_TXCSUM)
if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
if (if_getcapenable(ifp) & IFCAP_TSO4)
if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
if (if_getcapenable(ifp) & IFCAP_TSO6)
if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
CALLOUT_UNLOCK(txq);
iflib_netmap_txq_init(ctx, txq);
}
#ifdef INVARIANTS
i = if_getdrvflags(ifp);
#endif
IFDI_INIT(ctx);
MPASS(if_getdrvflags(ifp) == i);
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
/* XXX this should really be done on a per-queue basis */
if (if_getcapenable(ifp) & IFCAP_NETMAP) {
MPASS(rxq->ifr_id == i);
iflib_netmap_rxq_init(ctx, rxq);
continue;
}
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
if (iflib_fl_setup(fl)) {
device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
goto done;
}
}
}
done:
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
IFDI_INTR_ENABLE(ctx);
txq = ctx->ifc_txqs;
for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
txq->ift_timer.c_cpu);
}
static int
iflib_media_change(if_t ifp)
{
if_ctx_t ctx = if_getsoftc(ifp);
int err;
CTX_LOCK(ctx);
if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
iflib_init_locked(ctx);
CTX_UNLOCK(ctx);
return (err);
}
static void
iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
{
if_ctx_t ctx = if_getsoftc(ifp);
CTX_LOCK(ctx);
IFDI_UPDATE_ADMIN_STATUS(ctx);
IFDI_MEDIA_STATUS(ctx, ifmr);
CTX_UNLOCK(ctx);
}
static void
iflib_stop(if_ctx_t ctx)
{
iflib_txq_t txq = ctx->ifc_txqs;
iflib_rxq_t rxq = ctx->ifc_rxqs;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
iflib_dma_info_t di;
iflib_fl_t fl;
int i, j;
/* Tell the stack that the interface is no longer active */
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
IFDI_INTR_DISABLE(ctx);
DELAY(1000);
IFDI_STOP(ctx);
DELAY(1000);
iflib_debug_reset();
/* Wait for current tx queue users to exit to disarm watchdog timer. */
for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
/* make sure all transmitters have completed before proceeding XXX */
/* clean any enqueued buffers */
iflib_ifmp_purge(txq);
/* Free any existing tx buffers. */
for (j = 0; j < txq->ift_size; j++) {
iflib_txsd_free(ctx, txq, j);
}
txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
txq->ift_pullups = 0;
ifmp_ring_reset_stats(txq->ift_br);
for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
bzero((void *)di->idi_vaddr, di->idi_size);
}
for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
/* make sure all receivers have completed before proceeding XXX */
for (j = 0, di = rxq->ifr_ifdi; j < ctx->ifc_nhwrxqs; j++, di++)
bzero((void *)di->idi_vaddr, di->idi_size);
/* also resets the free lists pidx/cidx */
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
iflib_fl_bufs_free(fl);
}
}
static inline caddr_t
calc_next_rxd(iflib_fl_t fl, int cidx)
{
qidx_t size;
int nrxd;
caddr_t start, end, cur, next;
nrxd = fl->ifl_size;
size = fl->ifl_rxd_size;
start = fl->ifl_ifdi->idi_vaddr;
if (__predict_false(size == 0))
return (start);
cur = start + size*cidx;
end = start + size*nrxd;
next = CACHE_PTR_NEXT(cur);
return (next < end ? next : start);
}
static inline void
prefetch_pkts(iflib_fl_t fl, int cidx)
{
int nextptr;
int nrxd = fl->ifl_size;
caddr_t next_rxd;
nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
next_rxd = calc_next_rxd(fl, cidx);
prefetch(next_rxd);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
}
static void
rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
{
int flid, cidx;
bus_dmamap_t map;
iflib_fl_t fl;
iflib_dma_info_t di;
int next;
map = NULL;
flid = irf->irf_flid;
cidx = irf->irf_idx;
fl = &rxq->ifr_fl[flid];
sd->ifsd_fl = fl;
sd->ifsd_cidx = cidx;
sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
fl->ifl_credits--;
#if MEMORY_LOGGING
fl->ifl_m_dequeued++;
#endif
if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
prefetch_pkts(fl, cidx);
if (fl->ifl_sds.ifsd_map != NULL) {
next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
prefetch(&fl->ifl_sds.ifsd_map[next]);
map = fl->ifl_sds.ifsd_map[cidx];
di = fl->ifl_ifdi;
next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
prefetch(&fl->ifl_sds.ifsd_flags[next]);
bus_dmamap_sync(di->idi_tag, di->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* not valid assert if bxe really does SGE from non-contiguous elements */
MPASS(fl->ifl_cidx == cidx);
if (unload)
bus_dmamap_unload(fl->ifl_desc_tag, map);
}
fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
if (__predict_false(fl->ifl_cidx == 0))
fl->ifl_gen = 0;
if (map != NULL)
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
bit_clear(fl->ifl_rx_bitmap, cidx);
}
static struct mbuf *
assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
{
int i, padlen , flags;
struct mbuf *m, *mh, *mt;
caddr_t cl;
i = 0;
mh = NULL;
do {
rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
MPASS(*sd->ifsd_cl != NULL);
MPASS(*sd->ifsd_m != NULL);
/* Don't include zero-length frags */
if (ri->iri_frags[i].irf_len == 0) {
/* XXX we can save the cluster here, but not the mbuf */
m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
m_free(*sd->ifsd_m);
*sd->ifsd_m = NULL;
continue;
}
m = *sd->ifsd_m;
*sd->ifsd_m = NULL;
if (mh == NULL) {
flags = M_PKTHDR|M_EXT;
mh = mt = m;
padlen = ri->iri_pad;
} else {
flags = M_EXT;
mt->m_next = m;
mt = m;
/* assuming padding is only on the first fragment */
padlen = 0;
}
cl = *sd->ifsd_cl;
*sd->ifsd_cl = NULL;
/* Can these two be made one ? */
m_init(m, M_NOWAIT, MT_DATA, flags);
m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
/*
* These must follow m_init and m_cljset
*/
m->m_data += padlen;
ri->iri_len -= padlen;
m->m_len = ri->iri_frags[i].irf_len;
} while (++i < ri->iri_nfrags);
return (mh);
}
/*
* Process one software descriptor
*/
static struct mbuf *
iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
{
struct if_rxsd sd;
struct mbuf *m;
/* should I merge this back in now that the two paths are basically duplicated? */
if (ri->iri_nfrags == 1 &&
ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) {
rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
m = *sd.ifsd_m;
*sd.ifsd_m = NULL;
m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
#ifndef __NO_STRICT_ALIGNMENT
if (!IP_ALIGNED(m))
m->m_data += 2;
#endif
memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
m->m_len = ri->iri_frags[0].irf_len;
} else {
m = assemble_segments(rxq, ri, &sd);
}
m->m_pkthdr.len = ri->iri_len;
m->m_pkthdr.rcvif = ri->iri_ifp;
m->m_flags |= ri->iri_flags;
m->m_pkthdr.ether_vtag = ri->iri_vtag;
m->m_pkthdr.flowid = ri->iri_flowid;
M_HASHTYPE_SET(m, ri->iri_rsstype);
m->m_pkthdr.csum_flags = ri->iri_csum_flags;
m->m_pkthdr.csum_data = ri->iri_csum_data;
return (m);
}
#if defined(INET6) || defined(INET)
static void
iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
{
CURVNET_SET(lc->ifp->if_vnet);
#if defined(INET6)
*v6 = VNET(ip6_forwarding);
#endif
#if defined(INET)
*v4 = VNET(ipforwarding);
#endif
CURVNET_RESTORE();
}
/*
* Returns true if it's possible this packet could be LROed.
* If it returns false, it is guaranteed that tcp_lro_rx()
* would not return zero.
*/
static bool
iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
{
struct ether_header *eh;
uint16_t eh_type;
eh = mtod(m, struct ether_header *);
eh_type = ntohs(eh->ether_type);
switch (eh_type) {
#if defined(INET6)
case ETHERTYPE_IPV6:
return !v6_forwarding;
#endif
#if defined (INET)
case ETHERTYPE_IP:
return !v4_forwarding;
#endif
}
return false;
}
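/*
* Example: with net.inet.ip.forwarding enabled, an ETHERTYPE_IP frame returns
* false here, so iflib_rxeof() hands it straight to if_input() instead of
* queueing it for LRO; a non-forwarding host returns true and the packet may
* then be merged by tcp_lro_rx().
*/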
#else
static void
iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
{
}
#endif
static bool
iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
{
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
int avail, i;
qidx_t *cidxp;
struct if_rxd_info ri;
int err, budget_left, rx_bytes, rx_pkts;
iflib_fl_t fl;
struct ifnet *ifp;
int lro_enabled;
bool lro_possible = false;
bool v4_forwarding, v6_forwarding;
/*
* XXX early demux data packets so that if_input processing only handles
* acks in interrupt context
*/
struct mbuf *m, *mh, *mt, *mf;
ifp = ctx->ifc_ifp;
mh = mt = NULL;
MPASS(budget > 0);
rx_pkts = rx_bytes = 0;
if (sctx->isc_flags & IFLIB_HAS_RXCQ)
cidxp = &rxq->ifr_cq_cidx;
else
cidxp = &rxq->ifr_fl[0].ifl_cidx;
if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
__iflib_fl_refill_lt(ctx, fl, budget + 8);
DBG_COUNTER_INC(rx_unavail);
return (false);
}
for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) {
if (__predict_false(!CTX_ACTIVE(ctx))) {
DBG_COUNTER_INC(rx_ctx_inactive);
break;
}
/*
* Reset client set fields to their default values
*/
rxd_info_zero(&ri);
ri.iri_qsidx = rxq->ifr_id;
ri.iri_cidx = *cidxp;
ri.iri_ifp = ifp;
ri.iri_frags = rxq->ifr_frags;
err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
if (err)
goto err;
if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
*cidxp = ri.iri_cidx;
/* Update our consumer index */
/* XXX NB: shurd - check if this is still safe */
while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
rxq->ifr_cq_gen = 0;
}
/* was this only a completion queue message? */
if (__predict_false(ri.iri_nfrags == 0))
continue;
}
MPASS(ri.iri_nfrags != 0);
MPASS(ri.iri_len != 0);
/* will advance the cidx on the corresponding free lists */
m = iflib_rxd_pkt_get(rxq, &ri);
if (avail == 0 && budget_left)
avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
if (__predict_false(m == NULL)) {
DBG_COUNTER_INC(rx_mbuf_null);
continue;
}
/* imm_pkt: -- cxgb */
if (mh == NULL)
mh = mt = m;
else {
mt->m_nextpkt = m;
mt = m;
}
}
/* make sure that we can refill faster than drain */
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
__iflib_fl_refill_lt(ctx, fl, budget + 8);
lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
if (lro_enabled)
iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
mt = mf = NULL;
while (mh != NULL) {
m = mh;
mh = mh->m_nextpkt;
m->m_nextpkt = NULL;
#ifndef __NO_STRICT_ALIGNMENT
if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
continue;
#endif
rx_bytes += m->m_pkthdr.len;
rx_pkts++;
#if defined(INET6) || defined(INET)
if (lro_enabled) {
if (!lro_possible) {
lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
if (lro_possible && mf != NULL) {
ifp->if_input(ifp, mf);
DBG_COUNTER_INC(rx_if_input);
mt = mf = NULL;
}
}
if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
(CSUM_L4_CALC|CSUM_L4_VALID)) {
if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
continue;
}
}
#endif
if (lro_possible) {
ifp->if_input(ifp, m);
DBG_COUNTER_INC(rx_if_input);
continue;
}
if (mf == NULL)
mf = m;
if (mt != NULL)
mt->m_nextpkt = m;
mt = m;
}
if (mf != NULL) {
ifp->if_input(ifp, mf);
DBG_COUNTER_INC(rx_if_input);
}
if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
/*
* Flush any outstanding LRO work
*/
#if defined(INET6) || defined(INET)
tcp_lro_flush_all(&rxq->ifr_lc);
#endif
if (avail)
return true;
return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
err:
CTX_LOCK(ctx);
ctx->ifc_flags |= IFC_DO_RESET;
iflib_admin_intr_deferred(ctx);
CTX_UNLOCK(ctx);
return (false);
}
#define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
static inline qidx_t
txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
{
qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
qidx_t minthresh = txq->ift_size / 8;
if (in_use > 4*minthresh)
return (notify_count);
if (in_use > 2*minthresh)
return (notify_count >> 1);
if (in_use > minthresh)
return (notify_count >> 3);
return (0);
}
static inline qidx_t
txq_max_rs_deferred(iflib_txq_t txq)
{
qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
qidx_t minthresh = txq->ift_size / 8;
if (txq->ift_in_use > 4*minthresh)
return (notify_count);
if (txq->ift_in_use > 2*minthresh)
return (notify_count >> 1);
if (txq->ift_in_use > minthresh)
return (notify_count >> 2);
return (2);
}
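/*
* Worked example (ring geometry assumed for illustration): with ift_size =
* 1024 and ift_update_freq = 64, TXD_NOTIFY_COUNT() is 1024/64 - 1 = 15 and
* minthresh is 1024/8 = 128.  txq_max_db_deferred() then returns 15, 7, 1 or 0
* and txq_max_rs_deferred() returns 15, 7, 3 or 2 as in_use falls through the
* 512/256/128 descriptor thresholds, i.e. the busier the ring, the more
* doorbell and report-status work may be deferred.
*/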
#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
#define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
#define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
#define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
/* forward compatibility for cxgb */
#define FIRST_QSET(ctx) 0
#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
/* XXX we should be setting this to something other than zero */
#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
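/*
* Worked examples (all numbers are assumptions for illustration): with
* isc_rss_table_mask = 0x7f and NTXQSETS() = 8, a flowid of 0x12345 maps to
* QIDX() = ((0x12345 & 0x7f) % 8) + 0 = (0x45 % 8) = 5.  With ift_processed =
* 1000, ift_cleaned = 900 and isc_tx_nsegments = 8, DESC_RECLAIMABLE() is
* 1000 - 900 - 8 = 92 descriptors.
*/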
static inline bool
iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
{
qidx_t dbval, max;
bool rang;
rang = false;
max = TXQ_MAX_DB_DEFERRED(txq, in_use);
if (ring || txq->ift_db_pending >= max) {
dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
txq->ift_db_pending = txq->ift_npending = 0;
rang = true;
}
return (rang);
}
#ifdef PKT_DEBUG
static void
print_pkt(if_pkt_info_t pi)
{
printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
}
#endif
#define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
#define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
static int
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
{
if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
struct ether_vlan_header *eh;
struct mbuf *m, *n;
n = m = *mp;
if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
M_WRITABLE(m) == 0) {
if ((m = m_dup(m, M_NOWAIT)) == NULL) {
return (ENOMEM);
} else {
m_freem(*mp);
n = *mp = m;
}
}
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present,
* helpful for QinQ too.
*/
if (__predict_false(m->m_len < sizeof(*eh))) {
txq->ift_pullups++;
if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
return (ENOMEM);
}
eh = mtod(m, struct ether_vlan_header *);
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
pi->ipi_etype = ntohs(eh->evl_proto);
pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
} else {
pi->ipi_etype = ntohs(eh->evl_encap_proto);
pi->ipi_ehdrlen = ETHER_HDR_LEN;
}
switch (pi->ipi_etype) {
#ifdef INET
case ETHERTYPE_IP:
{
struct ip *ip = NULL;
struct tcphdr *th = NULL;
int minthlen;
minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
if (__predict_false(m->m_len < minthlen)) {
/*
* if this code bloat is causing too much of a hit
* move it to a separate function and mark it noinline
*/
if (m->m_len == pi->ipi_ehdrlen) {
n = m->m_next;
MPASS(n);
if (n->m_len >= sizeof(*ip)) {
ip = (struct ip *)n->m_data;
if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
} else {
txq->ift_pullups++;
if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
return (ENOMEM);
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
}
} else {
txq->ift_pullups++;
if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
return (ENOMEM);
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
}
} else {
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
}
pi->ipi_ip_hlen = ip->ip_hl << 2;
pi->ipi_ipproto = ip->ip_p;
pi->ipi_flags |= IPI_TX_IPV4;
if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
ip->ip_sum = 0;
if (IS_TSO4(pi)) {
if (pi->ipi_ipproto == IPPROTO_TCP) {
if (__predict_false(th == NULL)) {
txq->ift_pullups++;
if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
return (ENOMEM);
th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
}
pi->ipi_tcp_hflags = th->th_flags;
pi->ipi_tcp_hlen = th->th_off << 2;
pi->ipi_tcp_seq = th->th_seq;
}
if (__predict_false(ip->ip_p != IPPROTO_TCP))
return (ENXIO);
th->th_sum = in_pseudo(ip->ip_src.s_addr,
ip->ip_dst.s_addr, htons(IPPROTO_TCP));
pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
ip->ip_sum = 0;
ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
}
}
break;
}
#endif
#ifdef INET6
case ETHERTYPE_IPV6:
{
struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
struct tcphdr *th;
pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
return (ENOMEM);
}
th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
/* XXX-BZ this will go badly in case of ext hdrs. */
pi->ipi_ipproto = ip6->ip6_nxt;
pi->ipi_flags |= IPI_TX_IPV6;
if (IS_TSO6(pi)) {
if (pi->ipi_ipproto == IPPROTO_TCP) {
if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
return (ENOMEM);
}
pi->ipi_tcp_hflags = th->th_flags;
pi->ipi_tcp_hlen = th->th_off << 2;
}
if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
return (ENXIO);
/*
* The corresponding flag is set by the stack in the IPv4
* TSO case, but not in IPv6 (at least in FreeBSD 10.2).
* So, set it here because the rest of the flow requires it.
*/
pi->ipi_csum_flags |= CSUM_TCP_IPV6;
th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
}
break;
}
#endif
default:
pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
pi->ipi_ip_hlen = 0;
break;
}
*mp = m;
return (0);
}
static __noinline struct mbuf *
collapse_pkthdr(struct mbuf *m0)
{
struct mbuf *m, *m_next, *tmp;
m = m0;
m_next = m->m_next;
while (m_next != NULL && m_next->m_len == 0) {
/* unlink the zero-length mbuf before freeing it to avoid a use-after-free */
tmp = m_next;
m_next = m_next->m_next;
tmp->m_next = NULL;
m_free(tmp);
}
m = m0;
m->m_next = m_next;
if (m_next == NULL)
return (m);
if ((m_next->m_flags & M_EXT) == 0) {
m = m_defrag(m, M_NOWAIT);
} else {
tmp = m_next->m_next;
memcpy(m_next, m, MPKTHSIZE);
m = m_next;
m->m_next = tmp;
}
return (m);
}
/*
* If dodgy hardware rejects the scatter gather chain we've handed it,
* we'll need to remove the mbuf chain from ifsd_m[] before we can add the
* m_defrag'd mbufs.
*/
static __noinline struct mbuf *
iflib_remove_mbuf(iflib_txq_t txq)
{
int ntxd, i, pidx;
struct mbuf *m, *mh, **ifsd_m;
pidx = txq->ift_pidx;
ifsd_m = txq->ift_sds.ifsd_m;
ntxd = txq->ift_size;
mh = m = ifsd_m[pidx];
ifsd_m[pidx] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
i = 1;
while (m) {
ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
m = m->m_next;
i++;
}
return (mh);
}
static int
iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
int max_segs, int flags)
{
if_ctx_t ctx;
if_shared_ctx_t sctx;
if_softc_ctx_t scctx;
int i, next, pidx, err, ntxd, count;
struct mbuf *m, *tmp, **ifsd_m;
m = *m0;
/*
* Please don't ever do this
*/
if (__predict_false(m->m_len == 0))
*m0 = m = collapse_pkthdr(m);
ctx = txq->ift_ctx;
sctx = ctx->ifc_sctx;
scctx = &ctx->ifc_softc_ctx;
ifsd_m = txq->ift_sds.ifsd_m;
ntxd = txq->ift_size;
pidx = txq->ift_pidx;
if (map != NULL) {
uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
err = bus_dmamap_load_mbuf_sg(tag, map,
*m0, segs, nsegs, BUS_DMA_NOWAIT);
if (err)
return (err);
ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
count = 0;
m = *m0;
do {
if (__predict_false(m->m_len <= 0)) {
tmp = m;
m = m->m_next;
tmp->m_next = NULL;
m_free(tmp);
continue;
}
m = m->m_next;
count++;
} while (m != NULL);
if (count > *nsegs) {
ifsd_m[pidx] = *m0;
ifsd_m[pidx]->m_flags |= M_TOOBIG;
return (0);
}
m = *m0;
count = 0;
do {
next = (pidx + count) & (ntxd-1);
MPASS(ifsd_m[next] == NULL);
ifsd_m[next] = m;
count++;
tmp = m;
m = m->m_next;
} while (m != NULL);
} else {
int buflen, sgsize, maxsegsz, max_sgsize;
vm_offset_t vaddr;
vm_paddr_t curaddr;
count = i = 0;
m = *m0;
if (m->m_pkthdr.csum_flags & CSUM_TSO)
maxsegsz = scctx->isc_tx_tso_segsize_max;
else
maxsegsz = sctx->isc_tx_maxsegsize;
do {
if (__predict_false(m->m_len <= 0)) {
tmp = m;
m = m->m_next;
tmp->m_next = NULL;
m_free(tmp);
continue;
}
buflen = m->m_len;
vaddr = (vm_offset_t)m->m_data;
/*
* see if we can't be smarter about physically
* contiguous mappings
*/
next = (pidx + count) & (ntxd-1);
MPASS(ifsd_m[next] == NULL);
#if MEMORY_LOGGING
txq->ift_enqueued++;
#endif
ifsd_m[next] = m;
while (buflen > 0) {
if (i >= max_segs)
goto err;
max_sgsize = MIN(buflen, maxsegsz);
curaddr = pmap_kextract(vaddr);
sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
sgsize = MIN(sgsize, max_sgsize);
segs[i].ds_addr = curaddr;
segs[i].ds_len = sgsize;
vaddr += sgsize;
buflen -= sgsize;
i++;
}
count++;
tmp = m;
m = m->m_next;
} while (m != NULL);
*nsegs = i;
}
return (0);
err:
*m0 = iflib_remove_mbuf(txq);
return (EFBIG);
}
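/*
* Worked example for the map == NULL path above (numbers assumed): a
* 3000-byte mbuf whose data starts 100 bytes before a 4096-byte page boundary
* is split by the pmap_kextract() loop into a 100-byte segment up to the page
* boundary and, assuming maxsegsz is at least 2900, a single 2900-byte segment
* from the following physical page.
*/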
static inline caddr_t
calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
{
qidx_t size;
int ntxd;
caddr_t start, end, cur, next;
ntxd = txq->ift_size;
size = txq->ift_txd_size[qid];
start = txq->ift_ifdi[qid].idi_vaddr;
if (__predict_false(size == 0))
return (start);
cur = start + size*cidx;
end = start + size*ntxd;
next = CACHE_PTR_NEXT(cur);
return (next < end ? next : start);
}
/*
* Pad an mbuf to ensure a minimum ethernet frame size.
* min_frame_size is the frame size (less CRC) to pad the mbuf to
*/
static __noinline int
iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
{
/*
* 18 is enough bytes to pad an ARP packet to 46 bytes, and
* an ARP message is the smallest common payload I can think of.
*/
static char pad[18]; /* just zeros */
int n;
struct mbuf *new_head;
if (!M_WRITABLE(*m_head)) {
new_head = m_dup(*m_head, M_NOWAIT);
if (new_head == NULL) {
m_freem(*m_head);
device_printf(dev, "cannot pad short frame, m_dup() failed");
DBG_COUNTER_INC(encap_pad_mbuf_fail);
return ENOMEM;
}
m_freem(*m_head);
*m_head = new_head;
}
for (n = min_frame_size - (*m_head)->m_pkthdr.len;
n > 0; n -= sizeof(pad))
if (!m_append(*m_head, min(n, sizeof(pad)), pad))
break;
if (n > 0) {
m_freem(*m_head);
device_printf(dev, "cannot pad short frame\n");
DBG_COUNTER_INC(encap_pad_mbuf_fail);
return (ENOBUFS);
}
return 0;
}
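/*
* Example: a 42-byte ARP request with min_frame_size = 60 (ETHER_MIN_LEN less
* CRC) needs n = 18 bytes of padding, which the loop above satisfies with a
* single m_append() of sizeof(pad) bytes.
*/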
static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
if_ctx_t ctx;
if_shared_ctx_t sctx;
if_softc_ctx_t scctx;
bus_dma_segment_t *segs;
struct mbuf *m_head;
void *next_txd;
bus_dmamap_t map;
struct if_pkt_info pi;
int remap = 0;
int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
bus_dma_tag_t desc_tag;
segs = txq->ift_segs;
ctx = txq->ift_ctx;
sctx = ctx->ifc_sctx;
scctx = &ctx->ifc_softc_ctx;
ntxd = txq->ift_size;
m_head = *m_headp;
map = NULL;
/*
* If we're doing TSO the next descriptor to clean may be quite far ahead
*/
cidx = txq->ift_cidx;
pidx = txq->ift_pidx;
if (ctx->ifc_flags & IFC_PREFETCH) {
next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
if (!(sctx->isc_flags & IFLIB_HAS_TXCQ)) {	/* IFLIB_HAS_TXCQ is a shared-ctx (isc_flags) flag */
next_txd = calc_next_txd(txq, cidx, 0);
prefetch(next_txd);
}
/* prefetch the next cache line of mbuf pointers and flags */
prefetch(&txq->ift_sds.ifsd_m[next]);
if (txq->ift_sds.ifsd_map != NULL) {
prefetch(&txq->ift_sds.ifsd_map[next]);
next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
prefetch(&txq->ift_sds.ifsd_flags[next]);
}
} else if (txq->ift_sds.ifsd_map != NULL)
map = txq->ift_sds.ifsd_map[pidx];
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
desc_tag = txq->ift_tso_desc_tag;
max_segs = scctx->isc_tx_tso_segments_max;
} else {
desc_tag = txq->ift_desc_tag;
max_segs = scctx->isc_tx_nsegments;
}
if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
__predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
if (err)
return err;
}
m_head = *m_headp;
pkt_info_zero(&pi);
pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
pi.ipi_pidx = pidx;
pi.ipi_qsidx = txq->ift_id;
pi.ipi_len = m_head->m_pkthdr.len;
pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
/* deliberate bitwise OR to make one condition */
if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0))
return (err);
m_head = *m_headp;
}
retry:
err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
defrag:
if (__predict_false(err)) {
switch (err) {
case EFBIG:
/* try collapse once and defrag once */
if (remap == 0)
m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
if (remap == 1)
m_head = m_defrag(*m_headp, M_NOWAIT);
remap++;
if (__predict_false(m_head == NULL))
goto defrag_failed;
txq->ift_mbuf_defrag++;
*m_headp = m_head;
goto retry;
break;
case ENOMEM:
txq->ift_no_tx_dma_setup++;
break;
default:
txq->ift_no_tx_dma_setup++;
m_freem(*m_headp);
DBG_COUNTER_INC(tx_frees);
*m_headp = NULL;
break;
}
txq->ift_map_failed++;
DBG_COUNTER_INC(encap_load_mbuf_fail);
return (err);
}
/*
* XXX assumes a 1 to 1 relationship between segments and
* descriptors - this does not hold true on all drivers, e.g.
* cxgb
*/
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
txq->ift_no_desc_avail++;
if (map != NULL)
bus_dmamap_unload(desc_tag, map);
DBG_COUNTER_INC(encap_txq_avail_fail);
if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
GROUPTASK_ENQUEUE(&txq->ift_task);
return (ENOBUFS);
}
/*
* On Intel cards we can greatly reduce the number of TX interrupts
* we see by only setting report status on every Nth descriptor.
* However, this also means that the driver will need to keep track
* of the descriptors that RS was set on to check them for the DD bit.
*/
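/*
* Worked example (numbers assumed; iflib_no_tx_batch off and descriptors
* plentiful): if TXQ_MAX_RS_DEFERRED() currently evaluates to 15, back-to-back
* 4-segment packets add 5 to ift_rs_pending each, so the fourth packet pushes
* it to 20 > 15, requests IPI_TX_INTR and resets the counter; roughly one
* packet in four asks for report status.
*/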
txq->ift_rs_pending += nsegs + 1;
if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs - 1) <= MAX_TX_DESC(ctx)) {
pi.ipi_flags |= IPI_TX_INTR;
txq->ift_rs_pending = 0;
}
pi.ipi_segs = segs;
pi.ipi_nsegs = nsegs;
MPASS(pidx >= 0 && pidx < txq->ift_size);
#ifdef PKT_DEBUG
print_pkt(&pi);
#endif
if (map != NULL)
bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
if (map != NULL)
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
DBG_COUNTER_INC(tx_encap);
MPASS(pi.ipi_new_pidx < txq->ift_size);
ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
if (pi.ipi_new_pidx < pi.ipi_pidx) {
ndesc += txq->ift_size;
txq->ift_gen = 1;
}
/*
* drivers can need as many as
* two sentinels
*/
MPASS(ndesc <= pi.ipi_nsegs + 2);
MPASS(pi.ipi_new_pidx != pidx);
MPASS(ndesc > 0);
txq->ift_in_use += ndesc;
/*
* We update the last software descriptor again here because there may
* be a sentinel and/or there may be more mbufs than segments
*/
txq->ift_pidx = pi.ipi_new_pidx;
txq->ift_npending += pi.ipi_ndescs;
} else if (__predict_false(err == EFBIG && remap < 2)) {
*m_headp = m_head = iflib_remove_mbuf(txq);
remap = 1;
txq->ift_txd_encap_efbig++;
goto defrag;
} else
DBG_COUNTER_INC(encap_txd_encap_fail);
return (err);
defrag_failed:
txq->ift_mbuf_defrag_failed++;
txq->ift_map_failed++;
m_freem(*m_headp);
DBG_COUNTER_INC(tx_frees);
*m_headp = NULL;
return (ENOMEM);
}
static void
iflib_tx_desc_free(iflib_txq_t txq, int n)
{
int hasmap;
uint32_t qsize, cidx, mask, gen;
struct mbuf *m, **ifsd_m;
uint8_t *ifsd_flags;
bus_dmamap_t *ifsd_map;
bool do_prefetch;
cidx = txq->ift_cidx;
gen = txq->ift_gen;
qsize = txq->ift_size;
mask = qsize-1;
hasmap = txq->ift_sds.ifsd_map != NULL;
ifsd_flags = txq->ift_sds.ifsd_flags;
ifsd_m = txq->ift_sds.ifsd_m;
ifsd_map = txq->ift_sds.ifsd_map;
do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
while (n--) {
if (do_prefetch) {
prefetch(ifsd_m[(cidx + 3) & mask]);
prefetch(ifsd_m[(cidx + 4) & mask]);
}
if (ifsd_m[cidx] != NULL) {
prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
/*
* does it matter if it's not the TSO tag? If so we'll
* have to add the type to flags
*/
bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
}
if ((m = ifsd_m[cidx]) != NULL) {
/* XXX we don't support any drivers that batch packets yet */
MPASS(m->m_nextpkt == NULL);
/*
* If the number of clusters exceeds the number of segments,
* there won't be space on the ring to save a pointer to each
* cluster, so we simply free the list here.
*/
if (m->m_flags & M_TOOBIG) {
m_freem(m);
} else {
m_free(m);
}
ifsd_m[cidx] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
DBG_COUNTER_INC(tx_frees);
}
}
if (__predict_false(++cidx == qsize)) {
cidx = 0;
gen = 0;
}
}
txq->ift_cidx = cidx;
txq->ift_gen = gen;
}
static __inline int
iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
{
int reclaim;
if_ctx_t ctx = txq->ift_ctx;
KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
/*
* Need a rate-limiting check so that this isn't called every time
*/
iflib_tx_credits_update(ctx, txq);
reclaim = DESC_RECLAIMABLE(txq);
if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
#ifdef INVARIANTS
if (iflib_verbose_debug) {
printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
reclaim, thresh);
}
#endif
return (0);
}
iflib_tx_desc_free(txq, reclaim);
txq->ift_cleaned += reclaim;
txq->ift_in_use -= reclaim;
return (reclaim);
}
static struct mbuf **
_ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
{
int next, size;
struct mbuf **items;
size = r->size;
next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
items = __DEVOLATILE(struct mbuf **, &r->items[0]);
prefetch(items[(cidx + offset) & (size-1)]);
if (remaining > 1) {
prefetch2cachelines(&items[next]);
prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
}
return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
}
static void
iflib_txq_check_drain(iflib_txq_t txq, int budget)
{
ifmp_ring_check_drainage(txq->ift_br, budget);
}
static uint32_t
iflib_txq_can_drain(struct ifmp_ring *r)
{
iflib_txq_t txq = r->cookie;
if_ctx_t ctx = txq->ift_ctx;
return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
}
static uint32_t
iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
iflib_txq_t txq = r->cookie;
if_ctx_t ctx = txq->ift_ctx;
struct ifnet *ifp = ctx->ifc_ifp;
struct mbuf **mp, *m;
int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
int reclaimed, err, in_use_prev, desc_used;
bool do_prefetch, ring, rang;
if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
!LINK_ACTIVE(ctx))) {
DBG_COUNTER_INC(txq_drain_notready);
return (0);
}
reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
avail = IDXDIFF(pidx, cidx, r->size);
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
DBG_COUNTER_INC(txq_drain_flushing);
for (i = 0; i < avail; i++) {
m_free(r->items[(cidx + i) & (r->size-1)]);
r->items[(cidx + i) & (r->size-1)] = NULL;
}
return (avail);
}
if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
CALLOUT_UNLOCK(txq);
DBG_COUNTER_INC(txq_drain_oactive);
return (0);
}
if (reclaimed)
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
consumed = mcast_sent = bytes_sent = pkt_sent = 0;
count = MIN(avail, TX_BATCH_SIZE);
#ifdef INVARIANTS
if (iflib_verbose_debug)
printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
avail, ctx->ifc_flags, TXQ_AVAIL(txq));
#endif
do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
avail = TXQ_AVAIL(txq);
for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
int pidx_prev, rem = do_prefetch ? count - i : 0;
mp = _ring_peek_one(r, cidx, i, rem);
MPASS(mp != NULL && *mp != NULL);
if (__predict_false(*mp == (struct mbuf *)txq)) {
consumed++;
reclaimed++;
continue;
}
in_use_prev = txq->ift_in_use;
pidx_prev = txq->ift_pidx;
err = iflib_encap(txq, mp);
if (__predict_false(err)) {
DBG_COUNTER_INC(txq_drain_encapfail);
/* no room - bail out */
if (err == ENOBUFS)
break;
consumed++;
/* we can't send this packet - skip it */
continue;
}
consumed++;
pkt_sent++;
m = *mp;
DBG_COUNTER_INC(tx_sent);
bytes_sent += m->m_pkthdr.len;
mcast_sent += !!(m->m_flags & M_MCAST);
avail = TXQ_AVAIL(txq);
txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
desc_used += (txq->ift_in_use - in_use_prev);
ETHER_BPF_MTAP(ifp, m);
if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
break;
rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
}
/* deliberate use of bitwise or to avoid gratuitous short-circuit */
ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
if (mcast_sent)
if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
#ifdef INVARIANTS
if (iflib_verbose_debug)
printf("consumed=%d\n", consumed);
#endif
return (consumed);
}
static uint32_t
iflib_txq_drain_always(struct ifmp_ring *r)
{
return (1);
}
static uint32_t
iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
int i, avail;
struct mbuf **mp;
iflib_txq_t txq;
txq = r->cookie;
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
CALLOUT_UNLOCK(txq);
avail = IDXDIFF(pidx, cidx, r->size);
for (i = 0; i < avail; i++) {
mp = _ring_peek_one(r, cidx, i, avail - i);
if (__predict_false(*mp == (struct mbuf *)txq))
continue;
m_freem(*mp);
}
MPASS(ifmp_ring_is_stalled(r) == 0);
return (avail);
}
static void
iflib_ifmp_purge(iflib_txq_t txq)
{
struct ifmp_ring *r;
r = txq->ift_br;
r->drain = iflib_txq_drain_free;
r->can_drain = iflib_txq_drain_always;
ifmp_ring_check_drainage(r, r->size);
r->drain = iflib_txq_drain;
r->can_drain = iflib_txq_can_drain;
}
static void
_task_fn_tx(void *context)
{
iflib_txq_t txq = context;
if_ctx_t ctx = txq->ift_ctx;
struct ifnet *ifp = ctx->ifc_ifp;
int rc;
#ifdef IFLIB_DIAGNOSTICS
txq->ift_cpu_exec_count[curcpu]++;
#endif
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
return;
if (if_getcapenable(ifp) & IFCAP_NETMAP) {
if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
netmap_tx_irq(ifp, txq->ift_id);
IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
return;
}
if (txq->ift_db_pending)
ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE);
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
if (ctx->ifc_flags & IFC_LEGACY)
IFDI_INTR_ENABLE(ctx);
else {
rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
}
}
static void
_task_fn_rx(void *context)
{
iflib_rxq_t rxq = context;
if_ctx_t ctx = rxq->ifr_ctx;
bool more;
int rc;
uint16_t budget;
#ifdef IFLIB_DIAGNOSTICS
rxq->ifr_cpu_exec_count[curcpu]++;
#endif
DBG_COUNTER_INC(task_fn_rxs);
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
return;
more = true;
#ifdef DEV_NETMAP
if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
u_int work = 0;
if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
more = false;
}
}
#endif
budget = ctx->ifc_sysctl_rx_budget;
if (budget == 0)
budget = 16; /* XXX */
if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
if (ctx->ifc_flags & IFC_LEGACY)
IFDI_INTR_ENABLE(ctx);
else {
DBG_COUNTER_INC(rx_intr_enables);
rc = IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but it is not implemented in the driver"));
}
}
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
return;
if (more)
GROUPTASK_ENQUEUE(&rxq->ifr_task);
}
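/*
 * Deferred admin task: stop the per-queue timers, let the driver
 * refresh its administrative and link status, restart the timers,
 * re-enable the link interrupt, service any requested reset, and then
 * kick the TX queues if the link is up.
 */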
static void
_task_fn_admin(void *context)
{
if_ctx_t ctx = context;
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
iflib_txq_t txq;
int i;
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) {
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
return;
}
}
CTX_LOCK(ctx);
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
CALLOUT_LOCK(txq);
callout_stop(&txq->ift_timer);
CALLOUT_UNLOCK(txq);
}
IFDI_UPDATE_ADMIN_STATUS(ctx);
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
IFDI_LINK_INTR_ENABLE(ctx);
if (ctx->ifc_flags & IFC_DO_RESET) {
ctx->ifc_flags &= ~IFC_DO_RESET;
iflib_if_init_locked(ctx);
}
CTX_UNLOCK(ctx);
if (LINK_ACTIVE(ctx) == 0)
return;
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
}
static void
_task_fn_iov(void *context)
{
if_ctx_t ctx = context;
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
return;
CTX_LOCK(ctx);
IFDI_VFLR_HANDLE(ctx);
CTX_UNLOCK(ctx);
}
static int
iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
int err;
if_int_delay_info_t info;
if_ctx_t ctx;
info = (if_int_delay_info_t)arg1;
ctx = info->iidi_ctx;
info->iidi_req = req;
info->iidi_oidp = oidp;
CTX_LOCK(ctx);
err = IFDI_SYSCTL_INT_DELAY(ctx, info);
CTX_UNLOCK(ctx);
return (err);
}
/*********************************************************************
*
* IFNET FUNCTIONS
*
**********************************************************************/
static void
iflib_if_init_locked(if_ctx_t ctx)
{
iflib_stop(ctx);
iflib_init_locked(ctx);
}
static void
iflib_if_init(void *arg)
{
if_ctx_t ctx = arg;
CTX_LOCK(ctx);
iflib_if_init_locked(ctx);
CTX_UNLOCK(ctx);
}
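/*
 * if_transmit entry point. Packets are steered to a TX queue based on
 * the mbuf's flow id, enqueued on that queue's mp_ring, and the
 * deferred TX task is kicked to perform the actual descriptor encap.
 */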
static int
iflib_if_transmit(if_t ifp, struct mbuf *m)
{
if_ctx_t ctx = if_getsoftc(ifp);
iflib_txq_t txq;
int err, qidx;
if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
DBG_COUNTER_INC(tx_frees);
m_freem(m);
return (ENOBUFS);
}
MPASS(m->m_nextpkt == NULL);
qidx = 0;
if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
qidx = QIDX(ctx, m);
/*
* XXX calculate buf_ring based on flowid (divvy up bits?)
*/
txq = &ctx->ifc_txqs[qidx];
#ifdef DRIVER_BACKPRESSURE
if (txq->ift_closed) {
while (m != NULL) {
next = m->m_nextpkt;
m->m_nextpkt = NULL;
m_freem(m);
m = next;
}
return (ENOBUFS);
}
#endif
#ifdef notyet
qidx = count = 0;
mp = marr;
next = m;
do {
count++;
next = next->m_nextpkt;
} while (next != NULL);
if (count > nitems(marr))
if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
/* XXX check nextpkt */
m_freem(m);
/* XXX simplify for now */
DBG_COUNTER_INC(tx_frees);
return (ENOBUFS);
}
for (next = m, i = 0; next != NULL; i++) {
mp[i] = next;
next = next->m_nextpkt;
mp[i]->m_nextpkt = NULL;
}
#endif
DBG_COUNTER_INC(tx_seen);
err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE);
GROUPTASK_ENQUEUE(&txq->ift_task);
if (err) {
/* backpressure support forthcoming */
#ifdef DRIVER_BACKPRESSURE
txq->ift_closed = TRUE;
#endif
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
m_freem(m);
}
return (err);
}
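/*
 * Flush all TX queues: set IFC_QFLUSH, drain each queue's mp_ring
 * until it is idle or stalled, clear the flag, and then hand off to
 * the generic if_qflush().
 */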
static void
iflib_if_qflush(if_t ifp)
{
if_ctx_t ctx = if_getsoftc(ifp);
iflib_txq_t txq = ctx->ifc_txqs;
int i;
CTX_LOCK(ctx);
ctx->ifc_flags |= IFC_QFLUSH;
CTX_UNLOCK(ctx);
for (i = 0; i < NTXQSETS(ctx); i++, txq++)
while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
iflib_txq_check_drain(txq, 0);
CTX_LOCK(ctx);
ctx->ifc_flags &= ~IFC_QFLUSH;
CTX_UNLOCK(ctx);
if_qflush(ifp);
}
#define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
static int
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
if_ctx_t ctx = if_getsoftc(ifp);
struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
struct ifaddr *ifa = (struct ifaddr *)data;
#endif
bool avoid_reset = FALSE;
int err = 0, reinit = 0, bits;
switch (command) {
case SIOCSIFADDR:
#ifdef INET
if (ifa->ifa_addr->sa_family == AF_INET)
avoid_reset = TRUE;
#endif
#ifdef INET6
if (ifa->ifa_addr->sa_family == AF_INET6)
avoid_reset = TRUE;
#endif
/*
** Calling init results in link renegotiation,
** so we avoid doing it when possible.
*/
if (avoid_reset) {
if_setflagbits(ifp, IFF_UP,0);
if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING))
reinit = 1;
#ifdef INET
if (!(if_getflags(ifp) & IFF_NOARP))
arp_ifinit(ifp, ifa);
#endif
} else
err = ether_ioctl(ifp, command, data);
break;
case SIOCSIFMTU:
CTX_LOCK(ctx);
if (ifr->ifr_mtu == if_getmtu(ifp)) {
CTX_UNLOCK(ctx);
break;
}
bits = if_getdrvflags(ifp);
/* stop the driver and free any clusters before proceeding */
iflib_stop(ctx);
if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
ctx->ifc_flags |= IFC_MULTISEG;
else
ctx->ifc_flags &= ~IFC_MULTISEG;
err = if_setmtu(ifp, ifr->ifr_mtu);
}
iflib_init_locked(ctx);
if_setdrvflags(ifp, bits);
CTX_UNLOCK(ctx);
break;
case SIOCSIFFLAGS:
CTX_LOCK(ctx);
if (if_getflags(ifp) & IFF_UP) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
}
} else
reinit = 1;
} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
iflib_stop(ctx);
}
ctx->ifc_if_flags = if_getflags(ifp);
CTX_UNLOCK(ctx);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
CTX_LOCK(ctx);
IFDI_INTR_DISABLE(ctx);
IFDI_MULTI_SET(ctx);
IFDI_INTR_ENABLE(ctx);
CTX_UNLOCK(ctx);
}
break;
case SIOCSIFMEDIA:
CTX_LOCK(ctx);
IFDI_MEDIA_SET(ctx);
CTX_UNLOCK(ctx);
/* falls thru */
case SIOCGIFMEDIA:
case SIOCGIFXMEDIA:
err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
break;
case SIOCGI2C:
{
struct ifi2creq i2c;
err = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
if (err != 0)
break;
if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
err = EINVAL;
break;
}
if (i2c.len > sizeof(i2c.data)) {
err = EINVAL;
break;
}
if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
err = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
break;
}
case SIOCSIFCAP:
{
int mask, setmask;
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
setmask = 0;
#ifdef TCP_OFFLOAD
setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
#endif
setmask |= (mask & IFCAP_FLAGS);
if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
if ((mask & IFCAP_WOL) &&
(if_getcapabilities(ifp) & IFCAP_WOL) != 0)
setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
if_vlancap(ifp);
/*
 * We want to ensure that traffic has stopped before we change any of
 * the capability flags.
 */
if (setmask) {
CTX_LOCK(ctx);
bits = if_getdrvflags(ifp);
if (bits & IFF_DRV_RUNNING)
iflib_stop(ctx);
if_togglecapenable(ifp, setmask);
if (bits & IFF_DRV_RUNNING)
iflib_init_locked(ctx);
if_setdrvflags(ifp, bits);
CTX_UNLOCK(ctx);
}
break;
}
case SIOCGPRIVATE_0:
case SIOCSDRVSPEC:
case SIOCGDRVSPEC:
CTX_LOCK(ctx);
err = IFDI_PRIV_IOCTL(ctx, command, data);
CTX_UNLOCK(ctx);
break;
default:
err = ether_ioctl(ifp, command, data);
break;
}
if (reinit)
iflib_if_init(ctx);
return (err);
}
static uint64_t
iflib_if_get_counter(if_t ifp, ift_counter cnt)
{
if_ctx_t ctx = if_getsoftc(ifp);
return (IFDI_GET_COUNTER(ctx, cnt));
}
/*********************************************************************
*
* OTHER FUNCTIONS EXPORTED TO THE STACK
*
**********************************************************************/
static void
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
{
if_ctx_t ctx = if_getsoftc(ifp);
if ((void *)ctx != arg)
return;
if ((vtag == 0) || (vtag > 4095))
return;
CTX_LOCK(ctx);
IFDI_VLAN_REGISTER(ctx, vtag);
/* Re-init to load the changes */
if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
iflib_if_init_locked(ctx);
CTX_UNLOCK(ctx);
}
static void
iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
{
if_ctx_t ctx = if_getsoftc(ifp);
if ((void *)ctx != arg)
return;
if ((vtag == 0) || (vtag > 4095))
return;
CTX_LOCK(ctx);
IFDI_VLAN_UNREGISTER(ctx, vtag);
/* Re-init to load the changes */
if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
iflib_if_init_locked(ctx);
CTX_UNLOCK(ctx);
}
static void
iflib_led_func(void *arg, int onoff)
{
if_ctx_t ctx = arg;
CTX_LOCK(ctx);
IFDI_LED_FUNC(ctx, onoff);
CTX_UNLOCK(ctx);
}
/*********************************************************************
*
* BUS FUNCTION DEFINITIONS
*
**********************************************************************/
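/*
 * Generic PCI probe: walk the driver-supplied vendor info table and
 * match on vendor/device id, treating subvendor, subdevice and
 * revision as wildcards when the table entry leaves them at zero.
 */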
int
iflib_device_probe(device_t dev)
{
pci_vendor_info_t *ent;
uint16_t pci_vendor_id, pci_device_id;
uint16_t pci_subvendor_id, pci_subdevice_id;
uint16_t pci_rev_id;
if_shared_ctx_t sctx;
if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
return (ENOTSUP);
pci_vendor_id = pci_get_vendor(dev);
pci_device_id = pci_get_device(dev);
pci_subvendor_id = pci_get_subvendor(dev);
pci_subdevice_id = pci_get_subdevice(dev);
pci_rev_id = pci_get_revid(dev);
if (sctx->isc_parse_devinfo != NULL)
sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
ent = sctx->isc_vendor_info;
while (ent->pvi_vendor_id != 0) {
if (pci_vendor_id != ent->pvi_vendor_id) {
ent++;
continue;
}
if ((pci_device_id == ent->pvi_device_id) &&
((pci_subvendor_id == ent->pvi_subvendor_id) ||
(ent->pvi_subvendor_id == 0)) &&
((pci_subdevice_id == ent->pvi_subdevice_id) ||
(ent->pvi_subdevice_id == 0)) &&
((pci_rev_id == ent->pvi_rev_id) ||
(ent->pvi_rev_id == 0))) {
device_set_desc_copy(dev, ent->pvi_name);
/* This needs to be changed to zero if the bus probing code
 * ever stops re-probing on best match, because the sctx
 * may have its values overwritten by register calls
 * in subsequent probes.
 */
return (BUS_PROBE_DEFAULT);
}
ent++;
}
return (ENXIO);
}
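/*
 * Core attach path shared by all iflib drivers: allocate the context,
 * clamp the descriptor counts to the driver's min/max, run
 * IFDI_ATTACH_PRE, size the interrupt vectors (MSI-X, MSI or legacy),
 * allocate the queue sets, attach the ifnet and finish with
 * IFDI_ATTACH_POST.
 */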
int
iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
{
int err, rid, msix, msix_bar;
if_ctx_t ctx;
if_t ifp;
if_softc_ctx_t scctx;
int i;
uint16_t main_txq;
uint16_t main_rxq;
ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
if (sc == NULL) {
sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
device_set_softc(dev, ctx);
ctx->ifc_flags |= IFC_SC_ALLOCATED;
}
ctx->ifc_sctx = sctx;
ctx->ifc_dev = dev;
ctx->ifc_softc = sc;
if ((err = iflib_register(ctx)) != 0) {
device_printf(dev, "iflib_register failed %d\n", err);
return (err);
}
iflib_add_device_sysctl_pre(ctx);
scctx = &ctx->ifc_softc_ctx;
ifp = ctx->ifc_ifp;
/*
* XXX sanity check that ntxd & nrxd are a power of 2
*/
if (ctx->ifc_sysctl_ntxqs != 0)
scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
if (ctx->ifc_sysctl_nrxqs != 0)
scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
for (i = 0; i < sctx->isc_ntxqs; i++) {
if (ctx->ifc_sysctl_ntxds[i] != 0)
scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
else
scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
}
for (i = 0; i < sctx->isc_nrxqs; i++) {
if (ctx->ifc_sysctl_nrxds[i] != 0)
scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
else
scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
}
for (i = 0; i < sctx->isc_nrxqs; i++) {
if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
}
if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
}
}
for (i = 0; i < sctx->isc_ntxqs; i++) {
if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
}
if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
}
}
if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
return (err);
}
_iflib_pre_assert(scctx);
ctx->ifc_txrx = *scctx->isc_txrx;
#ifdef INVARIANTS
MPASS(scctx->isc_capenable);
if (scctx->isc_capenable & IFCAP_TXCSUM)
MPASS(scctx->isc_tx_csum_flags);
#endif
if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
#ifdef ACPI_DMAR
if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
ctx->ifc_flags |= IFC_DMAR;
#elif !(defined(__i386__) || defined(__amd64__))
/* set unconditionally for !x86 */
ctx->ifc_flags |= IFC_DMAR;
#endif
msix_bar = scctx->isc_msix_bar;
main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
/* XXX change for per-queue sizes */
device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
for (i = 0; i < sctx->isc_nrxqs; i++) {
if (!powerof2(scctx->isc_nrxd[i])) {
/* round down instead? */
device_printf(dev, "# rx descriptors must be a power of 2\n");
err = EINVAL;
goto fail;
}
}
for (i = 0; i < sctx->isc_ntxqs; i++) {
if (!powerof2(scctx->isc_ntxd[i])) {
device_printf(dev,
"# tx descriptors must be a power of 2\n");
err = EINVAL;
goto fail;
}
}
if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
MAX_SINGLE_PACKET_FRACTION)
scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
MAX_SINGLE_PACKET_FRACTION);
if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
MAX_SINGLE_PACKET_FRACTION)
scctx->isc_tx_tso_segments_max = max(1,
scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
/*
* Protect the stack against modern hardware
*/
if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX)
scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX;
/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max;
ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max;
ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
if (scctx->isc_rss_table_size == 0)
scctx->isc_rss_table_size = 64;
scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
/* XXX format name */
taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
/* Set up cpu set. If it fails, use the set of all CPUs. */
if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
device_printf(dev, "Unable to fetch CPU list\n");
CPU_COPY(&all_cpus, &ctx->ifc_cpus);
}
MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
/*
** Now set up MSI or MSI-X; this should
** return the number of supported
** vectors. (Will be 1 for MSI.)
*/
if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
msix = scctx->isc_vectors;
} else if (scctx->isc_msix_bar != 0)
/*
* The simple fact that isc_msix_bar is not 0 does not mean
* we have a good value there that is known to work.
*/
msix = iflib_msix_init(ctx);
else {
scctx->isc_vectors = 1;
scctx->isc_ntxqsets = 1;
scctx->isc_nrxqsets = 1;
scctx->isc_intr = IFLIB_INTR_LEGACY;
msix = 0;
}
/* Get memory for the station queues */
if ((err = iflib_queues_alloc(ctx))) {
device_printf(dev, "Unable to allocate queue memory\n");
goto fail;
}
if ((err = iflib_qset_structures_setup(ctx))) {
device_printf(dev, "qset structure setup failed %d\n", err);
goto fail_queues;
}
/*
* Group taskqueues aren't properly set up until SMP is started,
* so we disable interrupts until we can handle them post
* SI_SUB_SMP.
*
* XXX: disabling interrupts doesn't actually work, at least for
* the non-MSI case. When they occur before SI_SUB_SMP completes,
* we do null handling and depend on this not causing too large an
* interrupt storm.
*/
IFDI_INTR_DISABLE(ctx);
if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
goto fail_intr_free;
}
if (msix <= 1) {
rid = 0;
if (scctx->isc_intr == IFLIB_INTR_MSI) {
MPASS(msix == 1);
rid = 1;
}
if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
device_printf(dev, "iflib_legacy_setup failed %d\n", err);
goto fail_intr_free;
}
}
ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
goto fail_detach;
}
if ((err = iflib_netmap_attach(ctx))) {
device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
goto fail_detach;
}
*ctxp = ctx;
if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
iflib_add_device_sysctl_post(ctx);
ctx->ifc_flags |= IFC_INIT_DONE;
return (0);
fail_detach:
ether_ifdetach(ctx->ifc_ifp);
fail_intr_free:
if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
pci_release_msi(ctx->ifc_dev);
fail_queues:
/* XXX free queues */
fail:
IFDI_DETACH(ctx);
return (err);
}
int
iflib_device_attach(device_t dev)
{
if_ctx_t ctx;
if_shared_ctx_t sctx;
if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
return (ENOTSUP);
pci_enable_busmaster(dev);
return (iflib_device_register(dev, NULL, sctx, &ctx));
}
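/*
 * Tear down everything iflib_device_register() set up: detach the
 * ifnet and netmap state, drain and detach the per-queue tasks,
 * release the interrupt resources, and free the queue structures and
 * softc.
 */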
int
iflib_device_deregister(if_ctx_t ctx)
{
if_t ifp = ctx->ifc_ifp;
iflib_txq_t txq;
iflib_rxq_t rxq;
device_t dev = ctx->ifc_dev;
int i, j;
struct taskqgroup *tqg;
iflib_fl_t fl;
/* Make sure VLANS are not using driver */
if (if_vlantrunkinuse(ifp)) {
device_printf(dev,"Vlan in use, detach first\n");
return (EBUSY);
}
CTX_LOCK(ctx);
ctx->ifc_in_detach = 1;
iflib_stop(ctx);
CTX_UNLOCK(ctx);
/* Unregister VLAN events */
if (ctx->ifc_vlan_attach_event != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
if (ctx->ifc_vlan_detach_event != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
iflib_netmap_detach(ifp);
ether_ifdetach(ifp);
/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
CTX_LOCK_DESTROY(ctx);
if (ctx->ifc_led_dev != NULL)
led_destroy(ctx->ifc_led_dev);
/* XXX drain any dependent tasks */
tqg = qgroup_if_io_tqg;
for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
callout_drain(&txq->ift_timer);
if (txq->ift_task.gt_uniq != NULL)
taskqgroup_detach(tqg, &txq->ift_task);
}
for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
if (rxq->ifr_task.gt_uniq != NULL)
taskqgroup_detach(tqg, &rxq->ifr_task);
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
free(fl->ifl_rx_bitmap, M_IFLIB);
}
tqg = qgroup_if_config_tqg;
if (ctx->ifc_admin_task.gt_uniq != NULL)
taskqgroup_detach(tqg, &ctx->ifc_admin_task);
if (ctx->ifc_vflr_task.gt_uniq != NULL)
taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
IFDI_DETACH(ctx);
device_set_softc(ctx->ifc_dev, NULL);
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
pci_release_msi(dev);
}
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
}
if (ctx->ifc_msix_mem != NULL) {
bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
ctx->ifc_msix_mem = NULL;
}
bus_generic_detach(dev);
if_free(ifp);
iflib_tx_structures_free(ctx);
iflib_rx_structures_free(ctx);
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
free(ctx->ifc_softc, M_IFLIB);
free(ctx, M_IFLIB);
return (0);
}
int
iflib_device_detach(device_t dev)
{
if_ctx_t ctx = device_get_softc(dev);
return (iflib_device_deregister(ctx));
}
int
iflib_device_suspend(device_t dev)
{
if_ctx_t ctx = device_get_softc(dev);
CTX_LOCK(ctx);
IFDI_SUSPEND(ctx);
CTX_UNLOCK(ctx);
return bus_generic_suspend(dev);
}
int
iflib_device_shutdown(device_t dev)
{
if_ctx_t ctx = device_get_softc(dev);
CTX_LOCK(ctx);
IFDI_SHUTDOWN(ctx);
CTX_UNLOCK(ctx);
return bus_generic_suspend(dev);
}
int
iflib_device_resume(device_t dev)
{
if_ctx_t ctx = device_get_softc(dev);
iflib_txq_t txq = ctx->ifc_txqs;
CTX_LOCK(ctx);
IFDI_RESUME(ctx);
iflib_init_locked(ctx);
CTX_UNLOCK(ctx);
for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
return (bus_generic_resume(dev));
}
int
iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
int error;
if_ctx_t ctx = device_get_softc(dev);
CTX_LOCK(ctx);
error = IFDI_IOV_INIT(ctx, num_vfs, params);
CTX_UNLOCK(ctx);
return (error);
}
void
iflib_device_iov_uninit(device_t dev)
{
if_ctx_t ctx = device_get_softc(dev);
CTX_LOCK(ctx);
IFDI_IOV_UNINIT(ctx);
CTX_UNLOCK(ctx);
}
int
iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
int error;
if_ctx_t ctx = device_get_softc(dev);
CTX_LOCK(ctx);
error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
CTX_UNLOCK(ctx);
return (error);
}
/*********************************************************************
*
* MODULE FUNCTION DEFINITIONS
*
**********************************************************************/
/*
* - Start a fast taskqueue thread for each core
* - Start a taskqueue for control operations
*/
static int
iflib_module_init(void)
{
return (0);
}
static int
iflib_module_event_handler(module_t mod, int what, void *arg)
{
int err;
switch (what) {
case MOD_LOAD:
if ((err = iflib_module_init()) != 0)
return (err);
break;
case MOD_UNLOAD:
return (EBUSY);
default:
return (EOPNOTSUPP);
}
return (0);
}
/*********************************************************************
*
* PUBLIC FUNCTION DEFINITIONS
* ordered as in iflib.h
*
**********************************************************************/
static void
_iflib_assert(if_shared_ctx_t sctx)
{
MPASS(sctx->isc_tx_maxsize);
MPASS(sctx->isc_tx_maxsegsize);
MPASS(sctx->isc_rx_maxsize);
MPASS(sctx->isc_rx_nsegments);
MPASS(sctx->isc_rx_maxsegsize);
MPASS(sctx->isc_nrxd_min[0]);
MPASS(sctx->isc_nrxd_max[0]);
MPASS(sctx->isc_nrxd_default[0]);
MPASS(sctx->isc_ntxd_min[0]);
MPASS(sctx->isc_ntxd_max[0]);
MPASS(sctx->isc_ntxd_default[0]);
}
static void
_iflib_pre_assert(if_softc_ctx_t scctx)
{
MPASS(scctx->isc_txrx->ift_txd_encap);
MPASS(scctx->isc_txrx->ift_txd_flush);
MPASS(scctx->isc_txrx->ift_txd_credits_update);
MPASS(scctx->isc_txrx->ift_rxd_available);
MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
MPASS(scctx->isc_txrx->ift_rxd_refill);
MPASS(scctx->isc_txrx->ift_rxd_flush);
}
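/*
 * Allocate and initialize the ifnet, compile the driver's kobj method
 * table, wire up the iflib if_* methods, and register the VLAN
 * config/unconfig event handlers and ifmedia callbacks.
 */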
static int
iflib_register(if_ctx_t ctx)
{
if_shared_ctx_t sctx = ctx->ifc_sctx;
driver_t *driver = sctx->isc_driver;
device_t dev = ctx->ifc_dev;
if_t ifp;
_iflib_assert(sctx);
CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "can not allocate ifnet structure\n");
return (ENOMEM);
}
/*
* Initialize our context's device specific methods
*/
kobj_init((kobj_t) ctx, (kobj_class_t) driver);
kobj_class_compile((kobj_class_t) driver);
driver->refs++;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setsoftc(ifp, ctx);
if_setdev(ifp, dev);
if_setinitfn(ifp, iflib_if_init);
if_setioctlfn(ifp, iflib_if_ioctl);
if_settransmitfn(ifp, iflib_if_transmit);
if_setqflushfn(ifp, iflib_if_qflush);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
ctx->ifc_vlan_attach_event =
EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
EVENTHANDLER_PRI_FIRST);
ctx->ifc_vlan_detach_event =
EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
EVENTHANDLER_PRI_FIRST);
ifmedia_init(&ctx->ifc_media, IFM_IMASK,
iflib_media_change, iflib_media_status);
return (0);
}
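/*
 * Allocate the software TX/RX queue state and the DMA-backed
 * descriptor rings, then hand the ring addresses to the driver via
 * IFDI_TX_QUEUES_ALLOC()/IFDI_RX_QUEUES_ALLOC().
 */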
static int
iflib_queues_alloc(if_ctx_t ctx)
{
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
int nrxqsets = scctx->isc_nrxqsets;
int ntxqsets = scctx->isc_ntxqsets;
iflib_txq_t txq;
iflib_rxq_t rxq;
iflib_fl_t fl = NULL;
int i, j, cpu, err, txconf, rxconf;
iflib_dma_info_t ifdip;
uint32_t *rxqsizes = scctx->isc_rxqsizes;
uint32_t *txqsizes = scctx->isc_txqsizes;
uint8_t nrxqs = sctx->isc_nrxqs;
uint8_t ntxqs = sctx->isc_ntxqs;
int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
caddr_t *vaddrs;
uint64_t *paddrs;
struct ifmp_ring **brscp;
KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
brscp = NULL;
txq = NULL;
rxq = NULL;
/* Allocate the TX ring struct memory */
if (!(txq =
- (iflib_txq_t) mallocarray(ntxqsets, sizeof(struct iflib_txq),
- M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
+ ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate TX ring memory\n");
err = ENOMEM;
goto fail;
}
/* Now allocate the RX */
if (!(rxq =
- (iflib_rxq_t) mallocarray(nrxqsets, sizeof(struct iflib_rxq),
- M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
+ nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate RX ring memory\n");
err = ENOMEM;
goto rx_fail;
}
ctx->ifc_txqs = txq;
ctx->ifc_rxqs = rxq;
/*
* XXX handle allocation failure
*/
for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
/* Set up some basics */
if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
device_printf(dev, "failed to allocate iflib_dma_info\n");
err = ENOMEM;
goto err_tx_desc;
}
txq->ift_ifdi = ifdip;
for (j = 0; j < ntxqs; j++, ifdip++) {
if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
device_printf(dev, "Unable to allocate Descriptor memory\n");
err = ENOMEM;
goto err_tx_desc;
}
txq->ift_txd_size[j] = scctx->isc_txd_size[j];
bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
}
txq->ift_ctx = ctx;
txq->ift_id = i;
if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
txq->ift_br_offset = 1;
} else {
txq->ift_br_offset = 0;
}
/* XXX fix this */
txq->ift_timer.c_cpu = cpu;
if (iflib_txsd_alloc(txq)) {
device_printf(dev, "Critical Failure setting up TX buffers\n");
err = ENOMEM;
goto err_tx_desc;
}
/* Initialize the TX lock */
snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
device_get_nameunit(dev), txq->ift_id);
mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
device_get_nameunit(dev), txq->ift_id);
err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
iflib_txq_can_drain, M_IFLIB, M_WAITOK);
if (err) {
/* XXX free any allocated rings */
device_printf(dev, "Unable to allocate buf_ring\n");
goto err_tx_desc;
}
}
for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
/* Set up some basics */
if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
device_printf(dev, "failed to allocate iflib_dma_info\n");
err = ENOMEM;
goto err_tx_desc;
}
rxq->ifr_ifdi = ifdip;
/* XXX this needs to be changed if #rx queues != #tx queues */
rxq->ifr_ntxqirq = 1;
rxq->ifr_txqid[0] = i;
for (j = 0; j < nrxqs; j++, ifdip++) {
if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
device_printf(dev, "Unable to allocate Descriptor memory\n");
err = ENOMEM;
goto err_tx_desc;
}
bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
}
rxq->ifr_ctx = ctx;
rxq->ifr_id = i;
if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
rxq->ifr_fl_offset = 1;
} else {
rxq->ifr_fl_offset = 0;
}
rxq->ifr_nfl = nfree_lists;
if (!(fl =
- (iflib_fl_t) mallocarray(nfree_lists, sizeof(struct iflib_fl),
- M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate free list memory\n");
err = ENOMEM;
goto err_tx_desc;
}
rxq->ifr_fl = fl;
for (j = 0; j < nfree_lists; j++) {
fl[j].ifl_rxq = rxq;
fl[j].ifl_id = j;
fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
}
/* Allocate receive buffers for the ring*/
if (iflib_rxsd_alloc(rxq)) {
device_printf(dev,
"Critical Failure setting up receive buffers\n");
err = ENOMEM;
goto err_rx_desc;
}
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
}
/* TXQs */
vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
for (i = 0; i < ntxqsets; i++) {
iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
for (j = 0; j < ntxqs; j++, di++) {
vaddrs[i*ntxqs + j] = di->idi_vaddr;
paddrs[i*ntxqs + j] = di->idi_paddr;
}
}
if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
device_printf(ctx->ifc_dev, "device queue allocation failed\n");
iflib_tx_structures_free(ctx);
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);
goto err_rx_desc;
}
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);
/* RXQs */
vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
for (i = 0; i < nrxqsets; i++) {
iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
for (j = 0; j < nrxqs; j++, di++) {
vaddrs[i*nrxqs + j] = di->idi_vaddr;
paddrs[i*nrxqs + j] = di->idi_paddr;
}
}
if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
device_printf(ctx->ifc_dev, "device queue allocation failed\n");
iflib_tx_structures_free(ctx);
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);
goto err_rx_desc;
}
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);
return (0);
/* XXX handle allocation failure changes */
err_rx_desc:
err_tx_desc:
if (ctx->ifc_rxqs != NULL)
free(ctx->ifc_rxqs, M_IFLIB);
ctx->ifc_rxqs = NULL;
if (ctx->ifc_txqs != NULL)
free(ctx->ifc_txqs, M_IFLIB);
ctx->ifc_txqs = NULL;
rx_fail:
if (brscp != NULL)
free(brscp, M_IFLIB);
if (rxq != NULL)
free(rxq, M_IFLIB);
if (txq != NULL)
free(txq, M_IFLIB);
fail:
return (err);
}
static int
iflib_tx_structures_setup(if_ctx_t ctx)
{
iflib_txq_t txq = ctx->ifc_txqs;
int i;
for (i = 0; i < NTXQSETS(ctx); i++, txq++)
iflib_txq_setup(txq);
return (0);
}
static void
iflib_tx_structures_free(if_ctx_t ctx)
{
iflib_txq_t txq = ctx->ifc_txqs;
int i, j;
for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
iflib_txq_destroy(txq);
for (j = 0; j < ctx->ifc_nhwtxqs; j++)
iflib_dma_free(&txq->ift_ifdi[j]);
}
free(ctx->ifc_txqs, M_IFLIB);
ctx->ifc_txqs = NULL;
IFDI_QUEUES_FREE(ctx);
}
/*********************************************************************
*
* Initialize all receive rings.
*
**********************************************************************/
static int
iflib_rx_structures_setup(if_ctx_t ctx)
{
iflib_rxq_t rxq = ctx->ifc_rxqs;
int q;
#if defined(INET6) || defined(INET)
int i, err;
#endif
for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
tcp_lro_free(&rxq->ifr_lc);
if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
TCP_LRO_ENTRIES, min(1024,
ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
goto fail;
}
rxq->ifr_lro_enabled = TRUE;
#endif
IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
}
return (0);
#if defined(INET6) || defined(INET)
fail:
/*
 * Free the RX software descriptors allocated so far; we will only
 * handle the rings that completed, since the failing case will have
 * cleaned up for itself. 'q' failed, so it's the terminus.
 */
rxq = ctx->ifc_rxqs;
for (i = 0; i < q; ++i, rxq++) {
iflib_rx_sds_free(rxq);
rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
}
return (err);
#endif
}
/*********************************************************************
*
* Free all receive rings.
*
**********************************************************************/
static void
iflib_rx_structures_free(if_ctx_t ctx)
{
iflib_rxq_t rxq = ctx->ifc_rxqs;
for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
iflib_rx_sds_free(rxq);
}
}
static int
iflib_qset_structures_setup(if_ctx_t ctx)
{
int err;
if ((err = iflib_tx_structures_setup(ctx)) != 0)
return (err);
if ((err = iflib_rx_structures_setup(ctx)) != 0) {
device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
iflib_tx_structures_free(ctx);
iflib_rx_structures_free(ctx);
}
return (err);
}
int
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name)
{
return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
}
#ifdef SMP
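/*
 * Map a queue id onto the qid'th CPU (modulo the number of CPUs) in
 * the interrupt CPU set for this context.
 */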
static int
find_nth(if_ctx_t ctx, int qid)
{
cpuset_t cpus;
int i, cpuid, eqid, count;
CPU_COPY(&ctx->ifc_cpus, &cpus);
count = CPU_COUNT(&cpus);
eqid = qid % count;
/* clear up to the qid'th bit */
for (i = 0; i < eqid; i++) {
cpuid = CPU_FFS(&cpus);
MPASS(cpuid != 0);
CPU_CLR(cpuid-1, &cpus);
}
cpuid = CPU_FFS(&cpus);
MPASS(cpuid != 0);
return (cpuid-1);
}
#ifdef SCHED_ULE
extern struct cpu_group *cpu_top; /* CPU topology */
static int
find_child_with_core(int cpu, struct cpu_group *grp)
{
int i;
if (grp->cg_children == 0)
return -1;
MPASS(grp->cg_child);
for (i = 0; i < grp->cg_children; i++) {
if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
return i;
}
return -1;
}
/*
* Find the nth thread on the specified core
*/
static int
find_thread(int cpu, int thread_num)
{
struct cpu_group *grp;
int i;
cpuset_t cs;
grp = cpu_top;
if (grp == NULL)
return cpu;
i = 0;
while ((i = find_child_with_core(cpu, grp)) != -1) {
/* If the child only has one cpu, don't descend */
if (grp->cg_child[i].cg_count <= 1)
break;
grp = &grp->cg_child[i];
}
/* If they don't share at least an L2 cache, use the same CPU */
if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
return cpu;
/* Now pick one */
CPU_COPY(&grp->cg_mask, &cs);
for (i = thread_num % grp->cg_count; i > 0; i--) {
MPASS(CPU_FFS(&cs));
CPU_CLR(CPU_FFS(&cs) - 1, &cs);
}
MPASS(CPU_FFS(&cs));
return CPU_FFS(&cs) - 1;
}
#else
static int
find_thread(int cpu, int thread_num __unused)
{
return cpu;
}
#endif
static int
get_thread_num(if_ctx_t ctx, iflib_intr_type_t type, int qid)
{
switch (type) {
case IFLIB_INTR_TX:
/* TX queues get threads on the same core as the corresponding RX queue */
/* XXX handle multiple RX threads per core and more than two threads per core */
return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
case IFLIB_INTR_RX:
case IFLIB_INTR_RXTX:
/* RX queues get the first thread on their core */
return qid / CPU_COUNT(&ctx->ifc_cpus);
default:
return -1;
}
}
#else
#define get_thread_num(ctx, type, qid) CPU_FIRST()
#define find_thread(cpuid, tid) CPU_FIRST()
#define find_nth(ctx, gid) CPU_FIRST()
#endif
/* Just to avoid copy/paste */
static inline int
iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, char *name)
{
int cpuid;
int err, tid;
cpuid = find_nth(ctx, qid);
tid = get_thread_num(ctx, type, qid);
MPASS(tid >= 0);
cpuid = find_thread(cpuid, tid);
err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
if (err) {
device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
return (err);
}
#ifdef notyet
if (cpuid > ctx->ifc_cpuid_highest)
ctx->ifc_cpuid_highest = cpuid;
#endif
return 0;
}
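/*
 * Allocate a device interrupt and bind its deferred work: select the
 * grouptask, taskqueue group and fast interrupt handler for the given
 * queue type, allocate the IRQ, and attach the grouptask to a CPU
 * chosen from the context's CPU set.
 */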
int
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
iflib_intr_type_t type, driver_filter_t *filter,
void *filter_arg, int qid, char *name)
{
struct grouptask *gtask;
struct taskqgroup *tqg;
iflib_filter_info_t info;
gtask_fn_t *fn;
int tqrid, err;
driver_filter_t *intr_fast;
void *q;
info = &ctx->ifc_filter_info;
tqrid = rid;
switch (type) {
/* XXX merge tx/rx for netmap? */
case IFLIB_INTR_TX:
q = &ctx->ifc_txqs[qid];
info = &ctx->ifc_txqs[qid].ift_filter_info;
gtask = &ctx->ifc_txqs[qid].ift_task;
tqg = qgroup_if_io_tqg;
fn = _task_fn_tx;
intr_fast = iflib_fast_intr;
GROUPTASK_INIT(gtask, 0, fn, q);
break;
case IFLIB_INTR_RX:
q = &ctx->ifc_rxqs[qid];
info = &ctx->ifc_rxqs[qid].ifr_filter_info;
gtask = &ctx->ifc_rxqs[qid].ifr_task;
tqg = qgroup_if_io_tqg;
fn = _task_fn_rx;
intr_fast = iflib_fast_intr;
GROUPTASK_INIT(gtask, 0, fn, q);
break;
case IFLIB_INTR_RXTX:
q = &ctx->ifc_rxqs[qid];
info = &ctx->ifc_rxqs[qid].ifr_filter_info;
gtask = &ctx->ifc_rxqs[qid].ifr_task;
tqg = qgroup_if_io_tqg;
fn = _task_fn_rx;
intr_fast = iflib_fast_intr_rxtx;
GROUPTASK_INIT(gtask, 0, fn, q);
break;
case IFLIB_INTR_ADMIN:
q = ctx;
tqrid = -1;
info = &ctx->ifc_filter_info;
gtask = &ctx->ifc_admin_task;
tqg = qgroup_if_config_tqg;
fn = _task_fn_admin;
intr_fast = iflib_fast_intr_ctx;
break;
default:
panic("unknown net intr type");
}
info->ifi_filter = filter;
info->ifi_filter_arg = filter_arg;
info->ifi_task = gtask;
info->ifi_ctx = q;
err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
if (err != 0) {
device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
return (err);
}
if (type == IFLIB_INTR_ADMIN)
return (0);
if (tqrid != -1) {
err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
if (err)
return (err);
} else {
taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
}
return (0);
}
void
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, char *name)
{
struct grouptask *gtask;
struct taskqgroup *tqg;
gtask_fn_t *fn;
void *q;
int irq_num = -1;
int err;
switch (type) {
case IFLIB_INTR_TX:
q = &ctx->ifc_txqs[qid];
gtask = &ctx->ifc_txqs[qid].ift_task;
tqg = qgroup_if_io_tqg;
fn = _task_fn_tx;
if (irq != NULL)
irq_num = rman_get_start(irq->ii_res);
break;
case IFLIB_INTR_RX:
q = &ctx->ifc_rxqs[qid];
gtask = &ctx->ifc_rxqs[qid].ifr_task;
tqg = qgroup_if_io_tqg;
fn = _task_fn_rx;
if (irq != NULL)
irq_num = rman_get_start(irq->ii_res);
break;
case IFLIB_INTR_IOV:
q = ctx;
gtask = &ctx->ifc_vflr_task;
tqg = qgroup_if_config_tqg;
fn = _task_fn_iov;
break;
default:
panic("unknown net intr type");
}
GROUPTASK_INIT(gtask, 0, fn, q);
if (irq_num != -1) {
err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
if (err)
taskqgroup_attach(tqg, gtask, q, irq_num, name);
}
else {
taskqgroup_attach(tqg, gtask, q, irq_num, name);
}
}
void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{
if (irq->ii_tag)
bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
if (irq->ii_res)
bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
}
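/*
 * Legacy/MSI setup: a single interrupt services the whole context, so
 * the RX filter runs in the fast handler and both the RX and TX
 * grouptasks are attached to that one IRQ.
 */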
static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name)
{
iflib_txq_t txq = ctx->ifc_txqs;
iflib_rxq_t rxq = ctx->ifc_rxqs;
if_irq_t irq = &ctx->ifc_legacy_irq;
iflib_filter_info_t info;
struct grouptask *gtask;
struct taskqgroup *tqg;
gtask_fn_t *fn;
int tqrid;
void *q;
int err;
q = &ctx->ifc_rxqs[0];
info = &rxq[0].ifr_filter_info;
gtask = &rxq[0].ifr_task;
tqg = qgroup_if_io_tqg;
tqrid = irq->ii_rid = *rid;
fn = _task_fn_rx;
ctx->ifc_flags |= IFC_LEGACY;
info->ifi_filter = filter;
info->ifi_filter_arg = filter_arg;
info->ifi_task = gtask;
info->ifi_ctx = ctx;
/* We allocate a single interrupt resource */
if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
return (err);
GROUPTASK_INIT(gtask, 0, fn, q);
taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
return (0);
}
void
iflib_led_create(if_ctx_t ctx)
{
ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
device_get_nameunit(ctx->ifc_dev));
}
void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{
GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}
void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{
GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}
void
iflib_admin_intr_deferred(if_ctx_t ctx)
{
#ifdef INVARIANTS
struct grouptask *gtask;
gtask = &ctx->ifc_admin_task;
MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
#endif
GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}
void
iflib_iov_intr_deferred(if_ctx_t ctx)
{
GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}
void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
{
taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
}
void
iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn,
char *name)
{
GROUPTASK_INIT(gtask, 0, fn, ctx);
taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
}
void
iflib_config_gtask_deinit(struct grouptask *gtask)
{
taskqgroup_detach(qgroup_if_config_tqg, gtask);
}
void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
if_t ifp = ctx->ifc_ifp;
iflib_txq_t txq = ctx->ifc_txqs;
if_setbaudrate(ifp, baudrate);
if (baudrate >= IF_Gbps(10))
ctx->ifc_flags |= IFC_PREFETCH;
/* If link down, disable watchdog */
if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
}
ctx->ifc_link_state = link_state;
if_link_state_change(ifp, link_state);
}
static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
int credits;
#ifdef INVARIANTS
int credits_pre = txq->ift_cidx_processed;
#endif
if (ctx->isc_txd_credits_update == NULL)
return (0);
if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
return (0);
txq->ift_processed += credits;
txq->ift_cidx_processed += credits;
MPASS(credits_pre + credits == txq->ift_cidx_processed);
if (txq->ift_cidx_processed >= txq->ift_size)
txq->ift_cidx_processed -= txq->ift_size;
return (credits);
}
static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{
return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
budget));
}
void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
const char *description, if_int_delay_info_t info,
int offset, int value)
{
info->iidi_ctx = ctx;
info->iidi_offset = offset;
info->iidi_value = value;
SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
info, 0, iflib_sysctl_int_delay, "I", description);
}
struct mtx *
iflib_ctx_lock_get(if_ctx_t ctx)
{
return (&ctx->ifc_mtx);
}
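/*
 * Size and allocate the interrupt vectors. Prefer MSI-X with one
 * vector per RX queue plus the admin count, clamping the queue count
 * to the available CPUs (and RSS buckets, if enabled); fall back to
 * MSI and then to a legacy interrupt when MSI-X cannot be used.
 */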
static int
iflib_msix_init(if_ctx_t ctx)
{
device_t dev = ctx->ifc_dev;
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
int iflib_num_tx_queues, iflib_num_rx_queues;
int err, admincnt, bar;
iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
bar = ctx->ifc_softc_ctx.isc_msix_bar;
admincnt = sctx->isc_admin_intrcnt;
/* Override by global tuneable */
{
int i;
size_t len = sizeof(i);
err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0);
if (err == 0) {
if (i == 0)
goto msi;
}
else {
device_printf(dev, "unable to read hw.pci.enable_msix\n");
}
}
/* Override by tuneable */
if (scctx->isc_disable_msix)
goto msi;
/*
** When used in a virtualized environment the
** PCI BUSMASTER capability may not be set, so
** explicitly set it here and rewrite the ENABLE
** bit in the MSI-X control register at this
** point to cause the host to successfully
** initialize us.
*/
{
int msix_ctrl, rid;
pci_enable_busmaster(dev);
rid = 0;
if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) {
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
} else {
device_printf(dev, "PCIY_MSIX capability not found; "
"or rid %d == 0.\n", rid);
goto msi;
}
}
/*
 * bar == -1 => "trust me I know what I'm doing"
 * Some drivers are for hardware that is so shoddily
 * documented that no one knows which bars are which,
 * so the developer has to map all bars. This hack
 * allows shoddy garbage to use MSI-X in this framework.
 */
if (bar != -1) {
ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &bar, RF_ACTIVE);
if (ctx->ifc_msix_mem == NULL) {
/* May not be enabled */
device_printf(dev, "Unable to map MSIX table \n");
goto msi;
}
}
/* First try MSI/X */
if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */
device_printf(dev, "System has MSIX disabled \n");
bus_release_resource(dev, SYS_RES_MEMORY,
bar, ctx->ifc_msix_mem);
ctx->ifc_msix_mem = NULL;
goto msi;
}
#if IFLIB_DEBUG
/* use only 1 qset in debug mode */
queuemsgs = min(msgs - admincnt, 1);
#else
queuemsgs = msgs - admincnt;
#endif
#ifdef RSS
queues = imin(queuemsgs, rss_getnumbuckets());
#else
queues = queuemsgs;
#endif
queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
rx_queues = iflib_num_rx_queues;
else
rx_queues = queues;
if (rx_queues > scctx->isc_nrxqsets)
rx_queues = scctx->isc_nrxqsets;
/*
* We want this to be all logical CPUs by default
*/
if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
tx_queues = iflib_num_tx_queues;
else
tx_queues = mp_ncpus;
if (tx_queues > scctx->isc_ntxqsets)
tx_queues = scctx->isc_ntxqsets;
if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
if (tx_queues != rx_queues)
device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
tx_queues = min(rx_queues, tx_queues);
rx_queues = min(rx_queues, tx_queues);
}
device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues);
vectors = rx_queues + admincnt;
if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
device_printf(dev,
"Using MSIX interrupts with %d vectors\n", vectors);
scctx->isc_vectors = vectors;
scctx->isc_nrxqsets = rx_queues;
scctx->isc_ntxqsets = tx_queues;
scctx->isc_intr = IFLIB_INTR_MSIX;
return (vectors);
} else {
device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
}
msi:
vectors = pci_msi_count(dev);
scctx->isc_nrxqsets = 1;
scctx->isc_ntxqsets = 1;
scctx->isc_vectors = vectors;
if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
device_printf(dev,"Using an MSI interrupt\n");
scctx->isc_intr = IFLIB_INTR_MSI;
} else {
device_printf(dev,"Using a Legacy interrupt\n");
scctx->isc_intr = IFLIB_INTR_LEGACY;
}
return (vectors);
}
char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
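/*
 * Sysctl handler that renders an mp_ring's producer/consumer indexes
 * and state word as a human-readable string.
 */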
static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
int rc;
uint16_t *state = ((uint16_t *)oidp->oid_arg1);
struct sbuf *sb;
char *ring_state = "UNKNOWN";
/* XXX needed ? */
rc = sysctl_wire_old_buffer(req, 0);
MPASS(rc == 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
MPASS(sb != NULL);
if (sb == NULL)
return (ENOMEM);
if (state[3] <= 3)
ring_state = ring_states[state[3]];
sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
state[0], state[1], state[2], ring_state);
rc = sbuf_finish(sb);
sbuf_delete(sb);
return(rc);
}
enum iflib_ndesc_handler {
IFLIB_NTXD_HANDLER,
IFLIB_NRXD_HANDLER,
};
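/*
 * Sysctl handler for the override_ntxds/override_nrxds strings:
 * report the current per-queue descriptor counts as a comma-separated
 * list and parse a user-supplied list back into the override array.
 */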
static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
if_ctx_t ctx = (void *)arg1;
enum iflib_ndesc_handler type = arg2;
char buf[256] = {0};
qidx_t *ndesc;
char *p, *next;
int nqs, rc, i;
MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
nqs = 8;
switch(type) {
case IFLIB_NTXD_HANDLER:
ndesc = ctx->ifc_sysctl_ntxds;
if (ctx->ifc_sctx)
nqs = ctx->ifc_sctx->isc_ntxqs;
break;
case IFLIB_NRXD_HANDLER:
ndesc = ctx->ifc_sysctl_nrxds;
if (ctx->ifc_sctx)
nqs = ctx->ifc_sctx->isc_nrxqs;
break;
}
if (nqs == 0)
nqs = 8;
for (i=0; i<8; i++) {
if (i >= nqs)
break;
if (i)
strcat(buf, ",");
sprintf(strchr(buf, 0), "%d", ndesc[i]);
}
rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
if (rc || req->newptr == NULL)
return rc;
for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
i++, p = strsep(&next, " ,")) {
ndesc[i] = strtoul(p, NULL, 10);
}
return(rc);
}
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
device_t dev = iflib_get_dev(ctx);
struct sysctl_oid_list *child, *oid_list;
struct sysctl_ctx_list *ctx_list;
struct sysctl_oid *node;
ctx_list = device_get_sysctl_ctx(dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
CTLFLAG_RD, NULL, "IFLIB fields");
oid_list = SYSCTL_CHILDREN(node);
SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
"driver version");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
"# of txqs to use, 0 => use default #");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
"# of rxqs to use, 0 => use default #");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
"permit #txq != #rxq");
SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
"disable MSIX (default 0)");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
"set the rx budget");
/* XXX change for per-queue sizes */
SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
mp_ndesc_handler, "A",
"list of # of tx descriptors to use, 0 = use default #");
SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
mp_ndesc_handler, "A",
"list of # of rx descriptors to use, 0 = use default #");
}
static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
if_shared_ctx_t sctx = ctx->ifc_sctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = iflib_get_dev(ctx);
struct sysctl_oid_list *child;
struct sysctl_ctx_list *ctx_list;
iflib_fl_t fl;
iflib_txq_t txq;
iflib_rxq_t rxq;
int i, j;
char namebuf[NAME_BUFLEN];
char *qfmt;
struct sysctl_oid *queue_node, *fl_node, *node;
struct sysctl_oid_list *queue_list, *fl_list;
ctx_list = device_get_sysctl_ctx(dev);
node = ctx->ifc_sysctl_node;
child = SYSCTL_CHILDREN(node);
if (scctx->isc_ntxqsets > 100)
qfmt = "txq%03d";
else if (scctx->isc_ntxqsets > 10)
qfmt = "txq%02d";
else
qfmt = "txq%d";
for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
snprintf(namebuf, NAME_BUFLEN, qfmt, i);
queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
CTLFLAG_RD,
&txq->ift_dequeued, "total mbufs freed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
CTLFLAG_RD,
&txq->ift_enqueued, "total mbufs enqueued");
#endif
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
CTLFLAG_RD,
&txq->ift_mbuf_defrag, "# of times m_defrag was called");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
CTLFLAG_RD,
&txq->ift_pullups, "# of times m_pullup was called");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
CTLFLAG_RD,
&txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD,
&txq->ift_no_desc_avail, "# of times no descriptors were available");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
CTLFLAG_RD,
&txq->ift_map_failed, "# of times dma map failed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
CTLFLAG_RD,
&txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
CTLFLAG_RD,
&txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
CTLFLAG_RD,
&txq->ift_pidx, 1, "Producer Index");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
CTLFLAG_RD,
&txq->ift_cidx, 1, "Consumer Index");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
CTLFLAG_RD,
&txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
CTLFLAG_RD,
&txq->ift_in_use, 1, "descriptors in use");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
CTLFLAG_RD,
&txq->ift_processed, "descriptors processed for clean");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
CTLFLAG_RD,
&txq->ift_cleaned, "total cleaned");
SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
0, mp_ring_state_handler, "A", "soft ring state");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
CTLFLAG_RD, &txq->ift_br->enqueues,
"# of enqueues to the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
CTLFLAG_RD, &txq->ift_br->drops,
"# of drops in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
CTLFLAG_RD, &txq->ift_br->starts,
"# of normal consumer starts in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
CTLFLAG_RD, &txq->ift_br->stalls,
"# of consumer stalls in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
CTLFLAG_RD, &txq->ift_br->restarts,
"# of consumer restarts in the mp_ring for this queue");
SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
CTLFLAG_RD, &txq->ift_br->abdications,
"# of consumer abdications in the mp_ring for this queue");
}
if (scctx->isc_nrxqsets > 100)
qfmt = "rxq%03d";
else if (scctx->isc_nrxqsets > 10)
qfmt = "rxq%02d";
else
qfmt = "rxq%d";
for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
snprintf(namebuf, NAME_BUFLEN, qfmt, i);
queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
CTLFLAG_RD,
&rxq->ifr_cq_pidx, 1, "Producer Index");
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
CTLFLAG_RD,
&rxq->ifr_cq_cidx, 1, "Consumer Index");
}
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "freelist Name");
fl_list = SYSCTL_CHILDREN(fl_node);
SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
CTLFLAG_RD,
&fl->ifl_pidx, 1, "Producer Index");
SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
CTLFLAG_RD,
&fl->ifl_cidx, 1, "Consumer Index");
SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
CTLFLAG_RD,
&fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
CTLFLAG_RD,
&fl->ifl_m_enqueued, "mbufs allocated");
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
CTLFLAG_RD,
&fl->ifl_m_dequeued, "mbufs freed");
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
CTLFLAG_RD,
&fl->ifl_cl_enqueued, "clusters allocated");
SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
CTLFLAG_RD,
&fl->ifl_cl_dequeued, "clusters freed");
#endif
}
}
}
#ifndef __NO_STRICT_ALIGNMENT
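/*
 * Re-align an inbound frame on strict-alignment platforms, either by
 * shifting the data within the mbuf when it fits or by splitting the
 * Ethernet header off into its own mbuf.
 */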
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
struct mbuf *n;
if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
m->m_data += ETHER_HDR_LEN;
n = m;
} else {
MGETHDR(n, M_NOWAIT, MT_DATA);
if (n == NULL) {
m_freem(m);
return (NULL);
}
bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
m->m_data += ETHER_HDR_LEN;
m->m_len -= ETHER_HDR_LEN;
n->m_len = ETHER_HDR_LEN;
M_MOVE_PKTHDR(n, m);
n->m_next = m;
}
return (n);
}
#endif
Index: head/sys/netgraph/ng_bridge.c
===================================================================
--- head/sys/netgraph/ng_bridge.c (revision 328217)
+++ head/sys/netgraph/ng_bridge.c (revision 328218)
@@ -1,1055 +1,1055 @@
/*
* ng_bridge.c
*/
/*-
* Copyright (c) 2000 Whistle Communications, Inc.
* All rights reserved.
*
* Subject to the following obligations and disclaimer of warranty, use and
* redistribution of this software, in source or object code forms, with or
* without modifications are expressly permitted by Whistle Communications;
* provided, however, that:
* 1. Any and all reproductions of the source or object code must include the
* copyright notice above and the following disclaimer of warranties; and
* 2. No rights are granted, in any manner or form, to use Whistle
* Communications, Inc. trademarks, including the mark "WHISTLE
* COMMUNICATIONS" on advertising, endorsements, or otherwise except as
* such appears in the above copyright notice or in the software.
*
* THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
* REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
* INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
* WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
* REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
* SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
* IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
* RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
* WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* Author: Archie Cobbs <archie@freebsd.org>
*
* $FreeBSD$
*/
/*
* ng_bridge(4) netgraph node type
*
* The node performs standard intelligent Ethernet bridging over
* each of its connected hooks, or links. A simple loop detection
* algorithm is included which disables a link for priv->conf.loopTimeout
* seconds when a host is seen to have jumped from one link to
* another within priv->conf.minStableAge seconds.
*
* We keep a hashtable that maps Ethernet addresses to host info,
* which is contained in struct ng_bridge_host entries. These structures
* tell us on which link the host may be found. A host's entry will
* expire after priv->conf.maxStaleness seconds.
*
* This node is optimized for stable networks, where machines jump
* from one port to the other only rarely.
*/
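/*
 * Illustrative sketch (not part of this revision): the loop-detection rule
 * described above, reduced to a standalone predicate.  The struct and the
 * example_ names below are hypothetical; the field names mirror the
 * configuration knobs mentioned in the comment (minStableAge, loopTimeout).
 */
struct example_loop_check {
	unsigned int host_age;		/* seconds since the host settled on its link */
	unsigned int minStableAge;	/* jumps younger than this imply a loop */
	unsigned int loopTimeout;	/* how long a looped link stays muted */
};

static unsigned int
example_loop_penalty(const struct example_loop_check *e, int host_moved)
{
	/* A host that jumps links before it is "stable" mutes the link. */
	if (host_moved && e->host_age < e->minStableAge)
		return (e->loopTimeout);	/* mute for this many seconds */
	return (0);				/* no loop suspected */
}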
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/ctype.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/vnet.h>
#include <netinet/in.h>
#if 0 /* not used yet */
#include <netinet/ip_fw.h>
#endif
#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_bridge.h>
#ifdef NG_SEPARATE_MALLOC
static MALLOC_DEFINE(M_NETGRAPH_BRIDGE, "netgraph_bridge",
"netgraph bridge node");
#else
#define M_NETGRAPH_BRIDGE M_NETGRAPH
#endif
/* Per-link private data */
struct ng_bridge_link {
hook_p hook; /* netgraph hook */
u_int16_t loopCount; /* loop ignore timer */
struct ng_bridge_link_stats stats; /* link stats */
};
/* Per-node private data */
struct ng_bridge_private {
struct ng_bridge_bucket *tab; /* hash table bucket array */
struct ng_bridge_link *links[NG_BRIDGE_MAX_LINKS];
struct ng_bridge_config conf; /* node configuration */
node_p node; /* netgraph node */
u_int numHosts; /* num entries in table */
u_int numBuckets; /* num buckets in table */
u_int hashMask; /* numBuckets - 1 */
int numLinks; /* num connected links */
int persistent; /* can exist w/o hooks */
struct callout timer; /* one second periodic timer */
};
typedef struct ng_bridge_private *priv_p;
/* Information about a host, stored in a hash table entry */
struct ng_bridge_hent {
struct ng_bridge_host host; /* actual host info */
SLIST_ENTRY(ng_bridge_hent) next; /* next entry in bucket */
};
/* Hash table bucket declaration */
SLIST_HEAD(ng_bridge_bucket, ng_bridge_hent);
/* Netgraph node methods */
static ng_constructor_t ng_bridge_constructor;
static ng_rcvmsg_t ng_bridge_rcvmsg;
static ng_shutdown_t ng_bridge_shutdown;
static ng_newhook_t ng_bridge_newhook;
static ng_rcvdata_t ng_bridge_rcvdata;
static ng_disconnect_t ng_bridge_disconnect;
/* Other internal functions */
static struct ng_bridge_host *ng_bridge_get(priv_p priv, const u_char *addr);
static int ng_bridge_put(priv_p priv, const u_char *addr, int linkNum);
static void ng_bridge_rehash(priv_p priv);
static void ng_bridge_remove_hosts(priv_p priv, int linkNum);
static void ng_bridge_timeout(node_p node, hook_p hook, void *arg1, int arg2);
static const char *ng_bridge_nodename(node_p node);
/* Ethernet broadcast */
static const u_char ng_bridge_bcast_addr[ETHER_ADDR_LEN] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* Store each hook's link number in the private field */
#define LINK_NUM(hook) (*(u_int16_t *)(&(hook)->private))
/* Compare Ethernet addresses using 32 and 16 bit words instead of bytewise */
#define ETHER_EQUAL(a,b) (((const u_int32_t *)(a))[0] \
== ((const u_int32_t *)(b))[0] \
&& ((const u_int16_t *)(a))[2] \
== ((const u_int16_t *)(b))[2])
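/*
 * Illustrative sketch (not part of this revision): ETHER_EQUAL() in use,
 * e.g. checking a frame's destination against the broadcast address the
 * way ng_bridge_rcvdata() does.  The example_ name is hypothetical.
 */
static int
example_is_broadcast(const u_char *dhost)
{
	return (ETHER_EQUAL(dhost, ng_bridge_bcast_addr));
}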
/* Minimum and maximum number of hash buckets. Must be a power of two. */
#define MIN_BUCKETS (1 << 5) /* 32 */
#define MAX_BUCKETS (1 << 14) /* 16384 */
/* Configuration default values */
#define DEFAULT_LOOP_TIMEOUT 60
#define DEFAULT_MAX_STALENESS (15 * 60) /* same as ARP timeout */
#define DEFAULT_MIN_STABLE_AGE 1
/******************************************************************
NETGRAPH PARSE TYPES
******************************************************************/
/*
* How to determine the length of the table returned by NGM_BRIDGE_GET_TABLE
*/
static int
ng_bridge_getTableLength(const struct ng_parse_type *type,
const u_char *start, const u_char *buf)
{
const struct ng_bridge_host_ary *const hary
= (const struct ng_bridge_host_ary *)(buf - sizeof(u_int32_t));
return hary->numHosts;
}
/* Parse type for struct ng_bridge_host_ary */
static const struct ng_parse_struct_field ng_bridge_host_type_fields[]
= NG_BRIDGE_HOST_TYPE_INFO(&ng_parse_enaddr_type);
static const struct ng_parse_type ng_bridge_host_type = {
&ng_parse_struct_type,
&ng_bridge_host_type_fields
};
static const struct ng_parse_array_info ng_bridge_hary_type_info = {
&ng_bridge_host_type,
ng_bridge_getTableLength
};
static const struct ng_parse_type ng_bridge_hary_type = {
&ng_parse_array_type,
&ng_bridge_hary_type_info
};
static const struct ng_parse_struct_field ng_bridge_host_ary_type_fields[]
= NG_BRIDGE_HOST_ARY_TYPE_INFO(&ng_bridge_hary_type);
static const struct ng_parse_type ng_bridge_host_ary_type = {
&ng_parse_struct_type,
&ng_bridge_host_ary_type_fields
};
/* Parse type for struct ng_bridge_config */
static const struct ng_parse_fixedarray_info ng_bridge_ipfwary_type_info = {
&ng_parse_uint8_type,
NG_BRIDGE_MAX_LINKS
};
static const struct ng_parse_type ng_bridge_ipfwary_type = {
&ng_parse_fixedarray_type,
&ng_bridge_ipfwary_type_info
};
static const struct ng_parse_struct_field ng_bridge_config_type_fields[]
= NG_BRIDGE_CONFIG_TYPE_INFO(&ng_bridge_ipfwary_type);
static const struct ng_parse_type ng_bridge_config_type = {
&ng_parse_struct_type,
&ng_bridge_config_type_fields
};
/* Parse type for struct ng_bridge_link_stat */
static const struct ng_parse_struct_field ng_bridge_stats_type_fields[]
= NG_BRIDGE_STATS_TYPE_INFO;
static const struct ng_parse_type ng_bridge_stats_type = {
&ng_parse_struct_type,
&ng_bridge_stats_type_fields
};
/* List of commands and how to convert arguments to/from ASCII */
static const struct ng_cmdlist ng_bridge_cmdlist[] = {
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_SET_CONFIG,
"setconfig",
&ng_bridge_config_type,
NULL
},
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_GET_CONFIG,
"getconfig",
NULL,
&ng_bridge_config_type
},
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_RESET,
"reset",
NULL,
NULL
},
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_GET_STATS,
"getstats",
&ng_parse_uint32_type,
&ng_bridge_stats_type
},
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_CLR_STATS,
"clrstats",
&ng_parse_uint32_type,
NULL
},
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_GETCLR_STATS,
"getclrstats",
&ng_parse_uint32_type,
&ng_bridge_stats_type
},
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_GET_TABLE,
"gettable",
NULL,
&ng_bridge_host_ary_type
},
{
NGM_BRIDGE_COOKIE,
NGM_BRIDGE_SET_PERSISTENT,
"setpersistent",
NULL,
NULL
},
{ 0 }
};
/* Node type descriptor */
static struct ng_type ng_bridge_typestruct = {
.version = NG_ABI_VERSION,
.name = NG_BRIDGE_NODE_TYPE,
.constructor = ng_bridge_constructor,
.rcvmsg = ng_bridge_rcvmsg,
.shutdown = ng_bridge_shutdown,
.newhook = ng_bridge_newhook,
.rcvdata = ng_bridge_rcvdata,
.disconnect = ng_bridge_disconnect,
.cmdlist = ng_bridge_cmdlist,
};
NETGRAPH_INIT(bridge, &ng_bridge_typestruct);
/******************************************************************
NETGRAPH NODE METHODS
******************************************************************/
/*
* Node constructor
*/
static int
ng_bridge_constructor(node_p node)
{
priv_p priv;
/* Allocate and initialize private info */
priv = malloc(sizeof(*priv), M_NETGRAPH_BRIDGE, M_WAITOK | M_ZERO);
ng_callout_init(&priv->timer);
/* Allocate and initialize hash table, etc. */
priv->tab = malloc(MIN_BUCKETS * sizeof(*priv->tab),
M_NETGRAPH_BRIDGE, M_WAITOK | M_ZERO);
priv->numBuckets = MIN_BUCKETS;
priv->hashMask = MIN_BUCKETS - 1;
priv->conf.debugLevel = 1;
priv->conf.loopTimeout = DEFAULT_LOOP_TIMEOUT;
priv->conf.maxStaleness = DEFAULT_MAX_STALENESS;
priv->conf.minStableAge = DEFAULT_MIN_STABLE_AGE;
/*
* This node has all kinds of state that could be corrupted under SMP.
* Until it gets its own internal protection, we force everything through
* in single file. This could hurt a machine bridging between two
* gigabit Ethernets, so it should be fixed.
* When it is fixed, the code SHOULD NOT SLEEP: use spinlocks
* (and atomic ops).
*/
NG_NODE_FORCE_WRITER(node);
NG_NODE_SET_PRIVATE(node, priv);
priv->node = node;
/* Start timer; timer is always running while node is alive */
ng_callout(&priv->timer, node, NULL, hz, ng_bridge_timeout, NULL, 0);
/* Done */
return (0);
}
/*
* Method for attaching a new hook
*/
static int
ng_bridge_newhook(node_p node, hook_p hook, const char *name)
{
const priv_p priv = NG_NODE_PRIVATE(node);
/* Check for a link hook */
if (strncmp(name, NG_BRIDGE_HOOK_LINK_PREFIX,
strlen(NG_BRIDGE_HOOK_LINK_PREFIX)) == 0) {
const char *cp;
char *eptr;
u_long linkNum;
cp = name + strlen(NG_BRIDGE_HOOK_LINK_PREFIX);
if (!isdigit(*cp) || (cp[0] == '0' && cp[1] != '\0'))
return (EINVAL);
linkNum = strtoul(cp, &eptr, 10);
if (*eptr != '\0' || linkNum >= NG_BRIDGE_MAX_LINKS)
return (EINVAL);
if (priv->links[linkNum] != NULL)
return (EISCONN);
priv->links[linkNum] = malloc(sizeof(*priv->links[linkNum]),
M_NETGRAPH_BRIDGE, M_NOWAIT|M_ZERO);
if (priv->links[linkNum] == NULL)
return (ENOMEM);
priv->links[linkNum]->hook = hook;
NG_HOOK_SET_PRIVATE(hook, (void *)linkNum);
priv->numLinks++;
return (0);
}
/* Unknown hook name */
return (EINVAL);
}
/*
* Receive a control message
*/
static int
ng_bridge_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
const priv_p priv = NG_NODE_PRIVATE(node);
struct ng_mesg *resp = NULL;
int error = 0;
struct ng_mesg *msg;
NGI_GET_MSG(item, msg);
switch (msg->header.typecookie) {
case NGM_BRIDGE_COOKIE:
switch (msg->header.cmd) {
case NGM_BRIDGE_GET_CONFIG:
{
struct ng_bridge_config *conf;
NG_MKRESPONSE(resp, msg,
sizeof(struct ng_bridge_config), M_NOWAIT);
if (resp == NULL) {
error = ENOMEM;
break;
}
conf = (struct ng_bridge_config *)resp->data;
*conf = priv->conf; /* no sanity checking needed */
break;
}
case NGM_BRIDGE_SET_CONFIG:
{
struct ng_bridge_config *conf;
int i;
if (msg->header.arglen
!= sizeof(struct ng_bridge_config)) {
error = EINVAL;
break;
}
conf = (struct ng_bridge_config *)msg->data;
priv->conf = *conf;
for (i = 0; i < NG_BRIDGE_MAX_LINKS; i++)
priv->conf.ipfw[i] = !!priv->conf.ipfw[i];
break;
}
case NGM_BRIDGE_RESET:
{
int i;
/* Flush all entries in the hash table */
ng_bridge_remove_hosts(priv, -1);
/* Reset all loop detection counters and stats */
for (i = 0; i < NG_BRIDGE_MAX_LINKS; i++) {
if (priv->links[i] == NULL)
continue;
priv->links[i]->loopCount = 0;
bzero(&priv->links[i]->stats,
sizeof(priv->links[i]->stats));
}
break;
}
case NGM_BRIDGE_GET_STATS:
case NGM_BRIDGE_CLR_STATS:
case NGM_BRIDGE_GETCLR_STATS:
{
struct ng_bridge_link *link;
int linkNum;
/* Get link number */
if (msg->header.arglen != sizeof(u_int32_t)) {
error = EINVAL;
break;
}
linkNum = *((u_int32_t *)msg->data);
if (linkNum < 0 || linkNum >= NG_BRIDGE_MAX_LINKS) {
error = EINVAL;
break;
}
if ((link = priv->links[linkNum]) == NULL) {
error = ENOTCONN;
break;
}
/* Get/clear stats */
if (msg->header.cmd != NGM_BRIDGE_CLR_STATS) {
NG_MKRESPONSE(resp, msg,
sizeof(link->stats), M_NOWAIT);
if (resp == NULL) {
error = ENOMEM;
break;
}
bcopy(&link->stats,
resp->data, sizeof(link->stats));
}
if (msg->header.cmd != NGM_BRIDGE_GET_STATS)
bzero(&link->stats, sizeof(link->stats));
break;
}
case NGM_BRIDGE_GET_TABLE:
{
struct ng_bridge_host_ary *ary;
struct ng_bridge_hent *hent;
int i = 0, bucket;
NG_MKRESPONSE(resp, msg, sizeof(*ary)
+ (priv->numHosts * sizeof(*ary->hosts)), M_NOWAIT);
if (resp == NULL) {
error = ENOMEM;
break;
}
ary = (struct ng_bridge_host_ary *)resp->data;
ary->numHosts = priv->numHosts;
for (bucket = 0; bucket < priv->numBuckets; bucket++) {
SLIST_FOREACH(hent, &priv->tab[bucket], next)
ary->hosts[i++] = hent->host;
}
break;
}
case NGM_BRIDGE_SET_PERSISTENT:
{
priv->persistent = 1;
break;
}
default:
error = EINVAL;
break;
}
break;
default:
error = EINVAL;
break;
}
/* Done */
NG_RESPOND_MSG(error, node, item, resp);
NG_FREE_MSG(msg);
return (error);
}
/*
* Receive data on a hook
*/
static int
ng_bridge_rcvdata(hook_p hook, item_p item)
{
const node_p node = NG_HOOK_NODE(hook);
const priv_p priv = NG_NODE_PRIVATE(node);
struct ng_bridge_host *host;
struct ng_bridge_link *link;
struct ether_header *eh;
int error = 0, linkNum, linksSeen;
int manycast;
struct mbuf *m;
struct ng_bridge_link *firstLink;
NGI_GET_M(item, m);
/* Get link number */
linkNum = (intptr_t)NG_HOOK_PRIVATE(hook);
KASSERT(linkNum >= 0 && linkNum < NG_BRIDGE_MAX_LINKS,
("%s: linkNum=%u", __func__, linkNum));
link = priv->links[linkNum];
KASSERT(link != NULL, ("%s: link%d null", __func__, linkNum));
/* Sanity check packet and pull up header */
if (m->m_pkthdr.len < ETHER_HDR_LEN) {
link->stats.recvRunts++;
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (EINVAL);
}
if (m->m_len < ETHER_HDR_LEN && !(m = m_pullup(m, ETHER_HDR_LEN))) {
link->stats.memoryFailures++;
NG_FREE_ITEM(item);
return (ENOBUFS);
}
eh = mtod(m, struct ether_header *);
if ((eh->ether_shost[0] & 1) != 0) {
link->stats.recvInvalid++;
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (EINVAL);
}
/* Is link disabled due to a loopback condition? */
if (link->loopCount != 0) {
link->stats.loopDrops++;
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (ELOOP); /* XXX is this an appropriate error? */
}
/* Update stats */
link->stats.recvPackets++;
link->stats.recvOctets += m->m_pkthdr.len;
if ((manycast = (eh->ether_dhost[0] & 1)) != 0) {
if (ETHER_EQUAL(eh->ether_dhost, ng_bridge_bcast_addr)) {
link->stats.recvBroadcasts++;
manycast = 2;
} else
link->stats.recvMulticasts++;
}
/* Look up packet's source Ethernet address in hashtable */
if ((host = ng_bridge_get(priv, eh->ether_shost)) != NULL) {
/* Update time since last heard from this host */
host->staleness = 0;
/* Did host jump to a different link? */
if (host->linkNum != linkNum) {
/*
* If the host was only recently learned on its old
* link and has already jumped to a new link,
* declare a loopback condition.
*/
if (host->age < priv->conf.minStableAge) {
/* Log the problem */
if (priv->conf.debugLevel >= 2) {
struct ifnet *ifp = m->m_pkthdr.rcvif;
char suffix[32];
if (ifp != NULL)
snprintf(suffix, sizeof(suffix),
" (%s)", ifp->if_xname);
else
*suffix = '\0';
log(LOG_WARNING, "ng_bridge: %s:"
" loopback detected on %s%s\n",
ng_bridge_nodename(node),
NG_HOOK_NAME(hook), suffix);
}
/* Mark link as linka non grata */
link->loopCount = priv->conf.loopTimeout;
link->stats.loopDetects++;
/* Forget all hosts on this link */
ng_bridge_remove_hosts(priv, linkNum);
/* Drop packet */
link->stats.loopDrops++;
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (ELOOP); /* XXX appropriate? */
}
/* Move host over to new link */
host->linkNum = linkNum;
host->age = 0;
}
} else {
if (!ng_bridge_put(priv, eh->ether_shost, linkNum)) {
link->stats.memoryFailures++;
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (ENOMEM);
}
}
/* Run packet through ipfw processing, if enabled */
#if 0
if (priv->conf.ipfw[linkNum] && V_fw_enable && V_ip_fw_chk_ptr != NULL) {
/* XXX not implemented yet */
}
#endif
/*
* If unicast and destination host known, deliver to host's link,
* unless it is the same link as the packet came in on.
*/
if (!manycast) {
/* Determine packet destination link */
if ((host = ng_bridge_get(priv, eh->ether_dhost)) != NULL) {
struct ng_bridge_link *const destLink
= priv->links[host->linkNum];
/* If destination same as incoming link, do nothing */
KASSERT(destLink != NULL,
("%s: link%d null", __func__, host->linkNum));
if (destLink == link) {
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (0);
}
/* Deliver packet out the destination link */
destLink->stats.xmitPackets++;
destLink->stats.xmitOctets += m->m_pkthdr.len;
NG_FWD_NEW_DATA(error, item, destLink->hook, m);
return (error);
}
/* Destination host is not known */
link->stats.recvUnknown++;
}
/* Distribute unknown, multicast, broadcast pkts to all other links */
firstLink = NULL;
for (linkNum = linksSeen = 0; linksSeen <= priv->numLinks; linkNum++) {
struct ng_bridge_link *destLink;
struct mbuf *m2 = NULL;
/*
* If we have checked all the links, then
* send the original on its reserved link.
*/
if (linksSeen == priv->numLinks) {
/* If we never saw a good link, leave. */
if (firstLink == NULL) {
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (0);
}
destLink = firstLink;
} else {
destLink = priv->links[linkNum];
if (destLink != NULL)
linksSeen++;
/* Skip incoming link and disconnected links */
if (destLink == NULL || destLink == link) {
continue;
}
if (firstLink == NULL) {
/*
* This is the first usable link we have found.
* Reserve it for the original packet.
* If we never find another link, we avoid making a copy.
*/
firstLink = destLink;
continue;
}
/*
* It's a usable link, but not the reserved (first) one.
* Duplicate the mbuf for sending.
*/
m2 = m_dup(m, M_NOWAIT); /* XXX m_copypacket() */
if (m2 == NULL) {
link->stats.memoryFailures++;
NG_FREE_ITEM(item);
NG_FREE_M(m);
return (ENOBUFS);
}
}
/* Update stats */
destLink->stats.xmitPackets++;
destLink->stats.xmitOctets += m->m_pkthdr.len;
switch (manycast) {
case 0: /* unicast */
break;
case 1: /* multicast */
destLink->stats.xmitMulticasts++;
break;
case 2: /* broadcast */
destLink->stats.xmitBroadcasts++;
break;
}
/* Send packet */
if (destLink == firstLink) {
/*
* If we've sent all the others, send the original
* on the first link we found.
*/
NG_FWD_NEW_DATA(error, item, destLink->hook, m);
break; /* always done last - not really needed. */
} else {
NG_SEND_DATA_ONLY(error, destLink->hook, m2);
}
}
return (error);
}
/*
* Shutdown node
*/
static int
ng_bridge_shutdown(node_p node)
{
const priv_p priv = NG_NODE_PRIVATE(node);
/*
* Shut down everything including the timer. Even if the
* callout has already been dequeued and is about to be
* run, ng_bridge_timeout() won't be fired as the node
* is already marked NGF_INVALID, so we're safe to free
* the node now.
*/
KASSERT(priv->numLinks == 0 && priv->numHosts == 0,
("%s: numLinks=%d numHosts=%d",
__func__, priv->numLinks, priv->numHosts));
ng_uncallout(&priv->timer, node);
NG_NODE_SET_PRIVATE(node, NULL);
NG_NODE_UNREF(node);
free(priv->tab, M_NETGRAPH_BRIDGE);
free(priv, M_NETGRAPH_BRIDGE);
return (0);
}
/*
* Hook disconnection.
*/
static int
ng_bridge_disconnect(hook_p hook)
{
const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
int linkNum;
/* Get link number */
linkNum = (intptr_t)NG_HOOK_PRIVATE(hook);
KASSERT(linkNum >= 0 && linkNum < NG_BRIDGE_MAX_LINKS,
("%s: linkNum=%u", __func__, linkNum));
/* Remove all hosts associated with this link */
ng_bridge_remove_hosts(priv, linkNum);
/* Free associated link information */
KASSERT(priv->links[linkNum] != NULL, ("%s: no link", __func__));
free(priv->links[linkNum], M_NETGRAPH_BRIDGE);
priv->links[linkNum] = NULL;
priv->numLinks--;
/* If no more hooks, go away */
if ((NG_NODE_NUMHOOKS(NG_HOOK_NODE(hook)) == 0)
&& (NG_NODE_IS_VALID(NG_HOOK_NODE(hook)))
&& !priv->persistent) {
ng_rmnode_self(NG_HOOK_NODE(hook));
}
return (0);
}
/******************************************************************
HASH TABLE FUNCTIONS
******************************************************************/
/*
* Hash algorithm
*/
#define HASH(addr,mask) ( (((const u_int16_t *)(addr))[0] \
^ ((const u_int16_t *)(addr))[1] \
^ ((const u_int16_t *)(addr))[2]) & (mask) )
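/*
 * Illustrative sketch (not part of this revision): how HASH() folds a MAC
 * address into a bucket index.  The address bytes and the use of the
 * minimum table size are example choices; example_ names are hypothetical.
 */
static int
example_hash_bucket(void)
{
	/* 00:11:22:33:44:55 stored as three 16-bit words. */
	static const u_int16_t mac_words[3] = { 0x0011, 0x2233, 0x4455 };

	/* 0x0011 ^ 0x2233 ^ 0x4455 = 0x6677; masked with 31 -> bucket 23. */
	return (HASH((const u_char *)mac_words, MIN_BUCKETS - 1));
}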
/*
* Find a host entry in the table.
*/
static struct ng_bridge_host *
ng_bridge_get(priv_p priv, const u_char *addr)
{
const int bucket = HASH(addr, priv->hashMask);
struct ng_bridge_hent *hent;
SLIST_FOREACH(hent, &priv->tab[bucket], next) {
if (ETHER_EQUAL(hent->host.addr, addr))
return (&hent->host);
}
return (NULL);
}
/*
* Add a new host entry to the table. This assumes the host doesn't
* already exist in the table. Returns 1 on success, 0 if there
* was a memory allocation failure.
*/
static int
ng_bridge_put(priv_p priv, const u_char *addr, int linkNum)
{
const int bucket = HASH(addr, priv->hashMask);
struct ng_bridge_hent *hent;
#ifdef INVARIANTS
/* Assert that entry does not already exist in hashtable */
SLIST_FOREACH(hent, &priv->tab[bucket], next) {
KASSERT(!ETHER_EQUAL(hent->host.addr, addr),
("%s: entry %6D exists in table", __func__, addr, ":"));
}
#endif
/* Allocate and initialize new hashtable entry */
hent = malloc(sizeof(*hent), M_NETGRAPH_BRIDGE, M_NOWAIT);
if (hent == NULL)
return (0);
bcopy(addr, hent->host.addr, ETHER_ADDR_LEN);
hent->host.linkNum = linkNum;
hent->host.staleness = 0;
hent->host.age = 0;
/* Add new element to hash bucket */
SLIST_INSERT_HEAD(&priv->tab[bucket], hent, next);
priv->numHosts++;
/* Resize table if necessary */
ng_bridge_rehash(priv);
return (1);
}
/*
* Resize the hash table. We try to maintain the number of buckets
* such that the load factor is in the range 0.25 to 1.0.
*
* If we can't get the new memory then we silently fail. This is OK
* because things will still work and we'll try again soon anyway.
*/
static void
ng_bridge_rehash(priv_p priv)
{
struct ng_bridge_bucket *newTab;
int oldBucket, newBucket;
int newNumBuckets;
u_int newMask;
/* Is table too full or too empty? */
if (priv->numHosts > priv->numBuckets
&& (priv->numBuckets << 1) <= MAX_BUCKETS)
newNumBuckets = priv->numBuckets << 1;
else if (priv->numHosts < (priv->numBuckets >> 2)
&& (priv->numBuckets >> 2) >= MIN_BUCKETS)
newNumBuckets = priv->numBuckets >> 2;
else
return;
newMask = newNumBuckets - 1;
/* Allocate and initialize new table */
- newTab = mallocarray(newNumBuckets, sizeof(*newTab),
+ newTab = malloc(newNumBuckets * sizeof(*newTab),
M_NETGRAPH_BRIDGE, M_NOWAIT | M_ZERO);
if (newTab == NULL)
return;
/* Move all entries from old table to new table */
for (oldBucket = 0; oldBucket < priv->numBuckets; oldBucket++) {
struct ng_bridge_bucket *const oldList = &priv->tab[oldBucket];
while (!SLIST_EMPTY(oldList)) {
struct ng_bridge_hent *const hent
= SLIST_FIRST(oldList);
SLIST_REMOVE_HEAD(oldList, next);
newBucket = HASH(hent->host.addr, newMask);
SLIST_INSERT_HEAD(&newTab[newBucket], hent, next);
}
}
/* Replace old table with new one */
if (priv->conf.debugLevel >= 3) {
log(LOG_INFO, "ng_bridge: %s: table size %d -> %d\n",
ng_bridge_nodename(priv->node),
priv->numBuckets, newNumBuckets);
}
free(priv->tab, M_NETGRAPH_BRIDGE);
priv->numBuckets = newNumBuckets;
priv->hashMask = newMask;
priv->tab = newTab;
return;
}
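/*
 * Illustrative sketch (not part of this revision): the resize policy used
 * by ng_bridge_rehash() above, isolated into a pure function.  Returns the
 * new bucket count, or 0 when no resize is needed; the example_ name is
 * hypothetical.
 */
static u_int
example_resize_policy(u_int numHosts, u_int numBuckets)
{
	if (numHosts > numBuckets && (numBuckets << 1) <= MAX_BUCKETS)
		return (numBuckets << 1);	/* load > 1.0: double */
	if (numHosts < (numBuckets >> 2) && (numBuckets >> 2) >= MIN_BUCKETS)
		return (numBuckets >> 2);	/* load < 0.25: quarter */
	return (0);				/* load in [0.25, 1.0]: keep */
}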
/******************************************************************
MISC FUNCTIONS
******************************************************************/
/*
* Remove all hosts associated with a specific link from the hashtable.
* If linkNum == -1, then remove all hosts in the table.
*/
static void
ng_bridge_remove_hosts(priv_p priv, int linkNum)
{
int bucket;
for (bucket = 0; bucket < priv->numBuckets; bucket++) {
struct ng_bridge_hent **hptr = &SLIST_FIRST(&priv->tab[bucket]);
while (*hptr != NULL) {
struct ng_bridge_hent *const hent = *hptr;
if (linkNum == -1 || hent->host.linkNum == linkNum) {
*hptr = SLIST_NEXT(hent, next);
free(hent, M_NETGRAPH_BRIDGE);
priv->numHosts--;
} else
hptr = &SLIST_NEXT(hent, next);
}
}
}
/*
* Handle our once-per-second timeout event. We do two things:
* we decrement link->loopCount for those links being muted due to
* a detected loopback condition, and we remove any hosts from
* the hashtable that we haven't heard from in a long while.
*/
static void
ng_bridge_timeout(node_p node, hook_p hook, void *arg1, int arg2)
{
const priv_p priv = NG_NODE_PRIVATE(node);
int bucket;
int counter = 0;
int linkNum;
/* Update host time counters and remove stale entries */
for (bucket = 0; bucket < priv->numBuckets; bucket++) {
struct ng_bridge_hent **hptr = &SLIST_FIRST(&priv->tab[bucket]);
while (*hptr != NULL) {
struct ng_bridge_hent *const hent = *hptr;
/* Make sure host's link really exists */
KASSERT(priv->links[hent->host.linkNum] != NULL,
("%s: host %6D on nonexistent link %d\n",
__func__, hent->host.addr, ":",
hent->host.linkNum));
/* Remove hosts we haven't heard from in a while */
if (++hent->host.staleness >= priv->conf.maxStaleness) {
*hptr = SLIST_NEXT(hent, next);
free(hent, M_NETGRAPH_BRIDGE);
priv->numHosts--;
} else {
if (hent->host.age < 0xffff)
hent->host.age++;
hptr = &SLIST_NEXT(hent, next);
counter++;
}
}
}
KASSERT(priv->numHosts == counter,
("%s: hosts: %d != %d", __func__, priv->numHosts, counter));
/* Decrease table size if necessary */
ng_bridge_rehash(priv);
/* Decrease loop counter on muted looped back links */
for (counter = linkNum = 0; linkNum < NG_BRIDGE_MAX_LINKS; linkNum++) {
struct ng_bridge_link *const link = priv->links[linkNum];
if (link != NULL) {
if (link->loopCount != 0) {
link->loopCount--;
if (link->loopCount == 0
&& priv->conf.debugLevel >= 2) {
log(LOG_INFO, "ng_bridge: %s:"
" restoring looped back link%d\n",
ng_bridge_nodename(node), linkNum);
}
}
counter++;
}
}
KASSERT(priv->numLinks == counter,
("%s: links: %d != %d", __func__, priv->numLinks, counter));
/* Register a new timeout, keeping the existing node reference */
ng_callout(&priv->timer, node, NULL, hz, ng_bridge_timeout, NULL, 0);
}
/*
* Return node's "name", even if it doesn't have one.
*/
static const char *
ng_bridge_nodename(node_p node)
{
static char name[NG_NODESIZ];
if (NG_NODE_HAS_NAME(node))
snprintf(name, sizeof(name), "%s", NG_NODE_NAME(node));
else
snprintf(name, sizeof(name), "[%x]", ng_node2ID(node));
return name;
}
Index: head/sys/netgraph/ng_deflate.c
===================================================================
--- head/sys/netgraph/ng_deflate.c (revision 328217)
+++ head/sys/netgraph/ng_deflate.c (revision 328218)
@@ -1,700 +1,700 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2006 Alexander Motin <mav@alkar.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Deflate PPP compression netgraph node type.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/zlib.h>
#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_deflate.h>
#include "opt_netgraph.h"
static MALLOC_DEFINE(M_NETGRAPH_DEFLATE, "netgraph_deflate",
"netgraph deflate node");
/* DEFLATE header length */
#define DEFLATE_HDRLEN 2
#define PROT_COMPD 0x00fd
#define DEFLATE_BUF_SIZE 4096
/* Node private data */
struct ng_deflate_private {
struct ng_deflate_config cfg; /* configuration */
u_char inbuf[DEFLATE_BUF_SIZE]; /* input buffer */
u_char outbuf[DEFLATE_BUF_SIZE]; /* output buffer */
z_stream cx; /* compression context */
struct ng_deflate_stats stats; /* statistics */
ng_ID_t ctrlnode; /* path to controlling node */
uint16_t seqnum; /* sequence number */
u_char compress; /* compress/decompress flag */
};
typedef struct ng_deflate_private *priv_p;
/* Netgraph node methods */
static ng_constructor_t ng_deflate_constructor;
static ng_rcvmsg_t ng_deflate_rcvmsg;
static ng_shutdown_t ng_deflate_shutdown;
static ng_newhook_t ng_deflate_newhook;
static ng_rcvdata_t ng_deflate_rcvdata;
static ng_disconnect_t ng_deflate_disconnect;
/* Helper functions */
static void *z_alloc(void *, u_int items, u_int size);
static void z_free(void *, void *ptr);
static int ng_deflate_compress(node_p node,
struct mbuf *m, struct mbuf **resultp);
static int ng_deflate_decompress(node_p node,
struct mbuf *m, struct mbuf **resultp);
static void ng_deflate_reset_req(node_p node);
/* Parse type for struct ng_deflate_config. */
static const struct ng_parse_struct_field ng_deflate_config_type_fields[]
= NG_DEFLATE_CONFIG_INFO;
static const struct ng_parse_type ng_deflate_config_type = {
&ng_parse_struct_type,
ng_deflate_config_type_fields
};
/* Parse type for struct ng_deflate_stat. */
static const struct ng_parse_struct_field ng_deflate_stats_type_fields[]
= NG_DEFLATE_STATS_INFO;
static const struct ng_parse_type ng_deflate_stat_type = {
&ng_parse_struct_type,
ng_deflate_stats_type_fields
};
/* List of commands and how to convert arguments to/from ASCII. */
static const struct ng_cmdlist ng_deflate_cmds[] = {
{
NGM_DEFLATE_COOKIE,
NGM_DEFLATE_CONFIG,
"config",
&ng_deflate_config_type,
NULL
},
{
NGM_DEFLATE_COOKIE,
NGM_DEFLATE_RESETREQ,
"resetreq",
NULL,
NULL
},
{
NGM_DEFLATE_COOKIE,
NGM_DEFLATE_GET_STATS,
"getstats",
NULL,
&ng_deflate_stat_type
},
{
NGM_DEFLATE_COOKIE,
NGM_DEFLATE_CLR_STATS,
"clrstats",
NULL,
NULL
},
{
NGM_DEFLATE_COOKIE,
NGM_DEFLATE_GETCLR_STATS,
"getclrstats",
NULL,
&ng_deflate_stat_type
},
{ 0 }
};
/* Node type descriptor */
static struct ng_type ng_deflate_typestruct = {
.version = NG_ABI_VERSION,
.name = NG_DEFLATE_NODE_TYPE,
.constructor = ng_deflate_constructor,
.rcvmsg = ng_deflate_rcvmsg,
.shutdown = ng_deflate_shutdown,
.newhook = ng_deflate_newhook,
.rcvdata = ng_deflate_rcvdata,
.disconnect = ng_deflate_disconnect,
.cmdlist = ng_deflate_cmds,
};
NETGRAPH_INIT(deflate, &ng_deflate_typestruct);
/* Depend on separate zlib module. */
MODULE_DEPEND(ng_deflate, zlib, 1, 1, 1);
#define ERROUT(x) do { error = (x); goto done; } while (0)
/************************************************************************
NETGRAPH NODE STUFF
************************************************************************/
/*
* Node type constructor
*/
static int
ng_deflate_constructor(node_p node)
{
priv_p priv;
/* Allocate private structure. */
priv = malloc(sizeof(*priv), M_NETGRAPH_DEFLATE, M_WAITOK | M_ZERO);
NG_NODE_SET_PRIVATE(node, priv);
/* This node is not thread safe. */
NG_NODE_FORCE_WRITER(node);
/* Done */
return (0);
}
/*
* Give our OK for a hook to be added.
*/
static int
ng_deflate_newhook(node_p node, hook_p hook, const char *name)
{
const priv_p priv = NG_NODE_PRIVATE(node);
if (NG_NODE_NUMHOOKS(node) > 0)
return (EINVAL);
if (strcmp(name, NG_DEFLATE_HOOK_COMP) == 0)
priv->compress = 1;
else if (strcmp(name, NG_DEFLATE_HOOK_DECOMP) == 0)
priv->compress = 0;
else
return (EINVAL);
return (0);
}
/*
* Receive a control message
*/
static int
ng_deflate_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
const priv_p priv = NG_NODE_PRIVATE(node);
struct ng_mesg *resp = NULL;
int error = 0;
struct ng_mesg *msg;
NGI_GET_MSG(item, msg);
if (msg->header.typecookie != NGM_DEFLATE_COOKIE)
ERROUT(EINVAL);
switch (msg->header.cmd) {
case NGM_DEFLATE_CONFIG:
{
struct ng_deflate_config *const cfg
= (struct ng_deflate_config *)msg->data;
/* Check configuration. */
if (msg->header.arglen != sizeof(*cfg))
ERROUT(EINVAL);
if (cfg->enable) {
if (cfg->windowBits < 8 || cfg->windowBits > 15)
ERROUT(EINVAL);
} else
cfg->windowBits = 0;
/* Clear previous state. */
if (priv->cfg.enable) {
if (priv->compress)
deflateEnd(&priv->cx);
else
inflateEnd(&priv->cx);
priv->cfg.enable = 0;
}
/* Configuration is OK, reset to it. */
priv->cfg = *cfg;
if (priv->cfg.enable) {
priv->cx.next_in = NULL;
priv->cx.zalloc = z_alloc;
priv->cx.zfree = z_free;
int res;
if (priv->compress) {
if ((res = deflateInit2(&priv->cx,
Z_DEFAULT_COMPRESSION, Z_DEFLATED,
-cfg->windowBits, 8,
Z_DEFAULT_STRATEGY)) != Z_OK) {
log(LOG_NOTICE,
"deflateInit2: error %d, %s\n",
res, priv->cx.msg);
priv->cfg.enable = 0;
ERROUT(ENOMEM);
}
} else {
if ((res = inflateInit2(&priv->cx,
-cfg->windowBits)) != Z_OK) {
log(LOG_NOTICE,
"inflateInit2: error %d, %s\n",
res, priv->cx.msg);
priv->cfg.enable = 0;
ERROUT(ENOMEM);
}
}
}
/* Initialize other state. */
priv->seqnum = 0;
/* Save return address so we can send reset-req's */
priv->ctrlnode = NGI_RETADDR(item);
break;
}
case NGM_DEFLATE_RESETREQ:
ng_deflate_reset_req(node);
break;
case NGM_DEFLATE_GET_STATS:
case NGM_DEFLATE_CLR_STATS:
case NGM_DEFLATE_GETCLR_STATS:
/* Create response if requested. */
if (msg->header.cmd != NGM_DEFLATE_CLR_STATS) {
NG_MKRESPONSE(resp, msg,
sizeof(struct ng_deflate_stats), M_NOWAIT);
if (resp == NULL)
ERROUT(ENOMEM);
bcopy(&priv->stats, resp->data,
sizeof(struct ng_deflate_stats));
}
/* Clear stats if requested. */
if (msg->header.cmd != NGM_DEFLATE_GET_STATS)
bzero(&priv->stats,
sizeof(struct ng_deflate_stats));
break;
default:
error = EINVAL;
break;
}
done:
NG_RESPOND_MSG(error, node, item, resp);
NG_FREE_MSG(msg);
return (error);
}
/*
* Receive incoming data on our hook.
*/
static int
ng_deflate_rcvdata(hook_p hook, item_p item)
{
const node_p node = NG_HOOK_NODE(hook);
const priv_p priv = NG_NODE_PRIVATE(node);
struct mbuf *m, *out;
int error;
if (!priv->cfg.enable) {
NG_FREE_ITEM(item);
return (ENXIO);
}
NGI_GET_M(item, m);
/* Compress */
if (priv->compress) {
if ((error = ng_deflate_compress(node, m, &out)) != 0) {
NG_FREE_ITEM(item);
log(LOG_NOTICE, "%s: error: %d\n", __func__, error);
return (error);
}
} else { /* Decompress */
if ((error = ng_deflate_decompress(node, m, &out)) != 0) {
NG_FREE_ITEM(item);
log(LOG_NOTICE, "%s: error: %d\n", __func__, error);
if (priv->ctrlnode != 0) {
struct ng_mesg *msg;
/* Need to send a reset-request. */
NG_MKMESSAGE(msg, NGM_DEFLATE_COOKIE,
NGM_DEFLATE_RESETREQ, 0, M_NOWAIT);
if (msg == NULL)
return (error);
NG_SEND_MSG_ID(error, node, msg,
priv->ctrlnode, 0);
}
return (error);
}
}
NG_FWD_NEW_DATA(error, item, hook, out);
return (error);
}
/*
* Destroy node.
*/
static int
ng_deflate_shutdown(node_p node)
{
const priv_p priv = NG_NODE_PRIVATE(node);
/* Take down netgraph node. */
if (priv->cfg.enable) {
if (priv->compress)
deflateEnd(&priv->cx);
else
inflateEnd(&priv->cx);
}
free(priv, M_NETGRAPH_DEFLATE);
NG_NODE_SET_PRIVATE(node, NULL);
NG_NODE_UNREF(node); /* let the node escape */
return (0);
}
/*
* Hook disconnection
*/
static int
ng_deflate_disconnect(hook_p hook)
{
const node_p node = NG_HOOK_NODE(hook);
const priv_p priv = NG_NODE_PRIVATE(node);
if (priv->cfg.enable) {
if (priv->compress)
deflateEnd(&priv->cx);
else
inflateEnd(&priv->cx);
priv->cfg.enable = 0;
}
/* Go away if no longer connected. */
if ((NG_NODE_NUMHOOKS(node) == 0) && NG_NODE_IS_VALID(node))
ng_rmnode_self(node);
return (0);
}
/************************************************************************
HELPER STUFF
************************************************************************/
/*
* Space allocation and freeing routines for use by zlib routines.
*/
static void *
z_alloc(void *notused, u_int items, u_int size)
{
- return (mallocarray(items, size, M_NETGRAPH_DEFLATE, M_NOWAIT));
+ return (malloc(items * size, M_NETGRAPH_DEFLATE, M_NOWAIT));
}
static void
z_free(void *notused, void *ptr)
{
free(ptr, M_NETGRAPH_DEFLATE);
}
/*
* Compress/encrypt a packet and put the result in a new mbuf at *resultp.
* The original mbuf is not free'd.
*/
static int
ng_deflate_compress(node_p node, struct mbuf *m, struct mbuf **resultp)
{
const priv_p priv = NG_NODE_PRIVATE(node);
int outlen, inlen;
int rtn;
/* Initialize. */
*resultp = NULL;
inlen = m->m_pkthdr.len;
priv->stats.FramesPlain++;
priv->stats.InOctets+=inlen;
if (inlen > DEFLATE_BUF_SIZE) {
priv->stats.Errors++;
NG_FREE_M(m);
return (ENOMEM);
}
/* We must own the mbuf chain exclusively to modify it. */
m = m_unshare(m, M_NOWAIT);
if (m == NULL) {
priv->stats.Errors++;
return (ENOMEM);
}
/* Work with contiguous regions of memory. */
m_copydata(m, 0, inlen, (caddr_t)priv->inbuf);
outlen = DEFLATE_BUF_SIZE;
/* Compress "inbuf" into "outbuf". */
/* Prepare to compress. */
if (priv->inbuf[0] != 0) {
priv->cx.next_in = priv->inbuf;
priv->cx.avail_in = inlen;
} else {
priv->cx.next_in = priv->inbuf + 1; /* compress protocol */
priv->cx.avail_in = inlen - 1;
}
priv->cx.next_out = priv->outbuf + 2 + DEFLATE_HDRLEN;
priv->cx.avail_out = outlen - 2 - DEFLATE_HDRLEN;
/* Compress. */
rtn = deflate(&priv->cx, Z_PACKET_FLUSH);
/* Check return value. */
if (rtn != Z_OK) {
priv->stats.Errors++;
log(LOG_NOTICE, "ng_deflate: compression error: %d (%s)\n",
rtn, priv->cx.msg);
NG_FREE_M(m);
return (EINVAL);
}
/* Calculate resulting size. */
outlen -= priv->cx.avail_out;
/* If we can't compress this packet, send it as-is. */
if (outlen > inlen) {
/* Return original packet uncompressed. */
*resultp = m;
priv->stats.FramesUncomp++;
priv->stats.OutOctets+=inlen;
} else {
/* Install header. */
be16enc(priv->outbuf, PROT_COMPD);
be16enc(priv->outbuf + 2, priv->seqnum);
/* Return packet in an mbuf. */
m_copyback(m, 0, outlen, (caddr_t)priv->outbuf);
if (m->m_pkthdr.len < outlen) {
m_freem(m);
priv->stats.Errors++;
return (ENOMEM);
} else if (outlen < m->m_pkthdr.len)
m_adj(m, outlen - m->m_pkthdr.len);
*resultp = m;
priv->stats.FramesComp++;
priv->stats.OutOctets+=outlen;
}
/* Update sequence number. */
priv->seqnum++;
return (0);
}
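/*
 * Illustrative sketch (not part of this revision): the 4-byte prefix that
 * ng_deflate_compress() writes before the deflated payload, spelled out
 * explicitly.  The example_ name is hypothetical.
 */
static void
example_build_compd_header(u_char hdr[4], uint16_t seqnum)
{
	be16enc(hdr, PROT_COMPD);	/* 0x00fd: compressed datagram */
	be16enc(hdr + 2, seqnum);	/* transmit sequence number */
}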
/*
* Decompress/decrypt packet and put the result in a new mbuf at *resultp.
* The original mbuf is not free'd.
*/
static int
ng_deflate_decompress(node_p node, struct mbuf *m, struct mbuf **resultp)
{
const priv_p priv = NG_NODE_PRIVATE(node);
int outlen, inlen;
int rtn;
uint16_t proto;
int offset;
uint16_t rseqnum;
/* Initialize. */
*resultp = NULL;
inlen = m->m_pkthdr.len;
if (inlen > DEFLATE_BUF_SIZE) {
priv->stats.Errors++;
NG_FREE_M(m);
priv->seqnum = 0;
return (ENOMEM);
}
/* We must own the mbuf chain exclusively to modify it. */
m = m_unshare(m, M_NOWAIT);
if (m == NULL) {
priv->stats.Errors++;
return (ENOMEM);
}
/* Work with contiguous regions of memory. */
m_copydata(m, 0, inlen, (caddr_t)priv->inbuf);
/* Separate proto. */
if ((priv->inbuf[0] & 0x01) != 0) {
proto = priv->inbuf[0];
offset = 1;
} else {
proto = be16dec(priv->inbuf);
offset = 2;
}
priv->stats.InOctets += inlen;
/* Packet is compressed, so decompress. */
if (proto == PROT_COMPD) {
priv->stats.FramesComp++;
/* Check sequence number. */
rseqnum = be16dec(priv->inbuf + offset);
offset += 2;
if (rseqnum != priv->seqnum) {
priv->stats.Errors++;
log(LOG_NOTICE, "ng_deflate: wrong sequence: %u "
"instead of %u\n", rseqnum, priv->seqnum);
NG_FREE_M(m);
priv->seqnum = 0;
return (EPIPE);
}
outlen = DEFLATE_BUF_SIZE;
/* Decompress "inbuf" into "outbuf". */
/* Prepare to decompress. */
priv->cx.next_in = priv->inbuf + offset;
priv->cx.avail_in = inlen - offset;
/* Reserve space for protocol decompression. */
priv->cx.next_out = priv->outbuf + 1;
priv->cx.avail_out = outlen - 1;
/* Decompress. */
rtn = inflate(&priv->cx, Z_PACKET_FLUSH);
/* Check return value. */
if (rtn != Z_OK && rtn != Z_STREAM_END) {
priv->stats.Errors++;
NG_FREE_M(m);
priv->seqnum = 0;
log(LOG_NOTICE, "%s: decompression error: %d (%s)\n",
__func__, rtn, priv->cx.msg);
switch (rtn) {
case Z_MEM_ERROR:
return (ENOMEM);
case Z_DATA_ERROR:
return (EIO);
default:
return (EINVAL);
}
}
/* Calculate resulting size. */
outlen -= priv->cx.avail_out;
/* Decompress protocol. */
if ((priv->outbuf[1] & 0x01) != 0) {
priv->outbuf[0] = 0;
/* Return packet in an mbuf. */
m_copyback(m, 0, outlen, (caddr_t)priv->outbuf);
} else {
outlen--;
/* Return packet in an mbuf. */
m_copyback(m, 0, outlen, (caddr_t)(priv->outbuf + 1));
}
if (m->m_pkthdr.len < outlen) {
m_freem(m);
priv->stats.Errors++;
priv->seqnum = 0;
return (ENOMEM);
} else if (outlen < m->m_pkthdr.len)
m_adj(m, outlen - m->m_pkthdr.len);
*resultp = m;
priv->stats.FramesPlain++;
priv->stats.OutOctets+=outlen;
} else { /* Packet is not compressed, just update dictionary. */
priv->stats.FramesUncomp++;
if (priv->inbuf[0] == 0) {
priv->cx.next_in = priv->inbuf + 1; /* compress protocol */
priv->cx.avail_in = inlen - 1;
} else {
priv->cx.next_in = priv->inbuf;
priv->cx.avail_in = inlen;
}
rtn = inflateIncomp(&priv->cx);
/* Check return value */
if (rtn != Z_OK) {
priv->stats.Errors++;
log(LOG_NOTICE, "%s: inflateIncomp error: %d (%s)\n",
__func__, rtn, priv->cx.msg);
NG_FREE_M(m);
priv->seqnum = 0;
return (EINVAL);
}
*resultp = m;
priv->stats.FramesPlain++;
priv->stats.OutOctets += inlen;
}
/* Update sequence number. */
priv->seqnum++;
return (0);
}
/*
* The peer has sent us a CCP ResetRequest, so reset our transmit state.
*/
static void
ng_deflate_reset_req(node_p node)
{
const priv_p priv = NG_NODE_PRIVATE(node);
priv->seqnum = 0;
if (priv->cfg.enable) {
if (priv->compress)
deflateReset(&priv->cx);
else
inflateReset(&priv->cx);
}
}
Index: head/sys/netgraph/ng_parse.c
===================================================================
--- head/sys/netgraph/ng_parse.c (revision 328217)
+++ head/sys/netgraph/ng_parse.c (revision 328218)
@@ -1,1903 +1,1902 @@
/*
* ng_parse.c
*/
/*-
* Copyright (c) 1999 Whistle Communications, Inc.
* All rights reserved.
*
* Subject to the following obligations and disclaimer of warranty, use and
* redistribution of this software, in source or object code forms, with or
* without modifications are expressly permitted by Whistle Communications;
* provided, however, that:
* 1. Any and all reproductions of the source or object code must include the
* copyright notice above and the following disclaimer of warranties; and
* 2. No rights are granted, in any manner or form, to use Whistle
* Communications, Inc. trademarks, including the mark "WHISTLE
* COMMUNICATIONS" on advertising, endorsements, or otherwise except as
* such appears in the above copyright notice or in the software.
*
* THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
* REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
* INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
* WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
* REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
* SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
* IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
* RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
* WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* Author: Archie Cobbs <archie@freebsd.org>
*
* $Whistle: ng_parse.c,v 1.3 1999/11/29 01:43:48 archie Exp $
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/ctype.h>
#include <machine/stdarg.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#ifdef NG_SEPARATE_MALLOC
static MALLOC_DEFINE(M_NETGRAPH_PARSE, "netgraph_parse", "netgraph parse info");
#else
#define M_NETGRAPH_PARSE M_NETGRAPH
#endif
/* Compute alignment for primitive integral types */
struct int16_temp {
char x;
int16_t y;
};
struct int32_temp {
char x;
int32_t y;
};
struct int64_temp {
char x;
int64_t y;
};
#define INT8_ALIGNMENT 1
#define INT16_ALIGNMENT ((size_t)&((struct int16_temp *)0)->y)
#define INT32_ALIGNMENT ((size_t)&((struct int32_temp *)0)->y)
#define INT64_ALIGNMENT ((size_t)&((struct int64_temp *)0)->y)
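/*
 * Illustrative sketch (not part of this revision): the macros above are the
 * classic null-pointer offsetof trick, so each one evaluates to the natural
 * alignment of the corresponding integer type (typically 4 for int32_t).
 * The example_ name is hypothetical.
 */
static size_t
example_int32_alignment(void)
{
	return (INT32_ALIGNMENT);	/* same as offsetof(struct int32_temp, y) */
}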
/* Output format for integral types */
#define INT_UNSIGNED 0
#define INT_SIGNED 1
#define INT_HEX 2
/* Type of composite object: struct, array, or fixedarray */
enum comptype {
CT_STRUCT,
CT_ARRAY,
CT_FIXEDARRAY,
};
/* Composite types helper functions */
static int ng_parse_composite(const struct ng_parse_type *type,
const char *s, int *off, const u_char *start,
u_char *const buf, int *buflen, enum comptype ctype);
static int ng_unparse_composite(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen,
enum comptype ctype);
static int ng_get_composite_elem_default(const struct ng_parse_type *type,
int index, const u_char *start, u_char *buf,
int *buflen, enum comptype ctype);
static int ng_get_composite_len(const struct ng_parse_type *type,
const u_char *start, const u_char *buf,
enum comptype ctype);
static const struct ng_parse_type *ng_get_composite_etype(const struct
ng_parse_type *type, int index, enum comptype ctype);
static int ng_parse_get_elem_pad(const struct ng_parse_type *type,
int index, enum comptype ctype, int posn);
/* Parsing helper functions */
static int ng_parse_skip_value(const char *s, int off, int *lenp);
static int ng_parse_append(char **cbufp, int *cbuflenp,
const char *fmt, ...);
/* Poor man's virtual method calls */
#define METHOD(t,m) (ng_get_ ## m ## _method(t))
#define INVOKE(t,m) (*METHOD(t,m))
static ng_parse_t *ng_get_parse_method(const struct ng_parse_type *t);
static ng_unparse_t *ng_get_unparse_method(const struct ng_parse_type *t);
static ng_getDefault_t *ng_get_getDefault_method(const
struct ng_parse_type *t);
static ng_getAlign_t *ng_get_getAlign_method(const struct ng_parse_type *t);
#define ALIGNMENT(t) (METHOD(t, getAlign) == NULL ? \
0 : INVOKE(t, getAlign)(t))
/************************************************************************
PUBLIC FUNCTIONS
************************************************************************/
/*
* Convert an ASCII string to binary according to the supplied type descriptor
*/
int
ng_parse(const struct ng_parse_type *type,
const char *string, int *off, u_char *buf, int *buflen)
{
return INVOKE(type, parse)(type, string, off, buf, buf, buflen);
}
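/*
 * Illustrative sketch (not part of this revision): driving ng_parse() by
 * hand to turn the ASCII token "123" into a 32-bit binary value.  The
 * buffer size, the literal string, and the example_ name are hypothetical;
 * ng_parse_uint32_type is declared in <netgraph/ng_parse.h>.
 */
static int
example_parse_uint32(uint32_t *out)
{
	u_char buf[sizeof(uint32_t)];
	int off = 0, buflen = sizeof(buf), error;

	error = ng_parse(&ng_parse_uint32_type, "123", &off, buf, &buflen);
	if (error == 0)
		bcopy(buf, out, sizeof(*out));
	return (error);
}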
/*
* Convert binary to an ASCII string according to the supplied type descriptor
*/
int
ng_unparse(const struct ng_parse_type *type,
const u_char *data, char *cbuf, int cbuflen)
{
int off = 0;
return INVOKE(type, unparse)(type, data, &off, cbuf, cbuflen);
}
/*
* Fill in the default value according to the supplied type descriptor
*/
int
ng_parse_getDefault(const struct ng_parse_type *type, u_char *buf, int *buflen)
{
ng_getDefault_t *const func = METHOD(type, getDefault);
if (func == NULL)
return (EOPNOTSUPP);
return (*func)(type, buf, buf, buflen);
}
/************************************************************************
STRUCTURE TYPE
************************************************************************/
static int
ng_struct_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
return ng_parse_composite(type, s, off, start, buf, buflen, CT_STRUCT);
}
static int
ng_struct_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
return ng_unparse_composite(type, data, off, cbuf, cbuflen, CT_STRUCT);
}
static int
ng_struct_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
int off = 0;
return ng_parse_composite(type,
"{}", &off, start, buf, buflen, CT_STRUCT);
}
static int
ng_struct_getAlign(const struct ng_parse_type *type)
{
const struct ng_parse_struct_field *field;
int align = 0;
for (field = type->info; field->name != NULL; field++) {
int falign = ALIGNMENT(field->type);
if (falign > align)
align = falign;
}
return align;
}
const struct ng_parse_type ng_parse_struct_type = {
NULL,
NULL,
NULL,
ng_struct_parse,
ng_struct_unparse,
ng_struct_getDefault,
ng_struct_getAlign
};
/************************************************************************
FIXED LENGTH ARRAY TYPE
************************************************************************/
static int
ng_fixedarray_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
return ng_parse_composite(type,
s, off, start, buf, buflen, CT_FIXEDARRAY);
}
static int
ng_fixedarray_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
return ng_unparse_composite(type,
data, off, cbuf, cbuflen, CT_FIXEDARRAY);
}
static int
ng_fixedarray_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
int off = 0;
return ng_parse_composite(type,
"[]", &off, start, buf, buflen, CT_FIXEDARRAY);
}
static int
ng_fixedarray_getAlign(const struct ng_parse_type *type)
{
const struct ng_parse_fixedarray_info *fi = type->info;
return ALIGNMENT(fi->elementType);
}
const struct ng_parse_type ng_parse_fixedarray_type = {
NULL,
NULL,
NULL,
ng_fixedarray_parse,
ng_fixedarray_unparse,
ng_fixedarray_getDefault,
ng_fixedarray_getAlign
};
/************************************************************************
VARIABLE LENGTH ARRAY TYPE
************************************************************************/
static int
ng_array_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
return ng_parse_composite(type, s, off, start, buf, buflen, CT_ARRAY);
}
static int
ng_array_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
return ng_unparse_composite(type, data, off, cbuf, cbuflen, CT_ARRAY);
}
static int
ng_array_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
int off = 0;
return ng_parse_composite(type,
"[]", &off, start, buf, buflen, CT_ARRAY);
}
static int
ng_array_getAlign(const struct ng_parse_type *type)
{
const struct ng_parse_array_info *ai = type->info;
return ALIGNMENT(ai->elementType);
}
const struct ng_parse_type ng_parse_array_type = {
NULL,
NULL,
NULL,
ng_array_parse,
ng_array_unparse,
ng_array_getDefault,
ng_array_getAlign
};
/************************************************************************
INT8 TYPE
************************************************************************/
static int
ng_int8_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
long val;
int8_t val8;
char *eptr;
val = strtol(s + *off, &eptr, 0);
if (val < (int8_t)0x80 || val > (u_int8_t)0xff || eptr == s + *off)
return (EINVAL);
*off = eptr - s;
val8 = (int8_t)val;
bcopy(&val8, buf, sizeof(int8_t));
*buflen = sizeof(int8_t);
return (0);
}
static int
ng_int8_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
const char *fmt;
int fval;
int error;
int8_t val;
bcopy(data + *off, &val, sizeof(int8_t));
switch ((intptr_t)type->info) {
case INT_SIGNED:
fmt = "%d";
fval = val;
break;
case INT_UNSIGNED:
fmt = "%u";
fval = (u_int8_t)val;
break;
case INT_HEX:
fmt = "0x%x";
fval = (u_int8_t)val;
break;
default:
panic("%s: unknown type", __func__);
}
if ((error = ng_parse_append(&cbuf, &cbuflen, fmt, fval)) != 0)
return (error);
*off += sizeof(int8_t);
return (0);
}
static int
ng_int8_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
int8_t val;
if (*buflen < sizeof(int8_t))
return (ERANGE);
val = 0;
bcopy(&val, buf, sizeof(int8_t));
*buflen = sizeof(int8_t);
return (0);
}
static int
ng_int8_getAlign(const struct ng_parse_type *type)
{
return INT8_ALIGNMENT;
}
const struct ng_parse_type ng_parse_int8_type = {
NULL,
(void *)INT_SIGNED,
NULL,
ng_int8_parse,
ng_int8_unparse,
ng_int8_getDefault,
ng_int8_getAlign
};
const struct ng_parse_type ng_parse_uint8_type = {
&ng_parse_int8_type,
(void *)INT_UNSIGNED
};
const struct ng_parse_type ng_parse_hint8_type = {
&ng_parse_int8_type,
(void *)INT_HEX
};
/************************************************************************
INT16 TYPE
************************************************************************/
static int
ng_int16_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
long val;
int16_t val16;
char *eptr;
val = strtol(s + *off, &eptr, 0);
if (val < (int16_t)0x8000
|| val > (u_int16_t)0xffff || eptr == s + *off)
return (EINVAL);
*off = eptr - s;
val16 = (int16_t)val;
bcopy(&val16, buf, sizeof(int16_t));
*buflen = sizeof(int16_t);
return (0);
}
static int
ng_int16_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
const char *fmt;
int fval;
int error;
int16_t val;
bcopy(data + *off, &val, sizeof(int16_t));
switch ((intptr_t)type->info) {
case INT_SIGNED:
fmt = "%d";
fval = val;
break;
case INT_UNSIGNED:
fmt = "%u";
fval = (u_int16_t)val;
break;
case INT_HEX:
fmt = "0x%x";
fval = (u_int16_t)val;
break;
default:
panic("%s: unknown type", __func__);
}
if ((error = ng_parse_append(&cbuf, &cbuflen, fmt, fval)) != 0)
return (error);
*off += sizeof(int16_t);
return (0);
}
static int
ng_int16_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
int16_t val;
if (*buflen < sizeof(int16_t))
return (ERANGE);
val = 0;
bcopy(&val, buf, sizeof(int16_t));
*buflen = sizeof(int16_t);
return (0);
}
static int
ng_int16_getAlign(const struct ng_parse_type *type)
{
return INT16_ALIGNMENT;
}
const struct ng_parse_type ng_parse_int16_type = {
NULL,
(void *)INT_SIGNED,
NULL,
ng_int16_parse,
ng_int16_unparse,
ng_int16_getDefault,
ng_int16_getAlign
};
const struct ng_parse_type ng_parse_uint16_type = {
&ng_parse_int16_type,
(void *)INT_UNSIGNED
};
const struct ng_parse_type ng_parse_hint16_type = {
&ng_parse_int16_type,
(void *)INT_HEX
};
/************************************************************************
INT32 TYPE
************************************************************************/
static int
ng_int32_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
long val; /* assumes long is at least 32 bits */
int32_t val32;
char *eptr;
if ((intptr_t)type->info == INT_SIGNED)
val = strtol(s + *off, &eptr, 0);
else
val = strtoul(s + *off, &eptr, 0);
if (val < (int32_t)0x80000000
|| val > (u_int32_t)0xffffffff || eptr == s + *off)
return (EINVAL);
*off = eptr - s;
val32 = (int32_t)val;
bcopy(&val32, buf, sizeof(int32_t));
*buflen = sizeof(int32_t);
return (0);
}
static int
ng_int32_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
const char *fmt;
long fval;
int error;
int32_t val;
bcopy(data + *off, &val, sizeof(int32_t));
switch ((intptr_t)type->info) {
case INT_SIGNED:
fmt = "%ld";
fval = val;
break;
case INT_UNSIGNED:
fmt = "%lu";
fval = (u_int32_t)val;
break;
case INT_HEX:
fmt = "0x%lx";
fval = (u_int32_t)val;
break;
default:
panic("%s: unknown type", __func__);
}
if ((error = ng_parse_append(&cbuf, &cbuflen, fmt, fval)) != 0)
return (error);
*off += sizeof(int32_t);
return (0);
}
static int
ng_int32_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
int32_t val;
if (*buflen < sizeof(int32_t))
return (ERANGE);
val = 0;
bcopy(&val, buf, sizeof(int32_t));
*buflen = sizeof(int32_t);
return (0);
}
static int
ng_int32_getAlign(const struct ng_parse_type *type)
{
return INT32_ALIGNMENT;
}
const struct ng_parse_type ng_parse_int32_type = {
NULL,
(void *)INT_SIGNED,
NULL,
ng_int32_parse,
ng_int32_unparse,
ng_int32_getDefault,
ng_int32_getAlign
};
const struct ng_parse_type ng_parse_uint32_type = {
&ng_parse_int32_type,
(void *)INT_UNSIGNED
};
const struct ng_parse_type ng_parse_hint32_type = {
&ng_parse_int32_type,
(void *)INT_HEX
};
/************************************************************************
INT64 TYPE
************************************************************************/
static int
ng_int64_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
quad_t val;
int64_t val64;
char *eptr;
val = strtoq(s + *off, &eptr, 0);
if (eptr == s + *off)
return (EINVAL);
*off = eptr - s;
val64 = (int64_t)val;
bcopy(&val64, buf, sizeof(int64_t));
*buflen = sizeof(int64_t);
return (0);
}
static int
ng_int64_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
const char *fmt;
long long fval;
int64_t val;
int error;
bcopy(data + *off, &val, sizeof(int64_t));
switch ((intptr_t)type->info) {
case INT_SIGNED:
fmt = "%lld";
fval = val;
break;
case INT_UNSIGNED:
fmt = "%llu";
fval = (u_int64_t)val;
break;
case INT_HEX:
fmt = "0x%llx";
fval = (u_int64_t)val;
break;
default:
panic("%s: unknown type", __func__);
}
if ((error = ng_parse_append(&cbuf, &cbuflen, fmt, fval)) != 0)
return (error);
*off += sizeof(int64_t);
return (0);
}
static int
ng_int64_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
int64_t val;
if (*buflen < sizeof(int64_t))
return (ERANGE);
val = 0;
bcopy(&val, buf, sizeof(int64_t));
*buflen = sizeof(int64_t);
return (0);
}
static int
ng_int64_getAlign(const struct ng_parse_type *type)
{
return INT64_ALIGNMENT;
}
const struct ng_parse_type ng_parse_int64_type = {
NULL,
(void *)INT_SIGNED,
NULL,
ng_int64_parse,
ng_int64_unparse,
ng_int64_getDefault,
ng_int64_getAlign
};
const struct ng_parse_type ng_parse_uint64_type = {
&ng_parse_int64_type,
(void *)INT_UNSIGNED
};
const struct ng_parse_type ng_parse_hint64_type = {
&ng_parse_int64_type,
(void *)INT_HEX
};
/************************************************************************
STRING TYPE
************************************************************************/
static int
ng_string_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
char *sval;
int len;
int slen;
if ((sval = ng_get_string_token(s, off, &len, &slen)) == NULL)
return (EINVAL);
*off += len;
bcopy(sval, buf, slen + 1);
free(sval, M_NETGRAPH_PARSE);
*buflen = slen + 1;
return (0);
}
static int
ng_string_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
const char *const raw = (const char *)data + *off;
char *const s = ng_encode_string(raw, strlen(raw));
int error;
if (s == NULL)
return (ENOMEM);
if ((error = ng_parse_append(&cbuf, &cbuflen, "%s", s)) != 0) {
free(s, M_NETGRAPH_PARSE);
return (error);
}
*off += strlen(raw) + 1;
free(s, M_NETGRAPH_PARSE);
return (0);
}
static int
ng_string_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
if (*buflen < 1)
return (ERANGE);
buf[0] = (u_char)'\0';
*buflen = 1;
return (0);
}
const struct ng_parse_type ng_parse_string_type = {
NULL,
NULL,
NULL,
ng_string_parse,
ng_string_unparse,
ng_string_getDefault,
NULL
};
/************************************************************************
FIXED BUFFER STRING TYPE
************************************************************************/
static int
ng_fixedstring_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
const struct ng_parse_fixedstring_info *const fi = type->info;
char *sval;
int len;
int slen;
if ((sval = ng_get_string_token(s, off, &len, &slen)) == NULL)
return (EINVAL);
if (slen + 1 > fi->bufSize) {
free(sval, M_NETGRAPH_PARSE);
return (E2BIG);
}
*off += len;
bcopy(sval, buf, slen);
free(sval, M_NETGRAPH_PARSE);
bzero(buf + slen, fi->bufSize - slen);
*buflen = fi->bufSize;
return (0);
}
static int
ng_fixedstring_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
const struct ng_parse_fixedstring_info *const fi = type->info;
int error, temp = *off;
if ((error = ng_string_unparse(type, data, &temp, cbuf, cbuflen)) != 0)
return (error);
*off += fi->bufSize;
return (0);
}
static int
ng_fixedstring_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
const struct ng_parse_fixedstring_info *const fi = type->info;
if (*buflen < fi->bufSize)
return (ERANGE);
bzero(buf, fi->bufSize);
*buflen = fi->bufSize;
return (0);
}
const struct ng_parse_type ng_parse_fixedstring_type = {
NULL,
NULL,
NULL,
ng_fixedstring_parse,
ng_fixedstring_unparse,
ng_fixedstring_getDefault,
NULL
};
const struct ng_parse_fixedstring_info ng_parse_nodebuf_info = {
NG_NODESIZ
};
const struct ng_parse_type ng_parse_nodebuf_type = {
&ng_parse_fixedstring_type,
&ng_parse_nodebuf_info
};
const struct ng_parse_fixedstring_info ng_parse_hookbuf_info = {
NG_HOOKSIZ
};
const struct ng_parse_type ng_parse_hookbuf_type = {
&ng_parse_fixedstring_type,
&ng_parse_hookbuf_info
};
const struct ng_parse_fixedstring_info ng_parse_pathbuf_info = {
NG_PATHSIZ
};
const struct ng_parse_type ng_parse_pathbuf_type = {
&ng_parse_fixedstring_type,
&ng_parse_pathbuf_info
};
const struct ng_parse_fixedstring_info ng_parse_typebuf_info = {
NG_TYPESIZ
};
const struct ng_parse_type ng_parse_typebuf_type = {
&ng_parse_fixedstring_type,
&ng_parse_typebuf_info
};
const struct ng_parse_fixedstring_info ng_parse_cmdbuf_info = {
NG_CMDSTRSIZ
};
const struct ng_parse_type ng_parse_cmdbuf_type = {
&ng_parse_fixedstring_type,
&ng_parse_cmdbuf_info
};
/************************************************************************
EXPLICITLY SIZED STRING TYPE
************************************************************************/
static int
ng_sizedstring_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
char *sval;
int len;
int slen;
if ((sval = ng_get_string_token(s, off, &len, &slen)) == NULL)
return (EINVAL);
if (slen > USHRT_MAX) {
free(sval, M_NETGRAPH_PARSE);
return (EINVAL);
}
*off += len;
*((u_int16_t *)buf) = (u_int16_t)slen;
bcopy(sval, buf + 2, slen);
free(sval, M_NETGRAPH_PARSE);
*buflen = 2 + slen;
return (0);
}
static int
ng_sizedstring_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
const char *const raw = (const char *)data + *off + 2;
const int slen = *((const u_int16_t *)(data + *off));
char *const s = ng_encode_string(raw, slen);
int error;
if (s == NULL)
return (ENOMEM);
if ((error = ng_parse_append(&cbuf, &cbuflen, "%s", s)) != 0) {
free(s, M_NETGRAPH_PARSE);
return (error);
}
free(s, M_NETGRAPH_PARSE);
*off += slen + 2;
return (0);
}
static int
ng_sizedstring_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
if (*buflen < 2)
return (ERANGE);
bzero(buf, 2);
*buflen = 2;
return (0);
}
const struct ng_parse_type ng_parse_sizedstring_type = {
NULL,
NULL,
NULL,
ng_sizedstring_parse,
ng_sizedstring_unparse,
ng_sizedstring_getDefault,
NULL
};
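/*
* Illustrative sketch (standalone, not part of ng_parse.c): the wire layout
* produced by ng_sizedstring_parse() is a host-order 16-bit length
* immediately followed by the unterminated string bytes.  pack_sizedstring()
* is a hypothetical helper showing that layout.
*/
#include <stdint.h>
#include <string.h>

static int
pack_sizedstring(unsigned char *buf, const char *str, uint16_t slen)
{
    memcpy(buf, &slen, sizeof(slen));   /* 2-byte length prefix */
    memcpy(buf + 2, str, slen);         /* raw bytes, no NUL */
    return (2 + slen);
}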
/************************************************************************
IP ADDRESS TYPE
************************************************************************/
static int
ng_ipaddr_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
int i, error;
for (i = 0; i < 4; i++) {
if ((error = ng_int8_parse(&ng_parse_int8_type,
s, off, start, buf + i, buflen)) != 0)
return (error);
if (i < 3 && s[*off] != '.')
return (EINVAL);
(*off)++;
}
*buflen = 4;
return (0);
}
static int
ng_ipaddr_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
struct in_addr ip;
int error;
bcopy(data + *off, &ip, sizeof(ip));
if ((error = ng_parse_append(&cbuf, &cbuflen, "%d.%d.%d.%d",
((u_char *)&ip)[0], ((u_char *)&ip)[1],
((u_char *)&ip)[2], ((u_char *)&ip)[3])) != 0)
return (error);
*off += sizeof(ip);
return (0);
}
static int
ng_ipaddr_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
struct in_addr ip = { 0 };
if (*buflen < sizeof(ip))
return (ERANGE);
bcopy(&ip, buf, sizeof(ip));
*buflen = sizeof(ip);
return (0);
}
const struct ng_parse_type ng_parse_ipaddr_type = {
NULL,
NULL,
NULL,
ng_ipaddr_parse,
ng_ipaddr_unparse,
ng_ipaddr_getDefault,
ng_int32_getAlign
};
/************************************************************************
ETHERNET ADDRESS TYPE
************************************************************************/
static int
ng_enaddr_parse(const struct ng_parse_type *type,
const char *s, int *const off, const u_char *const start,
u_char *const buf, int *const buflen)
{
char *eptr;
u_long val;
int i;
if (*buflen < ETHER_ADDR_LEN)
return (ERANGE);
for (i = 0; i < ETHER_ADDR_LEN; i++) {
val = strtoul(s + *off, &eptr, 16);
if (val > 0xff || eptr == s + *off)
return (EINVAL);
buf[i] = (u_char)val;
*off = (eptr - s);
if (i < ETHER_ADDR_LEN - 1) {
if (*eptr != ':')
return (EINVAL);
(*off)++;
}
}
*buflen = ETHER_ADDR_LEN;
return (0);
}
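/*
* Illustrative sketch (standalone userland, not part of ng_parse.c): the same
* colon-separated strtoul() loop as ng_enaddr_parse(), operating on a plain
* string instead of the netgraph offset/buffer convention.  parse_enaddr() is
* a hypothetical helper; 6 is ETHER_ADDR_LEN.
*/
#include <stdlib.h>

static int
parse_enaddr(const char *s, unsigned char out[6])
{
    char *eptr;
    unsigned long val;
    int i;

    for (i = 0; i < 6; i++) {
        val = strtoul(s, &eptr, 16);
        if (val > 0xff || eptr == s)
            return (-1);        /* not a hex octet */
        out[i] = (unsigned char)val;
        s = eptr;
        if (i < 5) {
            if (*s != ':')
                return (-1);    /* missing separator */
            s++;
        }
    }
    return (0);
}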
static int
ng_enaddr_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
int len;
len = snprintf(cbuf, cbuflen, "%02x:%02x:%02x:%02x:%02x:%02x",
data[*off], data[*off + 1], data[*off + 2],
data[*off + 3], data[*off + 4], data[*off + 5]);
if (len >= cbuflen)
return (ERANGE);
*off += ETHER_ADDR_LEN;
return (0);
}
const struct ng_parse_type ng_parse_enaddr_type = {
NULL,
NULL,
NULL,
ng_enaddr_parse,
ng_enaddr_unparse,
NULL,
0
};
/************************************************************************
BYTE ARRAY TYPE
************************************************************************/
/* Get the length of a byte array */
static int
ng_parse_bytearray_subtype_getLength(const struct ng_parse_type *type,
const u_char *start, const u_char *buf)
{
ng_parse_array_getLength_t *const getLength = type->private;
return (*getLength)(type, start, buf);
}
/* Byte array element type is hex int8 */
static const struct ng_parse_array_info ng_parse_bytearray_subtype_info = {
&ng_parse_hint8_type,
&ng_parse_bytearray_subtype_getLength,
NULL
};
static const struct ng_parse_type ng_parse_bytearray_subtype = {
&ng_parse_array_type,
&ng_parse_bytearray_subtype_info
};
static int
ng_bytearray_parse(const struct ng_parse_type *type,
const char *s, int *off, const u_char *const start,
u_char *const buf, int *buflen)
{
char *str;
int toklen;
int slen;
/* We accept either an array of bytes or a string constant */
if ((str = ng_get_string_token(s, off, &toklen, &slen)) != NULL) {
ng_parse_array_getLength_t *const getLength = type->info;
int arraylen;
arraylen = (*getLength)(type, start, buf);
if (arraylen > *buflen) {
free(str, M_NETGRAPH_PARSE);
return (ERANGE);
}
if (slen > arraylen) {
free(str, M_NETGRAPH_PARSE);
return (E2BIG);
}
bcopy(str, buf, slen);
bzero(buf + slen, arraylen - slen);
free(str, M_NETGRAPH_PARSE);
*off += toklen;
*buflen = arraylen;
return (0);
} else {
struct ng_parse_type subtype;
subtype = ng_parse_bytearray_subtype;
subtype.private = __DECONST(void *, type->info);
return ng_array_parse(&subtype, s, off, start, buf, buflen);
}
}
static int
ng_bytearray_unparse(const struct ng_parse_type *type,
const u_char *data, int *off, char *cbuf, int cbuflen)
{
struct ng_parse_type subtype;
subtype = ng_parse_bytearray_subtype;
subtype.private = __DECONST(void *, type->info);
return ng_array_unparse(&subtype, data, off, cbuf, cbuflen);
}
static int
ng_bytearray_getDefault(const struct ng_parse_type *type,
const u_char *const start, u_char *buf, int *buflen)
{
struct ng_parse_type subtype;
subtype = ng_parse_bytearray_subtype;
subtype.private = __DECONST(void *, type->info);
return ng_array_getDefault(&subtype, start, buf, buflen);
}
const struct ng_parse_type ng_parse_bytearray_type = {
NULL,
NULL,
NULL,
ng_bytearray_parse,
ng_bytearray_unparse,
ng_bytearray_getDefault,
NULL
};
/************************************************************************
STRUCT NG_MESG TYPE
************************************************************************/
/* Get msg->header.arglen when "buf" is pointing to msg->data */
static int
ng_parse_ng_mesg_getLength(const struct ng_parse_type *type,
const u_char *start, const u_char *buf)
{
const struct ng_mesg *msg;
msg = (const struct ng_mesg *)(buf - sizeof(*msg));
return msg->header.arglen;
}
/* Type for the variable length data portion of a struct ng_mesg */
static const struct ng_parse_type ng_msg_data_type = {
&ng_parse_bytearray_type,
&ng_parse_ng_mesg_getLength
};
/* Type for the entire struct ng_mesg header with data section */
static const struct ng_parse_struct_field ng_parse_ng_mesg_type_fields[]
= NG_GENERIC_NG_MESG_INFO(&ng_msg_data_type);
const struct ng_parse_type ng_parse_ng_mesg_type = {
&ng_parse_struct_type,
&ng_parse_ng_mesg_type_fields,
};
/************************************************************************
COMPOSITE HELPER ROUTINES
************************************************************************/
/*
* Convert a structure or array from ASCII to binary
*/
static int
ng_parse_composite(const struct ng_parse_type *type, const char *s,
int *off, const u_char *const start, u_char *const buf, int *buflen,
const enum comptype ctype)
{
const int num = ng_get_composite_len(type, start, buf, ctype);
int nextIndex = 0; /* next implicit array index */
u_int index; /* field or element index */
int *foff; /* field value offsets in string */
int align, len, blen, error = 0;
/* Initialize */
- foff = mallocarray(num, sizeof(*foff), M_NETGRAPH_PARSE,
- M_NOWAIT | M_ZERO);
+ foff = malloc(num * sizeof(*foff), M_NETGRAPH_PARSE, M_NOWAIT | M_ZERO);
if (foff == NULL) {
error = ENOMEM;
goto done;
}
/* Get opening brace/bracket */
if (ng_parse_get_token(s, off, &len)
!= (ctype == CT_STRUCT ? T_LBRACE : T_LBRACKET)) {
error = EINVAL;
goto done;
}
*off += len;
/* Get individual element value positions in the string */
for (;;) {
enum ng_parse_token tok;
/* Check for closing brace/bracket */
tok = ng_parse_get_token(s, off, &len);
if (tok == (ctype == CT_STRUCT ? T_RBRACE : T_RBRACKET)) {
*off += len;
break;
}
/* For arrays, the 'name' (i.e., the index) is optional, so
distinguish an index from a value by checking whether the
next token is an equals sign */
if (ctype != CT_STRUCT) {
u_long ul;
int len2, off2;
char *eptr;
/* If an opening brace/bracket, index is implied */
if (tok == T_LBRACE || tok == T_LBRACKET) {
index = nextIndex++;
goto gotIndex;
}
/* Might be an index, might be a value, either way... */
if (tok != T_WORD) {
error = EINVAL;
goto done;
}
/* If no equals sign follows, index is implied */
off2 = *off + len;
if (ng_parse_get_token(s, &off2, &len2) != T_EQUALS) {
index = nextIndex++;
goto gotIndex;
}
/* Index was specified explicitly; parse it */
ul = strtoul(s + *off, &eptr, 0);
if (ul == ULONG_MAX || eptr - (s + *off) != len) {
error = EINVAL;
goto done;
}
index = (u_int)ul;
nextIndex = index + 1;
*off += len + len2;
} else { /* a structure field */
const struct ng_parse_struct_field *const
fields = type->info;
/* Find the field by name (required) in field list */
if (tok != T_WORD) {
error = EINVAL;
goto done;
}
for (index = 0; index < num; index++) {
const struct ng_parse_struct_field *const
field = &fields[index];
if (strncmp(&s[*off], field->name, len) == 0
&& field->name[len] == '\0')
break;
}
if (index == num) {
error = ENOENT;
goto done;
}
*off += len;
/* Get equals sign */
if (ng_parse_get_token(s, off, &len) != T_EQUALS) {
error = EINVAL;
goto done;
}
*off += len;
}
gotIndex:
/* Check array index */
if (index >= num) {
error = E2BIG;
goto done;
}
/* Save value's position and skip over it for now */
if (foff[index] != 0) {
error = EALREADY; /* duplicate */
goto done;
}
while (isspace(s[*off]))
(*off)++;
foff[index] = *off;
if ((error = ng_parse_skip_value(s, *off, &len)) != 0)
goto done;
*off += len;
}
/* Now build binary structure from supplied values and defaults */
for (blen = index = 0; index < num; index++) {
const struct ng_parse_type *const
etype = ng_get_composite_etype(type, index, ctype);
int k, pad, vlen;
/* Zero-pad any alignment bytes */
pad = ng_parse_get_elem_pad(type, index, ctype, blen);
for (k = 0; k < pad; k++) {
if (blen >= *buflen) {
error = ERANGE;
goto done;
}
buf[blen++] = 0;
}
/* Get value */
vlen = *buflen - blen;
if (foff[index] == 0) { /* use default value */
error = ng_get_composite_elem_default(type, index,
start, buf + blen, &vlen, ctype);
} else { /* parse given value */
*off = foff[index];
error = INVOKE(etype, parse)(etype,
s, off, start, buf + blen, &vlen);
}
if (error != 0)
goto done;
blen += vlen;
}
/* Make total composite structure size a multiple of its alignment */
if ((align = ALIGNMENT(type)) != 0) {
while (blen % align != 0) {
if (blen >= *buflen) {
error = ERANGE;
goto done;
}
buf[blen++] = 0;
}
}
/* Done */
*buflen = blen;
done:
if (foff != NULL)
free(foff, M_NETGRAPH_PARSE);
return (error);
}
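/*
* Example of the ASCII form consumed by ng_parse_composite() (the field names
* here are hypothetical and only illustrate the syntax): structs are
* brace-delimited "name=value" pairs; arrays are bracket-delimited values
* whose indices are implicit unless given explicitly as "index=value".
*/
static const char composite_example[] =
    "{ enable=1 name=\"eth0\" ports=[ 21 22 5=8080 ] }";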
/*
* Convert an array or structure from binary to ASCII
*/
static int
ng_unparse_composite(const struct ng_parse_type *type, const u_char *data,
int *off, char *cbuf, int cbuflen, const enum comptype ctype)
{
const struct ng_mesg *const hdr
= (const struct ng_mesg *)(data - sizeof(*hdr));
const int num = ng_get_composite_len(type, data, data + *off, ctype);
const int workSize = 20 * 1024; /* XXX hard coded constant */
int nextIndex = 0, didOne = 0;
int error, index;
u_char *workBuf;
/* Get workspace for checking default values */
workBuf = malloc(workSize, M_NETGRAPH_PARSE, M_NOWAIT);
if (workBuf == NULL)
return (ENOMEM);
/* Opening brace/bracket */
if ((error = ng_parse_append(&cbuf, &cbuflen, "%c",
(ctype == CT_STRUCT) ? '{' : '[')) != 0)
goto fail;
/* Do each item */
for (index = 0; index < num; index++) {
const struct ng_parse_type *const
etype = ng_get_composite_etype(type, index, ctype);
/* Skip any alignment pad bytes */
*off += ng_parse_get_elem_pad(type, index, ctype, *off);
/*
* See if element is equal to its default value; skip if so.
* Copy struct ng_mesg header for types that peek into it.
*/
if (sizeof(*hdr) + *off < workSize) {
int tempsize = workSize - sizeof(*hdr) - *off;
bcopy(hdr, workBuf, sizeof(*hdr) + *off);
if (ng_get_composite_elem_default(type, index, workBuf
+ sizeof(*hdr), workBuf + sizeof(*hdr) + *off,
&tempsize, ctype) == 0
&& bcmp(workBuf + sizeof(*hdr) + *off,
data + *off, tempsize) == 0) {
*off += tempsize;
continue;
}
}
/* Print name= */
if ((error = ng_parse_append(&cbuf, &cbuflen, " ")) != 0)
goto fail;
if (ctype != CT_STRUCT) {
if (index != nextIndex) {
nextIndex = index;
if ((error = ng_parse_append(&cbuf,
&cbuflen, "%d=", index)) != 0)
goto fail;
}
nextIndex++;
} else {
const struct ng_parse_struct_field *const
fields = type->info;
if ((error = ng_parse_append(&cbuf,
&cbuflen, "%s=", fields[index].name)) != 0)
goto fail;
}
/* Print value */
if ((error = INVOKE(etype, unparse)
(etype, data, off, cbuf, cbuflen)) != 0) {
free(workBuf, M_NETGRAPH_PARSE);
return (error);
}
cbuflen -= strlen(cbuf);
cbuf += strlen(cbuf);
didOne = 1;
}
/* Closing brace/bracket */
error = ng_parse_append(&cbuf, &cbuflen, "%s%c",
didOne ? " " : "", (ctype == CT_STRUCT) ? '}' : ']');
fail:
/* Clean up after failure */
free(workBuf, M_NETGRAPH_PARSE);
return (error);
}
/*
* Generate the default value for an element of an array or structure.
* Returns EOPNOTSUPP if the default value is unspecified.
*/
static int
ng_get_composite_elem_default(const struct ng_parse_type *type,
int index, const u_char *const start, u_char *buf, int *buflen,
const enum comptype ctype)
{
const struct ng_parse_type *etype;
ng_getDefault_t *func;
switch (ctype) {
case CT_STRUCT:
break;
case CT_ARRAY:
{
const struct ng_parse_array_info *const ai = type->info;
if (ai->getDefault != NULL) {
return (*ai->getDefault)(type,
index, start, buf, buflen);
}
break;
}
case CT_FIXEDARRAY:
{
const struct ng_parse_fixedarray_info *const fi = type->info;
if (fi->getDefault != NULL) {
return (*fi->getDefault)(type,
index, start, buf, buflen);
}
break;
}
default:
panic("%s", __func__);
}
/* Default to element type default */
etype = ng_get_composite_etype(type, index, ctype);
func = METHOD(etype, getDefault);
if (func == NULL)
return (EOPNOTSUPP);
return (*func)(etype, start, buf, buflen);
}
/*
* Get the number of elements in a struct, variable or fixed array.
*/
static int
ng_get_composite_len(const struct ng_parse_type *type,
const u_char *const start, const u_char *buf,
const enum comptype ctype)
{
switch (ctype) {
case CT_STRUCT:
{
const struct ng_parse_struct_field *const fields = type->info;
int numFields = 0;
for (numFields = 0; ; numFields++) {
const struct ng_parse_struct_field *const
fi = &fields[numFields];
if (fi->name == NULL)
break;
}
return (numFields);
}
case CT_ARRAY:
{
const struct ng_parse_array_info *const ai = type->info;
return (*ai->getLength)(type, start, buf);
}
case CT_FIXEDARRAY:
{
const struct ng_parse_fixedarray_info *const fi = type->info;
return fi->length;
}
default:
panic("%s", __func__);
}
return (0);
}
/*
* Return the type of the index'th element of a composite structure
*/
static const struct ng_parse_type *
ng_get_composite_etype(const struct ng_parse_type *type,
int index, const enum comptype ctype)
{
const struct ng_parse_type *etype = NULL;
switch (ctype) {
case CT_STRUCT:
{
const struct ng_parse_struct_field *const fields = type->info;
etype = fields[index].type;
break;
}
case CT_ARRAY:
{
const struct ng_parse_array_info *const ai = type->info;
etype = ai->elementType;
break;
}
case CT_FIXEDARRAY:
{
const struct ng_parse_fixedarray_info *const fi = type->info;
etype = fi->elementType;
break;
}
default:
panic("%s", __func__);
}
return (etype);
}
/*
* Get the number of bytes to skip to align for the next
* element in a composite structure.
*/
static int
ng_parse_get_elem_pad(const struct ng_parse_type *type,
int index, enum comptype ctype, int posn)
{
const struct ng_parse_type *const
etype = ng_get_composite_etype(type, index, ctype);
int align;
/* Get element's alignment, and possibly override */
align = ALIGNMENT(etype);
if (ctype == CT_STRUCT) {
const struct ng_parse_struct_field *const fields = type->info;
if (fields[index].alignment != 0)
align = fields[index].alignment;
}
/* Return number of bytes to skip to align */
return (align ? (align - (posn % align)) % align : 0);
}
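/*
* Illustrative sketch (standalone, not part of ng_parse.c): the padding
* expression above maps (posn, align) to the number of bytes needed to reach
* the next multiple of align, e.g. pad_to(5, 4) == 3 and pad_to(8, 4) == 0.
*/
static int
pad_to(int posn, int align)
{
    return (align ? (align - (posn % align)) % align : 0);
}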
/************************************************************************
PARSING HELPER ROUTINES
************************************************************************/
/*
* Append to a fixed length string buffer.
*/
static int
ng_parse_append(char **cbufp, int *cbuflenp, const char *fmt, ...)
{
va_list args;
int len;
va_start(args, fmt);
len = vsnprintf(*cbufp, *cbuflenp, fmt, args);
va_end(args);
if (len >= *cbuflenp)
return ERANGE;
*cbufp += len;
*cbuflenp -= len;
return (0);
}
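/*
* Illustrative sketch (standalone userland, not part of ng_parse.c): the same
* vsnprintf() idiom as ng_parse_append(), advancing a cursor/remaining-length
* pair and treating truncation (the return value reaching the remaining
* length) as an error.
*/
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>

static int
buf_append(char **cur, int *left, const char *fmt, ...)
{
    va_list ap;
    int len;

    va_start(ap, fmt);
    len = vsnprintf(*cur, *left, fmt, ap);
    va_end(ap);
    if (len < 0 || len >= *left)
        return (ERANGE);    /* output would not fit */
    *cur += len;
    *left -= len;
    return (0);
}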
/*
* Skip over a value
*/
static int
ng_parse_skip_value(const char *s, int off0, int *lenp)
{
int len, nbracket, nbrace;
int off = off0;
len = nbracket = nbrace = 0;
do {
switch (ng_parse_get_token(s, &off, &len)) {
case T_LBRACKET:
nbracket++;
break;
case T_LBRACE:
nbrace++;
break;
case T_RBRACKET:
if (nbracket-- == 0)
return (EINVAL);
break;
case T_RBRACE:
if (nbrace-- == 0)
return (EINVAL);
break;
case T_EOF:
return (EINVAL);
default:
break;
}
off += len;
} while (nbracket > 0 || nbrace > 0);
*lenp = off - off0;
return (0);
}
/*
* Find the next token in the string, starting at offset *startp.
* Returns the token type, with *startp pointing to the first char
* and *lenp the length.
*/
enum ng_parse_token
ng_parse_get_token(const char *s, int *startp, int *lenp)
{
char *t;
int i;
while (isspace(s[*startp]))
(*startp)++;
switch (s[*startp]) {
case '\0':
*lenp = 0;
return T_EOF;
case '{':
*lenp = 1;
return T_LBRACE;
case '}':
*lenp = 1;
return T_RBRACE;
case '[':
*lenp = 1;
return T_LBRACKET;
case ']':
*lenp = 1;
return T_RBRACKET;
case '=':
*lenp = 1;
return T_EQUALS;
case '"':
if ((t = ng_get_string_token(s, startp, lenp, NULL)) == NULL)
return T_ERROR;
free(t, M_NETGRAPH_PARSE);
return T_STRING;
default:
for (i = *startp + 1; s[i] != '\0' && !isspace(s[i])
&& s[i] != '{' && s[i] != '}' && s[i] != '['
&& s[i] != ']' && s[i] != '=' && s[i] != '"'; i++)
;
*lenp = i - *startp;
return T_WORD;
}
}
/*
* Get a string token, which must be enclosed in double quotes.
* The normal C backslash escapes are recognized.
*/
char *
ng_get_string_token(const char *s, int *startp, int *lenp, int *slenp)
{
char *cbuf, *p;
int start, off;
int slen;
while (isspace(s[*startp]))
(*startp)++;
start = *startp;
if (s[*startp] != '"')
return (NULL);
cbuf = malloc(strlen(s + start), M_NETGRAPH_PARSE, M_NOWAIT);
if (cbuf == NULL)
return (NULL);
strcpy(cbuf, s + start + 1);
for (slen = 0, off = 1, p = cbuf; *p != '\0'; slen++, off++, p++) {
if (*p == '"') {
*p = '\0';
*lenp = off + 1;
if (slenp != NULL)
*slenp = slen;
return (cbuf);
} else if (p[0] == '\\' && p[1] != '\0') {
int x, k;
char *v;
strcpy(p, p + 1);
v = p;
switch (*p) {
case 't':
*v = '\t';
off++;
continue;
case 'n':
*v = '\n';
off++;
continue;
case 'r':
*v = '\r';
off++;
continue;
case 'v':
*v = '\v';
off++;
continue;
case 'f':
*v = '\f';
off++;
continue;
case '"':
*v = '"';
off++;
continue;
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
for (x = k = 0;
k < 3 && *v >= '0' && *v <= '7'; k++, v++) {
x = (x << 3) + (*v - '0');
off++;
}
*--v = (char)x;
break;
case 'x':
for (v++, x = k = 0;
k < 2 && isxdigit(*v); k++, v++) {
x = (x << 4) + (isdigit(*v) ?
(*v - '0') :
(tolower(*v) - 'a' + 10));
off++;
}
*--v = (char)x;
break;
default:
continue;
}
strcpy(p, v);
}
}
free(cbuf, M_NETGRAPH_PARSE);
return (NULL); /* no closing quote */
}
/*
* Encode a string so it can be safely put in double quotes.
* Caller must free the result. Exactly "slen" characters
* are encoded.
*/
char *
ng_encode_string(const char *raw, int slen)
{
char *cbuf;
int off = 0;
int i;
cbuf = malloc(strlen(raw) * 4 + 3, M_NETGRAPH_PARSE, M_NOWAIT);
if (cbuf == NULL)
return (NULL);
cbuf[off++] = '"';
for (i = 0; i < slen; i++, raw++) {
switch (*raw) {
case '\t':
cbuf[off++] = '\\';
cbuf[off++] = 't';
break;
case '\f':
cbuf[off++] = '\\';
cbuf[off++] = 'f';
break;
case '\n':
cbuf[off++] = '\\';
cbuf[off++] = 'n';
break;
case '\r':
cbuf[off++] = '\\';
cbuf[off++] = 'r';
break;
case '\v':
cbuf[off++] = '\\';
cbuf[off++] = 'v';
break;
case '"':
case '\\':
cbuf[off++] = '\\';
cbuf[off++] = *raw;
break;
default:
if (*raw < 0x20 || *raw > 0x7e) {
off += sprintf(cbuf + off,
"\\x%02x", (u_char)*raw);
break;
}
cbuf[off++] = *raw;
break;
}
}
cbuf[off++] = '"';
cbuf[off] = '\0';
return (cbuf);
}
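/*
* Illustrative sketch (standalone, not part of ng_parse.c): the allocation in
* ng_encode_string() is a worst-case bound, since every input byte can expand
* to a four-character "\xNN" escape, plus the two surrounding quotes and the
* trailing NUL.
*/
static int
encoded_worst_case(int rawlen)
{
    return (rawlen * 4 + 3);    /* 4 per byte + 2 quotes + NUL */
}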
/************************************************************************
VIRTUAL METHOD LOOKUP
************************************************************************/
static ng_parse_t *
ng_get_parse_method(const struct ng_parse_type *t)
{
while (t != NULL && t->parse == NULL)
t = t->supertype;
return (t ? t->parse : NULL);
}
static ng_unparse_t *
ng_get_unparse_method(const struct ng_parse_type *t)
{
while (t != NULL && t->unparse == NULL)
t = t->supertype;
return (t ? t->unparse : NULL);
}
static ng_getDefault_t *
ng_get_getDefault_method(const struct ng_parse_type *t)
{
while (t != NULL && t->getDefault == NULL)
t = t->supertype;
return (t ? t->getDefault : NULL);
}
static ng_getAlign_t *
ng_get_getAlign_method(const struct ng_parse_type *t)
{
while (t != NULL && t->getAlign == NULL)
t = t->supertype;
return (t ? t->getAlign : NULL);
}
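/*
* Illustrative sketch (standalone, not part of ng_parse.c): how the lookups
* above resolve a method by walking the supertype chain.  For example,
* ng_parse_uint8_type defines no parse method of its own, so the walk falls
* through to its supertype ng_parse_int8_type and returns ng_int8_parse.
* The miniature "vtype" below shows the same pattern with one method slot.
*/
typedef int vmethod_t(void);

struct vtype {
    const struct vtype *supertype;
    vmethod_t *method;
};

static vmethod_t *
vtype_lookup(const struct vtype *t)
{
    while (t != NULL && t->method == NULL)
        t = t->supertype;
    return (t ? t->method : NULL);
}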
Index: head/sys/netinet6/in6_jail.c
===================================================================
--- head/sys/netinet6/in6_jail.c (revision 328217)
+++ head/sys/netinet6/in6_jail.c (revision 328218)
@@ -1,418 +1,418 @@
/*-
* Copyright (c) 1999 Poul-Henning Kamp.
* Copyright (c) 2008 Bjoern A. Zeeb.
* Copyright (c) 2009 James Gritton.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/sysproto.h>
#include <sys/malloc.h>
#include <sys/osd.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <net/if.h>
#include <net/vnet.h>
#include <netinet/in.h>
int
prison_qcmp_v6(const void *ip1, const void *ip2)
{
const struct in6_addr *ia6a, *ia6b;
int i, rc;
ia6a = (const struct in6_addr *)ip1;
ia6b = (const struct in6_addr *)ip2;
rc = 0;
for (i = 0; rc == 0 && i < sizeof(struct in6_addr); i++) {
if (ia6a->s6_addr[i] > ia6b->s6_addr[i])
rc = 1;
else if (ia6a->s6_addr[i] < ia6b->s6_addr[i])
rc = -1;
}
return (rc);
}
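/*
* Illustrative sketch (standalone, not part of in6_jail.c): the byte-wise
* comparison above is equivalent to memcmp() over the 16-byte address,
* clamped to {-1, 0, 1}, because s6_addr is compared as unsigned bytes in
* address order.
*/
#include <string.h>

static int
qcmp_v6_equiv(const unsigned char a[16], const unsigned char b[16])
{
    int d = memcmp(a, b, 16);

    return (d > 0 ? 1 : (d < 0 ? -1 : 0));
}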
int
prison_restrict_ip6(struct prison *pr, struct in6_addr *newip6)
{
int ii, ij, used;
struct prison *ppr;
ppr = pr->pr_parent;
if (!(pr->pr_flags & PR_IP6_USER)) {
/* This has no user settings, so just copy the parent's list. */
if (pr->pr_ip6s < ppr->pr_ip6s) {
/*
* There's no room for the parent's list. Use the
* new list buffer, which is assumed to be big enough
* (if it was passed). If there's no buffer, try to
* allocate one.
*/
used = 1;
if (newip6 == NULL) {
- newip6 = mallocarray(ppr->pr_ip6s,
- sizeof(*newip6), M_PRISON, M_NOWAIT);
+ newip6 = malloc(ppr->pr_ip6s * sizeof(*newip6),
+ M_PRISON, M_NOWAIT);
if (newip6 != NULL)
used = 0;
}
if (newip6 != NULL) {
bcopy(ppr->pr_ip6, newip6,
ppr->pr_ip6s * sizeof(*newip6));
free(pr->pr_ip6, M_PRISON);
pr->pr_ip6 = newip6;
pr->pr_ip6s = ppr->pr_ip6s;
}
return (used);
}
pr->pr_ip6s = ppr->pr_ip6s;
if (pr->pr_ip6s > 0)
bcopy(ppr->pr_ip6, pr->pr_ip6,
pr->pr_ip6s * sizeof(*newip6));
else if (pr->pr_ip6 != NULL) {
free(pr->pr_ip6, M_PRISON);
pr->pr_ip6 = NULL;
}
} else if (pr->pr_ip6s > 0) {
/* Remove addresses that aren't in the parent. */
for (ij = 0; ij < ppr->pr_ip6s; ij++)
if (IN6_ARE_ADDR_EQUAL(&pr->pr_ip6[0],
&ppr->pr_ip6[ij]))
break;
if (ij < ppr->pr_ip6s)
ii = 1;
else {
bcopy(pr->pr_ip6 + 1, pr->pr_ip6,
--pr->pr_ip6s * sizeof(*pr->pr_ip6));
ii = 0;
}
for (ij = 1; ii < pr->pr_ip6s; ) {
if (IN6_ARE_ADDR_EQUAL(&pr->pr_ip6[ii],
&ppr->pr_ip6[0])) {
ii++;
continue;
}
switch (ij >= ppr->pr_ip6s ? -1 :
prison_qcmp_v6(&pr->pr_ip6[ii], &ppr->pr_ip6[ij])) {
case -1:
bcopy(pr->pr_ip6 + ii + 1, pr->pr_ip6 + ii,
(--pr->pr_ip6s - ii) * sizeof(*pr->pr_ip6));
break;
case 0:
ii++;
ij++;
break;
case 1:
ij++;
break;
}
}
if (pr->pr_ip6s == 0) {
free(pr->pr_ip6, M_PRISON);
pr->pr_ip6 = NULL;
}
}
return 0;
}
/*
* Pass back primary IPv6 address for this jail.
*
* If the jail is not restricted, return success but do not alter the address.
* The caller must make sure it is initialized correctly (e.g. IN6ADDR_ANY_INIT).
*
* Returns 0 on success, EAFNOSUPPORT if the jail doesn't allow IPv6.
*/
int
prison_get_ip6(struct ucred *cred, struct in6_addr *ia6)
{
struct prison *pr;
KASSERT(cred != NULL, ("%s: cred is NULL", __func__));
KASSERT(ia6 != NULL, ("%s: ia6 is NULL", __func__));
pr = cred->cr_prison;
if (!(pr->pr_flags & PR_IP6))
return (0);
mtx_lock(&pr->pr_mtx);
if (!(pr->pr_flags & PR_IP6)) {
mtx_unlock(&pr->pr_mtx);
return (0);
}
if (pr->pr_ip6 == NULL) {
mtx_unlock(&pr->pr_mtx);
return (EAFNOSUPPORT);
}
bcopy(&pr->pr_ip6[0], ia6, sizeof(struct in6_addr));
mtx_unlock(&pr->pr_mtx);
return (0);
}
/*
* Return 1 if we should do proper source address selection, or if we are not
* jailed. Return 0 if we should bypass source address selection in favour of
* the primary jail IPv6 address; only in that case will *ia6 be updated and
* returned in network byte order.
* Return EAFNOSUPPORT if this jail does not allow IPv6.
*/
int
prison_saddrsel_ip6(struct ucred *cred, struct in6_addr *ia6)
{
struct prison *pr;
struct in6_addr lia6;
int error;
KASSERT(cred != NULL, ("%s: cred is NULL", __func__));
KASSERT(ia6 != NULL, ("%s: ia6 is NULL", __func__));
if (!jailed(cred))
return (1);
pr = cred->cr_prison;
if (pr->pr_flags & PR_IP6_SADDRSEL)
return (1);
lia6 = in6addr_any;
error = prison_get_ip6(cred, &lia6);
if (error)
return (error);
if (IN6_IS_ADDR_UNSPECIFIED(&lia6))
return (1);
bcopy(&lia6, ia6, sizeof(struct in6_addr));
return (0);
}
/*
* Return true if pr1 and pr2 have the same IPv6 address restrictions.
*/
int
prison_equal_ip6(struct prison *pr1, struct prison *pr2)
{
if (pr1 == pr2)
return (1);
while (pr1 != &prison0 &&
#ifdef VIMAGE
!(pr1->pr_flags & PR_VNET) &&
#endif
!(pr1->pr_flags & PR_IP6_USER))
pr1 = pr1->pr_parent;
while (pr2 != &prison0 &&
#ifdef VIMAGE
!(pr2->pr_flags & PR_VNET) &&
#endif
!(pr2->pr_flags & PR_IP6_USER))
pr2 = pr2->pr_parent;
return (pr1 == pr2);
}
/*
* Make sure our (source) address is set to something meaningful to this jail.
*
* v6only should be set based on (inp->inp_flags & IN6P_IPV6_V6ONLY != 0)
* when needed while binding.
*
* Returns 0 if jail doesn't restrict IPv6 or if address belongs to jail,
* EADDRNOTAVAIL if the address doesn't belong, or EAFNOSUPPORT if the jail
* doesn't allow IPv6.
*/
int
prison_local_ip6(struct ucred *cred, struct in6_addr *ia6, int v6only)
{
struct prison *pr;
int error;
KASSERT(cred != NULL, ("%s: cred is NULL", __func__));
KASSERT(ia6 != NULL, ("%s: ia6 is NULL", __func__));
pr = cred->cr_prison;
if (!(pr->pr_flags & PR_IP6))
return (0);
mtx_lock(&pr->pr_mtx);
if (!(pr->pr_flags & PR_IP6)) {
mtx_unlock(&pr->pr_mtx);
return (0);
}
if (pr->pr_ip6 == NULL) {
mtx_unlock(&pr->pr_mtx);
return (EAFNOSUPPORT);
}
if (IN6_IS_ADDR_UNSPECIFIED(ia6)) {
/*
* If there is only one IPv6 address and v6only is set,
* bind to it directly.
*/
if (v6only != 0 && pr->pr_ip6s == 1)
bcopy(&pr->pr_ip6[0], ia6, sizeof(struct in6_addr));
mtx_unlock(&pr->pr_mtx);
return (0);
}
error = prison_check_ip6_locked(pr, ia6);
if (error == EADDRNOTAVAIL && IN6_IS_ADDR_LOOPBACK(ia6)) {
bcopy(&pr->pr_ip6[0], ia6, sizeof(struct in6_addr));
error = 0;
}
mtx_unlock(&pr->pr_mtx);
return (error);
}
/*
* Rewrite destination address in case we will connect to loopback address.
*
* Returns 0 on success, EAFNOSUPPORT if the jail doesn't allow IPv6.
*/
int
prison_remote_ip6(struct ucred *cred, struct in6_addr *ia6)
{
struct prison *pr;
KASSERT(cred != NULL, ("%s: cred is NULL", __func__));
KASSERT(ia6 != NULL, ("%s: ia6 is NULL", __func__));
pr = cred->cr_prison;
if (!(pr->pr_flags & PR_IP6))
return (0);
mtx_lock(&pr->pr_mtx);
if (!(pr->pr_flags & PR_IP6)) {
mtx_unlock(&pr->pr_mtx);
return (0);
}
if (pr->pr_ip6 == NULL) {
mtx_unlock(&pr->pr_mtx);
return (EAFNOSUPPORT);
}
if (IN6_IS_ADDR_LOOPBACK(ia6) &&
prison_check_ip6_locked(pr, ia6) == EADDRNOTAVAIL) {
bcopy(&pr->pr_ip6[0], ia6, sizeof(struct in6_addr));
mtx_unlock(&pr->pr_mtx);
return (0);
}
/*
* Return success because nothing had to be changed.
*/
mtx_unlock(&pr->pr_mtx);
return (0);
}
/*
* Check if given address belongs to the jail referenced by cred/prison.
*
* Returns 0 if address belongs to jail,
* EADDRNOTAVAIL if the address doesn't belong to the jail.
*/
int
prison_check_ip6_locked(const struct prison *pr, const struct in6_addr *ia6)
{
int i, a, z, d;
/*
* Check the primary IP.
*/
if (IN6_ARE_ADDR_EQUAL(&pr->pr_ip6[0], ia6))
return (0);
/*
* All the other IPs are sorted so we can do a binary search.
*/
a = 0;
z = pr->pr_ip6s - 2;
while (a <= z) {
i = (a + z) / 2;
d = prison_qcmp_v6(&pr->pr_ip6[i+1], ia6);
if (d > 0)
z = i - 1;
else if (d < 0)
a = i + 1;
else
return (0);
}
return (EADDRNOTAVAIL);
}
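/*
* Illustrative sketch (standalone, not part of in6_jail.c): the loop above is
* a plain binary search over entries [1 .. pr_ip6s - 1], which
* prison_restrict_ip6() keeps sorted; entry 0 (the primary address) is
* checked separately first, hence the "- 2" upper bound and the "i + 1"
* indexing.  The generic form, with cmp() standing in for prison_qcmp_v6():
*/
static int
sorted_tail_contains(const void *key, const void *tbl, int nitems,
    int size, int (*cmp)(const void *, const void *))
{
    const char *base = (const char *)tbl + size;    /* skip entry 0 */
    int a = 0, z = nitems - 2, i, d;

    while (a <= z) {
        i = (a + z) / 2;
        d = cmp(base + (long)i * size, key);
        if (d > 0)
            z = i - 1;
        else if (d < 0)
            a = i + 1;
        else
            return (1);    /* found */
    }
    return (0);
}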
int
prison_check_ip6(const struct ucred *cred, const struct in6_addr *ia6)
{
struct prison *pr;
int error;
KASSERT(cred != NULL, ("%s: cred is NULL", __func__));
KASSERT(ia6 != NULL, ("%s: ia6 is NULL", __func__));
pr = cred->cr_prison;
if (!(pr->pr_flags & PR_IP6))
return (0);
mtx_lock(&pr->pr_mtx);
if (!(pr->pr_flags & PR_IP6)) {
mtx_unlock(&pr->pr_mtx);
return (0);
}
if (pr->pr_ip6 == NULL) {
mtx_unlock(&pr->pr_mtx);
return (EAFNOSUPPORT);
}
error = prison_check_ip6_locked(pr, ia6);
mtx_unlock(&pr->pr_mtx);
return (error);
}
Index: head/sys/powerpc/pseries/phyp_vscsi.c
===================================================================
--- head/sys/powerpc/pseries/phyp_vscsi.c (revision 328217)
+++ head/sys/powerpc/pseries/phyp_vscsi.c (revision 328218)
@@ -1,995 +1,995 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright 2013 Nathan Whitehorn
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/vmem.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <powerpc/pseries/phyp-hvcall.h>
struct vscsi_softc;
/* VSCSI CRQ format from table 260 of PAPR spec 2.4 (page 760) */
struct vscsi_crq {
uint8_t valid;
uint8_t format;
uint8_t reserved;
uint8_t status;
uint16_t timeout;
uint16_t iu_length;
uint64_t iu_data;
};
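/*
* Illustrative sketch (standalone, not part of phyp_vscsi.c): the 16-byte CRQ
* entry is handed to H_SEND_CRQ as two 64-bit register arguments; the driver
* below does this with a pointer cast on &crq, which memcpy() expresses
* without the aliasing cast.  This assumes the struct occupies exactly two
* machine words, as the layout above implies.
*/
#include <stdint.h>
#include <string.h>

static void
crq_to_words(const struct vscsi_crq *crq, uint64_t words[2])
{
    memcpy(words, crq, sizeof(*crq));    /* 16 bytes -> two 64-bit words */
}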
struct vscsi_xfer {
TAILQ_ENTRY(vscsi_xfer) queue;
struct vscsi_softc *sc;
union ccb *ccb;
bus_dmamap_t dmamap;
uint64_t tag;
vmem_addr_t srp_iu_offset;
vmem_size_t srp_iu_size;
};
TAILQ_HEAD(vscsi_xferq, vscsi_xfer);
struct vscsi_softc {
device_t dev;
struct cam_devq *devq;
struct cam_sim *sim;
struct cam_path *path;
struct mtx io_lock;
cell_t unit;
int bus_initialized;
int bus_logged_in;
int max_transactions;
int irqid;
struct resource *irq;
void *irq_cookie;
bus_dma_tag_t crq_tag;
struct vscsi_crq *crq_queue;
int n_crqs, cur_crq;
bus_dmamap_t crq_map;
bus_addr_t crq_phys;
vmem_t *srp_iu_arena;
void *srp_iu_queue;
bus_addr_t srp_iu_phys;
bus_dma_tag_t data_tag;
struct vscsi_xfer loginxp;
struct vscsi_xfer *xfer;
struct vscsi_xferq active_xferq;
struct vscsi_xferq free_xferq;
};
struct srp_login {
uint8_t type;
uint8_t reserved[7];
uint64_t tag;
uint64_t max_cmd_length;
uint32_t reserved2;
uint16_t buffer_formats;
uint8_t flags;
uint8_t reserved3[5];
uint8_t initiator_port_id[16];
uint8_t target_port_id[16];
} __packed;
struct srp_login_rsp {
uint8_t type;
uint8_t reserved[3];
uint32_t request_limit_delta;
uint8_t tag;
uint32_t max_i_to_t_len;
uint32_t max_t_to_i_len;
uint16_t buffer_formats;
uint8_t flags;
/* Some reserved bits follow */
} __packed;
struct srp_cmd {
uint8_t type;
uint8_t flags1;
uint8_t reserved[3];
uint8_t formats;
uint8_t out_buffer_count;
uint8_t in_buffer_count;
uint64_t tag;
uint32_t reserved2;
uint64_t lun;
uint8_t reserved3[3];
uint8_t additional_cdb;
uint8_t cdb[16];
uint8_t data_payload[0];
} __packed;
struct srp_rsp {
uint8_t type;
uint8_t reserved[3];
uint32_t request_limit_delta;
uint64_t tag;
uint16_t reserved2;
uint8_t flags;
uint8_t status;
uint32_t data_out_resid;
uint32_t data_in_resid;
uint32_t sense_data_len;
uint32_t response_data_len;
uint8_t data_payload[0];
} __packed;
struct srp_tsk_mgmt {
uint8_t type;
uint8_t reserved[7];
uint64_t tag;
uint32_t reserved2;
uint64_t lun;
uint8_t reserved3[2];
uint8_t function;
uint8_t reserved4;
uint64_t manage_tag;
uint64_t reserved5;
} __packed;
/* Message code type */
#define SRP_LOGIN_REQ 0x00
#define SRP_TSK_MGMT 0x01
#define SRP_CMD 0x02
#define SRP_I_LOGOUT 0x03
#define SRP_LOGIN_RSP 0xC0
#define SRP_RSP 0xC1
#define SRP_LOGIN_REJ 0xC2
#define SRP_T_LOGOUT 0x80
#define SRP_CRED_REQ 0x81
#define SRP_AER_REQ 0x82
#define SRP_CRED_RSP 0x41
#define SRP_AER_RSP 0x41
/* Flags for srp_rsp flags field */
#define SRP_RSPVALID 0x01
#define SRP_SNSVALID 0x02
#define SRP_DOOVER 0x04
#define SRP_DOUNDER 0x08
#define SRP_DIOVER 0x10
#define SRP_DIUNDER 0x20
#define MAD_SUCESS 0x00
#define MAD_NOT_SUPPORTED 0xf1
#define MAD_FAILED 0xf7
#define MAD_EMPTY_IU 0x01
#define MAD_ERROR_LOGGING_REQUEST 0x02
#define MAD_ADAPTER_INFO_REQUEST 0x03
#define MAD_CAPABILITIES_EXCHANGE 0x05
#define MAD_PHYS_ADAP_INFO_REQUEST 0x06
#define MAD_TAPE_PASSTHROUGH_REQUEST 0x07
#define MAD_ENABLE_FAST_FAIL 0x08
static int vscsi_probe(device_t);
static int vscsi_attach(device_t);
static int vscsi_detach(device_t);
static void vscsi_cam_action(struct cam_sim *, union ccb *);
static void vscsi_cam_poll(struct cam_sim *);
static void vscsi_intr(void *arg);
static void vscsi_check_response_queue(struct vscsi_softc *sc);
static void vscsi_setup_bus(struct vscsi_softc *sc);
static void vscsi_srp_login(struct vscsi_softc *sc);
static void vscsi_crq_load_cb(void *, bus_dma_segment_t *, int, int);
static void vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs,
int nsegs, int err);
static void vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb);
static void vscsi_srp_response(struct vscsi_xfer *, struct vscsi_crq *);
static devclass_t vscsi_devclass;
static device_method_t vscsi_methods[] = {
DEVMETHOD(device_probe, vscsi_probe),
DEVMETHOD(device_attach, vscsi_attach),
DEVMETHOD(device_detach, vscsi_detach),
DEVMETHOD_END
};
static driver_t vscsi_driver = {
"vscsi",
vscsi_methods,
sizeof(struct vscsi_softc)
};
DRIVER_MODULE(vscsi, vdevice, vscsi_driver, vscsi_devclass, 0, 0);
MALLOC_DEFINE(M_VSCSI, "vscsi", "CAM device queue for VSCSI");
static int
vscsi_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "IBM,v-scsi"))
return (ENXIO);
device_set_desc(dev, "POWER Hypervisor Virtual SCSI Bus");
return (0);
}
static int
vscsi_attach(device_t dev)
{
struct vscsi_softc *sc;
struct vscsi_xfer *xp;
int error, i;
sc = device_get_softc(dev);
if (sc == NULL)
return (EINVAL);
sc->dev = dev;
mtx_init(&sc->io_lock, "vscsi", NULL, MTX_DEF);
/* Get properties */
OF_getencprop(ofw_bus_get_node(dev), "reg", &sc->unit,
sizeof(sc->unit));
/* Setup interrupt */
sc->irqid = 0;
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
RF_ACTIVE);
if (!sc->irq) {
device_printf(dev, "Could not allocate IRQ\n");
mtx_destroy(&sc->io_lock);
return (ENXIO);
}
bus_setup_intr(dev, sc->irq, INTR_TYPE_CAM | INTR_MPSAFE |
INTR_ENTROPY, NULL, vscsi_intr, sc, &sc->irq_cookie);
/* Data DMA */
error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
256, BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex, &sc->io_lock,
&sc->data_tag);
TAILQ_INIT(&sc->active_xferq);
TAILQ_INIT(&sc->free_xferq);
/* First XFER for login data */
sc->loginxp.sc = sc;
bus_dmamap_create(sc->data_tag, 0, &sc->loginxp.dmamap);
TAILQ_INSERT_TAIL(&sc->free_xferq, &sc->loginxp, queue);
/* CRQ area */
error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 8*PAGE_SIZE,
1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->crq_tag);
error = bus_dmamem_alloc(sc->crq_tag, (void **)&sc->crq_queue,
BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->crq_map);
sc->crq_phys = 0;
sc->n_crqs = 0;
error = bus_dmamap_load(sc->crq_tag, sc->crq_map, sc->crq_queue,
8*PAGE_SIZE, vscsi_crq_load_cb, sc, 0);
mtx_lock(&sc->io_lock);
vscsi_setup_bus(sc);
- sc->xfer = mallocarray(sc->max_transactions, sizeof(sc->xfer[0]),
- M_VSCSI, M_NOWAIT);
+ sc->xfer = malloc(sizeof(sc->xfer[0])*sc->max_transactions, M_VSCSI,
+ M_NOWAIT);
for (i = 0; i < sc->max_transactions; i++) {
xp = &sc->xfer[i];
xp->sc = sc;
error = bus_dmamap_create(sc->data_tag, 0, &xp->dmamap);
if (error) {
device_printf(dev, "Could not create DMA map (%d)\n",
error);
break;
}
TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
}
mtx_unlock(&sc->io_lock);
/* Allocate CAM bits */
if ((sc->devq = cam_simq_alloc(sc->max_transactions)) == NULL)
return (ENOMEM);
sc->sim = cam_sim_alloc(vscsi_cam_action, vscsi_cam_poll, "vscsi", sc,
device_get_unit(dev), &sc->io_lock,
sc->max_transactions, sc->max_transactions,
sc->devq);
if (sc->sim == NULL) {
cam_simq_free(sc->devq);
sc->devq = NULL;
device_printf(dev, "CAM SIM attach failed\n");
return (EINVAL);
}
mtx_lock(&sc->io_lock);
if (xpt_bus_register(sc->sim, dev, 0) != 0) {
device_printf(dev, "XPT bus registration failed\n");
cam_sim_free(sc->sim, FALSE);
sc->sim = NULL;
cam_simq_free(sc->devq);
sc->devq = NULL;
mtx_unlock(&sc->io_lock);
return (EINVAL);
}
mtx_unlock(&sc->io_lock);
return (0);
}
static int
vscsi_detach(device_t dev)
{
struct vscsi_softc *sc;
sc = device_get_softc(dev);
if (sc == NULL)
return (EINVAL);
if (sc->sim != NULL) {
mtx_lock(&sc->io_lock);
xpt_bus_deregister(cam_sim_path(sc->sim));
cam_sim_free(sc->sim, FALSE);
sc->sim = NULL;
mtx_unlock(&sc->io_lock);
}
if (sc->devq != NULL) {
cam_simq_free(sc->devq);
sc->devq = NULL;
}
mtx_destroy(&sc->io_lock);
return (0);
}
static void
vscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
struct vscsi_softc *sc = cam_sim_softc(sim);
mtx_assert(&sc->io_lock, MA_OWNED);
switch (ccb->ccb_h.func_code) {
case XPT_PATH_INQ:
{
struct ccb_pathinq *cpi = &ccb->cpi;
cpi->version_num = 1;
cpi->hba_inquiry = PI_TAG_ABLE;
cpi->hba_misc = PIM_EXTLUNS;
cpi->target_sprt = 0;
cpi->hba_eng_cnt = 0;
cpi->max_target = 0;
cpi->max_lun = 0;
cpi->initiator_id = ~0;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "IBM", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 150000;
cpi->transport = XPORT_SRP;
cpi->transport_version = 0;
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_SPC4;
cpi->ccb_h.status = CAM_REQ_CMP;
break;
}
case XPT_RESET_BUS:
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_RESET_DEV:
ccb->ccb_h.status = CAM_REQ_INPROG;
vscsi_task_management(sc, ccb);
return;
case XPT_GET_TRAN_SETTINGS:
ccb->cts.protocol = PROTO_SCSI;
ccb->cts.protocol_version = SCSI_REV_SPC4;
ccb->cts.transport = XPORT_SRP;
ccb->cts.transport_version = 0;
ccb->cts.proto_specific.valid = 0;
ccb->cts.xport_specific.valid = 0;
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_SET_TRAN_SETTINGS:
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_SCSI_IO:
{
struct vscsi_xfer *xp;
ccb->ccb_h.status = CAM_REQ_INPROG;
xp = TAILQ_FIRST(&sc->free_xferq);
if (xp == NULL)
panic("SCSI queue flooded");
xp->ccb = ccb;
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
bus_dmamap_load_ccb(sc->data_tag, xp->dmamap,
ccb, vscsi_scsi_command, xp, 0);
return;
}
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
xpt_done(ccb);
return;
}
static void
vscsi_srp_login(struct vscsi_softc *sc)
{
struct vscsi_xfer *xp;
struct srp_login *login;
struct vscsi_crq crq;
int err;
mtx_assert(&sc->io_lock, MA_OWNED);
xp = TAILQ_FIRST(&sc->free_xferq);
if (xp == NULL)
panic("SCSI queue flooded");
xp->ccb = NULL;
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
/* Set up command */
xp->srp_iu_size = crq.iu_length = 64;
err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
if (err)
panic("Error during VMEM allocation (%d)", err);
login = (struct srp_login *)((uint8_t *)xp->sc->srp_iu_queue +
(uintptr_t)xp->srp_iu_offset);
bzero(login, xp->srp_iu_size);
login->type = SRP_LOGIN_REQ;
login->tag = (uint64_t)(xp);
login->max_cmd_length = htobe64(256);
login->buffer_formats = htobe16(0x1 | 0x2); /* Direct and indirect */
login->flags = 0;
/* Create CRQ entry */
crq.valid = 0x80;
crq.format = 0x01;
crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
err = phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
((uint64_t *)(&crq))[1]);
if (err != 0)
panic("CRQ send failure (%d)", err);
}
static void
vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb)
{
struct srp_tsk_mgmt *cmd;
struct vscsi_xfer *xp;
struct vscsi_crq crq;
int err;
mtx_assert(&sc->io_lock, MA_OWNED);
xp = TAILQ_FIRST(&sc->free_xferq);
if (xp == NULL)
panic("SCSI queue flooded");
xp->ccb = ccb;
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
xp->srp_iu_size = crq.iu_length = sizeof(*cmd);
err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
if (err)
panic("Error during VMEM allocation (%d)", err);
cmd = (struct srp_tsk_mgmt *)((uint8_t *)xp->sc->srp_iu_queue +
(uintptr_t)xp->srp_iu_offset);
bzero(cmd, xp->srp_iu_size);
cmd->type = SRP_TSK_MGMT;
cmd->tag = (uint64_t)xp;
cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
switch (ccb->ccb_h.func_code) {
case XPT_RESET_DEV:
cmd->function = 0x08;
break;
default:
panic("Unimplemented code %d", ccb->ccb_h.func_code);
break;
}
bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);
/* Create CRQ entry */
crq.valid = 0x80;
crq.format = 0x01;
crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
err = phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
((uint64_t *)(&crq))[1]);
if (err != 0)
panic("CRQ send failure (%d)", err);
}
static void
vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs, int nsegs, int err)
{
struct vscsi_xfer *xp = xxp;
uint8_t *cdb;
union ccb *ccb = xp->ccb;
struct srp_cmd *cmd;
uint64_t chunk_addr;
uint32_t chunk_size;
int desc_start, i;
struct vscsi_crq crq;
KASSERT(err == 0, ("DMA error %d\n", err));
mtx_assert(&xp->sc->io_lock, MA_OWNED);
cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ?
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes;
/* Command format from Table 20, page 37 of SRP spec */
crq.iu_length = 48 + ((nsegs > 1) ? 20 : 16) +
((ccb->csio.cdb_len > 16) ? (ccb->csio.cdb_len - 16) : 0);
xp->srp_iu_size = crq.iu_length;
if (nsegs > 1)
xp->srp_iu_size += nsegs*16;
xp->srp_iu_size = roundup(xp->srp_iu_size, 16);
err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
if (err)
panic("Error during VMEM allocation (%d)", err);
cmd = (struct srp_cmd *)((uint8_t *)xp->sc->srp_iu_queue +
(uintptr_t)xp->srp_iu_offset);
bzero(cmd, xp->srp_iu_size);
cmd->type = SRP_CMD;
if (ccb->csio.cdb_len > 16)
cmd->additional_cdb = (ccb->csio.cdb_len - 16) << 2;
memcpy(cmd->cdb, cdb, ccb->csio.cdb_len);
cmd->tag = (uint64_t)(xp); /* Let the responder find this again */
cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
if (nsegs > 1) {
/* Use indirect descriptors */
switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
case CAM_DIR_OUT:
cmd->formats = (2 << 4);
break;
case CAM_DIR_IN:
cmd->formats = 2;
break;
default:
panic("Does not support bidirectional commands (%d)",
ccb->ccb_h.flags & CAM_DIR_MASK);
break;
}
desc_start = ((ccb->csio.cdb_len > 16) ?
ccb->csio.cdb_len - 16 : 0);
chunk_addr = xp->sc->srp_iu_phys + xp->srp_iu_offset + 20 +
desc_start + sizeof(*cmd);
chunk_size = 16*nsegs;
memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
chunk_size = 0;
for (i = 0; i < nsegs; i++)
chunk_size += segs[i].ds_len;
memcpy(&cmd->data_payload[desc_start+16], &chunk_size, 4);
desc_start += 20;
for (i = 0; i < nsegs; i++) {
chunk_addr = segs[i].ds_addr;
chunk_size = segs[i].ds_len;
memcpy(&cmd->data_payload[desc_start + 16*i],
&chunk_addr, 8);
/* Set handle tag to 0 */
memcpy(&cmd->data_payload[desc_start + 16*i + 12],
&chunk_size, 4);
}
} else if (nsegs == 1) {
switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
case CAM_DIR_OUT:
cmd->formats = (1 << 4);
break;
case CAM_DIR_IN:
cmd->formats = 1;
break;
default:
panic("Does not support bidirectional commands (%d)",
ccb->ccb_h.flags & CAM_DIR_MASK);
break;
}
/*
* Memory descriptor:
* 8 byte address
* 4 byte handle
* 4 byte length
*/
chunk_addr = segs[0].ds_addr;
chunk_size = segs[0].ds_len;
desc_start = ((ccb->csio.cdb_len > 16) ?
ccb->csio.cdb_len - 16 : 0);
memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
/* Set handle tag to 0 */
memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
KASSERT(xp->srp_iu_size >= 48 + ((ccb->csio.cdb_len > 16) ?
ccb->csio.cdb_len : 16), ("SRP IU command length"));
} else {
cmd->formats = 0;
}
bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);
/* Create CRQ entry */
crq.valid = 0x80;
crq.format = 0x01;
crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
err = phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
((uint64_t *)(&crq))[1]);
if (err != 0)
panic("CRQ send failure (%d)", err);
}
static void
vscsi_crq_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
{
struct vscsi_softc *sc = xsc;
sc->crq_phys = segs[0].ds_addr;
sc->n_crqs = PAGE_SIZE/sizeof(struct vscsi_crq);
sc->srp_iu_queue = (uint8_t *)(sc->crq_queue);
sc->srp_iu_phys = segs[0].ds_addr;
sc->srp_iu_arena = vmem_create("VSCSI SRP IU", PAGE_SIZE,
segs[0].ds_len - PAGE_SIZE, 16, 0, M_BESTFIT | M_NOWAIT);
}
static void
vscsi_setup_bus(struct vscsi_softc *sc)
{
struct vscsi_crq crq;
struct vscsi_xfer *xp;
int error;
struct {
uint32_t type;
uint16_t status;
uint16_t length;
uint64_t tag;
uint64_t buffer;
struct {
char srp_version[8];
char partition_name[96];
uint32_t partition_number;
uint32_t mad_version;
uint32_t os_type;
uint32_t port_max_txu[8];
} payload;
} mad_adapter_info;
bzero(&crq, sizeof(crq));
/* Init message */
crq.valid = 0xc0;
crq.format = 0x01;
do {
error = phyp_hcall(H_FREE_CRQ, sc->unit);
} while (error == H_BUSY);
/* See initialization sequence page 757 */
bzero(sc->crq_queue, sc->n_crqs*sizeof(sc->crq_queue[0]));
sc->cur_crq = 0;
sc->bus_initialized = 0;
sc->bus_logged_in = 0;
bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
error = phyp_hcall(H_REG_CRQ, sc->unit, sc->crq_phys,
sc->n_crqs*sizeof(sc->crq_queue[0]));
KASSERT(error == 0, ("CRQ registration success"));
error = phyp_hcall(H_SEND_CRQ, sc->unit, ((uint64_t *)(&crq))[0],
((uint64_t *)(&crq))[1]);
if (error != 0)
panic("CRQ setup failure (%d)", error);
while (sc->bus_initialized == 0)
vscsi_check_response_queue(sc);
/* Send MAD adapter info */
mad_adapter_info.type = MAD_ADAPTER_INFO_REQUEST;
mad_adapter_info.status = 0;
mad_adapter_info.length = sizeof(mad_adapter_info.payload);
strcpy(mad_adapter_info.payload.srp_version, "16.a");
strcpy(mad_adapter_info.payload.partition_name, "UNKNOWN");
mad_adapter_info.payload.partition_number = -1;
mad_adapter_info.payload.mad_version = 1;
mad_adapter_info.payload.os_type = 2; /* Claim we are Linux */
mad_adapter_info.payload.port_max_txu[0] = 0;
/* If this fails, we get the defaults above */
OF_getprop(OF_finddevice("/"), "ibm,partition-name",
mad_adapter_info.payload.partition_name,
sizeof(mad_adapter_info.payload.partition_name));
OF_getprop(OF_finddevice("/"), "ibm,partition-no",
&mad_adapter_info.payload.partition_number,
sizeof(mad_adapter_info.payload.partition_number));
xp = TAILQ_FIRST(&sc->free_xferq);
xp->ccb = NULL;
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
xp->srp_iu_size = crq.iu_length = sizeof(mad_adapter_info);
vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
mad_adapter_info.buffer = xp->sc->srp_iu_phys + xp->srp_iu_offset + 24;
mad_adapter_info.tag = (uint64_t)xp;
memcpy((uint8_t *)xp->sc->srp_iu_queue + (uintptr_t)xp->srp_iu_offset,
&mad_adapter_info, sizeof(mad_adapter_info));
crq.valid = 0x80;
crq.format = 0x02;
crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
((uint64_t *)(&crq))[1]);
while (TAILQ_EMPTY(&sc->free_xferq))
vscsi_check_response_queue(sc);
/* Send SRP login */
vscsi_srp_login(sc);
while (sc->bus_logged_in == 0)
vscsi_check_response_queue(sc);
error = phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */
}
static void
vscsi_intr(void *xsc)
{
struct vscsi_softc *sc = xsc;
mtx_lock(&sc->io_lock);
vscsi_check_response_queue(sc);
mtx_unlock(&sc->io_lock);
}
static void
vscsi_srp_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
union ccb *ccb = xp->ccb;
struct vscsi_softc *sc = xp->sc;
struct srp_rsp *rsp;
uint32_t sense_len;
/* SRP response packet in original request */
rsp = (struct srp_rsp *)((uint8_t *)sc->srp_iu_queue +
(uintptr_t)xp->srp_iu_offset);
ccb->csio.scsi_status = rsp->status;
if (ccb->csio.scsi_status == SCSI_STATUS_OK)
ccb->ccb_h.status = CAM_REQ_CMP;
else
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
#ifdef NOTYET
/* Collect fast fail codes */
if (crq->status != 0)
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
#endif
if (ccb->ccb_h.status != CAM_REQ_CMP) {
ccb->ccb_h.status |= CAM_DEV_QFRZN;
xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
}
if (!(rsp->flags & SRP_RSPVALID))
rsp->response_data_len = 0;
if (!(rsp->flags & SRP_SNSVALID))
rsp->sense_data_len = 0;
if (!(rsp->flags & (SRP_DOOVER | SRP_DOUNDER)))
rsp->data_out_resid = 0;
if (!(rsp->flags & (SRP_DIOVER | SRP_DIUNDER)))
rsp->data_in_resid = 0;
if (rsp->flags & SRP_SNSVALID) {
bzero(&ccb->csio.sense_data, sizeof(struct scsi_sense_data));
ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
sense_len = min(be32toh(rsp->sense_data_len),
ccb->csio.sense_len);
memcpy(&ccb->csio.sense_data,
&rsp->data_payload[be32toh(rsp->response_data_len)],
sense_len);
ccb->csio.sense_resid = ccb->csio.sense_len -
be32toh(rsp->sense_data_len);
}
switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
case CAM_DIR_OUT:
ccb->csio.resid = rsp->data_out_resid;
break;
case CAM_DIR_IN:
ccb->csio.resid = rsp->data_in_resid;
break;
}
bus_dmamap_sync(sc->data_tag, xp->dmamap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->data_tag, xp->dmamap);
xpt_done(ccb);
xp->ccb = NULL;
}
static void
vscsi_login_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
struct vscsi_softc *sc = xp->sc;
struct srp_login_rsp *rsp;
/* SRP response packet in original request */
rsp = (struct srp_login_rsp *)((uint8_t *)sc->srp_iu_queue +
(uintptr_t)xp->srp_iu_offset);
KASSERT(be16toh(rsp->buffer_formats) & 0x3, ("Both direct and indirect "
"buffers supported"));
sc->max_transactions = be32toh(rsp->request_limit_delta);
device_printf(sc->dev, "Queue depth %d commands\n",
sc->max_transactions);
sc->bus_logged_in = 1;
}
static void
vscsi_cam_poll(struct cam_sim *sim)
{
struct vscsi_softc *sc = cam_sim_softc(sim);
vscsi_check_response_queue(sc);
}
static void
vscsi_check_response_queue(struct vscsi_softc *sc)
{
struct vscsi_crq *crq;
struct vscsi_xfer *xp;
int code;
mtx_assert(&sc->io_lock, MA_OWNED);
while (sc->crq_queue[sc->cur_crq].valid != 0) {
/* The hypercalls at both ends of this are not optimal */
phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);
bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_POSTREAD);
crq = &sc->crq_queue[sc->cur_crq];
switch (crq->valid) {
case 0xc0:
if (crq->format == 0x02)
sc->bus_initialized = 1;
break;
case 0x80:
/* IU data is set to tag pointer (the XP) */
xp = (struct vscsi_xfer *)crq->iu_data;
switch (crq->format) {
case 0x01:
code = *((uint8_t *)sc->srp_iu_queue +
(uintptr_t)xp->srp_iu_offset);
switch (code) {
case SRP_RSP:
vscsi_srp_response(xp, crq);
break;
case SRP_LOGIN_RSP:
vscsi_login_response(xp, crq);
break;
default:
device_printf(sc->dev, "Unknown SRP "
"response code %d\n", code);
break;
}
break;
case 0x02:
/* Ignore management datagrams */
break;
default:
panic("Unknown CRQ format %d\n", crq->format);
break;
}
vmem_free(sc->srp_iu_arena, xp->srp_iu_offset,
xp->srp_iu_size);
TAILQ_REMOVE(&sc->active_xferq, xp, queue);
TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
break;
default:
device_printf(sc->dev,
"Unknown CRQ message type %d\n", crq->valid);
break;
}
crq->valid = 0;
sc->cur_crq = (sc->cur_crq + 1) % sc->n_crqs;
bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);
}
}
Index: head/sys/x86/cpufreq/est.c
===================================================================
--- head/sys/x86/cpufreq/est.c (revision 328217)
+++ head/sys/x86/cpufreq/est.c (revision 328218)
@@ -1,1403 +1,1403 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2004 Colin Percival
* Copyright (c) 2005 Nate Lawson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include "cpufreq_if.h"
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#include "acpi_if.h"
/* Status/control registers (from the IA-32 System Programming Guide). */
#define MSR_PERF_STATUS 0x198
#define MSR_PERF_CTL 0x199
/* Register and bit for enabling SpeedStep. */
#define MSR_MISC_ENABLE 0x1a0
#define MSR_SS_ENABLE (1<<16)
/* Frequency and MSR control values. */
typedef struct {
uint16_t freq;
uint16_t volts;
uint16_t id16;
int power;
} freq_info;
/* Identifying characteristics of a processor and supported frequencies. */
typedef struct {
const u_int vendor_id;
uint32_t id32;
freq_info *freqtab;
} cpu_info;
struct est_softc {
device_t dev;
int acpi_settings;
int msr_settings;
freq_info *freq_list;
};
/* Convert MHz and mV into IDs for passing to the MSR. */
#define ID16(MHz, mV, bus_clk) \
(((MHz / bus_clk) << 8) | ((mV ? mV - 700 : 0) >> 4))
#define ID32(MHz_hi, mV_hi, MHz_lo, mV_lo, bus_clk) \
((ID16(MHz_lo, mV_lo, bus_clk) << 16) | (ID16(MHz_hi, mV_hi, bus_clk)))
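/*
 * Worked example: the PM17_130 entries below encode as
 * ID16(1700, 1484, 100) == 0x1131 and ID16(600, 956, 100) == 0x0610, so
 * ID32(1700, 1484, 600, 956, 100) == 0x06101131.  Illustrative compile-time
 * checks of that arithmetic:
 */
CTASSERT(ID16(1700, 1484, 100) == 0x1131);
CTASSERT(ID32(1700, 1484, 600, 956, 100) == 0x06101131);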
/* Format for storing IDs in our table. */
#define FREQ_INFO_PWR(MHz, mV, bus_clk, mW) \
{ MHz, mV, ID16(MHz, mV, bus_clk), mW }
#define FREQ_INFO(MHz, mV, bus_clk) \
FREQ_INFO_PWR(MHz, mV, bus_clk, CPUFREQ_VAL_UNKNOWN)
#define INTEL(tab, zhi, vhi, zlo, vlo, bus_clk) \
{ CPU_VENDOR_INTEL, ID32(zhi, vhi, zlo, vlo, bus_clk), tab }
#define CENTAUR(tab, zhi, vhi, zlo, vlo, bus_clk) \
{ CPU_VENDOR_CENTAUR, ID32(zhi, vhi, zlo, vlo, bus_clk), tab }
static int msr_info_enabled = 0;
TUNABLE_INT("hw.est.msr_info", &msr_info_enabled);
static int strict = -1;
TUNABLE_INT("hw.est.strict", &strict);
/* Default bus clock value for Centrino processors. */
#define INTEL_BUS_CLK 100
/* XXX Update this if new CPUs have more settings. */
#define EST_MAX_SETTINGS 10
CTASSERT(EST_MAX_SETTINGS <= MAX_SETTINGS);
/* Estimate in microseconds of latency for performing a transition. */
#define EST_TRANS_LAT 1000
/*
* Frequency (MHz) and voltage (mV) settings.
*
* Dothan processors have multiple VID#s with different settings for
* each VID#. Since we can't uniquely identify this info
* without undisclosed methods from Intel, we can't support newer
* processors with this table method. If ACPI Px states are supported,
* we get info from them.
*
* Data from the "Intel Pentium M Processor Datasheet",
* Order Number 252612-003, Table 5.
*/
static freq_info PM17_130[] = {
/* 130nm 1.70GHz Pentium M */
FREQ_INFO(1700, 1484, INTEL_BUS_CLK),
FREQ_INFO(1400, 1308, INTEL_BUS_CLK),
FREQ_INFO(1200, 1228, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1004, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM16_130[] = {
/* 130nm 1.60GHz Pentium M */
FREQ_INFO(1600, 1484, INTEL_BUS_CLK),
FREQ_INFO(1400, 1420, INTEL_BUS_CLK),
FREQ_INFO(1200, 1276, INTEL_BUS_CLK),
FREQ_INFO(1000, 1164, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM15_130[] = {
/* 130nm 1.50GHz Pentium M */
FREQ_INFO(1500, 1484, INTEL_BUS_CLK),
FREQ_INFO(1400, 1452, INTEL_BUS_CLK),
FREQ_INFO(1200, 1356, INTEL_BUS_CLK),
FREQ_INFO(1000, 1228, INTEL_BUS_CLK),
FREQ_INFO( 800, 1116, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM14_130[] = {
/* 130nm 1.40GHz Pentium M */
FREQ_INFO(1400, 1484, INTEL_BUS_CLK),
FREQ_INFO(1200, 1436, INTEL_BUS_CLK),
FREQ_INFO(1000, 1308, INTEL_BUS_CLK),
FREQ_INFO( 800, 1180, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM13_130[] = {
/* 130nm 1.30GHz Pentium M */
FREQ_INFO(1300, 1388, INTEL_BUS_CLK),
FREQ_INFO(1200, 1356, INTEL_BUS_CLK),
FREQ_INFO(1000, 1292, INTEL_BUS_CLK),
FREQ_INFO( 800, 1260, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM13_LV_130[] = {
/* 130nm 1.30GHz Low Voltage Pentium M */
FREQ_INFO(1300, 1180, INTEL_BUS_CLK),
FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
FREQ_INFO(1100, 1100, INTEL_BUS_CLK),
FREQ_INFO(1000, 1020, INTEL_BUS_CLK),
FREQ_INFO( 900, 1004, INTEL_BUS_CLK),
FREQ_INFO( 800, 988, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM12_LV_130[] = {
/* 130 nm 1.20GHz Low Voltage Pentium M */
FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
FREQ_INFO(1100, 1164, INTEL_BUS_CLK),
FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
FREQ_INFO( 900, 1020, INTEL_BUS_CLK),
FREQ_INFO( 800, 1004, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM11_LV_130[] = {
/* 130 nm 1.10GHz Low Voltage Pentium M */
FREQ_INFO(1100, 1180, INTEL_BUS_CLK),
FREQ_INFO(1000, 1164, INTEL_BUS_CLK),
FREQ_INFO( 900, 1100, INTEL_BUS_CLK),
FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
FREQ_INFO( 600, 956, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM11_ULV_130[] = {
/* 130 nm 1.10GHz Ultra Low Voltage Pentium M */
FREQ_INFO(1100, 1004, INTEL_BUS_CLK),
FREQ_INFO(1000, 988, INTEL_BUS_CLK),
FREQ_INFO( 900, 972, INTEL_BUS_CLK),
FREQ_INFO( 800, 956, INTEL_BUS_CLK),
FREQ_INFO( 600, 844, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM10_ULV_130[] = {
/* 130 nm 1.00GHz Ultra Low Voltage Pentium M */
FREQ_INFO(1000, 1004, INTEL_BUS_CLK),
FREQ_INFO( 900, 988, INTEL_BUS_CLK),
FREQ_INFO( 800, 972, INTEL_BUS_CLK),
FREQ_INFO( 600, 844, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
/*
* Data from "Intel Pentium M Processor on 90nm Process with
* 2-MB L2 Cache Datasheet", Order Number 302189-008, Table 5.
*/
static freq_info PM_765A_90[] = {
/* 90 nm 2.10GHz Pentium M, VID #A */
FREQ_INFO(2100, 1340, INTEL_BUS_CLK),
FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_765B_90[] = {
/* 90 nm 2.10GHz Pentium M, VID #B */
FREQ_INFO(2100, 1324, INTEL_BUS_CLK),
FREQ_INFO(1800, 1260, INTEL_BUS_CLK),
FREQ_INFO(1600, 1212, INTEL_BUS_CLK),
FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_765C_90[] = {
/* 90 nm 2.10GHz Pentium M, VID #C */
FREQ_INFO(2100, 1308, INTEL_BUS_CLK),
FREQ_INFO(1800, 1244, INTEL_BUS_CLK),
FREQ_INFO(1600, 1212, INTEL_BUS_CLK),
FREQ_INFO(1400, 1164, INTEL_BUS_CLK),
FREQ_INFO(1200, 1116, INTEL_BUS_CLK),
FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_765E_90[] = {
/* 90 nm 2.10GHz Pentium M, VID #E */
FREQ_INFO(2100, 1356, INTEL_BUS_CLK),
FREQ_INFO(1800, 1292, INTEL_BUS_CLK),
FREQ_INFO(1600, 1244, INTEL_BUS_CLK),
FREQ_INFO(1400, 1196, INTEL_BUS_CLK),
FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755A_90[] = {
/* 90 nm 2.00GHz Pentium M, VID #A */
FREQ_INFO(2000, 1340, INTEL_BUS_CLK),
FREQ_INFO(1800, 1292, INTEL_BUS_CLK),
FREQ_INFO(1600, 1244, INTEL_BUS_CLK),
FREQ_INFO(1400, 1196, INTEL_BUS_CLK),
FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755B_90[] = {
/* 90 nm 2.00GHz Pentium M, VID #B */
FREQ_INFO(2000, 1324, INTEL_BUS_CLK),
FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755C_90[] = {
/* 90 nm 2.00GHz Pentium M, VID #C */
FREQ_INFO(2000, 1308, INTEL_BUS_CLK),
FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755D_90[] = {
/* 90 nm 2.00GHz Pentium M, VID #D */
FREQ_INFO(2000, 1276, INTEL_BUS_CLK),
FREQ_INFO(1800, 1244, INTEL_BUS_CLK),
FREQ_INFO(1600, 1196, INTEL_BUS_CLK),
FREQ_INFO(1400, 1164, INTEL_BUS_CLK),
FREQ_INFO(1200, 1116, INTEL_BUS_CLK),
FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745A_90[] = {
/* 90 nm 1.80GHz Pentium M, VID #A */
FREQ_INFO(1800, 1340, INTEL_BUS_CLK),
FREQ_INFO(1600, 1292, INTEL_BUS_CLK),
FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745B_90[] = {
/* 90 nm 1.80GHz Pentium M, VID #B */
FREQ_INFO(1800, 1324, INTEL_BUS_CLK),
FREQ_INFO(1600, 1276, INTEL_BUS_CLK),
FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745C_90[] = {
/* 90 nm 1.80GHz Pentium M, VID #C */
FREQ_INFO(1800, 1308, INTEL_BUS_CLK),
FREQ_INFO(1600, 1260, INTEL_BUS_CLK),
FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745D_90[] = {
/* 90 nm 1.80GHz Pentium M, VID #D */
FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735A_90[] = {
/* 90 nm 1.70GHz Pentium M, VID #A */
FREQ_INFO(1700, 1340, INTEL_BUS_CLK),
FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735B_90[] = {
/* 90 nm 1.70GHz Pentium M, VID #B */
FREQ_INFO(1700, 1324, INTEL_BUS_CLK),
FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735C_90[] = {
/* 90 nm 1.70GHz Pentium M, VID #C */
FREQ_INFO(1700, 1308, INTEL_BUS_CLK),
FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735D_90[] = {
/* 90 nm 1.70GHz Pentium M, VID #D */
FREQ_INFO(1700, 1276, INTEL_BUS_CLK),
FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725A_90[] = {
/* 90 nm 1.60GHz Pentium M, VID #A */
FREQ_INFO(1600, 1340, INTEL_BUS_CLK),
FREQ_INFO(1400, 1276, INTEL_BUS_CLK),
FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725B_90[] = {
/* 90 nm 1.60GHz Pentium M, VID #B */
FREQ_INFO(1600, 1324, INTEL_BUS_CLK),
FREQ_INFO(1400, 1260, INTEL_BUS_CLK),
FREQ_INFO(1200, 1196, INTEL_BUS_CLK),
FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725C_90[] = {
/* 90 nm 1.60GHz Pentium M, VID #C */
FREQ_INFO(1600, 1308, INTEL_BUS_CLK),
FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725D_90[] = {
/* 90 nm 1.60GHz Pentium M, VID #D */
FREQ_INFO(1600, 1276, INTEL_BUS_CLK),
FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715A_90[] = {
/* 90 nm 1.50GHz Pentium M, VID #A */
FREQ_INFO(1500, 1340, INTEL_BUS_CLK),
FREQ_INFO(1200, 1228, INTEL_BUS_CLK),
FREQ_INFO(1000, 1148, INTEL_BUS_CLK),
FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715B_90[] = {
/* 90 nm 1.50GHz Pentium M, VID #B */
FREQ_INFO(1500, 1324, INTEL_BUS_CLK),
FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
FREQ_INFO(1000, 1148, INTEL_BUS_CLK),
FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715C_90[] = {
/* 90 nm 1.50GHz Pentium M, VID #C */
FREQ_INFO(1500, 1308, INTEL_BUS_CLK),
FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715D_90[] = {
/* 90 nm 1.50GHz Pentium M, VID #D */
FREQ_INFO(1500, 1276, INTEL_BUS_CLK),
FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_778_90[] = {
/* 90 nm 1.60GHz Low Voltage Pentium M */
FREQ_INFO(1600, 1116, INTEL_BUS_CLK),
FREQ_INFO(1500, 1116, INTEL_BUS_CLK),
FREQ_INFO(1400, 1100, INTEL_BUS_CLK),
FREQ_INFO(1300, 1084, INTEL_BUS_CLK),
FREQ_INFO(1200, 1068, INTEL_BUS_CLK),
FREQ_INFO(1100, 1052, INTEL_BUS_CLK),
FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_758_90[] = {
/* 90 nm 1.50GHz Low Voltage Pentium M */
FREQ_INFO(1500, 1116, INTEL_BUS_CLK),
FREQ_INFO(1400, 1116, INTEL_BUS_CLK),
FREQ_INFO(1300, 1100, INTEL_BUS_CLK),
FREQ_INFO(1200, 1084, INTEL_BUS_CLK),
FREQ_INFO(1100, 1068, INTEL_BUS_CLK),
FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_738_90[] = {
/* 90 nm 1.40GHz Low Voltage Pentium M */
FREQ_INFO(1400, 1116, INTEL_BUS_CLK),
FREQ_INFO(1300, 1116, INTEL_BUS_CLK),
FREQ_INFO(1200, 1100, INTEL_BUS_CLK),
FREQ_INFO(1100, 1068, INTEL_BUS_CLK),
FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
FREQ_INFO( 600, 988, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_773G_90[] = {
/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #G */
FREQ_INFO(1300, 956, INTEL_BUS_CLK),
FREQ_INFO(1200, 940, INTEL_BUS_CLK),
FREQ_INFO(1100, 924, INTEL_BUS_CLK),
FREQ_INFO(1000, 908, INTEL_BUS_CLK),
FREQ_INFO( 900, 876, INTEL_BUS_CLK),
FREQ_INFO( 800, 860, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_773H_90[] = {
/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #H */
FREQ_INFO(1300, 940, INTEL_BUS_CLK),
FREQ_INFO(1200, 924, INTEL_BUS_CLK),
FREQ_INFO(1100, 908, INTEL_BUS_CLK),
FREQ_INFO(1000, 892, INTEL_BUS_CLK),
FREQ_INFO( 900, 876, INTEL_BUS_CLK),
FREQ_INFO( 800, 860, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_773I_90[] = {
/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #I */
FREQ_INFO(1300, 924, INTEL_BUS_CLK),
FREQ_INFO(1200, 908, INTEL_BUS_CLK),
FREQ_INFO(1100, 892, INTEL_BUS_CLK),
FREQ_INFO(1000, 876, INTEL_BUS_CLK),
FREQ_INFO( 900, 860, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_773J_90[] = {
/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #J */
FREQ_INFO(1300, 908, INTEL_BUS_CLK),
FREQ_INFO(1200, 908, INTEL_BUS_CLK),
FREQ_INFO(1100, 892, INTEL_BUS_CLK),
FREQ_INFO(1000, 876, INTEL_BUS_CLK),
FREQ_INFO( 900, 860, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_773K_90[] = {
/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #K */
FREQ_INFO(1300, 892, INTEL_BUS_CLK),
FREQ_INFO(1200, 892, INTEL_BUS_CLK),
FREQ_INFO(1100, 876, INTEL_BUS_CLK),
FREQ_INFO(1000, 860, INTEL_BUS_CLK),
FREQ_INFO( 900, 860, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_773L_90[] = {
/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #L */
FREQ_INFO(1300, 876, INTEL_BUS_CLK),
FREQ_INFO(1200, 876, INTEL_BUS_CLK),
FREQ_INFO(1100, 860, INTEL_BUS_CLK),
FREQ_INFO(1000, 860, INTEL_BUS_CLK),
FREQ_INFO( 900, 844, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_753G_90[] = {
/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #G */
FREQ_INFO(1200, 956, INTEL_BUS_CLK),
FREQ_INFO(1100, 940, INTEL_BUS_CLK),
FREQ_INFO(1000, 908, INTEL_BUS_CLK),
FREQ_INFO( 900, 892, INTEL_BUS_CLK),
FREQ_INFO( 800, 860, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_753H_90[] = {
/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #H */
FREQ_INFO(1200, 940, INTEL_BUS_CLK),
FREQ_INFO(1100, 924, INTEL_BUS_CLK),
FREQ_INFO(1000, 908, INTEL_BUS_CLK),
FREQ_INFO( 900, 876, INTEL_BUS_CLK),
FREQ_INFO( 800, 860, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_753I_90[] = {
/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #I */
FREQ_INFO(1200, 924, INTEL_BUS_CLK),
FREQ_INFO(1100, 908, INTEL_BUS_CLK),
FREQ_INFO(1000, 892, INTEL_BUS_CLK),
FREQ_INFO( 900, 876, INTEL_BUS_CLK),
FREQ_INFO( 800, 860, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_753J_90[] = {
/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #J */
FREQ_INFO(1200, 908, INTEL_BUS_CLK),
FREQ_INFO(1100, 892, INTEL_BUS_CLK),
FREQ_INFO(1000, 876, INTEL_BUS_CLK),
FREQ_INFO( 900, 860, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_753K_90[] = {
/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #K */
FREQ_INFO(1200, 892, INTEL_BUS_CLK),
FREQ_INFO(1100, 892, INTEL_BUS_CLK),
FREQ_INFO(1000, 876, INTEL_BUS_CLK),
FREQ_INFO( 900, 860, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_753L_90[] = {
/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #L */
FREQ_INFO(1200, 876, INTEL_BUS_CLK),
FREQ_INFO(1100, 876, INTEL_BUS_CLK),
FREQ_INFO(1000, 860, INTEL_BUS_CLK),
FREQ_INFO( 900, 844, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_733JG_90[] = {
/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #G */
FREQ_INFO(1100, 956, INTEL_BUS_CLK),
FREQ_INFO(1000, 940, INTEL_BUS_CLK),
FREQ_INFO( 900, 908, INTEL_BUS_CLK),
FREQ_INFO( 800, 876, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_733JH_90[] = {
/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #H */
FREQ_INFO(1100, 940, INTEL_BUS_CLK),
FREQ_INFO(1000, 924, INTEL_BUS_CLK),
FREQ_INFO( 900, 892, INTEL_BUS_CLK),
FREQ_INFO( 800, 876, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_733JI_90[] = {
/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #I */
FREQ_INFO(1100, 924, INTEL_BUS_CLK),
FREQ_INFO(1000, 908, INTEL_BUS_CLK),
FREQ_INFO( 900, 892, INTEL_BUS_CLK),
FREQ_INFO( 800, 860, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_733JJ_90[] = {
/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #J */
FREQ_INFO(1100, 908, INTEL_BUS_CLK),
FREQ_INFO(1000, 892, INTEL_BUS_CLK),
FREQ_INFO( 900, 876, INTEL_BUS_CLK),
FREQ_INFO( 800, 860, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_733JK_90[] = {
/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #K */
FREQ_INFO(1100, 892, INTEL_BUS_CLK),
FREQ_INFO(1000, 876, INTEL_BUS_CLK),
FREQ_INFO( 900, 860, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_733JL_90[] = {
/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #L */
FREQ_INFO(1100, 876, INTEL_BUS_CLK),
FREQ_INFO(1000, 876, INTEL_BUS_CLK),
FREQ_INFO( 900, 860, INTEL_BUS_CLK),
FREQ_INFO( 800, 844, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
};
static freq_info PM_733_90[] = {
/* 90 nm 1.10GHz Ultra Low Voltage Pentium M */
FREQ_INFO(1100, 940, INTEL_BUS_CLK),
FREQ_INFO(1000, 924, INTEL_BUS_CLK),
FREQ_INFO( 900, 892, INTEL_BUS_CLK),
FREQ_INFO( 800, 876, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
static freq_info PM_723_90[] = {
/* 90 nm 1.00GHz Ultra Low Voltage Pentium M */
FREQ_INFO(1000, 940, INTEL_BUS_CLK),
FREQ_INFO( 900, 908, INTEL_BUS_CLK),
FREQ_INFO( 800, 876, INTEL_BUS_CLK),
FREQ_INFO( 600, 812, INTEL_BUS_CLK),
FREQ_INFO( 0, 0, 1),
};
/*
* VIA C7-M 500 MHz FSB, 400 MHz FSB, and ULV variants.
* Data from the "VIA C7-M Processor BIOS Writer's Guide (v2.17)" datasheet.
*/
static freq_info C7M_795[] = {
/* 2.00GHz Centaur C7-M 533 MHz FSB */
FREQ_INFO_PWR(2000, 1148, 133, 20000),
FREQ_INFO_PWR(1867, 1132, 133, 18000),
FREQ_INFO_PWR(1600, 1100, 133, 15000),
FREQ_INFO_PWR(1467, 1052, 133, 13000),
FREQ_INFO_PWR(1200, 1004, 133, 10000),
FREQ_INFO_PWR( 800, 844, 133, 7000),
FREQ_INFO_PWR( 667, 844, 133, 6000),
FREQ_INFO_PWR( 533, 844, 133, 5000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_785[] = {
/* 1.80GHz Centaur C7-M 533 MHz FSB */
FREQ_INFO_PWR(1867, 1148, 133, 18000),
FREQ_INFO_PWR(1600, 1100, 133, 15000),
FREQ_INFO_PWR(1467, 1052, 133, 13000),
FREQ_INFO_PWR(1200, 1004, 133, 10000),
FREQ_INFO_PWR( 800, 844, 133, 7000),
FREQ_INFO_PWR( 667, 844, 133, 6000),
FREQ_INFO_PWR( 533, 844, 133, 5000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_765[] = {
/* 1.60GHz Centaur C7-M 533 MHz FSB */
FREQ_INFO_PWR(1600, 1084, 133, 15000),
FREQ_INFO_PWR(1467, 1052, 133, 13000),
FREQ_INFO_PWR(1200, 1004, 133, 10000),
FREQ_INFO_PWR( 800, 844, 133, 7000),
FREQ_INFO_PWR( 667, 844, 133, 6000),
FREQ_INFO_PWR( 533, 844, 133, 5000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_794[] = {
/* 2.00GHz Centaur C7-M 400 MHz FSB */
FREQ_INFO_PWR(2000, 1148, 100, 20000),
FREQ_INFO_PWR(1800, 1132, 100, 18000),
FREQ_INFO_PWR(1600, 1100, 100, 15000),
FREQ_INFO_PWR(1400, 1052, 100, 13000),
FREQ_INFO_PWR(1000, 1004, 100, 10000),
FREQ_INFO_PWR( 800, 844, 100, 7000),
FREQ_INFO_PWR( 600, 844, 100, 6000),
FREQ_INFO_PWR( 400, 844, 100, 5000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_784[] = {
/* 1.80GHz Centaur C7-M 400 MHz FSB */
FREQ_INFO_PWR(1800, 1148, 100, 18000),
FREQ_INFO_PWR(1600, 1100, 100, 15000),
FREQ_INFO_PWR(1400, 1052, 100, 13000),
FREQ_INFO_PWR(1000, 1004, 100, 10000),
FREQ_INFO_PWR( 800, 844, 100, 7000),
FREQ_INFO_PWR( 600, 844, 100, 6000),
FREQ_INFO_PWR( 400, 844, 100, 5000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_764[] = {
/* 1.60GHz Centaur C7-M 400 MHz FSB */
FREQ_INFO_PWR(1600, 1084, 100, 15000),
FREQ_INFO_PWR(1400, 1052, 100, 13000),
FREQ_INFO_PWR(1000, 1004, 100, 10000),
FREQ_INFO_PWR( 800, 844, 100, 7000),
FREQ_INFO_PWR( 600, 844, 100, 6000),
FREQ_INFO_PWR( 400, 844, 100, 5000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_754[] = {
/* 1.50GHz Centaur C7-M 400 MHz FSB */
FREQ_INFO_PWR(1500, 1004, 100, 12000),
FREQ_INFO_PWR(1400, 988, 100, 11000),
FREQ_INFO_PWR(1000, 940, 100, 9000),
FREQ_INFO_PWR( 800, 844, 100, 7000),
FREQ_INFO_PWR( 600, 844, 100, 6000),
FREQ_INFO_PWR( 400, 844, 100, 5000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_771[] = {
/* 1.20GHz Centaur C7-M 400 MHz FSB */
FREQ_INFO_PWR(1200, 860, 100, 7000),
FREQ_INFO_PWR(1000, 860, 100, 6000),
FREQ_INFO_PWR( 800, 844, 100, 5500),
FREQ_INFO_PWR( 600, 844, 100, 5000),
FREQ_INFO_PWR( 400, 844, 100, 4000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_775_ULV[] = {
/* 1.50GHz Centaur C7-M ULV */
FREQ_INFO_PWR(1500, 956, 100, 7500),
FREQ_INFO_PWR(1400, 940, 100, 6000),
FREQ_INFO_PWR(1000, 860, 100, 5000),
FREQ_INFO_PWR( 800, 828, 100, 2800),
FREQ_INFO_PWR( 600, 796, 100, 2500),
FREQ_INFO_PWR( 400, 796, 100, 2000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_772_ULV[] = {
/* 1.20GHz Centaur C7-M ULV */
FREQ_INFO_PWR(1200, 844, 100, 5000),
FREQ_INFO_PWR(1000, 844, 100, 4000),
FREQ_INFO_PWR( 800, 828, 100, 2800),
FREQ_INFO_PWR( 600, 796, 100, 2500),
FREQ_INFO_PWR( 400, 796, 100, 2000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_779_ULV[] = {
/* 1.00GHz Centaur C7-M ULV */
FREQ_INFO_PWR(1000, 796, 100, 3500),
FREQ_INFO_PWR( 800, 796, 100, 2800),
FREQ_INFO_PWR( 600, 796, 100, 2500),
FREQ_INFO_PWR( 400, 796, 100, 2000),
FREQ_INFO(0, 0, 1),
};
static freq_info C7M_770_ULV[] = {
/* 1.00GHz Centaur C7-M ULV */
FREQ_INFO_PWR(1000, 844, 100, 5000),
FREQ_INFO_PWR( 800, 796, 100, 2800),
FREQ_INFO_PWR( 600, 796, 100, 2500),
FREQ_INFO_PWR( 400, 796, 100, 2000),
FREQ_INFO(0, 0, 1),
};
static cpu_info ESTprocs[] = {
INTEL(PM17_130, 1700, 1484, 600, 956, INTEL_BUS_CLK),
INTEL(PM16_130, 1600, 1484, 600, 956, INTEL_BUS_CLK),
INTEL(PM15_130, 1500, 1484, 600, 956, INTEL_BUS_CLK),
INTEL(PM14_130, 1400, 1484, 600, 956, INTEL_BUS_CLK),
INTEL(PM13_130, 1300, 1388, 600, 956, INTEL_BUS_CLK),
INTEL(PM13_LV_130, 1300, 1180, 600, 956, INTEL_BUS_CLK),
INTEL(PM12_LV_130, 1200, 1180, 600, 956, INTEL_BUS_CLK),
INTEL(PM11_LV_130, 1100, 1180, 600, 956, INTEL_BUS_CLK),
INTEL(PM11_ULV_130, 1100, 1004, 600, 844, INTEL_BUS_CLK),
INTEL(PM10_ULV_130, 1000, 1004, 600, 844, INTEL_BUS_CLK),
INTEL(PM_765A_90, 2100, 1340, 600, 988, INTEL_BUS_CLK),
INTEL(PM_765B_90, 2100, 1324, 600, 988, INTEL_BUS_CLK),
INTEL(PM_765C_90, 2100, 1308, 600, 988, INTEL_BUS_CLK),
INTEL(PM_765E_90, 2100, 1356, 600, 988, INTEL_BUS_CLK),
INTEL(PM_755A_90, 2000, 1340, 600, 988, INTEL_BUS_CLK),
INTEL(PM_755B_90, 2000, 1324, 600, 988, INTEL_BUS_CLK),
INTEL(PM_755C_90, 2000, 1308, 600, 988, INTEL_BUS_CLK),
INTEL(PM_755D_90, 2000, 1276, 600, 988, INTEL_BUS_CLK),
INTEL(PM_745A_90, 1800, 1340, 600, 988, INTEL_BUS_CLK),
INTEL(PM_745B_90, 1800, 1324, 600, 988, INTEL_BUS_CLK),
INTEL(PM_745C_90, 1800, 1308, 600, 988, INTEL_BUS_CLK),
INTEL(PM_745D_90, 1800, 1276, 600, 988, INTEL_BUS_CLK),
INTEL(PM_735A_90, 1700, 1340, 600, 988, INTEL_BUS_CLK),
INTEL(PM_735B_90, 1700, 1324, 600, 988, INTEL_BUS_CLK),
INTEL(PM_735C_90, 1700, 1308, 600, 988, INTEL_BUS_CLK),
INTEL(PM_735D_90, 1700, 1276, 600, 988, INTEL_BUS_CLK),
INTEL(PM_725A_90, 1600, 1340, 600, 988, INTEL_BUS_CLK),
INTEL(PM_725B_90, 1600, 1324, 600, 988, INTEL_BUS_CLK),
INTEL(PM_725C_90, 1600, 1308, 600, 988, INTEL_BUS_CLK),
INTEL(PM_725D_90, 1600, 1276, 600, 988, INTEL_BUS_CLK),
INTEL(PM_715A_90, 1500, 1340, 600, 988, INTEL_BUS_CLK),
INTEL(PM_715B_90, 1500, 1324, 600, 988, INTEL_BUS_CLK),
INTEL(PM_715C_90, 1500, 1308, 600, 988, INTEL_BUS_CLK),
INTEL(PM_715D_90, 1500, 1276, 600, 988, INTEL_BUS_CLK),
INTEL(PM_778_90, 1600, 1116, 600, 988, INTEL_BUS_CLK),
INTEL(PM_758_90, 1500, 1116, 600, 988, INTEL_BUS_CLK),
INTEL(PM_738_90, 1400, 1116, 600, 988, INTEL_BUS_CLK),
INTEL(PM_773G_90, 1300, 956, 600, 812, INTEL_BUS_CLK),
INTEL(PM_773H_90, 1300, 940, 600, 812, INTEL_BUS_CLK),
INTEL(PM_773I_90, 1300, 924, 600, 812, INTEL_BUS_CLK),
INTEL(PM_773J_90, 1300, 908, 600, 812, INTEL_BUS_CLK),
INTEL(PM_773K_90, 1300, 892, 600, 812, INTEL_BUS_CLK),
INTEL(PM_773L_90, 1300, 876, 600, 812, INTEL_BUS_CLK),
INTEL(PM_753G_90, 1200, 956, 600, 812, INTEL_BUS_CLK),
INTEL(PM_753H_90, 1200, 940, 600, 812, INTEL_BUS_CLK),
INTEL(PM_753I_90, 1200, 924, 600, 812, INTEL_BUS_CLK),
INTEL(PM_753J_90, 1200, 908, 600, 812, INTEL_BUS_CLK),
INTEL(PM_753K_90, 1200, 892, 600, 812, INTEL_BUS_CLK),
INTEL(PM_753L_90, 1200, 876, 600, 812, INTEL_BUS_CLK),
INTEL(PM_733JG_90, 1100, 956, 600, 812, INTEL_BUS_CLK),
INTEL(PM_733JH_90, 1100, 940, 600, 812, INTEL_BUS_CLK),
INTEL(PM_733JI_90, 1100, 924, 600, 812, INTEL_BUS_CLK),
INTEL(PM_733JJ_90, 1100, 908, 600, 812, INTEL_BUS_CLK),
INTEL(PM_733JK_90, 1100, 892, 600, 812, INTEL_BUS_CLK),
INTEL(PM_733JL_90, 1100, 876, 600, 812, INTEL_BUS_CLK),
INTEL(PM_733_90, 1100, 940, 600, 812, INTEL_BUS_CLK),
INTEL(PM_723_90, 1000, 940, 600, 812, INTEL_BUS_CLK),
CENTAUR(C7M_795, 2000, 1148, 533, 844, 133),
CENTAUR(C7M_794, 2000, 1148, 400, 844, 100),
CENTAUR(C7M_785, 1867, 1148, 533, 844, 133),
CENTAUR(C7M_784, 1800, 1148, 400, 844, 100),
CENTAUR(C7M_765, 1600, 1084, 533, 844, 133),
CENTAUR(C7M_764, 1600, 1084, 400, 844, 100),
CENTAUR(C7M_754, 1500, 1004, 400, 844, 100),
CENTAUR(C7M_775_ULV, 1500, 956, 400, 796, 100),
CENTAUR(C7M_771, 1200, 860, 400, 844, 100),
CENTAUR(C7M_772_ULV, 1200, 844, 400, 796, 100),
CENTAUR(C7M_779_ULV, 1000, 796, 400, 796, 100),
CENTAUR(C7M_770_ULV, 1000, 844, 400, 796, 100),
{ 0, 0, NULL },
};
static void est_identify(driver_t *driver, device_t parent);
static int est_features(driver_t *driver, u_int *features);
static int est_probe(device_t parent);
static int est_attach(device_t parent);
static int est_detach(device_t parent);
static int est_get_info(device_t dev);
static int est_acpi_info(device_t dev, freq_info **freqs);
static int est_table_info(device_t dev, uint64_t msr, freq_info **freqs);
static int est_msr_info(device_t dev, uint64_t msr, freq_info **freqs);
static freq_info *est_get_current(freq_info *freq_list);
static int est_settings(device_t dev, struct cf_setting *sets, int *count);
static int est_set(device_t dev, const struct cf_setting *set);
static int est_get(device_t dev, struct cf_setting *set);
static int est_type(device_t dev, int *type);
static int est_set_id16(device_t dev, uint16_t id16, int need_check);
static void est_get_id16(uint16_t *id16_p);
static device_method_t est_methods[] = {
/* Device interface */
DEVMETHOD(device_identify, est_identify),
DEVMETHOD(device_probe, est_probe),
DEVMETHOD(device_attach, est_attach),
DEVMETHOD(device_detach, est_detach),
/* cpufreq interface */
DEVMETHOD(cpufreq_drv_set, est_set),
DEVMETHOD(cpufreq_drv_get, est_get),
DEVMETHOD(cpufreq_drv_type, est_type),
DEVMETHOD(cpufreq_drv_settings, est_settings),
/* ACPI interface */
DEVMETHOD(acpi_get_features, est_features),
{0, 0}
};
static driver_t est_driver = {
"est",
est_methods,
sizeof(struct est_softc),
};
static devclass_t est_devclass;
DRIVER_MODULE(est, cpu, est_driver, est_devclass, 0, 0);
static int
est_features(driver_t *driver, u_int *features)
{
/*
* Notify the ACPI CPU that we support direct access to MSRs.
* XXX C1 "I/O then Halt" seems necessary for some broken BIOS.
*/
*features = ACPI_CAP_PERF_MSRS | ACPI_CAP_C1_IO_HALT;
return (0);
}
static void
est_identify(driver_t *driver, device_t parent)
{
device_t child;
/* Make sure we're not being doubly invoked. */
if (device_find_child(parent, "est", -1) != NULL)
return;
/* Check that CPUID is supported and the vendor is Intel or Centaur. */
if (cpu_high == 0 || (cpu_vendor_id != CPU_VENDOR_INTEL &&
cpu_vendor_id != CPU_VENDOR_CENTAUR))
return;
/*
* Check if the CPU supports EST.
*/
if (!(cpu_feature2 & CPUID2_EST))
return;
/*
* We add a child for each CPU since settings must be performed
* on each CPU in the SMP case.
*/
child = BUS_ADD_CHILD(parent, 10, "est", -1);
if (child == NULL)
device_printf(parent, "add est child failed\n");
}
static int
est_probe(device_t dev)
{
device_t perf_dev;
uint64_t msr;
int error, type;
if (resource_disabled("est", 0))
return (ENXIO);
/*
* If the ACPI perf driver has attached and is not just offering
* info, let it manage things.
*/
perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
if (perf_dev && device_is_attached(perf_dev)) {
error = CPUFREQ_DRV_TYPE(perf_dev, &type);
if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
return (ENXIO);
}
/* Attempt to enable SpeedStep if not currently enabled. */
msr = rdmsr(MSR_MISC_ENABLE);
if ((msr & MSR_SS_ENABLE) == 0) {
wrmsr(MSR_MISC_ENABLE, msr | MSR_SS_ENABLE);
if (bootverbose)
device_printf(dev, "enabling SpeedStep\n");
/* Check if the enable failed. */
msr = rdmsr(MSR_MISC_ENABLE);
if ((msr & MSR_SS_ENABLE) == 0) {
device_printf(dev, "failed to enable SpeedStep\n");
return (ENXIO);
}
}
device_set_desc(dev, "Enhanced SpeedStep Frequency Control");
return (0);
}
static int
est_attach(device_t dev)
{
struct est_softc *sc;
sc = device_get_softc(dev);
sc->dev = dev;
/* On SMP systems we can't guarantee independent frequency settings. */
if (strict == -1 && mp_ncpus > 1)
strict = 0;
/* Check CPU for supported settings. */
if (est_get_info(dev))
return (ENXIO);
cpufreq_register(dev);
return (0);
}
static int
est_detach(device_t dev)
{
struct est_softc *sc;
int error;
error = cpufreq_unregister(dev);
if (error)
return (error);
sc = device_get_softc(dev);
if (sc->acpi_settings || sc->msr_settings)
free(sc->freq_list, M_DEVBUF);
return (0);
}
/*
* Probe for supported CPU settings. First, check our static table of
* settings. If no match, try using the ones offered by acpi_perf
* (i.e., _PSS). We use ACPI second because some systems (IBM R/T40
* series) export both legacy SMM IO-based access and direct MSR access
* but the direct access specifies invalid values for _PSS.
*/
static int
est_get_info(device_t dev)
{
struct est_softc *sc;
uint64_t msr;
int error;
sc = device_get_softc(dev);
msr = rdmsr(MSR_PERF_STATUS);
error = est_table_info(dev, msr, &sc->freq_list);
if (error)
error = est_acpi_info(dev, &sc->freq_list);
if (error)
error = est_msr_info(dev, msr, &sc->freq_list);
if (error) {
printf(
"est: CPU supports Enhanced Speedstep, but is not recognized.\n"
"est: cpu_vendor %s, msr %0jx\n", cpu_vendor, msr);
return (ENXIO);
}
return (0);
}
static int
est_acpi_info(device_t dev, freq_info **freqs)
{
struct est_softc *sc;
struct cf_setting *sets;
freq_info *table;
device_t perf_dev;
int count, error, i, j;
uint16_t saved_id16;
perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
if (perf_dev == NULL || !device_is_attached(perf_dev))
return (ENXIO);
/* Fetch settings from acpi_perf. */
sc = device_get_softc(dev);
table = NULL;
sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
if (sets == NULL)
return (ENOMEM);
count = MAX_SETTINGS;
error = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count);
if (error)
goto out;
/* Parse settings into our local table format. */
- table = mallocarray(count + 1, sizeof(freq_info), M_DEVBUF, M_NOWAIT);
+ table = malloc((count + 1) * sizeof(freq_info), M_DEVBUF, M_NOWAIT);
if (table == NULL) {
error = ENOMEM;
goto out;
}
est_get_id16(&saved_id16);
for (i = 0, j = 0; i < count; i++) {
/*
* Confirm id16 value is correct.
*/
if (sets[i].freq > 0) {
error = est_set_id16(dev, sets[i].spec[0], strict);
if (error != 0) {
if (bootverbose)
device_printf(dev, "Invalid freq %u, "
"ignored.\n", sets[i].freq);
continue;
}
table[j].freq = sets[i].freq;
table[j].volts = sets[i].volts;
table[j].id16 = sets[i].spec[0];
table[j].power = sets[i].power;
++j;
}
}
/* restore saved setting */
est_set_id16(dev, saved_id16, 0);
/* Mark end of table with a terminator. */
bzero(&table[j], sizeof(freq_info));
sc->acpi_settings = TRUE;
*freqs = table;
error = 0;
out:
if (sets)
free(sets, M_TEMP);
if (error && table)
free(table, M_DEVBUF);
return (error);
}
static int
est_table_info(device_t dev, uint64_t msr, freq_info **freqs)
{
cpu_info *p;
uint32_t id;
/* Find a table which matches (vendor, id32). */
id = msr >> 32;
for (p = ESTprocs; p->id32 != 0; p++) {
if (p->vendor_id == cpu_vendor_id && p->id32 == id)
break;
}
if (p->id32 == 0)
return (EOPNOTSUPP);
/* Make sure the current setpoint is valid. */
if (est_get_current(p->freqtab) == NULL) {
device_printf(dev, "current setting not found in table\n");
return (EOPNOTSUPP);
}
*freqs = p->freqtab;
return (0);
}
static int
bus_speed_ok(int bus)
{
switch (bus) {
case 100:
case 133:
case 333:
return (1);
default:
return (0);
}
}
/*
* Flesh out a simple rate table containing the high and low frequencies
* based on the current clock speed and the upper 32 bits of the MSR.
*/
static int
est_msr_info(device_t dev, uint64_t msr, freq_info **freqs)
{
struct est_softc *sc;
freq_info *fp;
int bus, freq, volts;
uint16_t id;
if (!msr_info_enabled)
return (EOPNOTSUPP);
/* Figure out the bus clock. */
freq = atomic_load_acq_64(&tsc_freq) / 1000000;
id = msr >> 32;
bus = freq / (id >> 8);
device_printf(dev, "Guessed bus clock (high) of %d MHz\n", bus);
if (!bus_speed_ok(bus)) {
/* We may be running on the low frequency. */
id = msr >> 48;
bus = freq / (id >> 8);
device_printf(dev, "Guessed bus clock (low) of %d MHz\n", bus);
if (!bus_speed_ok(bus))
return (EOPNOTSUPP);
/* Calculate high frequency. */
id = msr >> 32;
freq = ((id >> 8) & 0xff) * bus;
}
/* Fill out a new freq table containing just the high and low freqs. */
sc = device_get_softc(dev);
fp = malloc(sizeof(freq_info) * 3, M_DEVBUF, M_WAITOK | M_ZERO);
/* First, the high frequency. */
volts = id & 0xff;
if (volts != 0) {
volts <<= 4;
volts += 700;
}
fp[0].freq = freq;
fp[0].volts = volts;
fp[0].id16 = id;
fp[0].power = CPUFREQ_VAL_UNKNOWN;
device_printf(dev, "Guessed high setting of %d MHz @ %d Mv\n", freq,
volts);
/* Second, the low frequency. */
id = msr >> 48;
freq = ((id >> 8) & 0xff) * bus;
volts = id & 0xff;
if (volts != 0) {
volts <<= 4;
volts += 700;
}
fp[1].freq = freq;
fp[1].volts = volts;
fp[1].id16 = id;
fp[1].power = CPUFREQ_VAL_UNKNOWN;
device_printf(dev, "Guessed low setting of %d MHz @ %d Mv\n", freq,
volts);
/* Table is already terminated due to M_ZERO. */
sc->msr_settings = TRUE;
*freqs = fp;
return (0);
}
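/*
 * Worked decode example (hypothetical MSR contents): if bits 47:32 of
 * MSR_PERF_STATUS are 0x1131 and the guessed bus clock is 100 MHz, the
 * high setting above decodes to freq = 0x11 * 100 = 1700 MHz and
 * volts = (0x31 << 4) + 700 = 1484 mV, the inverse of the ID16() encoding.
 */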
static void
est_get_id16(uint16_t *id16_p)
{
*id16_p = rdmsr(MSR_PERF_STATUS) & 0xffff;
}
static int
est_set_id16(device_t dev, uint16_t id16, int need_check)
{
uint64_t msr;
uint16_t new_id16;
int ret = 0;
/* Read the current register, mask out the old, set the new id. */
msr = rdmsr(MSR_PERF_CTL);
msr = (msr & ~0xffff) | id16;
wrmsr(MSR_PERF_CTL, msr);
if (need_check) {
/* Wait a short while and read the new status. */
DELAY(EST_TRANS_LAT);
est_get_id16(&new_id16);
if (new_id16 != id16) {
if (bootverbose)
device_printf(dev, "Invalid id16 (set, cur) "
"= (%u, %u)\n", id16, new_id16);
ret = ENXIO;
}
}
return (ret);
}
static freq_info *
est_get_current(freq_info *freq_list)
{
freq_info *f;
int i;
uint16_t id16;
/*
* Try a few times to get a valid value. Sometimes, if the CPU
* is in the middle of an asynchronous transition (i.e., P4TCC),
* we get a temporary invalid result.
*/
for (i = 0; i < 5; i++) {
est_get_id16(&id16);
for (f = freq_list; f->id16 != 0; f++) {
if (f->id16 == id16)
return (f);
}
DELAY(100);
}
return (NULL);
}
static int
est_settings(device_t dev, struct cf_setting *sets, int *count)
{
struct est_softc *sc;
freq_info *f;
int i;
sc = device_get_softc(dev);
if (*count < EST_MAX_SETTINGS)
return (E2BIG);
i = 0;
for (f = sc->freq_list; f->freq != 0; f++, i++) {
sets[i].freq = f->freq;
sets[i].volts = f->volts;
sets[i].power = f->power;
sets[i].lat = EST_TRANS_LAT;
sets[i].dev = dev;
}
*count = i;
return (0);
}
static int
est_set(device_t dev, const struct cf_setting *set)
{
struct est_softc *sc;
freq_info *f;
/* Find the setting matching the requested one. */
sc = device_get_softc(dev);
for (f = sc->freq_list; f->freq != 0; f++) {
if (f->freq == set->freq)
break;
}
if (f->freq == 0)
return (EINVAL);
/* Read the current register, mask out the old, set the new id. */
est_set_id16(dev, f->id16, 0);
return (0);
}
static int
est_get(device_t dev, struct cf_setting *set)
{
struct est_softc *sc;
freq_info *f;
sc = device_get_softc(dev);
f = est_get_current(sc->freq_list);
if (f == NULL)
return (ENXIO);
set->freq = f->freq;
set->volts = f->volts;
set->power = f->power;
set->lat = EST_TRANS_LAT;
set->dev = dev;
return (0);
}
static int
est_type(device_t dev, int *type)
{
if (type == NULL)
return (EINVAL);
*type = CPUFREQ_TYPE_ABSOLUTE;
return (0);
}
