Index: head/lib/libc/sparc64/fpu/fpu.c =================================================================== --- head/lib/libc/sparc64/fpu/fpu.c (revision 258779) +++ head/lib/libc/sparc64/fpu/fpu.c (revision 258780) @@ -1,461 +1,461 @@ /* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright 2001 by Thomas Moestl . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
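Every hunk in this revision makes the same class of fix: the constant 1 is a signed int, so (1 << 31) shifts a one into the sign bit, which C leaves undefined; (1U << 31) performs the shift on an unsigned int and yields a well-defined 0x80000000. A minimal standalone sketch (not part of the commit) of the sign-bit masking that __fpu_mov below relies on for fabs and fneg:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* 1U makes the shift unsigned and therefore well defined; with a
 * plain int, 1 << 31 overflows into the sign bit. */
#define SIGN_BIT	(1U << 31)

int
main(void)
{
	float f = -3.5f;
	uint32_t bits, absbits, negbits;
	float fa, fn;

	memcpy(&bits, &f, sizeof(bits));	/* IEEE-754 encoding of f */
	absbits = bits & ~SIGN_BIT;	/* fabs: clear the sign (the nand argument) */
	negbits = bits ^ SIGN_BIT;	/* fneg: flip the sign (the xor argument) */
	memcpy(&fa, &absbits, sizeof(fa));
	memcpy(&fn, &negbits, sizeof(fn));
	printf("x=%g fabs=%g fneg=%g\n", f, fa, fn);
	return (0);
}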
* * @(#)fpu.c 8.1 (Berkeley) 6/11/93 * $NetBSD: fpu.c,v 1.11 2000/12/06 01:47:50 mrg Exp $ */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include "namespace.h" #include <errno.h> #include <unistd.h> #ifdef FPU_DEBUG #include <stdio.h> #endif #include <signal.h> #include <stdlib.h> #include "un-namespace.h" #include "libc_private.h" #include <machine/fp.h> #include <machine/frame.h> #include <machine/fsr.h> #include <machine/instr.h> #include <machine/pcb.h> #include <machine/tstate.h> #include "__sparc_utrap_private.h" #include "fpu_emu.h" #include "fpu_extern.h" /* * Translate current exceptions into `first' exception. The * bits go the wrong way for ffs() (0x10 is most important, etc). * There are only 5, so do it the obvious way. */ #define X1(x) x #define X2(x) x,x #define X4(x) x,x,x,x #define X8(x) X4(x),X4(x) #define X16(x) X8(x),X8(x) static const char cx_to_trapx[] = { X1(FSR_NX), X2(FSR_DZ), X4(FSR_UF), X8(FSR_OF), X16(FSR_NV) }; #ifdef FPU_DEBUG #ifdef FPU_DEBUG_MASK int __fpe_debug = FPU_DEBUG_MASK; #else int __fpe_debug = 0; #endif #endif /* FPU_DEBUG */ static int __fpu_execute(struct utrapframe *, struct fpemu *, u_int32_t, u_long); /* * Need to use an fpstate on the stack; we could switch, so we cannot safely * modify the pcb one, it might get overwritten. */ int __fpu_exception(struct utrapframe *uf) { struct fpemu fe; u_long fsr, tstate; u_int insn; int sig; fsr = uf->uf_fsr; switch (FSR_GET_FTT(fsr)) { case FSR_FTT_NONE: __utrap_write("lost FPU trap type\n"); return (0); case FSR_FTT_IEEE: return (SIGFPE); case FSR_FTT_SEQERR: __utrap_write("FPU sequence error\n"); return (SIGFPE); case FSR_FTT_HWERR: __utrap_write("FPU hardware error\n"); return (SIGFPE); case FSR_FTT_UNFIN: case FSR_FTT_UNIMP: break; default: __utrap_write("unknown FPU error\n"); return (SIGFPE); } fe.fe_fsr = fsr & ~FSR_FTT_MASK; insn = *(u_int32_t *)uf->uf_pc; if (IF_OP(insn) != IOP_MISC || (IF_F3_OP3(insn) != INS2_FPop1 && IF_F3_OP3(insn) != INS2_FPop2)) __utrap_panic("bogus FP fault"); tstate = uf->uf_state; sig = __fpu_execute(uf, &fe, insn, tstate); if (sig != 0) return (sig); __asm __volatile("ldx %0, %%fsr" : : "m" (fe.fe_fsr)); return (0); } #ifdef FPU_DEBUG /* * Dump a `fpn' structure. */ void __fpu_dumpfpn(struct fpn *fp) { static const char *const class[] = { "SNAN", "QNAN", "ZERO", "NUM", "INF" }; printf("%s %c.%x %x %x %xE%d", class[fp->fp_class + 2], fp->fp_sign ? '-' : ' ', fp->fp_mant[0], fp->fp_mant[1], fp->fp_mant[2], fp->fp_mant[3], fp->fp_exp); } #endif static const int opmask[] = {0, 0, 1, 3, 1}; /* Decode 5 bit register field depending on the type. */ #define RN_DECODE(tp, rn) \ ((tp) >= FTYPE_DBL ? INSFPdq_RN(rn) & ~opmask[tp] : (rn)) /* * Helper for forming the below case statements. Build only the op3 and opf * field of the instruction, these are the only ones that need to match. */ #define FOP(op3, opf) \ ((op3) << IF_F3_OP3_SHIFT | (opf) << IF_F3_OPF_SHIFT) /* * Implement a move operation for all supported operand types. The additional * nand and xor parameters will be applied to the upper 32 bit word of the * source operand. This allows implementing fabs and fneg (for fp operands - * only!) using this function, too, by passing (1 << 31) for one of the + * only!) using this function, too, by passing (1U << 31) for one of the * parameters, and 0 for the other. */ static void __fpu_mov(struct fpemu *fe, int type, int rd, int rs2, u_int32_t nand, u_int32_t xor) { if (type == FTYPE_INT || type == FTYPE_SNG) __fpu_setreg(rd, (__fpu_getreg(rs2) & ~nand) ^ xor); else { /* * Need to use the double versions to be able to access * the upper 32 fp registers.
*/ __fpu_setreg64(rd, (__fpu_getreg64(rs2) & ~((u_int64_t)nand << 32)) ^ ((u_int64_t)xor << 32)); if (type == FTYPE_EXT) __fpu_setreg64(rd + 2, __fpu_getreg64(rs2 + 2)); } } static __inline void __fpu_ccmov(struct fpemu *fe, int type, int rd, int rs2, u_int32_t insn, int fcc) { if (IF_F4_COND(insn) == fcc) __fpu_mov(fe, type, rd, rs2, 0, 0); } static int __fpu_cmpck(struct fpemu *fe) { u_long fsr; int cx; /* * The only possible exception here is NV; catch it * early and get out, as there is no result register. */ cx = fe->fe_cx; fsr = fe->fe_fsr | (cx << FSR_CEXC_SHIFT); if (cx != 0) { if (fsr & (FSR_NV << FSR_TEM_SHIFT)) { fe->fe_fsr = (fsr & ~FSR_FTT_MASK) | FSR_FTT(FSR_FTT_IEEE); return (SIGFPE); } fsr |= FSR_NV << FSR_AEXC_SHIFT; } fe->fe_fsr = fsr; return (0); } /* * Execute an FPU instruction (one that runs entirely in the FPU; not * FBfcc or STF, for instance). On return, fe->fe_fs->fs_fsr will be * modified to reflect the setting the hardware would have left. * * Note that we do not catch all illegal opcodes, so you can, for instance, * multiply two integers this way. */ static int __fpu_execute(struct utrapframe *uf, struct fpemu *fe, u_int32_t insn, u_long tstate) { struct fpn *fp; int opf, rs1, rs2, rd, type, mask, cx, cond; u_long reg, fsr; u_int space[4]; /* * `Decode' and execute instruction. Start with no exceptions. * The type of almost any OPF opcode is in the bottom two bits, so we * squish them out here. */ opf = insn & (IF_MASK(IF_F3_OP3_SHIFT, IF_F3_OP3_BITS) | IF_MASK(IF_F3_OPF_SHIFT + 2, IF_F3_OPF_BITS - 2)); type = IF_F3_OPF(insn) & 3; rs1 = RN_DECODE(type, IF_F3_RS1(insn)); rs2 = RN_DECODE(type, IF_F3_RS2(insn)); rd = RN_DECODE(type, IF_F3_RD(insn)); cond = 0; #ifdef notdef if ((rs1 | rs2 | rd) & opmask[type]) return (SIGILL); #endif fsr = fe->fe_fsr; fe->fe_fsr &= ~FSR_CEXC_MASK; fe->fe_cx = 0; switch (opf) { case FOP(INS2_FPop2, INSFP2_FMOV_CC(IFCC_FCC(0))): __fpu_ccmov(fe, type, rd, rs2, insn, FSR_GET_FCC0(fsr)); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_CC(IFCC_FCC(1))): __fpu_ccmov(fe, type, rd, rs2, insn, FSR_GET_FCC1(fsr)); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_CC(IFCC_FCC(2))): __fpu_ccmov(fe, type, rd, rs2, insn, FSR_GET_FCC2(fsr)); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_CC(IFCC_FCC(3))): __fpu_ccmov(fe, type, rd, rs2, insn, FSR_GET_FCC3(fsr)); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_CC(IFCC_ICC)): __fpu_ccmov(fe, type, rd, rs2, insn, (tstate & TSTATE_ICC_MASK) >> TSTATE_ICC_SHIFT); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_CC(IFCC_XCC)): __fpu_ccmov(fe, type, rd, rs2, insn, (tstate & TSTATE_XCC_MASK) >> (TSTATE_XCC_SHIFT)); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_RC(IRCOND_Z)): reg = __emul_fetch_reg(uf, IF_F4_RS1(insn)); if (reg == 0) __fpu_mov(fe, type, rd, rs2, 0, 0); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_RC(IRCOND_LEZ)): reg = __emul_fetch_reg(uf, IF_F4_RS1(insn)); if (reg <= 0) __fpu_mov(fe, type, rd, rs2, 0, 0); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_RC(IRCOND_LZ)): reg = __emul_fetch_reg(uf, IF_F4_RS1(insn)); if (reg < 0) __fpu_mov(fe, type, rd, rs2, 0, 0); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_RC(IRCOND_NZ)): reg = __emul_fetch_reg(uf, IF_F4_RS1(insn)); if (reg != 0) __fpu_mov(fe, type, rd, rs2, 0, 0); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_RC(IRCOND_GZ)): reg = __emul_fetch_reg(uf, IF_F4_RS1(insn)); if (reg > 0) __fpu_mov(fe, type, rd, rs2, 0, 0); return (0); case FOP(INS2_FPop2, INSFP2_FMOV_RC(IRCOND_GEZ)): reg = __emul_fetch_reg(uf, IF_F4_RS1(insn)); if (reg >= 0) __fpu_mov(fe, type, rd, 
rs2, 0, 0); return (0); case FOP(INS2_FPop2, INSFP2_FCMP): __fpu_explode(fe, &fe->fe_f1, type, rs1); __fpu_explode(fe, &fe->fe_f2, type, rs2); __fpu_compare(fe, 0, IF_F3_CC(insn)); return (__fpu_cmpck(fe)); case FOP(INS2_FPop2, INSFP2_FCMPE): __fpu_explode(fe, &fe->fe_f1, type, rs1); __fpu_explode(fe, &fe->fe_f2, type, rs2); __fpu_compare(fe, 1, IF_F3_CC(insn)); return (__fpu_cmpck(fe)); case FOP(INS2_FPop1, INSFP1_FMOV): __fpu_mov(fe, type, rd, rs2, 0, 0); return (0); case FOP(INS2_FPop1, INSFP1_FNEG): - __fpu_mov(fe, type, rd, rs2, 0, (1 << 31)); + __fpu_mov(fe, type, rd, rs2, 0, (1U << 31)); return (0); case FOP(INS2_FPop1, INSFP1_FABS): - __fpu_mov(fe, type, rd, rs2, (1 << 31), 0); + __fpu_mov(fe, type, rd, rs2, (1U << 31), 0); return (0); case FOP(INS2_FPop1, INSFP1_FSQRT): __fpu_explode(fe, &fe->fe_f1, type, rs2); fp = __fpu_sqrt(fe); break; case FOP(INS2_FPop1, INSFP1_FADD): __fpu_explode(fe, &fe->fe_f1, type, rs1); __fpu_explode(fe, &fe->fe_f2, type, rs2); fp = __fpu_add(fe); break; case FOP(INS2_FPop1, INSFP1_FSUB): __fpu_explode(fe, &fe->fe_f1, type, rs1); __fpu_explode(fe, &fe->fe_f2, type, rs2); fp = __fpu_sub(fe); break; case FOP(INS2_FPop1, INSFP1_FMUL): __fpu_explode(fe, &fe->fe_f1, type, rs1); __fpu_explode(fe, &fe->fe_f2, type, rs2); fp = __fpu_mul(fe); break; case FOP(INS2_FPop1, INSFP1_FDIV): __fpu_explode(fe, &fe->fe_f1, type, rs1); __fpu_explode(fe, &fe->fe_f2, type, rs2); fp = __fpu_div(fe); break; case FOP(INS2_FPop1, INSFP1_FsMULd): case FOP(INS2_FPop1, INSFP1_FdMULq): if (type == FTYPE_EXT) return (SIGILL); __fpu_explode(fe, &fe->fe_f1, type, rs1); __fpu_explode(fe, &fe->fe_f2, type, rs2); type++; /* single to double, or double to quad */ /* * Recalculate rd (the old type applied for the source regs * only, the target one has a different size). */ rd = RN_DECODE(type, IF_F3_RD(insn)); fp = __fpu_mul(fe); break; case FOP(INS2_FPop1, INSFP1_FxTOs): case FOP(INS2_FPop1, INSFP1_FxTOd): case FOP(INS2_FPop1, INSFP1_FxTOq): type = FTYPE_LNG; rs2 = RN_DECODE(type, IF_F3_RS2(insn)); __fpu_explode(fe, fp = &fe->fe_f1, type, rs2); /* sneaky; depends on instruction encoding */ type = (IF_F3_OPF(insn) >> 2) & 3; rd = RN_DECODE(type, IF_F3_RD(insn)); break; case FOP(INS2_FPop1, INSFP1_FTOx): __fpu_explode(fe, fp = &fe->fe_f1, type, rs2); type = FTYPE_LNG; rd = RN_DECODE(type, IF_F3_RD(insn)); break; case FOP(INS2_FPop1, INSFP1_FTOs): case FOP(INS2_FPop1, INSFP1_FTOd): case FOP(INS2_FPop1, INSFP1_FTOq): case FOP(INS2_FPop1, INSFP1_FTOi): __fpu_explode(fe, fp = &fe->fe_f1, type, rs2); /* sneaky; depends on instruction encoding */ type = (IF_F3_OPF(insn) >> 2) & 3; rd = RN_DECODE(type, IF_F3_RD(insn)); break; default: return (SIGILL); } /* * ALU operation is complete. Collapse the result and then check * for exceptions. If we got any, and they are enabled, do not * alter the destination register, just stop with an exception. * Otherwise set new current exceptions and accrue. */ __fpu_implode(fe, fp, type, space); cx = fe->fe_cx; if (cx != 0) { mask = (fsr >> FSR_TEM_SHIFT) & FSR_TEM_MASK; if (cx & mask) { /* not accrued??? 
*/ fsr = (fsr & ~FSR_FTT_MASK) | FSR_FTT(FSR_FTT_IEEE) | FSR_CEXC(cx_to_trapx[(cx & mask) - 1]); return (SIGFPE); } fsr |= (cx << FSR_CEXC_SHIFT) | (cx << FSR_AEXC_SHIFT); } fe->fe_fsr = fsr; if (type == FTYPE_INT || type == FTYPE_SNG) __fpu_setreg(rd, space[0]); else { __fpu_setreg64(rd, ((u_int64_t)space[0] << 32) | space[1]); if (type == FTYPE_EXT) __fpu_setreg64(rd + 2, ((u_int64_t)space[2] << 32) | space[3]); } return (0); /* success */ } Index: head/lib/libc/sparc64/fpu/fpu_sqrt.c =================================================================== --- head/lib/libc/sparc64/fpu/fpu_sqrt.c (revision 258779) +++ head/lib/libc/sparc64/fpu/fpu_sqrt.c (revision 258780) @@ -1,397 +1,397 @@ /* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)fpu_sqrt.c 8.1 (Berkeley) 6/11/93 * $NetBSD: fpu_sqrt.c,v 1.2 1994/11/20 20:52:46 deraadt Exp $ */ #include __FBSDID("$FreeBSD$"); /* * Perform an FPU square root (return sqrt(x)). */ #include #include #include #include "fpu_arith.h" #include "fpu_emu.h" #include "fpu_extern.h" /* * Our task is to calculate the square root of a floating point number x0. 
* This number x normally has the form: * * x = mant * 2^exp (where 1 <= mant < 2 and exp is an integer) * * This can be left as it stands, or the mantissa can be doubled and the * exponent decremented: * * x = (2 * mant) * 2^(exp-1) (where 2 <= 2 * mant < 4) * * If the exponent `exp' is even, the square root of the number is best * handled using the first form, and is by definition equal to: * * sqrt(x) = sqrt(mant) * 2^(exp/2) * * If exp is odd, on the other hand, it is convenient to use the second * form, giving: * * sqrt(x) = sqrt(2 * mant) * 2^((exp-1)/2) * * In the first case, we have * * 1 <= mant < 2 * * and therefore * * sqrt(1) <= sqrt(mant) < sqrt(2) * * while in the second case we have * * 2 <= 2*mant < 4 * * and therefore * * sqrt(2) <= sqrt(2*mant) < sqrt(4) * * so that in any case, we are sure that * * sqrt(1) <= sqrt(n * mant) < sqrt(4), n = 1 or 2 * * or * * 1 <= sqrt(n * mant) < 2, n = 1 or 2. * * This root is therefore a properly formed mantissa for a floating * point number. The exponent of sqrt(x) is either exp/2 or (exp-1)/2 * as above. This leaves us with the problem of finding the square root * of a fixed-point number in the range [1..4). * * Though it may not be instantly obvious, the following square root * algorithm works for any integer x of an even number of bits, provided * that no overflows occur: * * let q = 0 * for k = NBITS-1 to 0 step -1 do -- for each digit in the answer... * x *= 2 -- multiply by radix, for next digit * if x >= 2q + 2^k then -- if adding 2^k does not * x -= 2q + 2^k -- exceed the correct root, * q += 2^k -- add 2^k and adjust x * fi * done * sqrt = q / 2^(NBITS/2) -- (and any remainder is in x) * * If NBITS is odd (so that k is initially even), we can just add another * zero bit at the top of x. Doing so means that q is not going to acquire * a 1 bit in the first trip around the loop (since x0 < 2^NBITS). If the * final value in x is not needed, or can be off by a factor of 2, this is * equivalent to moving the `x *= 2' step to the bottom of the loop: * * for k = NBITS-1 to 0 step -1 do if ... fi; x *= 2; done * * and the result q will then be sqrt(x0) * 2^floor(NBITS / 2). * (Since the algorithm is destructive on x, we will call x's initial * value, for which q is some power of two times its square root, x0.) * * If we insert a loop invariant y = 2q, we can then rewrite this using * C notation as: * * q = y = 0; x = x0; * for (k = NBITS; --k >= 0;) { * #if (NBITS is even) * x *= 2; * #endif * t = y + (1 << k); * if (x >= t) { * x -= t; * q += 1 << k; * y += 1 << (k + 1); * } * #if (NBITS is odd) * x *= 2; * #endif * } * * If x0 is fixed point, rather than an integer, we can simply alter the * scale factor between q and sqrt(x0). As it happens, we can easily arrange * for the scale factor to be 2**0 or 1, so that sqrt(x0) == q. * * In our case, however, x0 (and therefore x, y, q, and t) are multiword * integers, which adds some complication. But note that q is built one * bit at a time, from the top down, and is not used itself in the loop * (we use 2q as held in y instead). This means we can build our answer * in an integer, one word at a time, which saves a bit of work. Also, * since 1 << k is always a `new' bit in q, 1 << k and 1 << (k+1) are * `new' bits in y and we can set them with an `or' operation rather than * a full-blown multiword add. * * We are almost done, except for one snag. We must prove that none of our * intermediate calculations can overflow.
We know that x0 is in [1..4) * and therefore the square root in q will be in [1..2), but what about x, * y, and t? * * We know that y = 2q at the beginning of each loop. (The relation only * fails temporarily while y and q are being updated.) Since q < 2, y < 4. * The sum in t can, in our case, be as much as y+(1<<1) = y+2 < 6. * Furthermore, we can prove with a bit of work that x never exceeds y by * more than 2, so that even after doubling, 0 <= x < 8. (This is left as * an exercise to the reader, mostly because I have become tired of working * on this comment.) * * If our floating point mantissas (which are of the form 1.frac) occupy * B+1 bits, our largest intermediary needs at most B+3 bits, or two extra. * In fact, we want even one more bit (for a carry, to avoid compares), or * three extra. There is a comment in fpu_emu.h reminding maintainers of * this, so we have some justification in assuming it. */ struct fpn * __fpu_sqrt(fe) struct fpemu *fe; { struct fpn *x = &fe->fe_f1; u_int bit, q, tt; u_int x0, x1, x2, x3; u_int y0, y1, y2, y3; u_int d0, d1, d2, d3; int e; /* * Take care of special cases first. In order: * * sqrt(NaN) = NaN * sqrt(+0) = +0 * sqrt(-0) = -0 * sqrt(x < 0) = NaN (including sqrt(-Inf)) * sqrt(+Inf) = +Inf * * Then all that remains are numbers with mantissas in [1..2). */ if (ISNAN(x) || ISZERO(x)) return (x); if (x->fp_sign) return (__fpu_newnan(fe)); if (ISINF(x)) return (x); /* * Calculate result exponent. As noted above, this may involve * doubling the mantissa. We will also need to double x each * time around the loop, so we define a macro for this here, and * we break out the multiword mantissa. */ #ifdef FPU_SHL1_BY_ADD #define DOUBLE_X { \ FPU_ADDS(x3, x3, x3); FPU_ADDCS(x2, x2, x2); \ FPU_ADDCS(x1, x1, x1); FPU_ADDC(x0, x0, x0); \ } #else #define DOUBLE_X { \ x0 = (x0 << 1) | (x1 >> 31); x1 = (x1 << 1) | (x2 >> 31); \ x2 = (x2 << 1) | (x3 >> 31); x3 <<= 1; \ } #endif #if (FP_NMANT & 1) != 0 # define ODD_DOUBLE DOUBLE_X # define EVEN_DOUBLE /* nothing */ #else # define ODD_DOUBLE /* nothing */ # define EVEN_DOUBLE DOUBLE_X #endif x0 = x->fp_mant[0]; x1 = x->fp_mant[1]; x2 = x->fp_mant[2]; x3 = x->fp_mant[3]; e = x->fp_exp; if (e & 1) /* exponent is odd; use sqrt(2mant) */ DOUBLE_X; /* THE FOLLOWING ASSUMES THAT RIGHT SHIFT DOES SIGN EXTENSION */ x->fp_exp = e >> 1; /* calculates (e&1 ? (e-1)/2 : e/2) */ /* * Now calculate the mantissa root. Since x is now in [1..4), * we know that the first trip around the loop will definitely * set the top bit in q, so we can do that manually and start * the loop at the next bit down instead. We must be sure to * double x correctly while doing the `known q=1.0'. * * We do this one mantissa-word at a time, as noted above, to - * save work. To avoid `(1 << 31) << 1', we also do the top bit + * save work. To avoid `(1U << 31) << 1', we also do the top bit * outside of each per-word loop. * * The calculation `t = y + bit' breaks down into `t0 = y0, ..., * t3 = y3, t? |= bit' for the appropriate word. Since the bit * is always a `new' one, this means that three of the `t?'s are * just the corresponding `y?'; we use `#define's here for this. * The variable `tt' holds the actual `t?' variable.
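The multiword loop that follows is easier to see in a one-word toy. Here is a sketch (not from this file) of the same shift-and-subtract recurrence for a plain 32-bit integer; as in the code below, whatever is left of the radicand at the end is the remainder, which the fpu code folds into the sticky bits:

#include <stdio.h>
#include <stdint.h>

/* floor(sqrt(x)): try one result bit per iteration, subtracting
 * 2q + 2^k (held in res + bit) whenever it fits, exactly as in the
 * pseudocode above. */
static uint32_t
isqrt32(uint32_t x)
{
	uint32_t res = 0;
	uint32_t bit = 1U << 30;	/* largest power of 4 that fits */

	while (bit > x)
		bit >>= 2;
	while (bit != 0) {
		if (x >= res + bit) {
			x -= res + bit;		/* x -= 2q + 2^k */
			res = (res >> 1) + bit;	/* q += 2^k */
		} else
			res >>= 1;
		bit >>= 2;
	}
	return (res);	/* x now holds the remainder */
}

int
main(void)
{
	printf("%u %u\n", isqrt32(81), isqrt32(90));	/* prints: 9 9 */
	return (0);
}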
*/ /* calculate q0 */ #define t0 tt bit = FP_1; EVEN_DOUBLE; /* if (x >= (t0 = y0 | bit)) { */ /* always true */ q = bit; x0 -= bit; y0 = bit << 1; /* } */ ODD_DOUBLE; while ((bit >>= 1) != 0) { /* for remaining bits in q0 */ EVEN_DOUBLE; t0 = y0 | bit; /* t = y + bit */ if (x0 >= t0) { /* if x >= t then */ x0 -= t0; /* x -= t */ q |= bit; /* q += bit */ y0 |= bit << 1; /* y += bit << 1 */ } ODD_DOUBLE; } x->fp_mant[0] = q; #undef t0 /* calculate q1. note (y0&1)==0. */ #define t0 y0 #define t1 tt q = 0; y1 = 0; bit = 1 << 31; EVEN_DOUBLE; t1 = bit; FPU_SUBS(d1, x1, t1); FPU_SUBC(d0, x0, t0); /* d = x - t */ if ((int)d0 >= 0) { /* if d >= 0 (i.e., x >= t) then */ x0 = d0, x1 = d1; /* x -= t */ q = bit; /* q += bit */ y0 |= 1; /* y += bit << 1 */ } ODD_DOUBLE; while ((bit >>= 1) != 0) { /* for remaining bits in q1 */ EVEN_DOUBLE; /* as before */ t1 = y1 | bit; FPU_SUBS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1; q |= bit; y1 |= bit << 1; } ODD_DOUBLE; } x->fp_mant[1] = q; #undef t1 /* calculate q2. note (y1&1)==0; y0 (aka t0) is fixed. */ #define t1 y1 #define t2 tt q = 0; y2 = 0; bit = 1 << 31; EVEN_DOUBLE; t2 = bit; FPU_SUBS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; q = bit; y1 |= 1; /* now t1, y1 are set in concrete */ } ODD_DOUBLE; while ((bit >>= 1) != 0) { EVEN_DOUBLE; t2 = y2 | bit; FPU_SUBS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; q |= bit; y2 |= bit << 1; } ODD_DOUBLE; } x->fp_mant[2] = q; #undef t2 /* calculate q3. y0, t0, y1, t1 all fixed; y2, t2, almost done. */ #define t2 y2 #define t3 tt q = 0; y3 = 0; bit = 1 << 31; EVEN_DOUBLE; t3 = bit; FPU_SUBS(d3, x3, t3); FPU_SUBCS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; x3 = d3; q = bit; y2 |= 1; } ODD_DOUBLE; while ((bit >>= 1) != 0) { EVEN_DOUBLE; t3 = y3 | bit; FPU_SUBS(d3, x3, t3); FPU_SUBCS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; x3 = d3; q |= bit; y3 |= bit << 1; } ODD_DOUBLE; } x->fp_mant[3] = q; /* * The result, which includes guard and round bits, is exact iff * x is now zero; any nonzero bits in x represent sticky bits. */ x->fp_sticky = x0 | x1 | x2 | x3; return (x); } Index: head/lib/libc/xdr/xdr_rec.c =================================================================== --- head/lib/libc/xdr/xdr_rec.c (revision 258779) +++ head/lib/libc/xdr/xdr_rec.c (revision 258780) @@ -1,795 +1,795 @@ /* $NetBSD: xdr_rec.c,v 1.18 2000/07/06 03:10:35 christos Exp $ */ /* * Sun RPC is a product of Sun Microsystems, Inc. and is provided for * unrestricted use provided that this legend is included on all tape * media and as a part of the software program in whole or part. Users * may copy or modify Sun RPC without charge, but are not authorized * to license or distribute it to anyone else except as part of a product or * program developed by the user. * * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. * * Sun RPC is provided with no support and without any obligation on the * part of Sun Microsystems, Inc. to assist in its use, correction, * modification or enhancement. * * SUN MICROSYSTEMS, INC. 
SHALL HAVE NO LIABILITY WITH RESPECT TO THE * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC * OR ANY PART THEREOF. * * In no event will Sun Microsystems, Inc. be liable for any lost revenue * or profits or other special, indirect and consequential damages, even if * Sun has been advised of the possibility of such damages. * * Sun Microsystems, Inc. * 2550 Garcia Avenue * Mountain View, California 94043 */ #if defined(LIBC_SCCS) && !defined(lint) static char *sccsid2 = "@(#)xdr_rec.c 1.21 87/08/11 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)xdr_rec.c 2.2 88/08/01 4.0 RPCSRC"; #endif #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); /* * xdr_rec.c, Implements TCP/IP based XDR streams with a "record marking" * layer above tcp (for rpc's use). * * Copyright (C) 1984, Sun Microsystems, Inc. * * These routines interface XDRSTREAMS to a tcp/ip connection. * There is a record marking layer between the xdr stream * and the tcp transport level. A record is composed of one or more * record fragments. A record fragment is a thirty-two bit header followed * by n bytes of data, where n is contained in the header. The header * is represented as a htonl(u_long). The high order bit encodes * whether or not the fragment is the last fragment of the record * (1 => fragment is last, 0 => more fragments to follow). * The other 31 bits encode the byte length of the fragment. */ #include "namespace.h" #include <sys/types.h> #include <netinet/in.h> #include <err.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <rpc/types.h> #include <rpc/xdr.h> #include <rpc/auth.h> #include <rpc/svc.h> #include <rpc/clnt.h> #include "un-namespace.h" #include "rpc_com.h" static bool_t xdrrec_getlong(XDR *, long *); static bool_t xdrrec_putlong(XDR *, const long *); static bool_t xdrrec_getbytes(XDR *, char *, u_int); static bool_t xdrrec_putbytes(XDR *, const char *, u_int); static u_int xdrrec_getpos(XDR *); static bool_t xdrrec_setpos(XDR *, u_int); static int32_t *xdrrec_inline(XDR *, u_int); static void xdrrec_destroy(XDR *); static const struct xdr_ops xdrrec_ops = { xdrrec_getlong, xdrrec_putlong, xdrrec_getbytes, xdrrec_putbytes, xdrrec_getpos, xdrrec_setpos, xdrrec_inline, xdrrec_destroy }; /* * A record is composed of one or more record fragments. * A record fragment is a four-byte header followed by zero to * 2**31-1 bytes. The header is treated as a long unsigned and is * encoded/decoded to the network via htonl/ntohl. The low order 31 bits * are a byte count of the fragment. The highest order bit is a boolean: * 1 => this fragment is the last fragment of the record, * 0 => this fragment is followed by more fragment(s). * * The fragment/record machinery is not general; it is constructed to * meet the needs of xdr and rpc based on tcp.
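A sketch of that wire format in isolation (mk_frag_header is an illustrative name, not a library routine): four header bytes, the top bit marking the final fragment of a record, the low 31 bits carrying the fragment length.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl, ntohl */

#define LAST_FRAG	(1U << 31)

/* Build the record-marking header for a fragment of len bytes. */
static uint32_t
mk_frag_header(uint32_t len, int is_last)
{
	return (htonl(len | (is_last ? LAST_FRAG : 0)));
}

int
main(void)
{
	uint32_t h = ntohl(mk_frag_header(512, 1));

	printf("len=%u last=%d\n", h & ~LAST_FRAG, (h & LAST_FRAG) != 0);
	return (0);
}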
*/ -#define LAST_FRAG ((u_int32_t)(1 << 31)) +#define LAST_FRAG ((u_int32_t)(1U << 31)) typedef struct rec_strm { char *tcp_handle; /* * out-going bits */ int (*writeit)(void *, void *, int); char *out_base; /* output buffer (points to frag header) */ char *out_finger; /* next output position */ char *out_boundry; /* data cannot go past this address */ u_int32_t *frag_header; /* beginning of current fragment */ bool_t frag_sent; /* true if buffer sent in middle of record */ /* * in-coming bits */ int (*readit)(void *, void *, int); u_long in_size; /* fixed size of the input buffer */ char *in_base; char *in_finger; /* location of next byte to be had */ char *in_boundry; /* can read up to this location */ long fbtbc; /* fragment bytes to be consumed */ bool_t last_frag; u_int sendsize; u_int recvsize; bool_t nonblock; bool_t in_haveheader; u_int32_t in_header; char *in_hdrp; int in_hdrlen; int in_reclen; int in_received; int in_maxrec; } RECSTREAM; static u_int fix_buf_size(u_int); static bool_t flush_out(RECSTREAM *, bool_t); static bool_t fill_input_buf(RECSTREAM *); static bool_t get_input_bytes(RECSTREAM *, char *, int); static bool_t set_input_fragment(RECSTREAM *); static bool_t skip_input_bytes(RECSTREAM *, long); static bool_t realloc_stream(RECSTREAM *, int); /* * Create an xdr handle for xdrrec * xdrrec_create fills in xdrs. Sendsize and recvsize are * send and recv buffer sizes (0 => use default). * tcp_handle is an opaque handle that is passed as the first parameter to * the procedures readit and writeit. Readit and writeit are read and * write respectively. They are like the system * calls except that they take an opaque handle rather than an fd. */ void xdrrec_create(xdrs, sendsize, recvsize, tcp_handle, readit, writeit) XDR *xdrs; u_int sendsize; u_int recvsize; void *tcp_handle; /* like read, but pass it a tcp_handle, not sock */ int (*readit)(void *, void *, int); /* like write, but pass it a tcp_handle, not sock */ int (*writeit)(void *, void *, int); { RECSTREAM *rstrm = mem_alloc(sizeof(RECSTREAM)); if (rstrm == NULL) { warnx("xdrrec_create: out of memory"); /* * This is bad. Should rework xdrrec_create to * return a handle, and in this case return NULL */ return; } rstrm->sendsize = sendsize = fix_buf_size(sendsize); rstrm->out_base = mem_alloc(rstrm->sendsize); if (rstrm->out_base == NULL) { warnx("xdrrec_create: out of memory"); mem_free(rstrm, sizeof(RECSTREAM)); return; } rstrm->recvsize = recvsize = fix_buf_size(recvsize); rstrm->in_base = mem_alloc(recvsize); if (rstrm->in_base == NULL) { warnx("xdrrec_create: out of memory"); mem_free(rstrm->out_base, sendsize); mem_free(rstrm, sizeof(RECSTREAM)); return; } /* * now the rest ... */ xdrs->x_ops = &xdrrec_ops; xdrs->x_private = rstrm; rstrm->tcp_handle = tcp_handle; rstrm->readit = readit; rstrm->writeit = writeit; rstrm->out_finger = rstrm->out_boundry = rstrm->out_base; rstrm->frag_header = (u_int32_t *)(void *)rstrm->out_base; rstrm->out_finger += sizeof(u_int32_t); rstrm->out_boundry += sendsize; rstrm->frag_sent = FALSE; rstrm->in_size = recvsize; rstrm->in_boundry = rstrm->in_base; rstrm->in_finger = (rstrm->in_boundry += recvsize); rstrm->fbtbc = 0; rstrm->last_frag = TRUE; rstrm->in_haveheader = FALSE; rstrm->in_hdrlen = 0; rstrm->in_hdrp = (char *)(void *)&rstrm->in_header; rstrm->nonblock = FALSE; rstrm->in_reclen = 0; rstrm->in_received = 0; } /* * The routines defined below are the xdr ops which will go into the * xdr handle filled in by xdrrec_create.
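For context, a sketch of how a caller might supply readit and writeit over an ordinary socket descriptor; the callback shapes come from the prototypes above, while sock_read, sock_write and attach_stream are illustrative names:

#include <unistd.h>
#include <rpc/types.h>
#include <rpc/xdr.h>

/* The opaque tcp_handle here is simply a pointer to the descriptor. */
static int
sock_read(void *handle, void *buf, int len)
{
	return ((int)read(*(int *)handle, buf, len));
}

static int
sock_write(void *handle, void *buf, int len)
{
	return ((int)write(*(int *)handle, buf, len));
}

static void
attach_stream(XDR *xdrs, int *fdp)
{
	/* 0, 0 => let fix_buf_size() pick the default buffer sizes. */
	xdrrec_create(xdrs, 0, 0, fdp, sock_read, sock_write);
}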
*/ static bool_t xdrrec_getlong(xdrs, lp) XDR *xdrs; long *lp; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); int32_t *buflp = (int32_t *)(void *)(rstrm->in_finger); int32_t mylong; /* first try the inline, fast case */ if ((rstrm->fbtbc >= sizeof(int32_t)) && (((long)rstrm->in_boundry - (long)buflp) >= sizeof(int32_t))) { *lp = (long)ntohl((u_int32_t)(*buflp)); rstrm->fbtbc -= sizeof(int32_t); rstrm->in_finger += sizeof(int32_t); } else { if (! xdrrec_getbytes(xdrs, (char *)(void *)&mylong, sizeof(int32_t))) return (FALSE); *lp = (long)ntohl((u_int32_t)mylong); } return (TRUE); } static bool_t xdrrec_putlong(xdrs, lp) XDR *xdrs; const long *lp; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); int32_t *dest_lp = ((int32_t *)(void *)(rstrm->out_finger)); if ((rstrm->out_finger += sizeof(int32_t)) > rstrm->out_boundry) { /* * this case should almost never happen so the code is * inefficient */ rstrm->out_finger -= sizeof(int32_t); rstrm->frag_sent = TRUE; if (! flush_out(rstrm, FALSE)) return (FALSE); dest_lp = ((int32_t *)(void *)(rstrm->out_finger)); rstrm->out_finger += sizeof(int32_t); } *dest_lp = (int32_t)htonl((u_int32_t)(*lp)); return (TRUE); } static bool_t /* must manage buffers, fragments, and records */ xdrrec_getbytes(xdrs, addr, len) XDR *xdrs; char *addr; u_int len; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); int current; while (len > 0) { current = (int)rstrm->fbtbc; if (current == 0) { if (rstrm->last_frag) return (FALSE); if (! set_input_fragment(rstrm)) return (FALSE); continue; } current = (len < current) ? len : current; if (! get_input_bytes(rstrm, addr, current)) return (FALSE); addr += current; rstrm->fbtbc -= current; len -= current; } return (TRUE); } static bool_t xdrrec_putbytes(xdrs, addr, len) XDR *xdrs; const char *addr; u_int len; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); size_t current; while (len > 0) { current = (size_t)((u_long)rstrm->out_boundry - (u_long)rstrm->out_finger); current = (len < current) ? len : current; memmove(rstrm->out_finger, addr, current); rstrm->out_finger += current; addr += current; len -= current; if (rstrm->out_finger == rstrm->out_boundry) { rstrm->frag_sent = TRUE; if (! 
flush_out(rstrm, FALSE)) return (FALSE); } } return (TRUE); } static u_int xdrrec_getpos(xdrs) XDR *xdrs; { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; off_t pos; pos = lseek((int)(u_long)rstrm->tcp_handle, (off_t)0, 1); if (pos == -1) pos = 0; switch (xdrs->x_op) { case XDR_ENCODE: pos += rstrm->out_finger - rstrm->out_base; break; case XDR_DECODE: pos -= rstrm->in_boundry - rstrm->in_finger; break; default: pos = (off_t) -1; break; } return ((u_int) pos); } static bool_t xdrrec_setpos(xdrs, pos) XDR *xdrs; u_int pos; { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; u_int currpos = xdrrec_getpos(xdrs); int delta = currpos - pos; char *newpos; if ((int)currpos != -1) switch (xdrs->x_op) { case XDR_ENCODE: newpos = rstrm->out_finger - delta; if ((newpos > (char *)(void *)(rstrm->frag_header)) && (newpos < rstrm->out_boundry)) { rstrm->out_finger = newpos; return (TRUE); } break; case XDR_DECODE: newpos = rstrm->in_finger - delta; if ((delta < (int)(rstrm->fbtbc)) && (newpos <= rstrm->in_boundry) && (newpos >= rstrm->in_base)) { rstrm->in_finger = newpos; rstrm->fbtbc -= delta; return (TRUE); } break; case XDR_FREE: break; } return (FALSE); } static int32_t * xdrrec_inline(xdrs, len) XDR *xdrs; u_int len; { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; int32_t *buf = NULL; switch (xdrs->x_op) { case XDR_ENCODE: if ((rstrm->out_finger + len) <= rstrm->out_boundry) { buf = (int32_t *)(void *)rstrm->out_finger; rstrm->out_finger += len; } break; case XDR_DECODE: if ((len <= rstrm->fbtbc) && ((rstrm->in_finger + len) <= rstrm->in_boundry)) { buf = (int32_t *)(void *)rstrm->in_finger; rstrm->fbtbc -= len; rstrm->in_finger += len; } break; case XDR_FREE: break; } return (buf); } static void xdrrec_destroy(xdrs) XDR *xdrs; { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; mem_free(rstrm->out_base, rstrm->sendsize); mem_free(rstrm->in_base, rstrm->recvsize); mem_free(rstrm, sizeof(RECSTREAM)); } /* * Exported routines to manage xdr records */ /* * Before reading (deserializing) from the stream, one should always call * this procedure to guarantee proper record alignment. */ bool_t xdrrec_skiprecord(xdrs) XDR *xdrs; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); enum xprt_stat xstat; if (rstrm->nonblock) { if (__xdrrec_getrec(xdrs, &xstat, FALSE)) { rstrm->fbtbc = 0; return TRUE; } if (rstrm->in_finger == rstrm->in_boundry && xstat == XPRT_MOREREQS) { rstrm->fbtbc = 0; return TRUE; } return FALSE; } while (rstrm->fbtbc > 0 || (! rstrm->last_frag)) { if (! skip_input_bytes(rstrm, rstrm->fbtbc)) return (FALSE); rstrm->fbtbc = 0; if ((! rstrm->last_frag) && (! set_input_fragment(rstrm))) return (FALSE); } rstrm->last_frag = FALSE; return (TRUE); } /* * Look ahead function. * Returns TRUE iff there is no more input in the buffer * after consuming the rest of the current record. */ bool_t xdrrec_eof(xdrs) XDR *xdrs; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); while (rstrm->fbtbc > 0 || (! rstrm->last_frag)) { if (! skip_input_bytes(rstrm, rstrm->fbtbc)) return (TRUE); rstrm->fbtbc = 0; if ((! rstrm->last_frag) && (! set_input_fragment(rstrm))) return (TRUE); } if (rstrm->in_finger == rstrm->in_boundry) return (TRUE); return (FALSE); } /* * The client must tell the package when an end-of-record has occurred. * The second parameter tells whether the record should be flushed to the * (output) tcp stream. (This lets the package support batched or * pipelined procedure calls.) TRUE => immediate flush to tcp connection.
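A sketch of the batching pattern this enables, assuming an xdrrec stream already created for encoding: every record but the last is closed with sendnow = FALSE, so its length header is patched in place and the buffer is flushed only once.

#include <rpc/types.h>
#include <rpc/xdr.h>

/* Serialize n integers as n records, flushing only after the last. */
static bool_t
send_batch(XDR *xdrs, int *vals, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!xdr_int(xdrs, &vals[i]))
			return (FALSE);
		if (!xdrrec_endofrecord(xdrs, (bool_t)(i == n - 1)))
			return (FALSE);
	}
	return (TRUE);
}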
*/ bool_t xdrrec_endofrecord(xdrs, sendnow) XDR *xdrs; bool_t sendnow; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); u_long len; /* fragment length */ if (sendnow || rstrm->frag_sent || ((u_long)rstrm->out_finger + sizeof(u_int32_t) >= (u_long)rstrm->out_boundry)) { rstrm->frag_sent = FALSE; return (flush_out(rstrm, TRUE)); } len = (u_long)(rstrm->out_finger) - (u_long)(rstrm->frag_header) - sizeof(u_int32_t); *(rstrm->frag_header) = htonl((u_int32_t)len | LAST_FRAG); rstrm->frag_header = (u_int32_t *)(void *)rstrm->out_finger; rstrm->out_finger += sizeof(u_int32_t); return (TRUE); } /* * Fill the stream buffer with a record for a non-blocking connection. * Return true if a record is available in the buffer, false if not. */ bool_t __xdrrec_getrec(xdrs, statp, expectdata) XDR *xdrs; enum xprt_stat *statp; bool_t expectdata; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); ssize_t n; int fraglen; if (!rstrm->in_haveheader) { n = rstrm->readit(rstrm->tcp_handle, rstrm->in_hdrp, (int)sizeof (rstrm->in_header) - rstrm->in_hdrlen); if (n == 0) { *statp = expectdata ? XPRT_DIED : XPRT_IDLE; return FALSE; } if (n < 0) { *statp = XPRT_DIED; return FALSE; } rstrm->in_hdrp += n; rstrm->in_hdrlen += n; if (rstrm->in_hdrlen < sizeof (rstrm->in_header)) { *statp = XPRT_MOREREQS; return FALSE; } rstrm->in_header = ntohl(rstrm->in_header); fraglen = (int)(rstrm->in_header & ~LAST_FRAG); if (fraglen == 0 || fraglen > rstrm->in_maxrec || (rstrm->in_reclen + fraglen) > rstrm->in_maxrec) { *statp = XPRT_DIED; return FALSE; } rstrm->in_reclen += fraglen; if (rstrm->in_reclen > rstrm->recvsize) realloc_stream(rstrm, rstrm->in_reclen); if (rstrm->in_header & LAST_FRAG) { rstrm->in_header &= ~LAST_FRAG; rstrm->last_frag = TRUE; } /* * We can only reasonably expect to read once from a * non-blocking stream. Reading the fragment header * may have drained the stream. */ expectdata = FALSE; } n = rstrm->readit(rstrm->tcp_handle, rstrm->in_base + rstrm->in_received, (rstrm->in_reclen - rstrm->in_received)); if (n < 0) { *statp = XPRT_DIED; return FALSE; } if (n == 0) { *statp = expectdata ? XPRT_DIED : XPRT_IDLE; return FALSE; } rstrm->in_received += n; if (rstrm->in_received == rstrm->in_reclen) { rstrm->in_haveheader = FALSE; rstrm->in_hdrp = (char *)(void *)&rstrm->in_header; rstrm->in_hdrlen = 0; if (rstrm->last_frag) { rstrm->fbtbc = rstrm->in_reclen; rstrm->in_boundry = rstrm->in_base + rstrm->in_reclen; rstrm->in_finger = rstrm->in_base; rstrm->in_reclen = rstrm->in_received = 0; *statp = XPRT_MOREREQS; return TRUE; } } *statp = XPRT_MOREREQS; return FALSE; } bool_t __xdrrec_setnonblock(xdrs, maxrec) XDR *xdrs; int maxrec; { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); rstrm->nonblock = TRUE; if (maxrec == 0) maxrec = rstrm->recvsize; rstrm->in_maxrec = maxrec; return TRUE; } /* * Internal useful routines */ static bool_t flush_out(rstrm, eor) RECSTREAM *rstrm; bool_t eor; { u_int32_t eormask = (eor == TRUE) ? LAST_FRAG : 0; u_int32_t len = (u_int32_t)((u_long)(rstrm->out_finger) - (u_long)(rstrm->frag_header) - sizeof(u_int32_t)); *(rstrm->frag_header) = htonl(len | eormask); len = (u_int32_t)((u_long)(rstrm->out_finger) - (u_long)(rstrm->out_base)); if ((*(rstrm->writeit))(rstrm->tcp_handle, rstrm->out_base, (int)len) != (int)len) return (FALSE); rstrm->frag_header = (u_int32_t *)(void *)rstrm->out_base; rstrm->out_finger = (char *)rstrm->out_base + sizeof(u_int32_t); return (TRUE); } static bool_t /* knows nothing about records! 
Only about input buffers */ fill_input_buf(rstrm) RECSTREAM *rstrm; { char *where; u_int32_t i; int len; if (rstrm->nonblock) return FALSE; where = rstrm->in_base; i = (u_int32_t)((u_long)rstrm->in_boundry % BYTES_PER_XDR_UNIT); where += i; len = (u_int32_t)(rstrm->in_size - i); if ((len = (*(rstrm->readit))(rstrm->tcp_handle, where, len)) == -1) return (FALSE); rstrm->in_finger = where; where += len; rstrm->in_boundry = where; return (TRUE); } static bool_t /* knows nothing about records! Only about input buffers */ get_input_bytes(rstrm, addr, len) RECSTREAM *rstrm; char *addr; int len; { size_t current; if (rstrm->nonblock) { if (len > (int)(rstrm->in_boundry - rstrm->in_finger)) return FALSE; memcpy(addr, rstrm->in_finger, (size_t)len); rstrm->in_finger += len; return TRUE; } while (len > 0) { current = (size_t)((long)rstrm->in_boundry - (long)rstrm->in_finger); if (current == 0) { if (! fill_input_buf(rstrm)) return (FALSE); continue; } current = (len < current) ? len : current; memmove(addr, rstrm->in_finger, current); rstrm->in_finger += current; addr += current; len -= current; } return (TRUE); } static bool_t /* next two bytes of the input stream are treated as a header */ set_input_fragment(rstrm) RECSTREAM *rstrm; { u_int32_t header; if (rstrm->nonblock) return FALSE; if (! get_input_bytes(rstrm, (char *)(void *)&header, sizeof(header))) return (FALSE); header = ntohl(header); rstrm->last_frag = ((header & LAST_FRAG) == 0) ? FALSE : TRUE; /* * Sanity check. Try not to accept wildly incorrect * record sizes. Unfortunately, the only record size * we can positively identify as being 'wildly incorrect' * is zero. Ridiculously large record sizes may look wrong, * but we don't have any way to be certain that they aren't * what the client actually intended to send us. */ if (header == 0) return(FALSE); rstrm->fbtbc = header & (~LAST_FRAG); return (TRUE); } static bool_t /* consumes input bytes; knows nothing about records! */ skip_input_bytes(rstrm, cnt) RECSTREAM *rstrm; long cnt; { u_int32_t current; while (cnt > 0) { current = (size_t)((long)rstrm->in_boundry - (long)rstrm->in_finger); if (current == 0) { if (! fill_input_buf(rstrm)) return (FALSE); continue; } current = (u_int32_t)((cnt < current) ? cnt : current); rstrm->in_finger += current; cnt -= current; } return (TRUE); } static u_int fix_buf_size(s) u_int s; { if (s < 100) s = 4000; return (RNDUP(s)); } /* * Reallocate the input buffer for a non-block stream. */ static bool_t realloc_stream(rstrm, size) RECSTREAM *rstrm; int size; { ptrdiff_t diff; char *buf; if (size > rstrm->recvsize) { buf = realloc(rstrm->in_base, (size_t)size); if (buf == NULL) return FALSE; diff = buf - rstrm->in_base; rstrm->in_finger += diff; rstrm->in_base = buf; rstrm->in_boundry = buf + size; rstrm->recvsize = size; rstrm->in_size = size; } return TRUE; } Index: head/sys/amd64/pci/pci_cfgreg.c =================================================================== --- head/sys/amd64/pci/pci_cfgreg.c (revision 258779) +++ head/sys/amd64/pci/pci_cfgreg.c (revision 258780) @@ -1,370 +1,370 @@ /*- * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include enum { CFGMECH_NONE = 0, CFGMECH_1, CFGMECH_PCIE, }; static uint32_t pci_docfgregread(int bus, int slot, int func, int reg, int bytes); static int pciereg_cfgread(int bus, unsigned slot, unsigned func, unsigned reg, unsigned bytes); static void pciereg_cfgwrite(int bus, unsigned slot, unsigned func, unsigned reg, int data, unsigned bytes); static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes); static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes); SYSCTL_DECL(_hw_pci); static int cfgmech; static vm_offset_t pcie_base; static int pcie_minbus, pcie_maxbus; static uint32_t pcie_badslots; static struct mtx pcicfg_mtx; static int mcfg_enable = 1; TUNABLE_INT("hw.pci.mcfg", &mcfg_enable); SYSCTL_INT(_hw_pci, OID_AUTO, mcfg, CTLFLAG_RDTUN, &mcfg_enable, 0, "Enable support for PCI-e memory mapped config access"); /* * Initialise access to PCI configuration space */ int pci_cfgregopen(void) { static int once = 0; uint64_t pciebar; uint16_t did, vid; if (!once) { mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); once = 1; } if (cfgmech != CFGMECH_NONE) return (1); cfgmech = CFGMECH_1; /* * Grope around in the PCI config space to see if this is a * chipset that is capable of doing memory-mapped config cycles. * This also implies that it can do PCIe extended config cycles. */ /* Check for supported chipsets */ vid = pci_cfgregread(0, 0, 0, PCIR_VENDOR, 2); did = pci_cfgregread(0, 0, 0, PCIR_DEVICE, 2); switch (vid) { case 0x8086: switch (did) { case 0x3590: case 0x3592: /* Intel 7520 or 7320 */ pciebar = pci_cfgregread(0, 0, 0, 0xce, 2) << 16; pcie_cfgregopen(pciebar, 0, 255); break; case 0x2580: case 0x2584: case 0x2590: /* Intel 915, 925, or 915GM */ pciebar = pci_cfgregread(0, 0, 0, 0x48, 4); pcie_cfgregopen(pciebar, 0, 255); break; } } return (1); } static uint32_t pci_docfgregread(int bus, int slot, int func, int reg, int bytes) { if (cfgmech == CFGMECH_PCIE && (bus >= pcie_minbus && bus <= pcie_maxbus) && (bus != 0 || !(1 << slot & pcie_badslots))) return (pciereg_cfgread(bus, slot, func, reg, bytes)); else return (pcireg_cfgread(bus, slot, func, reg, bytes)); } /* * Read configuration space register */ u_int32_t pci_cfgregread(int bus, int slot, int func, int reg, int bytes) { uint32_t line; /* * Some BIOS writers seem to want to ignore the spec and put * 0 in the intline rather than 255 to indicate none. 
Some use * numbers in the range 128-254 to indicate something strange and * apparently undocumented anywhere. Assume these are completely bogus * and map them to 255, which the rest of the PCI code recognizes as * an invalid IRQ. */ if (reg == PCIR_INTLINE && bytes == 1) { line = pci_docfgregread(bus, slot, func, PCIR_INTLINE, 1); if (line == 0 || line >= 128) line = PCI_INVALID_IRQ; return (line); } return (pci_docfgregread(bus, slot, func, reg, bytes)); } /* * Write configuration space register */ void pci_cfgregwrite(int bus, int slot, int func, int reg, u_int32_t data, int bytes) { if (cfgmech == CFGMECH_PCIE && (bus >= pcie_minbus && bus <= pcie_maxbus) && (bus != 0 || !(1 << slot & pcie_badslots))) pciereg_cfgwrite(bus, slot, func, reg, data, bytes); else pcireg_cfgwrite(bus, slot, func, reg, data, bytes); } /* * Configuration space access using direct register operations */ /* enable configuration space accesses and return data port address */ static int pci_cfgenable(unsigned bus, unsigned slot, unsigned func, int reg, int bytes) { int dataport = 0; if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX && (unsigned)reg <= PCI_REGMAX && bytes != 3 && (unsigned)bytes <= 4 && (reg & (bytes - 1)) == 0) { - outl(CONF1_ADDR_PORT, (1 << 31) | (bus << 16) | (slot << 11) + outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11) | (func << 8) | (reg & ~0x03)); dataport = CONF1_DATA_PORT + (reg & 0x03); } return (dataport); } /* disable configuration space accesses */ static void pci_cfgdisable(void) { /* * Do nothing. Writing a 0 to the address port can apparently * confuse some bridges and cause spurious access failures. */ } static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes) { int data = -1; int port; mtx_lock_spin(&pcicfg_mtx); port = pci_cfgenable(bus, slot, func, reg, bytes); if (port != 0) { switch (bytes) { case 1: data = inb(port); break; case 2: data = inw(port); break; case 4: data = inl(port); break; } pci_cfgdisable(); } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes) { int port; mtx_lock_spin(&pcicfg_mtx); port = pci_cfgenable(bus, slot, func, reg, bytes); if (port != 0) { switch (bytes) { case 1: outb(port, data); break; case 2: outw(port, data); break; case 4: outl(port, data); break; } pci_cfgdisable(); } mtx_unlock_spin(&pcicfg_mtx); } int pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus) { uint32_t val1, val2; int slot; if (!mcfg_enable) return (0); if (minbus != 0) return (0); if (bootverbose) printf("PCIe: Memory Mapped configuration base @ 0x%lx\n", base); /* XXX: We should make sure this really fits into the direct map. */ pcie_base = (vm_offset_t)pmap_mapdev(base, (maxbus + 1) << 20); pcie_minbus = minbus; pcie_maxbus = maxbus; cfgmech = CFGMECH_PCIE; /* * On some AMD systems, some of the devices on bus 0 are * inaccessible using memory-mapped PCI config access. Walk * bus 0 looking for such devices. For these devices, we will * fall back to using type 1 config access instead.
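The two address formats involved, side by side in a standalone sketch (conf1_addr and ecam_off are illustrative helpers, not kernel functions): the type 1 mechanism writes an enable bit plus bus/slot/function/register to port 0xcf8, while the memory-mapped path gives each function a 4 KB window, each slot 32 KB and each bus 1 MB, which is why pcie_cfgregopen() above maps (maxbus + 1) << 20 bytes.

#include <stdio.h>
#include <stdint.h>

/* Type 1 address word: enable bit 31, bus 23:16, slot 15:11,
 * function 10:8, dword-aligned register 7:2.  The enable bit is why
 * 1U matters: 1 << 31 would shift a signed int into its sign bit. */
static uint32_t
conf1_addr(unsigned bus, unsigned slot, unsigned func, unsigned reg)
{
	return ((1U << 31) | (bus << 16) | (slot << 11) |
	    (func << 8) | (reg & ~0x03u));
}

/* Memory-mapped (ECAM) offset, as in the PCIE_VADDR() macro below. */
static uint64_t
ecam_off(unsigned bus, unsigned slot, unsigned func, unsigned reg)
{
	return (((uint64_t)(bus & 0xff) << 20) | ((slot & 0x1f) << 15) |
	    ((func & 0x7) << 12) | (reg & 0xfff));
}

int
main(void)
{
	printf("0x%08x 0x%06llx\n", conf1_addr(0, 3, 0, 0x10),
	    (unsigned long long)ecam_off(1, 2, 0, 0x100));
	return (0);
}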
*/ if (pci_cfgregopen() != 0) { for (slot = 0; slot <= PCI_SLOTMAX; slot++) { val1 = pcireg_cfgread(0, slot, 0, 0, 4); if (val1 == 0xffffffff) continue; val2 = pciereg_cfgread(0, slot, 0, 0, 4); if (val2 != val1) pcie_badslots |= (1 << slot); } } return (1); } #define PCIE_VADDR(base, reg, bus, slot, func) \ ((base) + \ ((((bus) & 0xff) << 20) | \ (((slot) & 0x1f) << 15) | \ (((func) & 0x7) << 12) | \ ((reg) & 0xfff))) /* * AMD BIOS And Kernel Developer's Guides for CPU families starting with 10h * have a requirement that all accesses to the memory mapped PCI configuration * space are done using AX class of registers. * Since other vendors do not currently have any contradicting requirements * the AMD access pattern is applied universally. */ static int pciereg_cfgread(int bus, unsigned slot, unsigned func, unsigned reg, unsigned bytes) { vm_offset_t va; int data = -1; if (bus < pcie_minbus || bus > pcie_maxbus || slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) return (-1); va = PCIE_VADDR(pcie_base, reg, bus, slot, func); switch (bytes) { case 4: __asm("movl %1, %0" : "=a" (data) : "m" (*(volatile uint32_t *)va)); break; case 2: __asm("movzwl %1, %0" : "=a" (data) : "m" (*(volatile uint16_t *)va)); break; case 1: __asm("movzbl %1, %0" : "=a" (data) : "m" (*(volatile uint8_t *)va)); break; } return (data); } static void pciereg_cfgwrite(int bus, unsigned slot, unsigned func, unsigned reg, int data, unsigned bytes) { vm_offset_t va; if (bus < pcie_minbus || bus > pcie_maxbus || slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) return; va = PCIE_VADDR(pcie_base, reg, bus, slot, func); switch (bytes) { case 4: __asm("movl %1, %0" : "=m" (*(volatile uint32_t *)va) : "a" (data)); break; case 2: __asm("movw %1, %0" : "=m" (*(volatile uint16_t *)va) : "a" ((uint16_t)data)); break; case 1: __asm("movb %1, %0" : "=m" (*(volatile uint8_t *)va) : "a" ((uint8_t)data)); break; } } Index: head/sys/amd64/vmm/intel/vmcs.h =================================================================== --- head/sys/amd64/vmm/intel/vmcs.h (revision 258779) +++ head/sys/amd64/vmm/intel/vmcs.h (revision 258780) @@ -1,349 +1,349 @@ /*- * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _VMCS_H_ #define _VMCS_H_ #ifdef _KERNEL struct vmcs { uint32_t identifier; uint32_t abort_code; char _impl_specific[PAGE_SIZE - sizeof(uint32_t) * 2]; }; CTASSERT(sizeof(struct vmcs) == PAGE_SIZE); /* MSR save region is composed of an array of 'struct msr_entry' */ struct msr_entry { uint32_t index; uint32_t reserved; uint64_t val; }; int vmcs_set_msr_save(struct vmcs *vmcs, u_long g_area, u_int g_count); int vmcs_set_defaults(struct vmcs *vmcs, u_long host_rip, u_long host_rsp, uint64_t eptp, uint32_t pinbased_ctls, uint32_t procbased_ctls, uint32_t procbased_ctls2, uint32_t exit_ctls, uint32_t entry_ctls, u_long msr_bitmap, uint16_t vpid); int vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *rv); int vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val); int vmcs_getdesc(struct vmcs *vmcs, int ident, struct seg_desc *desc); int vmcs_setdesc(struct vmcs *vmcs, int ident, struct seg_desc *desc); uint64_t vmcs_read(uint32_t encoding); #define vmexit_instruction_length() vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH) #define vmcs_guest_rip() vmcs_read(VMCS_GUEST_RIP) #define vmcs_instruction_error() vmcs_read(VMCS_INSTRUCTION_ERROR) #define vmcs_exit_reason() (vmcs_read(VMCS_EXIT_REASON) & 0xffff) #define vmcs_exit_qualification() vmcs_read(VMCS_EXIT_QUALIFICATION) #define vmcs_guest_cr3() vmcs_read(VMCS_GUEST_CR3) #define vmcs_gpa() vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS) #define vmcs_gla() vmcs_read(VMCS_GUEST_LINEAR_ADDRESS) #define vmcs_idt_vectoring_info() vmcs_read(VMCS_IDT_VECTORING_INFO) #define vmcs_idt_vectoring_err() vmcs_read(VMCS_IDT_VECTORING_ERROR) #endif /* _KERNEL */ #define VMCS_INITIAL 0xffffffffffffffff #define VMCS_IDENT(encoding) ((encoding) | 0x80000000) /* * VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B. 
*/ #define VMCS_INVALID_ENCODING 0xffffffff /* 16-bit control fields */ #define VMCS_VPID 0x00000000 /* 16-bit guest-state fields */ #define VMCS_GUEST_ES_SELECTOR 0x00000800 #define VMCS_GUEST_CS_SELECTOR 0x00000802 #define VMCS_GUEST_SS_SELECTOR 0x00000804 #define VMCS_GUEST_DS_SELECTOR 0x00000806 #define VMCS_GUEST_FS_SELECTOR 0x00000808 #define VMCS_GUEST_GS_SELECTOR 0x0000080A #define VMCS_GUEST_LDTR_SELECTOR 0x0000080C #define VMCS_GUEST_TR_SELECTOR 0x0000080E /* 16-bit host-state fields */ #define VMCS_HOST_ES_SELECTOR 0x00000C00 #define VMCS_HOST_CS_SELECTOR 0x00000C02 #define VMCS_HOST_SS_SELECTOR 0x00000C04 #define VMCS_HOST_DS_SELECTOR 0x00000C06 #define VMCS_HOST_FS_SELECTOR 0x00000C08 #define VMCS_HOST_GS_SELECTOR 0x00000C0A #define VMCS_HOST_TR_SELECTOR 0x00000C0C /* 64-bit control fields */ #define VMCS_IO_BITMAP_A 0x00002000 #define VMCS_IO_BITMAP_B 0x00002002 #define VMCS_MSR_BITMAP 0x00002004 #define VMCS_EXIT_MSR_STORE 0x00002006 #define VMCS_EXIT_MSR_LOAD 0x00002008 #define VMCS_ENTRY_MSR_LOAD 0x0000200A #define VMCS_EXECUTIVE_VMCS 0x0000200C #define VMCS_TSC_OFFSET 0x00002010 #define VMCS_VIRTUAL_APIC 0x00002012 #define VMCS_APIC_ACCESS 0x00002014 #define VMCS_EPTP 0x0000201A /* 64-bit read-only fields */ #define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400 /* 64-bit guest-state fields */ #define VMCS_LINK_POINTER 0x00002800 #define VMCS_GUEST_IA32_DEBUGCTL 0x00002802 #define VMCS_GUEST_IA32_PAT 0x00002804 #define VMCS_GUEST_IA32_EFER 0x00002806 #define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808 #define VMCS_GUEST_PDPTE0 0x0000280A #define VMCS_GUEST_PDPTE1 0x0000280C #define VMCS_GUEST_PDPTE2 0x0000280E #define VMCS_GUEST_PDPTE3 0x00002810 /* 64-bit host-state fields */ #define VMCS_HOST_IA32_PAT 0x00002C00 #define VMCS_HOST_IA32_EFER 0x00002C02 #define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04 /* 32-bit control fields */ #define VMCS_PIN_BASED_CTLS 0x00004000 #define VMCS_PRI_PROC_BASED_CTLS 0x00004002 #define VMCS_EXCEPTION_BITMAP 0x00004004 #define VMCS_PF_ERROR_MASK 0x00004006 #define VMCS_PF_ERROR_MATCH 0x00004008 #define VMCS_CR3_TARGET_COUNT 0x0000400A #define VMCS_EXIT_CTLS 0x0000400C #define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E #define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010 #define VMCS_ENTRY_CTLS 0x00004012 #define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014 #define VMCS_ENTRY_INTR_INFO 0x00004016 #define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018 #define VMCS_ENTRY_INST_LENGTH 0x0000401A #define VMCS_TPR_THRESHOLD 0x0000401C #define VMCS_SEC_PROC_BASED_CTLS 0x0000401E #define VMCS_PLE_GAP 0x00004020 #define VMCS_PLE_WINDOW 0x00004022 /* 32-bit read-only data fields */ #define VMCS_INSTRUCTION_ERROR 0x00004400 #define VMCS_EXIT_REASON 0x00004402 #define VMCS_EXIT_INTERRUPTION_INFO 0x00004404 #define VMCS_EXIT_INTERRUPTION_ERROR 0x00004406 #define VMCS_IDT_VECTORING_INFO 0x00004408 #define VMCS_IDT_VECTORING_ERROR 0x0000440A #define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C #define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E /* 32-bit guest-state fields */ #define VMCS_GUEST_ES_LIMIT 0x00004800 #define VMCS_GUEST_CS_LIMIT 0x00004802 #define VMCS_GUEST_SS_LIMIT 0x00004804 #define VMCS_GUEST_DS_LIMIT 0x00004806 #define VMCS_GUEST_FS_LIMIT 0x00004808 #define VMCS_GUEST_GS_LIMIT 0x0000480A #define VMCS_GUEST_LDTR_LIMIT 0x0000480C #define VMCS_GUEST_TR_LIMIT 0x0000480E #define VMCS_GUEST_GDTR_LIMIT 0x00004810 #define VMCS_GUEST_IDTR_LIMIT 0x00004812 #define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814 #define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816 #define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818 
#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A #define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C #define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E #define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820 #define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822 #define VMCS_GUEST_INTERRUPTIBILITY 0x00004824 #define VMCS_GUEST_ACTIVITY 0x00004826 #define VMCS_GUEST_SMBASE 0x00004828 #define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A #define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E /* 32-bit host state fields */ #define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00 /* Natural Width control fields */ #define VMCS_CR0_MASK 0x00006000 #define VMCS_CR4_MASK 0x00006002 #define VMCS_CR0_SHADOW 0x00006004 #define VMCS_CR4_SHADOW 0x00006006 #define VMCS_CR3_TARGET0 0x00006008 #define VMCS_CR3_TARGET1 0x0000600A #define VMCS_CR3_TARGET2 0x0000600C #define VMCS_CR3_TARGET3 0x0000600E /* Natural Width read-only fields */ #define VMCS_EXIT_QUALIFICATION 0x00006400 #define VMCS_IO_RCX 0x00006402 #define VMCS_IO_RSI 0x00006404 #define VMCS_IO_RDI 0x00006406 #define VMCS_IO_RIP 0x00006408 #define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A /* Natural Width guest-state fields */ #define VMCS_GUEST_CR0 0x00006800 #define VMCS_GUEST_CR3 0x00006802 #define VMCS_GUEST_CR4 0x00006804 #define VMCS_GUEST_ES_BASE 0x00006806 #define VMCS_GUEST_CS_BASE 0x00006808 #define VMCS_GUEST_SS_BASE 0x0000680A #define VMCS_GUEST_DS_BASE 0x0000680C #define VMCS_GUEST_FS_BASE 0x0000680E #define VMCS_GUEST_GS_BASE 0x00006810 #define VMCS_GUEST_LDTR_BASE 0x00006812 #define VMCS_GUEST_TR_BASE 0x00006814 #define VMCS_GUEST_GDTR_BASE 0x00006816 #define VMCS_GUEST_IDTR_BASE 0x00006818 #define VMCS_GUEST_DR7 0x0000681A #define VMCS_GUEST_RSP 0x0000681C #define VMCS_GUEST_RIP 0x0000681E #define VMCS_GUEST_RFLAGS 0x00006820 #define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822 #define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824 #define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826 /* Natural Width host-state fields */ #define VMCS_HOST_CR0 0x00006C00 #define VMCS_HOST_CR3 0x00006C02 #define VMCS_HOST_CR4 0x00006C04 #define VMCS_HOST_FS_BASE 0x00006C06 #define VMCS_HOST_GS_BASE 0x00006C08 #define VMCS_HOST_TR_BASE 0x00006C0A #define VMCS_HOST_GDTR_BASE 0x00006C0C #define VMCS_HOST_IDTR_BASE 0x00006C0E #define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10 #define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12 #define VMCS_HOST_RSP 0x00006C14 #define VMCS_HOST_RIP 0x00006c16 /* * VM instruction error numbers */ #define VMRESUME_WITH_NON_LAUNCHED_VMCS 5 /* * VMCS exit reasons */ #define EXIT_REASON_EXCEPTION 0 #define EXIT_REASON_EXT_INTR 1 #define EXIT_REASON_TRIPLE_FAULT 2 #define EXIT_REASON_INIT 3 #define EXIT_REASON_SIPI 4 #define EXIT_REASON_IO_SMI 5 #define EXIT_REASON_SMI 6 #define EXIT_REASON_INTR_WINDOW 7 #define EXIT_REASON_NMI_WINDOW 8 #define EXIT_REASON_TASK_SWITCH 9 #define EXIT_REASON_CPUID 10 #define EXIT_REASON_GETSEC 11 #define EXIT_REASON_HLT 12 #define EXIT_REASON_INVD 13 #define EXIT_REASON_INVLPG 14 #define EXIT_REASON_RDPMC 15 #define EXIT_REASON_RDTSC 16 #define EXIT_REASON_RSM 17 #define EXIT_REASON_VMCALL 18 #define EXIT_REASON_VMCLEAR 19 #define EXIT_REASON_VMLAUNCH 20 #define EXIT_REASON_VMPTRLD 21 #define EXIT_REASON_VMPTRST 22 #define EXIT_REASON_VMREAD 23 #define EXIT_REASON_VMRESUME 24 #define EXIT_REASON_VMWRITE 25 #define EXIT_REASON_VMXOFF 26 #define EXIT_REASON_VMXON 27 #define EXIT_REASON_CR_ACCESS 28 #define EXIT_REASON_DR_ACCESS 29 #define EXIT_REASON_INOUT 30 #define EXIT_REASON_RDMSR 31 #define EXIT_REASON_WRMSR 32 #define EXIT_REASON_INVAL_VMCS 33 #define 
EXIT_REASON_INVAL_MSR 34 #define EXIT_REASON_MWAIT 36 #define EXIT_REASON_MTF 37 #define EXIT_REASON_MONITOR 39 #define EXIT_REASON_PAUSE 40 #define EXIT_REASON_MCE 41 #define EXIT_REASON_TPR 43 #define EXIT_REASON_APIC 44 #define EXIT_REASON_GDTR_IDTR 46 #define EXIT_REASON_LDTR_TR 47 #define EXIT_REASON_EPT_FAULT 48 #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_INVEPT 50 #define EXIT_REASON_RDTSCP 51 #define EXIT_REASON_VMX_PREEMPT 52 #define EXIT_REASON_INVVPID 53 #define EXIT_REASON_WBINVD 54 #define EXIT_REASON_XSETBV 55 /* * VMCS interrupt information fields */ #define VMCS_INTERRUPTION_INFO_VALID (1U << 31) #define VMCS_INTERRUPTION_INFO_HW_INTR (0 << 8) #define VMCS_INTERRUPTION_INFO_NMI (2 << 8) /* * VMCS IDT-Vectoring information fields */ -#define VMCS_IDT_VEC_VALID (1 << 31) +#define VMCS_IDT_VEC_VALID (1U << 31) #define VMCS_IDT_VEC_ERRCODE_VALID (1 << 11) /* * VMCS Guest interruptibility field */ #define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1 << 0) #define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1 << 1) #define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1 << 2) #define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1 << 3) /* * Exit qualification for EXIT_REASON_INVAL_VMCS */ #define EXIT_QUAL_NMI_WHILE_STI_BLOCKING 3 /* * Exit qualification for EPT violation */ #define EPT_VIOLATION_DATA_READ (1UL << 0) #define EPT_VIOLATION_DATA_WRITE (1UL << 1) #define EPT_VIOLATION_INST_FETCH (1UL << 2) #define EPT_VIOLATION_GPA_READABLE (1UL << 3) #define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4) #define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5) #define EPT_VIOLATION_GLA_VALID (1UL << 7) #define EPT_VIOLATION_XLAT_VALID (1UL << 8) #endif Index: head/sys/amd64/vmm/intel/vmx_controls.h =================================================================== --- head/sys/amd64/vmm/intel/vmx_controls.h (revision 258779) +++ head/sys/amd64/vmm/intel/vmx_controls.h (revision 258780) @@ -1,93 +1,93 @@ /*- * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _VMX_CONTROLS_H_ #define _VMX_CONTROLS_H_ /* Pin-Based VM-Execution Controls */ #define PINBASED_EXTINT_EXITING (1 << 0) #define PINBASED_NMI_EXITING (1 << 3) #define PINBASED_VIRTUAL_NMI (1 << 5) #define PINBASED_PREMPTION_TIMER (1 << 6) /* Primary Processor-Based VM-Execution Controls */ #define PROCBASED_INT_WINDOW_EXITING (1 << 2) #define PROCBASED_TSC_OFFSET (1 << 3) #define PROCBASED_HLT_EXITING (1 << 7) #define PROCBASED_INVLPG_EXITING (1 << 9) #define PROCBASED_MWAIT_EXITING (1 << 10) #define PROCBASED_RDPMC_EXITING (1 << 11) #define PROCBASED_RDTSC_EXITING (1 << 12) #define PROCBASED_CR3_LOAD_EXITING (1 << 15) #define PROCBASED_CR3_STORE_EXITING (1 << 16) #define PROCBASED_CR8_LOAD_EXITING (1 << 19) #define PROCBASED_CR8_STORE_EXITING (1 << 20) #define PROCBASED_USE_TPR_SHADOW (1 << 21) #define PROCBASED_NMI_WINDOW_EXITING (1 << 22) #define PROCBASED_MOV_DR_EXITING (1 << 23) #define PROCBASED_IO_EXITING (1 << 24) #define PROCBASED_IO_BITMAPS (1 << 25) #define PROCBASED_MTF (1 << 27) #define PROCBASED_MSR_BITMAPS (1 << 28) #define PROCBASED_MONITOR_EXITING (1 << 29) #define PROCBASED_PAUSE_EXITING (1 << 30) -#define PROCBASED_SECONDARY_CONTROLS (1 << 31) +#define PROCBASED_SECONDARY_CONTROLS (1U << 31) /* Secondary Processor-Based VM-Execution Controls */ #define PROCBASED2_VIRTUALIZE_APIC (1 << 0) #define PROCBASED2_ENABLE_EPT (1 << 1) #define PROCBASED2_DESC_TABLE_EXITING (1 << 2) #define PROCBASED2_ENABLE_RDTSCP (1 << 3) #define PROCBASED2_VIRTUALIZE_X2APIC (1 << 4) #define PROCBASED2_ENABLE_VPID (1 << 5) #define PROCBASED2_WBINVD_EXITING (1 << 6) #define PROCBASED2_UNRESTRICTED_GUEST (1 << 7) #define PROCBASED2_PAUSE_LOOP_EXITING (1 << 10) #define PROCBASED2_ENABLE_INVPCID (1 << 12) /* VM Exit Controls */ #define VM_EXIT_SAVE_DEBUG_CONTROLS (1 << 2) #define VM_EXIT_HOST_LMA (1 << 9) #define VM_EXIT_LOAD_PERF_GLOBAL_CTRL (1 << 12) #define VM_EXIT_ACKNOWLEDGE_INTERRUPT (1 << 15) #define VM_EXIT_SAVE_PAT (1 << 18) #define VM_EXIT_LOAD_PAT (1 << 19) #define VM_EXIT_SAVE_EFER (1 << 20) #define VM_EXIT_LOAD_EFER (1 << 21) #define VM_EXIT_SAVE_PREEMPTION_TIMER (1 << 22) /* VM Entry Controls */ #define VM_ENTRY_LOAD_DEBUG_CONTROLS (1 << 2) #define VM_ENTRY_GUEST_LMA (1 << 9) #define VM_ENTRY_INTO_SMM (1 << 10) #define VM_ENTRY_DEACTIVATE_DUAL_MONITOR (1 << 11) #define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL (1 << 13) #define VM_ENTRY_LOAD_PAT (1 << 14) #define VM_ENTRY_LOAD_EFER (1 << 15) #endif Index: head/sys/amd64/vmm/intel/vtd.c =================================================================== --- head/sys/amd64/vmm/intel/vtd.c (revision 258779) +++ head/sys/amd64/vmm/intel/vtd.c (revision 258780) @@ -1,687 +1,687 @@ /*- * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "io/iommu.h" /* * Documented in the "Intel Virtualization Technology for Directed I/O", * Architecture Spec, September 2008. */ /* Section 10.4 "Register Descriptions" */ struct vtdmap { volatile uint32_t version; volatile uint32_t res0; volatile uint64_t cap; volatile uint64_t ext_cap; volatile uint32_t gcr; volatile uint32_t gsr; volatile uint64_t rta; volatile uint64_t ccr; }; #define VTD_CAP_SAGAW(cap) (((cap) >> 8) & 0x1F) #define VTD_CAP_ND(cap) ((cap) & 0x7) #define VTD_CAP_CM(cap) (((cap) >> 7) & 0x1) #define VTD_CAP_SPS(cap) (((cap) >> 34) & 0xF) #define VTD_CAP_RWBF(cap) (((cap) >> 4) & 0x1) #define VTD_ECAP_DI(ecap) (((ecap) >> 2) & 0x1) #define VTD_ECAP_COHERENCY(ecap) ((ecap) & 0x1) #define VTD_ECAP_IRO(ecap) (((ecap) >> 8) & 0x3FF) #define VTD_GCR_WBF (1 << 27) #define VTD_GCR_SRTP (1 << 30) -#define VTD_GCR_TE (1 << 31) +#define VTD_GCR_TE (1U << 31) #define VTD_GSR_WBFS (1 << 27) #define VTD_GSR_RTPS (1 << 30) -#define VTD_GSR_TES (1 << 31) +#define VTD_GSR_TES (1U << 31) #define VTD_CCR_ICC (1UL << 63) /* invalidate context cache */ #define VTD_CCR_CIRG_GLOBAL (1UL << 61) /* global invalidation */ #define VTD_IIR_IVT (1UL << 63) /* invalidation IOTLB */ #define VTD_IIR_IIRG_GLOBAL (1ULL << 60) /* global IOTLB invalidation */ #define VTD_IIR_IIRG_DOMAIN (2ULL << 60) /* domain IOTLB invalidation */ #define VTD_IIR_IIRG_PAGE (3ULL << 60) /* page IOTLB invalidation */ #define VTD_IIR_DRAIN_READS (1ULL << 49) /* drain pending DMA reads */ #define VTD_IIR_DRAIN_WRITES (1ULL << 48) /* drain pending DMA writes */ #define VTD_IIR_DOMAIN_P 32 #define VTD_ROOT_PRESENT 0x1 #define VTD_CTX_PRESENT 0x1 #define VTD_CTX_TT_ALL (1UL << 2) #define VTD_PTE_RD (1UL << 0) #define VTD_PTE_WR (1UL << 1) #define VTD_PTE_SUPERPAGE (1UL << 7) #define VTD_PTE_ADDR_M (0x000FFFFFFFFFF000UL) struct domain { uint64_t *ptp; /* first level page table page */ int pt_levels; /* number of page table levels */ int addrwidth; /* 'AW' field in context entry */ int spsmask; /* supported super page sizes */ u_int id; /* domain id */ vm_paddr_t maxaddr; /* highest address to be mapped */ SLIST_ENTRY(domain) next; }; static SLIST_HEAD(, domain) domhead; #define DRHD_MAX_UNITS 8 static int drhd_num; static struct vtdmap *vtdmaps[DRHD_MAX_UNITS]; static int max_domains; typedef int (*drhd_ident_func_t)(void); static uint64_t root_table[PAGE_SIZE / sizeof(uint64_t)] __aligned(4096); static uint64_t ctx_tables[256][PAGE_SIZE / sizeof(uint64_t)] __aligned(4096); static MALLOC_DEFINE(M_VTD, "vtd", "vtd"); static int vtd_max_domains(struct vtdmap *vtdmap) { int nd; nd = VTD_CAP_ND(vtdmap->cap); switch (nd) { case 0: return (16); case 1: return (64); case 2: return (256); case 3: return (1024); case 4: return (4 * 1024); case 5: return (16 * 1024); case 6: return (64 * 1024); default: panic("vtd_max_domains: invalid value of nd (0x%0x)", nd); } } static u_int domain_id(void) { 
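	/*
	 * Allocation here is a simple linear scan: try each candidate id in
	 * turn and take the first one that no domain on 'domhead' owns.
	 */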
u_int id; struct domain *dom; /* Skip domain id 0 - it is reserved when Caching Mode field is set */ for (id = 1; id < max_domains; id++) { SLIST_FOREACH(dom, &domhead, next) { if (dom->id == id) break; } if (dom == NULL) break; /* found it */ } if (id >= max_domains) panic("domain ids exhausted"); return (id); } static void vtd_wbflush(struct vtdmap *vtdmap) { if (VTD_ECAP_COHERENCY(vtdmap->ext_cap) == 0) pmap_invalidate_cache(); if (VTD_CAP_RWBF(vtdmap->cap)) { vtdmap->gcr = VTD_GCR_WBF; while ((vtdmap->gsr & VTD_GSR_WBFS) != 0) ; } } static void vtd_ctx_global_invalidate(struct vtdmap *vtdmap) { vtdmap->ccr = VTD_CCR_ICC | VTD_CCR_CIRG_GLOBAL; while ((vtdmap->ccr & VTD_CCR_ICC) != 0) ; } static void vtd_iotlb_global_invalidate(struct vtdmap *vtdmap) { int offset; volatile uint64_t *iotlb_reg, val; vtd_wbflush(vtdmap); offset = VTD_ECAP_IRO(vtdmap->ext_cap) * 16; iotlb_reg = (volatile uint64_t *)((caddr_t)vtdmap + offset + 8); *iotlb_reg = VTD_IIR_IVT | VTD_IIR_IIRG_GLOBAL | VTD_IIR_DRAIN_READS | VTD_IIR_DRAIN_WRITES; while (1) { val = *iotlb_reg; if ((val & VTD_IIR_IVT) == 0) break; } } static void vtd_translation_enable(struct vtdmap *vtdmap) { vtdmap->gcr = VTD_GCR_TE; while ((vtdmap->gsr & VTD_GSR_TES) == 0) ; } static void vtd_translation_disable(struct vtdmap *vtdmap) { vtdmap->gcr = 0; while ((vtdmap->gsr & VTD_GSR_TES) != 0) ; } static int vtd_init(void) { int i, units, remaining; struct vtdmap *vtdmap; vm_paddr_t ctx_paddr; char *end, envname[32]; unsigned long mapaddr; ACPI_STATUS status; ACPI_TABLE_DMAR *dmar; ACPI_DMAR_HEADER *hdr; ACPI_DMAR_HARDWARE_UNIT *drhd; /* * Allow the user to override the ACPI DMAR table by specifying the * physical address of each remapping unit. * * The following example specifies two remapping units at * physical addresses 0xfed90000 and 0xfeda0000 respectively. * set vtd.regmap.0.addr=0xfed90000 * set vtd.regmap.1.addr=0xfeda0000 */ for (units = 0; units < DRHD_MAX_UNITS; units++) { snprintf(envname, sizeof(envname), "vtd.regmap.%d.addr", units); if (getenv_ulong(envname, &mapaddr) == 0) break; vtdmaps[units] = (struct vtdmap *)PHYS_TO_DMAP(mapaddr); } if (units > 0) goto skip_dmar; /* Search for DMAR table. */ status = AcpiGetTable(ACPI_SIG_DMAR, 0, (ACPI_TABLE_HEADER **)&dmar); if (ACPI_FAILURE(status)) return (ENXIO); end = (char *)dmar + dmar->Header.Length; remaining = dmar->Header.Length - sizeof(ACPI_TABLE_DMAR); while (remaining > sizeof(ACPI_DMAR_HEADER)) { hdr = (ACPI_DMAR_HEADER *)(end - remaining); if (hdr->Length > remaining) break; /* * From Intel VT-d arch spec, version 1.3: * BIOS implementations must report mapping structures * in numerical order, i.e. All remapping structures of * type 0 (DRHD) enumerated before remapping structures of * type 1 (RMRR) and so forth. 
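 * The parsing loop below relies on that ordering: it simply stops at
 * the first structure that is not a DRHD.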
*/ if (hdr->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT) break; drhd = (ACPI_DMAR_HARDWARE_UNIT *)hdr; vtdmaps[units++] = (struct vtdmap *)PHYS_TO_DMAP(drhd->Address); if (units >= DRHD_MAX_UNITS) break; remaining -= hdr->Length; } if (units <= 0) return (ENXIO); skip_dmar: drhd_num = units; vtdmap = vtdmaps[0]; if (VTD_CAP_CM(vtdmap->cap) != 0) panic("vtd_init: invalid caching mode"); max_domains = vtd_max_domains(vtdmap); /* * Set up the root-table to point to the context-entry tables */ for (i = 0; i < 256; i++) { ctx_paddr = vtophys(ctx_tables[i]); if (ctx_paddr & PAGE_MASK) panic("ctx table (0x%0lx) not page aligned", ctx_paddr); root_table[i * 2] = ctx_paddr | VTD_ROOT_PRESENT; } return (0); } static void vtd_cleanup(void) { } static void vtd_enable(void) { int i; struct vtdmap *vtdmap; for (i = 0; i < drhd_num; i++) { vtdmap = vtdmaps[i]; vtd_wbflush(vtdmap); /* Update the root table address */ vtdmap->rta = vtophys(root_table); vtdmap->gcr = VTD_GCR_SRTP; while ((vtdmap->gsr & VTD_GSR_RTPS) == 0) ; vtd_ctx_global_invalidate(vtdmap); vtd_iotlb_global_invalidate(vtdmap); vtd_translation_enable(vtdmap); } } static void vtd_disable(void) { int i; struct vtdmap *vtdmap; for (i = 0; i < drhd_num; i++) { vtdmap = vtdmaps[i]; vtd_translation_disable(vtdmap); } } static void vtd_add_device(void *arg, int bus, int slot, int func) { int idx; uint64_t *ctxp; struct domain *dom = arg; vm_paddr_t pt_paddr; struct vtdmap *vtdmap; if (bus < 0 || bus > PCI_BUSMAX || slot < 0 || slot > PCI_SLOTMAX || func < 0 || func > PCI_FUNCMAX) panic("vtd_add_device: invalid bsf %d/%d/%d", bus, slot, func); vtdmap = vtdmaps[0]; ctxp = ctx_tables[bus]; pt_paddr = vtophys(dom->ptp); idx = (slot << 3 | func) * 2; if (ctxp[idx] & VTD_CTX_PRESENT) { panic("vtd_add_device: device %d/%d/%d is already owned by " "domain %d", bus, slot, func, (uint16_t)(ctxp[idx + 1] >> 8)); } /* * Order is important. The 'present' bit is set only after all fields * of the context pointer are initialized. */ ctxp[idx + 1] = dom->addrwidth | (dom->id << 8); if (VTD_ECAP_DI(vtdmap->ext_cap)) ctxp[idx] = VTD_CTX_TT_ALL; else ctxp[idx] = 0; ctxp[idx] |= pt_paddr | VTD_CTX_PRESENT; /* * 'Not Present' entries are not cached in either the Context Cache * or in the IOTLB, so there is no need to invalidate either of them. */ } static void vtd_remove_device(void *arg, int bus, int slot, int func) { int i, idx; uint64_t *ctxp; struct vtdmap *vtdmap; if (bus < 0 || bus > PCI_BUSMAX || slot < 0 || slot > PCI_SLOTMAX || func < 0 || func > PCI_FUNCMAX) panic("vtd_add_device: invalid bsf %d/%d/%d", bus, slot, func); ctxp = ctx_tables[bus]; idx = (slot << 3 | func) * 2; /* * Order is important. The 'present' bit is must be cleared first. */ ctxp[idx] = 0; ctxp[idx + 1] = 0; /* * Invalidate the Context Cache and the IOTLB. 
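 * The entry that was just torn down may still be cached in both
 * structures, so neither invalidation can be skipped here.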
* * XXX use device-selective invalidation for Context Cache * XXX use domain-selective invalidation for IOTLB */ for (i = 0; i < drhd_num; i++) { vtdmap = vtdmaps[i]; vtd_ctx_global_invalidate(vtdmap); vtd_iotlb_global_invalidate(vtdmap); } } #define CREATE_MAPPING 0 #define REMOVE_MAPPING 1 static uint64_t vtd_update_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len, int remove) { struct domain *dom; int i, spshift, ptpshift, ptpindex, nlevels; uint64_t spsize, *ptp; dom = arg; ptpindex = 0; ptpshift = 0; if (gpa & PAGE_MASK) panic("vtd_create_mapping: unaligned gpa 0x%0lx", gpa); if (hpa & PAGE_MASK) panic("vtd_create_mapping: unaligned hpa 0x%0lx", hpa); if (len & PAGE_MASK) panic("vtd_create_mapping: unaligned len 0x%0lx", len); /* * Compute the size of the mapping that we can accommodate. * * This is based on three factors: * - supported super page size * - alignment of the region starting at 'gpa' and 'hpa' * - length of the region 'len' */ spshift = 48; for (i = 3; i >= 0; i--) { spsize = 1UL << spshift; if ((dom->spsmask & (1 << i)) != 0 && (gpa & (spsize - 1)) == 0 && (hpa & (spsize - 1)) == 0 && (len >= spsize)) { break; } spshift -= 9; } ptp = dom->ptp; nlevels = dom->pt_levels; while (--nlevels >= 0) { ptpshift = 12 + nlevels * 9; ptpindex = (gpa >> ptpshift) & 0x1FF; /* We have reached the leaf mapping */ if (spshift >= ptpshift) { break; } /* * We are working on a non-leaf page table page. * * Create a downstream page table page if necessary and point * to it from the current page table. */ if (ptp[ptpindex] == 0) { void *nlp = malloc(PAGE_SIZE, M_VTD, M_WAITOK | M_ZERO); ptp[ptpindex] = vtophys(nlp) | VTD_PTE_RD | VTD_PTE_WR; } ptp = (uint64_t *)PHYS_TO_DMAP(ptp[ptpindex] & VTD_PTE_ADDR_M); } if ((gpa & ((1UL << ptpshift) - 1)) != 0) panic("gpa 0x%lx and ptpshift %d mismatch", gpa, ptpshift); /* * Update the 'gpa' -> 'hpa' mapping */ if (remove) { ptp[ptpindex] = 0; } else { ptp[ptpindex] = hpa | VTD_PTE_RD | VTD_PTE_WR; if (nlevels > 0) ptp[ptpindex] |= VTD_PTE_SUPERPAGE; } return (1UL << ptpshift); } static uint64_t vtd_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len) { return (vtd_update_mapping(arg, gpa, hpa, len, CREATE_MAPPING)); } static uint64_t vtd_remove_mapping(void *arg, vm_paddr_t gpa, uint64_t len) { return (vtd_update_mapping(arg, gpa, 0, len, REMOVE_MAPPING)); } static void vtd_invalidate_tlb(void *dom) { int i; struct vtdmap *vtdmap; /* * Invalidate the IOTLB. * XXX use domain-selective invalidation for IOTLB */ for (i = 0; i < drhd_num; i++) { vtdmap = vtdmaps[i]; vtd_iotlb_global_invalidate(vtdmap); } } static void * vtd_create_domain(vm_paddr_t maxaddr) { struct domain *dom; vm_paddr_t addr; int tmp, i, gaw, agaw, sagaw, res, pt_levels, addrwidth; struct vtdmap *vtdmap; if (drhd_num <= 0) panic("vtd_create_domain: no dma remapping hardware available"); vtdmap = vtdmaps[0]; /* * Calculate AGAW. * Section 3.4.2 "Adjusted Guest Address Width", Architecture Spec. */ addr = 0; for (gaw = 0; addr < maxaddr; gaw++) addr = 1ULL << gaw; res = (gaw - 12) % 9; if (res == 0) agaw = gaw; else agaw = gaw + 9 - res; if (agaw > 64) agaw = 64; /* * Select the smallest Supported AGAW and the corresponding number * of page table levels.
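 *
 * A worked example of the calculation above: maxaddr = 4GB yields
 * gaw = 33, so res = (33 - 12) % 9 = 3 and agaw = 33 + 9 - 3 = 39.
 * A unit whose SAGAW capability advertises AGAW 39 (bit 1) then ends
 * up with pt_levels = 3, i.e. a 3-level page table.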
*/ pt_levels = 2; sagaw = 30; addrwidth = 0; tmp = VTD_CAP_SAGAW(vtdmap->cap); for (i = 0; i < 5; i++) { if ((tmp & (1 << i)) != 0 && sagaw >= agaw) break; pt_levels++; addrwidth++; sagaw += 9; if (sagaw > 64) sagaw = 64; } if (i >= 5) { panic("vtd_create_domain: SAGAW 0x%lx does not support AGAW %d", VTD_CAP_SAGAW(vtdmap->cap), agaw); } dom = malloc(sizeof(struct domain), M_VTD, M_ZERO | M_WAITOK); dom->pt_levels = pt_levels; dom->addrwidth = addrwidth; dom->id = domain_id(); dom->maxaddr = maxaddr; dom->ptp = malloc(PAGE_SIZE, M_VTD, M_ZERO | M_WAITOK); if ((uintptr_t)dom->ptp & PAGE_MASK) panic("vtd_create_domain: ptp (%p) not page aligned", dom->ptp); #ifdef notyet /* * XXX superpage mappings for the iommu do not work correctly. * * By default all physical memory is mapped into the host_domain. * When a VM is allocated wired memory the pages belonging to it * are removed from the host_domain and added to the vm's domain. * * If the page being removed was mapped using a superpage mapping * in the host_domain then we need to demote the mapping before * removing the page. * * There is not any code to deal with the demotion at the moment * so we disable superpage mappings altogether. */ dom->spsmask = VTD_CAP_SPS(vtdmap->cap); #endif SLIST_INSERT_HEAD(&domhead, dom, next); return (dom); } static void vtd_free_ptp(uint64_t *ptp, int level) { int i; uint64_t *nlp; if (level > 1) { for (i = 0; i < 512; i++) { if ((ptp[i] & (VTD_PTE_RD | VTD_PTE_WR)) == 0) continue; if ((ptp[i] & VTD_PTE_SUPERPAGE) != 0) continue; nlp = (uint64_t *)PHYS_TO_DMAP(ptp[i] & VTD_PTE_ADDR_M); vtd_free_ptp(nlp, level - 1); } } bzero(ptp, PAGE_SIZE); free(ptp, M_VTD); } static void vtd_destroy_domain(void *arg) { struct domain *dom; dom = arg; SLIST_REMOVE(&domhead, dom, domain, next); vtd_free_ptp(dom->ptp, dom->pt_levels); free(dom, M_VTD); } struct iommu_ops iommu_ops_intel = { vtd_init, vtd_cleanup, vtd_enable, vtd_disable, vtd_create_domain, vtd_destroy_domain, vtd_create_mapping, vtd_remove_mapping, vtd_add_device, vtd_remove_device, vtd_invalidate_tlb, }; Index: head/sys/arm/arm/cpufunc_asm_pj4b.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_pj4b.S (revision 258779) +++ head/sys/arm/arm/cpufunc_asm_pj4b.S (revision 258780) @@ -1,130 +1,130 @@ /*- * Copyright (C) 2011 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include .Lpj4b_sf_ctrl_reg: .word 0xf1021820 ENTRY(pj4b_setttb) /* Cache synchronization is not required as this core has PIPT caches */ mcr p15, 0, r1, c7, c10, 4 /* drain the write buffer */ #ifdef SMP orr r0, r0, #2 /* Set TTB shared memory flag */ #endif mcr p15, 0, r0, c2, c0, 0 /* load new TTB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */ RET END(pj4b_setttb) ENTRY(pj4b_drain_readbuf) mcr p15, 0, r0, c7, c5, 4 /* flush prefetch buffers */ RET END(pj4b_drain_readbuf) ENTRY(pj4b_flush_brnchtgt_all) mcr p15, 0, r0, c7, c5, 6 /* flush entire branch target cache */ RET END(pj4b_flush_brnchtgt_all) ENTRY(pj4b_flush_brnchtgt_va) mcr p15, 0, r0, c7, c5, 7 /* flush branch target cache by VA */ RET END(pj4b_flush_brnchtgt_va) ENTRY(get_core_id) mrc p15, 0, r0, c0, c0, 5 RET END(get_core_id) ENTRY(pj4b_config) /* Set Auxiliary Debug Modes Control 0 register */ mrc p15, 1, r0, c15, c1, 0 /* ARMADAXP errata fix: ARM-CPU-6136 */ bic r0, r0, #(1 << 12) /* LDSTM first issue is single word */ orr r0, r0, #(1 << 22) /* DVM_WAKEUP disable */ mcr p15, 1, r0, c15, c1, 0 /* Set Auxiliary Debug Modes Control 1 register */ mrc p15, 1, r0, c15, c1, 1 /* ARMADAXP errata fix: ARM-CPU-6409 */ bic r0, r0, #(1 << 2) /* Disable static branch prediction */ orr r0, r0, #(1 << 5) /* STREX backoff disable */ orr r0, r0, #(1 << 8) /* Internal parity handling disable */ orr r0, r0, #(1 << 16) /* Disable data transfer for clean line */ mcr p15, 1, r0, c15, c1, 1 /* Set Auxiliary Function Modes Control 0 register */ mrc p15, 1, r0, c15, c2, 0 #if defined(SMP) orr r0, r0, #(1 << 1) /* SMP/nAMP enabled (coherency) */ #endif orr r0, r0, #(1 << 2) /* L1 parity enable */ orr r0, r0, #(1 << 8) /* Cache and TLB maintenance broadcast enable */ mcr p15, 1, r0, c15, c2, 0 /* Set Auxiliary Debug Modes Control 2 register */ mrc p15, 1, r0, c15, c1, 2 bic r0, r0, #(1 << 23) /* Enable fast LDR */ orr r0, r0, #(1 << 25) /* Intervention Interleave disable */ orr r0, r0, #(1 << 27) /* Critical word first sequencing disable */ orr r0, r0, #(1 << 29) /* Disable MO device read / write */ orr r0, r0, #(1 << 30) /* L1 cache strict round-robin replacement policy */ - orr r0, r0, #(1 << 31) /* Enable write evict */ + orr r0, r0, #(1U << 31) /* Enable write evict */ mcr p15, 1, r0, c15, c1, 2 #if defined(SMP) /* Set SMP mode in Auxiliary Control Register */ mrc p15, 0, r0, c1, c0, 1 orr r0, r0, #(1 << 5) mcr p15, 0, r0, c1, c0, 1 #endif /* Load CPU number */ mrc p15, 0, r0, c0, c0, 5 and r0, r0, #0xf /* SF Enable and invalidate */ ldr r1, .Lpj4b_sf_ctrl_reg ldr r2, [r1, r0, lsl #8] orr r2, r2, #(1 << 0) bic r2, r2, #(1 << 8) str r2, [r1, r0, lsl #8] RET END(pj4b_config) Index: head/sys/arm/arm/db_trace.c =================================================================== --- head/sys/arm/arm/db_trace.c (revision 258779) +++ head/sys/arm/arm/db_trace.c (revision 258780) @@ -1,642 +1,642 @@ /* $NetBSD: db_trace.c,v 1.8 2003/01/17 22:28:48 thorpej Exp $ */ /*- * Copyright (c) 2000, 2001 Ben Harris *
Copyright (c) 1996 Scott K. Stevens * * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ARM_EABI__ /* * Definitions for the instruction interpreter. * * The ARM EABI specifies how to perform the frame unwinding in the * Exception Handling ABI for the ARM Architecture document. To perform * the unwind we need to know the initial frame pointer, stack pointer, * link register and program counter. We then find the entry within the * index table that points to the function the program counter is within. * This gives us either a list of three instructions to process, a 31-bit * relative offset to a table of instructions, or a value telling us * we can't unwind any further. * * When we have the instructions to process we need to decode them * following table 4 in section 9.3. This describes a collection of bit * patterns to encode the steps to take to update the stack pointer and * link register to the correct values at the start of the function. */ /* A special case when we are unable to unwind past this function */ #define EXIDX_CANTUNWIND 1 /* The register names */ #define FP 11 #define SP 13 #define LR 14 #define PC 15 /* * These are set in the linker script. Their addresses will be * either the start or end of the exception table or index. */ extern int extab_start, extab_end, exidx_start, exidx_end; /* * Entry types. * These are the only entry types that have been seen in the kernel. */ #define ENTRY_MASK 0xff000000 #define ENTRY_ARM_SU16 0x80000000 #define ENTRY_ARM_LU16 0x81000000 /* Instruction masks.
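 *
 * A worked example of the decoding below: the byte 0x3e has its top two
 * bits clear, so it matches INSN_VSP_INC and advances the virtual stack
 * pointer by ((0x3e & INSN_VSP_SIZE_MASK) << 2) + 4 = 252 bytes.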
*/ #define INSN_VSP_MASK 0xc0 #define INSN_VSP_SIZE_MASK 0x3f #define INSN_STD_MASK 0xf0 #define INSN_STD_DATA_MASK 0x0f #define INSN_POP_TYPE_MASK 0x08 #define INSN_POP_COUNT_MASK 0x07 #define INSN_VSP_LARGE_INC_MASK 0xff /* Instruction definitions */ #define INSN_VSP_INC 0x00 #define INSN_VSP_DEC 0x40 #define INSN_POP_MASKED 0x80 #define INSN_VSP_REG 0x90 #define INSN_POP_COUNT 0xa0 #define INSN_FINISH 0xb0 #define INSN_POP_REGS 0xb1 #define INSN_VSP_LARGE_INC 0xb2 /* An item in the exception index table */ struct unwind_idx { uint32_t offset; uint32_t insn; }; /* The state of the unwind process */ struct unwind_state { uint32_t registers[16]; uint32_t start_pc; uint32_t *insn; u_int entries; u_int byte; uint16_t update_mask; }; /* Expand a 31-bit signed value to a 32-bit signed value */ static __inline int32_t db_expand_prel31(uint32_t prel31) { return ((int32_t)(prel31 & 0x7fffffffu) << 1) / 2; } /* * Perform a binary search of the index table to find the function * with the largest address that doesn't exceed addr. */ static struct unwind_idx * db_find_index(uint32_t addr) { unsigned int min, mid, max; struct unwind_idx *start; struct unwind_idx *item; int32_t prel31_addr; uint32_t func_addr; start = (struct unwind_idx *)&exidx_start; min = 0; max = (&exidx_end - &exidx_start) / 2; while (min != max) { mid = min + (max - min + 1) / 2; item = &start[mid]; prel31_addr = db_expand_prel31(item->offset); func_addr = (uint32_t)&item->offset + prel31_addr; if (func_addr <= addr) { min = mid; } else { max = mid - 1; } } return &start[min]; } /* Reads the next byte from the instruction list */ static uint8_t db_unwind_exec_read_byte(struct unwind_state *state) { uint8_t insn; /* Read the unwind instruction */ insn = (*state->insn) >> (state->byte * 8); /* Update the location of the next instruction */ if (state->byte == 0) { state->byte = 3; state->insn++; state->entries--; } else state->byte--; return insn; } /* Executes the next instruction on the list */ static int db_unwind_exec_insn(struct unwind_state *state) { unsigned int insn; uint32_t *vsp = (uint32_t *)state->registers[SP]; int update_vsp = 0; /* This should never happen */ if (state->entries == 0) return 1; /* Read the next instruction */ insn = db_unwind_exec_read_byte(state); if ((insn & INSN_VSP_MASK) == INSN_VSP_INC) { state->registers[SP] += ((insn & INSN_VSP_SIZE_MASK) << 2) + 4; } else if ((insn & INSN_VSP_MASK) == INSN_VSP_DEC) { state->registers[SP] -= ((insn & INSN_VSP_SIZE_MASK) << 2) + 4; } else if ((insn & INSN_STD_MASK) == INSN_POP_MASKED) { unsigned int mask, reg; /* Load the mask */ mask = db_unwind_exec_read_byte(state); mask |= (insn & INSN_STD_DATA_MASK) << 8; /* A zero mask is a 'refuse to unwind' instruction */ if (mask == 0) return 1; /* Update SP */ update_vsp = 1; /* Load the registers */ for (reg = 4; mask && reg < 16; mask >>= 1, reg++) { if (mask & 1) { state->registers[reg] = *vsp++; state->update_mask |= 1 << reg; /* If we have updated SP keep its value */ if (reg == SP) update_vsp = 0; } } } else if ((insn & INSN_STD_MASK) == INSN_VSP_REG && ((insn & INSN_STD_DATA_MASK) != 13) && ((insn & INSN_STD_DATA_MASK) != 15)) { /* sp = register */ state->registers[SP] = state->registers[insn & INSN_STD_DATA_MASK]; } else if ((insn & INSN_STD_MASK) == INSN_POP_COUNT) { unsigned int count, reg; /* Read how many registers to load */ count = insn & INSN_POP_COUNT_MASK; /* Update SP */ update_vsp = 1; /* Pop the registers */ for (reg = 4; reg <= 4 + count; reg++) { state->registers[reg] = *vsp++; state->update_mask |= 1 <<
reg; } /* Check if we are in the pop r14 version */ if ((insn & INSN_POP_TYPE_MASK) != 0) { state->registers[14] = *vsp++; } } else if (insn == INSN_FINISH) { /* Stop processing */ state->entries = 0; } else if ((insn == INSN_POP_REGS)) { unsigned int mask, reg; mask = db_unwind_exec_read_byte(state); if (mask == 0 || (mask & 0xf0) != 0) return 1; /* Update SP */ update_vsp = 1; /* Load the registers */ for (reg = 0; mask && reg < 4; mask >>= 1, reg++) { if (mask & 1) { state->registers[reg] = *vsp++; state->update_mask |= 1 << reg; } } } else if ((insn & INSN_VSP_LARGE_INC_MASK) == INSN_VSP_LARGE_INC) { unsigned int uleb128; /* Read the increment value */ uleb128 = db_unwind_exec_read_byte(state); state->registers[SP] += 0x204 + (uleb128 << 2); } else { /* We hit a new instruction that needs to be implemented */ db_printf("Unhandled instruction %.2x\n", insn); return 1; } if (update_vsp) { state->registers[SP] = (uint32_t)vsp; } #if 0 db_printf("fp = %08x, sp = %08x, lr = %08x, pc = %08x\n", state->registers[FP], state->registers[SP], state->registers[LR], state->registers[PC]); #endif return 0; } /* Performs the unwind of a function */ static int db_unwind_tab(struct unwind_state *state) { uint32_t entry; /* Set PC to a known value */ state->registers[PC] = 0; /* Read the personality */ entry = *state->insn & ENTRY_MASK; if (entry == ENTRY_ARM_SU16) { state->byte = 2; state->entries = 1; } else if (entry == ENTRY_ARM_LU16) { state->byte = 1; state->entries = ((*state->insn >> 16) & 0xFF) + 1; } else { db_printf("Unknown entry: %x\n", entry); return 1; } while (state->entries > 0) { if (db_unwind_exec_insn(state) != 0) return 1; } /* * The program counter was not updated, load it from the link register. */ if (state->registers[PC] == 0) state->registers[PC] = state->registers[LR]; return 0; } static void db_stack_trace_cmd(struct unwind_state *state) { struct unwind_idx *index; const char *name; db_expr_t value; db_expr_t offset; c_db_sym_t sym; u_int reg, i; char *sep; uint16_t upd_mask; bool finished; finished = false; while (!finished) { /* Reset the mask of updated registers */ state->update_mask = 0; /* The pc value is correct and will be overwritten, save it */ state->start_pc = state->registers[PC]; /* Find the item to run */ index = db_find_index(state->start_pc); if (index->insn != EXIDX_CANTUNWIND) { - if (index->insn & (1 << 31)) { + if (index->insn & (1U << 31)) { /* The data is within the instruction */ state->insn = &index->insn; } else { /* A prel31 offset to the unwind table */ state->insn = (uint32_t *) ((uintptr_t)&index->insn + db_expand_prel31(index->insn)); } /* Run the unwind function */ finished = db_unwind_tab(state); } /* Print the frame details */ sym = db_search_symbol(state->start_pc, DB_STGY_ANY, &offset); if (sym == C_DB_SYM_NULL) { value = 0; name = "(null)"; } else db_symbol_values(sym, &name, &value); db_printf("%s() at ", name); db_printsym(state->start_pc, DB_STGY_PROC); db_printf("\n"); db_printf("\t pc = 0x%08x lr = 0x%08x (", state->start_pc, state->registers[LR]); db_printsym(state->registers[LR], DB_STGY_PROC); db_printf(")\n"); db_printf("\t sp = 0x%08x fp = 0x%08x", state->registers[SP], state->registers[FP]); /* Don't print the registers we have already printed */ upd_mask = state->update_mask & ~((1 << SP) | (1 << FP) | (1 << LR) | (1 << PC)); sep = "\n\t"; for (i = 0, reg = 0; upd_mask != 0; upd_mask >>= 1, reg++) { if ((upd_mask & 1) != 0) { db_printf("%s%sr%d = 0x%08x", sep, (reg < 10) ? 
" " : "", reg, state->registers[reg]); i++; if (i == 2) { sep = "\n\t"; i = 0; } else sep = " "; } } db_printf("\n"); /* * Stop if directed to do so, or if we've unwound back to the * kernel entry point, or if the unwind function didn't change * anything (to avoid getting stuck in this loop forever). * If the latter happens, it's an indication that the unwind * information is incorrect somehow for the function named in * the last frame printed before you see the unwind failure * message (maybe it needs a STOP_UNWINDING). */ if (index->insn == EXIDX_CANTUNWIND) { db_printf("Unable to unwind further\n"); finished = true; } else if (state->registers[PC] < VM_MIN_KERNEL_ADDRESS) { db_printf("Unable to unwind into user mode\n"); finished = true; } else if (state->update_mask == 0) { db_printf("Unwind failure (no registers changed)\n"); finished = true; } } } #endif /* * APCS stack frames are awkward beasts, so I don't think even trying to use * a structure to represent them is a good idea. * * Here's the diagram from the APCS. Increasing address is _up_ the page. * * save code pointer [fp] <- fp points to here * return link value [fp, #-4] * return sp value [fp, #-8] * return fp value [fp, #-12] * [saved v7 value] * [saved v6 value] * [saved v5 value] * [saved v4 value] * [saved v3 value] * [saved v2 value] * [saved v1 value] * [saved a4 value] * [saved a3 value] * [saved a2 value] * [saved a1 value] * * The save code pointer points twelve bytes beyond the start of the * code sequence (usually a single STM) that created the stack frame. * We have to disassemble it if we want to know which of the optional * fields are actually present. */ #ifndef __ARM_EABI__ /* The frame format is differend in AAPCS */ static void db_stack_trace_cmd(db_expr_t addr, db_expr_t count, boolean_t kernel_only) { u_int32_t *frame, *lastframe; c_db_sym_t sym; const char *name; db_expr_t value; db_expr_t offset; int scp_offset; frame = (u_int32_t *)addr; lastframe = NULL; scp_offset = -(get_pc_str_offset() >> 2); while (count-- && frame != NULL && !db_pager_quit) { db_addr_t scp; u_int32_t savecode; int r; u_int32_t *rp; const char *sep; /* * In theory, the SCP isn't guaranteed to be in the function * that generated the stack frame. We hope for the best. */ scp = frame[FR_SCP]; sym = db_search_symbol(scp, DB_STGY_ANY, &offset); if (sym == C_DB_SYM_NULL) { value = 0; name = "(null)"; } else db_symbol_values(sym, &name, &value); db_printf("%s() at ", name); db_printsym(scp, DB_STGY_PROC); db_printf("\n"); #ifdef __PROG26 db_printf("\tscp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV] & R15_PC); db_printsym(frame[FR_RLV] & R15_PC, DB_STGY_PROC); db_printf(")\n"); #else db_printf("\tscp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV]); db_printsym(frame[FR_RLV], DB_STGY_PROC); db_printf(")\n"); #endif db_printf("\trsp=0x%08x rfp=0x%08x", frame[FR_RSP], frame[FR_RFP]); savecode = ((u_int32_t *)scp)[scp_offset]; if ((savecode & 0x0e100000) == 0x08000000) { /* Looks like an STM */ rp = frame - 4; sep = "\n\t"; for (r = 10; r >= 0; r--) { if (savecode & (1 << r)) { db_printf("%sr%d=0x%08x", sep, r, *rp--); sep = (frame - rp) % 4 == 2 ? 
"\n\t" : " "; } } } db_printf("\n"); /* * Switch to next frame up */ if (frame[FR_RFP] == 0) break; /* Top of stack */ lastframe = frame; frame = (u_int32_t *)(frame[FR_RFP]); if (INKERNEL((int)frame)) { /* staying in kernel */ if (frame <= lastframe) { db_printf("Bad frame pointer: %p\n", frame); break; } } else if (INKERNEL((int)lastframe)) { /* switch from user to kernel */ if (kernel_only) break; /* kernel stack only */ } else { /* in user */ if (frame <= lastframe) { db_printf("Bad user frame pointer: %p\n", frame); break; } } } } #endif /* XXX stubs */ void db_md_list_watchpoints() { } int db_md_clr_watchpoint(db_expr_t addr, db_expr_t size) { return (0); } int db_md_set_watchpoint(db_expr_t addr, db_expr_t size) { return (0); } int db_trace_thread(struct thread *thr, int count) { #ifdef __ARM_EABI__ struct unwind_state state; #endif struct pcb *ctx; if (thr != curthread) { ctx = kdb_thr_ctx(thr); #ifdef __ARM_EABI__ state.registers[FP] = ctx->un_32.pcb32_r11; state.registers[SP] = ctx->un_32.pcb32_sp; state.registers[LR] = ctx->un_32.pcb32_lr; state.registers[PC] = ctx->un_32.pcb32_pc; db_stack_trace_cmd(&state); #else db_stack_trace_cmd(ctx->un_32.pcb32_r11, -1, TRUE); #endif } else db_trace_self(); return (0); } void db_trace_self(void) { #ifdef __ARM_EABI__ struct unwind_state state; uint32_t sp; /* Read the stack pointer */ __asm __volatile("mov %0, sp" : "=&r" (sp)); state.registers[FP] = (uint32_t)__builtin_frame_address(0); state.registers[SP] = sp; state.registers[LR] = (uint32_t)__builtin_return_address(0); state.registers[PC] = (uint32_t)db_trace_self; db_stack_trace_cmd(&state); #else db_addr_t addr; addr = (db_addr_t)__builtin_frame_address(0); db_stack_trace_cmd(addr, -1, FALSE); #endif } Index: head/sys/arm/arm/pl190.c =================================================================== --- head/sys/arm/arm/pl190.c (revision 258779) +++ head/sys/arm/arm/pl190.c (revision 258780) @@ -1,187 +1,187 @@ /*- * Copyright (c) 2012 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #define dprintf(fmt, args...) printf(fmt, ##args) #else #define dprintf(fmt, args...) 
#endif #define VICIRQSTATUS 0x000 #define VICFIQSTATUS 0x004 #define VICRAWINTR 0x008 #define VICINTSELECT 0x00C #define VICINTENABLE 0x010 #define VICINTENCLEAR 0x014 #define VICSOFTINT 0x018 #define VICSOFTINTCLEAR 0x01C #define VICPROTECTION 0x020 #define VICPERIPHID 0xFE0 #define VICPRIMECELLID 0xFF0 #define VIC_NIRQS 32 struct pl190_intc_softc { device_t sc_dev; struct resource * intc_res; }; static struct pl190_intc_softc *pl190_intc_sc = NULL; #define intc_vic_read_4(reg) \ bus_read_4(pl190_intc_sc->intc_res, (reg)) #define intc_vic_write_4(reg, val) \ bus_write_4(pl190_intc_sc->intc_res, (reg), (val)) static int pl190_intc_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "arm,versatile-vic")) return (ENXIO); device_set_desc(dev, "ARM PL190 VIC"); return (BUS_PROBE_DEFAULT); } static int pl190_intc_attach(device_t dev) { struct pl190_intc_softc *sc = device_get_softc(dev); uint32_t id; int i, rid; sc->sc_dev = dev; if (pl190_intc_sc) return (ENXIO); /* Request memory resources */ rid = 0; sc->intc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->intc_res == NULL) { device_printf(dev, "Error: could not allocate memory resources\n"); return (ENXIO); } pl190_intc_sc = sc; /* * All interrupts should use IRQ line */ intc_vic_write_4(VICINTSELECT, 0x00000000); /* Disable all interrupts */ intc_vic_write_4(VICINTENCLEAR, 0xffffffff); /* Enable INT31, SIC IRQ */ - intc_vic_write_4(VICINTENABLE, (1 << 31)); + intc_vic_write_4(VICINTENABLE, (1U << 31)); id = 0; for (i = 3; i >= 0; i--) { id = (id << 8) | (intc_vic_read_4(VICPERIPHID + i*4) & 0xff); } device_printf(dev, "Peripheral ID: %08x\n", id); id = 0; for (i = 3; i >= 0; i--) { id = (id << 8) | (intc_vic_read_4(VICPRIMECELLID + i*4) & 0xff); } device_printf(dev, "PrimeCell ID: %08x\n", id); return (0); } static device_method_t pl190_intc_methods[] = { DEVMETHOD(device_probe, pl190_intc_probe), DEVMETHOD(device_attach, pl190_intc_attach), { 0, 0 } }; static driver_t pl190_intc_driver = { "intc", pl190_intc_methods, sizeof(struct pl190_intc_softc), }; static devclass_t pl190_intc_devclass; DRIVER_MODULE(intc, simplebus, pl190_intc_driver, pl190_intc_devclass, 0, 0); int arm_get_next_irq(int last_irq) { uint32_t pending; int32_t irq = last_irq + 1; /* Sanity check */ if (irq < 0) irq = 0; pending = intc_vic_read_4(VICIRQSTATUS); while (irq < VIC_NIRQS) { if (pending & (1 << irq)) return (irq); irq++; } return (-1); } void arm_mask_irq(uintptr_t nb) { dprintf("%s: %d\n", __func__, nb); intc_vic_write_4(VICINTENCLEAR, (1 << nb)); } void arm_unmask_irq(uintptr_t nb) { dprintf("%s: %d\n", __func__, nb); intc_vic_write_4(VICINTENABLE, (1 << nb)); } Index: head/sys/arm/at91/if_macbvar.h =================================================================== --- head/sys/arm/at91/if_macbvar.h (revision 258779) +++ head/sys/arm/at91/if_macbvar.h (revision 258780) @@ -1,138 +1,138 @@ /* * $FreeBSD$ */ #ifndef _IF_MACB_H #define _IF_MACB_H #define MACB_MAX_TX_BUFFERS 64 #define MACB_MAX_RX_BUFFERS 256 #define MAX_FRAGMENT 20 #define DATA_SIZE 128 #define MACB_DESC_INC(x, y) ((x) = ((x) + 1) % (y)) #define MACB_TIMEOUT 1000 struct eth_tx_desc { uint32_t addr; uint32_t flags; -#define TD_OWN (1 << 31) +#define TD_OWN (1U << 31) #define TD_LAST (1 << 15) #define TD_WRAP_MASK (1 << 30) }; struct eth_rx_desc { uint32_t addr; #define RD_LEN_MASK 0x7ff #define RD_WRAP_MASK 0x00000002 #define RD_OWN 0x00000001 uint32_t flags; -#define RD_BROADCAST (1 << 31) +#define RD_BROADCAST (1U << 31) #define RD_MULTICAST (1 << 30) #define 
RD_UNICAST (1 << 29) #define RD_EXTERNAL (1 << 28) #define RD_TYPE_ID (1 << 22) #define RD_PRIORITY (1 << 20) #define RD_VLAN (1 << 21) #define RD_CONCAT (1 << 16) #define RD_EOF (1 << 15) #define RD_SOF (1 << 14) #define RD_OFFSET_MASK (1 << 13)|(1 << 12) #define RD_LENGTH_MASK (0x00000FFF) }; struct rx_desc_info { struct mbuf *buff; bus_dmamap_t dmamap; }; struct tx_desc_info { struct mbuf *buff; bus_dmamap_t dmamap; }; struct macb_chain_data{ struct mbuf *rxhead; struct mbuf *rxtail; }; struct macb_softc { struct ifnet *ifp; /* ifnet pointer */ struct mtx sc_mtx; /* global mutex */ bus_dma_tag_t sc_parent_tag; /* parent bus DMA tag */ device_t dev; /* Myself */ device_t miibus; /* My child miibus */ void *intrhand; /* Interrupt handle */ void *intrhand_qf; /* queue full */ void *intrhand_tx; /* tx complete */ void *intrhand_status; /* error status */ struct resource *irq_res; /* transmit */ struct resource *irq_res_rec; /* receive */ struct resource *irq_res_qf; /* queue full */ struct resource *irq_res_status; /* status */ struct resource *mem_res; /* Memory resource */ struct callout tick_ch; /* Tick callout */ struct taskqueue *sc_tq; struct task sc_intr_task; struct task sc_tx_task; struct task sc_link_task; bus_dmamap_t dmamap_ring_tx; bus_dmamap_t dmamap_ring_rx; /*dma tag for ring*/ bus_dma_tag_t dmatag_ring_tx; bus_dma_tag_t dmatag_ring_rx; /*dma tag for data*/ bus_dma_tag_t dmatag_data_tx; bus_dma_tag_t dmatag_data_rx; /*the ring*/ struct eth_tx_desc *desc_tx; struct eth_rx_desc *desc_rx; /*ring physical address*/ bus_addr_t ring_paddr_tx; bus_addr_t ring_paddr_rx; /*index of last received descriptor*/ int rx_cons; struct rx_desc_info rx_desc[MACB_MAX_RX_BUFFERS]; /* tx producer index */ uint32_t tx_prod; /* tx consumer index */ uint32_t tx_cons; int tx_cnt; struct tx_desc_info tx_desc[MACB_MAX_TX_BUFFERS]; int macb_watchdog_timer; #define MACB_FLAG_LINK 0x0001 int flags; int if_flags; struct at91_pmc_clock *clk; struct macb_chain_data macb_cdata; int clock; }; #endif Index: head/sys/arm/broadcom/bcm2835/bcm2835_dma.c =================================================================== --- head/sys/arm/broadcom/bcm2835/bcm2835_dma.c (revision 258779) +++ head/sys/arm/broadcom/bcm2835/bcm2835_dma.c (revision 258780) @@ -1,728 +1,728 @@ /* * Copyright (c) 2013 Daisuke Aoyama * Copyright (c) 2013 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bcm2835_dma.h" #include "bcm2835_vcbus.h" #define MAX_REG 9 /* private flags */ #define BCM_DMA_CH_USED 0x00000001 #define BCM_DMA_CH_FREE 0x40000000 #define BCM_DMA_CH_UNMAP 0x80000000 /* Register Map (4.2.1.2) */ #define BCM_DMA_CS(n) (0x100*(n) + 0x00) #define CS_ACTIVE (1 << 0) #define CS_END (1 << 1) #define CS_INT (1 << 2) #define CS_DREQ (1 << 3) #define CS_ISPAUSED (1 << 4) #define CS_ISHELD (1 << 5) #define CS_ISWAIT (1 << 6) #define CS_ERR (1 << 8) #define CS_WAITWRT (1 << 28) #define CS_DISDBG (1 << 29) #define CS_ABORT (1 << 30) -#define CS_RESET (1 << 31) +#define CS_RESET (1U << 31) #define BCM_DMA_CBADDR(n) (0x100*(n) + 0x04) #define BCM_DMA_INFO(n) (0x100*(n) + 0x08) #define INFO_INT_EN (1 << 0) #define INFO_TDMODE (1 << 1) #define INFO_WAIT_RESP (1 << 3) #define INFO_D_INC (1 << 4) #define INFO_D_WIDTH (1 << 5) #define INFO_D_DREQ (1 << 6) #define INFO_S_INC (1 << 8) #define INFO_S_WIDTH (1 << 9) #define INFO_S_DREQ (1 << 10) #define INFO_WAITS_SHIFT (21) #define INFO_PERMAP_SHIFT (16) #define INFO_PERMAP_MASK (0x1f << INFO_PERMAP_SHIFT) #define BCM_DMA_SRC(n) (0x100*(n) + 0x0C) #define BCM_DMA_DST(n) (0x100*(n) + 0x10) #define BCM_DMA_LEN(n) (0x100*(n) + 0x14) #define BCM_DMA_STRIDE(n) (0x100*(n) + 0x18) #define BCM_DMA_CBNEXT(n) (0x100*(n) + 0x1C) #define BCM_DMA_DEBUG(n) (0x100*(n) + 0x20) #define DEBUG_ERROR_MASK (7) #define BCM_DMA_INT_STATUS 0xfe0 #define BCM_DMA_ENABLE 0xff0 /* relative offset from BCM_VC_DMA0_BASE (p.39) */ #define BCM_DMA_CH(n) (0x100*(n)) /* DMA Control Block - 256bit aligned (p.40) */ struct bcm_dma_cb { uint32_t info; /* Transfer Information */ uint32_t src; /* Source Address */ uint32_t dst; /* Destination Address */ uint32_t len; /* Transfer Length */ uint32_t stride; /* 2D Mode Stride */ uint32_t next; /* Next Control Block Address */ uint32_t rsvd1; /* Reserved */ uint32_t rsvd2; /* Reserved */ }; #ifdef DEBUG static void bcm_dma_cb_dump(struct bcm_dma_cb *cb); static void bcm_dma_reg_dump(int ch); #endif /* DMA channel private info */ struct bcm_dma_ch { int ch; uint32_t flags; struct bcm_dma_cb * cb; uint32_t vc_cb; bus_dmamap_t dma_map; void (*intr_func)(int, void *); void * intr_arg; }; struct bcm_dma_softc { device_t sc_dev; struct mtx sc_mtx; struct resource * sc_mem; struct resource * sc_irq[BCM_DMA_CH_MAX]; void * sc_intrhand[BCM_DMA_CH_MAX]; struct bcm_dma_ch sc_dma_ch[BCM_DMA_CH_MAX]; bus_dma_tag_t sc_dma_tag; }; static struct bcm_dma_softc *bcm_dma_sc = NULL; static void bcm_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { bus_addr_t *addr; if (err) return; addr = (bus_addr_t*)arg; *addr = PHYS_TO_VCBUS(segs[0].ds_addr); } static void bcm_dma_reset(device_t dev, int ch) { struct bcm_dma_softc *sc = device_get_softc(dev); struct bcm_dma_cb *cb; uint32_t cs; int count; if (ch < 0 || 
ch >= BCM_DMA_CH_MAX) return; cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch)); if (cs & CS_ACTIVE) { /* pause current task */ bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0); count = 1000; do { cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch)); } while (!(cs & CS_ISPAUSED) && (count-- > 0)); if (!(cs & CS_ISPAUSED)) { device_printf(dev, "Can't abort DMA transfer at channel %d\n", ch); } bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0); /* Complete everything, clear interrupt */ bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ABORT | CS_INT | CS_END | CS_ACTIVE); } /* clear control blocks */ bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0); bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0); /* Reset control block */ cb = sc->sc_dma_ch[ch].cb; bzero(cb, sizeof(*cb)); cb->info = INFO_WAIT_RESP; } static int bcm_dma_init(device_t dev) { struct bcm_dma_softc *sc = device_get_softc(dev); uint32_t mask; struct bcm_dma_ch *ch; void *cb_virt; vm_paddr_t cb_phys; int err; int i; /* disable and clear interrupt status */ bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, 0); bus_write_4(sc->sc_mem, BCM_DMA_INT_STATUS, 0); /* Allocate control blocks for the DMA channels */ /* p.40 of spec - control blocks should be 32-byte (256-bit) aligned */ err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct bcm_dma_cb), 1, sizeof(struct bcm_dma_cb), BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_dma_tag); if (err) { device_printf(dev, "failed to allocate DMA tag\n"); return (err); } /* setup initial settings */ for (i = 0; i < BCM_DMA_CH_MAX; i++) { ch = &sc->sc_dma_ch[i]; err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ch->dma_map); if (err) { device_printf(dev, "cannot allocate DMA memory\n"); break; } /* * Least alignment for busdma-allocated stuff is cache * line size, so just make sure nothing stupid happened * and we got a properly aligned address */ if ((uintptr_t)cb_virt & 0x1f) { device_printf(dev, "DMA address is not 32-byte aligned: %p\n", (void*)cb_virt); break; } err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt, sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys, BUS_DMA_WAITOK); if (err) { device_printf(dev, "cannot load DMA memory\n"); break; } bzero(ch, sizeof(struct bcm_dma_ch)); ch->ch = i; ch->cb = cb_virt; ch->vc_cb = cb_phys; ch->intr_func = NULL; ch->intr_arg = NULL; ch->flags = BCM_DMA_CH_UNMAP; ch->cb->info = INFO_WAIT_RESP; /* reset DMA engine */ bcm_dma_reset(dev, i); } /* now use DMA2/DMA3 only */ sc->sc_dma_ch[2].flags = BCM_DMA_CH_FREE; sc->sc_dma_ch[3].flags = BCM_DMA_CH_FREE; /* enable DMAs */ mask = 0; for (i = 0; i < BCM_DMA_CH_MAX; i++) if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) mask |= (1 << i); bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, mask); return (0); } /* * Allocate DMA channel for further use, returns channel # or * BCM_DMA_CH_INVALID */ int bcm_dma_allocate(int req_ch) { struct bcm_dma_softc *sc = bcm_dma_sc; int ch = BCM_DMA_CH_INVALID; int i; if (req_ch >= BCM_DMA_CH_MAX) return (BCM_DMA_CH_INVALID); /* Auto (req_ch < 0) or a specific channel requested */ mtx_lock(&sc->sc_mtx); if (req_ch < 0) { for (i = 0; i < BCM_DMA_CH_MAX; i++) { if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) { ch = i; sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE; sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED; break; } } } else { if (sc->sc_dma_ch[req_ch].flags & BCM_DMA_CH_FREE) { ch = req_ch; sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE; sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED; } } mtx_unlock(&sc->sc_mtx); return (ch); } /* * Frees allocated channel.
Returns 0 on success, -1 otherwise */ int bcm_dma_free(int ch) { struct bcm_dma_softc *sc = bcm_dma_sc; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); mtx_lock(&sc->sc_mtx); if (sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED) { sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_FREE; sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_USED; sc->sc_dma_ch[ch].intr_func = NULL; sc->sc_dma_ch[ch].intr_arg = NULL; /* reset DMA engine */ bcm_dma_reset(sc->sc_dev, ch); } mtx_unlock(&sc->sc_mtx); return (0); } /* * Assign handler function for channel interrupt * Returns 0 on success, -1 otherwise */ int bcm_dma_setup_intr(int ch, void (*func)(int, void *), void *arg) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_cb *cb; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); sc->sc_dma_ch[ch].intr_func = func; sc->sc_dma_ch[ch].intr_arg = arg; cb = sc->sc_dma_ch[ch].cb; cb->info |= INFO_INT_EN; return (0); } /* * Setup DMA source parameters * ch - channel number * dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if * source is physical memory * inc_addr - BCM_DMA_INC_ADDR if source address * should be increased after each access or * BCM_DMA_SAME_ADDR if address should remain * the same * width - size of read operation, BCM_DMA_32BIT * for 32bit bursts, BCM_DMA_128BIT for 128 bits * * Returns 0 on success, -1 otherwise */ int bcm_dma_setup_src(int ch, int dreq, int inc_addr, int width) { struct bcm_dma_softc *sc = bcm_dma_sc; uint32_t info; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); info = sc->sc_dma_ch[ch].cb->info; info &= ~INFO_PERMAP_MASK; info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK; if (dreq) info |= INFO_S_DREQ; else info &= ~INFO_S_DREQ; if (width == BCM_DMA_128BIT) info |= INFO_S_WIDTH; else info &= ~INFO_S_WIDTH; if (inc_addr == BCM_DMA_INC_ADDR) info |= INFO_S_INC; else info &= ~INFO_S_INC; sc->sc_dma_ch[ch].cb->info = info; return (0); } /* * Setup DMA destination parameters * ch - channel number * dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if * destination is physical memory * inc_addr - BCM_DMA_INC_ADDR if source address * should be increased after each access or * BCM_DMA_SAME_ADDR if address should remain * the same * width - size of write operation, BCM_DMA_32BIT * for 32bit bursts, BCM_DMA_128BIT for 128 bits * * Returns 0 on success, -1 otherwise */ int bcm_dma_setup_dst(int ch, int dreq, int inc_addr, int width) { struct bcm_dma_softc *sc = bcm_dma_sc; uint32_t info; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); info = sc->sc_dma_ch[ch].cb->info; info &= ~INFO_PERMAP_MASK; info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK; if (dreq) info |= INFO_D_DREQ; else info &= ~INFO_D_DREQ; if (width == BCM_DMA_128BIT) info |= INFO_D_WIDTH; else info &= ~INFO_D_WIDTH; if (inc_addr == BCM_DMA_INC_ADDR) info |= INFO_D_INC; else info &= ~INFO_D_INC; sc->sc_dma_ch[ch].cb->info = info; return (0); } #ifdef DEBUG void bcm_dma_cb_dump(struct bcm_dma_cb *cb) { printf("DMA CB "); printf("INFO: %8.8x ", cb->info); printf("SRC: %8.8x ", cb->src); printf("DST: %8.8x ", cb->dst); printf("LEN: %8.8x ", cb->len); printf("\n"); printf("STRIDE: %8.8x ", cb->stride); printf("NEXT: %8.8x ", cb->next); printf("RSVD1: %8.8x ", cb->rsvd1); printf("RSVD2: %8.8x ", cb->rsvd2); printf("\n"); } void bcm_dma_reg_dump(int ch) { struct bcm_dma_softc *sc = bcm_dma_sc; int i; uint32_t reg; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return; 
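/* Dump the whole per-channel register window: MAX_REG words starting at BCM_DMA_CH(ch). */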
printf("DMA%d: ", ch); for (i = 0; i < MAX_REG; i++) { reg = bus_read_4(sc->sc_mem, BCM_DMA_CH(ch) + i*4); printf("%8.8x ", reg); } printf("\n"); } #endif /* * Start DMA transaction * ch - channel number * src, dst - source and destination address in * ARM physical memory address space. * len - amount of bytes to be transfered * * Returns 0 on success, -1 otherwise */ int bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_cb *cb; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); cb = sc->sc_dma_ch[ch].cb; if (BCM2835_ARM_IS_IO(src)) cb->src = IO_TO_VCBUS(src); else cb->src = PHYS_TO_VCBUS(src); if (BCM2835_ARM_IS_IO(dst)) cb->dst = IO_TO_VCBUS(dst); else cb->dst = PHYS_TO_VCBUS(dst); cb->len = len; bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_ch[ch].dma_map, BUS_DMASYNC_PREWRITE); bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), sc->sc_dma_ch[ch].vc_cb); bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE); #ifdef DEBUG bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb); bcm_dma_reg_dump(ch); #endif return (0); } /* * Get length requested for DMA transaction * ch - channel number * * Returns size of transaction, 0 if channel is invalid */ uint32_t bcm_dma_length(int ch) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_cb *cb; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (0); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (0); cb = sc->sc_dma_ch[ch].cb; return (cb->len); } static void bcm_dma_intr(void *arg) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg; uint32_t cs, debug; /* my interrupt? */ cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch)); if (!(cs & (CS_INT | CS_ERR))) return; /* running? */ if (!(ch->flags & BCM_DMA_CH_USED)) { device_printf(sc->sc_dev, "unused DMA intr CH=%d, CS=%x\n", ch->ch, cs); return; } if (cs & CS_ERR) { debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch)); device_printf(sc->sc_dev, "DMA error %d on CH%d\n", debug & DEBUG_ERROR_MASK, ch->ch); bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch), debug & DEBUG_ERROR_MASK); bcm_dma_reset(sc->sc_dev, ch->ch); } if (cs & CS_INT) { /* acknowledge interrupt */ bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch), CS_INT | CS_END); /* Prepare for possible access to len field */ bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map, BUS_DMASYNC_POSTWRITE); /* save callback function and argument */ if (ch->intr_func) ch->intr_func(ch->ch, ch->intr_arg); } } static int bcm_dma_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-dma")) return (ENXIO); device_set_desc(dev, "BCM2835 DMA Controller"); return (BUS_PROBE_DEFAULT); } static int bcm_dma_attach(device_t dev) { struct bcm_dma_softc *sc = device_get_softc(dev); int rid, err = 0; int i; sc->sc_dev = dev; if (bcm_dma_sc) return (ENXIO); for (i = 0; i < BCM_DMA_CH_MAX; i++) { sc->sc_irq[i] = NULL; sc->sc_intrhand[i] = NULL; } /* DMA0 - DMA14 */ rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } /* IRQ DMA0 - DMA11 XXX NOT USE DMA12(spurious?) 
*/ for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) { sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->sc_irq[rid] == NULL) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->sc_irq[rid], INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_dma_intr, &sc->sc_dma_ch[rid], &sc->sc_intrhand[rid])) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } } mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF); bcm_dma_sc = sc; err = bcm_dma_init(dev); if (err) goto fail; return (err); fail: if (sc->sc_mem) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem); for (i = 0; i < BCM_DMA_CH_MAX; i++) { if (sc->sc_intrhand[i]) bus_teardown_intr(dev, sc->sc_irq[i], sc->sc_intrhand[i]); if (sc->sc_irq[i]) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq[i]); } return (err); } static device_method_t bcm_dma_methods[] = { DEVMETHOD(device_probe, bcm_dma_probe), DEVMETHOD(device_attach, bcm_dma_attach), { 0, 0 } }; static driver_t bcm_dma_driver = { "bcm_dma", bcm_dma_methods, sizeof(struct bcm_dma_softc), }; static devclass_t bcm_dma_devclass; DRIVER_MODULE(bcm_dma, simplebus, bcm_dma_driver, bcm_dma_devclass, 0, 0); MODULE_VERSION(bcm_dma, 1); Index: head/sys/arm/econa/if_ece.c =================================================================== --- head/sys/arm/econa/if_ece.c (revision 258779) +++ head/sys/arm/econa/if_ece.c (revision 258780) @@ -1,1947 +1,1947 @@ /*- * Copyright (c) 2009 Yohanes Nugroho * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" static uint8_t vlan0_mac[ETHER_ADDR_LEN] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x19}; /* * Boot loader expects the hardware state to be the same when we * restart the device (warm boot), so we need to save the initial * config values. 
*/ int initial_switch_config; int initial_cpu_config; int initial_port0_config; int initial_port1_config; static inline uint32_t read_4(struct ece_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static inline void write_4(struct ece_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off, val); } #define ECE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define ECE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define ECE_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ MTX_NETWORK_LOCK, MTX_DEF) #define ECE_TXLOCK(_sc) mtx_lock(&(_sc)->sc_mtx_tx) #define ECE_TXUNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx_tx) #define ECE_TXLOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx_tx, device_get_nameunit(_sc->dev), \ "ECE TX Lock", MTX_DEF) #define ECE_CLEANUPLOCK(_sc) mtx_lock(&(_sc)->sc_mtx_cleanup) #define ECE_CLEANUPUNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx_cleanup) #define ECE_CLEANUPLOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx_cleanup, device_get_nameunit(_sc->dev), \ "ECE cleanup Lock", MTX_DEF) #define ECE_RXLOCK(_sc) mtx_lock(&(_sc)->sc_mtx_rx) #define ECE_RXUNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx_rx) #define ECE_RXLOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx_rx, device_get_nameunit(_sc->dev), \ "ECE RX Lock", MTX_DEF) #define ECE_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define ECE_TXLOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx_tx); #define ECE_RXLOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx_rx); #define ECE_CLEANUPLOCK_DESTROY(_sc) \ mtx_destroy(&_sc->sc_mtx_cleanup); #define ECE_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define ECE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); static devclass_t ece_devclass; /* ifnet entry points */ static void eceinit_locked(void *); static void ecestart_locked(struct ifnet *); static void eceinit(void *); static void ecestart(struct ifnet *); static void ecestop(struct ece_softc *); static int eceioctl(struct ifnet * ifp, u_long, caddr_t); /* bus entry points */ static int ece_probe(device_t dev); static int ece_attach(device_t dev); static int ece_detach(device_t dev); static void ece_intr(void *); static void ece_intr_qf(void *); static void ece_intr_status(void *xsc); /* helper routines */ static int ece_activate(device_t dev); static void ece_deactivate(device_t dev); static int ece_ifmedia_upd(struct ifnet *ifp); static void ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int ece_get_mac(struct ece_softc *sc, u_char *eaddr); static void ece_set_mac(struct ece_softc *sc, u_char *eaddr); static int configure_cpu_port(struct ece_softc *sc); static int configure_lan_port(struct ece_softc *sc, int phy_type); static void set_pvid(struct ece_softc *sc, int port0, int port1, int cpu); static void set_vlan_vid(struct ece_softc *sc, int vlan); static void set_vlan_member(struct ece_softc *sc, int vlan); static void set_vlan_tag(struct ece_softc *sc, int vlan); static int hardware_init(struct ece_softc *sc); static void ece_intr_rx_locked(struct ece_softc *sc, int count); static void ece_free_desc_dma_tx(struct ece_softc *sc); static void ece_free_desc_dma_rx(struct ece_softc *sc); static void ece_intr_task(void *arg, int pending __unused); static void ece_tx_task(void *arg, int pending __unused); static void ece_cleanup_task(void *arg, int pending __unused); static int ece_allocate_dma(struct ece_softc *sc); static void ece_intr_tx(void *xsc); static void clear_mac_entries(struct ece_softc *ec, int include_this_mac); static uint32_t read_mac_entry(struct ece_softc *ec, uint8_t *mac_result, int first); 
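/* * Note on the (1U << 31) hunks in this commit (CS_RESET earlier, CPU_PORT_CONFIG below, and the i.MX6 register masks further down): with a 32-bit int, (1 << 31) shifts a signed 1 into the sign bit, which is undefined behavior in C, while 1U makes the operand unsigned and the result a well-defined mask. A minimal illustrative sketch with a hypothetical helper name, not part of the driver: */ static inline uint32_t bit31_mask(void) { return (1U << 31); /* bit 31 is reachable safely through the unsigned 1U */ }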
/*PHY related functions*/ static inline int phy_read(struct ece_softc *sc, int phy, int reg) { int val; int ii; int status; write_4(sc, PHY_CONTROL, PHY_RW_OK); write_4(sc, PHY_CONTROL, (PHY_ADDRESS(phy)|PHY_READ_COMMAND | PHY_REGISTER(reg))); for (ii = 0; ii < 0x1000; ii++) { status = read_4(sc, PHY_CONTROL); if (status & PHY_RW_OK) { /* Clear the rw_ok status, and clear other * bits value. */ write_4(sc, PHY_CONTROL, PHY_RW_OK); val = PHY_GET_DATA(status); return (val); } } return (0); } static inline void phy_write(struct ece_softc *sc, int phy, int reg, int data) { int ii; write_4(sc, PHY_CONTROL, PHY_RW_OK); write_4(sc, PHY_CONTROL, PHY_ADDRESS(phy) | PHY_REGISTER(reg) | PHY_WRITE_COMMAND | PHY_DATA(data)); for (ii = 0; ii < 0x1000; ii++) { if (read_4(sc, PHY_CONTROL) & PHY_RW_OK) { /* Clear the rw_ok status, and clear other * bits value. */ write_4(sc, PHY_CONTROL, PHY_RW_OK); return; } } } static int get_phy_type(struct ece_softc *sc) { uint16_t phy0_id = 0, phy1_id = 0; /* * Use SMI (MDC/MDIO) to read Link Partner's PHY Identifier * Register 1. */ phy0_id = phy_read(sc, 0, 0x2); phy1_id = phy_read(sc, 1, 0x2); if ((phy0_id == 0xFFFF) && (phy1_id == 0x000F)) return (ASIX_GIGA_PHY); else if ((phy0_id == 0x0243) && (phy1_id == 0x0243)) return (TWO_SINGLE_PHY); else if ((phy0_id == 0xFFFF) && (phy1_id == 0x0007)) return (VSC8601_GIGA_PHY); else if ((phy0_id == 0x0243) && (phy1_id == 0xFFFF)) return (IC_PLUS_PHY); return (NOT_FOUND_PHY); } static int ece_probe(device_t dev) { device_set_desc(dev, "Econa Ethernet Controller"); return (0); } static int ece_attach(device_t dev) { struct ece_softc *sc; struct ifnet *ifp = NULL; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; u_char eaddr[ETHER_ADDR_LEN]; int err; int i, rid; uint32_t rnd; err = 0; sc = device_get_softc(dev); sc->dev = dev; rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) goto out; power_on_network_interface(); rid = 0; sc->irq_res_status = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res_status == NULL) goto out; rid = 1; /*TSTC: Fm-Switch-Tx-Complete*/ sc->irq_res_tx = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res_tx == NULL) goto out; rid = 2; /*FSRC: Fm-Switch-Rx-Complete*/ sc->irq_res_rec = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res_rec == NULL) goto out; rid = 4; /*FSQF: Fm-Switch-Queue-Full*/ sc->irq_res_qf = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res_qf == NULL) goto out; err = ece_activate(dev); if (err) goto out; /* Sysctls */ sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); ECE_LOCK_INIT(sc); callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); if ((err = ece_get_mac(sc, eaddr)) != 0) { /* No MAC address configured. Generate the random one. */ if (bootverbose) device_printf(dev, "Generating random ethernet address.\n"); rnd = arc4random(); /*from if_ae.c/if_ate.c*/ /* * Set OUI to convenient locally assigned address. 'b' * is 0x62, which has the locally assigned bit set, and * the broadcast/multicast bit clear. */ eaddr[0] = 'b'; eaddr[1] = 's'; eaddr[2] = 'd'; eaddr[3] = (rnd >> 16) & 0xff; eaddr[4] = (rnd >> 8) & 0xff; eaddr[5] = rnd & 0xff; for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = vlan0_mac[i]; } ece_set_mac(sc, eaddr); sc->ifp = ifp = if_alloc(IFT_ETHER); /* Only one PHY at address 0 in this device. 
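* mii_attach() below is therefore passed PHY address 0 explicitly rather than MII_PHY_ANY, so only that address is probed.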
*/ err = mii_attach(dev, &sc->miibus, ifp, ece_ifmedia_upd, ece_ifmedia_sts, BMSR_DEFCAPMASK, 0, MII_OFFSET_ANY, 0); if (err != 0) { device_printf(dev, "attaching PHYs failed\n"); goto out; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_capabilities = IFCAP_HWCSUM; ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP); ifp->if_capenable = ifp->if_capabilities; ifp->if_start = ecestart; ifp->if_ioctl = eceioctl; ifp->if_init = eceinit; ifp->if_snd.ifq_drv_maxlen = ECE_MAX_TX_BUFFERS - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ECE_MAX_TX_BUFFERS - 1); IFQ_SET_READY(&ifp->if_snd); /* Create local taskq. */ TASK_INIT(&sc->sc_intr_task, 0, ece_intr_task, sc); TASK_INIT(&sc->sc_tx_task, 1, ece_tx_task, ifp); TASK_INIT(&sc->sc_cleanup_task, 2, ece_cleanup_task, sc); sc->sc_tq = taskqueue_create_fast("ece_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->sc_tq); if (sc->sc_tq == NULL) { device_printf(sc->dev, "could not create taskqueue\n"); goto out; } ether_ifattach(ifp, eaddr); /* * Activate interrupts */ err = bus_setup_intr(dev, sc->irq_res_rec, INTR_TYPE_NET | INTR_MPSAFE, NULL, ece_intr, sc, &sc->intrhand); if (err) { ether_ifdetach(ifp); ECE_LOCK_DESTROY(sc); goto out; } err = bus_setup_intr(dev, sc->irq_res_status, INTR_TYPE_NET | INTR_MPSAFE, NULL, ece_intr_status, sc, &sc->intrhand_status); if (err) { ether_ifdetach(ifp); ECE_LOCK_DESTROY(sc); goto out; } err = bus_setup_intr(dev, sc->irq_res_qf, INTR_TYPE_NET | INTR_MPSAFE, NULL,ece_intr_qf, sc, &sc->intrhand_qf); if (err) { ether_ifdetach(ifp); ECE_LOCK_DESTROY(sc); goto out; } err = bus_setup_intr(dev, sc->irq_res_tx, INTR_TYPE_NET | INTR_MPSAFE, NULL, ece_intr_tx, sc, &sc->intrhand_tx); if (err) { ether_ifdetach(ifp); ECE_LOCK_DESTROY(sc); goto out; } ECE_TXLOCK_INIT(sc); ECE_RXLOCK_INIT(sc); ECE_CLEANUPLOCK_INIT(sc); /* Enable all interrupt sources. */ write_4(sc, INTERRUPT_MASK, 0x00000000); /* Enable port 0. */ write_4(sc, PORT_0_CONFIG, read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE)); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); out: if (err) ece_deactivate(dev); if (err && ifp) if_free(ifp); return (err); } static int ece_detach(device_t dev) { struct ece_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; ecestop(sc); if (ifp != NULL) { ether_ifdetach(ifp); if_free(ifp); } ece_deactivate(dev); return (0); } static void ece_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { u_int32_t *paddr; KASSERT(nsegs == 1, ("wrong number of segments, should be 1")); paddr = arg; *paddr = segs->ds_addr; } static int ece_alloc_desc_dma_tx(struct ece_softc *sc) { int i; int error; /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */ error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */ 16, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, /* max size */ 1, /*nsegments */ sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->dmatag_data_tx); /* dmat */ /* Allocate memory for TX ring. 
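* The ring is one physically contiguous array of eth_tx_desc_t entries; its bus address is later handed to the controller through TS_DESCRIPTOR_BASE_ADDR in ece_activate().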
*/ error = bus_dmamem_alloc(sc->dmatag_data_tx, (void**)&(sc->desc_tx), BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &(sc->dmamap_ring_tx)); if (error) { if_printf(sc->ifp, "failed to allocate DMA memory\n"); bus_dma_tag_destroy(sc->dmatag_data_tx); sc->dmatag_data_tx = 0; return (ENXIO); } /* Load Ring DMA. */ error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx, sc->desc_tx, sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, ece_getaddr, &(sc->ring_paddr_tx), BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "can't load descriptor\n"); bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx, sc->dmamap_ring_tx); sc->desc_tx = NULL; bus_dma_tag_destroy(sc->dmatag_data_tx); sc->dmatag_data_tx = 0; return (ENXIO); } /* Allocate a busdma tag for mbufs. Alignment is 2 bytes */ error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES*MAX_FRAGMENT, /* maxsize */ MAX_FRAGMENT, /* nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->dmatag_ring_tx); /* dmat */ if (error) { if_printf(sc->ifp, "failed to create busdma tag for mbufs\n"); return (ENXIO); } for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) { /* Create dma map for each descriptor. */ error = bus_dmamap_create(sc->dmatag_ring_tx, 0, &(sc->tx_desc[i].dmamap)); if (error) { if_printf(sc->ifp, "failed to create map for mbuf\n"); return (ENXIO); } } return (0); } static void ece_free_desc_dma_tx(struct ece_softc *sc) { int i; for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) { if (sc->tx_desc[i].buff) { m_freem(sc->tx_desc[i].buff); sc->tx_desc[i].buff = 0; } } if (sc->dmamap_ring_tx) { bus_dmamap_unload(sc->dmatag_data_tx, sc->dmamap_ring_tx); if (sc->desc_tx) { bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx, sc->dmamap_ring_tx); } sc->dmamap_ring_tx = 0; } if (sc->dmatag_data_tx) { bus_dma_tag_destroy(sc->dmatag_data_tx); sc->dmatag_data_tx = 0; } if (sc->dmatag_ring_tx) { for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) { bus_dmamap_destroy(sc->dmatag_ring_tx, sc->tx_desc[i].dmamap); sc->tx_desc[i].dmamap = 0; } bus_dma_tag_destroy(sc->dmatag_ring_tx); sc->dmatag_ring_tx = 0; } } static int ece_alloc_desc_dma_rx(struct ece_softc *sc) { int error; int i; /* Allocate a busdma tag and DMA safe memory for RX descriptors. */ error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */ 16, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ /* maxsize, nsegments */ sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 1, /* maxsegsz, flags */ sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 0, NULL, NULL, /* lockfunc, lockfuncarg */ &sc->dmatag_data_rx); /* dmat */ /* Allocate RX ring. */ error = bus_dmamem_alloc(sc->dmatag_data_rx, (void**)&(sc->desc_rx), BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &(sc->dmamap_ring_rx)); if (error) { if_printf(sc->ifp, "failed to allocate DMA memory\n"); return (ENXIO); } /* Load dmamap. */ error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx, sc->desc_rx, sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, ece_getaddr, &(sc->ring_paddr_rx), BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "can't load descriptor\n"); bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx, sc->dmamap_ring_rx); bus_dma_tag_destroy(sc->dmatag_data_rx); sc->desc_rx = NULL; return (ENXIO); } /* Allocate a busdma tag for mbufs.
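* Each RX buffer gets its own dmamap, and ece_new_rxbuf() keeps one spare map so a failed refill never leaves a descriptor without a mapped cluster.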
*/ error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */ 16, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, 1, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->dmatag_ring_rx); /* dmat */ if (error) { if_printf(sc->ifp, "failed to create busdma tag for mbufs\n"); return (ENXIO); } for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) { error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_desc[i].dmamap); if (error) { if_printf(sc->ifp, "failed to create map for mbuf\n"); return (ENXIO); } } error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_sparemap); if (error) { if_printf(sc->ifp, "failed to create spare map\n"); return (ENXIO); } return (0); } static void ece_free_desc_dma_rx(struct ece_softc *sc) { int i; for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) { if (sc->rx_desc[i].buff) { m_freem(sc->rx_desc[i].buff); sc->rx_desc[i].buff = 0; } } if (sc->dmatag_data_rx) { bus_dmamap_unload(sc->dmatag_data_rx, sc->dmamap_ring_rx); bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx, sc->dmamap_ring_rx); bus_dma_tag_destroy(sc->dmatag_data_rx); sc->dmatag_data_rx = 0; sc->dmamap_ring_rx = 0; sc->desc_rx = 0; } if (sc->dmatag_ring_rx) { for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_desc[i].dmamap); bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_sparemap); bus_dma_tag_destroy(sc->dmatag_ring_rx); sc->dmatag_ring_rx = 0; } } static int ece_new_rxbuf(struct ece_softc *sc, struct rx_desc_info* descinfo) { struct mbuf *new_mbuf; bus_dma_segment_t seg[1]; bus_dmamap_t map; int error; int nsegs; bus_dma_tag_t tag; tag = sc->dmatag_ring_rx; new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (new_mbuf == NULL) return (ENOBUFS); new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES; error = bus_dmamap_load_mbuf_sg(tag, sc->rx_sparemap, new_mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("Too many segments returned!")); if (nsegs != 1 || error) { m_free(new_mbuf); return (ENOBUFS); } if (descinfo->buff != NULL) { bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(tag, descinfo->dmamap); } map = descinfo->dmamap; descinfo->dmamap = sc->rx_sparemap; sc->rx_sparemap = map; bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_PREREAD); descinfo->buff = new_mbuf; descinfo->desc->data_ptr = seg->ds_addr; descinfo->desc->length = seg->ds_len - 2; return (0); } static int ece_allocate_dma(struct ece_softc *sc) { eth_tx_desc_t *desctx; eth_rx_desc_t *descrx; int i; int error; /* Create parent tag for tx and rx */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, 0,/* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_parent_tag); ece_alloc_desc_dma_tx(sc); for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) { desctx = (eth_tx_desc_t *)(&sc->desc_tx[i]); memset(desctx, 0, sizeof(eth_tx_desc_t)); desctx->length = MAX_PACKET_LEN; desctx->cown = 1; if (i == ECE_MAX_TX_BUFFERS - 1) desctx->eor = 1; } ece_alloc_desc_dma_rx(sc); for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) { descrx = &(sc->desc_rx[i]); memset(descrx, 0, sizeof(eth_rx_desc_t)); sc->rx_desc[i].desc = descrx; sc->rx_desc[i].buff = 0; ece_new_rxbuf(sc, &(sc->rx_desc[i])); if (i == ECE_MAX_RX_BUFFERS - 1) descrx->eor = 1; } sc->tx_prod = 0; sc->tx_cons = 0; sc->last_rx =
0; sc->desc_curr_tx = 0; return (0); } static int ece_activate(device_t dev) { struct ece_softc *sc; int err; uint32_t mac_port_config; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; initial_switch_config = read_4(sc, SWITCH_CONFIG); initial_cpu_config = read_4(sc, CPU_PORT_CONFIG); initial_port0_config = read_4(sc, MAC_PORT_0_CONFIG); initial_port1_config = read_4(sc, MAC_PORT_1_CONFIG); /* Disable Port 0 */ mac_port_config = read_4(sc, MAC_PORT_0_CONFIG); mac_port_config |= (PORT_DISABLE); write_4(sc, MAC_PORT_0_CONFIG, mac_port_config); /* Disable Port 1 */ mac_port_config = read_4(sc, MAC_PORT_1_CONFIG); mac_port_config |= (PORT_DISABLE); write_4(sc, MAC_PORT_1_CONFIG, mac_port_config); err = ece_allocate_dma(sc); if (err) { if_printf(sc->ifp, "failed to allocate DMA\n"); goto out; } write_4(sc, TS_DESCRIPTOR_POINTER, sc->ring_paddr_tx); write_4(sc, TS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_tx); write_4(sc, FS_DESCRIPTOR_POINTER, sc->ring_paddr_rx); write_4(sc, FS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_rx); write_4(sc, FS_DMA_CONTROL, 1); return (0); out: return (ENXIO); } static void ece_deactivate(device_t dev) { struct ece_softc *sc; sc = device_get_softc(dev); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res_rec, sc->intrhand); sc->intrhand = 0; if (sc->intrhand_qf) bus_teardown_intr(dev, sc->irq_res_qf, sc->intrhand_qf); sc->intrhand_qf = 0; bus_generic_detach(sc->dev); if (sc->miibus) device_delete_child(sc->dev, sc->miibus); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = 0; if (sc->irq_res_rec) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res_rec), sc->irq_res_rec); if (sc->irq_res_qf) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res_qf), sc->irq_res_qf); if (sc->irq_res_status) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res_status), sc->irq_res_status); sc->irq_res_rec = 0; sc->irq_res_qf = 0; sc->irq_res_status = 0; ECE_TXLOCK_DESTROY(sc); ECE_RXLOCK_DESTROY(sc); ece_free_desc_dma_tx(sc); ece_free_desc_dma_rx(sc); return; } /* * Change media according to request. */ static int ece_ifmedia_upd(struct ifnet *ifp) { struct ece_softc *sc = ifp->if_softc; struct mii_data *mii; int error; mii = device_get_softc(sc->miibus); ECE_LOCK(sc); error = mii_mediachg(mii); ECE_UNLOCK(sc); return (error); } /* * Notify the world which media we're using. */ static void ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct ece_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->miibus); ECE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; ECE_UNLOCK(sc); } static void ece_tick(void *xsc) { struct ece_softc *sc = xsc; struct mii_data *mii; int active; mii = device_get_softc(sc->miibus); active = mii->mii_media_active; mii_tick(mii); /* * Schedule another timeout one second from now.
callout_reset(&sc->tick_ch, hz, ece_tick, sc); } static uint32_t read_mac_entry(struct ece_softc *ec, uint8_t *mac_result, int first) { uint32_t ii; struct arl_table_entry_t entry; uint32_t *entry_val; write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0); write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0); write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0); if (first) write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x1); else write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x2); for (ii = 0; ii < 0x1000; ii++) if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1)) break; entry_val = (uint32_t*) (&entry); entry_val[0] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_1); entry_val[1] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_2); if (mac_result) memcpy(mac_result, entry.mac_addr, ETHER_ADDR_LEN); return (entry.table_end); } static uint32_t write_arl_table_entry(struct ece_softc *ec, uint32_t filter, uint32_t vlan_mac, uint32_t vlan_gid, uint32_t age_field, uint32_t port_map, const uint8_t *mac_addr) { uint32_t ii; uint32_t *entry_val; struct arl_table_entry_t entry; memset(&entry, 0, sizeof(entry)); entry.filter = filter; entry.vlan_mac = vlan_mac; entry.vlan_gid = vlan_gid; entry.age_field = age_field; entry.port_map = port_map; memcpy(entry.mac_addr, mac_addr, ETHER_ADDR_LEN); entry_val = (uint32_t*) (&entry); write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0); write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0); write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0); write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, entry_val[0]); write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, entry_val[1]); write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, ARL_WRITE_COMMAND); for (ii = 0; ii < 0x1000; ii++) if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) & ARL_COMMAND_COMPLETE) return (1); /* Write OK. */ /* Write failed. */ return (0); } static void remove_mac_entry(struct ece_softc *sc, uint8_t *mac) { /* An invalid age_field means erase this entry. */ write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID, INVALID_ENTRY, VLAN0_GROUP, mac); } static void add_mac_entry(struct ece_softc *sc, uint8_t *mac) { write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID, NEW_ENTRY, VLAN0_GROUP, mac); } /** * The behavior of ARL table reading and deletion is not well defined * in the documentation. To be safe, all MAC addresses are put into a * list, then deleted. * */ static void clear_mac_entries(struct ece_softc *ec, int include_this_mac) { int table_end; struct mac_list * temp; struct mac_list * mac_list_header; struct mac_list * current; char mac[ETHER_ADDR_LEN]; current = 0; mac_list_header = 0; table_end = read_mac_entry(ec, mac, 1); while (!table_end) { if (!include_this_mac && memcmp(mac, vlan0_mac, ETHER_ADDR_LEN) == 0) { /* Read next entry. */ table_end = read_mac_entry(ec, mac, 0); continue; } temp = (struct mac_list*)malloc(sizeof(struct mac_list), M_DEVBUF, M_NOWAIT | M_ZERO); memcpy(temp->mac_addr, mac, ETHER_ADDR_LEN); temp->next = 0; if (mac_list_header) { current->next = temp; current = temp; } else { mac_list_header = temp; current = temp; } /* Read next entry. */ table_end = read_mac_entry(ec, mac, 0); } current = mac_list_header; while (current) { remove_mac_entry(ec, current->mac_addr); temp = current; current = current->next; free(temp, M_DEVBUF); } } static int configure_lan_port(struct ece_softc *sc, int phy_type) { uint32_t sw_config; uint32_t mac_port_config; /* * Configure switch */ sw_config = read_4(sc, SWITCH_CONFIG); /* Enable fast aging. */ sw_config |= FAST_AGING; /* Enable IVL learning. */ sw_config |= IVL_LEARNING; /* Disable hardware NAT.
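* NIC mode is used instead, with the L2 lookup skipped on both ports.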
*/ sw_config &= ~(HARDWARE_NAT); sw_config |= SKIP_L2_LOOKUP_PORT_0 | SKIP_L2_LOOKUP_PORT_1| NIC_MODE; write_4(sc, SWITCH_CONFIG, sw_config); sw_config = read_4(sc, SWITCH_CONFIG); mac_port_config = read_4(sc, MAC_PORT_0_CONFIG); if (!(mac_port_config & 0x1) || (mac_port_config & 0x2)) if_printf(sc->ifp, "Link Down\n"); else write_4(sc, MAC_PORT_0_CONFIG, mac_port_config); return (0); } static void set_pvid(struct ece_softc *sc, int port0, int port1, int cpu) { uint32_t val; val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 0)); write_4(sc, VLAN_PORT_PVID, val); val = read_4(sc, VLAN_PORT_PVID) | ((port0) & 0x07); write_4(sc, VLAN_PORT_PVID, val); val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 4)); write_4(sc, VLAN_PORT_PVID, val); val = read_4(sc, VLAN_PORT_PVID) | (((port1) & 0x07) << 4); write_4(sc, VLAN_PORT_PVID, val); val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 8)); write_4(sc, VLAN_PORT_PVID, val); val = read_4(sc, VLAN_PORT_PVID) | (((cpu) & 0x07) << 8); write_4(sc, VLAN_PORT_PVID, val); } /* VLAN related functions */ static void set_vlan_vid(struct ece_softc *sc, int vlan) { const uint32_t regs[] = { VLAN_VID_0_1, VLAN_VID_0_1, VLAN_VID_2_3, VLAN_VID_2_3, VLAN_VID_4_5, VLAN_VID_4_5, VLAN_VID_6_7, VLAN_VID_6_7 }; const int vids[] = { VLAN0_VID, VLAN1_VID, VLAN2_VID, VLAN3_VID, VLAN4_VID, VLAN5_VID, VLAN6_VID, VLAN7_VID }; uint32_t val; uint32_t reg; int vid; reg = regs[vlan]; vid = vids[vlan]; if (vlan & 1) { val = read_4(sc, reg); write_4(sc, reg, val & (~(0xFFF << 0))); val = read_4(sc, reg); write_4(sc, reg, val|((vid & 0xFFF) << 0)); } else { val = read_4(sc, reg); write_4(sc, reg, val & (~(0xFFF << 12))); val = read_4(sc, reg); write_4(sc, reg, val|((vid & 0xFFF) << 12)); } } static void set_vlan_member(struct ece_softc *sc, int vlan) { unsigned char shift; uint32_t val; int group; const int groups[] = { VLAN0_GROUP, VLAN1_GROUP, VLAN2_GROUP, VLAN3_GROUP, VLAN4_GROUP, VLAN5_GROUP, VLAN6_GROUP, VLAN7_GROUP }; group = groups[vlan]; shift = vlan*3; val = read_4(sc, VLAN_MEMBER_PORT_MAP) & (~(0x7 << shift)); write_4(sc, VLAN_MEMBER_PORT_MAP, val); val = read_4(sc, VLAN_MEMBER_PORT_MAP); write_4(sc, VLAN_MEMBER_PORT_MAP, val | ((group & 0x7) << shift)); } static void set_vlan_tag(struct ece_softc *sc, int vlan) { unsigned char shift; uint32_t val; int tag = 0; shift = vlan*3; val = read_4(sc, VLAN_TAG_PORT_MAP) & (~(0x7 << shift)); write_4(sc, VLAN_TAG_PORT_MAP, val); val = read_4(sc, VLAN_TAG_PORT_MAP); write_4(sc, VLAN_TAG_PORT_MAP, val | ((tag & 0x7) << shift)); } static int configure_cpu_port(struct ece_softc *sc) { uint32_t cpu_port_config; int i; cpu_port_config = read_4(sc, CPU_PORT_CONFIG); /* SA learning Disable */ cpu_port_config |= (SA_LEARNING_DISABLE); /* set data offset + 2 */ - cpu_port_config &= ~(1 << 31); + cpu_port_config &= ~(1U << 31); write_4(sc, CPU_PORT_CONFIG, cpu_port_config); if (!write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID, STATIC_ENTRY, VLAN0_GROUP, vlan0_mac)) return (1); set_pvid(sc, PORT0_PVID, PORT1_PVID, CPU_PORT_PVID); for (i = 0; i < 8; i++) { set_vlan_vid(sc, i); set_vlan_member(sc, i); set_vlan_tag(sc, i); } /* disable all interrupt status sources */ write_4(sc, INTERRUPT_MASK, 0xffff1fff); /* clear previous interrupt sources */ write_4(sc, INTERRUPT_STATUS, 0x00001FFF); write_4(sc, TS_DMA_CONTROL, 0); write_4(sc, FS_DMA_CONTROL, 0); return (0); } static int hardware_init(struct ece_softc *sc) { int status = 0; static int gw_phy_type; gw_phy_type = get_phy_type(sc); /* Currently only ic_plus phy is supported. 
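* get_phy_type() above identifies the PHY from the MDIO ID registers; any other result makes hardware_init() fail.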
*/ if (gw_phy_type != IC_PLUS_PHY) { device_printf(sc->dev, "PHY type is not supported (%d)\n", gw_phy_type); return (-1); } status = configure_lan_port(sc, gw_phy_type); configure_cpu_port(sc); return (0); } static void set_mac_address(struct ece_softc *sc, const char *mac, int mac_len) { /* An invalid age_field means erase this entry. */ write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID, INVALID_ENTRY, VLAN0_GROUP, mac); memcpy(vlan0_mac, mac, ETHER_ADDR_LEN); write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID, STATIC_ENTRY, VLAN0_GROUP, mac); } static void ece_set_mac(struct ece_softc *sc, u_char *eaddr) { memcpy(vlan0_mac, eaddr, ETHER_ADDR_LEN); set_mac_address(sc, eaddr, ETHER_ADDR_LEN); } /* * TODO: the device doesn't have a MAC address stored, we should read the * configuration stored in FLASH, but the format depends on the * bootloader used. */ static int ece_get_mac(struct ece_softc *sc, u_char *eaddr) { return (ENXIO); } static void ece_intr_rx_locked(struct ece_softc *sc, int count) { struct ifnet *ifp = sc->ifp; struct mbuf *mb; struct rx_desc_info *rxdesc; eth_rx_desc_t *desc; int fssd_curr; int fssd; int i; int idx; int rxcount; uint32_t status; fssd_curr = read_4(sc, FS_DESCRIPTOR_POINTER); fssd = (fssd_curr - (uint32_t)sc->ring_paddr_rx)>>4; desc = sc->rx_desc[sc->last_rx].desc; /* Prepare to read the data in the ring. */ bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (fssd > sc->last_rx) rxcount = fssd - sc->last_rx; else if (fssd < sc->last_rx) rxcount = (ECE_MAX_RX_BUFFERS - sc->last_rx) + fssd; else { if (desc->cown == 0) return; else rxcount = ECE_MAX_RX_BUFFERS; } for (i = 0; i < rxcount; i++) { status = desc->cown; if (!status) break; idx = sc->last_rx; rxdesc = &sc->rx_desc[idx]; mb = rxdesc->buff; if (desc->length < ETHER_MIN_LEN - ETHER_CRC_LEN || desc->length > ETHER_MAX_LEN - ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN) { ifp->if_ierrors++; desc->cown = 0; desc->length = MCLBYTES - 2; /* Invalid packet, skip and process next * packet. */ continue; } if (ece_new_rxbuf(sc, rxdesc) != 0) { ifp->if_iqdrops++; desc->cown = 0; desc->length = MCLBYTES - 2; break; } /** * The device will write to address + 2, so we need to adjust * the address after the packet is received. */ mb->m_data += 2; mb->m_len = mb->m_pkthdr.len = desc->length; mb->m_flags |= M_PKTHDR; mb->m_pkthdr.rcvif = ifp; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { /* Check for a valid checksum. */ if ((!desc->l4f) && (desc->prot != 3)) { mb->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; mb->m_pkthdr.csum_flags |= CSUM_IP_VALID; mb->m_pkthdr.csum_data = 0xffff; } } ECE_RXUNLOCK(sc); (*ifp->if_input)(ifp, mb); ECE_RXLOCK(sc); desc->cown = 0; desc->length = MCLBYTES - 2; bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->last_rx == ECE_MAX_RX_BUFFERS - 1) sc->last_rx = 0; else sc->last_rx++; desc = sc->rx_desc[sc->last_rx].desc; } /* Sync updated flags.
*/ bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return; } static void ece_intr_task(void *arg, int pending __unused) { struct ece_softc *sc = arg; ECE_RXLOCK(sc); ece_intr_rx_locked(sc, -1); ECE_RXUNLOCK(sc); } static void ece_intr(void *xsc) { struct ece_softc *sc = xsc; struct ifnet *ifp = sc->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { write_4(sc, FS_DMA_CONTROL, 0); return; } taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); } static void ece_intr_status(void *xsc) { struct ece_softc *sc = xsc; struct ifnet *ifp = sc->ifp; int stat; stat = read_4(sc, INTERRUPT_STATUS); write_4(sc, INTERRUPT_STATUS, stat); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if ((stat & ERROR_MASK) != 0) ifp->if_iqdrops++; } } static void ece_cleanup_locked(struct ece_softc *sc) { eth_tx_desc_t *desc; if (sc->tx_cons == sc->tx_prod) return; /* Prepare to read the ring (owner bit). */ bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); while (sc->tx_cons != sc->tx_prod) { desc = sc->tx_desc[sc->tx_cons].desc; if (desc->cown != 0) { struct tx_desc_info *td = &(sc->tx_desc[sc->tx_cons]); /* We are finished with this descriptor ... */ bus_dmamap_sync(sc->dmatag_data_tx, td->dmamap, BUS_DMASYNC_POSTWRITE); /* ... and unload, so we can reuse. */ bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap); m_freem(td->buff); td->buff = 0; sc->tx_cons = (sc->tx_cons + 1) % ECE_MAX_TX_BUFFERS; } else { break; } } } static void ece_cleanup_task(void *arg, int pending __unused) { struct ece_softc *sc = arg; ECE_CLEANUPLOCK(sc); ece_cleanup_locked(sc); ECE_CLEANUPUNLOCK(sc); } static void ece_intr_tx(void *xsc) { struct ece_softc *sc = xsc; struct ifnet *ifp = sc->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* This should not happen, stop DMA. */ write_4(sc, FS_DMA_CONTROL, 0); return; } taskqueue_enqueue(sc->sc_tq, &sc->sc_cleanup_task); } static void ece_intr_qf(void *xsc) { struct ece_softc *sc = xsc; struct ifnet *ifp = sc->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* This should not happen, stop DMA. */ write_4(sc, FS_DMA_CONTROL, 0); return; } taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); write_4(sc, FS_DMA_CONTROL, 1); } /* * Reset and initialize the chip */ static void eceinit_locked(void *xsc) { struct ece_softc *sc = xsc; struct ifnet *ifp = sc->ifp; struct mii_data *mii; uint32_t cfg_reg; uint32_t cpu_port_config; uint32_t mac_port_config; while (1) { cfg_reg = read_4(sc, BIST_RESULT_TEST_0); if ((cfg_reg & (1<<17))) break; DELAY(100); } /* Set to default values. */ write_4(sc, SWITCH_CONFIG, 0x007AA7A1); write_4(sc, MAC_PORT_0_CONFIG, 0x00423D00); write_4(sc, MAC_PORT_1_CONFIG, 0x00423D80); write_4(sc, CPU_PORT_CONFIG, 0x004C0000); hardware_init(sc); mac_port_config = read_4(sc, MAC_PORT_0_CONFIG); /* Enable Port 0 */ mac_port_config &= (~(PORT_DISABLE)); write_4(sc, MAC_PORT_0_CONFIG, mac_port_config); cpu_port_config = read_4(sc, CPU_PORT_CONFIG); /* Enable CPU. */ cpu_port_config &= ~(PORT_DISABLE); write_4(sc, CPU_PORT_CONFIG, cpu_port_config); /* * Set 'running' flag, and clear output active flag * and attempt to start the output */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; mii = device_get_softc(sc->miibus); mii_pollstat(mii); /* Enable DMA. 
write_4(sc, FS_DMA_CONTROL, 1); callout_reset(&sc->tick_ch, hz, ece_tick, sc); } static inline int ece_encap(struct ece_softc *sc, struct mbuf *m0) { struct ifnet *ifp; bus_dma_segment_t segs[MAX_FRAGMENT]; bus_dmamap_t mapp; eth_tx_desc_t *desc = 0; int csum_flags; int desc_no; int error; int nsegs; int seg; ifp = sc->ifp; /* Fetch unused map */ mapp = sc->tx_desc[sc->tx_prod].dmamap; error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { bus_dmamap_unload(sc->dmatag_ring_tx, mapp); return ((error != 0) ? error : -1); } desc = &(sc->desc_tx[sc->desc_curr_tx]); sc->tx_desc[sc->tx_prod].desc = desc; sc->tx_desc[sc->tx_prod].buff = m0; desc_no = sc->desc_curr_tx; for (seg = 0; seg < nsegs; seg++) { if (desc->cown == 0) { if_printf(ifp, "ERROR: descriptor is still used\n"); return (-1); } desc->length = segs[seg].ds_len; desc->data_ptr = segs[seg].ds_addr; if (seg == 0) { desc->fs = 1; } else { desc->fs = 0; } if (seg == nsegs - 1) { desc->ls = 1; } else { desc->ls = 0; } csum_flags = m0->m_pkthdr.csum_flags; desc->fr = 1; desc->pmap = 1; desc->insv = 0; desc->ico = 0; desc->tco = 0; desc->uco = 0; desc->interrupt = 1; if (csum_flags & CSUM_IP) { desc->ico = 1; if (csum_flags & CSUM_TCP) desc->tco = 1; if (csum_flags & CSUM_UDP) desc->uco = 1; } desc++; sc->desc_curr_tx = (sc->desc_curr_tx + 1) % ECE_MAX_TX_BUFFERS; if (sc->desc_curr_tx == 0) { desc = (eth_tx_desc_t *)&(sc->desc_tx[0]); } } desc = sc->tx_desc[sc->tx_prod].desc; sc->tx_prod = (sc->tx_prod + 1) % ECE_MAX_TX_BUFFERS; /* * After all descriptors are set, we set the flags to start the * sending process. */ for (seg = 0; seg < nsegs; seg++) { desc->cown = 0; desc++; desc_no = (desc_no + 1) % ECE_MAX_TX_BUFFERS; if (desc_no == 0) desc = (eth_tx_desc_t *)&(sc->desc_tx[0]); } bus_dmamap_sync(sc->dmatag_data_tx, mapp, BUS_DMASYNC_PREWRITE); return (0); } /* * Dequeue packets and transmit. */ static void ecestart_locked(struct ifnet *ifp) { struct ece_softc *sc; struct mbuf *m0; uint32_t queued = 0; sc = ifp->if_softc; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (;;) { /* Get packet from the queue */ IF_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (ece_encap(sc, m0)) { IF_PREPEND(&ifp->if_snd, m0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } queued++; BPF_MTAP(ifp, m0); } if (queued) { bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); write_4(sc, TS_DMA_CONTROL, 1); } } static void eceinit(void *xsc) { struct ece_softc *sc = xsc; ECE_LOCK(sc); eceinit_locked(sc); ECE_UNLOCK(sc); } static void ece_tx_task(void *arg, int pending __unused) { struct ifnet *ifp; ifp = (struct ifnet *)arg; ecestart(ifp); } static void ecestart(struct ifnet *ifp) { struct ece_softc *sc = ifp->if_softc; ECE_TXLOCK(sc); ecestart_locked(ifp); ECE_TXUNLOCK(sc); } /* * Turn off interrupts, and stop the NIC. Can be called with sc->ifp * NULL, so be careful.
*/ static void ecestop(struct ece_softc *sc) { struct ifnet *ifp = sc->ifp; uint32_t mac_port_config; write_4(sc, TS_DMA_CONTROL, 0); write_4(sc, FS_DMA_CONTROL, 0); if (ifp) ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->tick_ch); /*Disable Port 0 */ mac_port_config = read_4(sc, MAC_PORT_0_CONFIG); mac_port_config |= (PORT_DISABLE); write_4(sc, MAC_PORT_0_CONFIG, mac_port_config); /*Disable Port 1 */ mac_port_config = read_4(sc, MAC_PORT_1_CONFIG); mac_port_config |= (PORT_DISABLE); write_4(sc, MAC_PORT_1_CONFIG, mac_port_config); /* Disable all interrupt status sources. */ write_4(sc, INTERRUPT_MASK, 0x00001FFF); /* Clear previous interrupt sources. */ write_4(sc, INTERRUPT_STATUS, 0x00001FFF); write_4(sc, SWITCH_CONFIG, initial_switch_config); write_4(sc, CPU_PORT_CONFIG, initial_cpu_config); write_4(sc, MAC_PORT_0_CONFIG, initial_port0_config); write_4(sc, MAC_PORT_1_CONFIG, initial_port1_config); clear_mac_entries(sc, 1); } static void ece_restart(struct ece_softc *sc) { struct ifnet *ifp = sc->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* Enable port 0. */ write_4(sc, PORT_0_CONFIG, read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE)); write_4(sc, INTERRUPT_MASK, 0x00000000); write_4(sc, FS_DMA_CONTROL, 1); callout_reset(&sc->tick_ch, hz, ece_tick, sc); } static void set_filter(struct ece_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t mac_port_config; ifp = sc->ifp; clear_mac_entries(sc, 0); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { mac_port_config = read_4(sc, MAC_PORT_0_CONFIG); mac_port_config &= ~(DISABLE_BROADCAST_PACKET); mac_port_config &= ~(DISABLE_MULTICAST_PACKET); write_4(sc, MAC_PORT_0_CONFIG, mac_port_config); return; } if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; add_mac_entry(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); } if_maddr_runlock(ifp); } static int eceioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ece_softc *sc = ifp->if_softc; struct mii_data *mii; struct ifreq *ifr = (struct ifreq *)data; int mask, error = 0; switch (cmd) { case SIOCSIFFLAGS: ECE_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ecestop(sc); } else { /* Reinitialize card on any parameter change. */ if ((ifp->if_flags & IFF_UP) && !(ifp->if_drv_flags & IFF_DRV_RUNNING)) ece_restart(sc); } ECE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: ECE_LOCK(sc); set_filter(sc); ECE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifp->if_capenable ^ ifr->ifr_reqcap; if (mask & IFCAP_VLAN_MTU) { ECE_LOCK(sc); ECE_UNLOCK(sc); } default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void ece_child_detached(device_t dev, device_t child) { struct ece_softc *sc; sc = device_get_softc(dev); if (child == sc->miibus) sc->miibus = NULL; } /* * MII bus support routines. 
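* These delegate to the polled phy_read()/phy_write() helpers defined earlier in this file.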
*/ static int ece_miibus_readreg(device_t dev, int phy, int reg) { struct ece_softc *sc; sc = device_get_softc(dev); return (phy_read(sc, phy, reg)); } static int ece_miibus_writereg(device_t dev, int phy, int reg, int data) { struct ece_softc *sc; sc = device_get_softc(dev); phy_write(sc, phy, reg, data); return (0); } static device_method_t ece_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ece_probe), DEVMETHOD(device_attach, ece_attach), DEVMETHOD(device_detach, ece_detach), /* Bus interface */ DEVMETHOD(bus_child_detached, ece_child_detached), /* MII interface */ DEVMETHOD(miibus_readreg, ece_miibus_readreg), DEVMETHOD(miibus_writereg, ece_miibus_writereg), { 0, 0 } }; static driver_t ece_driver = { "ece", ece_methods, sizeof(struct ece_softc), }; DRIVER_MODULE(ece, econaarm, ece_driver, ece_devclass, 0, 0); DRIVER_MODULE(miibus, ece, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(ece, miibus, 1, 1, 1); MODULE_DEPEND(ece, ether, 1, 1, 1); Index: head/sys/arm/freescale/imx/imx6_anatopreg.h =================================================================== --- head/sys/arm/freescale/imx/imx6_anatopreg.h (revision 258779) +++ head/sys/arm/freescale/imx/imx6_anatopreg.h (revision 258780) @@ -1,141 +1,141 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef IMX6_ANATOPREG_H #define IMX6_ANATOPREG_H #define IMX6_ANALOG_CCM_PLL_ARM 0x000 #define IMX6_ANALOG_CCM_PLL_ARM_SET 0x004 #define IMX6_ANALOG_CCM_PLL_ARM_CLR 0x008 #define IMX6_ANALOG_CCM_PLL_ARM_TOG 0x00C #define IMX6_ANALOG_CCM_PLL_USB1 0x010 #define IMX6_ANALOG_CCM_PLL_USB1_SET 0x014 #define IMX6_ANALOG_CCM_PLL_USB1_CLR 0x018 #define IMX6_ANALOG_CCM_PLL_USB1_TOG 0x01C -#define IMX6_ANALOG_CCM_PLL_USB_LOCK (1 << 31) +#define IMX6_ANALOG_CCM_PLL_USB_LOCK (1U << 31) #define IMX6_ANALOG_CCM_PLL_USB_BYPASS (1 << 16) #define IMX6_ANALOG_CCM_PLL_USB_ENABLE (1 << 13) #define IMX6_ANALOG_CCM_PLL_USB_POWER (1 << 12) #define IMX6_ANALOG_CCM_PLL_USB_EN_USB_CLKS (1 << 6) #define IMX6_ANALOG_CCM_PLL_USB2 0x020 #define IMX6_ANALOG_CCM_PLL_USB2_SET 0x024 #define IMX6_ANALOG_CCM_PLL_USB2_CLR 0x028 #define IMX6_ANALOG_CCM_PLL_USB2_TOG 0x02C #define IMX6_ANALOG_CCM_PLL_SYS 0x030 #define IMX6_ANALOG_CCM_PLL_SYS_SET 0x034 #define IMX6_ANALOG_CCM_PLL_SYS_CLR 0x038 #define IMX6_ANALOG_CCM_PLL_SYS_TOG 0x03C #define IMX6_ANALOG_CCM_PLL_SYS_SS 0x040 #define IMX6_ANALOG_CCM_PLL_SYS_NUM 0x050 #define IMX6_ANALOG_CCM_PLL_SYS_DENOM 0x060 #define IMX6_ANALOG_CCM_PLL_AUDIO 0x070 #define IMX6_ANALOG_CCM_PLL_AUDIO_SET 0x074 #define IMX6_ANALOG_CCM_PLL_AUDIO_CLR 0x078 #define IMX6_ANALOG_CCM_PLL_AUDIO_TOG 0x07C #define IMX6_ANALOG_CCM_PLL_AUDIO_NUM 0x080 #define IMX6_ANALOG_CCM_PLL_AUDIO_DENOM 0x090 #define IMX6_ANALOG_CCM_PLL_VIDEO 0x0A0 #define IMX6_ANALOG_CCM_PLL_VIDEO_SET 0x0A4 #define IMX6_ANALOG_CCM_PLL_VIDEO_CLR 0x0A8 #define IMX6_ANALOG_CCM_PLL_VIDEO_TOG 0x0AC #define IMX6_ANALOG_CCM_PLL_VIDEO_NUM 0x0B0 #define IMX6_ANALOG_CCM_PLL_VIDEO_DENOM 0x0C0 #define IMX6_ANALOG_CCM_PLL_MLB 0x0D0 #define IMX6_ANALOG_CCM_PLL_MLB_SET 0x0D4 #define IMX6_ANALOG_CCM_PLL_MLB_CLR 0x0D8 #define IMX6_ANALOG_CCM_PLL_MLB_TOG 0x0DC #define IMX6_ANALOG_CCM_PLL_ENET 0x0E0 #define IMX6_ANALOG_CCM_PLL_ENET_SET 0x0E4 #define IMX6_ANALOG_CCM_PLL_ENET_CLR 0x0E8 #define IMX6_ANALOG_CCM_PLL_ENET_TOG 0x0EC #define IMX6_ANALOG_CCM_PFD_480 0x0F0 #define IMX6_ANALOG_CCM_PFD_480_SET 0x0F4 #define IMX6_ANALOG_CCM_PFD_480_CLR 0x0F8 #define IMX6_ANALOG_CCM_PFD_480_TOG 0x0FC #define IMX6_ANALOG_CCM_PFD_528 0x100 #define IMX6_ANALOG_CCM_PFD_528_SET 0x104 #define IMX6_ANALOG_CCM_PFD_528_CLR 0x108 #define IMX6_ANALOG_CCM_PFD_528_TOG 0x10C #define IMX6_ANALOG_PMU_REG_CORE 0x140 #define IMX6_ANALOG_PMU_REG2_TARG_SHIFT 18 #define IMX6_ANALOG_PMU_REG2_TARG_MASK \ (0x1f << IMX6_ANALOG_PMU_REG2_TARG_SHIFT) #define IMX6_ANALOG_PMU_REG1_TARG_SHIFT 9 #define IMX6_ANALOG_PMU_REG1_TARG_MASK \ (0x1f << IMX6_ANALOG_PMU_REG1_TARG_SHIFT) #define IMX6_ANALOG_PMU_REG0_TARG_SHIFT 0 #define IMX6_ANALOG_PMU_REG0_TARG_MASK \ (0x1f << IMX6_ANALOG_PMU_REG0_TARG_SHIFT) #define IMX6_ANALOG_CCM_MISC0 0x150 #define IMX6_ANALOG_CCM_MISC0_SET 0x154 #define IMX6_ANALOG_CCM_MISC0_CLR 0x158 #define IMX6_ANALOG_CCM_MISC0_TOG 0x15C #define IMX6_ANALOG_CCM_MISC2 0x170 #define IMX6_ANALOG_CCM_MISC2_SET 0x174 #define IMX6_ANALOG_CCM_MISC2_CLR 0x178 #define IMX6_ANALOG_CCM_MISC2_TOG 0x17C #define IMX6_ANALOG_USB1_VBUS_DETECT 0x1A0 #define IMX6_ANALOG_USB1_VBUS_DETECT_SET 0x1A4 #define IMX6_ANALOG_USB1_VBUS_DETECT_CLR 0x1A8 #define IMX6_ANALOG_USB1_VBUS_DETECT_TOG 0x1AC #define IMX6_ANALOG_USB1_CHRG_DETECT 0x1B0 #define IMX6_ANALOG_USB1_CHRG_DETECT_SET 0x1B4 #define IMX6_ANALOG_USB1_CHRG_DETECT_CLR 0x1B8 #define IMX6_ANALOG_USB1_CHRG_DETECT_TOG 0x1BC #define IMX6_ANALOG_USB_CHRG_DETECT_N_ENABLE (1 << 20) /* EN_B */ #define IMX6_ANALOG_USB_CHRG_DETECT_N_CHK_CHRG (1 << 
19) /* CHK_CHRG_B */ #define IMX6_ANALOG_USB_CHRG_DETECT_CHK_CONTACT (1 << 18) #define IMX6_ANALOG_USB1_VBUS_DETECT_STAT 0x1C0 #define IMX6_ANALOG_USB1_CHRG_DETECT_STAT 0x1D0 #define IMX6_ANALOG_USB1_MISC 0x1F0 #define IMX6_ANALOG_USB1_MISC_SET 0x1F4 #define IMX6_ANALOG_USB1_MISC_CLR 0x1F8 #define IMX6_ANALOG_USB1_MISC_TOG 0x1FC #define IMX6_ANALOG_USB2_VBUS_DETECT 0x200 #define IMX6_ANALOG_USB2_VBUS_DETECT_SET 0x204 #define IMX6_ANALOG_USB2_VBUS_DETECT_CLR 0x208 #define IMX6_ANALOG_USB2_VBUS_DETECT_TOG 0x20C #define IMX6_ANALOG_USB2_CHRG_DETECT 0x210 #define IMX6_ANALOG_USB2_CHRG_DETECT_SET 0x214 #define IMX6_ANALOG_USB2_CHRG_DETECT_CLR 0x218 #define IMX6_ANALOG_USB2_CHRG_DETECT_TOG 0x21C #define IMX6_ANALOG_USB2_VBUS_DETECT_STAT 0x220 #define IMX6_ANALOG_USB2_CHRG_DETECT_STAT 0x230 #define IMX6_ANALOG_USB2_MISC 0x250 #define IMX6_ANALOG_USB2_MISC_SET 0x254 #define IMX6_ANALOG_USB2_MISC_CLR 0x258 #define IMX6_ANALOG_USB2_MISC_TOG 0x25C #define IMX6_ANALOG_DIGPROG 0x260 #define IMX6_ANALOG_DIGPROG_SL 0x280 #define IMX6_ANALOG_DIGPROG_SOCTYPE_SHIFT 16 #define IMX6_ANALOG_DIGPROG_SOCTYPE_MASK \ (0xff << IMX6_ANALOG_DIGPROG_SOCTYPE_SHIFT) #endif Index: head/sys/arm/freescale/imx/imx6_usbphy.c =================================================================== --- head/sys/arm/freescale/imx/imx6_usbphy.c (revision 258779) +++ head/sys/arm/freescale/imx/imx6_usbphy.c (revision 258780) @@ -1,189 +1,189 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * USBPHY driver for Freescale i.MX6 family of SoCs. */ #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include /* * Hardware register defines. 
*/ #define PWD_REG 0x0000 #define CTRL_STATUS_REG 0x0030 #define CTRL_SET_REG 0x0034 #define CTRL_CLR_REG 0x0038 #define CTRL_TOGGLE_REG 0x003c -#define CTRL_SFTRST (1 << 31) +#define CTRL_SFTRST (1U << 31) #define CTRL_CLKGATE (1 << 30) #define CTRL_ENUTMILEVEL3 (1 << 15) #define CTRL_ENUTMILEVEL2 (1 << 14) struct usbphy_softc { device_t dev; struct resource *mem_res; u_int phy_num; }; static int usbphy_detach(device_t dev) { struct usbphy_softc *sc; sc = device_get_softc(dev); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (0); } static int usbphy_attach(device_t dev) { struct usbphy_softc *sc; int err, regoff, rid; sc = device_get_softc(dev); err = 0; /* Allocate bus_space resources. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); err = ENXIO; goto out; } /* * XXX Totally lame way to get the unit number (but not quite as lame as * adding an ad-hoc property to the fdt data). This works as long as * this driver is used for imx6 only. */ const uint32_t PWD_PHY1_REG_PHYSADDR = 0x020c9000; if (BUS_SPACE_PHYSADDR(sc->mem_res, 0) == PWD_PHY1_REG_PHYSADDR) { sc->phy_num = 0; regoff = 0; } else { sc->phy_num = 1; regoff = 0x60; } /* * Based on a note in the u-boot source code, disable charger detection * to avoid degrading the differential signaling on the DP line. Note * that this disables (by design) both charger detection and contact * detection, because of the screwball mix of active-high and active-low * bits in this register. */ imx6_anatop_write_4(IMX6_ANALOG_USB1_CHRG_DETECT + regoff, IMX6_ANALOG_USB_CHRG_DETECT_N_ENABLE | IMX6_ANALOG_USB_CHRG_DETECT_N_CHK_CHRG); /* XXX Configure the overcurrent detection here. */ /* * Turn on the phy clocks. */ imx_ccm_usbphy_enable(dev); /* * Set the software reset bit, then clear both it and the clock gate bit * to bring the device out of reset with the clock running. */ bus_write_4(sc->mem_res, CTRL_SET_REG, CTRL_SFTRST); bus_write_4(sc->mem_res, CTRL_CLR_REG, CTRL_SFTRST | CTRL_CLKGATE); /* Power up: clear all bits in the powerdown register. */ bus_write_4(sc->mem_res, PWD_REG, 0); err = 0; out: if (err != 0) usbphy_detach(dev); return (err); } static int usbphy_probe(device_t dev) { if (ofw_bus_is_compatible(dev, "fsl,imx6q-usbphy") == 0) return (ENXIO); device_set_desc(dev, "Freescale i.MX6 USB PHY"); return (BUS_PROBE_DEFAULT); } static device_method_t usbphy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, usbphy_probe), DEVMETHOD(device_attach, usbphy_attach), DEVMETHOD(device_detach, usbphy_detach), DEVMETHOD_END }; static driver_t usbphy_driver = { "usbphy", usbphy_methods, sizeof(struct usbphy_softc) }; static devclass_t usbphy_devclass; DRIVER_MODULE(usbphy, simplebus, usbphy_driver, usbphy_devclass, 0, 0); Index: head/sys/arm/freescale/imx/imx_gptreg.h =================================================================== --- head/sys/arm/freescale/imx/imx_gptreg.h (revision 258779) +++ head/sys/arm/freescale/imx/imx_gptreg.h (revision 258780) @@ -1,101 +1,101 @@ /*- * Copyright (c) 2012, 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Oleksandr Rybalko under sponsorship * from the FreeBSD Foundation.
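The CTRL_SET_REG/CTRL_CLR_REG pair used by usbphy_attach() above follows the usual i.MX convention: writing a 1 to the SET alias (base + 4) sets bits and writing a 1 to the CLR alias (base + 8) clears them, so no read-modify-write is needed. A sketch of the reset sequence under that assumption (hypothetical helper, not a function in the driver):

static void
usbphy_soft_reset(struct usbphy_softc *sc)
{
	/* Assert soft reset through the SET alias. */
	bus_write_4(sc->mem_res, CTRL_SET_REG, CTRL_SFTRST);
	DELAY(100);	/* assumed settling time, not from the datasheet */
	/* Release both the reset and the clock gate through the CLR alias. */
	bus_write_4(sc->mem_res, CTRL_CLR_REG, CTRL_SFTRST | CTRL_CLKGATE);
}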
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Register definitions for the Freescale i.MX515 General Purpose Timer (GPT) */ #define IMX_GPT_CR 0x0000 /* Control Register R/W */ -#define GPT_CR_FO3 (1 << 31) +#define GPT_CR_FO3 (1U << 31) #define GPT_CR_FO2 (1 << 30) #define GPT_CR_FO1 (1 << 29) #define GPT_CR_OM3_SHIFT 26 #define GPT_CR_OM3_MASK 0x1c000000 #define GPT_CR_OM2_SHIFT 23 #define GPT_CR_OM2_MASK 0x03800000 #define GPT_CR_OM1_SHIFT 20 #define GPT_CR_OM1_MASK 0x00700000 #define GPT_CR_OMX_NONE 0 #define GPT_CR_OMX_TOGGLE 1 #define GPT_CR_OMX_CLEAR 2 #define GPT_CR_OMX_SET 3 #define GPT_CR_OMX_PULSE 4 /* Run CLKSRC on output pin */ #define GPT_CR_IM2_SHIFT 18 #define GPT_CR_IM2_MASK 0x000c0000 #define GPT_CR_IM1_SHIFT 16 #define GPT_CR_IM1_MASK 0x00030000 #define GPT_CR_IMX_NONE 0 #define GPT_CR_IMX_REDGE 1 #define GPT_CR_IMX_FEDGE 2 #define GPT_CR_IMX_BOTH 3 #define GPT_CR_SWR (1 << 15) #define GPT_CR_24MEN (1 << 10) #define GPT_CR_FRR (1 << 9) #define GPT_CR_CLKSRC_NONE (0 << 6) #define GPT_CR_CLKSRC_IPG (1 << 6) #define GPT_CR_CLKSRC_IPG_HIGH (2 << 6) #define GPT_CR_CLKSRC_EXT (3 << 6) #define GPT_CR_CLKSRC_32K (4 << 6) #define GPT_CR_CLKSRC_24M (5 << 6) #define GPT_CR_STOPEN (1 << 5) #define GPT_CR_DOZEEN (1 << 4) #define GPT_CR_WAITEN (1 << 3) #define GPT_CR_DBGEN (1 << 2) #define GPT_CR_ENMOD (1 << 1) #define GPT_CR_EN (1 << 0) #define IMX_GPT_PR 0x0004 /* Prescaler Register R/W */ #define GPT_PR_VALUE_SHIFT 0 #define GPT_PR_VALUE_MASK 0x00000fff #define GPT_PR_VALUE_SHIFT_24M 12 #define GPT_PR_VALUE_MASK_24M 0x0000f000 /* Same map for SR and IR */ #define IMX_GPT_SR 0x0008 /* Status Register R/W */ #define IMX_GPT_IR 0x000c /* Interrupt Register R/W */ #define GPT_IR_ROV (1 << 5) #define GPT_IR_IF2 (1 << 4) #define GPT_IR_IF1 (1 << 3) #define GPT_IR_OF3 (1 << 2) #define GPT_IR_OF2 (1 << 1) #define GPT_IR_OF1 (1 << 0) #define GPT_IR_ALL \ (GPT_IR_ROV | \ GPT_IR_IF2 | \ GPT_IR_IF1 | \ GPT_IR_OF3 | \ GPT_IR_OF2 | \ GPT_IR_OF1) #define IMX_GPT_OCR1 0x0010 /* Output Compare Register 1 R/W */ #define IMX_GPT_OCR2 0x0014 /* Output Compare Register 2 R/W */ #define IMX_GPT_OCR3 0x0018 /* Output Compare Register 3 R/W */ #define IMX_GPT_ICR1 0x001c /* Input Capture Register 1 RO */ #define IMX_GPT_ICR2 0x0020 /* Input Capture Register 2 RO */ #define IMX_GPT_CNT 0x0024 /* Counter
Register RO */ Index: head/sys/arm/freescale/vybrid/vf_anadig.c =================================================================== --- head/sys/arm/freescale/vybrid/vf_anadig.c (revision 258779) +++ head/sys/arm/freescale/vybrid/vf_anadig.c (revision 258780) @@ -1,214 +1,214 @@ /*- * Copyright (c) 2013 Ruslan Bukin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Vybrid Family Analog components control digital interface (ANADIG) * Chapter 11, Vybrid Reference Manual, Rev. 5, 07/2013 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ANADIG_PLL3_CTRL 0x010 /* PLL3 Control */ #define ANADIG_PLL7_CTRL 0x020 /* PLL7 Control */ #define ANADIG_PLL2_CTRL 0x030 /* PLL2 Control */ #define ANADIG_PLL2_SS 0x040 /* PLL2 Spread Spectrum */ #define ANADIG_PLL2_NUM 0x050 /* PLL2 Numerator */ #define ANADIG_PLL2_DENOM 0x060 /* PLL2 Denominator */ #define ANADIG_PLL4_CTRL 0x070 /* PLL4 Control */ #define ANADIG_PLL4_NUM 0x080 /* PLL4 Numerator */ #define ANADIG_PLL4_DENOM 0x090 /* PLL4 Denominator */ #define ANADIG_PLL6_CTRL 0x0A0 /* PLL6 Control */ #define ANADIG_PLL6_NUM 0x0B0 /* PLL6 Numerator */ #define ANADIG_PLL6_DENOM 0x0C0 /* PLL6 Denominator */ #define ANADIG_PLL5_CTRL 0x0E0 /* PLL5 Control */ #define ANADIG_PLL3_PFD 0x0F0 /* PLL3 PFD */ #define ANADIG_PLL2_PFD 0x100 /* PLL2 PFD */ #define ANADIG_REG_1P1 0x110 /* Regulator 1P1 */ #define ANADIG_REG_3P0 0x120 /* Regulator 3P0 */ #define ANADIG_REG_2P5 0x130 /* Regulator 2P5 */ #define ANADIG_ANA_MISC0 0x150 /* Analog Miscellaneous */ #define ANADIG_ANA_MISC1 0x160 /* Analog Miscellaneous */ #define ANADIG_ANADIG_DIGPROG 0x260 /* Digital Program */ #define ANADIG_PLL1_CTRL 0x270 /* PLL1 Control */ #define ANADIG_PLL1_SS 0x280 /* PLL1 Spread Spectrum */ #define ANADIG_PLL1_NUM 0x290 /* PLL1 Numerator */ #define ANADIG_PLL1_DENOM 0x2A0 /* PLL1 Denominator */ #define ANADIG_PLL1_PFD 0x2B0 /* PLL1_PFD */ #define ANADIG_PLL_LOCK 0x2C0 /* PLL Lock */ #define USB_VBUS_DETECT(n) (0x1A0 + 0x60 * n) #define USB_CHRG_DETECT(n) (0x1B0 + 0x60 * n) #define USB_VBUS_DETECT_STATUS(n) (0x1C0 + 0x60 * n) #define USB_CHRG_DETECT_STATUS(n) (0x1D0 + 0x60 * n) #define USB_LOOPBACK(n) (0x1E0 + 
0x60 * n) #define USB_MISC(n) (0x1F0 + 0x60 * n) -#define ANADIG_PLL_LOCKED (1 << 31) +#define ANADIG_PLL_LOCKED (1U << 31) #define ENABLE_LINREG (1 << 0) #define EN_CLK_TO_UTMI (1 << 30) #define CTRL_BYPASS (1 << 16) #define CTRL_PWR (1 << 12) #define CTRL_PLL_EN (1 << 13) #define EN_USB_CLKS (1 << 6) struct anadig_softc { struct resource *res[1]; bus_space_tag_t bst; bus_space_handle_t bsh; }; static struct resource_spec anadig_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; static int anadig_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "fsl,mvf600-anadig")) return (ENXIO); device_set_desc(dev, "Vybrid Family ANADIG Unit"); return (BUS_PROBE_DEFAULT); } static int enable_pll(struct anadig_softc *sc, int pll_ctrl) { int reg; reg = READ4(sc, pll_ctrl); reg &= ~(CTRL_BYPASS | CTRL_PWR); if (pll_ctrl == ANADIG_PLL3_CTRL || pll_ctrl == ANADIG_PLL7_CTRL) { /* This is a USB PLL; its power bit logic is reversed */ reg |= (CTRL_PWR | EN_USB_CLKS); } WRITE4(sc, pll_ctrl, reg); /* Wait for PLL lock */ while (!(READ4(sc, pll_ctrl) & ANADIG_PLL_LOCKED)) ; reg = READ4(sc, pll_ctrl); reg |= (CTRL_PLL_EN); WRITE4(sc, pll_ctrl, reg); return (0); } static int anadig_attach(device_t dev) { struct anadig_softc *sc; int reg; sc = device_get_softc(dev); if (bus_alloc_resources(dev, anadig_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Memory interface */ sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = rman_get_bushandle(sc->res[0]); /* Enable USB PLLs */ enable_pll(sc, ANADIG_PLL3_CTRL); enable_pll(sc, ANADIG_PLL7_CTRL); /* Enable the other PLLs */ enable_pll(sc, ANADIG_PLL1_CTRL); enable_pll(sc, ANADIG_PLL2_CTRL); enable_pll(sc, ANADIG_PLL4_CTRL); enable_pll(sc, ANADIG_PLL5_CTRL); enable_pll(sc, ANADIG_PLL6_CTRL); /* Enable USB voltage regulator */ reg = READ4(sc, ANADIG_REG_3P0); reg |= (ENABLE_LINREG); WRITE4(sc, ANADIG_REG_3P0, reg); /* Give clocks to USB */ reg = READ4(sc, USB_MISC(0)); reg |= (EN_CLK_TO_UTMI); WRITE4(sc, USB_MISC(0), reg); reg = READ4(sc, USB_MISC(1)); reg |= (EN_CLK_TO_UTMI); WRITE4(sc, USB_MISC(1), reg); #if 0 printf("USB_MISC(0) == 0x%08x\n", READ4(sc, USB_MISC(0))); printf("USB_MISC(1) == 0x%08x\n", READ4(sc, USB_MISC(1))); #endif return (0); } static device_method_t anadig_methods[] = { DEVMETHOD(device_probe, anadig_probe), DEVMETHOD(device_attach, anadig_attach), { 0, 0 } }; static driver_t anadig_driver = { "anadig", anadig_methods, sizeof(struct anadig_softc), }; static devclass_t anadig_devclass; DRIVER_MODULE(anadig, simplebus, anadig_driver, anadig_devclass, 0, 0); Index: head/sys/arm/freescale/vybrid/vf_ccm.c =================================================================== --- head/sys/arm/freescale/vybrid/vf_ccm.c (revision 258779) +++ head/sys/arm/freescale/vybrid/vf_ccm.c (revision 258780) @@ -1,188 +1,188 @@ /*- * Copyright (c) 2013 Ruslan Bukin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
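One caveat about enable_pll() above: the lock loop spins forever if a PLL never locks. Bounding the wait is a common hardening; a sketch using the same READ4 accessor (the iteration count is arbitrary, not taken from the reference manual):

static int
anadig_wait_pll_lock(struct anadig_softc *sc, int pll_ctrl)
{
	int i;

	for (i = 0; i < 100000; i++) {
		if (READ4(sc, pll_ctrl) & ANADIG_PLL_LOCKED)
			return (0);
		DELAY(1);
	}
	return (ETIMEDOUT);	/* let the caller report the dead PLL */
}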
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Vybrid Family Clock Controller Module (CCM) * Chapter 10, Vybrid Reference Manual, Rev. 5, 07/2013 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define CCM_CCR 0x00 /* Control Register */ #define CCM_CSR 0x04 /* Status Register */ #define CCM_CCSR 0x08 /* Clock Switcher Register */ #define CCM_CACRR 0x0C /* ARM Clock Root Register */ #define CCM_CSCMR1 0x10 /* Serial Clock Multiplexer Register 1 */ #define CCM_CSCDR1 0x14 /* Serial Clock Divider Register 1 */ #define CCM_CSCDR2 0x18 /* Serial Clock Divider Register 2 */ #define CCM_CSCDR3 0x1C /* Serial Clock Divider Register 3 */ #define CCM_CSCMR2 0x20 /* Serial Clock Multiplexer Register 2 */ #define CCM_CTOR 0x28 /* Testing Observability Register */ #define CCM_CLPCR 0x2C /* Low Power Control Register */ #define CCM_CISR 0x30 /* Interrupt Status Register */ #define CCM_CIMR 0x34 /* Interrupt Mask Register */ #define CCM_CCOSR 0x38 /* Clock Output Source Register */ #define CCM_CGPR 0x3C /* General Purpose Register */ #define CCM_CCGRN 12 #define CCM_CCGR(n) (0x40 + (n * 0x04)) /* Clock Gating Register */ #define CCM_CMEOR(n) (0x70 + (n * 0x70)) /* Module Enable Override Reg */ #define CCM_CCPGR(n) (0x90 + (n * 0x04)) /* Platform Clock Gating Reg */ #define CCM_CPPDSR 0x88 /* PLL PFD Disable Status Register */ #define CCM_CCOWR 0x8C /* CORE Wakeup Register */ -#define PLL3_PFD4_EN (1 << 31) +#define PLL3_PFD4_EN (1U << 31) #define PLL3_PFD3_EN (1 << 30) #define PLL3_PFD2_EN (1 << 29) #define PLL3_PFD1_EN (1 << 28) #define PLL2_PFD4_EN (1 << 15) #define PLL2_PFD3_EN (1 << 14) #define PLL2_PFD2_EN (1 << 13) #define PLL2_PFD1_EN (1 << 12) #define PLL1_PFD4_EN (1 << 11) #define PLL1_PFD3_EN (1 << 10) #define PLL1_PFD2_EN (1 << 9) #define PLL1_PFD1_EN (1 << 8) /* CCM_CCR */ #define FIRC_EN (1 << 16) #define FXOSC_EN (1 << 12) #define FXOSC_RDY (1 << 5) /* CCM_CSCDR1 */ #define ENET_TS_EN (1 << 23) #define RMII_CLK_EN (1 << 24) struct ccm_softc { struct resource *res[1]; bus_space_tag_t bst; bus_space_handle_t bsh; device_t dev; }; static struct resource_spec ccm_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; static int ccm_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "fsl,mvf600-ccm")) return (ENXIO); device_set_desc(dev, "Vybrid Family CCM Unit"); return (BUS_PROBE_DEFAULT); } static int ccm_attach(device_t dev) { struct ccm_softc *sc; int reg; int i; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, ccm_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Memory interface */ sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = 
rman_get_bushandle(sc->res[0]); /* Enable oscillator */ reg = READ4(sc, CCM_CCR); reg |= (FIRC_EN | FXOSC_EN); WRITE4(sc, CCM_CCR, reg); /* Poll up to 10 times for the oscillator to become ready */ for (i = 0; i < 10; i++) { if (READ4(sc, CCM_CSR) & FXOSC_RDY) { device_printf(sc->dev, "On board oscillator is ready.\n"); break; } cpufunc_nullop(); } /* Clock is on during all modes except stop mode. */ for (i = 0; i < CCM_CCGRN; i++) { WRITE4(sc, CCM_CCGR(i), 0xffffffff); } /* Enable ENET clocks */ reg = READ4(sc, CCM_CSCDR1); reg |= (ENET_TS_EN | RMII_CLK_EN); WRITE4(sc, CCM_CSCDR1, reg); return (0); } static device_method_t ccm_methods[] = { DEVMETHOD(device_probe, ccm_probe), DEVMETHOD(device_attach, ccm_attach), { 0, 0 } }; static driver_t ccm_driver = { "ccm", ccm_methods, sizeof(struct ccm_softc), }; static devclass_t ccm_devclass; DRIVER_MODULE(ccm, simplebus, ccm_driver, ccm_devclass, 0, 0); Index: head/sys/arm/freescale/vybrid/vf_ehci.c =================================================================== --- head/sys/arm/freescale/vybrid/vf_ehci.c (revision 258779) +++ head/sys/arm/freescale/vybrid/vf_ehci.c (revision 258780) @@ -1,416 +1,416 @@ /*- * Copyright (c) 2013 Ruslan Bukin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Vybrid Family Universal Serial Bus (USB) Controller * Chapter 44-45, Vybrid Reference Manual, Rev.
5, 07/2013 */ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #include "opt_platform.h" #define ENUTMILEVEL3 (1 << 15) #define ENUTMILEVEL2 (1 << 14) #define GPIO_USB_PWR 134 #define USB_ID 0x000 /* Identification register */ #define USB_HWGENERAL 0x004 /* Hardware General */ #define USB_HWHOST 0x008 /* Host Hardware Parameters */ #define USB_HWDEVICE 0x00C /* Device Hardware Parameters */ #define USB_HWTXBUF 0x010 /* TX Buffer Hardware Parameters */ #define USB_HWRXBUF 0x014 /* RX Buffer Hardware Parameters */ #define USB_HCSPARAMS 0x104 /* Host Controller Structural Parameters */ #define USBPHY_PWD 0x00 /* PHY Power-Down Register */ #define USBPHY_PWD_SET 0x04 /* PHY Power-Down Register */ #define USBPHY_PWD_CLR 0x08 /* PHY Power-Down Register */ #define USBPHY_PWD_TOG 0x0C /* PHY Power-Down Register */ #define USBPHY_TX 0x10 /* PHY Transmitter Control Register */ #define USBPHY_RX 0x20 /* PHY Receiver Control Register */ #define USBPHY_RX_SET 0x24 /* PHY Receiver Control Register */ #define USBPHY_RX_CLR 0x28 /* PHY Receiver Control Register */ #define USBPHY_RX_TOG 0x2C /* PHY Receiver Control Register */ #define USBPHY_CTRL 0x30 /* PHY General Control Register */ #define USBPHY_CTRL_SET 0x34 /* PHY General Control Register */ #define USBPHY_CTRL_CLR 0x38 /* PHY General Control Register */ #define USBPHY_CTRL_TOG 0x3C /* PHY General Control Register */ #define USBPHY_STATUS 0x40 /* PHY Status Register */ #define USBPHY_DEBUG 0x50 /* PHY Debug Register */ #define USBPHY_DEBUG_SET 0x54 /* PHY Debug Register */ #define USBPHY_DEBUG_CLR 0x58 /* PHY Debug Register */ #define USBPHY_DEBUG_TOG 0x5C /* PHY Debug Register */ #define USBPHY_DEBUG0_STATUS 0x60 /* UTMI Debug Status Register 0 */ #define USBPHY_DEBUG1 0x70 /* UTMI Debug Status Register 1 */ #define USBPHY_DEBUG1_SET 0x74 /* UTMI Debug Status Register 1 */ #define USBPHY_DEBUG1_CLR 0x78 /* UTMI Debug Status Register 1 */ #define USBPHY_DEBUG1_TOG 0x7C /* UTMI Debug Status Register 1 */ #define USBPHY_VERSION 0x80 /* UTMI RTL Version */ #define USBPHY_IP 0x90 /* PHY IP Block Register */ #define USBPHY_IP_SET 0x94 /* PHY IP Block Register */ #define USBPHY_IP_CLR 0x98 /* PHY IP Block Register */ #define USBPHY_IP_TOG 0x9C /* PHY IP Block Register */ -#define USBPHY_CTRL_SFTRST (1 << 31) +#define USBPHY_CTRL_SFTRST (1U << 31) #define USBPHY_CTRL_CLKGATE (1 << 30) #define USBPHY_DEBUG_CLKGATE (1 << 30) #define PHY_READ4(_sc, _reg) \ bus_space_read_4(_sc->bst_phy, _sc->bsh_phy, _reg) #define PHY_WRITE4(_sc, _reg, _val) \ bus_space_write_4(_sc->bst_phy, _sc->bsh_phy, _reg, _val) #define USBC_READ4(_sc, _reg) \ bus_space_read_4(_sc->bst_usbc, _sc->bsh_usbc, _reg) #define USBC_WRITE4(_sc, _reg, _val) \ bus_space_write_4(_sc->bst_usbc, _sc->bsh_usbc, _reg, _val) /* Forward declarations */ static int vybrid_ehci_attach(device_t dev); static int vybrid_ehci_detach(device_t dev); static int vybrid_ehci_probe(device_t dev); struct vybrid_ehci_softc { ehci_softc_t base; device_t dev; struct resource *res[6]; bus_space_tag_t bst_phy; bus_space_handle_t bsh_phy; bus_space_tag_t bst_usbc; bus_space_handle_t bsh_usbc; }; static struct resource_spec vybrid_ehci_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_MEMORY, 2, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static device_method_t 
ehci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vybrid_ehci_probe), DEVMETHOD(device_attach, vybrid_ehci_attach), DEVMETHOD(device_detach, vybrid_ehci_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), { 0, 0 } }; /* kobj_class definition */ static driver_t ehci_driver = { "ehci", ehci_methods, sizeof(ehci_softc_t) }; static devclass_t ehci_devclass; DRIVER_MODULE(ehci, simplebus, ehci_driver, ehci_devclass, 0, 0); MODULE_DEPEND(ehci, usb, 1, 1, 1); /* * Public methods */ static int vybrid_ehci_probe(device_t dev) { if (ofw_bus_is_compatible(dev, "fsl,mvf600-usb-ehci") == 0) return (ENXIO); device_set_desc(dev, "Vybrid Family integrated USB controller"); return (BUS_PROBE_DEFAULT); } static int phy_init(struct vybrid_ehci_softc *esc) { device_t sc_gpio_dev; int reg; /* Reset phy */ reg = PHY_READ4(esc, USBPHY_CTRL); reg |= (USBPHY_CTRL_SFTRST); PHY_WRITE4(esc, USBPHY_CTRL, reg); /* Minimum reset time */ DELAY(10000); reg &= ~(USBPHY_CTRL_SFTRST | USBPHY_CTRL_CLKGATE); PHY_WRITE4(esc, USBPHY_CTRL, reg); reg = (ENUTMILEVEL2 | ENUTMILEVEL3); PHY_WRITE4(esc, USBPHY_CTRL_SET, reg); /* Get the GPIO device; we need it to give power to USB */ sc_gpio_dev = devclass_get_device(devclass_find("gpio"), 0); if (sc_gpio_dev == NULL) { device_printf(esc->dev, "Error: failed to get the GPIO dev\n"); return (1); } /* Give power to USB */ GPIO_PIN_SETFLAGS(sc_gpio_dev, GPIO_USB_PWR, GPIO_PIN_OUTPUT); GPIO_PIN_SET(sc_gpio_dev, GPIO_USB_PWR, GPIO_PIN_HIGH); /* Power up PHY */ PHY_WRITE4(esc, USBPHY_PWD, 0x00); /* Ungate clocks */ reg = PHY_READ4(esc, USBPHY_DEBUG); reg &= ~(USBPHY_DEBUG_CLKGATE); PHY_WRITE4(esc, USBPHY_DEBUG, reg); #if 0 printf("USBPHY_CTRL == 0x%08x\n", PHY_READ4(esc, USBPHY_CTRL)); printf("USBPHY_IP == 0x%08x\n", PHY_READ4(esc, USBPHY_IP)); printf("USBPHY_STATUS == 0x%08x\n", PHY_READ4(esc, USBPHY_STATUS)); printf("USBPHY_DEBUG == 0x%08x\n", PHY_READ4(esc, USBPHY_DEBUG)); printf("USBPHY_DEBUG0_STATUS == 0x%08x\n", PHY_READ4(esc, USBPHY_DEBUG0_STATUS)); printf("USBPHY_DEBUG1 == 0x%08x\n", PHY_READ4(esc, USBPHY_DEBUG1)); #endif return (0); } static int vybrid_ehci_attach(device_t dev) { struct vybrid_ehci_softc *esc; ehci_softc_t *sc; bus_space_handle_t bsh; int err; int reg; esc = device_get_softc(dev); esc->dev = dev; sc = &esc->base; sc->sc_bus.parent = dev; sc->sc_bus.devices = sc->sc_devices; sc->sc_bus.devices_max = EHCI_MAX_DEVICES; if (bus_alloc_resources(dev, vybrid_ehci_spec, esc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* EHCI registers */ sc->sc_io_tag = rman_get_bustag(esc->res[0]); bsh = rman_get_bushandle(esc->res[0]); sc->sc_io_size = rman_get_size(esc->res[0]); esc->bst_usbc = rman_get_bustag(esc->res[1]); esc->bsh_usbc = rman_get_bushandle(esc->res[1]); esc->bst_phy = rman_get_bustag(esc->res[2]); esc->bsh_phy = rman_get_bushandle(esc->res[2]); /* get all DMA memory */ if (usb_bus_mem_alloc_all(&sc->sc_bus, USB_GET_DMA_TAG(dev), &ehci_iterate_hw_softc)) return (ENXIO); #if 0 printf("USBx_HCSPARAMS is 0x%08x\n", bus_space_read_4(sc->sc_io_tag, bsh, USB_HCSPARAMS)); printf("USB_ID == 0x%08x\n", bus_space_read_4(sc->sc_io_tag, bsh, USB_ID)); printf("USB_HWGENERAL == 0x%08x\n", bus_space_read_4(sc->sc_io_tag, bsh, USB_HWGENERAL)); printf("USB_HWHOST == 0x%08x\n", bus_space_read_4(sc->sc_io_tag, bsh, USB_HWHOST)); printf("USB_HWDEVICE == 0x%08x\n",
bus_space_read_4(sc->sc_io_tag, bsh, USB_HWDEVICE)); printf("USB_HWTXBUF == 0x%08x\n", bus_space_read_4(sc->sc_io_tag, bsh, USB_HWTXBUF)); printf("USB_HWRXBUF == 0x%08x\n", bus_space_read_4(sc->sc_io_tag, bsh, USB_HWRXBUF)); #endif if (phy_init(esc)) { device_printf(dev, "Could not setup PHY\n"); return (1); } /* * Set handle to the USB-related register subregion used by the * generic EHCI driver. */ err = bus_space_subregion(sc->sc_io_tag, bsh, 0x100, sc->sc_io_size, &sc->sc_io_hdl); if (err != 0) return (ENXIO); /* Setup interrupt handler */ err = bus_setup_intr(dev, esc->res[3], INTR_TYPE_BIO | INTR_MPSAFE, NULL, (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl); if (err) { device_printf(dev, "Could not setup irq, " "%d\n", err); return (1); } /* Add USB device */ sc->sc_bus.bdev = device_add_child(dev, "usbus", -1); if (!sc->sc_bus.bdev) { device_printf(dev, "Could not add USB device\n"); err = bus_teardown_intr(dev, esc->res[3], sc->sc_intr_hdl); if (err) device_printf(dev, "Could not tear down irq," " %d\n", err); return (1); } device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus); strlcpy(sc->sc_vendor, "Freescale", sizeof(sc->sc_vendor)); /* Set host mode */ reg = bus_space_read_4(sc->sc_io_tag, sc->sc_io_hdl, 0xA8); reg |= 0x3; bus_space_write_4(sc->sc_io_tag, sc->sc_io_hdl, 0xA8, reg); /* Set flags */ sc->sc_flags |= EHCI_SCFLG_SETMODE | EHCI_SCFLG_NORESTERM; err = ehci_init(sc); if (!err) { sc->sc_flags |= EHCI_SCFLG_DONEINIT; err = device_probe_and_attach(sc->sc_bus.bdev); } else { device_printf(dev, "USB init failed err=%d\n", err); device_delete_child(dev, sc->sc_bus.bdev); sc->sc_bus.bdev = NULL; err = bus_teardown_intr(dev, esc->res[3], sc->sc_intr_hdl); if (err) device_printf(dev, "Could not tear down irq," " %d\n", err); return (1); } return (0); } static int vybrid_ehci_detach(device_t dev) { struct vybrid_ehci_softc *esc; ehci_softc_t *sc; int err; esc = device_get_softc(dev); sc = &esc->base; if (!(sc->sc_flags & EHCI_SCFLG_DONEINIT)) return (0); /* * only call ehci_detach() after ehci_init() */ if (sc->sc_flags & EHCI_SCFLG_DONEINIT) { ehci_detach(sc); sc->sc_flags &= ~EHCI_SCFLG_DONEINIT; } /* * Disable interrupts that might have been switched on in * ehci_init. */ if (sc->sc_io_tag && sc->sc_io_hdl) bus_space_write_4(sc->sc_io_tag, sc->sc_io_hdl, EHCI_USBINTR, 0); if (esc->res[3] && sc->sc_intr_hdl) { err = bus_teardown_intr(dev, esc->res[3], sc->sc_intr_hdl); if (err) { device_printf(dev, "Could not tear down irq," " %d\n", err); return (err); } sc->sc_intr_hdl = NULL; } if (sc->sc_bus.bdev) { device_delete_child(dev, sc->sc_bus.bdev); sc->sc_bus.bdev = NULL; } /* During module unload there are lots of children leftover */ device_delete_children(dev); bus_release_resources(dev, vybrid_ehci_spec, esc->res); return (0); } Index: head/sys/arm/include/armreg.h =================================================================== --- head/sys/arm/include/armreg.h (revision 258779) +++ head/sys/arm/include/armreg.h (revision 258780) @@ -1,435 +1,435 @@ /* $NetBSD: armreg.h,v 1.37 2007/01/06 00:50:54 christos Exp $ */ /*- * Copyright (c) 1998, 2001 Ben Harris * Copyright (c) 1994-1996 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MACHINE_ARMREG_H #define MACHINE_ARMREG_H #define INSN_SIZE 4 #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define PSR_MODE 0x0000001f /* mode mask */ #define PSR_USR26_MODE 0x00000000 #define PSR_FIQ26_MODE 0x00000001 #define PSR_IRQ26_MODE 0x00000002 #define PSR_SVC26_MODE 0x00000003 #define PSR_USR32_MODE 0x00000010 #define PSR_FIQ32_MODE 0x00000011 #define PSR_IRQ32_MODE 0x00000012 #define PSR_SVC32_MODE 0x00000013 #define PSR_ABT32_MODE 0x00000017 #define PSR_UND32_MODE 0x0000001b #define PSR_SYS32_MODE 0x0000001f #define PSR_32_MODE 0x00000010 #define PSR_FLAGS 0xf0000000 /* flags */ #define PSR_C_bit (1 << 29) /* carry */ /* The high-order byte is always the implementor */ #define CPU_ID_IMPLEMENTOR_MASK 0xff000000 #define CPU_ID_ARM_LTD 0x41000000 /* 'A' */ #define CPU_ID_DEC 0x44000000 /* 'D' */ #define CPU_ID_INTEL 0x69000000 /* 'i' */ #define CPU_ID_TI 0x54000000 /* 'T' */ #define CPU_ID_FARADAY 0x66000000 /* 'f' */ /* How to decide what format the CPUID is in. */ #define CPU_ID_ISOLD(x) (((x) & 0x0000f000) == 0x00000000) #define CPU_ID_IS7(x) (((x) & 0x0000f000) == 0x00007000) #define CPU_ID_ISNEW(x) (!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x)) /* On ARM3 and ARM6, this byte holds the foundry ID. 
*/ #define CPU_ID_FOUNDRY_MASK 0x00ff0000 #define CPU_ID_FOUNDRY_VLSI 0x00560000 /* On ARM7 it holds the architecture and variant (sub-model) */ #define CPU_ID_7ARCH_MASK 0x00800000 #define CPU_ID_7ARCH_V3 0x00000000 #define CPU_ID_7ARCH_V4T 0x00800000 #define CPU_ID_7VARIANT_MASK 0x007f0000 /* On more recent ARMs, it does the same, but in a different format */ #define CPU_ID_ARCH_MASK 0x000f0000 #define CPU_ID_ARCH_V3 0x00000000 #define CPU_ID_ARCH_V4 0x00010000 #define CPU_ID_ARCH_V4T 0x00020000 #define CPU_ID_ARCH_V5 0x00030000 #define CPU_ID_ARCH_V5T 0x00040000 #define CPU_ID_ARCH_V5TE 0x00050000 #define CPU_ID_ARCH_V5TEJ 0x00060000 #define CPU_ID_ARCH_V6 0x00070000 #define CPU_ID_CPUID_SCHEME 0x000f0000 #define CPU_ID_VARIANT_MASK 0x00f00000 /* Next three nybbles are part number */ #define CPU_ID_PARTNO_MASK 0x0000fff0 /* Intel XScale has sub fields in part number */ #define CPU_ID_XSCALE_COREGEN_MASK 0x0000e000 /* core generation */ #define CPU_ID_XSCALE_COREREV_MASK 0x00001c00 /* core revision */ #define CPU_ID_XSCALE_PRODUCT_MASK 0x000003f0 /* product number */ /* And finally, the revision number. */ #define CPU_ID_REVISION_MASK 0x0000000f /* Individual CPUs are probably best IDed by everything but the revision. */ #define CPU_ID_CPU_MASK 0xfffffff0 /* Fake CPU IDs for ARMs without CP15 */ #define CPU_ID_ARM2 0x41560200 #define CPU_ID_ARM250 0x41560250 /* Pre-ARM7 CPUs -- [15:12] == 0 */ #define CPU_ID_ARM3 0x41560300 #define CPU_ID_ARM600 0x41560600 #define CPU_ID_ARM610 0x41560610 #define CPU_ID_ARM620 0x41560620 /* ARM7 CPUs -- [15:12] == 7 */ #define CPU_ID_ARM700 0x41007000 /* XXX This is a guess. */ #define CPU_ID_ARM710 0x41007100 #define CPU_ID_ARM7500 0x41027100 #define CPU_ID_ARM710A 0x41047100 /* inc ARM7100 */ #define CPU_ID_ARM7500FE 0x41077100 #define CPU_ID_ARM710T 0x41807100 #define CPU_ID_ARM720T 0x41807200 #define CPU_ID_ARM740T8K 0x41807400 /* XXX no MMU, 8KB cache */ #define CPU_ID_ARM740T4K 0x41817400 /* XXX no MMU, 4KB cache */ /* Post-ARM7 CPUs */ #define CPU_ID_ARM810 0x41018100 #define CPU_ID_ARM920T 0x41129200 #define CPU_ID_ARM920T_ALT 0x41009200 #define CPU_ID_ARM922T 0x41029220 #define CPU_ID_ARM926EJS 0x41069260 #define CPU_ID_ARM940T 0x41029400 /* XXX no MMU */ #define CPU_ID_ARM946ES 0x41049460 /* XXX no MMU */ #define CPU_ID_ARM966ES 0x41049660 /* XXX no MMU */ #define CPU_ID_ARM966ESR1 0x41059660 /* XXX no MMU */ #define CPU_ID_ARM1020E 0x4115a200 /* (AKA arm10 rev 1) */ #define CPU_ID_ARM1022ES 0x4105a220 #define CPU_ID_ARM1026EJS 0x4106a260 #define CPU_ID_ARM1136JS 0x4107b360 #define CPU_ID_ARM1136JSR1 0x4117b360 #define CPU_ID_ARM1176JZS 0x410fb760 #define CPU_ID_CORTEXA5 0x410fc050 #define CPU_ID_CORTEXA7 0x410fc070 #define CPU_ID_CORTEXA8R1 0x411fc080 #define CPU_ID_CORTEXA8R2 0x412fc080 #define CPU_ID_CORTEXA8R3 0x413fc080 #define CPU_ID_CORTEXA9R1 0x411fc090 #define CPU_ID_CORTEXA9R2 0x412fc090 #define CPU_ID_CORTEXA9R3 0x413fc090 #define CPU_ID_CORTEXA15 0x410fc0f0 #define CPU_ID_SA110 0x4401a100 #define CPU_ID_SA1100 0x4401a110 #define CPU_ID_TI925T 0x54029250 #define CPU_ID_MV88FR131 0x56251310 /* Marvell Feroceon 88FR131 Core */ #define CPU_ID_MV88FR331 0x56153310 /* Marvell Feroceon 88FR331 Core */ #define CPU_ID_MV88FR571_VD 0x56155710 /* Marvell Feroceon 88FR571-VD Core (ID from datasheet) */ /* * The LokiPlus core also has its ID set to 0x41159260, and this define causes execution of * unsupported L2-cache instructions, so it needs to be disabled. 0x41159260 is a generic ARM926E-S ID.
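Since most of armreg.h is field masks over the CPU ID register, a worked example may help: for CPU_ID_CORTEXA9R2 (0x412fc090) above, the implementor byte is 0x41 ('A', ARM Ltd.), the variant nibble is 2, the part number is 0xc09, and the revision is 0. An illustrative decoder (not part of the header):

static inline void
cpuid_decode(uint32_t id, u_int *impl, u_int *var, u_int *part, u_int *rev)
{
	*impl = (id & CPU_ID_IMPLEMENTOR_MASK) >> 24;	/* 0x41 for ARM Ltd. */
	*var = (id & CPU_ID_VARIANT_MASK) >> 20;	/* major revision: rN of rNpM */
	*part = (id & CPU_ID_PARTNO_MASK) >> 4;		/* e.g. 0xc09 for Cortex-A9 */
	*rev = (id & CPU_ID_REVISION_MASK);		/* minor revision: pM of rNpM */
}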
*/ #ifdef SOC_MV_LOKIPLUS #define CPU_ID_MV88FR571_41 0x00000000 #else #define CPU_ID_MV88FR571_41 0x41159260 /* Marvell Feroceon 88FR571-VD Core (actual ID from CPU reg) */ #endif #define CPU_ID_MV88SV581X_V7 0x561F5810 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_MV88SV584X_V7 0x562F5840 /* Marvell Sheeva 88SV584x v7 Core */ /* Marvell's CPUIDs with ARM ID in implementor field */ #define CPU_ID_ARM_88SV581X_V7 0x413FC080 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_FA526 0x66015260 #define CPU_ID_FA626TE 0x66056260 #define CPU_ID_SA1110 0x6901b110 #define CPU_ID_IXP1200 0x6901c120 #define CPU_ID_80200 0x69052000 #define CPU_ID_PXA250 0x69052100 /* sans core revision */ #define CPU_ID_PXA210 0x69052120 #define CPU_ID_PXA250A 0x69052100 /* 1st version Core */ #define CPU_ID_PXA210A 0x69052120 /* 1st version Core */ #define CPU_ID_PXA250B 0x69052900 /* 3rd version Core */ #define CPU_ID_PXA210B 0x69052920 /* 3rd version Core */ #define CPU_ID_PXA250C 0x69052d00 /* 4th version Core */ #define CPU_ID_PXA210C 0x69052d20 /* 4th version Core */ #define CPU_ID_PXA27X 0x69054110 #define CPU_ID_80321_400 0x69052420 #define CPU_ID_80321_600 0x69052430 #define CPU_ID_80321_400_B0 0x69052c20 #define CPU_ID_80321_600_B0 0x69052c30 #define CPU_ID_80219_400 0x69052e20 /* A0 stepping/revision. */ #define CPU_ID_80219_600 0x69052e30 /* A0 stepping/revision. */ #define CPU_ID_81342 0x69056810 #define CPU_ID_IXP425 0x690541c0 #define CPU_ID_IXP425_533 0x690541c0 #define CPU_ID_IXP425_400 0x690541d0 #define CPU_ID_IXP425_266 0x690541f0 #define CPU_ID_IXP435 0x69054040 #define CPU_ID_IXP465 0x69054200 /* ARM3-specific coprocessor 15 registers */ #define ARM3_CP15_FLUSH 1 #define ARM3_CP15_CONTROL 2 #define ARM3_CP15_CACHEABLE 3 #define ARM3_CP15_UPDATEABLE 4 #define ARM3_CP15_DISRUPTIVE 5 /* ARM3 Control register bits */ #define ARM3_CTL_CACHE_ON 0x00000001 #define ARM3_CTL_SHARED 0x00000002 #define ARM3_CTL_MONITOR 0x00000004 /* CPUID registers */ #define ARM_PFR0_ARM_ISA_MASK 0x0000000f #define ARM_PFR0_THUMB_MASK 0x000000f0 #define ARM_PFR0_THUMB 0x10 #define ARM_PFR0_THUMB2 0x30 #define ARM_PFR0_JAZELLE_MASK 0x00000f00 #define ARM_PFR0_THUMBEE_MASK 0x0000f000 #define ARM_PFR1_ARMV4_MASK 0x0000000f #define ARM_PFR1_SEC_EXT_MASK 0x000000f0 #define ARM_PFR1_MICROCTRL_MASK 0x00000f00 /* * Post-ARM3 CP15 registers: * * 1 Control register * * 2 Translation Table Base * * 3 Domain Access Control * * 4 Reserved * * 5 Fault Status * * 6 Fault Address * * 7 Cache/write-buffer Control * * 8 TLB Control * * 9 Cache Lockdown * * 10 TLB Lockdown * * 11 Reserved * * 12 Reserved * * 13 Process ID (for FCSE) * * 14 Reserved * * 15 Implementation Dependent */ /* Some of the definitions below need cleaning up for V3/V4 architectures */ /* CPU control register (CP15 register 1) */ #define CPU_CONTROL_MMU_ENABLE 0x00000001 /* M: MMU/Protection unit enable */ #define CPU_CONTROL_AFLT_ENABLE 0x00000002 /* A: Alignment fault enable */ #define CPU_CONTROL_DC_ENABLE 0x00000004 /* C: IDC/DC enable */ #define CPU_CONTROL_WBUF_ENABLE 0x00000008 /* W: Write buffer enable */ #define CPU_CONTROL_32BP_ENABLE 0x00000010 /* P: 32-bit exception handlers */ #define CPU_CONTROL_32BD_ENABLE 0x00000020 /* D: 32-bit addressing */ #define CPU_CONTROL_LABT_ENABLE 0x00000040 /* L: Late abort enable */ #define CPU_CONTROL_BEND_ENABLE 0x00000080 /* B: Big-endian mode */ #define CPU_CONTROL_SYST_ENABLE 0x00000100 /* S: System protection bit */ #define CPU_CONTROL_ROM_ENABLE 0x00000200 /* R: ROM protection bit */ #define 
CPU_CONTROL_CPCLK 0x00000400 /* F: Implementation defined */ #define CPU_CONTROL_BPRD_ENABLE 0x00000800 /* Z: Branch prediction enable */ #define CPU_CONTROL_IC_ENABLE 0x00001000 /* I: IC enable */ #define CPU_CONTROL_VECRELOC 0x00002000 /* V: Vector relocation */ #define CPU_CONTROL_ROUNDROBIN 0x00004000 /* RR: Predictable replacement */ #define CPU_CONTROL_V4COMPAT 0x00008000 /* L4: ARMv4 compat LDR R15 etc */ #define CPU_CONTROL_FI_ENABLE 0x00200000 /* FI: Low interrupt latency */ #define CPU_CONTROL_UNAL_ENABLE 0x00400000 /* U: unaligned data access */ #define CPU_CONTROL_V6_EXTPAGE 0x00800000 /* XP: ARMv6 extended page tables */ #define CPU_CONTROL_L2_ENABLE 0x04000000 /* L2 Cache enabled */ #define CPU_CONTROL_AF_ENABLE 0x20000000 /* Access Flag enable */ #define CPU_CONTROL_IDC_ENABLE CPU_CONTROL_DC_ENABLE /* ARM11x6 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM11X6_AUXCTL_RS 0x00000001 /* return stack */ #define ARM11X6_AUXCTL_DB 0x00000002 /* dynamic branch prediction */ #define ARM11X6_AUXCTL_SB 0x00000004 /* static branch prediction */ #define ARM11X6_AUXCTL_TR 0x00000008 /* MicroTLB replacement strat. */ #define ARM11X6_AUXCTL_EX 0x00000010 /* exclusive L1/L2 cache */ #define ARM11X6_AUXCTL_RA 0x00000020 /* clean entire cache disable */ #define ARM11X6_AUXCTL_RV 0x00000040 /* block transfer cache disable */ #define ARM11X6_AUXCTL_CZ 0x00000080 /* restrict cache size */ /* ARM1136 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1136_AUXCTL_PFI 0x80000000 /* PFI: partial FI mode. */ /* This is an undocumented flag * used to work around a cache bug * in r0 steppings. See errata * 364296. */ /* ARM1176 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1176_AUXCTL_PHD 0x10000000 /* inst. prefetch halting disable */ #define ARM1176_AUXCTL_BFD 0x20000000 /* branch folding disable */ #define ARM1176_AUXCTL_FSD 0x40000000 /* force speculative ops disable */ #define ARM1176_AUXCTL_FIO 0x80000000 /* low intr latency override */ /* XScale Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define XSCALE_AUXCTL_K 0x00000001 /* dis.
write buffer coalescing */ #define XSCALE_AUXCTL_P 0x00000002 /* ECC protect page table access */ /* Note: XScale core 3 uses these for LLR D-cache attributes */ #define XSCALE_AUXCTL_MD_WB_RA 0x00000000 /* mini-D$ wb, read-allocate */ #define XSCALE_AUXCTL_MD_WB_RWA 0x00000010 /* mini-D$ wb, read/write-allocate */ #define XSCALE_AUXCTL_MD_WT 0x00000020 /* mini-D$ wt, read-allocate */ #define XSCALE_AUXCTL_MD_MASK 0x00000030 /* XScale Core 3 only */ #define XSCALE_AUXCTL_LLR 0x00000400 /* Enable L2 for LLR Cache */ /* Marvell Extra Features Register (CP15 register 1, opcode2 0) */ #define MV_DC_REPLACE_LOCK 0x80000000 /* Replace DCache Lock */ #define MV_DC_STREAM_ENABLE 0x20000000 /* DCache Streaming Switch */ #define MV_WA_ENABLE 0x10000000 /* Enable Write Allocate */ #define MV_L2_PREFETCH_DISABLE 0x01000000 /* L2 Cache Prefetch Disable */ #define MV_L2_INV_EVICT_ERR 0x00800000 /* L2 Invalidates Uncorrectable Error Line Eviction */ #define MV_L2_ENABLE 0x00400000 /* L2 Cache enable */ #define MV_IC_REPLACE_LOCK 0x00080000 /* Replace ICache Lock */ #define MV_BGH_ENABLE 0x00040000 /* Branch Global History Register Enable */ #define MV_BTB_DISABLE 0x00020000 /* Branch Target Buffer Disable */ #define MV_L1_PARERR_ENABLE 0x00010000 /* L1 Parity Error Enable */ /* Cache type register definitions */ #define CPU_CT_ISIZE(x) ((x) & 0xfff) /* I$ info */ #define CPU_CT_DSIZE(x) (((x) >> 12) & 0xfff) /* D$ info */ #define CPU_CT_S (1U << 24) /* split cache */ #define CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */ #define CPU_CT_FORMAT(x) ((x) >> 29) #define CPU_CT_CTYPE_WT 0 /* write-through */ #define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */ #define CPU_CT_CTYPE_WB2 2 /* w/b, clean w/ cp15,7 */ #define CPU_CT_CTYPE_WB6 6 /* w/b, cp15,7, lockdown fmt A */ #define CPU_CT_CTYPE_WB7 7 /* w/b, cp15,7, lockdown fmt B */ #define CPU_CT_xSIZE_LEN(x) ((x) & 0x3) /* line size */ #define CPU_CT_xSIZE_M (1U << 2) /* multiplier */ #define CPU_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x7) /* associativity */ #define CPU_CT_xSIZE_SIZE(x) (((x) >> 6) & 0x7) /* size */ #define CPU_CT_ARMV7 0x4 /* ARM v7 Cache type definitions */ -#define CPUV7_CT_CTYPE_WT (1 << 31) +#define CPUV7_CT_CTYPE_WT (1U << 31) #define CPUV7_CT_CTYPE_WB (1 << 30) #define CPUV7_CT_CTYPE_RA (1 << 29) #define CPUV7_CT_CTYPE_WA (1 << 28) #define CPUV7_CT_xSIZE_LEN(x) ((x) & 0x7) /* line size */ #define CPUV7_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x3ff) /* associativity */ #define CPUV7_CT_xSIZE_SET(x) (((x) >> 13) & 0x7fff) /* num sets */ #define CPU_CLIDR_CTYPE(reg,x) (((reg) >> ((x) * 3)) & 0x7) #define CPU_CLIDR_LOUIS(reg) (((reg) >> 21) & 0x7) #define CPU_CLIDR_LOC(reg) (((reg) >> 24) & 0x7) #define CPU_CLIDR_LOUU(reg) (((reg) >> 27) & 0x7) #define CACHE_ICACHE 1 #define CACHE_DCACHE 2 #define CACHE_SEP_CACHE 3 #define CACHE_UNI_CACHE 4 /* Fault status register definitions */ #define FAULT_TYPE_MASK 0x0f #define FAULT_USER 0x10 #define FAULT_WRTBUF_0 0x00 /* Vector Exception */ #define FAULT_WRTBUF_1 0x02 /* Terminal Exception */ #define FAULT_BUSERR_0 0x04 /* External Abort on Linefetch -- Section */ #define FAULT_BUSERR_1 0x06 /* External Abort on Linefetch -- Page */ #define FAULT_BUSERR_2 0x08 /* External Abort on Non-linefetch -- Section */ #define FAULT_BUSERR_3 0x0a /* External Abort on Non-linefetch -- Page */ #define FAULT_BUSTRNL1 0x0c /* External abort on Translation -- Level 1 */ #define FAULT_BUSTRNL2 0x0e /* External abort on Translation -- Level 2 */ #define FAULT_ALIGN_0 0x01 /* Alignment */ #define FAULT_ALIGN_1 0x03 /*
Alignment */ #define FAULT_TRANS_S 0x05 /* Translation -- Section */ #define FAULT_TRANS_F 0x06 /* Translation -- Flag */ #define FAULT_TRANS_P 0x07 /* Translation -- Page */ #define FAULT_DOMAIN_S 0x09 /* Domain -- Section */ #define FAULT_DOMAIN_P 0x0b /* Domain -- Page */ #define FAULT_PERM_S 0x0d /* Permission -- Section */ #define FAULT_PERM_P 0x0f /* Permission -- Page */ #define FAULT_IMPRECISE 0x400 /* Imprecise exception (XSCALE) */ /* * Address of the vector page, low and high versions. */ #ifndef __ASSEMBLER__ #define ARM_VECTORS_LOW 0x00000000U #define ARM_VECTORS_HIGH 0xffff0000U #else #define ARM_VECTORS_LOW 0 #define ARM_VECTORS_HIGH 0xffff0000 #endif /* * ARM Instructions * * 3 3 2 2 2 * 1 0 9 8 7 0 * +-------+-------------------------------------------------------+ * | cond | instruction dependent | * |c c c c| | * +-------+-------------------------------------------------------+ */ #define INSN_SIZE 4 /* Always 4 bytes */ #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define INSN_COND_AL 0xe0000000 /* Always condition */ #define THUMB_INSN_SIZE 2 /* Some are 4 bytes. */ #endif /* !MACHINE_ARMREG_H */ Index: head/sys/arm/lpc/if_lpereg.h =================================================================== --- head/sys/arm/lpc/if_lpereg.h (revision 258779) +++ head/sys/arm/lpc/if_lpereg.h (revision 258780) @@ -1,208 +1,208 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* * $FreeBSD$ */ #ifndef _ARM_LPC_IF_LPEREG_H #define _ARM_LPC_IF_LPEREG_H #define LPE_MAC1 0x000 #define LPE_MAC1_RXENABLE (1 << 0) #define LPE_MAC1_PASSALL (1 << 1) #define LPE_MAC1_RXFLOWCTRL (1 << 2) #define LPE_MAC1_TXFLOWCTRL (1 << 3) #define LPE_MAC1_LOOPBACK (1 << 4) #define LPE_MAC1_RESETTX (1 << 8) #define LPE_MAC1_RESETMCSTX (1 << 9) #define LPE_MAC1_RESETRX (1 << 10) #define LPE_MAC1_RESETMCSRX (1 << 11) #define LPE_MAC1_SIMRESET (1 << 14) #define LPE_MAC1_SOFTRESET (1 << 15) #define LPE_MAC2 0x004 #define LPE_MAC2_FULLDUPLEX (1 << 0) #define LPE_MAC2_FRAMELENCHECK (1 << 1) #define LPE_MAC2_HUGEFRAME (1 << 2) #define LPE_MAC2_DELAYEDCRC (1 << 3) #define LPE_MAC2_CRCENABLE (1 << 4) #define LPE_MAC2_PADCRCENABLE (1 << 5) #define LPE_MAC2_VLANPADENABLE (1 << 6) #define LPE_MAC2_AUTOPADENABLE (1 << 7) #define LPE_MAC2_PUREPREAMBLE (1 << 8) #define LPE_MAC2_LONGPREAMBLE (1 << 9) #define LPE_MAC2_NOBACKOFF (1 << 12) #define LPE_MAC2_BACKPRESSURE (1 << 13) #define LPE_MAC2_EXCESSDEFER (1 << 14) #define LPE_IPGT 0x008 #define LPE_IPGR 0x00c #define LPE_CLRT 0x010 #define LPE_MAXF 0x014 #define LPE_SUPP 0x018 #define LPE_SUPP_SPEED (1 << 8) #define LPE_TEST 0x01c #define LPE_MCFG 0x020 #define LPE_MCFG_SCANINCR (1 << 0) #define LPE_MCFG_SUPPREAMBLE (1 << 1) #define LPE_MCFG_CLKSEL(_n) ((_n & 0x7) << 2) #define LPC_MCFG_RESETMII (1 << 15) #define LPE_MCMD 0x024 #define LPE_MCMD_READ (1 << 0) #define LPE_MCMD_WRITE (0 << 0) #define LPE_MCMD_SCAN (1 << 1) #define LPE_MADR 0x028 #define LPE_MADR_REGMASK 0x1f #define LPE_MADR_REGSHIFT 0 #define LPE_MADR_PHYMASK 0x1f #define LPE_MADR_PHYSHIFT 8 #define LPE_MWTD 0x02c #define LPE_MWTD_DATAMASK 0xffff #define LPE_MRDD 0x030 #define LPE_MRDD_DATAMASK 0xffff #define LPE_MIND 0x034 #define LPE_MIND_BUSY (1 << 0) #define LPE_MIND_SCANNING (1 << 1) #define LPE_MIND_INVALID (1 << 2) #define LPE_MIND_MIIFAIL (1 << 3) #define LPE_SA0 0x040 #define LPE_SA1 0x044 #define LPE_SA2 0x048 #define LPE_COMMAND 0x100 #define LPE_COMMAND_RXENABLE (1 << 0) #define LPE_COMMAND_TXENABLE (1 << 1) #define LPE_COMMAND_REGRESET (1 << 3) #define LPE_COMMAND_TXRESET (1 << 4) #define LPE_COMMAND_RXRESET (1 << 5) #define LPE_COMMAND_PASSRUNTFRAME (1 << 6) #define LPE_COMMAND_PASSRXFILTER (1 << 7) #define LPE_COMMAND_TXFLOWCTL (1 << 8) #define LPE_COMMAND_RMII (1 << 9) #define LPE_COMMAND_FULLDUPLEX (1 << 10) #define LPE_STATUS 0x104 #define LPE_STATUS_RXACTIVE (1 << 0) #define LPE_STATUS_TXACTIVE (1 << 1) #define LPE_RXDESC 0x108 #define LPE_RXSTATUS 0x10c #define LPE_RXDESC_NUMBER 0x110 #define LPE_RXDESC_PROD 0x114 #define LPE_RXDESC_CONS 0x118 #define LPE_TXDESC 0x11c #define LPE_TXSTATUS 0x120 #define LPE_TXDESC_NUMBER 0x124 #define LPE_TXDESC_PROD 0x128 #define LPE_TXDESC_CONS 0x12c #define LPE_TSV0 0x158 #define LPE_TSV1 0x15c #define LPE_RSV 0x160 #define LPE_FLOWCONTROL_COUNTER 0x170 #define LPE_FLOWCONTROL_STATUS 0x174 #define LPE_RXFILTER_CTRL 0x200 #define LPE_RXFILTER_UNICAST (1 << 0) #define LPE_RXFILTER_BROADCAST (1 << 1) #define LPE_RXFILTER_MULTICAST (1 << 2) #define LPE_RXFILTER_UNIHASH (1 << 3) #define LPE_RXFILTER_MULTIHASH (1 << 4) #define LPE_RXFILTER_PERFECT (1 << 5) #define LPE_RXFILTER_WOL (1 << 12) #define LPE_RXFILTER_FILTWOL (1 << 13) #define LPE_RXFILTER_WOL_STATUS 0x204 #define LPE_RXFILTER_WOL_CLEAR 0x208 #define LPE_HASHFILTER_L 0x210 #define LPE_HASHFILTER_H 0x214 #define LPE_INTSTATUS 0xfe0 #define LPE_INTENABLE 0xfe4 #define LPE_INTCLEAR 0xfe8 #define LPE_INTSET 0xfec #define LPE_INT_RXOVERRUN (1 << 0) #define LPE_INT_RXERROR (1 << 1) 
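Given the LPE_MADR masks and shifts defined above, the MII address word presumably composes as sketched below (the helper name is invented here; pairing it with LPE_MCMD_READ and a poll on LPE_MIND_BUSY is an inference from the register names, not taken from the lpe driver):

static inline uint32_t
lpe_mk_madr(int phy, int reg)
{
	/* Pack the PHY address into bits [12:8] and the register into [4:0]. */
	return (((phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT) |
	    ((reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT));
}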
#define LPE_INT_RXFINISH (1 << 2) #define LPE_INT_RXDONE (1 << 3) #define LPE_INT_TXUNDERRUN (1 << 4) #define LPE_INT_TXERROR (1 << 5) #define LPE_INT_TXFINISH (1 << 6) #define LPE_INT_TXDONE (1 << 7) #define LPE_INT_SOFTINT (1 << 12) #define LPE_INTWAKEUPINT (1 << 13) #define LPE_POWERDOWN 0xff4 #define LPE_DESC_ALIGN 8 #define LPE_TXDESC_NUM 128 #define LPE_RXDESC_NUM 128 #define LPE_TXDESC_SIZE (LPE_TXDESC_NUM * sizeof(struct lpe_hwdesc)) #define LPE_RXDESC_SIZE (LPE_RXDESC_NUM * sizeof(struct lpe_hwdesc)) #define LPE_TXSTATUS_SIZE (LPE_TXDESC_NUM * sizeof(struct lpe_hwstatus)) #define LPE_RXSTATUS_SIZE (LPE_RXDESC_NUM * sizeof(struct lpe_hwstatus)) #define LPE_MAXFRAGS 8 struct lpe_hwdesc { uint32_t lhr_data; uint32_t lhr_control; }; struct lpe_hwstatus { uint32_t lhs_info; uint32_t lhs_crc; }; #define LPE_INC(x, y) (x) = ((x) == ((y)-1)) ? 0 : (x)+1 /* These are valid for both Rx and Tx descriptors */ #define LPE_HWDESC_SIZE_MASK (1 << 10) -#define LPE_HWDESC_INTERRUPT (1 << 31) +#define LPE_HWDESC_INTERRUPT (1U << 31) /* These are valid for Tx descriptors */ #define LPE_HWDESC_LAST (1 << 30) #define LPE_HWDESC_CRC (1 << 29) #define LPE_HWDESC_PAD (1 << 28) #define LPE_HWDESC_HUGE (1 << 27) #define LPE_HWDESC_OVERRIDE (1 << 26) /* These are valid for Tx status descriptors */ #define LPE_HWDESC_COLLISIONS(_n) (((_n) >> 21) & 0x7) #define LPE_HWDESC_DEFER (1 << 25) #define LPE_HWDESC_EXCDEFER (1 << 26) #define LPE_HWDESC_EXCCOLL (1 << 27) #define LPE_HWDESC_LATECOLL (1 << 28) #define LPE_HWDESC_UNDERRUN (1 << 29) #define LPE_HWDESC_TXNODESCR (1 << 30) -#define LPE_HWDESC_ERROR (1 << 31) +#define LPE_HWDESC_ERROR (1U << 31) /* These are valid for Rx status descriptors */ #define LPE_HWDESC_CONTROL (1 << 18) #define LPE_HWDESC_VLAN (1 << 19) #define LPE_HWDESC_FAILFILTER (1 << 20) #define LPE_HWDESC_MULTICAST (1 << 21) #define LPE_HWDESC_BROADCAST (1 << 22) #define LPE_HWDESC_CRCERROR (1 << 23) #define LPE_HWDESC_SYMBOLERROR (1 << 24) #define LPE_HWDESC_LENGTHERROR (1 << 25) #define LPE_HWDESC_RANGEERROR (1 << 26) #define LPE_HWDESC_ALIGNERROR (1 << 27) #define LPE_HWDESC_OVERRUN (1 << 28) #define LPE_HWDESC_RXNODESCR (1 << 29) #define LPE_HWDESC_LASTFLAG (1 << 30) -#define LPE_HWDESC_ERROR (1 << 31) +#define LPE_HWDESC_ERROR (1U << 31) #endif /* _ARM_LPC_IF_LPEREG_H */ Index: head/sys/arm/lpc/lpcreg.h =================================================================== --- head/sys/arm/lpc/lpcreg.h (revision 258779) +++ head/sys/arm/lpc/lpcreg.h (revision 258780) @@ -1,661 +1,661 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
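The LPE_INC macro above implements a wrap-around advance of a descriptor ring index; a short usage sketch with illustrative variable names:

int i, cons;

cons = 0;
for (i = 0; i < LPE_RXDESC_NUM; i++)
	LPE_INC(cons, LPE_RXDESC_NUM);
/* One full lap of the Rx ring: cons has wrapped and is 0 again here. */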
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _ARM_LPC_LPCREG_H #define _ARM_LPC_LPCREG_H #define LPC_DEV_PHYS_BASE 0x40000000 #define LPC_DEV_P5_PHYS_BASE 0x20000000 #define LPC_DEV_P6_PHYS_BASE 0x30000000 #define LPC_DEV_BASE 0xd0000000 #define LPC_DEV_SIZE 0x10000000 /* * Interrupt controller (from UM10326: LPC32x0 User manual, page 87) */ #define LPC_INTC_MIC_ER 0x0000 #define LPC_INTC_MIC_RSR 0x0004 #define LPC_INTC_MIC_SR 0x0008 #define LPC_INTC_MIC_APR 0x000c #define LPC_INTC_MIC_ATR 0x0010 #define LPC_INTC_MIC_ITR 0x0014 #define LPC_INTC_SIC1_ER 0x4000 #define LPC_INTC_SIC1_RSR 0x4004 #define LPC_INTC_SIC1_SR 0x4008 #define LPC_INTC_SIC1_APR 0x400c #define LPC_INTC_SIC1_ATR 0x4010 #define LPC_INTC_SIC1_ITR 0x4014 #define LPC_INTC_SIC2_ER 0x8000 #define LPC_INTC_SIC2_RSR 0x8004 #define LPC_INTC_SIC2_SR 0x8008 #define LPC_INTC_SIC2_APR 0x800c #define LPC_INTC_SIC2_ATR 0x8010 #define LPC_INTC_SIC2_ITR 0x8014 /* * Timer 0|1|2|3|4|5. (from UM10326: LPC32x0 User manual, page 540) */ #define LPC_TIMER_IR 0x00 #define LPC_TIMER_TCR 0x04 #define LPC_TIMER_TCR_ENABLE (1 << 0) #define LPC_TIMER_TCR_RESET (1 << 1) #define LPC_TIMER_TC 0x08 #define LPC_TIMER_PR 0x0c #define LPC_TIMER_PC 0x10 #define LPC_TIMER_MCR 0x14 #define LPC_TIMER_MCR_MR0I (1 << 0) #define LPC_TIMER_MCR_MR0R (1 << 1) #define LPC_TIMER_MCR_MR0S (1 << 2) #define LPC_TIMER_MCR_MR1I (1 << 3) #define LPC_TIMER_MCR_MR1R (1 << 4) #define LPC_TIMER_MCR_MR1S (1 << 5) #define LPC_TIMER_MCR_MR2I (1 << 6) #define LPC_TIMER_MCR_MR2R (1 << 7) #define LPC_TIMER_MCR_MR2S (1 << 8) #define LPC_TIMER_MCR_MR3I (1 << 9) #define LPC_TIMER_MCR_MR3R (1 << 10) #define LPC_TIMER_MCR_MR3S (1 << 11) #define LPC_TIMER_MR0 0x18 #define LPC_TIMER_CTCR 0x70 /* * Watchdog timer. (from UM10326: LPC32x0 User manual, page 572) */ #define LPC_WDTIM_BASE (LPC_DEV_BASE + 0x3c000) #define LPC_WDTIM_INT 0x00 #define LPC_WDTIM_CTRL 0x04 #define LPC_WDTIM_COUNTER 0x08 #define LPC_WDTIM_MCTRL 0x0c #define LPC_WDTIM_MATCH0 0x10 #define LPC_WDTIM_EMR 0x14 #define LPC_WDTIM_PULSE 0x18 #define LPC_WDTIM_RES 0x1c /* * Clocking and power control. 
(from UM10326: LPC32x0 User manual, page 58) */ #define LPC_CLKPWR_BASE (LPC_DEV_BASE + 0x4000) #define LPC_CLKPWR_PWR_CTRL 0x44 #define LPC_CLKPWR_OSC_CTRL 0x4c #define LPC_CLKPWR_SYSCLK_CTRL 0x50 #define LPC_CLKPWR_PLL397_CTRL 0x48 #define LPC_CLKPWR_HCLKPLL_CTRL 0x58 #define LPC_CLKPWR_HCLKDIV_CTRL 0x40 #define LPC_CLKPWR_TEST_CTRL 0xa4 #define LPC_CLKPWR_AUTOCLK_CTRL 0xec #define LPC_CLKPWR_START_ER_PIN 0x30 #define LPC_CLKPWR_START_ER_INT 0x20 #define LPC_CLKPWR_P0_INTR_ER 0x18 #define LPC_CLKPWR_START_SR_PIN 0x38 #define LPC_CLKPWR_START_SR_INT 0x28 #define LPC_CLKPWR_START_RSR_PIN 0x34 #define LPC_CLKPWR_START_RSR_INT 0x24 #define LPC_CLKPWR_START_APR_PIN 0x3c #define LPC_CLKPWR_START_APR_INT 0x2c #define LPC_CLKPWR_USB_CTRL 0x64 #define LPC_CLKPWR_USB_CTRL_SLAVE_HCLK (1 << 24) #define LPC_CLKPWR_USB_CTRL_I2C_EN (1 << 23) #define LPC_CLKPWR_USB_CTRL_DEV_NEED_CLK_EN (1 << 22) #define LPC_CLKPWR_USB_CTRL_HOST_NEED_CLK_EN (1 << 21) #define LPC_CLKPWR_USB_CTRL_BUSKEEPER (1 << 19) #define LPC_CLKPWR_USB_CTRL_CLK_EN2 (1 << 18) #define LPC_CLKPWR_USB_CTRL_CLK_EN1 (1 << 17) #define LPC_CLKPWR_USB_CTRL_PLL_PDOWN (1 << 16) #define LPC_CLKPWR_USB_CTRL_BYPASS (1 << 15) #define LPC_CLKPWR_USB_CTRL_DIRECT_OUT (1 << 14) #define LPC_CLKPWR_USB_CTRL_FEEDBACK (1 << 13) #define LPC_CLKPWR_USB_CTRL_POSTDIV(_x) ((_x & 0x3) << 11) #define LPC_CLKPWR_USB_CTRL_PREDIV(_x) ((_x & 0x3) << 9) #define LPC_CLKPWR_USB_CTRL_FDBKDIV(_x) (((_x-1) & 0xff) << 1) #define LPC_CLKPWR_USB_CTRL_PLL_LOCK (1 << 0) #define LPC_CLKPWR_USBDIV_CTRL 0x1c #define LPC_CLKPWR_MS_CTRL 0x80 #define LPC_CLKPWR_MS_CTRL_DISABLE_SD (1 << 10) #define LPC_CLKPWR_MS_CTRL_CLOCK_EN (1 << 9) #define LPC_CLKPWR_MS_CTRL_MSSDIO23_PAD (1 << 8) #define LPC_CLKPWR_MS_CTRL_MSSDIO1_PAD (1 << 7) #define LPC_CLKPWR_MS_CTRL_MSSDIO0_PAD (1 << 6) #define LPC_CLKPWR_MS_CTRL_SD_CLOCK (1 << 5) #define LPC_CLKPWR_MS_CTRL_CLKDIV_MASK 0xf #define LPC_CLKPWR_DMACLK_CTRL 0xe8 #define LPC_CLKPWR_DMACLK_CTRL_EN (1 << 0) #define LPC_CLKPWR_FLASHCLK_CTRL 0xc8 #define LPC_CLKPWR_MACCLK_CTRL 0x90 #define LPC_CLKPWR_MACCLK_CTRL_REG (1 << 0) #define LPC_CLKPWR_MACCLK_CTRL_SLAVE (1 << 1) #define LPC_CLKPWR_MACCLK_CTRL_MASTER (1 << 2) #define LPC_CLKPWR_MACCLK_CTRL_HDWINF(_n) ((_n & 0x3) << 3) #define LPC_CLKPWR_LCDCLK_CTRL 0x54 #define LPC_CLKPWR_LCDCLK_CTRL_DISPTYPE (1 << 8) #define LPC_CLKPWR_LCDCLK_CTRL_MODE(_n) ((_n & 0x3) << 6) #define LPC_CLKPWR_LCDCLK_CTRL_MODE_12 0x0 #define LPC_CLKPWR_LCDCLK_CTRL_MODE_15 0x1 #define LPC_CLKPWR_LCDCLK_CTRL_MODE_16 0x2 #define LPC_CLKPWR_LCDCLK_CTRL_MODE_24 0x3 #define LPC_CLKPWR_LCDCLK_CTRL_HCLKEN (1 << 5) #define LPC_CLKPWR_LCDCLK_CTRL_CLKDIV(_n) ((_n) & 0x1f) #define LPC_CLKPWR_I2S_CTRL 0x7c #define LPC_CLKPWR_SSP_CTRL 0x78 #define LPC_CLKPWR_SSP_CTRL_SSP1RXDMA (1 << 5) #define LPC_CLKPWR_SSP_CTRL_SSP1TXDMA (1 << 4) #define LPC_CLKPWR_SSP_CTRL_SSP0RXDMA (1 << 3) #define LPC_CLKPWR_SSP_CTRL_SSP0TXDMA (1 << 2) #define LPC_CLKPWR_SSP_CTRL_SSP1EN (1 << 1) #define LPC_CLKPWR_SSP_CTRL_SSP0EN (1 << 0) #define LPC_CLKPWR_SPI_CTRL 0xc4 #define LPC_CLKPWR_I2CCLK_CTRL 0xac #define LPC_CLKPWR_TIMCLK_CTRL1 0xc0 #define LPC_CLKPWR_TIMCLK_CTRL1_TIMER4 (1 << 0) #define LPC_CLKPWR_TIMCLK_CTRL1_TIMER5 (1 << 1) #define LPC_CLKPWR_TIMCLK_CTRL1_TIMER0 (1 << 2) #define LPC_CLKPWR_TIMCLK_CTRL1_TIMER1 (1 << 3) #define LPC_CLKPWR_TIMCLK_CTRL1_TIMER2 (1 << 4) #define LPC_CLKPWR_TIMCLK_CTRL1_TIMER3 (1 << 5) #define LPC_CLKPWR_TIMCLK_CTRL1_MOTORCTL (1 << 6) #define LPC_CLKPWR_TIMCLK_CTRL 0xbc #define LPC_CLKPWR_TIMCLK_CTRL_WATCHDOG (1 << 0) #define 
LPC_CLKPWR_TIMCLK_CTRL_HSTIMER (1 << 1) #define LPC_CLKPWR_ADCLK_CTRL 0xb4 #define LPC_CLKPWR_ADCLK_CTRL1 0x60 #define LPC_CLKPWR_KEYCLK_CTRL 0xb0 #define LPC_CLKPWR_PWMCLK_CTRL 0xb8 #define LPC_CLKPWR_UARTCLK_CTRL 0xe4 #define LPC_CLKPWR_POS0_IRAM_CTRL 0x110 #define LPC_CLKPWR_POS1_IRAM_CTRL 0x114 /* Additional UART registers in CLKPWR address space. */ #define LPC_CLKPWR_UART_U3CLK 0xd0 #define LPC_CLKPWR_UART_U4CLK 0xd4 #define LPC_CLKPWR_UART_U5CLK 0xd8 #define LPC_CLKPWR_UART_U6CLK 0xdc #define LPC_CLKPWR_UART_UCLK_HCLK (1 << 16) #define LPC_CLKPWR_UART_UCLK_X(_n) (((_n) & 0xff) << 8) #define LPC_CLKPWR_UART_UCLK_Y(_n) ((_n) & 0xff) #define LPC_CLKPWR_UART_IRDACLK 0xe0 /* Additional UART registers */ #define LPC_UART_BASE (LPC_DEV_BASE + 0x80000) #define LPC_UART_CONTROL_BASE (LPC_DEV_BASE + 0x54000) #define LPC_UART5_BASE (LPC_DEV_BASE + 0x90000) #define LPC_UART_CTRL 0x00 #define LPC_UART_CLKMODE 0x04 #define LPC_UART_CLKMODE_UART3(_n) (((_n) & 0x3) << 4) #define LPC_UART_CLKMODE_UART4(_n) (((_n) & 0x3) << 6) #define LPC_UART_CLKMODE_UART5(_n) (((_n) & 0x3) << 8) #define LPC_UART_CLKMODE_UART6(_n) (((_n) & 0x3) << 10) #define LPC_UART_LOOP 0x08 #define LPC_UART_FIFOSIZE 64 /* * Real time clock. (from UM10326: LPC32x0 User manual, page 566) */ #define LPC_RTC_UCOUNT 0x00 #define LPC_RTC_DCOUNT 0x04 #define LPC_RTC_MATCH0 0x08 #define LPC_RTC_MATCH1 0x0c #define LPC_RTC_CTRL 0x10 #define LPC_RTC_CTRL_ONSW (1 << 7) #define LPC_RTC_CTRL_DISABLE (1 << 6) #define LPC_RTC_CTRL_RTCRESET (1 << 4) #define LPC_RTC_CTRL_MATCH0ONSW (1 << 3) #define LPC_RTC_CTRL_MATCH1ONSW (1 << 2) #define LPC_RTC_CTRL_MATCH1INTR (1 << 1) #define LPC_RTC_CTRL_MATCH0INTR (1 << 0) #define LPC_RTC_INTSTAT 0x14 #define LPC_RTC_KEY 0x18 #define LPC_RTC_SRAM_BEGIN 0x80 #define LPC_RTC_SRAM_END 0xff /* * MMC/SD controller. 
(from UM10326: LPC32x0 User manual, page 436) */ #define LPC_SD_BASE (LPC_DEV_P5_PHYS_BASE + 0x98000) #define LPC_SD_CLK (13 * 1000 * 1000) // 13Mhz #define LPC_SD_POWER 0x00 #define LPC_SD_POWER_OPENDRAIN (1 << 6) #define LPC_SD_POWER_CTRL_OFF 0x00 #define LPC_SD_POWER_CTRL_UP 0x02 #define LPC_SD_POWER_CTRL_ON 0x03 #define LPC_SD_CLOCK 0x04 #define LPC_SD_CLOCK_WIDEBUS (1 << 11) #define LPC_SD_CLOCK_BYPASS (1 << 10) #define LPC_SD_CLOCK_PWRSAVE (1 << 9) #define LPC_SD_CLOCK_ENABLE (1 << 8) #define LPC_SD_CLOCK_CLKDIVMASK 0xff #define LPC_SD_ARGUMENT 0x08 #define LPC_SD_COMMAND 0x0c #define LPC_SD_COMMAND_ENABLE (1 << 10) #define LPC_SD_COMMAND_PENDING (1 << 9) #define LPC_SD_COMMAND_INTERRUPT (1 << 8) #define LPC_SD_COMMAND_LONGRSP (1 << 7) #define LPC_SD_COMMAND_RESPONSE (1 << 6) #define LPC_SD_COMMAND_CMDINDEXMASK 0x3f #define LPC_SD_RESPCMD 0x10 #define LPC_SD_RESP0 0x14 #define LPC_SD_RESP1 0x18 #define LPC_SD_RESP2 0x1c #define LPC_SD_RESP3 0x20 #define LPC_SD_DATATIMER 0x24 #define LPC_SD_DATALENGTH 0x28 #define LPC_SD_DATACTRL 0x2c #define LPC_SD_DATACTRL_BLOCKSIZESHIFT 4 #define LPC_SD_DATACTRL_BLOCKSIZEMASK 0xf #define LPC_SD_DATACTRL_DMAENABLE (1 << 3) #define LPC_SD_DATACTRL_MODE (1 << 2) #define LPC_SD_DATACTRL_WRITE (0 << 1) #define LPC_SD_DATACTRL_READ (1 << 1) #define LPC_SD_DATACTRL_ENABLE (1 << 0) #define LPC_SD_DATACNT 0x30 #define LPC_SD_STATUS 0x34 #define LPC_SD_STATUS_RXDATAAVLBL (1 << 21) #define LPC_SD_STATUS_TXDATAAVLBL (1 << 20) #define LPC_SD_STATUS_RXFIFOEMPTY (1 << 19) #define LPC_SD_STATUS_TXFIFOEMPTY (1 << 18) #define LPC_SD_STATUS_RXFIFOFULL (1 << 17) #define LPC_SD_STATUS_TXFIFOFULL (1 << 16) #define LPC_SD_STATUS_RXFIFOHALFFULL (1 << 15) #define LPC_SD_STATUS_TXFIFOHALFEMPTY (1 << 14) #define LPC_SD_STATUS_RXACTIVE (1 << 13) #define LPC_SD_STATUS_TXACTIVE (1 << 12) #define LPC_SD_STATUS_CMDACTIVE (1 << 11) #define LPC_SD_STATUS_DATABLOCKEND (1 << 10) #define LPC_SD_STATUS_STARTBITERR (1 << 9) #define LPC_SD_STATUS_DATAEND (1 << 8) #define LPC_SD_STATUS_CMDSENT (1 << 7) #define LPC_SD_STATUS_CMDRESPEND (1 << 6) #define LPC_SD_STATUS_RXOVERRUN (1 << 5) #define LPC_SD_STATUS_TXUNDERRUN (1 << 4) #define LPC_SD_STATUS_DATATIMEOUT (1 << 3) #define LPC_SD_STATUS_CMDTIMEOUT (1 << 2) #define LPC_SD_STATUS_DATACRCFAIL (1 << 1) #define LPC_SD_STATUS_CMDCRCFAIL (1 << 0) #define LPC_SD_CLEAR 0x38 #define LPC_SD_MASK0 0x03c #define LPC_SD_MASK1 0x40 #define LPC_SD_FIFOCNT 0x48 #define LPC_SD_FIFO 0x80 /* * USB OTG controller (from UM10326: LPC32x0 User manual, page 410) */ #define LPC_OTG_INT_STATUS 0x100 #define LPC_OTG_INT_ENABLE 0x104 #define LPC_OTG_INT_SET 0x108 #define LPC_OTG_INT_CLEAR 0x10c #define LPC_OTG_STATUS 0x110 #define LPC_OTG_STATUS_ATOB_HNP_TRACK (1 << 9) #define LPC_OTG_STATUS_BTOA_HNP_TACK (1 << 8) #define LPC_OTG_STATUS_TRANSP_I2C_EN (1 << 7) #define LPC_OTG_STATUS_TIMER_RESET (1 << 6) #define LPC_OTG_STATUS_TIMER_EN (1 << 5) #define LPC_OTG_STATUS_TIMER_MODE (1 << 4) #define LPC_OTG_STATUS_TIMER_SCALE (1 << 2) #define LPC_OTG_STATUS_HOST_EN (1 << 0) #define LPC_OTG_TIMER 0x114 #define LPC_OTG_I2C_TXRX 0x300 #define LPC_OTG_I2C_STATUS 0x304 #define LPC_OTG_I2C_STATUS_TFE (1 << 11) #define LPC_OTG_I2C_STATUS_TFF (1 << 10) #define LPC_OTG_I2C_STATUS_RFE (1 << 9) #define LPC_OTG_I2C_STATUS_RFF (1 << 8) #define LPC_OTG_I2C_STATUS_SDA (1 << 7) #define LPC_OTG_I2C_STATUS_SCL (1 << 6) #define LPC_OTG_I2C_STATUS_ACTIVE (1 << 5) #define LPC_OTG_I2C_STATUS_DRSI (1 << 4) #define LPC_OTG_I2C_STATUS_DRMI (1 << 3) #define LPC_OTG_I2C_STATUS_NAI (1 << 2) #define 
LPC_OTG_I2C_STATUS_AFI (1 << 1) #define LPC_OTG_I2C_STATUS_TDI (1 << 0) #define LPC_OTG_I2C_CTRL 0x308 #define LPC_OTG_I2C_CTRL_SRST (1 << 8) #define LPC_OTG_I2C_CTRL_TFFIE (1 << 7) #define LPC_OTG_I2C_CTRL_RFDAIE (1 << 6) #define LPC_OTG_I2C_CTRL_RFFIE (1 << 5) #define LPC_OTG_I2C_CTRL_DRSIE (1 << 4) #define LPC_OTG_I2C_CTRL_DRMIE (1 << 3) #define LPC_OTG_I2C_CTRL_NAIE (1 << 2) #define LPC_OTG_I2C_CTRL_AFIE (1 << 1) #define LPC_OTG_I2C_CTRL_TDIE (1 << 0) #define LPC_OTG_I2C_CLKHI 0x30c #define LPC_OTG_I2C_CLKLO 0x310 #define LPC_OTG_CLOCK_CTRL 0xff4 #define LPC_OTG_CLOCK_CTRL_AHB_EN (1 << 4) #define LPC_OTG_CLOCK_CTRL_OTG_EN (1 << 3) #define LPC_OTG_CLOCK_CTRL_I2C_EN (1 << 2) #define LPC_OTG_CLOCK_CTRL_DEV_EN (1 << 1) #define LPC_OTG_CLOCK_CTRL_HOST_EN (1 << 0) #define LPC_OTG_CLOCK_STATUS 0xff8 /* * ISP3101 USB transceiver registers */ #define LPC_ISP3101_I2C_ADDR 0x2d #define LPC_ISP3101_MODE_CONTROL_1 0x04 #define LPC_ISP3101_MC1_SPEED_REG (1 << 0) #define LPC_ISP3101_MC1_SUSPEND_REG (1 << 1) #define LPC_ISP3101_MC1_DAT_SE0 (1 << 2) #define LPC_ISP3101_MC1_TRANSPARENT (1 << 3) #define LPC_ISP3101_MC1_BDIS_ACON_EN (1 << 4) #define LPC_ISP3101_MC1_OE_INT_EN (1 << 5) #define LPC_ISP3101_MC1_UART_EN (1 << 6) #define LPC_ISP3101_MODE_CONTROL_2 0x12 #define LPC_ISP3101_MC2_GLOBAL_PWR_DN (1 << 0) #define LPC_ISP3101_MC2_SPD_SUSP_CTRL (1 << 1) #define LPC_ISP3101_MC2_BI_DI (1 << 2) #define LPC_ISP3101_MC2_TRANSP_BDIR0 (1 << 3) #define LPC_ISP3101_MC2_TRANSP_BDIR1 (1 << 4) #define LPC_ISP3101_MC2_AUDIO_EN (1 << 5) #define LPC_ISP3101_MC2_PSW_EN (1 << 6) #define LPC_ISP3101_MC2_EN2V7 (1 << 7) #define LPC_ISP3101_OTG_CONTROL_1 0x06 #define LPC_ISP3101_OTG1_DP_PULLUP (1 << 0) #define LPC_ISP3101_OTG1_DM_PULLUP (1 << 1) #define LPC_ISP3101_OTG1_DP_PULLDOWN (1 << 2) #define LPC_ISP3101_OTG1_DM_PULLDOWN (1 << 3) #define LPC_ISP3101_OTG1_ID_PULLDOWN (1 << 4) #define LPC_ISP3101_OTG1_VBUS_DRV (1 << 5) #define LPC_ISP3101_OTG1_VBUS_DISCHRG (1 << 6) #define LPC_ISP3101_OTG1_VBUS_CHRG (1 << 7) #define LPC_ISP3101_OTG_CONTROL_2 0x10 #define LPC_ISP3101_OTG_INTR_LATCH 0x0a #define LPC_ISP3101_OTG_INTR_FALLING 0x0c #define LPC_ISP3101_OTG_INTR_RISING 0x0e #define LPC_ISP3101_REG_CLEAR_ADDR 0x01 /* * LCD Controller (from UM10326: LPC32x0 User manual, page 229) */ #define LPC_LCD_TIMH 0x00 #define LPC_LCD_TIMH_HBP(_n) (((_n) & 0xff) << 24) #define LPC_LCD_TIMH_HFP(_n) (((_n) & 0xff) << 16) #define LPC_LCD_TIMH_HSW(_n) (((_n) & 0xff) << 8) #define LPC_LCD_TIMH_PPL(_n) (((_n) / 16 - 1) << 2) #define LPC_LCD_TIMV 0x04 #define LPC_LCD_TIMV_VBP(_n) (((_n) & 0xff) << 24) #define LPC_LCD_TIMV_VFP(_n) (((_n) & 0xff) << 16) #define LPC_LCD_TIMV_VSW(_n) (((_n) & 0x3f) << 10) #define LPC_LCD_TIMV_LPP(_n) ((_n) & 0x1ff) #define LPC_LCD_POL 0x08 #define LPC_LCD_POL_PCD_HI (((_n) & 0x1f) << 27) #define LPC_LCD_POL_BCD (1 << 26) #define LPC_LCD_POL_CPL(_n) (((_n) & 0x3ff) << 16) #define LPC_LCD_POL_IOE (1 << 14) #define LPC_LCD_POL_IPC (1 << 13) #define LPC_LCD_POL_IHS (1 << 12) #define LPC_LCD_POL_IVS (1 << 11) #define LPC_LCD_POL_ACB(_n) ((_n & 0x1f) << 6) #define LPC_LCD_POL_CLKSEL (1 << 5) #define LPC_LCD_POL_PCD_LO(_n) ((_n) & 0x1f) #define LPC_LCD_LE 0x0c #define LPC_LCD_LE_LEE (1 << 16) #define LPC_LCD_LE_LED ((_n) & 0x7f) #define LPC_LCD_UPBASE 0x10 #define LPC_LCD_LPBASE 0x14 #define LPC_LCD_CTRL 0x18 #define LPC_LCD_CTRL_WATERMARK (1 << 16) #define LPC_LCD_CTRL_LCDVCOMP(_n) (((_n) & 0x3) << 12) #define LPC_LCD_CTRL_LCDPWR (1 << 11) #define LPC_LCD_CTRL_BEPO (1 << 10) #define LPC_LCD_CTRL_BEBO (1 << 9) #define 
LPC_LCD_CTRL_BGR (1 << 8) #define LPC_LCD_CTRL_LCDDUAL (1 << 7) #define LPC_LCD_CTRL_LCDMONO8 (1 << 6) #define LPC_LCD_CTRL_LCDTFT (1 << 5) #define LPC_LCD_CTRL_LCDBW (1 << 4) #define LPC_LCD_CTRL_LCDBPP(_n) (((_n) & 0x7) << 1) #define LPC_LCD_CTRL_BPP1 0 #define LPC_LCD_CTRL_BPP2 1 #define LPC_LCD_CTRL_BPP4 2 #define LPC_LCD_CTRL_BPP8 3 #define LPC_LCD_CTRL_BPP16 4 #define LPC_LCD_CTRL_BPP24 5 #define LPC_LCD_CTRL_BPP16_565 6 #define LPC_LCD_CTRL_BPP12_444 7 #define LPC_LCD_CTRL_LCDEN (1 << 0) #define LPC_LCD_INTMSK 0x1c #define LPC_LCD_INTRAW 0x20 #define LPC_LCD_INTSTAT 0x24 #define LPC_LCD_INTCLR 0x28 #define LPC_LCD_UPCURR 0x2c #define LPC_LCD_LPCURR 0x30 #define LPC_LCD_PAL 0x200 #define LPC_LCD_CRSR_IMG 0x800 #define LPC_LCD_CRSR_CTRL 0xc00 #define LPC_LCD_CRSR_CFG 0xc04 #define LPC_LCD_CRSR_PAL0 0xc08 #define LPC_LCD_CRSR_PAL1 0xc0c #define LPC_LCD_CRSR_XY 0xc10 #define LPC_LCD_CRSR_CLIP 0xc14 #define LPC_LCD_CRSR_INTMSK 0xc20 #define LPC_LCD_CRSR_INTCLR 0xc24 #define LPC_LCD_CRSR_INTRAW 0xc28 #define LPC_LCD_CRSR_INTSTAT 0xc2c /* * SPI interface (from UM10326: LPC32x0 User manual, page 483) */ #define LPC_SPI_GLOBAL 0x00 #define LPC_SPI_GLOBAL_RST (1 << 1) #define LPC_SPI_GLOBAL_ENABLE (1 << 0) #define LPC_SPI_CON 0x04 #define LPC_SPI_CON_UNIDIR (1 << 23) #define LPC_SPI_CON_BHALT (1 << 22) #define LPC_SPI_CON_BPOL (1 << 21) #define LPC_SPI_CON_MSB (1 << 19) #define LPC_SPI_CON_MODE(_n) ((_n & 0x3) << 16) #define LPC_SPI_CON_RXTX (1 << 15) #define LPC_SPI_CON_THR (1 << 14) #define LPC_SPI_CON_SHIFT_OFF (1 << 13) #define LPC_SPI_CON_BITNUM(_n) ((_n & 0xf) << 9) #define LPC_SPI_CON_MS (1 << 7) #define LPC_SPI_CON_RATE(_n) (_n & 0x7f) #define LPC_SPI_FRM 0x08 #define LPC_SPI_IER 0x0c #define LPC_SPI_IER_INTEOT (1 << 1) #define LPC_SPI_IER_INTTHR (1 << 0) #define LPC_SPI_STAT 0x10 #define LPC_SPI_STAT_INTCLR (1 << 8) #define LPC_SPI_STAT_EOT (1 << 7) #define LPC_SPI_STAT_BUSYLEV (1 << 6) #define LPC_SPI_STAT_SHIFTACT (1 << 3) #define LPC_SPI_STAT_BF (1 << 2) #define LPC_SPI_STAT_THR (1 << 1) #define LPC_SPI_STAT_BE (1 << 0) #define LPC_SPI_DAT 0x14 #define LPC_SPI_TIM_CTRL 0x400 #define LPC_SPI_TIM_COUNT 0x404 #define LPC_SPI_TIM_STAT 0x408 /* * SSP interface (from UM10326: LPC32x0 User manual, page 500) */ #define LPC_SSP0_BASE 0x4c00 #define LPC_SSP1_BASE 0xc000 #define LPC_SSP_CR0 0x00 #define LPC_SSP_CR0_DSS(_n) ((_n-1) & 0xf) #define LPC_SSP_CR0_TI (1 << 4) #define LPC_SSP_CR0_MICROWIRE (1 << 5) #define LPC_SSP_CR0_CPOL (1 << 6) #define LPC_SSP_CR0_CPHA (1 << 7) #define LPC_SSP_CR0_SCR(_n) (((_n) & 0xff) << 8) #define LPC_SSP_CR1 0x04 #define LPC_SSP_CR1_LBM (1 << 0) #define LPC_SSP_CR1_SSE (1 << 1) #define LPC_SSP_CR1_MS (1 << 2) #define LPC_SSP_CR1_SOD (1 << 3) #define LPC_SSP_DR 0x08 #define LPC_SSP_SR 0x0c #define LPC_SSP_SR_TFE (1 << 0) #define LPC_SSP_SR_TNF (1 << 1) #define LPC_SSP_SR_RNE (1 << 2) #define LPC_SSP_SR_RFF (1 << 3) #define LPC_SSP_SR_BSY (1 << 4) #define LPC_SSP_CPSR 0x10 #define LPC_SSP_IMSC 0x14 #define LPC_SSP_IMSC_RORIM (1 << 0) #define LPC_SSP_IMSC_RTIM (1 << 1) #define LPC_SSP_IMSC_RXIM (1 << 2) #define LPC_SSP_IMSC_TXIM (1 << 3) #define LPC_SSP_RIS 0x18 #define LPC_SSP_RIS_RORRIS (1 << 0) #define LPC_SSP_RIS_RTRIS (1 << 1) #define LPC_SSP_RIS_RXRIS (1 << 2) #define LPC_SSP_RIS_TXRIS (1 << 3) #define LPC_SSP_MIS 0x1c #define LPC_SSP_ICR 0x20 #define LPC_SSP_DMACR 0x24 /* * GPIO (from UM10326: LPC32x0 User manual, page 606) */ #define LPC_GPIO_BASE (LPC_DEV_BASE + 0x28000) #define LPC_GPIO_P0_COUNT 8 #define LPC_GPIO_P1_COUNT 24 #define LPC_GPIO_P2_COUNT
13 #define LPC_GPIO_P3_COUNT 52 #define LPC_GPIO_P0_INP_STATE 0x40 #define LPC_GPIO_P0_OUTP_SET 0x44 #define LPC_GPIO_P0_OUTP_CLR 0x48 #define LPC_GPIO_P0_OUTP_STATE 0x4c #define LPC_GPIO_P0_DIR_SET 0x50 #define LPC_GPIO_P0_DIR_CLR 0x54 #define LPC_GPIO_P0_DIR_STATE 0x58 #define LPC_GPIO_P1_INP_STATE 0x60 #define LPC_GPIO_P1_OUTP_SET 0x64 #define LPC_GPIO_P1_OUTP_CLR 0x68 #define LPC_GPIO_P1_OUTP_STATE 0x6c #define LPC_GPIO_P1_DIR_SET 0x70 #define LPC_GPIO_P1_DIR_CLR 0x74 #define LPC_GPIO_P1_DIR_STATE 0x78 #define LPC_GPIO_P2_INP_STATE 0x1c #define LPC_GPIO_P2_OUTP_SET 0x20 #define LPC_GPIO_P2_OUTP_CLR 0x24 #define LPC_GPIO_P2_DIR_SET 0x10 #define LPC_GPIO_P2_DIR_CLR 0x14 #define LPC_GPIO_P2_DIR_STATE 0x14 #define LPC_GPIO_P3_INP_STATE 0x00 #define LPC_GPIO_P3_OUTP_SET 0x04 #define LPC_GPIO_P3_OUTP_CLR 0x08 #define LPC_GPIO_P3_OUTP_STATE 0x0c /* Aliases for logical pin numbers: */ #define LPC_GPIO_GPI_00(_n) (0 + _n) #define LPC_GPIO_GPI_15(_n) (10 + _n) #define LPC_GPIO_GPI_25 (19) #define LPC_GPIO_GPI_27(_n) (20 + _n) #define LPC_GPIO_GPO_00(_n) (22 + _n) #define LPC_GPIO_GPIO_00(_n) (46 + _n) /* SPI devices chip selects: */ #define SSD1289_CS_PIN LPC_GPIO_GPO_00(4) #define SSD1289_DC_PIN LPC_GPIO_GPO_00(5) #define ADS7846_CS_PIN LPC_GPIO_GPO_00(11) #define ADS7846_INTR_PIN LPC_GPIO_GPIO_00(0) /* * GPDMA controller (from UM10326: LPC32x0 User manual, page 106) */ #define LPC_DMAC_INTSTAT 0x00 #define LPC_DMAC_INTTCSTAT 0x04 #define LPC_DMAC_INTTCCLEAR 0x08 #define LPC_DMAC_INTERRSTAT 0x0c #define LPC_DMAC_INTERRCLEAR 0x10 #define LPC_DMAC_RAWINTTCSTAT 0x14 #define LPC_DMAC_RAWINTERRSTAT 0x18 #define LPC_DMAC_ENABLED_CHANNELS 0x1c #define LPC_DMAC_SOFTBREQ 0x20 #define LPC_DMAC_SOFTSREQ 0x24 #define LPC_DMAC_SOFTLBREQ 0x28 #define LPC_DMAC_SOFTLSREQ 0x2c #define LPC_DMAC_CONFIG 0x30 #define LPC_DMAC_CONFIG_M1 (1 << 2) #define LPC_DMAC_CONFIG_M0 (1 << 1) #define LPC_DMAC_CONFIG_ENABLE (1 << 0) #define LPC_DMAC_CHADDR(_n) (0x100 + (_n * 0x20)) #define LPC_DMAC_CHNUM 8 #define LPC_DMAC_CHSIZE 0x20 #define LPC_DMAC_CH_SRCADDR 0x00 #define LPC_DMAC_CH_DSTADDR 0x04 #define LPC_DMAC_CH_LLI 0x08 #define LPC_DMAC_CH_LLI_AHB1 (1 << 0) #define LPC_DMAC_CH_CONTROL 0x0c -#define LPC_DMAC_CH_CONTROL_I (1 << 31) +#define LPC_DMAC_CH_CONTROL_I (1U << 31) #define LPC_DMAC_CH_CONTROL_DI (1 << 27) #define LPC_DMAC_CH_CONTROL_SI (1 << 26) #define LPC_DMAC_CH_CONTROL_D (1 << 25) #define LPC_DMAC_CH_CONTROL_S (1 << 24) #define LPC_DMAC_CH_CONTROL_WIDTH_4 2 #define LPC_DMAC_CH_CONTROL_DWIDTH(_n) ((_n & 0x7) << 21) #define LPC_DMAC_CH_CONTROL_SWIDTH(_n) ((_n & 0x7) << 18) #define LPC_DMAC_CH_CONTROL_BURST_8 2 #define LPC_DMAC_CH_CONTROL_DBSIZE(_n) ((_n & 0x7) << 15) #define LPC_DMAC_CH_CONTROL_SBSIZE(_n) ((_n & 0x7) << 12) #define LPC_DMAC_CH_CONTROL_XFERLEN(_n) (_n & 0xfff) #define LPC_DMAC_CH_CONFIG 0x10 #define LPC_DMAC_CH_CONFIG_H (1 << 18) #define LPC_DMAC_CH_CONFIG_A (1 << 17) #define LPC_DMAC_CH_CONFIG_L (1 << 16) #define LPC_DMAC_CH_CONFIG_ITC (1 << 15) #define LPC_DMAC_CH_CONFIG_IE (1 << 14) #define LPC_DMAC_CH_CONFIG_FLOWCNTL(_n) ((_n & 0x7) << 11) #define LPC_DMAC_CH_CONFIG_DESTP(_n) ((_n & 0x1f) << 6) #define LPC_DMAC_CH_CONFIG_SRCP(_n) ((_n & 0x1f) << 1) #define LPC_DMAC_CH_CONFIG_E (1 << 0) /* DMA flow control values */ #define LPC_DMAC_FLOW_D_M2M 0 #define LPC_DMAC_FLOW_D_M2P 1 #define LPC_DMAC_FLOW_D_P2M 2 #define LPC_DMAC_FLOW_D_P2P 3 #define LPC_DMAC_FLOW_DP_P2P 4 #define LPC_DMAC_FLOW_P_M2P 5 #define LPC_DMAC_FLOW_P_P2M 6 #define LPC_DMAC_FLOW_SP_P2P 7 /* DMA peripheral ID's */ #define 
LPC_DMAC_I2S0_DMA0_ID 0 #define LPC_DMAC_NAND_ID 1 #define LPC_DMAC_IS21_DMA0_ID 2 #define LPC_DMAC_SSP1_ID 3 #define LPC_DMAC_SPI2_ID 3 #define LPC_DMAC_SD_ID 4 #define LPC_DMAC_UART1_TX_ID 5 #define LPC_DMAC_UART1_RX_ID 6 #define LPC_DMAC_UART2_TX_ID 7 #define LPC_DMAC_UART2_RX_ID 8 #define LPC_DMAC_UART7_TX_ID 9 #define LPC_DMAC_UART7_RX_ID 10 #define LPC_DMAC_I2S1_DMA1_ID 10 #define LPC_DMAC_SPI1_ID 11 #define LPC_DMAC_SSP1_TX_ID 11 #define LPC_DMAC_NAND2_ID 12 #define LPC_DMAC_I2S0_DMA1_ID 13 #define LPC_DMAC_SSP0_RX 14 #define LPC_DMAC_SSP0_TX 15 #endif /* _ARM_LPC_LPCREG_H */ Index: head/sys/arm/mv/mv_pci.c =================================================================== --- head/sys/arm/mv/mv_pci.c (revision 258779) +++ head/sys/arm/mv/mv_pci.c (revision 258780) @@ -1,1028 +1,1028 @@ /*- * Copyright (c) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2010-2012 Semihalf * All rights reserved. * * Developed by Semihalf. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Marvell integrated PCI/PCI-Express controller driver. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) 
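/*
 * With DEBUG undefined, debugf() expands to nothing, so calls such as
 * debugf("%s: irq: %d\n", __func__, irq) cost nothing in non-debug builds.
 */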
#endif -#define PCI_CFG_ENA (1 << 31) +#define PCI_CFG_ENA (1U << 31) #define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16) #define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11) #define PCI_CFG_FUN(fun) (((fun) & 0x7) << 8) #define PCI_CFG_PCIE_REG(reg) ((reg) & 0xfc) #define PCI_REG_CFG_ADDR 0x0C78 #define PCI_REG_CFG_DATA 0x0C7C #define PCIE_REG_CFG_ADDR 0x18F8 #define PCIE_REG_CFG_DATA 0x18FC #define PCIE_REG_CONTROL 0x1A00 #define PCIE_CTRL_LINK1X 0x00000001 #define PCIE_REG_STATUS 0x1A04 #define PCIE_REG_IRQ_MASK 0x1910 #define PCIE_CONTROL_ROOT_CMPLX (1 << 1) #define PCIE_CONTROL_HOT_RESET (1 << 24) #define PCIE_LINK_TIMEOUT 1000000 #define PCIE_STATUS_LINK_DOWN 1 #define PCIE_STATUS_DEV_OFFS 16 /* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */ #define PCI_MIN_IO_ALLOC 4 #define PCI_MIN_MEM_ALLOC 16 #define BITS_PER_UINT32 (NBBY * sizeof(uint32_t)) struct mv_pcib_softc { device_t sc_dev; struct rman sc_mem_rman; bus_addr_t sc_mem_base; bus_addr_t sc_mem_size; uint32_t sc_mem_map[MV_PCI_MEM_SLICE_SIZE / (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)]; int sc_win_target; int sc_mem_win_attr; struct rman sc_io_rman; bus_addr_t sc_io_base; bus_addr_t sc_io_size; uint32_t sc_io_map[MV_PCI_IO_SLICE_SIZE / (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)]; int sc_io_win_attr; struct resource *sc_res; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct mtx sc_msi_mtx; uint32_t sc_msi_bitmap; int sc_busnr; /* Host bridge bus number */ int sc_devnr; /* Host bridge device number */ int sc_type; int sc_mode; /* Endpoint / Root Complex */ struct fdt_pci_intr sc_intr_info; }; /* Local forward prototypes */ static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *); static void mv_pcib_hw_cfginit(void); static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, int); static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_init(struct mv_pcib_softc *, int, int); static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int); static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int); static int mv_pcib_intr_info(phandle_t, struct mv_pcib_softc *); static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t); static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t); static int mv_pcib_mem_init(struct mv_pcib_softc *); /* Forward prototypes */ static int mv_pcib_probe(device_t); static int mv_pcib_attach(device_t); static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *, u_long, u_long, u_long, u_int); static int mv_pcib_release_resource(device_t, device_t, int, int, struct resource *); static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *); static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t); static int mv_pcib_maxslots(device_t); static uint32_t mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_route_interrupt(device_t, device_t, int); #if defined(SOC_MV_ARMADAXP) static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *); static int mv_pcib_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int mv_pcib_release_msi(device_t, device_t, int, int *); #endif /* * Bus interface definitions. 
*/ static device_method_t mv_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mv_pcib_probe), DEVMETHOD(device_attach, mv_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, mv_pcib_read_ivar), DEVMETHOD(bus_write_ivar, mv_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, mv_pcib_alloc_resource), DEVMETHOD(bus_release_resource, mv_pcib_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, mv_pcib_maxslots), DEVMETHOD(pcib_read_config, mv_pcib_read_config), DEVMETHOD(pcib_write_config, mv_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mv_pcib_route_interrupt), #if defined(SOC_MV_ARMADAXP) DEVMETHOD(pcib_alloc_msi, mv_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, mv_pcib_release_msi), DEVMETHOD(pcib_map_msi, mv_pcib_map_msi), #endif /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t mv_pcib_driver = { "pcib", mv_pcib_methods, sizeof(struct mv_pcib_softc), }; devclass_t pcib_devclass; DRIVER_MODULE(pcib, nexus, mv_pcib_driver, pcib_devclass, 0, 0); static struct mtx pcicfg_mtx; static int mv_pcib_probe(device_t self) { phandle_t node; node = ofw_bus_get_node(self); if (!fdt_is_type(node, "pci")) return (ENXIO); if (!(fdt_is_compatible(node, "mrvl,pcie") || fdt_is_compatible(node, "mrvl,pci"))) return (ENXIO); device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int mv_pcib_attach(device_t self) { struct mv_pcib_softc *sc; phandle_t node, parnode; uint32_t val, unit; int err; sc = device_get_softc(self); sc->sc_dev = self; unit = fdt_get_unit(self); node = ofw_bus_get_node(self); parnode = OF_parent(node); if (fdt_is_compatible(node, "mrvl,pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET(unit); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(unit); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(unit); } else if (fdt_is_compatible(node, "mrvl,pci")) { sc->sc_type = MV_TYPE_PCI; sc->sc_win_target = MV_WIN_PCI_TARGET; sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR; sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR; } else return (ENXIO); /* * Retrieve our mem-mapped registers range. */ sc->sc_rid = 0; sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(self, "could not map memory\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res); sc->sc_bsh = rman_get_bushandle(sc->sc_res); val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL); sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT : MV_MODE_ENDPOINT); /* * Get PCI interrupt info. */ if ((sc->sc_mode == MV_MODE_ROOT) && (mv_pcib_intr_info(node, sc) != 0)) { device_printf(self, "could not retrieve interrupt info\n"); return (ENXIO); } /* * Configure decode windows for PCI(E) access. */ if (mv_pcib_decode_win(node, sc) != 0) return (ENXIO); mv_pcib_hw_cfginit(); /* * Enable PCIE device. */ mv_pcib_enable(sc, unit); /* * Memory management. 
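 * mv_pcib_mem_init() seeds two rman arenas with the PCI memory and I/O
 * windows; child resource allocations are carved out of those arenas.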
*/ err = mv_pcib_mem_init(sc); if (err) return (err); if (sc->sc_mode == MV_MODE_ROOT) { err = mv_pcib_init(sc, sc->sc_busnr, mv_pcib_maxslots(sc->sc_dev)); if (err) goto error; device_add_child(self, "pci", -1); } else { sc->sc_devnr = 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS); device_add_child(self, "pci_ep", -1); } mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); return (bus_generic_attach(self)); error: /* XXX SYS_RES_ should be released here */ rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static void mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit) { uint32_t val; #if !defined(SOC_MV_ARMADAXP) int timeout; /* * Check if PCIE device is enabled. */ if (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit)) { write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) & ~(CPU_CONTROL_PCIE_DISABLE(unit))); timeout = PCIE_LINK_TIMEOUT; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) { DELAY(1000); timeout -= 1000; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); } } #endif if (sc->sc_mode == MV_MODE_ROOT) { /* * Enable PCI bridge. */ val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND); val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val); } } static int mv_pcib_mem_init(struct mv_pcib_softc *sc) { int err; /* * Memory management. */ sc->sc_mem_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_mem_rman); if (err) return (err); sc->sc_io_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_io_rman); if (err) { rman_fini(&sc->sc_mem_rman); return (err); } err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1); if (err) goto error; err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1); if (err) goto error; return (0); error: rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static inline uint32_t pcib_bit_get(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; return (map[n] & (1 << bit)); } static inline void pcib_bit_set(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; map[n] |= (1 << bit); } static inline uint32_t pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) if (pcib_bit_get(map, i)) return (0); return (1); } static inline void pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) pcib_bit_set(map, i); } /* * The idea of this allocator is taken from ARM No-Cache memory * management code (sys/arm/arm/vm_machdep.c). 
*/ static bus_addr_t pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask) { uint32_t bits, bits_limit, i, *map, min_alloc, size; bus_addr_t addr = 0; bus_addr_t base; if (smask & 1) { base = sc->sc_io_base; min_alloc = PCI_MIN_IO_ALLOC; bits_limit = sc->sc_io_size / min_alloc; map = sc->sc_io_map; smask &= ~0x3; } else { base = sc->sc_mem_base; min_alloc = PCI_MIN_MEM_ALLOC; bits_limit = sc->sc_mem_size / min_alloc; map = sc->sc_mem_map; smask &= ~0xF; } size = ~smask + 1; bits = size / min_alloc; for (i = 0; i + bits <= bits_limit; i += bits) if (pcib_map_check(map, i, bits)) { pcib_map_set(map, i, bits); addr = base + (i * min_alloc); return (addr); } return (addr); } static int mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func, int barno) { uint32_t addr, bar; int reg, width; reg = PCIR_BAR(barno); /* * Need to init the BAR register with 0xffffffff before correct * value can be read. */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4); bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4); if (bar == 0) return (1); /* Calculate BAR size: 64 or 32 bit (in 32-bit units) */ width = ((bar & 7) == 4) ? 2 : 1; addr = pcib_alloc(sc, bar); if (!addr) return (-1); if (bootverbose) printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n", bus, slot, func, reg, bar, addr); mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4); if (width == 2) mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4, 0, 4); return (width); } static void mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func) { bus_addr_t io_base, mem_base; uint32_t io_limit, mem_limit; int secbus; io_base = sc->sc_io_base; io_limit = io_base + sc->sc_io_size - 1; mem_base = sc->sc_mem_base; mem_limit = mem_base + sc->sc_mem_size - 1; /* Configure I/O decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1, io_base >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1, io_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1, io_limit >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1, io_limit >> 16, 2); /* Configure memory decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1, mem_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1, mem_limit >> 16, 2); /* Disable memory prefetch decode */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1, 0x10, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1, 0x0, 4); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1, 0xF, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1, 0x0, 4); secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SECBUS_1, 1); /* Configure buses behind the bridge */ mv_pcib_init(sc, secbus, PCI_SLOTMAX); } static int mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot) { int slot, func, maxfunc, error; uint8_t hdrtype, command, class, subclass; for (slot = 0; slot <= maxslot; slot++) { maxfunc = 0; for (func = 0; func <= maxfunc; func++) { hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (func == 0 && (hdrtype & PCIM_MFDEV)) maxfunc = PCI_FUNCMAX; command = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); error = 
mv_pcib_init_all_bars(sc, bus, slot, func, hdrtype); if (error) return (error); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); /* Handle PCI-PCI bridges */ class = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_CLASS, 1); subclass = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SUBCLASS, 1); if (class != PCIC_BRIDGE || subclass != PCIS_BRIDGE_PCI) continue; mv_pcib_init_bridge(sc, bus, slot, func); } } /* Enable all ABCD interrupts */ pcib_write_irq_mask(sc, (0xF << 24)); return (0); } static int mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot, int func, int hdrtype) { int maxbar, bar, i; maxbar = (hdrtype & PCIM_HDRTYPE) ? 0 : 6; bar = 0; /* Program the base address registers */ while (bar < maxbar) { i = mv_pcib_init_bar(sc, bus, slot, func, bar); bar += i; if (i < 0) { device_printf(sc->sc_dev, "PCI IO/Memory space exhausted\n"); return (ENOMEM); } } return (0); } static struct resource * mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); struct rman *rm = NULL; struct resource *res; switch (type) { case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); } if ((start == 0UL) && (end == ~0UL)) { start = sc->sc_mem_base; end = sc->sc_mem_base + sc->sc_mem_size - 1; count = sc->sc_mem_size; } if ((start < sc->sc_mem_base) || (start + count - 1 != end) || (end > sc->sc_mem_base + sc->sc_mem_size - 1)) return (NULL); res = rman_reserve_resource(rm, start, end, count, flags, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_bustag(res, fdtbus_bs_tag); rman_set_bushandle(res, start); if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res)) { rman_release_resource(res); return (NULL); } return (res); } static int mv_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, res)); return (rman_release_resource(res)); } static int mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->sc_busnr; return (0); case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); } return (ENOENT); } static int mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busnr = value; return (0); } return (ENOENT); } static inline void pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask) { if (sc->sc_type != MV_TYPE_PCIE) return; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask); } static void mv_pcib_hw_cfginit(void) { static int opened = 0; if (opened) return; mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; } static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t addr, data, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ?
PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); data = ~0; switch (bytes) { case 1: data = bus_space_read_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3)); break; case 2: data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2))); break; case 4: data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh, cd)); break; } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { uint32_t addr, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2), htole16(data)); break; case 4: bus_space_write_4(sc->sc_bst, sc->sc_bsh, cd, htole32(data)); break; } mtx_unlock_spin(&pcicfg_mtx); } static int mv_pcib_maxslots(device_t dev) { struct mv_pcib_softc *sc = device_get_softc(dev); return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX); } static uint32_t mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return ~0 if link is inactive or trying to read from Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || (slot == 0)) return (~0U); return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes)); } static void mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return if link is inactive or trying to write to Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || (slot == 0)) return; mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes); } static int mv_pcib_route_interrupt(device_t pcib, device_t dev, int pin) { struct mv_pcib_softc *sc; int err, interrupt; sc = device_get_softc(pcib); err = fdt_pci_route_intr(pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pin, &sc->sc_intr_info, &interrupt); if (err == 0) return (interrupt); device_printf(pcib, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc) { struct fdt_pci_range io_space, mem_space; device_t dev; int error; dev = sc->sc_dev; if ((error = fdt_pci_ranges(node, &io_space, &mem_space)) != 0) { device_printf(dev, "could not retrieve 'ranges' data\n"); return (error); } /* Configure CPU decoding windows */ error = decode_win_cpu_set(sc->sc_win_target, sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0); if (error < 0) { device_printf(dev, "could not set up CPU decode " "window for PCI IO\n"); return (ENXIO); } error = decode_win_cpu_set(sc->sc_win_target, sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len, mem_space.base_parent); if (error < 0) { device_printf(dev, "could not set up CPU decode " "windows for PCI MEM\n"); return (ENXIO); } sc->sc_io_base = 
io_space.base_parent; sc->sc_io_size = io_space.len; sc->sc_mem_base = mem_space.base_parent; sc->sc_mem_size = mem_space.len; return (0); } static int mv_pcib_intr_info(phandle_t node, struct mv_pcib_softc *sc) { int error; if ((error = fdt_pci_intr_info(node, &sc->sc_intr_info)) != 0) return (error); return (0); } #if defined(SOC_MV_ARMADAXP) static int mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct mv_pcib_softc *sc; sc = device_get_softc(dev); irq = irq - MSI_IRQ; /* validate parameters */ if (isclr(&sc->sc_msi_bitmap, irq)) { device_printf(dev, "invalid MSI 0x%x\n", irq); return (EINVAL); } mv_msi_data(irq, addr, data); debugf("%s: irq: %d addr: %jx data: %x\n", __func__, irq, *addr, *data); return (0); } static int mv_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused, int *irqs) { struct mv_pcib_softc *sc; u_int start = 0, i; if (powerof2(count) == 0 || count > MSI_IRQ_NUM) return (EINVAL); sc = device_get_softc(dev); mtx_lock(&sc->sc_msi_mtx); for (start = 0; (start + count) < MSI_IRQ_NUM; start++) { for (i = start; i < start + count; i++) { if (isset(&sc->sc_msi_bitmap, i)) break; } if (i == start + count) break; } if ((start + count) == MSI_IRQ_NUM) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = start; i < start + count; i++) { setbit(&sc->sc_msi_bitmap, i); irqs[i] = MSI_IRQ + i; } debugf("%s: start: %x count: %x\n", __func__, start, count); mtx_unlock(&sc->sc_msi_mtx); return (0); } static int mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs) { struct mv_pcib_softc *sc; u_int i; sc = device_get_softc(dev); mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < count; i++) clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ); mtx_unlock(&sc->sc_msi_mtx); return (0); } #endif Index: head/sys/arm/samsung/exynos/ehci_exynos5.c =================================================================== --- head/sys/arm/samsung/exynos/ehci_exynos5.c (revision 258779) +++ head/sys/arm/samsung/exynos/ehci_exynos5.c (revision 258780) @@ -1,367 +1,367 @@ /*- * Copyright (c) 2013 Ruslan Bukin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" /* GPIO control */ #define GPIO_CON(x, v) ((v) << ((x) * 4)) #define GPIO_MASK 0xf #define GPIO_OUTPUT 1 #define GPIO_INPUT 0 #define GPX3CON 0x0C60 #define GPX3DAT 0x0C64 #define PIN_USB 5 /* PWR control */ #define EXYNOS5_PWR_USBHOST_PHY 0x708 #define PHY_POWER_ON 1 #define PHY_POWER_OFF 0 /* SYSREG */ #define EXYNOS5_SYSREG_USB2_PHY 0x230 #define USB2_MODE_HOST 0x1 /* USB HOST */ #define HOST_CTRL_CLK_24MHZ (5 << 16) #define HOST_CTRL_CLK_MASK (7 << 16) #define HOST_CTRL_SIDDQ (1 << 6) #define HOST_CTRL_SLEEP (1 << 5) #define HOST_CTRL_SUSPEND (1 << 4) #define HOST_CTRL_RESET_LINK (1 << 1) #define HOST_CTRL_RESET_PHY (1 << 0) -#define HOST_CTRL_RESET_PHY_ALL (1 << 31) +#define HOST_CTRL_RESET_PHY_ALL (1U << 31) /* Forward declarations */ static int exynos_ehci_attach(device_t dev); static int exynos_ehci_detach(device_t dev); static int exynos_ehci_probe(device_t dev); struct exynos_ehci_softc { ehci_softc_t base; struct resource *res[6]; bus_space_tag_t host_bst; bus_space_tag_t pwr_bst; bus_space_tag_t sysreg_bst; bus_space_tag_t gpio_bst; bus_space_handle_t host_bsh; bus_space_handle_t pwr_bsh; bus_space_handle_t sysreg_bsh; bus_space_handle_t gpio_bsh; }; static struct resource_spec exynos_ehci_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_MEMORY, 2, RF_ACTIVE }, { SYS_RES_MEMORY, 3, RF_ACTIVE }, { SYS_RES_MEMORY, 4, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static device_method_t ehci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, exynos_ehci_probe), DEVMETHOD(device_attach, exynos_ehci_attach), DEVMETHOD(device_detach, exynos_ehci_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), { 0, 0 } }; /* kobj_class definition */ static driver_t ehci_driver = { "ehci", ehci_methods, sizeof(ehci_softc_t) }; static devclass_t ehci_devclass; DRIVER_MODULE(ehci, simplebus, ehci_driver, ehci_devclass, 0, 0); MODULE_DEPEND(ehci, usb, 1, 1, 1); /* * Public methods */ static int exynos_ehci_probe(device_t dev) { if (ofw_bus_is_compatible(dev, "exynos,usb-ehci") == 0) return (ENXIO); device_set_desc(dev, "Exynos integrated USB controller"); return (BUS_PROBE_DEFAULT); } static int gpio_ctrl(struct exynos_ehci_softc *esc, int dir, int power) { int reg; /* Power control */ reg = bus_space_read_4(esc->gpio_bst, esc->gpio_bsh, GPX3DAT); reg &= ~(1 << PIN_USB); reg |= (power << PIN_USB); bus_space_write_4(esc->gpio_bst, esc->gpio_bsh, GPX3DAT, reg); /* Input/Output control */ reg = bus_space_read_4(esc->gpio_bst, esc->gpio_bsh, GPX3CON); reg &= ~GPIO_CON(PIN_USB, GPIO_MASK); reg |= GPIO_CON(PIN_USB, dir); bus_space_write_4(esc->gpio_bst, esc->gpio_bsh, GPX3CON, reg); return (0); } static int phy_init(struct exynos_ehci_softc *esc) { int reg; gpio_ctrl(esc, GPIO_INPUT, 1); /* set USB HOST mode */ bus_space_write_4(esc->sysreg_bst, esc->sysreg_bsh, EXYNOS5_SYSREG_USB2_PHY, USB2_MODE_HOST); /* Power ON phy */ bus_space_write_4(esc->pwr_bst, esc->pwr_bsh, EXYNOS5_PWR_USBHOST_PHY, PHY_POWER_ON); reg = bus_space_read_4(esc->host_bst, esc->host_bsh, 0x0); reg &= ~(HOST_CTRL_CLK_MASK | HOST_CTRL_RESET_PHY | 
HOST_CTRL_RESET_PHY_ALL | HOST_CTRL_SIDDQ | HOST_CTRL_SUSPEND | HOST_CTRL_SLEEP); reg |= (HOST_CTRL_CLK_24MHZ | HOST_CTRL_RESET_LINK); bus_space_write_4(esc->host_bst, esc->host_bsh, 0x0, reg); DELAY(10); reg = bus_space_read_4(esc->host_bst, esc->host_bsh, 0x0); reg &= ~(HOST_CTRL_RESET_LINK); bus_space_write_4(esc->host_bst, esc->host_bsh, 0x0, reg); gpio_ctrl(esc, GPIO_OUTPUT, 1); return (0); } static int exynos_ehci_attach(device_t dev) { struct exynos_ehci_softc *esc; ehci_softc_t *sc; bus_space_handle_t bsh; int err; esc = device_get_softc(dev); sc = &esc->base; sc->sc_bus.parent = dev; sc->sc_bus.devices = sc->sc_devices; sc->sc_bus.devices_max = EHCI_MAX_DEVICES; if (bus_alloc_resources(dev, exynos_ehci_spec, esc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* EHCI registers */ sc->sc_io_tag = rman_get_bustag(esc->res[0]); bsh = rman_get_bushandle(esc->res[0]); sc->sc_io_size = rman_get_size(esc->res[0]); /* EHCI HOST ctrl registers */ esc->host_bst = rman_get_bustag(esc->res[1]); esc->host_bsh = rman_get_bushandle(esc->res[1]); /* PWR registers */ esc->pwr_bst = rman_get_bustag(esc->res[2]); esc->pwr_bsh = rman_get_bushandle(esc->res[2]); /* SYSREG */ esc->sysreg_bst = rman_get_bustag(esc->res[3]); esc->sysreg_bsh = rman_get_bushandle(esc->res[3]); /* GPIO */ esc->gpio_bst = rman_get_bustag(esc->res[4]); esc->gpio_bsh = rman_get_bushandle(esc->res[4]); /* get all DMA memory */ if (usb_bus_mem_alloc_all(&sc->sc_bus, USB_GET_DMA_TAG(dev), &ehci_iterate_hw_softc)) return (ENXIO); /* * Set handle to USB related registers subregion used by * generic EHCI driver. */ err = bus_space_subregion(sc->sc_io_tag, bsh, 0x0, sc->sc_io_size, &sc->sc_io_hdl); if (err != 0) return (ENXIO); phy_init(esc); /* Setup interrupt handler */ err = bus_setup_intr(dev, esc->res[5], INTR_TYPE_BIO | INTR_MPSAFE, NULL, (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl); if (err) { device_printf(dev, "Could not setup irq, " "%d\n", err); return (1); } /* Add USB device */ sc->sc_bus.bdev = device_add_child(dev, "usbus", -1); if (!sc->sc_bus.bdev) { device_printf(dev, "Could not add USB device\n"); err = bus_teardown_intr(dev, esc->res[5], sc->sc_intr_hdl); if (err) device_printf(dev, "Could not tear down irq," " %d\n", err); return (1); } device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus); strlcpy(sc->sc_vendor, "Samsung", sizeof(sc->sc_vendor)); err = ehci_init(sc); if (!err) { sc->sc_flags |= EHCI_SCFLG_DONEINIT; err = device_probe_and_attach(sc->sc_bus.bdev); } else { device_printf(dev, "USB init failed err=%d\n", err); device_delete_child(dev, sc->sc_bus.bdev); sc->sc_bus.bdev = NULL; err = bus_teardown_intr(dev, esc->res[5], sc->sc_intr_hdl); if (err) device_printf(dev, "Could not tear down irq," " %d\n", err); return (1); } return (0); } static int exynos_ehci_detach(device_t dev) { struct exynos_ehci_softc *esc; ehci_softc_t *sc; int err; esc = device_get_softc(dev); sc = &esc->base; if (sc->sc_flags & EHCI_SCFLG_DONEINIT) return (0); /* * only call ehci_detach() after ehci_init() */ if (sc->sc_flags & EHCI_SCFLG_DONEINIT) { ehci_detach(sc); sc->sc_flags &= ~EHCI_SCFLG_DONEINIT; } /* * Disable interrupts that might have been switched on in * ehci_init. 
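 * (they are masked by zeroing EHCI_USBINTR below).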
*/ if (sc->sc_io_tag && sc->sc_io_hdl) bus_space_write_4(sc->sc_io_tag, sc->sc_io_hdl, EHCI_USBINTR, 0); if (esc->res[5] && sc->sc_intr_hdl) { err = bus_teardown_intr(dev, esc->res[5], sc->sc_intr_hdl); if (err) { device_printf(dev, "Could not tear down irq," " %d\n", err); return (err); } sc->sc_intr_hdl = NULL; } if (sc->sc_bus.bdev) { device_delete_child(dev, sc->sc_bus.bdev); sc->sc_bus.bdev = NULL; } /* During module unload there are lots of children leftover */ device_delete_children(dev); bus_release_resources(dev, exynos_ehci_spec, esc->res); return (0); } Index: head/sys/arm/xscale/i8134x/i81342reg.h =================================================================== --- head/sys/arm/xscale/i8134x/i81342reg.h (revision 258779) +++ head/sys/arm/xscale/i8134x/i81342reg.h (revision 258780) @@ -1,348 +1,348 @@ /*- * Copyright (c) 2006 Olivier Houchard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* $FreeBSD$ */ #ifndef I81342_REG_H_ #define I81342_REG_H_ /* Physical Memory Map */ /* * 0x000000000 - 0x07FFFFFFF SDRAM * 0x090100000 - 0x0901FFFFF ATUe Outbound IO Window * 0x0F0000000 - 0x0F1FFFFFF Flash * 0x0F2000000 - 0x0F20FFFFF PCE1 * 0x0F3000000 - 0x0FFCFFFFF Compact Flash * 0x0FFD00000 - 0x0FFDFFFFF MMR * 0x0FFFB0000 - 0x0FFFBFFFF ATU-X Outbound I/O Window * 0x0FFFD0000 - 0x0FFFDFFFF ATUe Outbound I/O Window * 0x100000000 - 0x1FFFFFFFF ATU-X outbound Memory Translation Window * 0x2FF000000 - 0x2FFFFFFFF ATUe Outbound Memory Translation Window */ #define IOP34X_VADDR 0xf0000000 #define IOP34X_HWADDR 0xffd00000 #define IOP34X_SIZE 0x100000 #define IOP34X_ADMA0_OFFSET 0x00080000 #define IOP34X_ADMA1_OFFSET 0x00080200 #define IOP34X_ADMA2_OFFSET 0x00080400 #define IOP34X_ADMA_SIZE 0x200 /* ADMA Registers */ #define IOP34X_ADMA_CCR 0x0000 /* Channel Control Register */ #define IOP34X_ADMA_CSR 0x0004 /* Channel Status Register */ #define IOP34X_ADMA_DAR 0x0008 /* Descriptor Address Register */ #define IOP34X_ADMA_IPCR 0x0018 /* Internal Interface Parity Ctrl Reg */ #define IOP34X_ADMA_NDAR 0x0024 /* Next Descriptor Register */ #define IOP34X_ADMA_DCR 0x0028 /* Descriptor Control Register */ #define IOP34X_ADMA_IE (1 << 0) /* Interrupt enable */ #define IOP34X_ADMA_TR (1 << 1) /* Transfer Direction */ /* * Source Destination * 00 Host I/O Interface Local Memory * 01 Local Memory Host I/O Interface * 10 Internal Bus Local Memory * 11 Local Memory Internal Bus */ #define IOP34X_ADMA_SS (1 << 3) /* Source selection */ /* 0000: Data Transfer / CRC / Memory Block Fill */ #define IOP34X_ADMA_ZRBCE (1 << 7) /* Zero Result Buffer Check Enable */ #define IOP34X_ADMA_MBFE (1 << 8) /* Memory Block Fill Enable */ #define IOP34X_ADMA_CGE (1 << 9) /* CRC Generation enable */ #define IOP34X_ADMA_CTD (1 << 10) /* CRC Transfer disable */ #define IOP34X_ADMA_CSFD (1 << 11) /* CRC Seed fetch disable */ #define IOP34X_ADMA_SWBE (1 << 12) /* Status write back enable */ #define IOP34X_ADMA_ESE (1 << 13) /* Endian swap enable */ #define IOP34X_ADMA_PQUTE (1 << 16) /* P+Q Update Transfer Enable */ #define IOP34X_ADMA_DXE (1 << 17) /* Dual XOR Enable */ #define IOP34X_ADMA_PQTE (1 << 18) /* P+Q Transfer Enable */ #define IOP34X_ADMA_PTD (1 << 19) /* P Transfer Disable */ #define IOP34X_ADMA_ROE (1 << 30) /* Relaxed Ordering Enable */ -#define IOP34X_ADMA_NSE (1 << 31) /* No Snoop Enable */ +#define IOP34X_ADMA_NSE (1U << 31) /* No Snoop Enable */ #define IOP34X_PBBAR0 0x81588 /* PBI Base Address Register 0 */ #define IOP34X_PBBAR0_ADDRMASK 0xfffff000 #define IOP34X_PBBAR1 0x81590 #define IOP34X_PCE1 0xF2000000 #define IOP34X_PCE1_SIZE 0x00100000 #define IOP34X_PCE1_VADDR 0xF1000000 #define IOP34X_ESSTSR0 0x82188 #define IOP34X_CONTROLLER_ONLY (1 << 14) #define IOP34X_INT_SEL_PCIX (1 << 15) #define IOP34X_PFR 0x82180 /* Processor Frequency Register */ #define IOP34X_FREQ_MASK ((1 << 16) | (1 << 17) | (1 << 18)) #define IOP34X_FREQ_600 (0) #define IOP34X_FREQ_667 (1 << 16) #define IOP34X_FREQ_800 (1 << 17) #define IOP34X_FREQ_833 ((1 << 17) | (1 << 16)) #define IOP34X_FREQ_1000 (1 << 18) #define IOP34X_FREQ_1200 ((1 << 16) | (1 << 18)) #define IOP34X_UART0_VADDR IOP34X_VADDR + 0x82300 #define IOP34X_UART0_HWADDR IOP34X_HWADDR + 0x82300 #define IOP34X_UART1_VADDR IOP34X_VADDR + 0x82340 #define IOP34X_UART1_HWADDR IOP34X_HWADDR + 0x82340 #define IOP34X_PBI_HWADDR 0xffd81580 /* SDRAM Memory Controller */ #define SMC_SDBR 0x8180c /* Base Register */ #define SMC_SDBR_BASEADDR (1 << 27) #define
SMC_SDBR_BASEADDR_MASK ((1 << 27) | (1 << 28) | (1 << 29) | (1 << 30) \ - | (1 << 31)) + | (1U << 31)) #define SMC_SDUBR 0x81810 /* Upper Base Register */ #define SMC_SBSR 0x81814 /* SDRAM Bank Size Register */ #define SMC_SBSR_BANK_NB (1 << 2) /* Number of DDR Banks 0 => 2 Banks 1 => 1 Bank */ #define SMC_SBSR_BANK_SZ (1 << 27) /* SDRAM Bank Size : 0x00000 Empty 0x00001 128MB 0x00010 256MB 0x00100 512MB 0x01000 1GB */ #define SMC_SBSR_BANK_SZ_MASK ((1 << 27) | (1 << 28) | (1 << 29) | (1 << 30) \ - | (1 << 31)) + | (1U << 31)) /* Two possible addresses for ATUe depending on configuration. */ #define IOP34X_ATUE_ADDR(esstrsr) ((((esstrsr) & (IOP34X_CONTROLLER_ONLY | \ IOP34X_INT_SEL_PCIX)) == (IOP34X_CONTROLLER_ONLY | IOP34X_INT_SEL_PCIX)) ? \ 0xffdc8000 : 0xffdcd000) /* Three possible addresses for ATU-X depending on configuration. */ #define IOP34X_ATUX_ADDR(esstrsr) (!((esstrsr) & IOP34X_CONTROLLER_ONLY) ? \ 0xffdcc000 : !((esstrsr) & IOP34X_INT_SEL_PCIX) ? 0xffdc8000 : 0xffdcd000) #define IOP34X_OIOBAR_SIZE 0x10000 #define IOP34X_PCIX_OIOBAR 0xfffb0000 #define IOP34X_PCIX_OIOBAR_VADDR 0xf01b0000 #define IOP34X_PCIX_OMBAR 0x100000000 #define IOP34X_PCIE_OIOBAR 0xfffd0000 #define IOP34X_PCIE_OIOBAR_VADDR 0xf01d0000 #define IOP34X_PCIE_OMBAR 0x200000000 /* ATU Registers */ /* Common for ATU-X and ATUe */ #define ATU_VID 0x0000 /* ATU Vendor ID */ #define ATU_DID 0x0002 /* ATU Device ID */ #define ATU_CMD 0x0004 /* ATU Command Register */ #define ATU_SR 0x0006 /* ATU Status Register */ #define ATU_RID 0x0008 /* ATU Revision ID */ #define ATU_CCR 0x0009 /* ATU Class Code */ #define ATU_CLSR 0x000c /* ATU Cacheline Size */ #define ATU_LT 0x000d /* ATU Latency Timer */ #define ATU_HTR 0x000e /* ATU Header Type */ #define ATU_BISTR 0x000f /* ATU BIST Register */ #define ATU_IABAR0 0x0010 /* Inbound ATU Base Address register 0 */ #define ATU_IAUBAR0 0x0014 /* Inbound ATU Upper Base Address Register 0 */ #define ATU_IABAR1 0x0018 /* Inbound ATU Base Address Register 1 */ #define ATU_IAUBAR1 0x001c /* Inbound ATU Upper Base Address Register 1 */ #define ATU_IABAR2 0x0020 /* Inbound ATU Base Address Register 2 */ #define ATU_IAUBAR2 0x0024 /* Inbound ATU Upper Base Address Register 2 */ #define ATU_VSIR 0x002c /* ATU Subsystem Vendor ID Register */ #define ATU_SIR 0x002e /* ATU Subsystem ID Register */ #define ATU_ERBAR 0x0030 /* Expansion ROM Base Address Register */ #define ATU_CAPPTR 0x0034 /* ATU Capabilities Pointer Register */ #define ATU_ILR 0x003c /* ATU Interrupt Line Register */ #define ATU_IPR 0x003d /* ATU Interrupt Pin Register */ #define ATU_MGNT 0x003e /* ATU Minimum Grant Register */ #define ATU_MLAT 0x003f /* ATU Maximum Latency Register */ #define ATU_IALR0 0x0040 /* Inbound ATU Limit Register 0 */ #define ATU_IATVR0 0x0044 /* Inbound ATU Translate Value Register 0 */ #define ATU_IAUTVR0 0x0048 /* Inbound ATU Upper Translate Value Register 0*/ #define ATU_IALR1 0x004c /* Inbound ATU Limit Register 1 */ #define ATU_IATVR1 0x0050 /* Inbound ATU Translate Value Register 1 */ #define ATU_IAUTVR1 0x0054 /* Inbound ATU Upper Translate Value Register 1*/ #define ATU_IALR2 0x0058 /* Inbound ATU Limit Register 2 */ #define ATU_IATVR2 0x005c /* Inbound ATU Translate Value Register 2 */ #define ATU_IAUTVR2 0x0060 /* Inbound ATU Upper Translate Value Register 2*/ #define ATU_ERLR 0x0064 /* Expansion ROM Limit Register */ #define ATU_ERTVR 0x0068 /* Expansion ROM Translate Value Register */ #define ATU_ERUTVR 0x006c /* Expansion ROM Upper Translate Value Register*/ #define ATU_CR 0x0070 /*
ATU Configuration Register */ #define ATU_CR_OUT_EN (1 << 1) #define ATU_PCSR 0x0074 /* PCI Configuration and Status Register */ #define PCIE_BUSNO(x) ((x & 0xff000000) >> 24) -#define ATUX_CORE_RST ((1 << 30) | (1 << 31)) /* Core Processor Reset */ +#define ATUX_CORE_RST ((1 << 30) | (1U << 31)) /* Core Processor Reset */ #define ATUX_P_RSTOUT (1 << 21) /* Central Resource PCI Bus Reset */ #define ATUE_CORE_RST ((1 << 9) | (1 << 8)) /* Core Processor Reset */ #define ATU_ISR 0x0078 /* ATU Interrupt Status Register */ #define ATUX_ISR_PIE (1 << 18) /* PCI Interface error */ #define ATUX_ISR_IBPR (1 << 16) /* Internal Bus Parity Error */ #define ATUX_ISR_DCE (1 << 14) /* Detected Correctable error */ #define ATUX_ISR_ISCE (1 << 13) /* Initiated Split Completion Error Msg */ #define ATUX_ISR_RSCE (1 << 12) /* Received Split Completion Error Msg */ #define ATUX_ISR_DPE (1 << 9) /* Detected Parity Error */ #define ATUX_ISR_IBMA (1 << 7) /* Internal Bus Master Abort */ #define ATUX_ISR_PMA (1 << 3) /* PCI Master Abort */ #define ATUX_ISR_PTAM (1 << 2) /* PCI Target Abort (Master) */ #define ATUX_ISR_PTAT (1 << 1) /* PCI Target Abort (Target) */ #define ATUX_ISR_PMPE (1 << 0) /* PCI Master Parity Error */ #define ATUX_ISR_ERRMSK (ATUX_ISR_PIE | ATUX_ISR_IBPR | ATUX_ISR_DCE | \ ATUX_ISR_ISCE | ATUX_ISR_RSCE | ATUX_ISR_DPE | ATUX_ISR_IBMA | ATUX_ISR_PMA\ | ATUX_ISR_PTAM | ATUX_ISR_PTAT | ATUX_ISR_PMPE) #define ATUE_ISR_HON (1 << 13) /* Halt on Error Interrupt */ #define ATUE_ISR_RSE (1 << 12) /* Root System Error Message */ #define ATUE_ISR_REM (1 << 11) /* Root Error Message */ #define ATUE_ISR_PIE (1 << 10) /* PCI Interface error */ #define ATUE_ISR_CEM (1 << 9) /* Correctable Error Message */ #define ATUE_ISR_UEM (1 << 8) /* Uncorrectable error message */ #define ATUE_ISR_CRS (1 << 7) /* Received Configuration Retry Status */ #define ATUE_ISR_IBMA (1 << 5) /* Internal Bus Master Abort */ #define ATUE_ISR_DPE (1 << 4) /* Detected Parity Error Interrupt */ #define ATUE_ISR_MAI (1 << 3) /* Received Master Abort Interrupt */ #define ATUE_ISR_STAI (1 << 2) /* Signaled Target Abort Interrupt */ #define ATUE_ISR_TAI (1 << 1) /* Received Target Abort Interrupt */ #define ATUE_ISR_MDPE (1 << 0) /* Master Data Parity Error Interrupt */ #define ATUE_ISR_ERRMSK (ATUE_ISR_HON | ATUE_ISR_RSE | ATUE_ISR_REM | \ ATUE_ISR_PIE | ATUE_ISR_CEM | ATUE_ISR_UEM | ATUE_ISR_CRS | ATUE_ISR_IBMA |\ ATUE_ISR_DPE | ATUE_ISR_MAI | ATUE_ISR_STAI | ATUE_ISR_TAI | ATUE_ISR_MDPE) #define ATU_IMR 0x007c /* ATU Interrupt Mask Register */ /* 0x0080 - 0x008f reserved */ #define ATU_VPDCID 0x0090 /* VPD Capability Identifier Register */ #define ATU_VPDNIP 0x0091 /* VPD Next Item Pointer Register */ #define ATU_VPDAR 0x0092 /* VPD Address Register */ #define ATU_VPDDR 0x0094 /* VPD Data Register */ #define ATU_PMCID 0x0098 /* PM Capability Identifier Register */ #define ATU_PMNIPR 0x0099 /* PM Next Item Pointer Register */ #define ATU_PMCR 0x009a /* ATU Power Management Capabilities Register */ #define ATU_PMCSR 0x009c /* ATU Power Management Control/Status Register*/ #define ATU_MSICIR 0x00a0 /* MSI Capability Identifier Register */ #define ATU_MSINIPR 0x00a1 /* MSI Next Item Pointer Register */ #define ATU_MCR 0x00a2 /* Message Control Register */ #define ATU_MAR 0x00a4 /* Message Address Register */ #define ATU_MUAR 0x00a8 /* Message Upper Address Register */ #define ATU_MDR 0x00ac /* Message Data Register */ #define ATU_PCIXSR 0x00d4 /* PCI-X Status Register */ #define PCIXSR_BUSNO(x) (((x) & 0xff00) >> 8) #define ATU_IABAR3 
0x0200 /* Inbound ATU Base Address Register 3 */ #define ATU_IAUBAR3 0x0204 /* Inbound ATU Upper Base Address Register 3 */ #define ATU_IALR3 0x0208 /* Inbound ATU Limit Register 3 */ #define ATU_ITVR3 0x020c /* Inbound ATU Upper Translate Value Reg 3 */ #define ATU_OIOBAR 0x0300 /* Outbound I/O Base Address Register */ #define ATU_OIOWTVR 0x0304 /* Outbound I/O Window Translate Value Reg */ #define ATU_OUMBAR0 0x0308 /* Outbound Upper Memory Window base addr reg 0*/ #define ATU_OUMBAR_FUNC (28) -#define ATU_OUMBAR_EN (1 << 31) +#define ATU_OUMBAR_EN (1U << 31) #define ATU_OUMWTVR0 0x030c /* Outbound Upper 32bit Memory Window Translate Value Register 0 */ #define ATU_OUMBAR1 0x0310 /* Outbound Upper Memory Window base addr reg1*/ #define ATU_OUMWTVR1 0x0314 /* Outbound Upper 32bit Memory Window Translate Value Register 1 */ #define ATU_OUMBAR2 0x0318 /* Outbound Upper Memory Window base addr reg2*/ #define ATU_OUMWTVR2 0x031c /* Outbound Upper 32bit Memory Window Translate Value Register 2 */ #define ATU_OUMBAR3 0x0320 /* Outbound Upper Memory Window base addr reg3*/ #define ATU_OUMWTVR3 0x0324 /* Outbound Upper 32bit Memory Window Translate Value Register 3 */ /* ATU-X specific */ #define ATUX_OCCAR 0x0330 /* Outbound Configuration Cycle Address Reg */ #define ATUX_OCCDR 0x0334 /* Outbound Configuration Cycle Data Reg */ #define ATUX_OCCFN 0x0338 /* Outbound Configuration Cycle Function Number*/ /* ATUe specific */ #define ATUE_OCCAR 0x032c /* Outbound Configuration Cycle Address Reg */ #define ATUE_OCCDR 0x0330 /* Outbound Configuration Cycle Data Reg */ #define ATUE_OCCFN 0x0334 /* Outbound Configuration Cycle Function Number*/ /* Interrupts */ /* IINTSRC0 */ #define ICU_INT_ADMA0_EOT (0) /* ADMA 0 End of transfer */ #define ICU_INT_ADMA0_EOC (1) /* ADMA 0 End of Chain */ #define ICU_INT_ADMA1_EOT (2) /* ADMA 1 End of transfer */ #define ICU_INT_ADMA1_EOC (3) /* ADMA 1 End of chain */ #define ICU_INT_ADMA2_EOT (4) /* ADMA 2 End of transfer */ #define ICU_INT_ADMA2_EOC (5) /* ADMA 2 end of chain */ #define ICU_INT_WDOG (6) /* Watchdog timer */ /* 7 Reserved */ #define ICU_INT_TIMER0 (8) /* Timer 0 */ #define ICU_INT_TIMER1 (9) /* Timer 1 */ #define ICU_INT_I2C0 (10) /* I2C bus interface 0 */ #define ICU_INT_I2C1 (11) /* I2C bus interface 1 */ #define ICU_INT_MU (12) /* Message Unit */ #define ICU_INT_MU_IPQ (13) /* Message unit inbound post queue */ #define ICU_INT_ATUE_IM (14) /* ATU-E inbound message */ #define ICU_INT_ATU_BIST (15) /* ATU/Start BIST */ #define ICU_INT_PMC (16) /* PMC */ #define ICU_INT_PMU (17) /* PMU */ #define ICU_INT_PC (18) /* Processor cache */ /* 19-23 Reserved */ #define ICU_INT_XINT0 (24) #define ICU_INT_XINT1 (25) #define ICU_INT_XINT2 (26) #define ICU_INT_XINT3 (27) #define ICU_INT_XINT4 (28) #define ICU_INT_XINT5 (29) #define ICU_INT_XINT6 (30) #define ICU_INT_XINT7 (31) /* IINTSRC1 */ #define ICU_INT_XINT8 (32) #define ICU_INT_XINT9 (33) #define ICU_INT_XINT10 (34) #define ICU_INT_XINT11 (35) #define ICU_INT_XINT12 (36) #define ICU_INT_XINT13 (37) #define ICU_INT_XINT14 (38) #define ICU_INT_XINT15 (39) /* 40-50 reserved */ #define ICU_INT_UART0 (51) /* UART 0 */ #define ICU_INT_UART1 (52) /* UART 1 */ #define ICU_INT_PBIUE (53) /* Peripheral bus interface unit error */ #define ICU_INT_ATUCRW (54) /* ATU Configuration register write */ #define ICU_INT_ATUE (55) /* ATU error */ #define ICU_INT_MCUE (56) /* Memory controller unit error */ #define ICU_INT_ADMA0E (57) /* ADMA Channel 0 error */ #define ICU_INT_ADMA1E (58) /* ADMA Channel 1 error */ #define
ICU_INT_ADMA2E (59) /* ADMA Channel 2 error */ /* 60-61 reserved */ #define ICU_INT_MUE (62) /* Messaging Unit Error */ /* 63 reserved */ /* IINTSRC2 */ #define ICU_INT_IP (64) /* Inter-processor */ /* 65-93 reserved */ #define ICU_INT_SIBBE (94) /* South internal bus bridge error */ /* 95 reserved */ /* IINTSRC3 */ #define ICU_INT_I2C2 (96) /* I2C bus interface 2 */ #define ICU_INT_ATUE_BIST (97) /* ATU-E/Start BIST */ #define ICU_INT_ATUE_CRW (98) /* ATU-E Configuration register write */ #define ICU_INT_ATUEE (99) /* ATU-E Error */ #define ICU_INT_IMU (100) /* IMU */ /* 101-106 reserved */ #define ICU_INT_ATUE_MA (107) /* ATUE Interrupt message A */ #define ICU_INT_ATUE_MB (108) /* ATUE Interrupt message B */ #define ICU_INT_ATUE_MC (109) /* ATUE Interrupt message C */ #define ICU_INT_ATUE_MD (110) /* ATUE Interrupt message D */ #define ICU_INT_MU_MSIX_TW (111) /* MU MSI-X Table write */ /* 112 reserved */ #define ICU_INT_IMSI (113) /* Inbound MSI */ /* 114-126 reserved */ #define ICU_INT_HPI (127) /* HPI */ #endif /* I81342_REG_H_ */ Index: head/sys/arm/xscale/ixp425/ixp425reg.h =================================================================== --- head/sys/arm/xscale/ixp425/ixp425reg.h (revision 258779) +++ head/sys/arm/xscale/ixp425/ixp425reg.h (revision 258780) @@ -1,714 +1,714 @@ /* $NetBSD: ixp425reg.h,v 1.19 2005/12/11 12:16:51 christos Exp $ */ /* * Copyright (c) 2003 * Ichiro FUKUHARA . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Ichiro FUKUHARA. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ * */ #ifndef _IXP425REG_H_ #define _IXP425REG_H_ /* * Physical memory map for the Intel IXP425 */ /* * CC00 00FF --------------------------- * SDRAM Configuration Registers * CC00 0000 --------------------------- * * C800 BFFF --------------------------- * System and Peripheral Registers * C800 0000 --------------------------- * Expansion Bus Configuration Registers * C400 0000 --------------------------- * PCI Configuration and Status Registers * C000 0000 --------------------------- * * 6400 0000 --------------------------- * Queue manager * 6000 0000 --------------------------- * Expansion Bus Data * 5000 0000 --------------------------- * PCI Data * 4800 0000 --------------------------- * * 4000 0000 --------------------------- * SDRAM * 0000 0000 --------------------------- */ /* * Virtual memory map for the Intel IXP425/IXP435 integrated devices */ /* * FFFF FFFF --------------------------- * * Global cache clean area * FF00 0000 --------------------------- * * FE00 0000 --------------------------- * 16M CFI Flash (on ext bus) * FD00 0000 --------------------------- * * FC00 0000 --------------------------- * PCI Data (memory space) * F800 0000 --------------------------- IXP425_PCI_MEM_VBASE * * F020 1000 --------------------------- * SDRAM/DDR Memory Controller * F020 0000 --------------------------- IXP425_MCU_VBASE * * F001 F000 RS485 (Cambria) CAMBRIA_RS485_VBASE * F001 E000 GPS (Cambria) CAMBRIA_GPS_VBASE * F001 D000 EHCI USB 2 (IXP435) IXP435_USB2_VBASE * F001 C000 EHCI USB 1 (IXP435) IXP435_USB1_VBASE * Queue manager * F001 8000 --------------------------- IXP425_QMGR_VBASE * PCI Configuration and Status * F001 7000 --------------------------- IXP425_PCI_VBASE * * (NB: gap for future addition of EXP CS5-7) * F001 4000 Expansion Bus Chip Select 4 * F001 3000 Expansion Bus Chip Select 3 * F001 2000 Expansion Bus Chip Select 2 * F001 1000 Expansion Bus Chip Select 1 * Expansion Bus Configuration * F001 0000 --------------------------- IXP425_EXP_VBASE * * F000 C000 MAC-A (IXP435) * F000 B000 USB (option on IXP425) * F000 A000 MAC-B (IXP425) | MAC-C (IXP435) * F000 9000 MAC-A (IXP425) * F000 8000 NPE-C * F000 7000 NPE-B (IXP425) * F000 6000 NPE-A * F000 5000 Timers * F000 4000 GPIO Controller * F000 3000 Interrupt Controller * F000 2000 Performance Monitor Controller (PMC) * F000 1000 UART 1 (IXP425) * F000 0000 UART 0 * F000 0000 --------------------------- IXP425_IO_VBASE * * 0000 0000 --------------------------- * */ /* Physical/Virtual address for I/O space */ #define IXP425_IO_VBASE 0xf0000000UL #define IXP425_IO_HWBASE 0xc8000000UL #define IXP425_IO_SIZE 0x00010000UL /* Physical/Virtual address offsets */ #define IXP425_UART0_OFFSET 0x00000000UL #define IXP425_UART1_OFFSET 0x00001000UL #define IXP425_PMC_OFFSET 0x00002000UL #define IXP425_INTR_OFFSET 0x00003000UL #define IXP425_GPIO_OFFSET 0x00004000UL #define IXP425_TIMER_OFFSET 0x00005000UL #define IXP425_NPE_A_OFFSET 0x00006000UL /* Not User Programmable */ #define IXP425_NPE_B_OFFSET 0x00007000UL /* Not User Programmable */ #define IXP425_NPE_C_OFFSET 0x00008000UL /* Not User Programmable */ #define IXP425_MAC_B_OFFSET 0x00009000UL /* Ethernet MAC on NPE-B */ #define IXP425_MAC_C_OFFSET 0x0000a000UL /* Ethernet MAC on NPE-C */ #define IXP425_USB_OFFSET 0x0000b000UL #define IXP435_MAC_A_OFFSET 0x0000c000UL /* Ethernet MAC on NPE-A */ #define IXP425_REG_SIZE 0x1000 /* * UART * UART0 0xc8000000 * UART1 0xc8001000 * */ /* I/O space */ #define IXP425_UART0_HWBASE (IXP425_IO_HWBASE + IXP425_UART0_OFFSET)
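/*
 * The defines that follow all apply one rule from the maps above: a
 * device's register window sits at IXP425_IO_HWBASE (physical) or
 * IXP425_IO_VBASE (virtual) plus a fixed per-device offset.  Below is
 * a minimal stand-alone sketch of that arithmetic, compiled on its
 * own; the DEMO_* names are illustrative copies of the constants
 * above, not part of this header.
 */
#include <stdio.h>

#define DEMO_IO_VBASE	0xf0000000UL	/* mirrors IXP425_IO_VBASE */
#define DEMO_UART1_OFF	0x00001000UL	/* mirrors IXP425_UART1_OFFSET */

int
main(void)
{
	/* 0xf0000000 + 0x1000 = 0xf0001000, as in the map comment above. */
	printf("UART1 regs at 0x%08lx\n", DEMO_IO_VBASE + DEMO_UART1_OFF);
	return (0);
}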
#define IXP425_UART1_HWBASE (IXP425_IO_HWBASE + IXP425_UART1_OFFSET) #define IXP425_UART0_VBASE (IXP425_IO_VBASE + IXP425_UART0_OFFSET) /* 0xf0000000 */ #define IXP425_UART1_VBASE (IXP425_IO_VBASE + IXP425_UART1_OFFSET) /* 0xf0001000 */ #define IXP425_UART_FREQ 14745600 #define IXP425_UART_IER 0x01 /* interrupt enable register */ #define IXP425_UART_IER_RTOIE 0x10 /* receiver timeout interrupt enable */ #define IXP425_UART_IER_UUE 0x40 /* UART Unit enable */ /*#define IXP4XX_COM_NPORTS 8*/ /* * Timers */ #define IXP425_TIMER_HWBASE (IXP425_IO_HWBASE + IXP425_TIMER_OFFSET) #define IXP425_TIMER_VBASE (IXP425_IO_VBASE + IXP425_TIMER_OFFSET) #define IXP425_OST_TS 0x0000 #define IXP425_OST_TIM0 0x0004 #define IXP425_OST_TIM1 0x000C #define IXP425_OST_TIM0_RELOAD 0x0008 #define IXP425_OST_TIM1_RELOAD 0x0010 #define TIMERRELOAD_MASK 0xFFFFFFFC #define OST_ONESHOT_EN (1U << 1) #define OST_TIMER_EN (1U << 0) #define IXP425_OST_STATUS 0x0020 #define OST_WARM_RESET (1U << 4) #define OST_WDOG_INT (1U << 3) #define OST_TS_INT (1U << 2) #define OST_TIM1_INT (1U << 1) #define OST_TIM0_INT (1U << 0) #define IXP425_OST_WDOG 0x0014 #define IXP425_OST_WDOG_ENAB 0x0018 #define IXP425_OST_WDOG_KEY 0x001c #define OST_WDOG_KEY_MAJICK 0x482e #define OST_WDOG_ENAB_RST_ENA (1u << 0) #define OST_WDOG_ENAB_INT_ENA (1u << 1) #define OST_WDOG_ENAB_CNT_ENA (1u << 2) /* * Interrupt Controller Unit. * PA 0xc8003000 */ #define IXP425_IRQ_HWBASE IXP425_IO_HWBASE + IXP425_INTR_OFFSET #define IXP425_IRQ_VBASE IXP425_IO_VBASE + IXP425_INTR_OFFSET /* 0xf0003000 */ #define IXP425_IRQ_SIZE 0x00000020UL #define IXP425_INT_STATUS (IXP425_IRQ_VBASE + 0x00) #define IXP425_INT_ENABLE (IXP425_IRQ_VBASE + 0x04) #define IXP425_INT_SELECT (IXP425_IRQ_VBASE + 0x08) #define IXP425_IRQ_STATUS (IXP425_IRQ_VBASE + 0x0C) #define IXP425_FIQ_STATUS (IXP425_IRQ_VBASE + 0x10) #define IXP425_INT_PRTY (IXP425_IRQ_VBASE + 0x14) #define IXP425_IRQ_ENC (IXP425_IRQ_VBASE + 0x18) #define IXP425_FIQ_ENC (IXP425_IRQ_VBASE + 0x1C) #define IXP425_INT_SW1 31 /* SW Interrupt 1 */ #define IXP425_INT_SW0 30 /* SW Interrupt 0 */ #define IXP425_INT_GPIO_12 29 /* GPIO 12 */ #define IXP425_INT_GPIO_11 28 /* GPIO 11 */ #define IXP425_INT_GPIO_10 27 /* GPIO 10 */ #define IXP425_INT_GPIO_9 26 /* GPIO 9 */ #define IXP425_INT_GPIO_8 25 /* GPIO 8 */ #define IXP425_INT_GPIO_7 24 /* GPIO 7 */ #define IXP425_INT_GPIO_6 23 /* GPIO 6 */ #define IXP425_INT_GPIO_5 22 /* GPIO 5 */ #define IXP425_INT_GPIO_4 21 /* GPIO 4 */ #define IXP425_INT_GPIO_3 20 /* GPIO 3 */ #define IXP425_INT_GPIO_2 19 /* GPIO 2 */ #define IXP425_INT_XSCALE_PMU 18 /* XScale PMU */ #define IXP425_INT_AHB_PMU 17 /* AHB PMU */ #define IXP425_INT_WDOG 16 /* Watchdog Timer */ #define IXP425_INT_UART0 15 /* HighSpeed UART */ #define IXP425_INT_STAMP 14 /* Timestamp Timer */ #define IXP425_INT_UART1 13 /* Console UART */ #define IXP425_INT_USB 12 /* USB */ #define IXP425_INT_TMR1 11 /* General-Purpose Timer1 */ #define IXP425_INT_PCIDMA2 10 /* PCI DMA Channel 2 */ #define IXP425_INT_PCIDMA1 9 /* PCI DMA Channel 1 */ #define IXP425_INT_PCIINT 8 /* PCI Interrupt */ #define IXP425_INT_GPIO_1 7 /* GPIO 1 */ #define IXP425_INT_GPIO_0 6 /* GPIO 0 */ #define IXP425_INT_TMR0 5 /* General-Purpose Timer0 */ #define IXP425_INT_QUE33_64 4 /* Queue Manager 33-64 */ #define IXP425_INT_QUE1_32 3 /* Queue Manager 1-32 */ #define IXP425_INT_NPE_C 2 /* NPE C */ #define IXP425_INT_NPE_B 1 /* NPE B */ #define IXP425_INT_NPE_A 0 /* NPE A */ /* NB: IXP435 has an additional 32 IRQ's */ #define IXP435_INT_STATUS2 (IXP425_IRQ_VBASE + 0x20)
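/*
 * The interrupt numbers above are bit positions in IXP425_INT_STATUS /
 * IXP425_INT_ENABLE.  Since IXP425_INT_SW1 is bit 31, a mask for it
 * must come from an unsigned shift: on a 32-bit int, (1 << 31) is
 * undefined behavior in C, which is precisely what this revision's
 * (1U << 31) changes avoid.  A minimal illustrative helper follows;
 * the demo_* names are a sketch, not part of this header.
 */
#include <stdint.h>

static inline uint32_t
demo_irq_bit(int irq)
{
	/* Well defined for irq 0..31, including bit 31 (INT_SW1). */
	return ((uint32_t)1 << irq);
}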
#define IXP435_INT_ENABLE2 (IXP425_IRQ_VBASE + 0x24) #define IXP435_INT_SELECT2 (IXP425_IRQ_VBASE + 0x28) #define IXP435_IRQ_STATUS2 (IXP425_IRQ_VBASE + 0x2C) #define IXP435_FIQ_STATUS2 (IXP425_IRQ_VBASE + 0x30) #define IXP435_INT_USB0 32 /* USB Host 2.0 Host 0 */ #define IXP435_INT_USB1 33 /* USB Host 2.0 Host 1 */ #define IXP435_INT_QMGR_PER 60 /* Queue manager parity error */ #define IXP435_INT_ECC 61 /* Single or multi-bit ECC error */ /* * software interrupt */ #define IXP425_INT_bit31 31 #define IXP425_INT_bit30 30 #define IXP425_INT_bit14 14 #define IXP425_INT_bit11 11 #define IXP425_INT_HWMASK (0xffffffff & \ ~((1 << IXP425_INT_bit31) | \ (1 << IXP425_INT_bit30) | \ (1 << IXP425_INT_bit14) | \ (1 << IXP425_INT_bit11))) #define IXP425_INT_GPIOMASK (0x3ff800c0u) #define IXP435_INT_HWMASK ((1 << (IXP435_INT_USB0 - 32)) | \ (1 << (IXP435_INT_USB1 - 32)) | \ (1 << (IXP435_INT_QMGR_PER - 32)) | \ (1 << (IXP435_INT_ECC - 32))) /* * GPIO */ #define IXP425_GPIO_HWBASE IXP425_IO_HWBASE + IXP425_GPIO_OFFSET #define IXP425_GPIO_VBASE IXP425_IO_VBASE + IXP425_GPIO_OFFSET /* 0xf0004000 */ #define IXP425_GPIO_SIZE 0x00000020UL #define IXP425_GPIO_GPOUTR 0x00 #define IXP425_GPIO_GPOER 0x04 #define IXP425_GPIO_GPINR 0x08 #define IXP425_GPIO_GPISR 0x0c #define IXP425_GPIO_GPIT1R 0x10 #define IXP425_GPIO_GPIT2R 0x14 #define IXP425_GPIO_GPCLKR 0x18 # define GPCLKR_MUX14 (1U << 8) # define GPCLKR_CLK0TC_SHIFT 4 # define GPCLKR_CLK0DC_SHIFT 0 /* GPIO Output */ #define GPOUT_ON 0x1 #define GPOUT_OFF 0x0 /* GPIO direction */ #define GPOER_INPUT 0x1 #define GPOER_OUTPUT 0x0 /* GPIO Type bits */ #define GPIO_TYPE_ACT_HIGH 0x0 #define GPIO_TYPE_ACT_LOW 0x1 #define GPIO_TYPE_EDG_RISING 0x2 #define GPIO_TYPE_EDG_FALLING 0x3 #define GPIO_TYPE_TRANSITIONAL 0x4 #define GPIO_TYPE_MASK 0x7 #define GPIO_TYPE(b,v) ((v) << (((b) & 0x7) * 3)) #define GPIO_TYPE_REG(b) (((b)&8)?IXP425_GPIO_GPIT2R:IXP425_GPIO_GPIT1R) #define IXP4XX_GPIO_PINS 16 /* * Expansion Bus Configuration Space. 
*/ #define IXP425_EXP_HWBASE 0xc4000000UL #define IXP425_EXP_VBASE 0xf0010000UL #define IXP425_EXP_SIZE 0x1000 /* offset */ #define EXP_TIMING_CS0_OFFSET 0x0000 #define EXP_TIMING_CS1_OFFSET 0x0004 #define EXP_TIMING_CS2_OFFSET 0x0008 #define EXP_TIMING_CS3_OFFSET 0x000c #define EXP_TIMING_CS4_OFFSET 0x0010 #define EXP_TIMING_CS5_OFFSET 0x0014 #define EXP_TIMING_CS6_OFFSET 0x0018 #define EXP_TIMING_CS7_OFFSET 0x001c #define EXP_CNFG0_OFFSET 0x0020 #define EXP_CNFG1_OFFSET 0x0024 #define EXP_FCTRL_OFFSET 0x0028 #define IXP425_EXP_RECOVERY_SHIFT 16 #define IXP425_EXP_HOLD_SHIFT 20 #define IXP425_EXP_STROBE_SHIFT 22 #define IXP425_EXP_SETUP_SHIFT 26 #define IXP425_EXP_ADDR_SHIFT 28 #define IXP425_EXP_CS_EN (1U << 31) #define IXP425_EXP_RECOVERY_T(x) (((x) & 15) << IXP425_EXP_RECOVERY_SHIFT) #define IXP425_EXP_HOLD_T(x) (((x) & 3) << IXP425_EXP_HOLD_SHIFT) #define IXP425_EXP_STROBE_T(x) (((x) & 15) << IXP425_EXP_STROBE_SHIFT) #define IXP425_EXP_SETUP_T(x) (((x) & 3) << IXP425_EXP_SETUP_SHIFT) #define IXP425_EXP_ADDR_T(x) (((x) & 3) << IXP425_EXP_ADDR_SHIFT) /* EXP_CSn bits */ #define EXP_BYTE_EN 0x00000001 /* bus uses only 8-bit data */ #define EXP_WR_EN 0x00000002 /* ena writes to CS region */ /* bit 2 is reserved */ #define EXP_SPLT_EN 0x00000008 /* ena AHB split transfers */ #define EXP_MUX_EN 0x00000010 /* multiplexed address/data */ #define EXP_HRDY_POL 0x00000020 /* HPI|HRDY polarity */ #define EXP_BYTE_RD16 0x00000040 /* byte rd access to word dev */ #define EXP_CNFG 0x00003c00 /* device config size */ #define EXP_SZ_512 (0 << 10) #define EXP_SZ_1K (1 << 10) #define EXP_SZ_2K (2 << 10) #define EXP_SZ_4K (3 << 10) #define EXP_SZ_8K (4 << 10) #define EXP_SZ_16K (5 << 10) #define EXP_SZ_32K (6 << 10) #define EXP_SZ_64K (7 << 10) #define EXP_SZ_128K (8 << 10) #define EXP_SZ_256K (9 << 10) #define EXP_SZ_512K (10 << 10) #define EXP_SZ_1M (11 << 10) #define EXP_SZ_2M (12 << 10) #define EXP_SZ_4M (13 << 10) #define EXP_SZ_8M (14 << 10) #define EXP_SZ_16M (15 << 10) #define EXP_CYC_TYPE 0x0000c000 /* bus cycle "type" */ #define EXP_CYC_INTEL (0 << 14) #define EXP_CYC_MOTO (1 << 14) #define EXP_CYC_HPI (2 << 14) #define EXP_T5 0x000f0000 /* recovery timing */ #define EXP_T4 0x00300000 /* hold timing */ #define EXP_T3 0x03c00000 /* strobe timing */ #define EXP_T2 0x0c000000 /* setup/chip select timing */ #define EXP_T1 0x30000000 /* address timing */ /* bit 30 is reserved */ #define EXP_CS_EN 0x80000000 /* chip select enabled */ /* EXP_CNFG0 bits */ #define EXP_CNFG0_8BIT (1 << 0) #define EXP_CNFG0_PCI_HOST (1 << 1) #define EXP_CNFG0_PCI_ARB (1 << 2) #define EXP_CNFG0_PCI_66MHZ (1 << 4) -#define EXP_CNFG0_MEM_MAP (1 << 31) +#define EXP_CNFG0_MEM_MAP (1U << 31) /* EXP_CNFG1 bits */ #define EXP_CNFG1_SW_INT0 (1 << 0) #define EXP_CNFG1_SW_INT1 (1 << 1) #define EXP_FCTRL_RCOMP (1<<0) #define EXP_FCTRL_USB_DEVICE (1<<1) #define EXP_FCTRL_HASH (1<<2) #define EXP_FCTRL_AES (1<<3) #define EXP_FCTRL_DES (1<<4) #define EXP_FCTRL_HDLC (1<<5) #define EXP_FCTRL_AAL (1<<6) #define EXP_FCTRL_HSS (1<<7) #define EXP_FCTRL_UTOPIA (1<<8) #define EXP_FCTRL_ETH0 (1<<9) #define EXP_FCTRL_ETH1 (1<<10) #define EXP_FCTRL_NPEA (1<<11) /* reset */ #define EXP_FCTRL_NPEB (1<<12) /* reset */ #define EXP_FCTRL_NPEC (1<<13) /* reset */ #define EXP_FCTRL_PCI (1<<14) #define EXP_FCTRL_ECC_TIMESYNC (1<<15) #define EXP_FCTRL_UTOPIA_PHY (3<<16) /* PHY limit */ #define EXP_FCTRL_USB_HOST (1<<18) #define EXP_FCTRL_NPEA_ETH (1<<19) #define EXP_FCTRL_NPEB_ETH (1<<20) #define EXP_FCTRL_RSA (1<<21) #define EXP_FCTRL_MAXFREQ (3<<22) /* 
XScale frequency */ #define EXP_FCTRL_RESVD (0xff<<24) #define EXP_FCTRL_IXP46X_ONLY \ (EXP_FCTRL_ECC_TIMESYNC | EXP_FCTRL_USB_HOST | EXP_FCTRL_NPEA_ETH | \ EXP_FCTRL_NPEB_ETH | EXP_FCTRL_RSA | EXP_FCTRL_MAXFREQ) #define EXP_FCTRL_BITS \ "\20\1RCOMP\2USB\3HASH\4AES\5DES\6HDLC\7AAL\10HSS\11UTOPIA\12ETH0" \ "\13ETH1\17PCI\20ECC\23USB_HOST\24NPEA_ETH\25NPEB_ETH\26RSA" /* * PCI */ #define IXP425_PCI_HWBASE 0xc0000000 #define IXP425_PCI_VBASE 0xf0017000UL #define IXP425_PCI_SIZE 0x1000 #define IXP425_AHB_OFFSET 0x00000000UL /* AHB bus */ /* * Mapping registers of IXP425 PCI Configuration */ /* PCI_ID_REG 0x00 */ /* PCI_COMMAND_STATUS_REG 0x04 */ /* PCI_CLASS_REG 0x08 */ /* PCI_BHLC_REG 0x0c */ #define PCI_MAPREG_BAR0 0x10 /* Base Address 0 */ #define PCI_MAPREG_BAR1 0x14 /* Base Address 1 */ #define PCI_MAPREG_BAR2 0x18 /* Base Address 2 */ #define PCI_MAPREG_BAR3 0x1c /* Base Address 3 */ #define PCI_MAPREG_BAR4 0x20 /* Base Address 4 */ #define PCI_MAPREG_BAR5 0x24 /* Base Address 5 */ /* PCI_SUBSYS_ID_REG 0x2c */ /* PCI_INTERRUPT_REG 0x3c */ #define PCI_RTOTTO 0x40 /* PCI Controller CSR Base Address */ #define IXP425_PCI_CSR_BASE IXP425_PCI_VBASE /* PCI Memory Space */ #define IXP425_PCI_MEM_HWBASE 0x48000000UL #define IXP425_PCI_MEM_VBASE 0xf8000000UL #define IXP425_PCI_MEM_SIZE 0x04000000UL /* 64MB */ /* PCI I/O Space */ #define IXP425_PCI_IO_HWBASE 0x00000000UL #define IXP425_PCI_IO_SIZE 0x00100000UL /* 1Mbyte */ /* PCI Controller Configuration Offset */ #define PCI_NP_AD 0x00 #define PCI_NP_CBE 0x04 # define NP_CBE_SHIFT 4 #define PCI_NP_WDATA 0x08 #define PCI_NP_RDATA 0x0c #define PCI_CRP_AD_CBE 0x10 #define PCI_CRP_AD_WDATA 0x14 #define PCI_CRP_AD_RDATA 0x18 #define PCI_CSR 0x1c # define CSR_PRST (1U << 16) # define CSR_IC (1U << 15) # define CSR_ABE (1U << 4) # define CSR_PDS (1U << 3) # define CSR_ADS (1U << 2) # define CSR_HOST (1U << 0) #define PCI_ISR 0x20 # define ISR_AHBE (1U << 3) # define ISR_PPE (1U << 2) # define ISR_PFE (1U << 1) # define ISR_PSE (1U << 0) #define PCI_INTEN 0x24 #define PCI_DMACTRL 0x28 #define PCI_AHBMEMBASE 0x2c #define PCI_AHBIOBASE 0x30 #define PCI_PCIMEMBASE 0x34 #define PCI_AHBDOORBELL 0x38 #define PCI_PCIDOORBELL 0x3c #define PCI_ATPDMA0_AHBADDR 0x40 #define PCI_ATPDMA0_PCIADDR 0x44 #define PCI_ATPDMA0_LENGTH 0x48 #define PCI_ATPDMA1_AHBADDR 0x4c #define PCI_ATPDMA1_PCIADDR 0x50 #define PCI_ATPDMA1_LENGTH 0x54 #define PCI_PTADMA0_AHBADDR 0x58 #define PCI_PTADMA0_PCIADDR 0x5c #define PCI_PTADMA0_LENGTH 0x60 #define PCI_PTADMA1_AHBADDR 0x64 #define PCI_PTADMA1_PCIADDR 0x68 #define PCI_PTADMA1_LENGTH 0x6c /* PCI target(T)/initiator(I) Interface Commands for PCI_NP_CBE register */ #define COMMAND_NP_IA 0x0 /* Interrupt Acknowledge (I)*/ #define COMMAND_NP_SC 0x1 /* Special Cycle (I)*/ #define COMMAND_NP_IO_READ 0x2 /* I/O Read (T)(I) */ #define COMMAND_NP_IO_WRITE 0x3 /* I/O Write (T)(I) */ #define COMMAND_NP_MEM_READ 0x6 /* Memory Read (T)(I) */ #define COMMAND_NP_MEM_WRITE 0x7 /* Memory Write (T)(I) */ #define COMMAND_NP_CONF_READ 0xa /* Configuration Read (T)(I) */ #define COMMAND_NP_CONF_WRITE 0xb /* Configuration Write (T)(I) */ /* PCI byte enables */ #define BE_8BIT(a) ((0x10u << ((a) & 0x03)) ^ 0xf0) #define BE_16BIT(a) ((0x30u << ((a) & 0x02)) ^ 0xf0) #define BE_32BIT(a) 0x00 /* PCI byte selects */ #define READ_8BIT(v,a) ((u_int8_t)((v) >> (((a) & 3) * 8))) #define READ_16BIT(v,a) ((u_int16_t)((v) >> (((a) & 2) * 8))) #define WRITE_8BIT(v,a) (((u_int32_t)(v)) << (((a) & 3) * 8)) #define WRITE_16BIT(v,a) (((u_int32_t)(v)) << (((a) & 2) * 8)) /* 
PCI Controller Configuration Commands for PCI_CRP_AD_CBE */ #define COMMAND_CRP_READ 0x00 #define COMMAND_CRP_WRITE (1U << 16) /* * SDRAM Configuration Register */ #define IXP425_MCU_HWBASE 0xcc000000UL #define IXP425_MCU_VBASE 0xf0200000UL #define IXP425_MCU_SIZE 0x1000 /* Actually only 256 bytes */ #define MCU_SDR_CONFIG 0x00 #define MCU_SDR_CONFIG_MCONF(x) ((x) & 0x7) #define MCU_SDR_CONFIG_64MBIT (1u << 5) #define MCU_SDR_REFRESH 0x04 #define MCU_SDR_IR 0x08 /* * IXP435 DDR MCU Registers */ #define IXP435_MCU_HWBASE 0xcc00e500UL #define MCU_DDR_SDIR 0x00 /* DDR SDRAM Initialization Reg*/ #define MCU_DDR_SDCR0 0x04 /* DDR SDRAM Control Reg 0 */ #define MCU_DDR_SDCR1 0x08 /* DDR SDRAM Control Reg 1 */ #define MCU_DDR_SDBR 0x0c /* SDRAM Base Register */ #define MCU_DDR_SBR0 0x10 /* SDRAM Boundary Register 0 */ #define MCU_DDR_SBR1 0x14 /* SDRAM Boundary Register 1 */ #define MCU_DDR_ECCR 0x1c /* ECC Control Register */ #define MCU_DDR_ELOG0 0x20 /* ECC Log Register 0 */ #define MCU_DDR_ELOG1 0x24 /* ECC Log Register 1 */ #define MCU_DDR_ECAR0 0x28 /* ECC Address Register 0 */ #define MCU_DDR_ECAR1 0x2c /* ECC Address Register 1 */ #define MCU_DDR_ECTST 0x30 /* ECC Test Register */ #define MCU_DDR_MCISR 0x34 /* MC Interrupt Status Reg */ #define MCU_DDR_MPTCR 0x3c /* MC Port Transaction Cnt Reg*/ #define MCU_DDR_RFR 0x48 /* Refresh Frequency Register */ #define MCU_DDR_SDPR(n) (0x50+(n)*4) /* SDRAM Page Register 0-7 */ /* NB: RCVDLY at 0x1050 and LEGOVERIDE at 0x1074 */ /* * Performance Monitoring Unit (CP14) * * CP14.0.1 Performance Monitor Control Register(PMNC) * CP14.1.1 Clock Counter(CCNT) * CP14.4.1 Interrupt Enable Register(INTEN) * CP14.5.1 Overflow Flag Register(FLAG) * CP14.8.1 Event Selection Register(EVTSEL) * CP14.0.2 Performance Counter Register 0(PMN0) * CP14.1.2 Performance Counter Register 1(PMN1) * CP14.2.2 Performance Counter Register 2(PMN2) * CP14.3.2 Performance Counter Register 3(PMN3) */ #define PMNC_E 0x00000001 /* enable all counters */ #define PMNC_P 0x00000002 /* reset all PMNs to 0 */ #define PMNC_C 0x00000004 /* clock counter reset */ #define PMNC_D 0x00000008 /* clock counter / 64 */ #define INTEN_CC_IE 0x00000001 /* enable clock counter interrupt */ #define INTEN_PMN0_IE 0x00000002 /* enable PMN0 interrupt */ #define INTEN_PMN1_IE 0x00000004 /* enable PMN1 interrupt */ #define INTEN_PMN2_IE 0x00000008 /* enable PMN2 interrupt */ #define INTEN_PMN3_IE 0x00000010 /* enable PMN3 interrupt */ #define FLAG_CC_IF 0x00000001 /* clock counter overflow */ #define FLAG_PMN0_IF 0x00000002 /* PMN0 overflow */ #define FLAG_PMN1_IF 0x00000004 /* PMN1 overflow */ #define FLAG_PMN2_IF 0x00000008 /* PMN2 overflow */ #define FLAG_PMN3_IF 0x00000010 /* PMN3 overflow */ #define EVTSEL_EVCNT_MASK 0x0000000ff /* event to count for PMNs */ #define PMNC_EVCNT0_SHIFT 0 #define PMNC_EVCNT1_SHIFT 8 #define PMNC_EVCNT2_SHIFT 16 #define PMNC_EVCNT3_SHIFT 24 /* * Queue Manager */ #define IXP425_QMGR_HWBASE 0x60000000UL #define IXP425_QMGR_VBASE 0xf0018000UL #define IXP425_QMGR_SIZE 0x4000 /* * Network Processing Engines (NPE's) and associated Ethernet MAC's.
*/ #define IXP425_NPE_A_HWBASE (IXP425_IO_HWBASE + IXP425_NPE_A_OFFSET) #define IXP425_NPE_A_VBASE (IXP425_IO_VBASE + IXP425_NPE_A_OFFSET) #define IXP425_NPE_A_SIZE 0x1000 /* Actually only 256 bytes */ #define IXP425_NPE_B_HWBASE (IXP425_IO_HWBASE + IXP425_NPE_B_OFFSET) #define IXP425_NPE_B_VBASE (IXP425_IO_VBASE + IXP425_NPE_B_OFFSET) #define IXP425_NPE_B_SIZE 0x1000 /* Actually only 256 bytes */ #define IXP425_NPE_C_HWBASE (IXP425_IO_HWBASE + IXP425_NPE_C_OFFSET) #define IXP425_NPE_C_VBASE (IXP425_IO_VBASE + IXP425_NPE_C_OFFSET) #define IXP425_NPE_C_SIZE 0x1000 /* Actually only 256 bytes */ #define IXP425_MAC_B_HWBASE (IXP425_IO_HWBASE + IXP425_MAC_B_OFFSET) #define IXP425_MAC_B_VBASE (IXP425_IO_VBASE + IXP425_MAC_B_OFFSET) #define IXP425_MAC_B_SIZE 0x1000 /* Actually only 256 bytes */ #define IXP425_MAC_C_HWBASE (IXP425_IO_HWBASE + IXP425_MAC_C_OFFSET) #define IXP425_MAC_C_VBASE (IXP425_IO_VBASE + IXP425_MAC_C_OFFSET) #define IXP425_MAC_C_SIZE 0x1000 /* Actually only 256 bytes */ #define IXP435_MAC_A_HWBASE (IXP425_IO_HWBASE + IXP435_MAC_A_OFFSET) #define IXP435_MAC_A_VBASE (IXP425_IO_VBASE + IXP435_MAC_A_OFFSET) #define IXP435_MAC_A_SIZE 0x1000 /* Actually only 256 bytes */ /* * Expansion Bus Data Space. */ #define IXP425_EXP_BUS_HWBASE 0x50000000UL #define IXP425_EXP_BUS_SIZE 0x01000000 /* max, typically smaller */ #define IXP425_EXP_BUS_CSx_HWBASE(i) \ (IXP425_EXP_BUS_HWBASE + (i)*IXP425_EXP_BUS_SIZE) #define IXP425_EXP_BUS_CSx_SIZE 0x1000 #define IXP425_EXP_BUS_CSx_VBASE(i) \ (0xF0011000UL + (((i)-1)*IXP425_EXP_BUS_CSx_SIZE)) /* NB: CS0 is special; it maps flash */ #define IXP425_EXP_BUS_CS0_HWBASE IXP425_EXP_BUS_CSx_HWBASE(0) #define IXP425_EXP_BUS_CS0_VBASE 0xFD000000UL #ifndef IXP4XX_FLASH_SIZE #define IXP425_EXP_BUS_CS0_SIZE 0x01000000 /* NB: 16M */ #else #define IXP425_EXP_BUS_CS0_SIZE IXP4XX_FLASH_SIZE #endif #define IXP425_EXP_BUS_CS1_HWBASE IXP425_EXP_BUS_CSx_HWBASE(1) #define IXP425_EXP_BUS_CS1_VBASE IXP425_EXP_BUS_CSx_VBASE(1) #define IXP425_EXP_BUS_CS1_SIZE IXP425_EXP_BUS_CSx_SIZE #define IXP425_EXP_BUS_CS2_HWBASE IXP425_EXP_BUS_CSx_HWBASE(2) #define IXP425_EXP_BUS_CS2_VBASE IXP425_EXP_BUS_CSx_VBASE(2) #define IXP425_EXP_BUS_CS2_SIZE IXP425_EXP_BUS_CSx_SIZE #define IXP425_EXP_BUS_CS3_HWBASE IXP425_EXP_BUS_CSx_HWBASE(3) #define IXP425_EXP_BUS_CS3_VBASE IXP425_EXP_BUS_CSx_VBASE(3) #define IXP425_EXP_BUS_CS3_SIZE IXP425_EXP_BUS_CSx_SIZE #define IXP425_EXP_BUS_CS4_HWBASE IXP425_EXP_BUS_CSx_HWBASE(4) #define IXP425_EXP_BUS_CS4_VBASE IXP425_EXP_BUS_CSx_VBASE(4) #define IXP425_EXP_BUS_CS4_SIZE IXP425_EXP_BUS_CSx_SIZE /* NB: not mapped (yet) */ #define IXP425_EXP_BUS_CS5_HWBASE IXP425_EXP_BUS_CSx_HWBASE(5) #define IXP425_EXP_BUS_CS6_HWBASE IXP425_EXP_BUS_CSx_HWBASE(6) #define IXP425_EXP_BUS_CS7_HWBASE IXP425_EXP_BUS_CSx_HWBASE(7) /* * IXP435/Gateworks Cambria */ #define IXP435_USB1_HWBASE 0xCD000000UL /* USB host controller 1 */ #define IXP435_USB1_VBASE 0xF001C000UL #define IXP435_USB1_SIZE 0x1000 /* NB: only uses 0x300 */ #define IXP435_USB2_HWBASE 0xCE000000UL /* USB host controller 2 */ #define IXP435_USB2_VBASE 0xF001D000UL #define IXP435_USB2_SIZE 0x1000 /* NB: only uses 0x300 */ #define CAMBRIA_GPS_HWBASE 0x53FC0000UL /* optional GPS Serial Port */ #define CAMBRIA_GPS_VBASE 0xF001E000UL #define CAMBRIA_GPS_SIZE 0x1000 #define CAMBRIA_RS485_HWBASE 0x53F80000UL /* optional RS485 Serial Port */ #define CAMBRIA_RS485_VBASE 0xF001F000UL #define CAMBRIA_RS485_SIZE 0x1000 /* NB: these are mapped on the fly, so no fixed virtual addresses */ #define CAMBRIA_OCTAL_LED_HWBASE 
0x53F40000UL /* Octal Status LED Latch */ #define CAMBRIA_OCTAL_LED_SIZE 0x1000 #define CAMBRIA_CFSEL1_HWBASE 0x53E40000UL /* Compact Flash Socket Sel 1 */ #define CAMBRIA_CFSEL1_SIZE 0x40000 #define CAMBRIA_CFSEL0_HWBASE 0x53E00000UL /* Compact Flash Socket Sel 0 */ #define CAMBRIA_CFSEL0_SIZE 0x40000 #endif /* _IXP425REG_H_ */ Index: head/sys/boot/arm/at91/libat91/mci_device.h =================================================================== --- head/sys/boot/arm/at91/libat91/mci_device.h (revision 258779) +++ head/sys/boot/arm/at91/libat91/mci_device.h (revision 258780) @@ -1,429 +1,429 @@ /*- * Copyright (c) 2006 M. Warner Losh. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This software is derived from software provided by Kwikbyte who specifically * disclaimed copyright on the code. * * $FreeBSD$ */ //*--------------------------------------------------------------------------- //* ATMEL Microcontroller Software Support - ROUSSET - //*--------------------------------------------------------------------------- //* The software is delivered "AS IS" without warranty or condition of any //* kind, either express, implied or statutory. This includes without //* limitation any warranty or condition with respect to merchantability or //* fitness for any particular purpose, or against the infringements of //* intellectual property rights of others.
//*--------------------------------------------------------------------------- //* File Name : AT91C_MCI_Device.h //* Object : Data Flash Atmel Description File //* Translator : //* //* 1.0 26/11/02 FB : Creation //*--------------------------------------------------------------------------- #ifndef __MCI_Device_h #define __MCI_Device_h #include typedef unsigned int AT91S_MCIDeviceStatus; /////////////////////////////////////////////////////////////////////////////// #define AT91C_CARD_REMOVED 0 #define AT91C_MMC_CARD_INSERTED 1 #define AT91C_SD_CARD_INSERTED 2 #define AT91C_NO_ARGUMENT 0x0 #define AT91C_FIRST_RCA 0xCAFE #define AT91C_MAX_MCI_CARDS 10 #define AT91C_BUS_WIDTH_1BIT 0x00 #define AT91C_BUS_WIDTH_4BITS 0x02 /* Driver State */ #define AT91C_MCI_IDLE 0x0 #define AT91C_MCI_TIMEOUT_ERROR 0x1 #define AT91C_MCI_RX_SINGLE_BLOCK 0x2 #define AT91C_MCI_RX_MULTIPLE_BLOCK 0x3 #define AT91C_MCI_RX_STREAM 0x4 #define AT91C_MCI_TX_SINGLE_BLOCK 0x5 #define AT91C_MCI_TX_MULTIPLE_BLOCK 0x6 #define AT91C_MCI_TX_STREAM 0x7 /* TimeOut */ #define AT91C_TIMEOUT_CMDRDY 30 /////////////////////////////////////////////////////////////////////////////// // MMC & SDCard Structures /////////////////////////////////////////////////////////////////////////////// /*---------------------------------------------*/ /* MCI Device Structure Definition */ /*---------------------------------------------*/ typedef struct _AT91S_MciDevice { volatile unsigned char state; unsigned char SDCard_bus_width; unsigned int RCA; // RCA unsigned int READ_BL_LEN; #ifdef REPORT_SIZE unsigned int Memory_Capacity; #endif } AT91S_MciDevice; #include /////////////////////////////////////////////////////////////////////////////// // Function return values /////////////////////////////////////////////////////////////////////////////// #define AT91C_CMD_SEND_OK 0 // Command ok #define AT91C_CMD_SEND_ERROR -1 // Command failed #define AT91C_INIT_OK 2 // Init Successful #define AT91C_INIT_ERROR 3 // Init Failed #define AT91C_READ_OK 4 // Read Successful #define AT91C_READ_ERROR 5 // Read Failed #define AT91C_WRITE_OK 6 // Write Successful #define AT91C_WRITE_ERROR 7 // Write Failed #define AT91C_ERASE_OK 8 // Erase Successful #define AT91C_ERASE_ERROR 9 // Erase Failed #define AT91C_CARD_SELECTED_OK 10 // Card Selection Successful #define AT91C_CARD_SELECTED_ERROR 11 // Card Selection Failed #define AT91C_MCI_SR_ERROR (AT91C_MCI_UNRE | AT91C_MCI_OVRE | AT91C_MCI_DTOE | \ AT91C_MCI_DCRCE | AT91C_MCI_RTOE | AT91C_MCI_RENDE | AT91C_MCI_RCRCE | \ AT91C_MCI_RDIRE | AT91C_MCI_RINDE) #define MMC_CMDNB (0x1Fu << 0) // Command Number #define MMC_RSPTYP (0x3u << 6) // Response Type #define MMC_RSPTYP_NO (0x0u << 6) // No response #define MMC_RSPTYP_48 (0x1u << 6) // 48-bit response #define MMC_RSPTYP_136 (0x2u << 6) // 136-bit response #define MMC_SPCMD (0x7u << 8) // Special CMD #define MMC_SPCMD_NONE (0x0u << 8) // Not a special CMD #define MMC_SPCMD_INIT (0x1u << 8) // Initialization CMD #define MMC_SPCMD_SYNC (0x2u << 8) // Synchronized CMD #define MMC_SPCMD_IT_CMD (0x4u << 8) // Interrupt command #define MMC_SPCMD_IT_REP (0x5u << 8) // Interrupt response #define MMC_OPDCMD (0x1u << 11) // Open Drain Command #define MMC_MAXLAT (0x1u << 12) // Maximum Latency for Command to respond #define MMC_TRCMD (0x3u << 16) // Transfer CMD #define MMC_TRCMD_NO (0x0u << 16) // No transfer #define MMC_TRCMD_START (0x1u << 16) // Start transfer #define MMC_TRCMD_STOP (0x2u << 16) // Stop transfer #define MMC_TRDIR (0x1u << 18) // Transfer Direction #define
MMC_TRTYP (0x3u << 19) // Transfer Type #define MMC_TRTYP_BLOCK (0x0u << 19) // Block Transfer type #define MMC_TRTYP_MULTIPLE (0x1u << 19) // Multiple Block transfer type #define MMC_TRTYP_STREAM (0x2u << 19) // Stream transfer type /////////////////////////////////////////////////////////////////////////////// // MCI_CMD Register Value /////////////////////////////////////////////////////////////////////////////// #define POWER_ON_INIT \ (0 | MMC_TRCMD_NO | MMC_SPCMD_INIT | MMC_OPDCMD) ///////////////////////////////////////////////////////////////// // Class 0 & 1 commands: Basic commands and Read Stream commands ///////////////////////////////////////////////////////////////// #define GO_IDLE_STATE_CMD \ (0 | MMC_TRCMD_NO | MMC_SPCMD_NONE ) #define MMC_GO_IDLE_STATE_CMD \ (0 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_OPDCMD) #define MMC_SEND_OP_COND_CMD \ (1 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_48 | \ MMC_OPDCMD) #define ALL_SEND_CID_CMD \ (2 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_136) #define MMC_ALL_SEND_CID_CMD \ (2 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_136 | \ MMC_OPDCMD) #define SET_RELATIVE_ADDR_CMD \ (3 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_48 | \ MMC_MAXLAT) #define MMC_SET_RELATIVE_ADDR_CMD \ (3 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_48 | \ MMC_MAXLAT | MMC_OPDCMD) #define SET_DSR_CMD \ (4 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_NO | \ MMC_MAXLAT) // not tested #define SEL_DESEL_CARD_CMD \ (7 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_48 | \ MMC_MAXLAT) #define SEND_CSD_CMD \ (9 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_136 | \ MMC_MAXLAT) #define SEND_CID_CMD \ (10 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_136 | \ MMC_MAXLAT) #define MMC_READ_DAT_UNTIL_STOP_CMD \ (11 | MMC_TRTYP_STREAM | MMC_SPCMD_NONE | \ MMC_RSPTYP_48 | MMC_TRDIR | MMC_TRCMD_START | \ MMC_MAXLAT) #define STOP_TRANSMISSION_CMD \ (12 | MMC_TRCMD_STOP | MMC_SPCMD_NONE | MMC_RSPTYP_48 | \ MMC_MAXLAT) #define STOP_TRANSMISSION_SYNC_CMD \ (12 | MMC_TRCMD_STOP | MMC_SPCMD_SYNC | MMC_RSPTYP_48 | \ MMC_MAXLAT) #define SEND_STATUS_CMD \ (13 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_48 | \ MMC_MAXLAT) #define GO_INACTIVE_STATE_CMD \ (15 | MMC_RSPTYP_NO) //*------------------------------------------------ //* Class 2 commands: Block oriented Read commands //*------------------------------------------------ #define SET_BLOCKLEN_CMD (16 | MMC_TRCMD_NO | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_MAXLAT ) #define READ_SINGLE_BLOCK_CMD (17 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_START | MMC_TRTYP_BLOCK | MMC_TRDIR | MMC_MAXLAT) #define READ_MULTIPLE_BLOCK_CMD (18 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_START | MMC_TRTYP_MULTIPLE | MMC_TRDIR | MMC_MAXLAT) //*-------------------------------------------- //* Class 3 commands: Sequential write commands //*-------------------------------------------- #define MMC_WRITE_DAT_UNTIL_STOP_CMD (20 | MMC_TRTYP_STREAM| MMC_SPCMD_NONE | MMC_RSPTYP_48 & ~(MMC_TRDIR) | MMC_TRCMD_START | MMC_MAXLAT ) // MMC //*------------------------------------------------ //* Class 4 commands: Block oriented write commands //*------------------------------------------------ #define WRITE_BLOCK_CMD (24 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_START | (MMC_TRTYP_BLOCK & ~(MMC_TRDIR)) | MMC_MAXLAT) #define WRITE_MULTIPLE_BLOCK_CMD (25 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_START | (MMC_TRTYP_MULTIPLE & ~(MMC_TRDIR)) | MMC_MAXLAT) #define PROGRAM_CSD_CMD (27 | MMC_RSPTYP_48 ) //*---------------------------------------- //* Class 6 commands: Group
Write protect //*---------------------------------------- #define SET_WRITE_PROT_CMD (28 | MMC_RSPTYP_48 ) #define CLR_WRITE_PROT_CMD (29 | MMC_RSPTYP_48 ) #define SEND_WRITE_PROT_CMD (30 | MMC_RSPTYP_48 ) //*---------------------------------------- //* Class 5 commands: Erase commands //*---------------------------------------- #define TAG_SECTOR_START_CMD (32 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define TAG_SECTOR_END_CMD (33 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define MMC_UNTAG_SECTOR_CMD (34 | MMC_RSPTYP_48 ) #define MMC_TAG_ERASE_GROUP_START_CMD (35 | MMC_RSPTYP_48 ) #define MMC_TAG_ERASE_GROUP_END_CMD (36 | MMC_RSPTYP_48 ) #define MMC_UNTAG_ERASE_GROUP_CMD (37 | MMC_RSPTYP_48 ) #define ERASE_CMD (38 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT ) //*---------------------------------------- //* Class 7 commands: Lock commands //*---------------------------------------- #define LOCK_UNLOCK (42 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) // not tested //*----------------------------------------------- // Class 8 commands: Application specific commands //*----------------------------------------------- #define APP_CMD (55 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define GEN_CMD (56 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) // not tested #define SDCARD_SET_BUS_WIDTH_CMD (6 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define SDCARD_STATUS_CMD (13 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define SDCARD_SEND_NUM_WR_BLOCKS_CMD (22 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define SDCARD_SET_WR_BLK_ERASE_COUNT_CMD (23 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define SDCARD_APP_OP_COND_CMD (41 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO ) #define SDCARD_SET_CLR_CARD_DETECT_CMD (42 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define SDCARD_SEND_SCR_CMD (51 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) #define SDCARD_APP_ALL_CMD (SDCARD_SET_BUS_WIDTH_CMD +\ SDCARD_STATUS_CMD +\ SDCARD_SEND_NUM_WR_BLOCKS_CMD +\ SDCARD_SET_WR_BLK_ERASE_COUNT_CMD +\ SDCARD_APP_OP_COND_CMD +\ SDCARD_SET_CLR_CARD_DETECT_CMD +\ SDCARD_SEND_SCR_CMD) //*---------------------------------------- //* Class 9 commands: IO Mode commands //*---------------------------------------- #define MMC_FAST_IO_CMD (39 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_MAXLAT) #define MMC_GO_IRQ_STATE_CMD (40 | MMC_SPCMD_NONE | MMC_RSPTYP_48 | MMC_TRCMD_NO | MMC_MAXLAT) /////////////////////////////////////////////////////////////////////////////// // OCR Register /////////////////////////////////////////////////////////////////////////////// #define AT91C_VDD_16_17 (1 << 4) #define AT91C_VDD_17_18 (1 << 5) #define AT91C_VDD_18_19 (1 << 6) #define AT91C_VDD_19_20 (1 << 7) #define AT91C_VDD_20_21 (1 << 8) #define AT91C_VDD_21_22 (1 << 9) #define AT91C_VDD_22_23 (1 << 10) #define AT91C_VDD_23_24 (1 << 11) #define AT91C_VDD_24_25 (1 << 12) #define AT91C_VDD_25_26 (1 << 13) #define AT91C_VDD_26_27 (1 << 14) #define AT91C_VDD_27_28 (1 << 15) #define AT91C_VDD_28_29 (1 << 16) #define AT91C_VDD_29_30 (1 << 17) #define AT91C_VDD_30_31 (1 << 18) #define AT91C_VDD_31_32 (1 << 19) #define AT91C_VDD_32_33 (1 << 20) #define AT91C_VDD_33_34 (1 << 21) #define AT91C_VDD_34_35 (1 << 22) #define AT91C_VDD_35_36 (1 << 23) -#define AT91C_CARD_POWER_UP_BUSY (1 << 31) +#define AT91C_CARD_POWER_UP_BUSY (1U << 31) #define
AT91C_MMC_HOST_VOLTAGE_RANGE (AT91C_VDD_27_28 | AT91C_VDD_28_29 | \ AT91C_VDD_29_30 | AT91C_VDD_30_31 | AT91C_VDD_31_32 | AT91C_VDD_32_33) /////////////////////////////////////////////////////////////////////////////// // CURRENT_STATE & READY_FOR_DATA in SDCard Status Register definition (response type R1) /////////////////////////////////////////////////////////////////////////////// #define AT91C_SR_READY_FOR_DATA (1 << 8) // corresponds to buffer empty signalling on the bus #define AT91C_SR_IDLE (0 << 9) #define AT91C_SR_READY (1 << 9) #define AT91C_SR_IDENT (2 << 9) #define AT91C_SR_STBY (3 << 9) #define AT91C_SR_TRAN (4 << 9) #define AT91C_SR_DATA (5 << 9) #define AT91C_SR_RCV (6 << 9) #define AT91C_SR_PRG (7 << 9) #define AT91C_SR_DIS (8 << 9) #define AT91C_SR_CARD_SELECTED (AT91C_SR_READY_FOR_DATA + AT91C_SR_TRAN) #define MMC_FIRST_RCA 0xCAFE /////////////////////////////////////////////////////////////////////////////// // MMC CSD register header File // CSD_x_xxx_S for shift value for word x // CSD_x_xxx_M for mask value for word x /////////////////////////////////////////////////////////////////////////////// // First Response INT <=> CSD[3] : bits 0 to 31 #define CSD_3_BIT0_S 0 // [0:0] #define CSD_3_BIT0_M 0x01 #define CSD_3_CRC_S 1 // [7:1] #define CSD_3_CRC_M 0x7F #define CSD_3_MMC_ECC_S 8 // [9:8] reserved for MMC compatibility #define CSD_3_MMC_ECC_M 0x03 #define CSD_3_FILE_FMT_S 10 // [11:10] #define CSD_3_FILE_FMT_M 0x03 #define CSD_3_TMP_WP_S 12 // [12:12] #define CSD_3_TMP_WP_M 0x01 #define CSD_3_PERM_WP_S 13 // [13:13] #define CSD_3_PERM_WP_M 0x01 #define CSD_3_COPY_S 14 // [14:14] #define CSD_3_COPY_M 0x01 #define CSD_3_FILE_FMT_GRP_S 15 // [15:15] #define CSD_3_FILE_FMT_GRP_M 0x01 // reserved 16 // [20:16] // reserved 0x1F #define CSD_3_WBLOCK_P_S 21 // [21:21] #define CSD_3_WBLOCK_P_M 0x01 #define CSD_3_WBLEN_S 22 // [25:22] #define CSD_3_WBLEN_M 0x0F #define CSD_3_R2W_F_S 26 // [28:26] #define CSD_3_R2W_F_M 0x07 #define CSD_3_MMC_DEF_ECC_S 29 // [30:29] reserved for MMC compatibility #define CSD_3_MMC_DEF_ECC_M 0x03 #define CSD_3_WP_GRP_EN_S 31 // [31:31] #define CSD_3_WP_GRP_EN_M 0x01 // Second Response INT <=> CSD[2] : bits 32 to 63 #define CSD_2_v21_WP_GRP_SIZE_S 0 // [38:32] #define CSD_2_v21_WP_GRP_SIZE_M 0x7F #define CSD_2_v21_SECT_SIZE_S 7 // [45:39] #define CSD_2_v21_SECT_SIZE_M 0x7F #define CSD_2_v21_ER_BLEN_EN_S 14 // [46:46] #define CSD_2_v21_ER_BLEN_EN_M 0x01 #define CSD_2_v22_WP_GRP_SIZE_S 0 // [36:32] #define CSD_2_v22_WP_GRP_SIZE_M 0x1F #define CSD_2_v22_ER_GRP_SIZE_S 5 // [41:37] #define CSD_2_v22_ER_GRP_SIZE_M 0x1F #define CSD_2_v22_SECT_SIZE_S 10 // [46:42] #define CSD_2_v22_SECT_SIZE_M 0x1F #define CSD_2_C_SIZE_M_S 15 // [49:47] #define CSD_2_C_SIZE_M_M 0x07 #define CSD_2_VDD_WMAX_S 18 // [52:50] #define CSD_2_VDD_WMAX_M 0x07 #define CSD_2_VDD_WMIN_S 21 // [55:53] #define CSD_2_VDD_WMIN_M 0x07 #define CSD_2_RCUR_MAX_S 24 // [58:56] #define CSD_2_RCUR_MAX_M 0x07 #define CSD_2_RCUR_MIN_S 27 // [61:59] #define CSD_2_RCUR_MIN_M 0x07 #define CSD_2_CSIZE_L_S 30 // [63:62] <=> 2 LSB of CSIZE #define CSD_2_CSIZE_L_M 0x03 // Third Response INT <=> CSD[1] : bits 64 to 95 #define CSD_1_CSIZE_H_S 0 // [73:64] <=> 10 MSB of CSIZE #define CSD_1_CSIZE_H_M 0x03FF // reserved 10 // [75:74] // reserved 0x03 #define CSD_1_DSR_I_S 12 // [76:76] #define CSD_1_DSR_I_M 0x01 #define CSD_1_RD_B_MIS_S 13 // [77:77] #define CSD_1_RD_B_MIS_M 0x01 #define CSD_1_WR_B_MIS_S 14 // [78:78] #define CSD_1_WR_B_MIS_M 0x01 #define CSD_1_RD_B_PAR_S 15 // [79:79] #define
CSD_1_RD_B_PAR_M 0x01 #define CSD_1_RD_B_LEN_S 16 // [83:80] #define CSD_1_RD_B_LEN_M 0x0F #define CSD_1_CCC_S 20 // [95:84] #define CSD_1_CCC_M 0x0FFF // Fourth Response INT <=> CSD[0] : bits 96 to 127 #define CSD_0_TRANS_SPEED_S 0 // [103:96] #define CSD_0_TRANS_SPEED_M 0xFF #define CSD_0_NSAC_S 8 // [111:104] #define CSD_0_NSAC_M 0xFF #define CSD_0_TAAC_S 16 // [119:112] #define CSD_0_TAAC_M 0xFF // reserved 24 // [121:120] // reserved 0x03 #define CSD_0_MMC_SPEC_VERS_S 26 // [125:122] reserved for MMC compatibility #define CSD_0_MMC_SPEC_VERS_M 0x0F #define CSD_0_STRUCT_S 30 // [127:126] #define CSD_0_STRUCT_M 0x03 /////////////////////////////////////////////////////////////////////////////// #endif Index: head/sys/boot/i386/libfirewire/fwohci.h =================================================================== --- head/sys/boot/i386/libfirewire/fwohci.h (revision 258779) +++ head/sys/boot/i386/libfirewire/fwohci.h (revision 258780) @@ -1,162 +1,162 @@ /* * Copyright (c) 2007 Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ * */ #define MAX_OHCI 5 #define CROMSIZE 0x400 struct fw_eui64 { uint32_t hi, lo; }; struct fwohci_softc { uint32_t locator; uint32_t devid; uint32_t base_addr; uint32_t bus_id; uint32_t handle; int32_t state; struct crom_src_buf *crom_src_buf; struct crom_src *crom_src; struct crom_chunk *crom_root; struct fw_eui64 eui; int speed; int maxrec; uint32_t *config_rom; char config_rom_buf[CROMSIZE*2]; /* double size for alignment */ }; int fwohci_init(struct fwohci_softc *, int); void fwohci_ibr(struct fwohci_softc *); void fwohci_poll(struct fwohci_softc *); #define FWOHCI_STATE_DEAD (-1) #define FWOHCI_STATE_INIT 0 #define FWOHCI_STATE_ENABLED 1 #define FWOHCI_STATE_BUSRESET 2 #define FWOHCI_STATE_NORMAL 3 #define OREAD(f, o) (*(volatile uint32_t *)((f)->handle + (o))) #define OWRITE(f, o, v) (*(volatile uint32_t *)((f)->handle + (o)) = (v)) #define OHCI_VERSION 0x00 #define OHCI_ATRETRY 0x08 #define OHCI_CROMHDR 0x18 #define OHCI_BUS_ID 0x1c #define OHCI_BUS_OPT 0x20 -#define OHCI_BUSIRMC (1 << 31) +#define OHCI_BUSIRMC (1U << 31) #define OHCI_BUSCMC (1 << 30) #define OHCI_BUSISC (1 << 29) #define OHCI_BUSBMC (1 << 28) #define OHCI_BUSPMC (1 << 27) #define OHCI_BUSFNC OHCI_BUSIRMC | OHCI_BUSCMC | OHCI_BUSISC |\ OHCI_BUSBMC | OHCI_BUSPMC #define OHCI_EUID_HI 0x24 #define OHCI_EUID_LO 0x28 #define OHCI_CROMPTR 0x34 #define OHCI_HCCCTL 0x50 #define OHCI_HCCCTLCLR 0x54 #define OHCI_AREQHI 0x100 #define OHCI_AREQHICLR 0x104 #define OHCI_AREQLO 0x108 #define OHCI_AREQLOCLR 0x10c #define OHCI_PREQHI 0x110 #define OHCI_PREQHICLR 0x114 #define OHCI_PREQLO 0x118 #define OHCI_PREQLOCLR 0x11c #define OHCI_PREQUPPER 0x120 #define OHCI_SID_BUF 0x64 #define OHCI_SID_CNT 0x68 -#define OHCI_SID_ERR (1 << 31) +#define OHCI_SID_ERR (1U << 31) #define OHCI_SID_CNT_MASK 0xffc #define OHCI_IT_STAT 0x90 #define OHCI_IT_STATCLR 0x94 #define OHCI_IT_MASK 0x98 #define OHCI_IT_MASKCLR 0x9c #define OHCI_IR_STAT 0xa0 #define OHCI_IR_STATCLR 0xa4 #define OHCI_IR_MASK 0xa8 #define OHCI_IR_MASKCLR 0xac #define OHCI_LNKCTL 0xe0 #define OHCI_LNKCTLCLR 0xe4 #define OHCI_PHYACCESS 0xec #define OHCI_CYCLETIMER 0xf0 #define OHCI_DMACTL(off) (off) #define OHCI_DMACTLCLR(off) (off + 4) #define OHCI_DMACMD(off) (off + 0xc) #define OHCI_DMAMATCH(off) (off + 0x10) #define OHCI_ATQOFF 0x180 #define OHCI_ATQCTL OHCI_ATQOFF #define OHCI_ATQCTLCLR (OHCI_ATQOFF + 4) #define OHCI_ATQCMD (OHCI_ATQOFF + 0xc) #define OHCI_ATQMATCH (OHCI_ATQOFF + 0x10) #define OHCI_ATSOFF 0x1a0 #define OHCI_ATSCTL OHCI_ATSOFF #define OHCI_ATSCTLCLR (OHCI_ATSOFF + 4) #define OHCI_ATSCMD (OHCI_ATSOFF + 0xc) #define OHCI_ATSMATCH (OHCI_ATSOFF + 0x10) #define OHCI_ARQOFF 0x1c0 #define OHCI_ARQCTL OHCI_ARQOFF #define OHCI_ARQCTLCLR (OHCI_ARQOFF + 4) #define OHCI_ARQCMD (OHCI_ARQOFF + 0xc) #define OHCI_ARQMATCH (OHCI_ARQOFF + 0x10) #define OHCI_ARSOFF 0x1e0 #define OHCI_ARSCTL OHCI_ARSOFF #define OHCI_ARSCTLCLR (OHCI_ARSOFF + 4) #define OHCI_ARSCMD (OHCI_ARSOFF + 0xc) #define OHCI_ARSMATCH (OHCI_ARSOFF + 0x10) #define OHCI_ITOFF(CH) (0x200 + 0x10 * (CH)) #define OHCI_ITCTL(CH) (OHCI_ITOFF(CH)) #define OHCI_ITCTLCLR(CH) (OHCI_ITOFF(CH) + 4) #define OHCI_ITCMD(CH) (OHCI_ITOFF(CH) + 0xc) #define OHCI_IROFF(CH) (0x400 + 0x20 * (CH)) #define OHCI_IRCTL(CH) (OHCI_IROFF(CH)) #define OHCI_IRCTLCLR(CH) (OHCI_IROFF(CH) + 4) #define OHCI_IRCMD(CH) (OHCI_IROFF(CH) + 0xc) #define OHCI_IRMATCH(CH) (OHCI_IROFF(CH) + 0x10) Index: head/sys/boot/i386/libfirewire/fwohcireg.h =================================================================== --- 
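The substantive change in this file, and the pattern repeated throughout this revision, is rewriting (1 << 31) as (1U << 31). With a 32-bit int, shifting a 1 into the sign bit is undefined behavior in C99 (6.5.7), and even where it happens to "work", the signed result sign-extends when widened to a 64-bit type. A small self-contained illustration, not part of the patched sources:

        #include <stdio.h>
        #include <stdint.h>

        int
        main(void)
        {
                /* UB; on typical implementations yields 0xffffffff80000000 */
                uint64_t bad  = 1 << 31;
                /* well defined: 0x0000000080000000 */
                uint64_t good = 1U << 31;

                printf("%#llx %#llx\n", (unsigned long long)bad,
                    (unsigned long long)good);
                return (0);
        }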
head/sys/boot/i386/libfirewire/fwohcireg.h (revision 258779) +++ head/sys/boot/i386/libfirewire/fwohcireg.h (revision 258780) @@ -1,369 +1,369 @@ /* * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ * */ #define PCI_CBMEM PCIR_BAR(0) #define FW_VENDORID_NATSEMI 0x100B #define FW_VENDORID_NEC 0x1033 #define FW_VENDORID_SIS 0x1039 #define FW_VENDORID_TI 0x104c #define FW_VENDORID_SONY 0x104d #define FW_VENDORID_VIA 0x1106 #define FW_VENDORID_RICOH 0x1180 #define FW_VENDORID_APPLE 0x106b #define FW_VENDORID_LUCENT 0x11c1 #define FW_VENDORID_INTEL 0x8086 #define FW_VENDORID_ADAPTEC 0x9004 #define FW_DEVICE_CS4210 (0x000f << 16) #define FW_DEVICE_UPD861 (0x0063 << 16) #define FW_DEVICE_UPD871 (0x00ce << 16) #define FW_DEVICE_UPD72870 (0x00cd << 16) #define FW_DEVICE_UPD72873 (0x00e7 << 16) #define FW_DEVICE_UPD72874 (0x00f2 << 16) #define FW_DEVICE_TITSB22 (0x8009 << 16) #define FW_DEVICE_TITSB23 (0x8019 << 16) #define FW_DEVICE_TITSB26 (0x8020 << 16) #define FW_DEVICE_TITSB43 (0x8021 << 16) #define FW_DEVICE_TITSB43A (0x8023 << 16) #define FW_DEVICE_TITSB43AB23 (0x8024 << 16) #define FW_DEVICE_TITSB82AA2 (0x8025 << 16) #define FW_DEVICE_TITSB43AB21 (0x8026 << 16) #define FW_DEVICE_TIPCI4410A (0x8017 << 16) #define FW_DEVICE_TIPCI4450 (0x8011 << 16) #define FW_DEVICE_TIPCI4451 (0x8027 << 16) #define FW_DEVICE_CXD1947 (0x8009 << 16) #define FW_DEVICE_CXD3222 (0x8039 << 16) #define FW_DEVICE_VT6306 (0x3044 << 16) #define FW_DEVICE_R5C551 (0x0551 << 16) #define FW_DEVICE_R5C552 (0x0552 << 16) #define FW_DEVICE_PANGEA (0x0030 << 16) #define FW_DEVICE_UNINORTH (0x0031 << 16) #define FW_DEVICE_AIC5800 (0x5800 << 16) #define FW_DEVICE_FW322 (0x5811 << 16) #define FW_DEVICE_7007 (0x7007 << 16) #define FW_DEVICE_82372FB (0x7605 << 16) #define PCI_INTERFACE_OHCI 0x10 #define FW_OHCI_BASE_REG 0x10 #define OHCI_DMA_ITCH 0x20 #define OHCI_DMA_IRCH 0x20 #define OHCI_MAX_DMA_CH (0x4 + OHCI_DMA_ITCH + OHCI_DMA_IRCH) typedef uint32_t fwohcireg_t; /* for PCI */ #if BYTE_ORDER == BIG_ENDIAN #define FWOHCI_DMA_WRITE(x, y) ((x) = htole32(y)) #define FWOHCI_DMA_READ(x) le32toh(x) #define FWOHCI_DMA_SET(x, y) ((x) |= htole32(y)) #define FWOHCI_DMA_CLEAR(x, y) ((x) &= htole32(~(y))) #else #define FWOHCI_DMA_WRITE(x, y) ((x) = (y)) #define FWOHCI_DMA_READ(x) (x) #define FWOHCI_DMA_SET(x, y) ((x) |= (y)) #define FWOHCI_DMA_CLEAR(x, y) ((x) &= ~(y)) #endif struct fwohcidb { union { struct { uint32_t cmd; uint32_t addr; uint32_t depend; uint32_t res; } desc; uint32_t immed[4]; } db; #define OHCI_STATUS_SHIFT 16 #define OHCI_COUNT_MASK 0xffff #define OHCI_OUTPUT_MORE (0 << 28) #define OHCI_OUTPUT_LAST (1 << 28) #define OHCI_INPUT_MORE (2 << 28) #define OHCI_INPUT_LAST (3 << 28) #define OHCI_STORE_QUAD (4 << 28) #define OHCI_LOAD_QUAD (5 << 28) #define OHCI_NOP (6 << 28) #define OHCI_STOP (7 << 28) #define OHCI_STORE (8 << 28) #define OHCI_CMD_MASK (0xf << 28) #define OHCI_UPDATE (1 << 27) #define OHCI_KEY_ST0 (0 << 24) #define OHCI_KEY_ST1 (1 << 24) #define OHCI_KEY_ST2 (2 << 24) #define OHCI_KEY_ST3 (3 << 24) #define OHCI_KEY_REGS (5 << 24) #define OHCI_KEY_SYS (6 << 24) #define OHCI_KEY_DEVICE (7 << 24) #define OHCI_KEY_MASK (7 << 24) #define OHCI_INTERRUPT_NEVER (0 << 20) #define OHCI_INTERRUPT_TRUE (1 << 20) #define OHCI_INTERRUPT_FALSE (2 << 20) #define OHCI_INTERRUPT_ALWAYS (3 << 20) #define OHCI_BRANCH_NEVER (0 << 18) #define OHCI_BRANCH_TRUE (1 << 18) #define OHCI_BRANCH_FALSE (2 << 18) #define OHCI_BRANCH_ALWAYS (3 << 18) #define OHCI_BRANCH_MASK (3 << 18) #define OHCI_WAIT_NEVER (0 << 16) #define OHCI_WAIT_TRUE (1 << 16) #define OHCI_WAIT_FALSE (2 << 16) #define OHCI_WAIT_ALWAYS (3 << 16) }; #define OHCI_SPD_S100 0x4 #define OHCI_SPD_S200 0x1 #define OHCI_SPD_S400 0x2 #define FWOHCIEV_NOSTAT 0 
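The FWOHCI_DMA_* macros above exist because OHCI DMA descriptors are little-endian structures that the controller reads directly from memory: on a big-endian host every store byte-swaps via htole32(), on a little-endian host the macros collapse to plain assignments, so the same descriptor-building code works on both. A sketch of their intended use with a hypothetical fill_output_last() helper (not in the original sources), combining the command bits defined in struct fwohcidb above:

        static void
        fill_output_last(struct fwohcidb *db, uint32_t paddr, uint16_t len)
        {
                /* one OUTPUT_LAST descriptor: interrupt and branch always */
                FWOHCI_DMA_WRITE(db->db.desc.cmd,
                    OHCI_OUTPUT_LAST | OHCI_INTERRUPT_ALWAYS |
                    OHCI_BRANCH_ALWAYS | len);
                FWOHCI_DMA_WRITE(db->db.desc.addr, paddr);
                FWOHCI_DMA_WRITE(db->db.desc.depend, 0);
                FWOHCI_DMA_WRITE(db->db.desc.res, 0);
        }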
#define FWOHCIEV_LONGP 2 #define FWOHCIEV_MISSACK 3 #define FWOHCIEV_UNDRRUN 4 #define FWOHCIEV_OVRRUN 5 #define FWOHCIEV_DESCERR 6 #define FWOHCIEV_DTRDERR 7 #define FWOHCIEV_DTWRERR 8 #define FWOHCIEV_BUSRST 9 #define FWOHCIEV_TIMEOUT 0xa #define FWOHCIEV_TCODERR 0xb #define FWOHCIEV_UNKNOWN 0xe #define FWOHCIEV_FLUSHED 0xf #define FWOHCIEV_ACKCOMPL 0x11 #define FWOHCIEV_ACKPEND 0x12 #define FWOHCIEV_ACKBSX 0x14 #define FWOHCIEV_ACKBSA 0x15 #define FWOHCIEV_ACKBSB 0x16 #define FWOHCIEV_ACKTARD 0x1b #define FWOHCIEV_ACKDERR 0x1d #define FWOHCIEV_ACKTERR 0x1e #define FWOHCIEV_MASK 0x1f struct ohci_dma{ fwohcireg_t cntl; #define OHCI_CNTL_CYCMATCH_S (0x1 << 31) #define OHCI_CNTL_BUFFIL (0x1 << 31) #define OHCI_CNTL_ISOHDR (0x1 << 30) #define OHCI_CNTL_CYCMATCH_R (0x1 << 29) #define OHCI_CNTL_MULTICH (0x1 << 28) #define OHCI_CNTL_DMA_RUN (0x1 << 15) #define OHCI_CNTL_DMA_WAKE (0x1 << 12) #define OHCI_CNTL_DMA_DEAD (0x1 << 11) #define OHCI_CNTL_DMA_ACTIVE (0x1 << 10) #define OHCI_CNTL_DMA_BT (0x1 << 8) #define OHCI_CNTL_DMA_BAD (0x1 << 7) #define OHCI_CNTL_DMA_STAT (0xff) fwohcireg_t cntl_clr; fwohcireg_t dummy0; fwohcireg_t cmd; fwohcireg_t match; fwohcireg_t dummy1; fwohcireg_t dummy2; fwohcireg_t dummy3; }; struct ohci_itdma{ fwohcireg_t cntl; fwohcireg_t cntl_clr; fwohcireg_t dummy0; fwohcireg_t cmd; }; struct ohci_registers { fwohcireg_t ver; /* Version No. 0x0 */ fwohcireg_t guid; /* GUID_ROM No. 0x4 */ fwohcireg_t retry; /* AT retries 0x8 */ #define FWOHCI_RETRY 0x8 fwohcireg_t csr_data; /* CSR data 0xc */ fwohcireg_t csr_cmp; /* CSR compare 0x10 */ fwohcireg_t csr_cntl; /* CSR compare 0x14 */ fwohcireg_t rom_hdr; /* config ROM ptr. 0x18 */ fwohcireg_t bus_id; /* BUS_ID 0x1c */ fwohcireg_t bus_opt; /* BUS option 0x20 */ #define FWOHCIGUID_H 0x24 #define FWOHCIGUID_L 0x28 fwohcireg_t guid_hi; /* GUID hi 0x24 */ fwohcireg_t guid_lo; /* GUID lo 0x28 */ fwohcireg_t dummy0[2]; /* dummy 0x2c-0x30 */ fwohcireg_t config_rom; /* config ROM map 0x34 */ fwohcireg_t post_wr_lo; /* post write addr lo 0x38 */ fwohcireg_t post_wr_hi; /* post write addr hi 0x3c */ fwohcireg_t vender; /* vender ID 0x40 */ fwohcireg_t dummy1[3]; /* dummy 0x44-0x4c */ fwohcireg_t hcc_cntl_set; /* HCC control set 0x50 */ fwohcireg_t hcc_cntl_clr; /* HCC control clr 0x54 */ -#define OHCI_HCC_BIBIV (1 << 31) /* BIBimage Valid */ +#define OHCI_HCC_BIBIV (1U << 31) /* BIBimage Valid */ #define OHCI_HCC_BIGEND (1 << 30) /* noByteSwapData */ #define OHCI_HCC_PRPHY (1 << 23) /* programPhyEnable */ #define OHCI_HCC_PHYEN (1 << 22) /* aPhyEnhanceEnable */ #define OHCI_HCC_LPS (1 << 19) /* LPS */ #define OHCI_HCC_POSTWR (1 << 18) /* postedWriteEnable */ #define OHCI_HCC_LINKEN (1 << 17) /* linkEnable */ #define OHCI_HCC_RESET (1 << 16) /* softReset */ fwohcireg_t dummy2[2]; /* dummy 0x58-0x5c */ fwohcireg_t dummy3[1]; /* dummy 0x60 */ fwohcireg_t sid_buf; /* self id buffer 0x64 */ fwohcireg_t sid_cnt; /* self id count 0x68 */ fwohcireg_t dummy4[1]; /* dummy 0x6c */ fwohcireg_t ir_mask_hi_set; /* ir mask hi set 0x70 */ fwohcireg_t ir_mask_hi_clr; /* ir mask hi set 0x74 */ fwohcireg_t ir_mask_lo_set; /* ir mask hi set 0x78 */ fwohcireg_t ir_mask_lo_clr; /* ir mask hi set 0x7c */ #define FWOHCI_INTSTAT 0x80 #define FWOHCI_INTSTATCLR 0x84 #define FWOHCI_INTMASK 0x88 #define FWOHCI_INTMASKCLR 0x8c fwohcireg_t int_stat; /* 0x80 */ fwohcireg_t int_clear; /* 0x84 */ fwohcireg_t int_mask; /* 0x88 */ fwohcireg_t int_mask_clear; /* 0x8c */ fwohcireg_t it_int_stat; /* 0x90 */ fwohcireg_t it_int_clear; /* 0x94 */ fwohcireg_t it_int_mask; /* 
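The hcc_cntl_set/hcc_cntl_clr pair above follows OHCI's general set/clear register convention: each control register is exposed at two addresses, where a write to the "set" address turns the written bits on and a write to the "clear" address turns them off, so individual bits can be flipped without a read-modify-write cycle. An illustrative sketch using the OWRITE() macro and OHCI_HCCCTL/OHCI_HCCCTLCLR offsets from fwohci.h earlier in this patch (link_power_on/off are hypothetical helpers):

        static void
        link_power_on(struct fwohci_softc *sc)
        {
                /* sets only the LPS bit, leaves the rest untouched */
                OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LPS);
        }

        static void
        link_power_off(struct fwohci_softc *sc)
        {
                /* clears only the LPS bit */
                OWRITE(sc, OHCI_HCCCTLCLR, OHCI_HCC_LPS);
        }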
0x98 */ fwohcireg_t it_mask_clear; /* 0x9c */ fwohcireg_t ir_int_stat; /* 0xa0 */ fwohcireg_t ir_int_clear; /* 0xa4 */ fwohcireg_t ir_int_mask; /* 0xa8 */ fwohcireg_t ir_mask_clear; /* 0xac */ fwohcireg_t dummy5[11]; /* dummy 0xb0-d8 */ fwohcireg_t fairness; /* fairness control 0xdc */ fwohcireg_t link_cntl; /* Chip control 0xe0*/ fwohcireg_t link_cntl_clr; /* Chip control clear 0xe4*/ #define FWOHCI_NODEID 0xe8 fwohcireg_t node; /* Node ID 0xe8 */ -#define OHCI_NODE_VALID (1 << 31) +#define OHCI_NODE_VALID (1U << 31) #define OHCI_NODE_ROOT (1 << 30) #define OHCI_ASYSRCBUS 1 fwohcireg_t phy_access; /* PHY cntl 0xec */ #define PHYDEV_RDDONE (1<<31) #define PHYDEV_RDCMD (1<<15) #define PHYDEV_WRCMD (1<<14) #define PHYDEV_REGADDR 8 #define PHYDEV_WRDATA 0 #define PHYDEV_RDADDR 24 #define PHYDEV_RDDATA 16 fwohcireg_t cycle_timer; /* Cycle Timer 0xf0 */ fwohcireg_t dummy6[3]; /* dummy 0xf4-fc */ fwohcireg_t areq_hi; /* Async req. filter hi 0x100 */ fwohcireg_t areq_hi_clr; /* Async req. filter hi 0x104 */ fwohcireg_t areq_lo; /* Async req. filter lo 0x108 */ fwohcireg_t areq_lo_clr; /* Async req. filter lo 0x10c */ fwohcireg_t preq_hi; /* Async req. filter hi 0x110 */ fwohcireg_t preq_hi_clr; /* Async req. filter hi 0x114 */ fwohcireg_t preq_lo; /* Async req. filter lo 0x118 */ fwohcireg_t preq_lo_clr; /* Async req. filter lo 0x11c */ fwohcireg_t pys_upper; /* Physical Upper bound 0x120 */ fwohcireg_t dummy7[23]; /* dummy 0x124-0x17c */ /* 0x180, 0x184, 0x188, 0x18c */ /* 0x190, 0x194, 0x198, 0x19c */ /* 0x1a0, 0x1a4, 0x1a8, 0x1ac */ /* 0x1b0, 0x1b4, 0x1b8, 0x1bc */ /* 0x1c0, 0x1c4, 0x1c8, 0x1cc */ /* 0x1d0, 0x1d4, 0x1d8, 0x1dc */ /* 0x1e0, 0x1e4, 0x1e8, 0x1ec */ /* 0x1f0, 0x1f4, 0x1f8, 0x1fc */ struct ohci_dma dma_ch[0x4]; /* 0x200, 0x204, 0x208, 0x20c */ /* 0x210, 0x204, 0x208, 0x20c */ struct ohci_itdma dma_itch[0x20]; /* 0x400, 0x404, 0x408, 0x40c */ /* 0x410, 0x404, 0x408, 0x40c */ struct ohci_dma dma_irch[0x20]; }; #define OHCI_CNTL_CYCSRC (0x1 << 22) #define OHCI_CNTL_CYCMTR (0x1 << 21) #define OHCI_CNTL_CYCTIMER (0x1 << 20) #define OHCI_CNTL_PHYPKT (0x1 << 10) #define OHCI_CNTL_SID (0x1 << 9) #define OHCI_INT_DMA_ATRQ (0x1 << 0) #define OHCI_INT_DMA_ATRS (0x1 << 1) #define OHCI_INT_DMA_ARRQ (0x1 << 2) #define OHCI_INT_DMA_ARRS (0x1 << 3) #define OHCI_INT_DMA_PRRQ (0x1 << 4) #define OHCI_INT_DMA_PRRS (0x1 << 5) #define OHCI_INT_DMA_IT (0x1 << 6) #define OHCI_INT_DMA_IR (0x1 << 7) #define OHCI_INT_PW_ERR (0x1 << 8) #define OHCI_INT_LR_ERR (0x1 << 9) #define OHCI_INT_PHY_SID (0x1 << 16) #define OHCI_INT_PHY_BUS_R (0x1 << 17) #define OHCI_INT_REG_FAIL (0x1 << 18) #define OHCI_INT_PHY_INT (0x1 << 19) #define OHCI_INT_CYC_START (0x1 << 20) #define OHCI_INT_CYC_64SECOND (0x1 << 21) #define OHCI_INT_CYC_LOST (0x1 << 22) #define OHCI_INT_CYC_ERR (0x1 << 23) #define OHCI_INT_ERR (0x1 << 24) #define OHCI_INT_CYC_LONG (0x1 << 25) #define OHCI_INT_PHY_REG (0x1 << 26) #define OHCI_INT_EN (0x1 << 31) #define IP_CHANNELS 0x0234 #define FWOHCI_MAXREC 2048 #define OHCI_ISORA 0x02 #define OHCI_ISORB 0x04 #define FWOHCITCODE_PHY 0xe Index: head/sys/dev/aac/aacvar.h =================================================================== --- head/sys/dev/aac/aacvar.h (revision 258779) +++ head/sys/dev/aac/aacvar.h (revision 258780) @@ -1,648 +1,648 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2001 Scott Long * Copyright (c) 2000 BSDi * Copyright (c) 2001 Adaptec, Inc. * All rights reserved. 
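The PHYDEV_* fields above describe the phy_access register layout: command and completion bits plus address and data lanes at fixed shifts. A hedged sketch of a polled PHY register read built from them (phy_read() is a hypothetical helper using OREAD/OWRITE from fwohci.h earlier in this patch; the real driver's polling and error handling may differ):

        static int
        phy_read(struct fwohci_softc *sc, int addr)
        {
                uint32_t v;
                int i;

                /* post the read command for the requested PHY register */
                OWRITE(sc, OHCI_PHYACCESS,
                    PHYDEV_RDCMD | (addr << PHYDEV_REGADDR));
                for (i = 0; i < 1000; i++) {
                        v = OREAD(sc, OHCI_PHYACCESS);
                        /* command bit self-clears; done bit flags the data */
                        if ((v & PHYDEV_RDCMD) == 0 &&
                            (v & PHYDEV_RDDONE) != 0)
                                return ((v >> PHYDEV_RDDATA) & 0xff);
                }
                return (-1);    /* timed out */
        }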
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include SYSCTL_DECL(_hw_aac); #define AAC_TYPE_DEVO 1 #define AAC_TYPE_ALPHA 2 #define AAC_TYPE_BETA 3 #define AAC_TYPE_RELEASE 4 #define AAC_DRIVER_MAJOR_VERSION 2 #define AAC_DRIVER_MINOR_VERSION 1 #define AAC_DRIVER_BUGFIX_LEVEL 9 #define AAC_DRIVER_TYPE AAC_TYPE_RELEASE #ifndef AAC_DRIVER_BUILD # define AAC_DRIVER_BUILD 1 #endif /* * Driver Parameter Definitions */ /* * The firmware interface allows for a 16-bit s/g list length. We limit * ourselves to a reasonable maximum and ensure alignment. */ #define AAC_MAXSGENTRIES 64 /* max S/G entries, limit 65535 */ /* * We allocate a small set of FIBs for the adapter to use to send us messages. */ #define AAC_ADAPTER_FIBS 8 /* * The controller reports status events in AIFs. We hang on to a number of * these in order to pass them out to user-space management tools. */ #define AAC_AIFQ_LENGTH 64 /* * Firmware messages are passed in the printf buffer. */ #define AAC_PRINTF_BUFSIZE 256 /* * We wait this many seconds for the adapter to come ready if it is still * booting */ #define AAC_BOOT_TIMEOUT (3 * 60) /* * Timeout for immediate commands. */ #define AAC_IMMEDIATE_TIMEOUT 30 /* seconds */ /* * Timeout for normal commands */ #define AAC_CMD_TIMEOUT 120 /* seconds */ /* * Rate at which we periodically check for timed out commands and kick the * controller. */ #define AAC_PERIODIC_INTERVAL 20 /* seconds */ /* * Per-container data structure */ struct aac_container { struct aac_mntobj co_mntobj; device_t co_disk; int co_found; TAILQ_ENTRY(aac_container) co_link; }; /* * Per-SIM data structure */ struct aac_cam; struct aac_sim { device_t sim_dev; int TargetsPerBus; int BusNumber; int InitiatorBusId; struct aac_softc *aac_sc; struct aac_cam *aac_cam; TAILQ_ENTRY(aac_sim) sim_link; }; /* * Per-disk structure */ struct aac_disk { device_t ad_dev; struct aac_softc *ad_controller; struct aac_container *ad_container; struct disk *ad_disk; int ad_flags; #define AAC_DISK_OPEN (1<<0) int ad_cylinders; int ad_heads; int ad_sectors; u_int64_t ad_size; int unit; }; /* * Per-command control structure. 
*/ struct aac_command { TAILQ_ENTRY(aac_command) cm_link; /* list linkage */ struct aac_softc *cm_sc; /* controller that owns us */ struct aac_fib *cm_fib; /* FIB associated with this * command */ u_int64_t cm_fibphys; /* bus address of the FIB */ void *cm_data; /* pointer to data in kernel * space */ u_int32_t cm_datalen; /* data length */ bus_dmamap_t cm_datamap; /* DMA map for bio data */ struct aac_sg_table *cm_sgtable; /* pointer to s/g table in * command */ u_int cm_flags; #define AAC_CMD_MAPPED (1<<0) /* command has had its data * mapped */ #define AAC_CMD_DATAIN (1<<1) /* command involves data moving * from controller to host */ #define AAC_CMD_DATAOUT (1<<2) /* command involves data moving * from host to controller */ #define AAC_CMD_COMPLETED (1<<3) /* command has been completed */ #define AAC_CMD_TIMEDOUT (1<<4) /* command taken too long */ #define AAC_ON_AACQ_FREE (1<<5) #define AAC_ON_AACQ_READY (1<<6) #define AAC_ON_AACQ_BUSY (1<<7) #define AAC_ON_AACQ_AIF (1<<8) #define AAC_ON_AACQ_NORM (1<<10) #define AAC_ON_AACQ_MASK ((1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<10)) #define AAC_QUEUE_FRZN (1<<9) /* Freeze the processing of * commands on the queue. */ #define AAC_REQ_BIO (1 << 11) #define AAC_REQ_CCB (1 << 12) void (*cm_complete)(struct aac_command *cm); void *cm_private; time_t cm_timestamp; /* command creation time */ int cm_queue; int cm_index; }; struct aac_fibmap { TAILQ_ENTRY(aac_fibmap) fm_link; /* list linkage */ struct aac_fib *aac_fibs; bus_dmamap_t aac_fibmap; struct aac_command *aac_commands; }; /* * We gather a number of adapter-visible items into a single structure. * * The ordering of this strucure may be important; we copy the Linux driver: * * Adapter FIBs * Init struct * Queue headers (Comm Area) * Printf buffer * * In addition, we add: * Sync Fib */ struct aac_common { /* fibs for the controller to send us messages */ struct aac_fib ac_fibs[AAC_ADAPTER_FIBS]; /* the init structure */ struct aac_adapter_init ac_init; /* arena within which the queue structures are kept */ u_int8_t ac_qbuf[sizeof(struct aac_queue_table) + AAC_QUEUE_ALIGN]; /* buffer for text messages from the controller */ char ac_printf[AAC_PRINTF_BUFSIZE]; /* fib for synchronous commands */ struct aac_fib ac_sync_fib; }; /* * Interface operations */ struct aac_interface { int (*aif_get_fwstatus)(struct aac_softc *sc); void (*aif_qnotify)(struct aac_softc *sc, int qbit); int (*aif_get_istatus)(struct aac_softc *sc); void (*aif_clr_istatus)(struct aac_softc *sc, int mask); void (*aif_set_mailbox)(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); int (*aif_get_mailbox)(struct aac_softc *sc, int mb); void (*aif_set_interrupts)(struct aac_softc *sc, int enable); int (*aif_send_command)(struct aac_softc *sc, struct aac_command *cm); int (*aif_get_outb_queue)(struct aac_softc *sc); void (*aif_set_outb_queue)(struct aac_softc *sc, int index); }; extern const struct aac_interface aac_rx_interface; extern const struct aac_interface aac_sa_interface; extern const struct aac_interface aac_fa_interface; extern const struct aac_interface aac_rkt_interface; #define AAC_GET_FWSTATUS(sc) ((sc)->aac_if->aif_get_fwstatus((sc))) #define AAC_QNOTIFY(sc, qbit) ((sc)->aac_if->aif_qnotify((sc), (qbit))) #define AAC_GET_ISTATUS(sc) ((sc)->aac_if->aif_get_istatus((sc))) #define AAC_CLEAR_ISTATUS(sc, mask) ((sc)->aac_if->aif_clr_istatus((sc), \ (mask))) #define AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3) \ ((sc)->aac_if->aif_set_mailbox((sc), (command), (arg0), 
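struct aac_interface above is a small vtable: each controller family (rx, sa, fa, rkt) exports one, and the AAC_* wrapper macros dispatch every hardware access through sc->aac_if, so the driver core never touches registers directly. A sketch of the resulting style, assuming a hypothetical aac_wait_ready() helper (not in the original sources); AAC_UP_AND_RUNNING is the firmware-ready status bit from aacreg.h:

        static int
        aac_wait_ready(struct aac_softc *sc)
        {
                int i;

                for (i = 0; i < AAC_BOOT_TIMEOUT; i++) {
                        /* expands to sc->aac_if->aif_get_fwstatus(sc) */
                        if (AAC_GET_FWSTATUS(sc) & AAC_UP_AND_RUNNING)
                                return (0);
                        DELAY(1000000);         /* one second */
                }
                return (ETIMEDOUT);
        }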
(arg1), (arg2), \ (arg3))) #define AAC_GET_MAILBOX(sc, mb) ((sc)->aac_if->aif_get_mailbox((sc), \ (mb))) #define AAC_MASK_INTERRUPTS(sc) ((sc)->aac_if->aif_set_interrupts((sc), \ 0)) #define AAC_UNMASK_INTERRUPTS(sc) ((sc)->aac_if->aif_set_interrupts((sc), \ 1)) #define AAC_SEND_COMMAND(sc, cm) ((sc)->aac_if->aif_send_command((sc), (cm))) #define AAC_GET_OUTB_QUEUE(sc) ((sc)->aac_if->aif_get_outb_queue((sc))) #define AAC_SET_OUTB_QUEUE(sc, idx) ((sc)->aac_if->aif_set_outb_queue((sc), (idx))) #define AAC_MEM0_SETREG4(sc, reg, val) bus_space_write_4(sc->aac_btag0, \ sc->aac_bhandle0, reg, val) #define AAC_MEM0_GETREG4(sc, reg) bus_space_read_4(sc->aac_btag0, \ sc->aac_bhandle0, reg) #define AAC_MEM0_SETREG2(sc, reg, val) bus_space_write_2(sc->aac_btag0, \ sc->aac_bhandle0, reg, val) #define AAC_MEM0_GETREG2(sc, reg) bus_space_read_2(sc->aac_btag0, \ sc->aac_bhandle0, reg) #define AAC_MEM0_SETREG1(sc, reg, val) bus_space_write_1(sc->aac_btag0, \ sc->aac_bhandle0, reg, val) #define AAC_MEM0_GETREG1(sc, reg) bus_space_read_1(sc->aac_btag0, \ sc->aac_bhandle0, reg) #define AAC_MEM1_SETREG4(sc, reg, val) bus_space_write_4(sc->aac_btag1, \ sc->aac_bhandle1, reg, val) #define AAC_MEM1_GETREG4(sc, reg) bus_space_read_4(sc->aac_btag1, \ sc->aac_bhandle1, reg) #define AAC_MEM1_SETREG2(sc, reg, val) bus_space_write_2(sc->aac_btag1, \ sc->aac_bhandle1, reg, val) #define AAC_MEM1_GETREG2(sc, reg) bus_space_read_2(sc->aac_btag1, \ sc->aac_bhandle1, reg) #define AAC_MEM1_SETREG1(sc, reg, val) bus_space_write_1(sc->aac_btag1, \ sc->aac_bhandle1, reg, val) #define AAC_MEM1_GETREG1(sc, reg) bus_space_read_1(sc->aac_btag1, \ sc->aac_bhandle1, reg) /* fib context (IOCTL) */ struct aac_fib_context { u_int32_t unique; int ctx_idx; int ctx_wrap; struct aac_fib_context *next, *prev; }; /* * Per-controller structure. */ struct aac_softc { /* bus connections */ device_t aac_dev; struct resource *aac_regs_res0, *aac_regs_res1; /* reg. if. 
window */ bus_space_handle_t aac_bhandle0, aac_bhandle1; /* bus space handle */ bus_space_tag_t aac_btag0, aac_btag1; /* bus space tag */ bus_dma_tag_t aac_parent_dmat; /* parent DMA tag */ bus_dma_tag_t aac_buffer_dmat; /* data buffer/command * DMA tag */ struct resource *aac_irq; /* interrupt */ void *aac_intr; /* interrupt handle */ eventhandler_tag eh; /* controller features, limits and status */ int aac_state; #define AAC_STATE_SUSPEND (1<<0) #define AAC_STATE_UNUSED0 (1<<1) #define AAC_STATE_INTERRUPTS_ON (1<<2) #define AAC_STATE_AIF_SLEEPER (1<<3) struct FsaRevision aac_revision; /* controller hardware interface */ int aac_hwif; #define AAC_HWIF_I960RX 0 #define AAC_HWIF_STRONGARM 1 #define AAC_HWIF_RKT 3 #define AAC_HWIF_NARK 4 #define AAC_HWIF_UNKNOWN -1 bus_dma_tag_t aac_common_dmat; /* common structure * DMA tag */ bus_dmamap_t aac_common_dmamap; /* common structure * DMA map */ struct aac_common *aac_common; u_int32_t aac_common_busaddr; const struct aac_interface *aac_if; /* command/fib resources */ bus_dma_tag_t aac_fib_dmat; /* DMA tag for allocing FIBs */ TAILQ_HEAD(,aac_fibmap) aac_fibmap_tqh; u_int total_fibs; struct aac_command *aac_commands; /* command management */ TAILQ_HEAD(,aac_command) aac_free; /* command structures * available for reuse */ TAILQ_HEAD(,aac_command) aac_ready; /* commands on hold for * controller resources */ TAILQ_HEAD(,aac_command) aac_busy; TAILQ_HEAD(,aac_event) aac_ev_cmfree; struct bio_queue_head aac_bioq; struct aac_queue_table *aac_queues; struct aac_queue_entry *aac_qentries[AAC_QUEUE_COUNT]; struct aac_qstat aac_qstat[AACQ_COUNT]; /* queue statistics */ /* connected containters */ TAILQ_HEAD(,aac_container) aac_container_tqh; struct mtx aac_container_lock; /* * The general I/O lock. This protects the sync fib, the lists, the * queues, and the registers. */ struct mtx aac_io_lock; /* delayed activity infrastructure */ struct task aac_task_complete; /* deferred-completion * task */ struct intr_config_hook aac_ich; /* management interface */ struct cdev *aac_dev_t; struct mtx aac_aifq_lock; struct aac_fib aac_aifq[AAC_AIFQ_LENGTH]; int aifq_idx; int aifq_filled; struct aac_fib_context *fibctx; struct selinfo rcv_select; struct proc *aifthread; int aifflags; #define AAC_AIFFLAGS_RUNNING (1 << 0) #define AAC_AIFFLAGS_UNUSED0 (1 << 1) #define AAC_AIFFLAGS_EXIT (1 << 2) #define AAC_AIFFLAGS_EXITED (1 << 3) #define AAC_AIFFLAGS_UNUSED1 (1 << 4) #define AAC_AIFFLAGS_ALLOCFIBS (1 << 5) #define AAC_AIFFLAGS_PENDING AAC_AIFFLAGS_ALLOCFIBS u_int32_t flags; #define AAC_FLAGS_PERC2QC (1 << 0) #define AAC_FLAGS_ENABLE_CAM (1 << 1) /* No SCSI passthrough */ #define AAC_FLAGS_CAM_NORESET (1 << 2) /* Fake SCSI resets */ #define AAC_FLAGS_CAM_PASSONLY (1 << 3) /* Only create pass devices */ #define AAC_FLAGS_SG_64BIT (1 << 4) /* Use 64-bit S/G addresses */ #define AAC_FLAGS_4GB_WINDOW (1 << 5) /* Device can access host mem * 2GB-4GB range */ #define AAC_FLAGS_NO4GB (1 << 6) /* Can't access host mem >2GB */ #define AAC_FLAGS_256FIBS (1 << 7) /* Can only do 256 commands */ #define AAC_FLAGS_BROKEN_MEMMAP (1 << 8) /* Broken HostPhysMemPages */ #define AAC_FLAGS_SLAVE (1 << 9) #define AAC_FLAGS_MASTER (1 << 10) #define AAC_FLAGS_NEW_COMM (1 << 11) /* New comm. 
interface supported */ #define AAC_FLAGS_RAW_IO (1 << 12) /* Raw I/O interface */ #define AAC_FLAGS_ARRAY_64BIT (1 << 13) /* 64-bit array size */ #define AAC_FLAGS_LBA_64BIT (1 << 14) /* 64-bit LBA support */ -#define AAC_FLAGS_NOMSI (1 << 31) /* Broken MSI */ +#define AAC_FLAGS_NOMSI (1U << 31) /* Broken MSI */ u_int32_t supported_options; u_int32_t scsi_method_id; TAILQ_HEAD(,aac_sim) aac_sim_tqh; struct callout aac_daemontime; /* clock daemon callout */ u_int32_t aac_max_fibs; /* max. FIB count */ u_int32_t aac_max_fibs_alloc; /* max. alloc. per alloc_commands() */ u_int32_t aac_max_fib_size; /* max. FIB size */ u_int32_t aac_sg_tablesize; /* max. sg count from host */ u_int32_t aac_max_sectors; /* max. I/O size from host (blocks) */ #define AAC_CAM_TARGET_WILDCARD ~0 void (*cam_rescan_cb)(struct aac_softc *, uint32_t, uint32_t); }; /* * Event callback mechanism for the driver */ #define AAC_EVENT_NONE 0x00 #define AAC_EVENT_CMFREE 0x01 #define AAC_EVENT_MASK 0xff #define AAC_EVENT_REPEAT 0x100 typedef void aac_event_cb_t(struct aac_softc *sc, struct aac_event *event, void *arg); struct aac_event { TAILQ_ENTRY(aac_event) ev_links; int ev_type; aac_event_cb_t *ev_callback; void *ev_arg; }; /* * Public functions */ extern void aac_free(struct aac_softc *sc); extern int aac_attach(struct aac_softc *sc); extern int aac_detach(device_t dev); extern int aac_shutdown(device_t dev); extern int aac_suspend(device_t dev); extern int aac_resume(device_t dev); extern void aac_new_intr(void *arg); extern int aac_filter(void *arg); extern void aac_submit_bio(struct bio *bp); extern void aac_biodone(struct bio *bp); extern void aac_startio(struct aac_softc *sc); extern int aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp); extern void aac_release_command(struct aac_command *cm); extern int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize); extern void aac_add_event(struct aac_softc *sc, struct aac_event *event); #ifdef AAC_DEBUG extern int aac_debug_enable; # define fwprintf(sc, flags, fmt, args...) \ do { \ if (!aac_debug_enable) \ break; \ if (sc != NULL) \ device_printf(((struct aac_softc *)sc)->aac_dev, \ "%s: " fmt "\n", __func__, ##args); \ else \ printf("%s: " fmt "\n", __func__, ##args); \ } while(0) extern void aac_print_queues(struct aac_softc *sc); extern void aac_panic(struct aac_softc *sc, char *reason); extern void aac_print_fib(struct aac_softc *sc, struct aac_fib *fib, const char *caller); extern void aac_print_aif(struct aac_softc *sc, struct aac_aif_command *aif); #define AAC_PRINT_FIB(sc, fib) aac_print_fib(sc, fib, __func__) #else # define fwprintf(sc, flags, fmt, args...) # define aac_print_queues(sc) # define aac_panic(sc, reason) # define AAC_PRINT_FIB(sc, fib) # define aac_print_aif(sc, aac_aif_command) #endif struct aac_code_lookup { const char *string; u_int32_t code; }; /* * Queue primitives for driver queues. 
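The aac_event machinery above lets a caller park a callback until a resource appears: AAC_EVENT_CMFREE fires when a command structure is released, and OR-ing in AAC_EVENT_REPEAT keeps the event armed rather than one-shot. A minimal sketch with hypothetical helpers (not from the original sources):

        static void
        my_cmfree_cb(struct aac_softc *sc, struct aac_event *event, void *arg)
        {
                /* a command became available; wake the deferred waiter */
                wakeup(arg);
        }

        static void
        wait_for_command(struct aac_softc *sc, struct aac_event *ev,
            void *waiter)
        {
                ev->ev_type = AAC_EVENT_CMFREE;
                ev->ev_callback = my_cmfree_cb;
                ev->ev_arg = waiter;
                aac_add_event(sc, ev);
        }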
*/ #define AACQ_ADD(sc, qname) \ do { \ struct aac_qstat *qs; \ \ qs = &(sc)->aac_qstat[qname]; \ \ qs->q_length++; \ if (qs->q_length > qs->q_max) \ qs->q_max = qs->q_length; \ } while (0) #define AACQ_REMOVE(sc, qname) (sc)->aac_qstat[qname].q_length-- #define AACQ_INIT(sc, qname) \ do { \ sc->aac_qstat[qname].q_length = 0; \ sc->aac_qstat[qname].q_max = 0; \ } while (0) #define AACQ_COMMAND_QUEUE(name, index) \ static __inline void \ aac_initq_ ## name (struct aac_softc *sc) \ { \ TAILQ_INIT(&sc->aac_ ## name); \ AACQ_INIT(sc, index); \ } \ static __inline void \ aac_enqueue_ ## name (struct aac_command *cm) \ { \ if ((cm->cm_flags & AAC_ON_AACQ_MASK) != 0) { \ panic("aac: command %p is on another queue, flags = %#x", \ cm, cm->cm_flags); \ } \ TAILQ_INSERT_TAIL(&cm->cm_sc->aac_ ## name, cm, cm_link); \ cm->cm_flags |= AAC_ON_ ## index; \ AACQ_ADD(cm->cm_sc, index); \ } \ static __inline void \ aac_requeue_ ## name (struct aac_command *cm) \ { \ if ((cm->cm_flags & AAC_ON_AACQ_MASK) != 0) { \ panic("aac: command %p is on another queue, flags = %#x", \ cm, cm->cm_flags); \ } \ TAILQ_INSERT_HEAD(&cm->cm_sc->aac_ ## name, cm, cm_link); \ cm->cm_flags |= AAC_ON_ ## index; \ AACQ_ADD(cm->cm_sc, index); \ } \ static __inline struct aac_command * \ aac_dequeue_ ## name (struct aac_softc *sc) \ { \ struct aac_command *cm; \ \ if ((cm = TAILQ_FIRST(&sc->aac_ ## name)) != NULL) { \ if ((cm->cm_flags & AAC_ON_ ## index) == 0) { \ panic("aac: command %p not in queue, flags = %#x, bit = %#x", \ cm, cm->cm_flags, AAC_ON_ ## index); \ } \ TAILQ_REMOVE(&sc->aac_ ## name, cm, cm_link); \ cm->cm_flags &= ~AAC_ON_ ## index; \ AACQ_REMOVE(sc, index); \ } \ return(cm); \ } \ static __inline void \ aac_remove_ ## name (struct aac_command *cm) \ { \ if ((cm->cm_flags & AAC_ON_ ## index) == 0) { \ panic("aac: command %p not in queue, flags = %#x, bit = %#x", \ cm, cm->cm_flags, AAC_ON_ ## index); \ } \ TAILQ_REMOVE(&cm->cm_sc->aac_ ## name, cm, cm_link); \ cm->cm_flags &= ~AAC_ON_ ## index; \ AACQ_REMOVE(cm->cm_sc, index); \ } \ AACQ_COMMAND_QUEUE(free, AACQ_FREE); AACQ_COMMAND_QUEUE(ready, AACQ_READY); AACQ_COMMAND_QUEUE(busy, AACQ_BUSY); /* * outstanding bio queue */ static __inline void aac_initq_bio(struct aac_softc *sc) { bioq_init(&sc->aac_bioq); AACQ_INIT(sc, AACQ_BIO); } static __inline void aac_enqueue_bio(struct aac_softc *sc, struct bio *bp) { bioq_insert_tail(&sc->aac_bioq, bp); AACQ_ADD(sc, AACQ_BIO); } static __inline struct bio * aac_dequeue_bio(struct aac_softc *sc) { struct bio *bp; if ((bp = bioq_first(&sc->aac_bioq)) != NULL) { bioq_remove(&sc->aac_bioq, bp); AACQ_REMOVE(sc, AACQ_BIO); } return(bp); } static __inline void aac_print_printf(struct aac_softc *sc) { /* * XXX We have the ability to read the length of the printf string * from out of the mailboxes. 
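AACQ_COMMAND_QUEUE above stamps out a full set of inline queue operations per queue name: AACQ_COMMAND_QUEUE(ready, AACQ_READY), for instance, yields aac_initq_ready(), aac_enqueue_ready(), aac_requeue_ready(), aac_dequeue_ready() and aac_remove_ready(), with the AAC_ON_AACQ_* flag bits doubling as a cheap never-on-two-queues assertion. A usage sketch (start_one_command() is hypothetical, loosely modeled on the driver's start-I/O path):

        static void
        start_one_command(struct aac_softc *sc)
        {
                struct aac_command *cm;

                if ((cm = aac_dequeue_ready(sc)) == NULL)
                        return;                 /* nothing waiting */
                if (AAC_SEND_COMMAND(sc, cm) != 0)
                        aac_requeue_ready(cm);  /* no resources; retry later */
                else
                        aac_enqueue_busy(cm);
        }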
*/ device_printf(sc->aac_dev, "**Monitor** %.*s", AAC_PRINTF_BUFSIZE, sc->aac_common->ac_printf); sc->aac_common->ac_printf[0] = 0; AAC_QNOTIFY(sc, AAC_DB_PRINTF); } static __inline int aac_alloc_sync_fib(struct aac_softc *sc, struct aac_fib **fib) { mtx_assert(&sc->aac_io_lock, MA_OWNED); *fib = &sc->aac_common->ac_sync_fib; return (0); } static __inline void aac_release_sync_fib(struct aac_softc *sc) { mtx_assert(&sc->aac_io_lock, MA_OWNED); } Index: head/sys/dev/acpica/acpi_video.c =================================================================== --- head/sys/dev/acpica/acpi_video.c (revision 258779) +++ head/sys/dev/acpica/acpi_video.c (revision 258780) @@ -1,1082 +1,1082 @@ /*- * Copyright (c) 2002-2003 Taku YAMAMOTO * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: acpi_vid.c,v 1.4 2003/10/13 10:07:36 taku Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include /* ACPI video extension driver. 
*/ struct acpi_video_output { ACPI_HANDLE handle; UINT32 adr; STAILQ_ENTRY(acpi_video_output) vo_next; struct { int num; STAILQ_ENTRY(acpi_video_output) next; } vo_unit; int vo_brightness; int vo_fullpower; int vo_economy; int vo_numlevels; int *vo_levels; struct sysctl_ctx_list vo_sysctl_ctx; struct sysctl_oid *vo_sysctl_tree; }; STAILQ_HEAD(acpi_video_output_queue, acpi_video_output); struct acpi_video_softc { device_t device; ACPI_HANDLE handle; struct acpi_video_output_queue vid_outputs; eventhandler_tag vid_pwr_evh; }; /* interfaces */ static int acpi_video_modevent(struct module*, int, void *); static void acpi_video_identify(driver_t *driver, device_t parent); static int acpi_video_probe(device_t); static int acpi_video_attach(device_t); static int acpi_video_detach(device_t); static int acpi_video_resume(device_t); static int acpi_video_shutdown(device_t); static void acpi_video_notify_handler(ACPI_HANDLE, UINT32, void *); static void acpi_video_power_profile(void *); static void acpi_video_bind_outputs(struct acpi_video_softc *); static struct acpi_video_output *acpi_video_vo_init(UINT32); static void acpi_video_vo_bind(struct acpi_video_output *, ACPI_HANDLE); static void acpi_video_vo_destroy(struct acpi_video_output *); static int acpi_video_vo_check_level(struct acpi_video_output *, int); static void acpi_video_vo_notify_handler(ACPI_HANDLE, UINT32, void *); static int acpi_video_vo_active_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_video_vo_bright_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_video_vo_presets_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_video_vo_levels_sysctl(SYSCTL_HANDLER_ARGS); /* operations */ static void vid_set_switch_policy(ACPI_HANDLE, UINT32); static int vid_enum_outputs(ACPI_HANDLE, void(*)(ACPI_HANDLE, UINT32, void *), void *); static int vo_get_brightness_levels(ACPI_HANDLE, int **); static int vo_get_brightness(ACPI_HANDLE); static void vo_set_brightness(ACPI_HANDLE, int); static UINT32 vo_get_device_status(ACPI_HANDLE); static UINT32 vo_get_graphics_state(ACPI_HANDLE); static void vo_set_device_state(ACPI_HANDLE, UINT32); /* events */ #define VID_NOTIFY_SWITCHED 0x80 #define VID_NOTIFY_REPROBE 0x81 #define VID_NOTIFY_CYCLE_BRN 0x85 #define VID_NOTIFY_INC_BRN 0x86 #define VID_NOTIFY_DEC_BRN 0x87 #define VID_NOTIFY_ZERO_BRN 0x88 /* _DOS (Enable/Disable Output Switching) argument bits */ #define DOS_SWITCH_MASK 3 #define DOS_SWITCH_BY_OSPM 0 #define DOS_SWITCH_BY_BIOS 1 #define DOS_SWITCH_LOCKED 2 #define DOS_BRIGHTNESS_BY_OSPM (1 << 2) /* _DOD and subdev's _ADR */ #define DOD_DEVID_MASK 0x0f00 #define DOD_DEVID_MASK_FULL 0xffff #define DOD_DEVID_MASK_DISPIDX 0x000f #define DOD_DEVID_MASK_DISPPORT 0x00f0 #define DOD_DEVID_MONITOR 0x0100 #define DOD_DEVID_LCD 0x0110 #define DOD_DEVID_TV 0x0200 #define DOD_DEVID_EXT 0x0300 #define DOD_DEVID_INTDFP 0x0400 #define DOD_BIOS (1 << 16) #define DOD_NONVGA (1 << 17) #define DOD_HEAD_ID_SHIFT 18 #define DOD_HEAD_ID_BITS 3 #define DOD_HEAD_ID_MASK \ (((1 << DOD_HEAD_ID_BITS) - 1) << DOD_HEAD_ID_SHIFT) -#define DOD_DEVID_SCHEME_STD (1 << 31) +#define DOD_DEVID_SCHEME_STD (1U << 31) /* _BCL related constants */ #define BCL_FULLPOWER 0 #define BCL_ECONOMY 1 /* _DCS (Device Currrent Status) value bits and masks. */ #define DCS_EXISTS (1 << 0) #define DCS_ACTIVE (1 << 1) #define DCS_READY (1 << 2) #define DCS_FUNCTIONAL (1 << 3) #define DCS_ATTACHED (1 << 4) /* _DSS (Device Set Status) argument bits and masks. 
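The DOD_* masks above decode the 32-bit device ID that _DOD and each output's _ADR report; bit 31 (DOD_DEVID_SCHEME_STD) says whether the standard field layout applies at all. A sketch of a full decode, assuming a hypothetical dump_devid() helper (the same field extraction the bootverbose path in this file performs):

        static void
        dump_devid(UINT32 adr)
        {
                if ((adr & DOD_DEVID_SCHEME_STD) == 0)
                        return;         /* vendor-specific ID, fields undefined */
                printf("type %#x idx %#x port %#x head %d\n",
                    adr & DOD_DEVID_MASK,
                    adr & DOD_DEVID_MASK_DISPIDX,
                    (adr & DOD_DEVID_MASK_DISPPORT) >> 4,
                    (int)((adr & DOD_HEAD_ID_MASK) >> DOD_HEAD_ID_SHIFT));
        }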
*/ #define DSS_INACTIVE 0 #define DSS_ACTIVE (1 << 0) #define DSS_SETNEXT (1 << 30) -#define DSS_COMMIT (1 << 31) +#define DSS_COMMIT (1U << 31) static device_method_t acpi_video_methods[] = { DEVMETHOD(device_identify, acpi_video_identify), DEVMETHOD(device_probe, acpi_video_probe), DEVMETHOD(device_attach, acpi_video_attach), DEVMETHOD(device_detach, acpi_video_detach), DEVMETHOD(device_resume, acpi_video_resume), DEVMETHOD(device_shutdown, acpi_video_shutdown), { 0, 0 } }; static driver_t acpi_video_driver = { "acpi_video", acpi_video_methods, sizeof(struct acpi_video_softc), }; static devclass_t acpi_video_devclass; DRIVER_MODULE(acpi_video, vgapci, acpi_video_driver, acpi_video_devclass, acpi_video_modevent, NULL); MODULE_DEPEND(acpi_video, acpi, 1, 1, 1); static struct sysctl_ctx_list acpi_video_sysctl_ctx; static struct sysctl_oid *acpi_video_sysctl_tree; static struct acpi_video_output_queue crt_units, tv_units, ext_units, lcd_units, other_units; /* * The 'video' lock protects the hierarchy of video output devices * (the video "bus"). The 'video_output' lock protects per-output * data is equivalent to a softc lock for each video output. */ ACPI_SERIAL_DECL(video, "ACPI video"); ACPI_SERIAL_DECL(video_output, "ACPI video output"); static MALLOC_DEFINE(M_ACPIVIDEO, "acpivideo", "ACPI video extension"); static int acpi_video_modevent(struct module *mod __unused, int evt, void *cookie __unused) { int err; err = 0; switch (evt) { case MOD_LOAD: sysctl_ctx_init(&acpi_video_sysctl_ctx); STAILQ_INIT(&crt_units); STAILQ_INIT(&tv_units); STAILQ_INIT(&ext_units); STAILQ_INIT(&lcd_units); STAILQ_INIT(&other_units); break; case MOD_UNLOAD: sysctl_ctx_free(&acpi_video_sysctl_ctx); acpi_video_sysctl_tree = NULL; break; default: err = EINVAL; } return (err); } static void acpi_video_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "acpi_video", -1) == NULL) device_add_child(parent, "acpi_video", -1); } static int acpi_video_probe(device_t dev) { ACPI_HANDLE devh, h; ACPI_OBJECT_TYPE t_dos; devh = acpi_get_handle(dev); if (acpi_disabled("video") || ACPI_FAILURE(AcpiGetHandle(devh, "_DOD", &h)) || ACPI_FAILURE(AcpiGetHandle(devh, "_DOS", &h)) || ACPI_FAILURE(AcpiGetType(h, &t_dos)) || t_dos != ACPI_TYPE_METHOD) return (ENXIO); device_set_desc(dev, "ACPI video extension"); return (0); } static int acpi_video_attach(device_t dev) { struct acpi_softc *acpi_sc; struct acpi_video_softc *sc; sc = device_get_softc(dev); acpi_sc = devclass_get_softc(devclass_find("acpi"), 0); if (acpi_sc == NULL) return (ENXIO); ACPI_SERIAL_BEGIN(video); if (acpi_video_sysctl_tree == NULL) { acpi_video_sysctl_tree = SYSCTL_ADD_NODE(&acpi_video_sysctl_ctx, SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "video", CTLFLAG_RD, 0, "video extension control"); } ACPI_SERIAL_END(video); sc->device = dev; sc->handle = acpi_get_handle(dev); STAILQ_INIT(&sc->vid_outputs); AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY, acpi_video_notify_handler, sc); sc->vid_pwr_evh = EVENTHANDLER_REGISTER(power_profile_change, acpi_video_power_profile, sc, 0); ACPI_SERIAL_BEGIN(video); acpi_video_bind_outputs(sc); ACPI_SERIAL_END(video); /* * Notify the BIOS that we want to switch both active outputs and * brightness levels. 
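The DOS_* bits above form the single integer argument to _DOS: a two-bit output-switch policy plus a brightness-ownership flag. That is why attach hands both output switching and brightness control to the OS while detach and shutdown return switching to the BIOS. A compact sketch (take_over_video() is a hypothetical helper around this file's vid_set_switch_policy()):

        static void
        take_over_video(ACPI_HANDLE handle, int os_controls)
        {
                if (os_controls)
                        vid_set_switch_policy(handle,
                            DOS_SWITCH_BY_OSPM | DOS_BRIGHTNESS_BY_OSPM);
                else
                        vid_set_switch_policy(handle, DOS_SWITCH_BY_BIOS);
        }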
*/ vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_OSPM | DOS_BRIGHTNESS_BY_OSPM); acpi_video_power_profile(sc); return (0); } static int acpi_video_detach(device_t dev) { struct acpi_video_softc *sc; struct acpi_video_output *vo, *vn; sc = device_get_softc(dev); vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_BIOS); EVENTHANDLER_DEREGISTER(power_profile_change, sc->vid_pwr_evh); AcpiRemoveNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY, acpi_video_notify_handler); ACPI_SERIAL_BEGIN(video); STAILQ_FOREACH_SAFE(vo, &sc->vid_outputs, vo_next, vn) { acpi_video_vo_destroy(vo); } ACPI_SERIAL_END(video); return (0); } static int acpi_video_resume(device_t dev) { struct acpi_video_softc *sc; struct acpi_video_output *vo, *vn; int level; sc = device_get_softc(dev); /* Restore brightness level */ ACPI_SERIAL_BEGIN(video); ACPI_SERIAL_BEGIN(video_output); STAILQ_FOREACH_SAFE(vo, &sc->vid_outputs, vo_next, vn) { if ((vo->adr & DOD_DEVID_MASK_FULL) != DOD_DEVID_LCD && (vo->adr & DOD_DEVID_MASK) != DOD_DEVID_INTDFP) continue; if ((vo_get_device_status(vo->handle) & DCS_ACTIVE) == 0) continue; level = vo_get_brightness(vo->handle); if (level != -1) vo_set_brightness(vo->handle, level); } ACPI_SERIAL_END(video_output); ACPI_SERIAL_END(video); return (0); } static int acpi_video_shutdown(device_t dev) { struct acpi_video_softc *sc; sc = device_get_softc(dev); vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_BIOS); return (0); } static void acpi_video_notify_handler(ACPI_HANDLE handle, UINT32 notify, void *context) { struct acpi_video_softc *sc; struct acpi_video_output *vo, *vo_tmp; ACPI_HANDLE lasthand; UINT32 dcs, dss, dss_p; sc = (struct acpi_video_softc *)context; switch (notify) { case VID_NOTIFY_SWITCHED: dss_p = 0; lasthand = NULL; ACPI_SERIAL_BEGIN(video); ACPI_SERIAL_BEGIN(video_output); STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) { dss = vo_get_graphics_state(vo->handle); dcs = vo_get_device_status(vo->handle); if (!(dcs & DCS_READY)) dss = DSS_INACTIVE; if (((dcs & DCS_ACTIVE) && dss == DSS_INACTIVE) || (!(dcs & DCS_ACTIVE) && dss == DSS_ACTIVE)) { if (lasthand != NULL) vo_set_device_state(lasthand, dss_p); dss_p = dss; lasthand = vo->handle; } } if (lasthand != NULL) vo_set_device_state(lasthand, dss_p|DSS_COMMIT); ACPI_SERIAL_END(video_output); ACPI_SERIAL_END(video); break; case VID_NOTIFY_REPROBE: ACPI_SERIAL_BEGIN(video); STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) vo->handle = NULL; acpi_video_bind_outputs(sc); STAILQ_FOREACH_SAFE(vo, &sc->vid_outputs, vo_next, vo_tmp) { if (vo->handle == NULL) { STAILQ_REMOVE(&sc->vid_outputs, vo, acpi_video_output, vo_next); acpi_video_vo_destroy(vo); } } ACPI_SERIAL_END(video); break; default: device_printf(sc->device, "unknown notify event 0x%x\n", notify); } } static void acpi_video_power_profile(void *context) { int state; struct acpi_video_softc *sc; struct acpi_video_output *vo; sc = context; state = power_profile_get_state(); if (state != POWER_PROFILE_PERFORMANCE && state != POWER_PROFILE_ECONOMY) return; ACPI_SERIAL_BEGIN(video); ACPI_SERIAL_BEGIN(video_output); STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) { if (vo->vo_levels != NULL && vo->vo_brightness == -1) vo_set_brightness(vo->handle, state == POWER_PROFILE_ECONOMY ? 
vo->vo_economy : vo->vo_fullpower); } ACPI_SERIAL_END(video_output); ACPI_SERIAL_END(video); } static void acpi_video_bind_outputs_subr(ACPI_HANDLE handle, UINT32 adr, void *context) { struct acpi_video_softc *sc; struct acpi_video_output *vo; ACPI_SERIAL_ASSERT(video); sc = context; STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) { if (vo->adr == adr) { acpi_video_vo_bind(vo, handle); return; } } vo = acpi_video_vo_init(adr); if (vo != NULL) { acpi_video_vo_bind(vo, handle); STAILQ_INSERT_TAIL(&sc->vid_outputs, vo, vo_next); } } static void acpi_video_bind_outputs(struct acpi_video_softc *sc) { ACPI_SERIAL_ASSERT(video); vid_enum_outputs(sc->handle, acpi_video_bind_outputs_subr, sc); } static struct acpi_video_output * acpi_video_vo_init(UINT32 adr) { struct acpi_video_output *vn, *vo, *vp; int n, x; char name[8], env[32]; const char *type, *desc; struct acpi_video_output_queue *voqh; ACPI_SERIAL_ASSERT(video); switch (adr & DOD_DEVID_MASK) { case DOD_DEVID_MONITOR: if ((adr & DOD_DEVID_MASK_FULL) == DOD_DEVID_LCD) { /* DOD_DEVID_LCD is a common, backward compatible ID */ desc = "Internal/Integrated Digital Flat Panel"; type = "lcd"; voqh = &lcd_units; } else { desc = "VGA CRT or VESA Compatible Analog Monitor"; type = "crt"; voqh = &crt_units; } break; case DOD_DEVID_TV: desc = "TV/HDTV or Analog-Video Monitor"; type = "tv"; voqh = &tv_units; break; case DOD_DEVID_EXT: desc = "External Digital Monitor"; type = "ext"; voqh = &ext_units; break; case DOD_DEVID_INTDFP: desc = "Internal/Integrated Digital Flat Panel"; type = "lcd"; voqh = &lcd_units; break; default: desc = "unknown output"; type = "out"; voqh = &other_units; } n = 0; vp = NULL; STAILQ_FOREACH(vn, voqh, vo_unit.next) { if (vn->vo_unit.num != n) break; vp = vn; n++; } snprintf(name, sizeof(name), "%s%d", type, n); vo = malloc(sizeof(*vo), M_ACPIVIDEO, M_NOWAIT); if (vo != NULL) { vo->handle = NULL; vo->adr = adr; vo->vo_unit.num = n; vo->vo_brightness = -1; vo->vo_fullpower = -1; /* TODO: override with tunables */ vo->vo_economy = -1; vo->vo_numlevels = 0; vo->vo_levels = NULL; snprintf(env, sizeof(env), "hw.acpi.video.%s.fullpower", name); if (getenv_int(env, &x)) vo->vo_fullpower = x; snprintf(env, sizeof(env), "hw.acpi.video.%s.economy", name); if (getenv_int(env, &x)) vo->vo_economy = x; sysctl_ctx_init(&vo->vo_sysctl_ctx); if (vp != NULL) STAILQ_INSERT_AFTER(voqh, vp, vo, vo_unit.next); else STAILQ_INSERT_TAIL(voqh, vo, vo_unit.next); if (acpi_video_sysctl_tree != NULL) vo->vo_sysctl_tree = SYSCTL_ADD_NODE(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(acpi_video_sysctl_tree), OID_AUTO, name, CTLFLAG_RD, 0, desc); if (vo->vo_sysctl_tree != NULL) { SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "active", CTLTYPE_INT|CTLFLAG_RW, vo, 0, acpi_video_vo_active_sysctl, "I", "current activity of this device"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "brightness", CTLTYPE_INT|CTLFLAG_RW, vo, 0, acpi_video_vo_bright_sysctl, "I", "current brightness level"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "fullpower", CTLTYPE_INT|CTLFLAG_RW, vo, POWER_PROFILE_PERFORMANCE, acpi_video_vo_presets_sysctl, "I", "preset level for full power mode"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "economy", CTLTYPE_INT|CTLFLAG_RW, vo, POWER_PROFILE_ECONOMY, acpi_video_vo_presets_sysctl, "I", "preset level for economy mode"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, 
"levels", CTLTYPE_INT | CTLFLAG_RD, vo, 0, acpi_video_vo_levels_sysctl, "I", "supported brightness levels"); } else printf("%s: sysctl node creation failed\n", type); } else printf("%s: softc allocation failed\n", type); if (bootverbose) { printf("found %s(%x)", desc, adr & DOD_DEVID_MASK_FULL); printf(", idx#%x", adr & DOD_DEVID_MASK_DISPIDX); printf(", port#%x", (adr & DOD_DEVID_MASK_DISPPORT) >> 4); if (adr & DOD_BIOS) printf(", detectable by BIOS"); if (adr & DOD_NONVGA) printf(" (Non-VGA output device whose power " "is related to the VGA device)"); printf(", head #%d\n", (adr & DOD_HEAD_ID_MASK) >> DOD_HEAD_ID_SHIFT); } return (vo); } static void acpi_video_vo_bind(struct acpi_video_output *vo, ACPI_HANDLE handle) { ACPI_SERIAL_BEGIN(video_output); if (vo->vo_levels != NULL) AcpiOsFree(vo->vo_levels); vo->handle = handle; vo->vo_numlevels = vo_get_brightness_levels(handle, &vo->vo_levels); if (vo->vo_numlevels >= 2) { if (vo->vo_fullpower == -1 || acpi_video_vo_check_level(vo, vo->vo_fullpower) != 0) /* XXX - can't deal with rebinding... */ vo->vo_fullpower = vo->vo_levels[BCL_FULLPOWER]; if (vo->vo_economy == -1 || acpi_video_vo_check_level(vo, vo->vo_economy) != 0) /* XXX - see above. */ vo->vo_economy = vo->vo_levels[BCL_ECONOMY]; } if (vo->vo_levels != NULL) AcpiInstallNotifyHandler(handle, ACPI_DEVICE_NOTIFY, acpi_video_vo_notify_handler, vo); ACPI_SERIAL_END(video_output); } static void acpi_video_vo_destroy(struct acpi_video_output *vo) { struct acpi_video_output_queue *voqh; ACPI_SERIAL_ASSERT(video); if (vo->vo_sysctl_tree != NULL) { vo->vo_sysctl_tree = NULL; sysctl_ctx_free(&vo->vo_sysctl_ctx); } if (vo->vo_levels != NULL) { AcpiRemoveNotifyHandler(vo->handle, ACPI_DEVICE_NOTIFY, acpi_video_vo_notify_handler); AcpiOsFree(vo->vo_levels); } switch (vo->adr & DOD_DEVID_MASK) { case DOD_DEVID_MONITOR: voqh = &crt_units; break; case DOD_DEVID_TV: voqh = &tv_units; break; case DOD_DEVID_EXT: voqh = &ext_units; break; case DOD_DEVID_INTDFP: voqh = &lcd_units; break; default: voqh = &other_units; } STAILQ_REMOVE(voqh, vo, acpi_video_output, vo_unit.next); free(vo, M_ACPIVIDEO); } static int acpi_video_vo_check_level(struct acpi_video_output *vo, int level) { int i; ACPI_SERIAL_ASSERT(video_output); if (vo->vo_levels == NULL) return (ENODEV); for (i = 0; i < vo->vo_numlevels; i++) if (vo->vo_levels[i] == level) return (0); return (EINVAL); } static void acpi_video_vo_notify_handler(ACPI_HANDLE handle, UINT32 notify, void *context) { struct acpi_video_output *vo; int i, j, level, new_level; vo = context; ACPI_SERIAL_BEGIN(video_output); if (vo->handle != handle) goto out; switch (notify) { case VID_NOTIFY_CYCLE_BRN: if (vo->vo_numlevels <= 3) goto out; /* FALLTHROUGH */ case VID_NOTIFY_INC_BRN: case VID_NOTIFY_DEC_BRN: case VID_NOTIFY_ZERO_BRN: if (vo->vo_levels == NULL) goto out; level = vo_get_brightness(handle); if (level < 0) goto out; break; default: printf("unknown notify event 0x%x from %s\n", notify, acpi_name(handle)); goto out; } new_level = level; switch (notify) { case VID_NOTIFY_CYCLE_BRN: for (i = 2; i < vo->vo_numlevels; i++) if (vo->vo_levels[i] == level) { new_level = vo->vo_numlevels > i + 1 ? 
vo->vo_levels[i + 1] : vo->vo_levels[2]; break; } break; case VID_NOTIFY_INC_BRN: case VID_NOTIFY_DEC_BRN: for (i = 0; i < vo->vo_numlevels; i++) { j = vo->vo_levels[i]; if (notify == VID_NOTIFY_INC_BRN) { if (j > level && (j < new_level || level == new_level)) new_level = j; } else { if (j < level && (j > new_level || level == new_level)) new_level = j; } } break; case VID_NOTIFY_ZERO_BRN: for (i = 0; i < vo->vo_numlevels; i++) if (vo->vo_levels[i] == 0) { new_level = 0; break; } break; } if (new_level != level) { vo_set_brightness(handle, new_level); vo->vo_brightness = new_level; } out: ACPI_SERIAL_END(video_output); } /* ARGSUSED */ static int acpi_video_vo_active_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_video_output *vo; int state, err; vo = (struct acpi_video_output *)arg1; if (vo->handle == NULL) return (ENXIO); ACPI_SERIAL_BEGIN(video_output); state = (vo_get_device_status(vo->handle) & DCS_ACTIVE) ? 1 : 0; err = sysctl_handle_int(oidp, &state, 0, req); if (err != 0 || req->newptr == NULL) goto out; vo_set_device_state(vo->handle, DSS_COMMIT | (state ? DSS_ACTIVE : DSS_INACTIVE)); out: ACPI_SERIAL_END(video_output); return (err); } /* ARGSUSED */ static int acpi_video_vo_bright_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_video_output *vo; int level, preset, err; vo = (struct acpi_video_output *)arg1; ACPI_SERIAL_BEGIN(video_output); if (vo->handle == NULL) { err = ENXIO; goto out; } if (vo->vo_levels == NULL) { err = ENODEV; goto out; } preset = (power_profile_get_state() == POWER_PROFILE_ECONOMY) ? vo->vo_economy : vo->vo_fullpower; level = vo->vo_brightness; if (level == -1) level = preset; err = sysctl_handle_int(oidp, &level, 0, req); if (err != 0 || req->newptr == NULL) goto out; if (level < -1 || level > 100) { err = EINVAL; goto out; } if (level != -1 && (err = acpi_video_vo_check_level(vo, level))) goto out; vo->vo_brightness = level; vo_set_brightness(vo->handle, (level == -1) ? preset : level); out: ACPI_SERIAL_END(video_output); return (err); } static int acpi_video_vo_presets_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_video_output *vo; int i, level, *preset, err; vo = (struct acpi_video_output *)arg1; ACPI_SERIAL_BEGIN(video_output); if (vo->handle == NULL) { err = ENXIO; goto out; } if (vo->vo_levels == NULL) { err = ENODEV; goto out; } preset = (arg2 == POWER_PROFILE_ECONOMY) ? &vo->vo_economy : &vo->vo_fullpower; level = *preset; err = sysctl_handle_int(oidp, &level, 0, req); if (err != 0 || req->newptr == NULL) goto out; if (level < -1 || level > 100) { err = EINVAL; goto out; } if (level == -1) { i = (arg2 == POWER_PROFILE_ECONOMY) ? 
BCL_ECONOMY : BCL_FULLPOWER; level = vo->vo_levels[i]; } else if ((err = acpi_video_vo_check_level(vo, level)) != 0) goto out; if (vo->vo_brightness == -1 && (power_profile_get_state() == arg2)) vo_set_brightness(vo->handle, level); *preset = level; out: ACPI_SERIAL_END(video_output); return (err); } /* ARGSUSED */ static int acpi_video_vo_levels_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_video_output *vo; int err; vo = (struct acpi_video_output *)arg1; ACPI_SERIAL_BEGIN(video_output); if (vo->vo_levels == NULL) { err = ENODEV; goto out; } if (req->newptr != NULL) { err = EPERM; goto out; } err = sysctl_handle_opaque(oidp, vo->vo_levels, vo->vo_numlevels * sizeof(*vo->vo_levels), req); out: ACPI_SERIAL_END(video_output); return (err); } static void vid_set_switch_policy(ACPI_HANDLE handle, UINT32 policy) { ACPI_STATUS status; status = acpi_SetInteger(handle, "_DOS", policy); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DOS - %s\n", acpi_name(handle), AcpiFormatException(status)); } struct enum_callback_arg { void (*callback)(ACPI_HANDLE, UINT32, void *); void *context; ACPI_OBJECT *dod_pkg; int count; }; static ACPI_STATUS vid_enum_outputs_subr(ACPI_HANDLE handle, UINT32 level __unused, void *context, void **retp __unused) { ACPI_STATUS status; UINT32 adr, val; struct enum_callback_arg *argset; size_t i; ACPI_SERIAL_ASSERT(video); argset = context; status = acpi_GetInteger(handle, "_ADR", &adr); if (ACPI_FAILURE(status)) return (AE_OK); for (i = 0; i < argset->dod_pkg->Package.Count; i++) { if (acpi_PkgInt32(argset->dod_pkg, i, &val) == 0 && (val & DOD_DEVID_MASK_FULL) == (adr & DOD_DEVID_MASK_FULL)) { argset->callback(handle, val, argset->context); argset->count++; } } return (AE_OK); } static int vid_enum_outputs(ACPI_HANDLE handle, void (*callback)(ACPI_HANDLE, UINT32, void *), void *context) { ACPI_STATUS status; ACPI_BUFFER dod_buf; ACPI_OBJECT *res; struct enum_callback_arg argset; ACPI_SERIAL_ASSERT(video); dod_buf.Length = ACPI_ALLOCATE_BUFFER; dod_buf.Pointer = NULL; status = AcpiEvaluateObject(handle, "_DOD", NULL, &dod_buf); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) printf("can't evaluate %s._DOD - %s\n", acpi_name(handle), AcpiFormatException(status)); argset.count = -1; goto out; } res = (ACPI_OBJECT *)dod_buf.Pointer; if (!ACPI_PKG_VALID(res, 1)) { printf("evaluation of %s._DOD makes no sense\n", acpi_name(handle)); argset.count = -1; goto out; } if (callback == NULL) { argset.count = res->Package.Count; goto out; } argset.callback = callback; argset.context = context; argset.dod_pkg = res; argset.count = 0; status = AcpiWalkNamespace(ACPI_TYPE_DEVICE, handle, 1, vid_enum_outputs_subr, NULL, &argset, NULL); if (ACPI_FAILURE(status)) printf("failed walking down %s - %s\n", acpi_name(handle), AcpiFormatException(status)); out: if (dod_buf.Pointer != NULL) AcpiOsFree(dod_buf.Pointer); return (argset.count); } static int vo_get_brightness_levels(ACPI_HANDLE handle, int **levelp) { ACPI_STATUS status; ACPI_BUFFER bcl_buf; ACPI_OBJECT *res; int num, i, n, *levels; bcl_buf.Length = ACPI_ALLOCATE_BUFFER; bcl_buf.Pointer = NULL; status = AcpiEvaluateObject(handle, "_BCL", NULL, &bcl_buf); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) printf("can't evaluate %s._BCL - %s\n", acpi_name(handle), AcpiFormatException(status)); goto out; } res = (ACPI_OBJECT *)bcl_buf.Pointer; if (!ACPI_PKG_VALID(res, 2)) { printf("evaluation of %s._BCL makes no sense\n", acpi_name(handle)); goto out; } num = res->Package.Count; if (num < 2 || levelp == NULL) goto out; levels = 
AcpiOsAllocate(num * sizeof(*levels)); if (levels == NULL) goto out; for (i = 0, n = 0; i < num; i++) if (acpi_PkgInt32(res, i, &levels[n]) == 0) n++; if (n < 2) { AcpiOsFree(levels); goto out; } *levelp = levels; return (n); out: if (bcl_buf.Pointer != NULL) AcpiOsFree(bcl_buf.Pointer); return (0); } static int vo_get_brightness(ACPI_HANDLE handle) { UINT32 level; ACPI_STATUS status; ACPI_SERIAL_ASSERT(video_output); status = acpi_GetInteger(handle, "_BQC", &level); if (ACPI_FAILURE(status)) { printf("can't evaluate %s._BQC - %s\n", acpi_name(handle), AcpiFormatException(status)); return (-1); } if (level > 100) return (-1); return (level); } static void vo_set_brightness(ACPI_HANDLE handle, int level) { ACPI_STATUS status; ACPI_SERIAL_ASSERT(video_output); status = acpi_SetInteger(handle, "_BCM", level); if (ACPI_FAILURE(status)) printf("can't evaluate %s._BCM - %s\n", acpi_name(handle), AcpiFormatException(status)); } static UINT32 vo_get_device_status(ACPI_HANDLE handle) { UINT32 dcs; ACPI_STATUS status; ACPI_SERIAL_ASSERT(video_output); dcs = 0; status = acpi_GetInteger(handle, "_DCS", &dcs); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DCS - %s\n", acpi_name(handle), AcpiFormatException(status)); return (dcs); } static UINT32 vo_get_graphics_state(ACPI_HANDLE handle) { UINT32 dgs; ACPI_STATUS status; dgs = 0; status = acpi_GetInteger(handle, "_DGS", &dgs); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DGS - %s\n", acpi_name(handle), AcpiFormatException(status)); return (dgs); } static void vo_set_device_state(ACPI_HANDLE handle, UINT32 state) { ACPI_STATUS status; ACPI_SERIAL_ASSERT(video_output); status = acpi_SetInteger(handle, "_DSS", state); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DSS - %s\n", acpi_name(handle), AcpiFormatException(status)); } Index: head/sys/dev/agp/agp_i810.c =================================================================== --- head/sys/dev/agp/agp_i810.c (revision 258779) +++ head/sys/dev/agp/agp_i810.c (revision 258780) @@ -1,2570 +1,2570 @@ /*- * Copyright (c) 2000 Doug Rabson * Copyright (c) 2000 Ruslan Ermilov * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Fixes for 830/845G support: David Dawes * 852GM/855GM/865G support added by David Dawes * * This is generic Intel GTT handling code, morphed from the AGP * bridge code. */ #include __FBSDID("$FreeBSD$"); #if 0 #define KTR_AGP_I810 KTR_DEV #else #define KTR_AGP_I810 0 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DECLARE(M_AGP); struct agp_i810_match; static int agp_i810_check_active(device_t bridge_dev); static int agp_i830_check_active(device_t bridge_dev); static int agp_i915_check_active(device_t bridge_dev); static int agp_sb_check_active(device_t bridge_dev); static void agp_82852_set_desc(device_t dev, const struct agp_i810_match *match); static void agp_i810_set_desc(device_t dev, const struct agp_i810_match *match); static void agp_i810_dump_regs(device_t dev); static void agp_i830_dump_regs(device_t dev); static void agp_i855_dump_regs(device_t dev); static void agp_i915_dump_regs(device_t dev); static void agp_i965_dump_regs(device_t dev); static void agp_sb_dump_regs(device_t dev); static int agp_i810_get_stolen_size(device_t dev); static int agp_i830_get_stolen_size(device_t dev); static int agp_i915_get_stolen_size(device_t dev); static int agp_sb_get_stolen_size(device_t dev); static int agp_i810_get_gtt_mappable_entries(device_t dev); static int agp_i830_get_gtt_mappable_entries(device_t dev); static int agp_i915_get_gtt_mappable_entries(device_t dev); static int agp_i810_get_gtt_total_entries(device_t dev); static int agp_i965_get_gtt_total_entries(device_t dev); static int agp_gen5_get_gtt_total_entries(device_t dev); static int agp_sb_get_gtt_total_entries(device_t dev); static int agp_i810_install_gatt(device_t dev); static int agp_i830_install_gatt(device_t dev); static void agp_i810_deinstall_gatt(device_t dev); static void agp_i830_deinstall_gatt(device_t dev); static void agp_i810_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i830_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i915_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i965_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_g4x_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_sb_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i810_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_i915_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_i965_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_g4x_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_sb_write_gtt(device_t dev, u_int index, uint32_t pte); static u_int32_t agp_i810_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_i915_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_i965_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_g4x_read_gtt_pte(device_t dev, u_int index); static vm_paddr_t agp_i810_read_gtt_pte_paddr(device_t dev, u_int index); static vm_paddr_t agp_i915_read_gtt_pte_paddr(device_t dev, u_int index); static vm_paddr_t agp_sb_read_gtt_pte_paddr(device_t dev, u_int index); static int agp_i810_set_aperture(device_t dev, u_int32_t aperture); static int 
agp_i830_set_aperture(device_t dev, u_int32_t aperture); static int agp_i915_set_aperture(device_t dev, u_int32_t aperture); static int agp_i810_chipset_flush_setup(device_t dev); static int agp_i915_chipset_flush_setup(device_t dev); static int agp_i965_chipset_flush_setup(device_t dev); static void agp_i810_chipset_flush_teardown(device_t dev); static void agp_i915_chipset_flush_teardown(device_t dev); static void agp_i965_chipset_flush_teardown(device_t dev); static void agp_i810_chipset_flush(device_t dev); static void agp_i830_chipset_flush(device_t dev); static void agp_i915_chipset_flush(device_t dev); enum { CHIP_I810, /* i810/i815 */ CHIP_I830, /* 830M/845G */ CHIP_I855, /* 852GM/855GM/865G */ CHIP_I915, /* 915G/915GM */ CHIP_I965, /* G965 */ CHIP_G33, /* G33/Q33/Q35 */ CHIP_IGD, /* Pineview */ CHIP_G4X, /* G45/Q45 */ CHIP_SB, /* SandyBridge */ }; /* The i810 through i855 have the registers at BAR 1, and the GATT gets * allocated by us. The i915 has registers in BAR 0 and the GATT is at the * start of the stolen memory, and should only be accessed by the OS through * BAR 3. The G965 has registers and GATT in the same BAR (0) -- first 512KB * is registers, second 512KB is GATT. */ static struct resource_spec agp_i810_res_spec[] = { { SYS_RES_MEMORY, AGP_I810_MMADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec agp_i915_res_spec[] = { { SYS_RES_MEMORY, AGP_I915_MMADR, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_MEMORY, AGP_I915_GTTADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec agp_i965_res_spec[] = { { SYS_RES_MEMORY, AGP_I965_GTTMMADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec agp_g4x_res_spec[] = { { SYS_RES_MEMORY, AGP_G4X_MMADR, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_MEMORY, AGP_G4X_GTTADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct agp_i810_softc { struct agp_softc agp; u_int32_t initial_aperture; /* aperture size at startup */ struct agp_gatt *gatt; u_int32_t dcache_size; /* i810 only */ u_int32_t stolen; /* number of i830/845 gtt entries for stolen memory */ u_int stolen_size; /* BIOS-reserved graphics memory */ u_int gtt_total_entries; /* Total number of gtt ptes */ u_int gtt_mappable_entries; /* Number of gtt ptes mappable by CPU */ device_t bdev; /* bridge device */ void *argb_cursor; /* contigmalloc area for ARGB cursor */ struct resource *sc_res[2]; const struct agp_i810_match *match; int sc_flush_page_rid; struct resource *sc_flush_page_res; void *sc_flush_page_vaddr; int sc_bios_allocated_flush_page; }; static device_t intel_agp; struct agp_i810_driver { int chiptype; int gen; int busdma_addr_mask_sz; struct resource_spec *res_spec; int (*check_active)(device_t); void (*set_desc)(device_t, const struct agp_i810_match *); void (*dump_regs)(device_t); int (*get_stolen_size)(device_t); int (*get_gtt_total_entries)(device_t); int (*get_gtt_mappable_entries)(device_t); int (*install_gatt)(device_t); void (*deinstall_gatt)(device_t); void (*write_gtt)(device_t, u_int, uint32_t); void (*install_gtt_pte)(device_t, u_int, vm_offset_t, int); u_int32_t (*read_gtt_pte)(device_t, u_int); vm_paddr_t (*read_gtt_pte_paddr)(device_t , u_int); int (*set_aperture)(device_t, u_int32_t); int (*chipset_flush_setup)(device_t); void (*chipset_flush_teardown)(device_t); void (*chipset_flush)(device_t); }; static const struct agp_i810_driver agp_i810_i810_driver = { .chiptype = CHIP_I810, .gen = 1, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i810_check_active, .set_desc = 
agp_i810_set_desc, .dump_regs = agp_i810_dump_regs, .get_stolen_size = agp_i810_get_stolen_size, .get_gtt_mappable_entries = agp_i810_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i810_install_gatt, .deinstall_gatt = agp_i810_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i810_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i810_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i810_chipset_flush, }; static const struct agp_i810_driver agp_i810_i815_driver = { .chiptype = CHIP_I810, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i810_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i810_dump_regs, .get_stolen_size = agp_i810_get_stolen_size, .get_gtt_mappable_entries = agp_i830_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i810_install_gatt, .deinstall_gatt = agp_i810_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i810_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i810_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i830_driver = { .chiptype = CHIP_I830, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i830_dump_regs, .get_stolen_size = agp_i830_get_stolen_size, .get_gtt_mappable_entries = agp_i830_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i830_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i855_driver = { .chiptype = CHIP_I855, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_82852_set_desc, .dump_regs = agp_i855_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i830_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i865_driver = { .chiptype = CHIP_I855, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i855_dump_regs, .get_stolen_size = 
agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i915_driver = { .chiptype = CHIP_I915, .gen = 3, .busdma_addr_mask_sz = 32, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i915_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i915_chipset_flush_setup, .chipset_flush_teardown = agp_i915_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g965_driver = { .chiptype = CHIP_I965, .gen = 4, .busdma_addr_mask_sz = 36, .res_spec = agp_i965_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i965_write_gtt, .install_gtt_pte = agp_i965_install_gtt_pte, .read_gtt_pte = agp_i965_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g33_driver = { .chiptype = CHIP_G33, .gen = 3, .busdma_addr_mask_sz = 36, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_igd_driver = { .chiptype = CHIP_IGD, .gen = 3, .busdma_addr_mask_sz = 36, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i915_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, 
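	/*
	 * Editorial note: the per-chipset ops tables are assembled
	 * mix-and-match from shared helpers (this Pineview table, for
	 * instance, borrows the i915 GTT accessors but the i965 flush-page
	 * setup).  Callers never pick a helper directly; they dispatch
	 * through the table, roughly:
	 *
	 *	sc->match->driver->install_gtt_pte(dev, index, physical, flags);
	 */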
.get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g4x_driver = { .chiptype = CHIP_G4X, .gen = 5, .busdma_addr_mask_sz = 36, .res_spec = agp_i965_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_gen5_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_g4x_write_gtt, .install_gtt_pte = agp_g4x_install_gtt_pte, .read_gtt_pte = agp_g4x_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_sb_driver = { .chiptype = CHIP_SB, .gen = 6, .busdma_addr_mask_sz = 40, .res_spec = agp_g4x_res_spec, .check_active = agp_sb_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_sb_dump_regs, .get_stolen_size = agp_sb_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_sb_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_sb_write_gtt, .install_gtt_pte = agp_sb_install_gtt_pte, .read_gtt_pte = agp_g4x_read_gtt_pte, .read_gtt_pte_paddr = agp_sb_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i810_chipset_flush, }; /* For adding new devices, devid is the id of the graphics controller * (pci:0:2:0, for example). The placeholder (usually at pci:0:2:1) for the * second head should never be added. The bridge_offset is the offset to * subtract from devid to get the id of the hostb that the device is on. 
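 *
 * The devid values below are raw PCI dev/vendor dwords, with the device
 * id in the upper 16 bits and the vendor id (0x8086, Intel) in the lower
 * 16, which is the layout pci_get_devid() hands back.  For example,
 * 0x27A28086 is device 0x27A2 (the 945GM GMCH) from vendor 0x8086.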
*/ static const struct agp_i810_match { int devid; char *name; const struct agp_i810_driver *driver; } agp_i810_matches[] = { { .devid = 0x71218086, .name = "Intel 82810 (i810 GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x71238086, .name = "Intel 82810-DC100 (i810-DC100 GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x71258086, .name = "Intel 82810E (i810E GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x11328086, .name = "Intel 82815 (i815 GMCH) SVGA controller", .driver = &agp_i810_i815_driver }, { .devid = 0x35778086, .name = "Intel 82830M (830M GMCH) SVGA controller", .driver = &agp_i810_i830_driver }, { .devid = 0x25628086, .name = "Intel 82845M (845M GMCH) SVGA controller", .driver = &agp_i810_i830_driver }, { .devid = 0x35828086, .name = "Intel 82852/855GM SVGA controller", .driver = &agp_i810_i855_driver }, { .devid = 0x25728086, .name = "Intel 82865G (865G GMCH) SVGA controller", .driver = &agp_i810_i865_driver }, { .devid = 0x25828086, .name = "Intel 82915G (915G GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x258A8086, .name = "Intel E7221 SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x25928086, .name = "Intel 82915GM (915GM GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27728086, .name = "Intel 82945G (945G GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27A28086, .name = "Intel 82945GM (945GM GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27AE8086, .name = "Intel 945GME SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x29728086, .name = "Intel 946GZ SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29828086, .name = "Intel G965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29928086, .name = "Intel Q965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29A28086, .name = "Intel G965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29B28086, .name = "Intel Q35 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0x29C28086, .name = "Intel G33 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0x29D28086, .name = "Intel Q33 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0xA0018086, .name = "Intel Pineview SVGA controller", .driver = &agp_i810_igd_driver }, { .devid = 0xA0118086, .name = "Intel Pineview (M) SVGA controller", .driver = &agp_i810_igd_driver }, { .devid = 0x2A028086, .name = "Intel GM965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x2A128086, .name = "Intel GME965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x2A428086, .name = "Intel GM45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E028086, .name = "Intel Eaglelake SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E128086, .name = "Intel Q45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E228086, .name = "Intel G45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E328086, .name = "Intel G41 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x00428086, .name = "Intel Ironlake (D) SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x00468086, .name = "Intel Ironlake (M) SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x01028086, .name = "SandyBridge desktop GT1 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01128086, .name = "SandyBridge desktop GT2 IG", 
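	  /*
	   * The IvyBridge ids later in this table reuse agp_i810_sb_driver
	   * as well: the gen 6 GTT layout (2MB GTT offset, GEN6_PTE_*
	   * caching bits) applies to both families here.
	   */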
.driver = &agp_i810_sb_driver }, { .devid = 0x01228086, .name = "SandyBridge desktop GT2+ IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01068086, .name = "SandyBridge mobile GT1 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01168086, .name = "SandyBridge mobile GT2 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01268086, .name = "SandyBridge mobile GT2+ IG", .driver = &agp_i810_sb_driver }, { .devid = 0x010a8086, .name = "SandyBridge server IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01528086, .name = "IvyBridge desktop GT1 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01628086, .name = "IvyBridge desktop GT2 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01568086, .name = "IvyBridge mobile GT1 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x01668086, .name = "IvyBridge mobile GT2 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x015a8086, .name = "IvyBridge server GT1 IG", .driver = &agp_i810_sb_driver }, { .devid = 0x016a8086, .name = "IvyBridge server GT2 IG", .driver = &agp_i810_sb_driver }, { .devid = 0, } }; static const struct agp_i810_match* agp_i810_match(device_t dev) { int i, devid; if (pci_get_class(dev) != PCIC_DISPLAY || pci_get_subclass(dev) != PCIS_DISPLAY_VGA) return (NULL); devid = pci_get_devid(dev); for (i = 0; agp_i810_matches[i].devid != 0; i++) { if (agp_i810_matches[i].devid == devid) break; } if (agp_i810_matches[i].devid == 0) return (NULL); else return (&agp_i810_matches[i]); } /* * Find bridge device. */ static device_t agp_i810_find_bridge(device_t dev) { return (pci_find_dbsf(0, 0, 0, 0)); } static void agp_i810_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "agp", -1) == NULL && agp_i810_match(parent)) device_add_child(parent, "agp", -1); } static int agp_i810_check_active(device_t bridge_dev) { u_int8_t smram; smram = pci_read_config(bridge_dev, AGP_I810_SMRAM, 1); if ((smram & AGP_I810_SMRAM_GMS) == AGP_I810_SMRAM_GMS_DISABLED) return (ENXIO); return (0); } static int agp_i830_check_active(device_t bridge_dev) { int gcc1; gcc1 = pci_read_config(bridge_dev, AGP_I830_GCC1, 1); if ((gcc1 & AGP_I830_GCC1_DEV2) == AGP_I830_GCC1_DEV2_DISABLED) return (ENXIO); return (0); } static int agp_i915_check_active(device_t bridge_dev) { int deven; deven = pci_read_config(bridge_dev, AGP_I915_DEVEN, 4); if ((deven & AGP_I915_DEVEN_D2F0) == AGP_I915_DEVEN_D2F0_DISABLED) return (ENXIO); return (0); } static int agp_sb_check_active(device_t bridge_dev) { int deven; deven = pci_read_config(bridge_dev, AGP_I915_DEVEN, 4); if ((deven & AGP_SB_DEVEN_D2EN) == AGP_SB_DEVEN_D2EN_DISABLED) return (ENXIO); return (0); } static void agp_82852_set_desc(device_t dev, const struct agp_i810_match *match) { switch (pci_read_config(dev, AGP_I85X_CAPID, 1)) { case AGP_I855_GME: device_set_desc(dev, "Intel 82855GME (855GME GMCH) SVGA controller"); break; case AGP_I855_GM: device_set_desc(dev, "Intel 82855GM (855GM GMCH) SVGA controller"); break; case AGP_I852_GME: device_set_desc(dev, "Intel 82852GME (852GME GMCH) SVGA controller"); break; case AGP_I852_GM: device_set_desc(dev, "Intel 82852GM (852GM GMCH) SVGA controller"); break; default: device_set_desc(dev, "Intel 8285xM (85xGM GMCH) SVGA controller"); break; } } static void agp_i810_set_desc(device_t dev, const struct agp_i810_match *match) { device_set_desc(dev, match->name); } static int agp_i810_probe(device_t dev) { device_t bdev; const struct agp_i810_match *match; int err; if (resource_disabled("agp", device_get_unit(dev))) return (ENXIO); match = agp_i810_match(dev); if 
(match == NULL) return (ENXIO); bdev = agp_i810_find_bridge(dev); if (bdev == NULL) { if (bootverbose) printf("I810: can't find bridge device\n"); return (ENXIO); } /* * checking whether internal graphics device has been activated. */ err = match->driver->check_active(bdev); if (err != 0) { if (bootverbose) printf("i810: disabled, not probing\n"); return (err); } match->driver->set_desc(dev, match); return (BUS_PROBE_DEFAULT); } static void agp_i810_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I810_MISCC: 0x%04x\n", pci_read_config(sc->bdev, AGP_I810_MISCC, 2)); } static void agp_i830_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I830_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I830_GCC1, 1)); } static void agp_i855_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); } static void agp_i915_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); device_printf(dev, "AGP_I915_MSAC: 0x%02x\n", pci_read_config(sc->bdev, AGP_I915_MSAC, 1)); } static void agp_i965_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I965_PGTBL_CTL2: %08x\n", bus_read_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); device_printf(dev, "AGP_I965_MSAC: 0x%02x\n", pci_read_config(sc->bdev, AGP_I965_MSAC, 1)); } static void agp_sb_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_SNB_GFX_MODE: %08x\n", bus_read_4(sc->sc_res[0], AGP_SNB_GFX_MODE)); device_printf(dev, "AGP_SNB_GCC1: 0x%04x\n", pci_read_config(sc->bdev, AGP_SNB_GCC1, 2)); } static int agp_i810_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->stolen = 0; sc->stolen_size = 0; return (0); } static int agp_i830_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; unsigned int gcc1; sc = device_get_softc(dev); gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 1); switch (gcc1 & AGP_I830_GCC1_GMS) { case AGP_I830_GCC1_GMS_STOLEN_512: sc->stolen = (512 - 132) * 1024 / 4096; sc->stolen_size = 512 * 1024; break; case AGP_I830_GCC1_GMS_STOLEN_1024: sc->stolen = (1024 - 132) * 1024 / 4096; sc->stolen_size = 1024 * 1024; break; case AGP_I830_GCC1_GMS_STOLEN_8192: sc->stolen = (8192 - 132) * 1024 / 4096; sc->stolen_size = 8192 * 1024; break; default: sc->stolen = 0; device_printf(dev, "unknown memory configuration, disabling (GCC1 %x)\n", gcc1); return (EINVAL); } return (0); } static int agp_i915_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; unsigned int gcc1, stolen, gtt_size; sc = device_get_softc(dev); /* * Stolen memory is set up at the beginning of the aperture by * the BIOS, consisting of the GATT followed by 4kb for the * BIOS display. 
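 *
 * A worked example of the computation below (illustrative): with the
 * GMS field reporting 8MB of stolen memory on a 915 (256KB GTT),
 *
 *	gtt_size = 256 + 4		(KB; GTT plus the 4KB BIOS page)
 *	stolen_size = 8192 * 1024	(bytes)
 *	stolen = (8192 - 260) * 1024 / 4096 = 1983 GTT pages reserved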
*/ switch (sc->match->driver->chiptype) { case CHIP_I855: gtt_size = 128; break; case CHIP_I915: gtt_size = 256; break; case CHIP_I965: switch (bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL) & AGP_I810_PGTBL_SIZE_MASK) { case AGP_I810_PGTBL_SIZE_128KB: gtt_size = 128; break; case AGP_I810_PGTBL_SIZE_256KB: gtt_size = 256; break; case AGP_I810_PGTBL_SIZE_512KB: gtt_size = 512; break; case AGP_I965_PGTBL_SIZE_1MB: gtt_size = 1024; break; case AGP_I965_PGTBL_SIZE_2MB: gtt_size = 2048; break; case AGP_I965_PGTBL_SIZE_1_5MB: gtt_size = 1024 + 512; break; default: device_printf(dev, "Bad PGTBL size\n"); return (EINVAL); } break; case CHIP_G33: gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 2); switch (gcc1 & AGP_G33_MGGC_GGMS_MASK) { case AGP_G33_MGGC_GGMS_SIZE_1M: gtt_size = 1024; break; case AGP_G33_MGGC_GGMS_SIZE_2M: gtt_size = 2048; break; default: device_printf(dev, "Bad PGTBL size\n"); return (EINVAL); } break; case CHIP_IGD: case CHIP_G4X: gtt_size = 0; break; default: device_printf(dev, "Bad chiptype\n"); return (EINVAL); } /* GCC1 is called MGGC on i915+ */ gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 1); switch (gcc1 & AGP_I855_GCC1_GMS) { case AGP_I855_GCC1_GMS_STOLEN_1M: stolen = 1024; break; case AGP_I855_GCC1_GMS_STOLEN_4M: stolen = 4 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_8M: stolen = 8 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_16M: stolen = 16 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_32M: stolen = 32 * 1024; break; case AGP_I915_GCC1_GMS_STOLEN_48M: stolen = sc->match->driver->gen > 2 ? 48 * 1024 : 0; break; case AGP_I915_GCC1_GMS_STOLEN_64M: stolen = sc->match->driver->gen > 2 ? 64 * 1024 : 0; break; case AGP_G33_GCC1_GMS_STOLEN_128M: stolen = sc->match->driver->gen > 2 ? 128 * 1024 : 0; break; case AGP_G33_GCC1_GMS_STOLEN_256M: stolen = sc->match->driver->gen > 2 ? 
256 * 1024 : 0; break; case AGP_G4X_GCC1_GMS_STOLEN_96M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 96 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_160M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 160 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_224M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 224 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_352M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 352 * 1024; else stolen = 0; break; default: device_printf(dev, "unknown memory configuration, disabling (GCC1 %x)\n", gcc1); return (EINVAL); } gtt_size += 4; sc->stolen_size = stolen * 1024; sc->stolen = (stolen - gtt_size) * 1024 / 4096; return (0); } static int agp_sb_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; uint16_t gmch_ctl; sc = device_get_softc(dev); gmch_ctl = pci_read_config(sc->bdev, AGP_SNB_GCC1, 2); switch (gmch_ctl & AGP_SNB_GMCH_GMS_STOLEN_MASK) { case AGP_SNB_GMCH_GMS_STOLEN_32M: sc->stolen_size = 32 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_64M: sc->stolen_size = 64 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_96M: sc->stolen_size = 96 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_128M: sc->stolen_size = 128 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_160M: sc->stolen_size = 160 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_192M: sc->stolen_size = 192 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_224M: sc->stolen_size = 224 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_256M: sc->stolen_size = 256 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_288M: sc->stolen_size = 288 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_320M: sc->stolen_size = 320 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_352M: sc->stolen_size = 352 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_384M: sc->stolen_size = 384 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_416M: sc->stolen_size = 416 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_448M: sc->stolen_size = 448 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_480M: sc->stolen_size = 480 * 1024 * 1024; break; case AGP_SNB_GMCH_GMS_STOLEN_512M: sc->stolen_size = 512 * 1024 * 1024; break; } sc->stolen = (sc->stolen_size - 4) / 4096; return (0); } static int agp_i810_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; uint16_t miscc; sc = device_get_softc(dev); miscc = pci_read_config(sc->bdev, AGP_I810_MISCC, 2); if ((miscc & AGP_I810_MISCC_WINSIZE) == AGP_I810_MISCC_WINSIZE_32) ap = 32; else ap = 64; sc->gtt_mappable_entries = (ap * 1024 * 1024) >> AGP_PAGE_SHIFT; return (0); } static int agp_i830_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; uint16_t gmch_ctl; sc = device_get_softc(dev); gmch_ctl = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); if ((gmch_ctl & AGP_I830_GCC1_GMASIZE) == AGP_I830_GCC1_GMASIZE_64) ap = 64; else ap = 128; sc->gtt_mappable_entries = (ap * 1024 * 1024) >> AGP_PAGE_SHIFT; return (0); } static int agp_i915_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; sc = device_get_softc(dev); ap = AGP_GET_APERTURE(dev); sc->gtt_mappable_entries = ap >> AGP_PAGE_SHIFT; return (0); } static int agp_i810_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); 
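	/*
	 * For the chipsets using this hook, the GTT covers exactly the
	 * CPU-mappable aperture, so the total entry count equals the
	 * mappable count computed by the get_gtt_mappable_entries hook.
	 */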
sc->gtt_total_entries = sc->gtt_mappable_entries; return (0); } static int agp_i965_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t pgetbl_ctl; int error; sc = device_get_softc(dev); error = 0; pgetbl_ctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); switch (pgetbl_ctl & AGP_I810_PGTBL_SIZE_MASK) { case AGP_I810_PGTBL_SIZE_128KB: sc->gtt_total_entries = 128 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_256KB: sc->gtt_total_entries = 256 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_512KB: sc->gtt_total_entries = 512 * 1024 / 4; break; /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ case AGP_I810_PGTBL_SIZE_1MB: sc->gtt_total_entries = 1024 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_2MB: sc->gtt_total_entries = 2 * 1024 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_1_5MB: sc->gtt_total_entries = (1024 + 512) * 1024 / 4; break; default: device_printf(dev, "Unknown page table size\n"); error = ENXIO; } return (error); } static void agp_gen5_adjust_pgtbl_size(device_t dev, uint32_t sz) { struct agp_i810_softc *sc; uint32_t pgetbl_ctl, pgetbl_ctl2; sc = device_get_softc(dev); /* Disable per-process page table. */ pgetbl_ctl2 = bus_read_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2); pgetbl_ctl2 &= ~AGP_I810_PGTBL_ENABLED; bus_write_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2, pgetbl_ctl2); /* Write the new ggtt size. */ pgetbl_ctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgetbl_ctl &= ~AGP_I810_PGTBL_SIZE_MASK; pgetbl_ctl |= sz; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgetbl_ctl); } static int agp_gen5_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; uint16_t gcc1; sc = device_get_softc(dev); gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); switch (gcc1 & AGP_G4x_GCC1_SIZE_MASK) { case AGP_G4x_GCC1_SIZE_1M: case AGP_G4x_GCC1_SIZE_VT_1M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_1MB); break; case AGP_G4x_GCC1_SIZE_VT_1_5M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_1_5MB); break; case AGP_G4x_GCC1_SIZE_2M: case AGP_G4x_GCC1_SIZE_VT_2M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_2MB); break; default: device_printf(dev, "Unknown page table size\n"); return (ENXIO); } return (agp_i965_get_gtt_total_entries(dev)); } static int agp_sb_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; uint16_t gcc1; sc = device_get_softc(dev); gcc1 = pci_read_config(sc->bdev, AGP_SNB_GCC1, 2); switch (gcc1 & AGP_SNB_GTT_SIZE_MASK) { default: case AGP_SNB_GTT_SIZE_0M: printf("Bad GTT size mask: 0x%04x\n", gcc1); return (ENXIO); case AGP_SNB_GTT_SIZE_1M: sc->gtt_total_entries = 1024 * 1024 / 4; break; case AGP_SNB_GTT_SIZE_2M: sc->gtt_total_entries = 2 * 1024 * 1024 / 4; break; } return (0); } static int agp_i810_install_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); /* Some i810s have on-chip memory called dcache. */ if ((bus_read_1(sc->sc_res[0], AGP_I810_DRT) & AGP_I810_DRT_POPULATED) != 0) sc->dcache_size = 4 * 1024 * 1024; else sc->dcache_size = 0; /* According to the specs the gatt on the i810 must be 64k. */ sc->gatt->ag_virtual = contigmalloc(64 * 1024, M_AGP, 0, 0, ~0, PAGE_SIZE, 0); if (sc->gatt->ag_virtual == NULL) { if (bootverbose) device_printf(dev, "contiguous allocation failed\n"); return (ENOMEM); } bzero(sc->gatt->ag_virtual, sc->gatt->ag_entries * sizeof(u_int32_t)); sc->gatt->ag_physical = vtophys((vm_offset_t)sc->gatt->ag_virtual); agp_flush_cache(); /* Install the GATT. 
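 * Writing the table's physical address into AGP_I810_PGTBL_CTL with the
 * low bit set both points the chipset at the GATT and enables it; the
 * deinstall path clears the register (or just the enable bit) again.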
*/ bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, sc->gatt->ag_physical | 1); return (0); } static int agp_i830_install_gatt(device_t dev) { struct agp_i810_softc *sc; uint32_t pgtblctl; sc = device_get_softc(dev); /* * The i830 automatically initializes the 128k gatt on boot. * GATT address is already in there, make sure it's enabled. */ pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgtblctl |= 1; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl); sc->gatt->ag_physical = pgtblctl & ~1; return (0); } static int agp_i810_attach(device_t dev) { struct agp_i810_softc *sc; int error; sc = device_get_softc(dev); sc->bdev = agp_i810_find_bridge(dev); if (sc->bdev == NULL) return (ENOENT); sc->match = agp_i810_match(dev); agp_set_aperture_resource(dev, sc->match->driver->gen <= 2 ? AGP_APBASE : AGP_I915_GMADR); error = agp_generic_attach(dev); if (error) return (error); if (ptoa((vm_paddr_t)Maxmem) > (1ULL << sc->match->driver->busdma_addr_mask_sz) - 1) { device_printf(dev, "agp_i810 does not support physical " "memory above %ju.\n", (uintmax_t)(1ULL << sc->match->driver->busdma_addr_mask_sz) - 1); return (ENOENT); } if (bus_alloc_resources(dev, sc->match->driver->res_spec, sc->sc_res)) { agp_generic_detach(dev); return (ENODEV); } sc->initial_aperture = AGP_GET_APERTURE(dev); sc->gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_WAITOK); sc->gatt->ag_entries = AGP_GET_APERTURE(dev) >> AGP_PAGE_SHIFT; if ((error = sc->match->driver->get_stolen_size(dev)) != 0 || (error = sc->match->driver->install_gatt(dev)) != 0 || (error = sc->match->driver->get_gtt_mappable_entries(dev)) != 0 || (error = sc->match->driver->get_gtt_total_entries(dev)) != 0 || (error = sc->match->driver->chipset_flush_setup(dev)) != 0) { bus_release_resources(dev, sc->match->driver->res_spec, sc->sc_res); free(sc->gatt, M_AGP); agp_generic_detach(dev); return (error); } intel_agp = dev; device_printf(dev, "aperture size is %dM", sc->initial_aperture / 1024 / 1024); if (sc->stolen > 0) printf(", detected %dk stolen memory\n", sc->stolen * 4); else printf("\n"); if (bootverbose) { sc->match->driver->dump_regs(dev); device_printf(dev, "Mappable GTT entries: %d\n", sc->gtt_mappable_entries); device_printf(dev, "Total GTT entries: %d\n", sc->gtt_total_entries); } return (0); } static void agp_i810_deinstall_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0); contigfree(sc->gatt->ag_virtual, 64 * 1024, M_AGP); } static void agp_i830_deinstall_gatt(device_t dev) { struct agp_i810_softc *sc; unsigned int pgtblctl; sc = device_get_softc(dev); pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgtblctl &= ~1; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl); } static int agp_i810_detach(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); agp_free_cdev(dev); /* Clear the GATT base. */ sc->match->driver->deinstall_gatt(dev); sc->match->driver->chipset_flush_teardown(dev); /* Put the aperture back the way it started. */ AGP_SET_APERTURE(dev, sc->initial_aperture); free(sc->gatt, M_AGP); bus_release_resources(dev, sc->match->driver->res_spec, sc->sc_res); agp_free_res(dev); return (0); } static int agp_i810_resume(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); AGP_SET_APERTURE(dev, sc->initial_aperture); /* Install the GATT. 
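 * On resume the GATT contents survived in RAM, so re-enabling is simply
 * rewriting the saved physical address with the enable bit set, after
 * the aperture has been restored.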
*/ bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, sc->gatt->ag_physical | 1); return (bus_generic_resume(dev)); } /** * Sets the PCI resource size of the aperture on i830-class and below chipsets, * while returning failure on later chipsets when an actual change is * requested. * * This whole function is likely bogus, as the kernel would probably need to * reconfigure the placement of the AGP aperture if a larger size is requested, * which doesn't happen currently. */ static int agp_i810_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; u_int16_t miscc; sc = device_get_softc(dev); /* * Double check for sanity. */ if (aperture != 32 * 1024 * 1024 && aperture != 64 * 1024 * 1024) { device_printf(dev, "bad aperture size %d\n", aperture); return (EINVAL); } miscc = pci_read_config(sc->bdev, AGP_I810_MISCC, 2); miscc &= ~AGP_I810_MISCC_WINSIZE; if (aperture == 32 * 1024 * 1024) miscc |= AGP_I810_MISCC_WINSIZE_32; else miscc |= AGP_I810_MISCC_WINSIZE_64; pci_write_config(sc->bdev, AGP_I810_MISCC, miscc, 2); return (0); } static int agp_i830_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; u_int16_t gcc1; sc = device_get_softc(dev); if (aperture != 64 * 1024 * 1024 && aperture != 128 * 1024 * 1024) { device_printf(dev, "bad aperture size %d\n", aperture); return (EINVAL); } gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); gcc1 &= ~AGP_I830_GCC1_GMASIZE; if (aperture == 64 * 1024 * 1024) gcc1 |= AGP_I830_GCC1_GMASIZE_64; else gcc1 |= AGP_I830_GCC1_GMASIZE_128; pci_write_config(sc->bdev, AGP_I830_GCC1, gcc1, 2); return (0); } static int agp_i915_set_aperture(device_t dev, u_int32_t aperture) { return (agp_generic_set_aperture(dev, aperture)); } static int agp_i810_method_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; sc = device_get_softc(dev); return (sc->match->driver->set_aperture(dev, aperture)); } /** * Writes a GTT entry mapping the page at the given offset from the * beginning of the aperture to the given physical address. Setup the * caching mode according to flags. * * For gen 1, 2 and 3, GTT start is located at AGP_I810_GTT offset * from corresponding BAR start. For gen 4, offset is 512KB + * AGP_I810_GTT, for gen 5 and 6 it is 2MB + AGP_I810_GTT. * * Also, the bits of the physical page address above 4GB needs to be * placed into bits 40-32 of PTE. 
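 *
 * As an illustration of that packing (matching the code below):
 *
 *	pte = (u_int32_t)physical | I810_PTE_VALID;
 *	pte |= (physical & 0x0000000f00000000ull) >> 28;
 *
 * moves physical address bits 35:32 into PTE bits 7:4 on gen 3/4/5; the
 * SandyBridge variant masks with 0xff instead, placing address bits
 * 39:32 into PTE bits 11:4.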
*/ static void agp_i810_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_DCACHE_MEMORY) pte |= I810_PTE_LOCAL; else if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; agp_i810_write_gtt(dev, index, pte); } static void agp_i810_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], AGP_I810_GTT + index * 4, pte); CTR2(KTR_AGP_I810, "810_pte %x %x", index, pte); } static void agp_i830_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; agp_i810_write_gtt(dev, index, pte); } static void agp_i915_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_i915_write_gtt(dev, index, pte); } static void agp_i915_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[1], index * 4, pte); CTR2(KTR_AGP_I810, "915_pte %x %x", index, pte); } static void agp_i965_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_i965_write_gtt(dev, index, pte); } static void agp_i965_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], index * 4 + (512 * 1024), pte); CTR2(KTR_AGP_I810, "965_pte %x %x", index, pte); } static void agp_g4x_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_g4x_write_gtt(dev, index, pte); } static void agp_g4x_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024), pte); CTR2(KTR_AGP_I810, "g4x_pte %x %x", index, pte); } static void agp_sb_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { int type_mask, gfdt; uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; gfdt = (flags & AGP_USER_CACHED_MEMORY_GFDT) != 0 ? 
GEN6_PTE_GFDT : 0; if (type_mask == AGP_USER_MEMORY) pte |= GEN6_PTE_UNCACHED; else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) pte |= GEN6_PTE_LLC_MLC | gfdt; else pte |= GEN6_PTE_LLC | gfdt; pte |= (physical & 0x000000ff00000000ull) >> 28; agp_sb_write_gtt(dev, index, pte); } static void agp_sb_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024), pte); CTR2(KTR_AGP_I810, "sb_pte %x %x", index, pte); } static int agp_i810_bind_page(device_t dev, vm_offset_t offset, vm_offset_t physical) { struct agp_i810_softc *sc = device_get_softc(dev); u_int index; if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) { device_printf(dev, "failed: offset is 0x%08jx, " "shift is %d, entries is %d\n", (intmax_t)offset, AGP_PAGE_SHIFT, sc->gatt->ag_entries); return (EINVAL); } index = offset >> AGP_PAGE_SHIFT; if (sc->stolen != 0 && index < sc->stolen) { device_printf(dev, "trying to bind into stolen memory\n"); return (EINVAL); } sc->match->driver->install_gtt_pte(dev, index, physical, 0); return (0); } static int agp_i810_unbind_page(device_t dev, vm_offset_t offset) { struct agp_i810_softc *sc; u_int index; sc = device_get_softc(dev); if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) return (EINVAL); index = offset >> AGP_PAGE_SHIFT; if (sc->stolen != 0 && index < sc->stolen) { device_printf(dev, "trying to unbind from stolen memory\n"); return (EINVAL); } sc->match->driver->install_gtt_pte(dev, index, 0, 0); return (0); } static u_int32_t agp_i810_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], AGP_I810_GTT + index * 4); return (pte); } static u_int32_t agp_i915_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[1], index * 4); return (pte); } static u_int32_t agp_i965_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], index * 4 + (512 * 1024)); return (pte); } static u_int32_t agp_g4x_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024)); return (pte); } static vm_paddr_t agp_i810_read_gtt_pte_paddr(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; vm_paddr_t res; sc = device_get_softc(dev); pte = sc->match->driver->read_gtt_pte(dev, index); res = pte & ~PAGE_MASK; return (res); } static vm_paddr_t agp_i915_read_gtt_pte_paddr(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; vm_paddr_t res; sc = device_get_softc(dev); pte = sc->match->driver->read_gtt_pte(dev, index); res = (pte & ~PAGE_MASK) | ((pte & 0xf0) << 28); return (res); } static vm_paddr_t agp_sb_read_gtt_pte_paddr(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; vm_paddr_t res; sc = device_get_softc(dev); pte = sc->match->driver->read_gtt_pte(dev, index); res = (pte & ~PAGE_MASK) | ((pte & 0xff0) << 28); return (res); } /* * Writing via memory mapped registers already flushes all TLBs. 
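 * agp_i810_flush_tlb() below is therefore empty; the bulk-update paths
 * (agp_intel_gtt_clear_range() and agp_intel_gtt_insert_pages()) still
 * read back the last PTE written so the MMIO writes are posted.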
*/ static void agp_i810_flush_tlb(device_t dev) { } static int agp_i810_enable(device_t dev, u_int32_t mode) { return (0); } static struct agp_memory * agp_i810_alloc_memory(device_t dev, int type, vm_size_t size) { struct agp_i810_softc *sc; struct agp_memory *mem; vm_page_t m; sc = device_get_softc(dev); if ((size & (AGP_PAGE_SIZE - 1)) != 0 || sc->agp.as_allocated + size > sc->agp.as_maxmem) return (0); if (type == 1) { /* * Mapping local DRAM into GATT. */ if (sc->match->driver->chiptype != CHIP_I810) return (0); if (size != sc->dcache_size) return (0); } else if (type == 2) { /* * Type 2 is the contiguous physical memory type, that hands * back a physical address. This is used for cursors on i810. * Hand back as many single pages with physical as the user * wants, but only allow one larger allocation (ARGB cursor) * for simplicity. */ if (size != AGP_PAGE_SIZE) { if (sc->argb_cursor != NULL) return (0); /* Allocate memory for ARGB cursor, if we can. */ sc->argb_cursor = contigmalloc(size, M_AGP, 0, 0, ~0, PAGE_SIZE, 0); if (sc->argb_cursor == NULL) return (0); } } mem = malloc(sizeof *mem, M_AGP, M_WAITOK); mem->am_id = sc->agp.as_nextid++; mem->am_size = size; mem->am_type = type; if (type != 1 && (type != 2 || size == AGP_PAGE_SIZE)) mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size))); else mem->am_obj = 0; if (type == 2) { if (size == AGP_PAGE_SIZE) { /* * Allocate and wire down the page now so that we can * get its physical address. */ VM_OBJECT_WLOCK(mem->am_obj); m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO); VM_OBJECT_WUNLOCK(mem->am_obj); mem->am_physical = VM_PAGE_TO_PHYS(m); } else { /* Our allocation is already nicely wired down for us. * Just grab the physical address. */ mem->am_physical = vtophys(sc->argb_cursor); } } else mem->am_physical = 0; mem->am_offset = 0; mem->am_is_bound = 0; TAILQ_INSERT_TAIL(&sc->agp.as_memory, mem, am_link); sc->agp.as_allocated += size; return (mem); } static int agp_i810_free_memory(device_t dev, struct agp_memory *mem) { struct agp_i810_softc *sc; vm_page_t m; if (mem->am_is_bound) return (EBUSY); sc = device_get_softc(dev); if (mem->am_type == 2) { if (mem->am_size == AGP_PAGE_SIZE) { /* * Unwire the page which we wired in alloc_memory. */ VM_OBJECT_WLOCK(mem->am_obj); m = vm_page_lookup(mem->am_obj, 0); vm_page_lock(m); vm_page_unwire(m, 0); vm_page_unlock(m); VM_OBJECT_WUNLOCK(mem->am_obj); } else { contigfree(sc->argb_cursor, mem->am_size, M_AGP); sc->argb_cursor = NULL; } } sc->agp.as_allocated -= mem->am_size; TAILQ_REMOVE(&sc->agp.as_memory, mem, am_link); if (mem->am_obj) vm_object_deallocate(mem->am_obj); free(mem, M_AGP); return (0); } static int agp_i810_bind_memory(device_t dev, struct agp_memory *mem, vm_offset_t offset) { struct agp_i810_softc *sc; vm_offset_t i; /* Do some sanity checks first. */ if ((offset & (AGP_PAGE_SIZE - 1)) != 0 || offset + mem->am_size > AGP_GET_APERTURE(dev)) { device_printf(dev, "binding memory at bad offset %#x\n", (int)offset); return (EINVAL); } sc = device_get_softc(dev); if (mem->am_type == 2 && mem->am_size != AGP_PAGE_SIZE) { mtx_lock(&sc->agp.as_lock); if (mem->am_is_bound) { mtx_unlock(&sc->agp.as_lock); return (EINVAL); } /* The memory's already wired down, just stick it in the GTT. 
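 * One PTE is written per AGP page, so (assuming the usual 4KB
 * AGP_PAGE_SIZE) a 16KB ARGB cursor occupies four consecutive GTT slots
 * starting at offset >> AGP_PAGE_SHIFT.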
*/ for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, (offset + i) >> AGP_PAGE_SHIFT, mem->am_physical + i, 0); } agp_flush_cache(); mem->am_offset = offset; mem->am_is_bound = 1; mtx_unlock(&sc->agp.as_lock); return (0); } if (mem->am_type != 1) return (agp_generic_bind_memory(dev, mem, offset)); /* * Mapping local DRAM into GATT. */ if (sc->match->driver->chiptype != CHIP_I810) return (EINVAL); for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) bus_write_4(sc->sc_res[0], AGP_I810_GTT + (i >> AGP_PAGE_SHIFT) * 4, i | 3); return (0); } static int agp_i810_unbind_memory(device_t dev, struct agp_memory *mem) { struct agp_i810_softc *sc; vm_offset_t i; sc = device_get_softc(dev); if (mem->am_type == 2 && mem->am_size != AGP_PAGE_SIZE) { mtx_lock(&sc->agp.as_lock); if (!mem->am_is_bound) { mtx_unlock(&sc->agp.as_lock); return (EINVAL); } for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, (mem->am_offset + i) >> AGP_PAGE_SHIFT, 0, 0); } agp_flush_cache(); mem->am_is_bound = 0; mtx_unlock(&sc->agp.as_lock); return (0); } if (mem->am_type != 1) return (agp_generic_unbind_memory(dev, mem)); if (sc->match->driver->chiptype != CHIP_I810) return (EINVAL); for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, i >> AGP_PAGE_SHIFT, 0, 0); } return (0); } static device_method_t agp_i810_methods[] = { /* Device interface */ DEVMETHOD(device_identify, agp_i810_identify), DEVMETHOD(device_probe, agp_i810_probe), DEVMETHOD(device_attach, agp_i810_attach), DEVMETHOD(device_detach, agp_i810_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, agp_i810_resume), /* AGP interface */ DEVMETHOD(agp_get_aperture, agp_generic_get_aperture), DEVMETHOD(agp_set_aperture, agp_i810_method_set_aperture), DEVMETHOD(agp_bind_page, agp_i810_bind_page), DEVMETHOD(agp_unbind_page, agp_i810_unbind_page), DEVMETHOD(agp_flush_tlb, agp_i810_flush_tlb), DEVMETHOD(agp_enable, agp_i810_enable), DEVMETHOD(agp_alloc_memory, agp_i810_alloc_memory), DEVMETHOD(agp_free_memory, agp_i810_free_memory), DEVMETHOD(agp_bind_memory, agp_i810_bind_memory), DEVMETHOD(agp_unbind_memory, agp_i810_unbind_memory), DEVMETHOD(agp_chipset_flush, agp_intel_gtt_chipset_flush), { 0, 0 } }; static driver_t agp_i810_driver = { "agp", agp_i810_methods, sizeof(struct agp_i810_softc), }; static devclass_t agp_devclass; DRIVER_MODULE(agp_i810, vgapci, agp_i810_driver, agp_devclass, 0, 0); MODULE_DEPEND(agp_i810, agp, 1, 1, 1); MODULE_DEPEND(agp_i810, pci, 1, 1, 1); extern vm_page_t bogus_page; void agp_intel_gtt_clear_range(device_t dev, u_int first_entry, u_int num_entries) { struct agp_i810_softc *sc; u_int i; sc = device_get_softc(dev); for (i = 0; i < num_entries; i++) sc->match->driver->install_gtt_pte(dev, first_entry + i, VM_PAGE_TO_PHYS(bogus_page), 0); sc->match->driver->read_gtt_pte(dev, first_entry + num_entries - 1); } void agp_intel_gtt_insert_pages(device_t dev, u_int first_entry, u_int num_entries, vm_page_t *pages, u_int flags) { struct agp_i810_softc *sc; u_int i; sc = device_get_softc(dev); for (i = 0; i < num_entries; i++) { MPASS(pages[i]->valid == VM_PAGE_BITS_ALL); MPASS(pages[i]->wire_count > 0); sc->match->driver->install_gtt_pte(dev, first_entry + i, VM_PAGE_TO_PHYS(pages[i]), flags); } sc->match->driver->read_gtt_pte(dev, first_entry + num_entries - 1); } struct intel_gtt agp_intel_gtt_get(device_t dev) { struct agp_i810_softc *sc; struct intel_gtt res; sc = device_get_softc(dev); 
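	/*
	 * Snapshot the GTT geometry for intel_gtt consumers.  Note that
	 * scratch_page_dma below points at bogus_page, the same page
	 * agp_intel_gtt_clear_range() installs for unbound entries.
	 */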
res.stolen_size = sc->stolen_size; res.gtt_total_entries = sc->gtt_total_entries; res.gtt_mappable_entries = sc->gtt_mappable_entries; res.do_idle_maps = 0; res.scratch_page_dma = VM_PAGE_TO_PHYS(bogus_page); return (res); } static int agp_i810_chipset_flush_setup(device_t dev) { return (0); } static void agp_i810_chipset_flush_teardown(device_t dev) { /* Nothing to do. */ } static void agp_i810_chipset_flush(device_t dev) { /* Nothing to do. */ } static void agp_i830_chipset_flush(device_t dev) { struct agp_i810_softc *sc; uint32_t hic; int i; sc = device_get_softc(dev); pmap_invalidate_cache(); hic = bus_read_4(sc->sc_res[0], AGP_I830_HIC); - bus_write_4(sc->sc_res[0], AGP_I830_HIC, hic | (1 << 31)); + bus_write_4(sc->sc_res[0], AGP_I830_HIC, hic | (1U << 31)); for (i = 0; i < 20000 /* 1 sec */; i++) { hic = bus_read_4(sc->sc_res[0], AGP_I830_HIC); - if ((hic & (1 << 31)) == 0) + if ((hic & (1U << 31)) == 0) break; DELAY(50); } } static int agp_i915_chipset_flush_alloc_page(device_t dev, uint64_t start, uint64_t end) { struct agp_i810_softc *sc; device_t vga; sc = device_get_softc(dev); vga = device_get_parent(dev); sc->sc_flush_page_rid = 100; sc->sc_flush_page_res = BUS_ALLOC_RESOURCE(device_get_parent(vga), dev, SYS_RES_MEMORY, &sc->sc_flush_page_rid, start, end, PAGE_SIZE, RF_ACTIVE); if (sc->sc_flush_page_res == NULL) { device_printf(dev, "Failed to allocate flush page at 0x%jx\n", (uintmax_t)start); return (EINVAL); } sc->sc_flush_page_vaddr = rman_get_virtual(sc->sc_flush_page_res); if (bootverbose) { device_printf(dev, "Allocated flush page phys 0x%jx virt %p\n", (uintmax_t)rman_get_start(sc->sc_flush_page_res), sc->sc_flush_page_vaddr); } return (0); } static void agp_i915_chipset_flush_free_page(device_t dev) { struct agp_i810_softc *sc; device_t vga; sc = device_get_softc(dev); vga = device_get_parent(dev); if (sc->sc_flush_page_res == NULL) return; BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev, SYS_RES_MEMORY, sc->sc_flush_page_rid, sc->sc_flush_page_res); BUS_RELEASE_RESOURCE(device_get_parent(vga), dev, SYS_RES_MEMORY, sc->sc_flush_page_rid, sc->sc_flush_page_res); } static int agp_i915_chipset_flush_setup(device_t dev) { struct agp_i810_softc *sc; uint32_t temp; int error; sc = device_get_softc(dev); temp = pci_read_config(sc->bdev, AGP_I915_IFPADDR, 4); if ((temp & 1) != 0) { temp &= ~1; if (bootverbose) device_printf(dev, "Found already configured flush page at 0x%jx\n", (uintmax_t)temp); sc->sc_bios_allocated_flush_page = 1; /* * In the case BIOS initialized the flush pointer (?) * register, expect that BIOS also set up the resource * for the page. 
*/ error = agp_i915_chipset_flush_alloc_page(dev, temp, temp + PAGE_SIZE - 1); if (error != 0) return (error); } else { sc->sc_bios_allocated_flush_page = 0; error = agp_i915_chipset_flush_alloc_page(dev, 0, 0xffffffff); if (error != 0) return (error); temp = rman_get_start(sc->sc_flush_page_res); pci_write_config(sc->bdev, AGP_I915_IFPADDR, temp | 1, 4); } return (0); } static void agp_i915_chipset_flush_teardown(device_t dev) { struct agp_i810_softc *sc; uint32_t temp; sc = device_get_softc(dev); if (sc->sc_flush_page_res == NULL) return; if (!sc->sc_bios_allocated_flush_page) { temp = pci_read_config(sc->bdev, AGP_I915_IFPADDR, 4); temp &= ~1; pci_write_config(sc->bdev, AGP_I915_IFPADDR, temp, 4); } agp_i915_chipset_flush_free_page(dev); } static int agp_i965_chipset_flush_setup(device_t dev) { struct agp_i810_softc *sc; uint64_t temp; uint32_t temp_hi, temp_lo; int error; sc = device_get_softc(dev); temp_hi = pci_read_config(sc->bdev, AGP_I965_IFPADDR + 4, 4); temp_lo = pci_read_config(sc->bdev, AGP_I965_IFPADDR, 4); if ((temp_lo & 1) != 0) { temp = ((uint64_t)temp_hi << 32) | (temp_lo & ~1); if (bootverbose) device_printf(dev, "Found already configured flush page at 0x%jx\n", (uintmax_t)temp); sc->sc_bios_allocated_flush_page = 1; /* * In the case BIOS initialized the flush pointer (?) * register, expect that BIOS also set up the resource * for the page. */ error = agp_i915_chipset_flush_alloc_page(dev, temp, temp + PAGE_SIZE - 1); if (error != 0) return (error); } else { sc->sc_bios_allocated_flush_page = 0; error = agp_i915_chipset_flush_alloc_page(dev, 0, ~0); if (error != 0) return (error); temp = rman_get_start(sc->sc_flush_page_res); pci_write_config(sc->bdev, AGP_I965_IFPADDR + 4, (temp >> 32) & UINT32_MAX, 4); pci_write_config(sc->bdev, AGP_I965_IFPADDR, (temp & UINT32_MAX) | 1, 4); } return (0); } static void agp_i965_chipset_flush_teardown(device_t dev) { struct agp_i810_softc *sc; uint32_t temp_lo; sc = device_get_softc(dev); if (sc->sc_flush_page_res == NULL) return; if (!sc->sc_bios_allocated_flush_page) { temp_lo = pci_read_config(sc->bdev, AGP_I965_IFPADDR, 4); temp_lo &= ~1; pci_write_config(sc->bdev, AGP_I965_IFPADDR, temp_lo, 4); } agp_i915_chipset_flush_free_page(dev); } static void agp_i915_chipset_flush(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); *(uint32_t *)sc->sc_flush_page_vaddr = 1; } int agp_intel_gtt_chipset_flush(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->match->driver->chipset_flush(dev); return (0); } void agp_intel_gtt_unmap_memory(device_t dev, struct sglist *sg_list) { } int agp_intel_gtt_map_memory(device_t dev, vm_page_t *pages, u_int num_entries, struct sglist **sg_list) { struct agp_i810_softc *sc; struct sglist *sg; int i; #if 0 int error; bus_dma_tag_t dmat; #endif if (*sg_list != NULL) return (0); sc = device_get_softc(dev); sg = sglist_alloc(num_entries, M_WAITOK /* XXXKIB */); for (i = 0; i < num_entries; i++) { sg->sg_segs[i].ss_paddr = VM_PAGE_TO_PHYS(pages[i]); sg->sg_segs[i].ss_len = PAGE_SIZE; } #if 0 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1 /* alignment */, 0 /* boundary */, 1ULL << sc->match->busdma_addr_mask_sz /* lowaddr */, BUS_SPACE_MAXADDR /* highaddr */, NULL /* filtfunc */, NULL /* filtfuncarg */, BUS_SPACE_MAXADDR /* maxsize */, BUS_SPACE_UNRESTRICTED /* nsegments */, BUS_SPACE_MAXADDR /* maxsegsz */, 0 /* flags */, NULL /* lockfunc */, NULL /* lockfuncarg */, &dmat); if (error != 0) { sglist_free(sg); return (error); } /* XXXKIB */ #endif *sg_list = sg; 
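/*
 * The hand-rolled loop earlier in this function builds the scatter/gather
 * list one PAGE_SIZE segment per page.  A sketch of an equivalent
 * formulation, assuming the stock sglist(9) API (an illustration, not what
 * the driver does): sglist_append_phys() maintains sg_nseg for us and also
 * coalesces physically contiguous pages into a single segment.
 */
#if 0
	for (i = 0; i < num_entries; i++)
		if (sglist_append_phys(sg, VM_PAGE_TO_PHYS(pages[i]),
		    PAGE_SIZE) != 0)
			break;	/* out of segments; cannot happen here */
#endif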
return (0); } void agp_intel_gtt_insert_sg_entries(device_t dev, struct sglist *sg_list, u_int first_entry, u_int flags) { struct agp_i810_softc *sc; vm_paddr_t spaddr; size_t slen; u_int i, j; sc = device_get_softc(dev); for (i = j = 0; j < sg_list->sg_nseg; j++) { spaddr = sg_list->sg_segs[i].ss_paddr; slen = sg_list->sg_segs[i].ss_len; for (; slen > 0; i++) { sc->match->driver->install_gtt_pte(dev, first_entry + i, spaddr, flags); spaddr += AGP_PAGE_SIZE; slen -= AGP_PAGE_SIZE; } } sc->match->driver->read_gtt_pte(dev, first_entry + i - 1); } void intel_gtt_clear_range(u_int first_entry, u_int num_entries) { agp_intel_gtt_clear_range(intel_agp, first_entry, num_entries); } void intel_gtt_insert_pages(u_int first_entry, u_int num_entries, vm_page_t *pages, u_int flags) { agp_intel_gtt_insert_pages(intel_agp, first_entry, num_entries, pages, flags); } struct intel_gtt intel_gtt_get(void) { return (agp_intel_gtt_get(intel_agp)); } int intel_gtt_chipset_flush(void) { return (agp_intel_gtt_chipset_flush(intel_agp)); } void intel_gtt_unmap_memory(struct sglist *sg_list) { agp_intel_gtt_unmap_memory(intel_agp, sg_list); } int intel_gtt_map_memory(vm_page_t *pages, u_int num_entries, struct sglist **sg_list) { return (agp_intel_gtt_map_memory(intel_agp, pages, num_entries, sg_list)); } void intel_gtt_insert_sg_entries(struct sglist *sg_list, u_int first_entry, u_int flags) { agp_intel_gtt_insert_sg_entries(intel_agp, sg_list, first_entry, flags); } device_t intel_gtt_get_bridge_device(void) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->bdev); } vm_paddr_t intel_gtt_read_pte_paddr(u_int entry) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->read_gtt_pte_paddr(intel_agp, entry)); } u_int32_t intel_gtt_read_pte(u_int entry) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->read_gtt_pte(intel_agp, entry)); } void intel_gtt_write(u_int entry, uint32_t val) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->write_gtt(intel_agp, entry, val)); } Index: head/sys/dev/ahci/ahci.h =================================================================== --- head/sys/dev/ahci/ahci.h (revision 258779) +++ head/sys/dev/ahci/ahci.h (revision 258780) @@ -1,546 +1,546 @@ /*- * Copyright (c) 1998 - 2008 Søren Schmidt * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* ATA register defines */ #define ATA_DATA 0 /* (RW) data */ #define ATA_FEATURE 1 /* (W) feature */ #define ATA_F_DMA 0x01 /* enable DMA */ #define ATA_F_OVL 0x02 /* enable overlap */ #define ATA_COUNT 2 /* (W) sector count */ #define ATA_SECTOR 3 /* (RW) sector # */ #define ATA_CYL_LSB 4 /* (RW) cylinder# LSB */ #define ATA_CYL_MSB 5 /* (RW) cylinder# MSB */ #define ATA_DRIVE 6 /* (W) Sector/Drive/Head */ #define ATA_D_LBA 0x40 /* use LBA addressing */ #define ATA_D_IBM 0xa0 /* 512 byte sectors, ECC */ #define ATA_COMMAND 7 /* (W) command */ #define ATA_ERROR 8 /* (R) error */ #define ATA_E_ILI 0x01 /* illegal length */ #define ATA_E_NM 0x02 /* no media */ #define ATA_E_ABORT 0x04 /* command aborted */ #define ATA_E_MCR 0x08 /* media change request */ #define ATA_E_IDNF 0x10 /* ID not found */ #define ATA_E_MC 0x20 /* media changed */ #define ATA_E_UNC 0x40 /* uncorrectable data */ #define ATA_E_ICRC 0x80 /* UDMA crc error */ #define ATA_E_ATAPI_SENSE_MASK 0xf0 /* ATAPI sense key mask */ #define ATA_IREASON 9 /* (R) interrupt reason */ #define ATA_I_CMD 0x01 /* cmd (1) | data (0) */ #define ATA_I_IN 0x02 /* read (1) | write (0) */ #define ATA_I_RELEASE 0x04 /* released bus (1) */ #define ATA_I_TAGMASK 0xf8 /* tag mask */ #define ATA_STATUS 10 /* (R) status */ #define ATA_ALTSTAT 11 /* (R) alternate status */ #define ATA_S_ERROR 0x01 /* error */ #define ATA_S_INDEX 0x02 /* index */ #define ATA_S_CORR 0x04 /* data corrected */ #define ATA_S_DRQ 0x08 /* data request */ #define ATA_S_DSC 0x10 /* drive seek completed */ #define ATA_S_SERVICE 0x10 /* drive needs service */ #define ATA_S_DWF 0x20 /* drive write fault */ #define ATA_S_DMA 0x20 /* DMA ready */ #define ATA_S_READY 0x40 /* drive ready */ #define ATA_S_BUSY 0x80 /* busy */ #define ATA_CONTROL 12 /* (W) control */ #define ATA_A_IDS 0x02 /* disable interrupts */ #define ATA_A_RESET 0x04 /* RESET controller */ #define ATA_A_4BIT 0x08 /* 4 head bits */ #define ATA_A_HOB 0x80 /* High Order Byte enable */ /* SATA register defines */ #define ATA_SSTATUS 13 #define ATA_SS_DET_MASK 0x0000000f #define ATA_SS_DET_NO_DEVICE 0x00000000 #define ATA_SS_DET_DEV_PRESENT 0x00000001 #define ATA_SS_DET_PHY_ONLINE 0x00000003 #define ATA_SS_DET_PHY_OFFLINE 0x00000004 #define ATA_SS_SPD_MASK 0x000000f0 #define ATA_SS_SPD_NO_SPEED 0x00000000 #define ATA_SS_SPD_GEN1 0x00000010 #define ATA_SS_SPD_GEN2 0x00000020 #define ATA_SS_SPD_GEN3 0x00000040 #define ATA_SS_IPM_MASK 0x00000f00 #define ATA_SS_IPM_NO_DEVICE 0x00000000 #define ATA_SS_IPM_ACTIVE 0x00000100 #define ATA_SS_IPM_PARTIAL 0x00000200 #define ATA_SS_IPM_SLUMBER 0x00000600 #define ATA_SS_IPM_DEVSLEEP 0x00000800 #define ATA_SERROR 14 #define ATA_SE_DATA_CORRECTED 0x00000001 #define ATA_SE_COMM_CORRECTED 0x00000002 #define ATA_SE_DATA_ERR 0x00000100 #define ATA_SE_COMM_ERR 0x00000200 #define ATA_SE_PROT_ERR 0x00000400 #define ATA_SE_HOST_ERR 0x00000800 #define ATA_SE_PHY_CHANGED 0x00010000 #define ATA_SE_PHY_IERROR 0x00020000 #define ATA_SE_COMM_WAKE 0x00040000 #define ATA_SE_DECODE_ERR 
0x00080000 #define ATA_SE_PARITY_ERR 0x00100000 #define ATA_SE_CRC_ERR 0x00200000 #define ATA_SE_HANDSHAKE_ERR 0x00400000 #define ATA_SE_LINKSEQ_ERR 0x00800000 #define ATA_SE_TRANSPORT_ERR 0x01000000 #define ATA_SE_UNKNOWN_FIS 0x02000000 #define ATA_SE_EXCHANGED 0x04000000 #define ATA_SCONTROL 15 #define ATA_SC_DET_MASK 0x0000000f #define ATA_SC_DET_IDLE 0x00000000 #define ATA_SC_DET_RESET 0x00000001 #define ATA_SC_DET_DISABLE 0x00000004 #define ATA_SC_SPD_MASK 0x000000f0 #define ATA_SC_SPD_NO_SPEED 0x00000000 #define ATA_SC_SPD_SPEED_GEN1 0x00000010 #define ATA_SC_SPD_SPEED_GEN2 0x00000020 #define ATA_SC_SPD_SPEED_GEN3 0x00000040 #define ATA_SC_IPM_MASK 0x00000f00 #define ATA_SC_IPM_NONE 0x00000000 #define ATA_SC_IPM_DIS_PARTIAL 0x00000100 #define ATA_SC_IPM_DIS_SLUMBER 0x00000200 #define ATA_SC_IPM_DIS_DEVSLEEP 0x00000400 #define ATA_SACTIVE 16 #define AHCI_MAX_PORTS 32 #define AHCI_MAX_SLOTS 32 /* SATA AHCI v1.0 register defines */ #define AHCI_CAP 0x00 #define AHCI_CAP_NPMASK 0x0000001f #define AHCI_CAP_SXS 0x00000020 #define AHCI_CAP_EMS 0x00000040 #define AHCI_CAP_CCCS 0x00000080 #define AHCI_CAP_NCS 0x00001F00 #define AHCI_CAP_NCS_SHIFT 8 #define AHCI_CAP_PSC 0x00002000 #define AHCI_CAP_SSC 0x00004000 #define AHCI_CAP_PMD 0x00008000 #define AHCI_CAP_FBSS 0x00010000 #define AHCI_CAP_SPM 0x00020000 #define AHCI_CAP_SAM 0x00080000 #define AHCI_CAP_ISS 0x00F00000 #define AHCI_CAP_ISS_SHIFT 20 #define AHCI_CAP_SCLO 0x01000000 #define AHCI_CAP_SAL 0x02000000 #define AHCI_CAP_SALP 0x04000000 #define AHCI_CAP_SSS 0x08000000 #define AHCI_CAP_SMPS 0x10000000 #define AHCI_CAP_SSNTF 0x20000000 #define AHCI_CAP_SNCQ 0x40000000 #define AHCI_CAP_64BIT 0x80000000 #define AHCI_GHC 0x04 #define AHCI_GHC_AE 0x80000000 #define AHCI_GHC_MRSM 0x00000004 #define AHCI_GHC_IE 0x00000002 #define AHCI_GHC_HR 0x00000001 #define AHCI_IS 0x08 #define AHCI_PI 0x0c #define AHCI_VS 0x10 #define AHCI_CCCC 0x14 #define AHCI_CCCC_TV_MASK 0xffff0000 #define AHCI_CCCC_TV_SHIFT 16 #define AHCI_CCCC_CC_MASK 0x0000ff00 #define AHCI_CCCC_CC_SHIFT 8 #define AHCI_CCCC_INT_MASK 0x000000f8 #define AHCI_CCCC_INT_SHIFT 3 #define AHCI_CCCC_EN 0x00000001 #define AHCI_CCCP 0x18 #define AHCI_EM_LOC 0x1C #define AHCI_EM_CTL 0x20 #define AHCI_EM_MR 0x00000001 #define AHCI_EM_TM 0x00000100 #define AHCI_EM_RST 0x00000200 #define AHCI_EM_LED 0x00010000 #define AHCI_EM_SAFTE 0x00020000 #define AHCI_EM_SES2 0x00040000 #define AHCI_EM_SGPIO 0x00080000 #define AHCI_EM_SMB 0x01000000 #define AHCI_EM_XMT 0x02000000 #define AHCI_EM_ALHD 0x04000000 #define AHCI_EM_PM 0x08000000 #define AHCI_CAP2 0x24 #define AHCI_CAP2_BOH 0x00000001 #define AHCI_CAP2_NVMP 0x00000002 #define AHCI_CAP2_APST 0x00000004 #define AHCI_CAP2_SDS 0x00000008 #define AHCI_CAP2_SADM 0x00000010 #define AHCI_CAP2_DESO 0x00000020 #define AHCI_OFFSET 0x100 #define AHCI_STEP 0x80 #define AHCI_P_CLB 0x00 #define AHCI_P_CLBU 0x04 #define AHCI_P_FB 0x08 #define AHCI_P_FBU 0x0c #define AHCI_P_IS 0x10 #define AHCI_P_IE 0x14 #define AHCI_P_IX_DHR 0x00000001 #define AHCI_P_IX_PS 0x00000002 #define AHCI_P_IX_DS 0x00000004 #define AHCI_P_IX_SDB 0x00000008 #define AHCI_P_IX_UF 0x00000010 #define AHCI_P_IX_DP 0x00000020 #define AHCI_P_IX_PC 0x00000040 #define AHCI_P_IX_MP 0x00000080 #define AHCI_P_IX_PRC 0x00400000 #define AHCI_P_IX_IPM 0x00800000 #define AHCI_P_IX_OF 0x01000000 #define AHCI_P_IX_INF 0x04000000 #define AHCI_P_IX_IF 0x08000000 #define AHCI_P_IX_HBD 0x10000000 #define AHCI_P_IX_HBF 0x20000000 #define AHCI_P_IX_TFE 0x40000000 #define AHCI_P_IX_CPD 0x80000000 #define AHCI_P_CMD 
0x18 #define AHCI_P_CMD_ST 0x00000001 #define AHCI_P_CMD_SUD 0x00000002 #define AHCI_P_CMD_POD 0x00000004 #define AHCI_P_CMD_CLO 0x00000008 #define AHCI_P_CMD_FRE 0x00000010 #define AHCI_P_CMD_CCS_MASK 0x00001f00 #define AHCI_P_CMD_CCS_SHIFT 8 #define AHCI_P_CMD_ISS 0x00002000 #define AHCI_P_CMD_FR 0x00004000 #define AHCI_P_CMD_CR 0x00008000 #define AHCI_P_CMD_CPS 0x00010000 #define AHCI_P_CMD_PMA 0x00020000 #define AHCI_P_CMD_HPCP 0x00040000 #define AHCI_P_CMD_MPSP 0x00080000 #define AHCI_P_CMD_CPD 0x00100000 #define AHCI_P_CMD_ESP 0x00200000 #define AHCI_P_CMD_FBSCP 0x00400000 #define AHCI_P_CMD_APSTE 0x00800000 #define AHCI_P_CMD_ATAPI 0x01000000 #define AHCI_P_CMD_DLAE 0x02000000 #define AHCI_P_CMD_ALPE 0x04000000 #define AHCI_P_CMD_ASP 0x08000000 #define AHCI_P_CMD_ICC_MASK 0xf0000000 #define AHCI_P_CMD_NOOP 0x00000000 #define AHCI_P_CMD_ACTIVE 0x10000000 #define AHCI_P_CMD_PARTIAL 0x20000000 #define AHCI_P_CMD_SLUMBER 0x60000000 #define AHCI_P_CMD_DEVSLEEP 0x80000000 #define AHCI_P_TFD 0x20 #define AHCI_P_SIG 0x24 #define AHCI_P_SSTS 0x28 #define AHCI_P_SCTL 0x2c #define AHCI_P_SERR 0x30 #define AHCI_P_SACT 0x34 #define AHCI_P_CI 0x38 #define AHCI_P_SNTF 0x3C #define AHCI_P_FBS 0x40 #define AHCI_P_FBS_EN 0x00000001 #define AHCI_P_FBS_DEC 0x00000002 #define AHCI_P_FBS_SDE 0x00000004 #define AHCI_P_FBS_DEV 0x00000f00 #define AHCI_P_FBS_DEV_SHIFT 8 #define AHCI_P_FBS_ADO 0x0000f000 #define AHCI_P_FBS_ADO_SHIFT 12 #define AHCI_P_FBS_DWE 0x000f0000 #define AHCI_P_FBS_DWE_SHIFT 16 #define AHCI_P_DEVSLP 0x44 #define AHCI_P_DEVSLP_ADSE 0x00000001 #define AHCI_P_DEVSLP_DSP 0x00000002 #define AHCI_P_DEVSLP_DETO 0x000003fc #define AHCI_P_DEVSLP_DETO_SHIFT 2 #define AHCI_P_DEVSLP_MDAT 0x00007c00 #define AHCI_P_DEVSLP_MDAT_SHIFT 10 #define AHCI_P_DEVSLP_DITO 0x01ff8000 #define AHCI_P_DEVSLP_DITO_SHIFT 15 #define AHCI_P_DEVSLP_DM 0x0e000000 #define AHCI_P_DEVSLP_DM_SHIFT 25 /* Just to be sure, if building as module. */ #if MAXPHYS < 512 * 1024 #undef MAXPHYS #define MAXPHYS 512 * 1024 #endif /* Pessimistic prognosis on number of required S/G entries */ #define AHCI_SG_ENTRIES (roundup(btoc(MAXPHYS) + 1, 8)) /* Command list. 32 commands. First, 1Kbyte aligned. */ #define AHCI_CL_OFFSET 0 #define AHCI_CL_SIZE 32 /* Command tables. Up to 32 commands, Each, 128byte aligned. */ #define AHCI_CT_OFFSET (AHCI_CL_OFFSET + AHCI_CL_SIZE * AHCI_MAX_SLOTS) #define AHCI_CT_SIZE (128 + AHCI_SG_ENTRIES * 16) /* Total main work area. 
*/ #define AHCI_WORK_SIZE (AHCI_CT_OFFSET + AHCI_CT_SIZE * ch->numslots) struct ahci_dma_prd { u_int64_t dba; u_int32_t reserved; u_int32_t dbc; /* 0 based */ #define AHCI_PRD_MASK 0x003fffff /* max 4MB */ #define AHCI_PRD_MAX (AHCI_PRD_MASK + 1) -#define AHCI_PRD_IPC (1 << 31) +#define AHCI_PRD_IPC (1U << 31) } __packed; struct ahci_cmd_tab { u_int8_t cfis[64]; u_int8_t acmd[32]; u_int8_t reserved[32]; struct ahci_dma_prd prd_tab[AHCI_SG_ENTRIES]; } __packed; struct ahci_cmd_list { u_int16_t cmd_flags; #define AHCI_CMD_ATAPI 0x0020 #define AHCI_CMD_WRITE 0x0040 #define AHCI_CMD_PREFETCH 0x0080 #define AHCI_CMD_RESET 0x0100 #define AHCI_CMD_BIST 0x0200 #define AHCI_CMD_CLR_BUSY 0x0400 u_int16_t prd_length; /* PRD entries */ u_int32_t bytecount; u_int64_t cmd_table_phys; /* 128byte aligned */ } __packed; /* misc defines */ #define ATA_IRQ_RID 0 #define ATA_INTR_FLAGS (INTR_MPSAFE|INTR_TYPE_BIO|INTR_ENTROPY) struct ata_dmaslot { bus_dmamap_t data_map; /* data DMA map */ int nsegs; /* Number of segs loaded */ }; /* structure holding DMA related information */ struct ata_dma { bus_dma_tag_t work_tag; /* workspace DMA tag */ bus_dmamap_t work_map; /* workspace DMA map */ uint8_t *work; /* workspace */ bus_addr_t work_bus; /* bus address of work */ bus_dma_tag_t rfis_tag; /* RFIS list DMA tag */ bus_dmamap_t rfis_map; /* RFIS list DMA map */ uint8_t *rfis; /* FIS receive area */ bus_addr_t rfis_bus; /* bus address of rfis */ bus_dma_tag_t data_tag; /* data DMA tag */ }; enum ahci_slot_states { AHCI_SLOT_EMPTY, AHCI_SLOT_LOADING, AHCI_SLOT_RUNNING, AHCI_SLOT_EXECUTING }; struct ahci_slot { device_t dev; /* Device handle */ u_int8_t slot; /* Number of this slot */ enum ahci_slot_states state; /* Slot state */ union ccb *ccb; /* CCB occupying slot */ struct ata_dmaslot dma; /* DMA data of this slot */ struct callout timeout; /* Execution timeout */ }; struct ahci_device { int revision; int mode; u_int bytecount; u_int atapi; u_int tags; u_int caps; }; struct ahci_led { device_t dev; /* Device handle */ struct cdev *led; uint8_t num; /* Number of this led */ uint8_t state; /* State of this led */ }; #define AHCI_NUM_LEDS 3 /* structure describing an ATA channel */ struct ahci_channel { device_t dev; /* Device handle */ int unit; /* Physical channel */ struct resource *r_mem; /* Memory of this channel */ struct resource *r_irq; /* Interrupt of this channel */ void *ih; /* Interrupt handle */ struct ata_dma dma; /* DMA data */ struct cam_sim *sim; struct cam_path *path; uint32_t caps; /* Controller capabilities */ uint32_t caps2; /* Controller capabilities */ uint32_t chcaps; /* Channel capabilities */ uint32_t chscaps; /* Channel sleep capabilities */ int quirks; int numslots; /* Number of present slots */ int pm_level; /* power management level */ struct ahci_slot slot[AHCI_MAX_SLOTS]; union ccb *hold[AHCI_MAX_SLOTS]; struct mtx mtx; /* state lock */ STAILQ_HEAD(, ccb_hdr) doneq; /* queue of completed CCBs */ int batch; /* doneq is in use */ int devices; /* What is present */ int pm_present; /* PM presence reported */ int fbs_enabled; /* FIS-based switching enabled */ uint32_t oslots; /* Occupied slots */ uint32_t rslots; /* Running slots */ uint32_t aslots; /* Slots with atomic commands */ uint32_t eslots; /* Slots in error */ uint32_t toslots; /* Slots in timeout */ int numrslots; /* Number of running slots */ int numrslotspd[16];/* Number of running slots per dev */ int numtslots; /* Number of tagged slots */ int numtslotspd[16];/* Number of tagged slots per dev */ int numhslots; /* Number of 
held slots */
	int recoverycmd;		/* Our READ LOG active */
	int fatalerr;			/* Fatal error happened */
	int lastslot;			/* Last used slot */
	int taggedtarget;		/* Last tagged target */
	int resetting;			/* Hard-reset in progress. */
	int resetpolldiv;		/* Hard-reset poll divider. */
	int listening;			/* SUD bit is cleared. */
	int wrongccs;			/* CCS field in CMD was wrong */
	union ccb *frozen;		/* Frozen command */
	struct callout pm_timer;	/* Power management events */
	struct callout reset_timer;	/* Hard-reset timeout */
	struct ahci_device user[16];	/* User-specified settings */
	struct ahci_device curr[16];	/* Current settings */
};

struct ahci_enclosure {
	device_t dev;			/* Device handle */
	struct resource *r_memc;	/* Control register */
	struct resource *r_memt;	/* Transmit buffer */
	struct resource *r_memr;	/* Receive buffer */
	struct cam_sim *sim;
	struct cam_path *path;
	struct mtx mtx;			/* state lock */
	struct ahci_led leds[AHCI_MAX_PORTS * 3];
	uint32_t capsem;		/* Controller capabilities */
	uint8_t status[AHCI_MAX_PORTS][4]; /* ArrayDev statuses */
	int quirks;
	int channels;
	int ichannels;
};

/* structure describing an AHCI controller */
struct ahci_controller {
	device_t dev;
	bus_dma_tag_t dma_tag;
	int r_rid;
	struct resource *r_mem;
	struct rman sc_iomem;
	struct ahci_controller_irq {
		struct ahci_controller *ctlr;
		struct resource *r_irq;
		void *handle;
		int r_irq_rid;
		int mode;
#define AHCI_IRQ_MODE_ALL	0
#define AHCI_IRQ_MODE_AFTER	1
#define AHCI_IRQ_MODE_ONE	2
	} irqs[16];
	uint32_t caps;			/* Controller capabilities */
	uint32_t caps2;			/* Controller capabilities */
	uint32_t capsem;		/* Controller capabilities */
	uint32_t emloc;			/* EM buffer location */
	int quirks;
	int numirqs;
	int channels;
	int ichannels;
	int ccc;			/* CCC timeout */
	int cccv;			/* CCC vector */
	int direct;			/* Direct command completion */
	int msi;			/* MSI interrupts */
	struct {
		void (*function)(void *);
		void *argument;
	} interrupt[AHCI_MAX_PORTS];
};

enum ahci_err_type {
	AHCI_ERR_NONE,		/* No error */
	AHCI_ERR_INVALID,	/* Error detected by us before submitting. */
	AHCI_ERR_INNOCENT,	/* Innocent victim. */
	AHCI_ERR_TFE,		/* Task File Error. */
	AHCI_ERR_SATA,		/* SATA error. */
	AHCI_ERR_TIMEOUT,	/* Command execution timeout. */
	AHCI_ERR_NCQ,		/* NCQ command error.  CCB should be put on hold
				 * until READ LOG is executed to reveal error.
*/ }; /* macros to hide busspace uglyness */ #define ATA_INB(res, offset) \ bus_read_1((res), (offset)) #define ATA_INW(res, offset) \ bus_read_2((res), (offset)) #define ATA_INL(res, offset) \ bus_read_4((res), (offset)) #define ATA_INSW(res, offset, addr, count) \ bus_read_multi_2((res), (offset), (addr), (count)) #define ATA_INSW_STRM(res, offset, addr, count) \ bus_read_multi_stream_2((res), (offset), (addr), (count)) #define ATA_INSL(res, offset, addr, count) \ bus_read_multi_4((res), (offset), (addr), (count)) #define ATA_INSL_STRM(res, offset, addr, count) \ bus_read_multi_stream_4((res), (offset), (addr), (count)) #define ATA_OUTB(res, offset, value) \ bus_write_1((res), (offset), (value)) #define ATA_OUTW(res, offset, value) \ bus_write_2((res), (offset), (value)) #define ATA_OUTL(res, offset, value) \ bus_write_4((res), (offset), (value)) #define ATA_OUTSW(res, offset, addr, count) \ bus_write_multi_2((res), (offset), (addr), (count)) #define ATA_OUTSW_STRM(res, offset, addr, count) \ bus_write_multi_stream_2((res), (offset), (addr), (count)) #define ATA_OUTSL(res, offset, addr, count) \ bus_write_multi_4((res), (offset), (addr), (count)) #define ATA_OUTSL_STRM(res, offset, addr, count) \ bus_write_multi_stream_4((res), (offset), (addr), (count)) Index: head/sys/dev/bktr/bktr_core.c =================================================================== --- head/sys/dev/bktr/bktr_core.c (revision 258779) +++ head/sys/dev/bktr/bktr_core.c (revision 258780) @@ -1,4318 +1,4318 @@ /*- * 1. Redistributions of source code must retain the * Copyright (c) 1997 Amancio Hasty, 1999 Roger Hardiman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Amancio Hasty and * Roger Hardiman * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * 1. Redistributions of source code must retain the * Copyright (c) 1995 Mark Tinguely and Jim Lowe * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Tinguely and Jim Lowe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * This is part of the Driver for Video Capture Cards (Frame grabbers)
 * and TV Tuner cards using the Brooktree Bt848, Bt848A, Bt849A, Bt878, Bt879
 * chipset.
 * Copyright Roger Hardiman and Amancio Hasty.
 *
 * bktr_core : This deals with the Bt848/849/878/879 PCI Frame Grabber,
 * Handles all the open, close, ioctl and read userland calls.
 * Sets the Bt848 registers and generates RISC programs.
 * Controls the i2c bus and GPIO interface.
 * Contains the interface to the kernel.
 * (eg probe/attach and open/close/ioctl)
 */

/* The Brooktree BT848 driver is based upon Mark Tinguely and Jim Lowe's
   driver for the Matrox Meteor PCI card.  The Philips SAA 7116 and SAA 7196
   are very different chipsets than the BT848.  The original copyright notice
   by Mark and Jim is included mostly to honor their fantastic work in the
   Matrox Meteor driver!
*/ #include "opt_bktr.h" /* Include any kernel config options */ #if ( \ (defined(__FreeBSD__)) \ || (defined(__bsdi__)) \ || (defined(__OpenBSD__)) \ || (defined(__NetBSD__)) \ ) /*******************/ /* *** FreeBSD *** */ /*******************/ #ifdef __FreeBSD__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* used by smbus and newbus */ #if (__FreeBSD_version < 500000) #include /* for DELAY */ #define PROC_LOCK(p) #define PROC_UNLOCK(p) #include #else #include #endif #include #include #include #include /* extensions to ioctl_meteor.h */ #include #include #include #include #include #include #if defined(BKTR_FREEBSD_MODULE) #include #endif #if defined(BKTR_USE_FREEBSD_SMBUS) #include #include #include #include "smbus_if.h" #include "iicbus_if.h" #endif const char * bktr_name(bktr_ptr_t bktr) { return bktr->bktr_xname; } #endif /* __FreeBSD__ */ /****************/ /* *** BSDI *** */ /****************/ #ifdef __bsdi__ #define PROC_LOCK(p) #define PROC_UNLOCK(p) #endif /* __bsdi__ */ /**************************/ /* *** OpenBSD/NetBSD *** */ /**************************/ #if defined(__NetBSD__) || defined(__OpenBSD__) #include #include #include #include #include #ifdef __NetBSD__ #include #else #include #include #include #include #endif #include /* uintptr_t */ #include #include #include #include #include #include #include static int bt848_format = -1; const char * bktr_name(bktr_ptr_t bktr) { return (bktr->bktr_dev.dv_xname); } #define PROC_LOCK(p) #define PROC_UNLOCK(p) #endif /* __NetBSD__ || __OpenBSD__ */ typedef u_char bool_t; #define BKTRPRI (PZERO+8)|PCATCH #define VBIPRI (PZERO-4)|PCATCH /* * memory allocated for DMA programs */ #define DMA_PROG_ALLOC (8 * PAGE_SIZE) /* When to split a dma transfer , the bt848 has timing as well as dma transfer size limitations so that we have to split dma transfers into two dma requests */ #define DMA_BT848_SPLIT 319*2 /* * Allocate enough memory for: * 768x576 RGB 16 or YUV (16 storage bits/pixel) = 884736 = 216 pages * * You may override this using the options "BROOKTREE_ALLOC_PAGES=value" * in your kernel configuration file. */ #ifndef BROOKTREE_ALLOC_PAGES #define BROOKTREE_ALLOC_PAGES 217*4 #endif #define BROOKTREE_ALLOC (BROOKTREE_ALLOC_PAGES * PAGE_SIZE) /* Definitions for VBI capture. * There are 16 VBI lines in a PAL video field (32 in a frame), * and we take 2044 samples from each line (placed in a 2048 byte buffer * for alignment). * VBI lines are held in a circular buffer before being read by a * user program from /dev/vbi. */ #define MAX_VBI_LINES 16 /* Maximum for all vidoe formats */ #define VBI_LINE_SIZE 2048 /* Store upto 2048 bytes per line */ #define VBI_BUFFER_ITEMS 20 /* Number of frames we buffer */ #define VBI_DATA_SIZE (VBI_LINE_SIZE * MAX_VBI_LINES * 2) #define VBI_BUFFER_SIZE (VBI_DATA_SIZE * VBI_BUFFER_ITEMS) /* Defines for fields */ #define ODD_F 0x01 #define EVEN_F 0x02 /* * Parameters describing size of transmitted image. */ static struct format_params format_params[] = { /* # define BT848_IFORM_F_AUTO (0x0) - don't matter. 
*/ { 525, 26, 480, 910, 135, 754, 640, 780, 30, 0x68, 0x5d, BT848_IFORM_X_AUTO, 12, 1600 }, /* # define BT848_IFORM_F_NTSCM (0x1) */ { 525, 26, 480, 910, 135, 754, 640, 780, 30, 0x68, 0x5d, BT848_IFORM_X_XT0, 12, 1600 }, /* # define BT848_IFORM_F_NTSCJ (0x2) */ { 525, 22, 480, 910, 135, 754, 640, 780, 30, 0x68, 0x5d, BT848_IFORM_X_XT0, 12, 1600 }, /* # define BT848_IFORM_F_PALBDGHI (0x3) */ { 625, 32, 576, 1135, 186, 924, 768, 944, 25, 0x7f, 0x72, BT848_IFORM_X_XT1, 16, 2044 }, /* # define BT848_IFORM_F_PALM (0x4) */ { 525, 22, 480, 910, 135, 754, 640, 780, 30, 0x68, 0x5d, BT848_IFORM_X_XT0, 12, 1600 }, /* # define BT848_IFORM_F_PALN (0x5) */ { 625, 32, 576, 1135, 186, 924, 768, 944, 25, 0x7f, 0x72, BT848_IFORM_X_XT1, 16, 2044 }, /* # define BT848_IFORM_F_SECAM (0x6) */ { 625, 32, 576, 1135, 186, 924, 768, 944, 25, 0x7f, 0xa0, BT848_IFORM_X_XT1, 16, 2044 }, /* # define BT848_IFORM_F_RSVD (0x7) - ???? */ { 625, 32, 576, 1135, 186, 924, 768, 944, 25, 0x7f, 0x72, BT848_IFORM_X_XT0, 16, 2044 }, }; /* * Table of supported Pixel Formats */ static struct meteor_pixfmt_internal { struct meteor_pixfmt public; u_int color_fmt; } pixfmt_table[] = { { { 0, METEOR_PIXTYPE_RGB, 2, { 0x7c00, 0x03e0, 0x001f }, 0,0 }, 0x33 }, { { 0, METEOR_PIXTYPE_RGB, 2, { 0x7c00, 0x03e0, 0x001f }, 1,0 }, 0x33 }, { { 0, METEOR_PIXTYPE_RGB, 2, { 0xf800, 0x07e0, 0x001f }, 0,0 }, 0x22 }, { { 0, METEOR_PIXTYPE_RGB, 2, { 0xf800, 0x07e0, 0x001f }, 1,0 }, 0x22 }, { { 0, METEOR_PIXTYPE_RGB, 3, { 0xff0000,0x00ff00,0x0000ff }, 1,0 }, 0x11 }, { { 0, METEOR_PIXTYPE_RGB, 4, { 0xff0000,0x00ff00,0x0000ff }, 0,0 }, 0x00 }, { { 0, METEOR_PIXTYPE_RGB, 4, { 0xff0000,0x00ff00,0x0000ff }, 0,1 }, 0x00 }, { { 0, METEOR_PIXTYPE_RGB, 4, { 0xff0000,0x00ff00,0x0000ff }, 1,0 }, 0x00 }, { { 0, METEOR_PIXTYPE_RGB, 4, { 0xff0000,0x00ff00,0x0000ff }, 1,1 }, 0x00 }, { { 0, METEOR_PIXTYPE_YUV, 2, { 0xff0000,0x00ff00,0x0000ff }, 1,1 }, 0x88 }, { { 0, METEOR_PIXTYPE_YUV_PACKED, 2, { 0xff0000,0x00ff00,0x0000ff }, 0,1 }, 0x44 }, { { 0, METEOR_PIXTYPE_YUV_12, 2, { 0xff0000,0x00ff00,0x0000ff }, 1,1 }, 0x88 }, }; #define PIXFMT_TABLE_SIZE ( sizeof(pixfmt_table) / sizeof(pixfmt_table[0]) ) /* * Table of Meteor-supported Pixel Formats (for SETGEO compatibility) */ /* FIXME: Also add YUV_422 and YUV_PACKED as well */ static struct { u_long meteor_format; struct meteor_pixfmt public; } meteor_pixfmt_table[] = { { METEOR_GEO_YUV_12, { 0, METEOR_PIXTYPE_YUV_12, 2, { 0xff0000,0x00ff00,0x0000ff }, 1,1 } }, /* FIXME: Should byte swap flag be on for this one; negative in drvr? 
*/ { METEOR_GEO_YUV_422, { 0, METEOR_PIXTYPE_YUV, 2, { 0xff0000,0x00ff00,0x0000ff }, 1,1 } }, { METEOR_GEO_YUV_PACKED, { 0, METEOR_PIXTYPE_YUV_PACKED, 2, { 0xff0000,0x00ff00,0x0000ff }, 0,1 } }, { METEOR_GEO_RGB16, { 0, METEOR_PIXTYPE_RGB, 2, { 0x7c00, 0x03e0, 0x001f }, 0, 0 } }, { METEOR_GEO_RGB24, { 0, METEOR_PIXTYPE_RGB, 4, { 0xff0000, 0x00ff00, 0x0000ff }, 0, 0 } }, }; #define METEOR_PIXFMT_TABLE_SIZE ( sizeof(meteor_pixfmt_table) / \ sizeof(meteor_pixfmt_table[0]) ) #define BSWAP (BT848_COLOR_CTL_BSWAP_ODD | BT848_COLOR_CTL_BSWAP_EVEN) #define WSWAP (BT848_COLOR_CTL_WSWAP_ODD | BT848_COLOR_CTL_WSWAP_EVEN) /* sync detect threshold */ #if 0 #define SYNC_LEVEL (BT848_ADC_RESERVED | \ BT848_ADC_CRUSH) /* threshold ~125 mV */ #else #define SYNC_LEVEL (BT848_ADC_RESERVED | \ BT848_ADC_SYNC_T) /* threshold ~75 mV */ #endif /* debug utility for holding previous INT_STAT contents */ #define STATUS_SUM static u_long status_sum = 0; /* * defines to make certain bit-fiddles understandable */ #define FIFO_ENABLED BT848_DMA_CTL_FIFO_EN #define RISC_ENABLED BT848_DMA_CTL_RISC_EN #define FIFO_RISC_ENABLED (BT848_DMA_CTL_FIFO_EN | BT848_DMA_CTL_RISC_EN) #define FIFO_RISC_DISABLED 0 #define ALL_INTS_DISABLED 0 #define ALL_INTS_CLEARED 0xffffffff #define CAPTURE_OFF 0 #define BIT_SEVEN_HIGH (1<<7) #define BIT_EIGHT_HIGH (1<<8) #define I2C_BITS (BT848_INT_RACK | BT848_INT_I2CDONE) #define TDEC_BITS (BT848_INT_FDSR | BT848_INT_FBUS) static int oformat_meteor_to_bt( u_long format ); static u_int pixfmt_swap_flags( int pixfmt ); /* * bt848 RISC programming routines. */ #ifdef BT848_DUMP static int dump_bt848( bktr_ptr_t bktr ); #endif static void yuvpack_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ); static void yuv422_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ); static void yuv12_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ); static void rgb_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ); static void rgb_vbi_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ); static void build_dma_prog( bktr_ptr_t bktr, char i_flag ); static bool_t getline(bktr_reg_t *, int); static bool_t notclipped(bktr_reg_t * , int , int); static bool_t split(bktr_reg_t *, volatile uint32_t **, int, u_long, int, volatile u_char ** , int ); static void start_capture( bktr_ptr_t bktr, unsigned type ); static void set_fps( bktr_ptr_t bktr, u_short fps ); /* * Remote Control Functions */ static void remote_read(bktr_ptr_t bktr, struct bktr_remote *remote); /* * ioctls common to both video & tuner. */ static int common_ioctl( bktr_ptr_t bktr, ioctl_cmd_t cmd, caddr_t arg ); #if !defined(BKTR_USE_FREEBSD_SMBUS) /* * i2c primitives for low level control of i2c bus. Added for MSP34xx control */ static void i2c_start( bktr_ptr_t bktr); static void i2c_stop( bktr_ptr_t bktr); static int i2c_write_byte( bktr_ptr_t bktr, unsigned char data); static int i2c_read_byte( bktr_ptr_t bktr, unsigned char *data, int last ); #endif /* * the common attach code, used by all OS versions. 
*/ void common_bktr_attach( bktr_ptr_t bktr, int unit, u_long pci_id, u_int rev ) { vm_offset_t buf = 0; int need_to_allocate_memory = 1; #ifdef BKTR_NEW_MSP34XX_DRIVER int err; #endif /***************************************/ /* *** OS Specific memory routines *** */ /***************************************/ #if defined(__NetBSD__) || defined(__OpenBSD__) /* allocate space for dma program */ bktr->dma_prog = get_bktr_mem(bktr, &bktr->dm_prog, DMA_PROG_ALLOC); bktr->odd_dma_prog = get_bktr_mem(bktr, &bktr->dm_oprog, DMA_PROG_ALLOC); /* allocate space for the VBI buffer */ bktr->vbidata = get_bktr_mem(bktr, &bktr->dm_vbidata, VBI_DATA_SIZE); bktr->vbibuffer = get_bktr_mem(bktr, &bktr->dm_vbibuffer, VBI_BUFFER_SIZE); /* allocate space for pixel buffer */ if ( BROOKTREE_ALLOC ) buf = get_bktr_mem(bktr, &bktr->dm_mem, BROOKTREE_ALLOC); else buf = 0; #endif #if defined(__FreeBSD__) || defined(__bsdi__) /* If this is a module, check if there is any currently saved contiguous memory */ #if defined(BKTR_FREEBSD_MODULE) if (bktr_has_stored_addresses(unit) == 1) { /* recover the addresses */ bktr->dma_prog = bktr_retrieve_address(unit, BKTR_MEM_DMA_PROG); bktr->odd_dma_prog = bktr_retrieve_address(unit, BKTR_MEM_ODD_DMA_PROG); bktr->vbidata = bktr_retrieve_address(unit, BKTR_MEM_VBIDATA); bktr->vbibuffer = bktr_retrieve_address(unit, BKTR_MEM_VBIBUFFER); buf = bktr_retrieve_address(unit, BKTR_MEM_BUF); need_to_allocate_memory = 0; } #endif if (need_to_allocate_memory == 1) { /* allocate space for dma program */ bktr->dma_prog = get_bktr_mem(unit, DMA_PROG_ALLOC); bktr->odd_dma_prog = get_bktr_mem(unit, DMA_PROG_ALLOC); /* allocte space for the VBI buffer */ bktr->vbidata = get_bktr_mem(unit, VBI_DATA_SIZE); bktr->vbibuffer = get_bktr_mem(unit, VBI_BUFFER_SIZE); /* allocate space for pixel buffer */ if ( BROOKTREE_ALLOC ) buf = get_bktr_mem(unit, BROOKTREE_ALLOC); else buf = 0; } #endif /* FreeBSD or BSDi */ #ifdef USE_VBIMUTEX mtx_init(&bktr->vbimutex, "bktr vbi lock", NULL, MTX_DEF); #endif /* If this is a module, save the current contiguous memory */ #if defined(BKTR_FREEBSD_MODULE) bktr_store_address(unit, BKTR_MEM_DMA_PROG, bktr->dma_prog); bktr_store_address(unit, BKTR_MEM_ODD_DMA_PROG, bktr->odd_dma_prog); bktr_store_address(unit, BKTR_MEM_VBIDATA, bktr->vbidata); bktr_store_address(unit, BKTR_MEM_VBIBUFFER, bktr->vbibuffer); bktr_store_address(unit, BKTR_MEM_BUF, buf); #endif if ( bootverbose ) { printf("%s: buffer size %d, addr %p\n", bktr_name(bktr), (int)BROOKTREE_ALLOC, (void *)(uintptr_t)vtophys(buf)); } if ( buf != 0 ) { bktr->bigbuf = buf; bktr->alloc_pages = BROOKTREE_ALLOC_PAGES; bzero((caddr_t) bktr->bigbuf, BROOKTREE_ALLOC); } else { bktr->alloc_pages = 0; } bktr->flags = METEOR_INITALIZED | METEOR_AUTOMODE | METEOR_DEV0 | METEOR_RGB16; bktr->dma_prog_loaded = FALSE; bktr->cols = 640; bktr->rows = 480; bktr->frames = 1; /* one frame */ bktr->format = METEOR_GEO_RGB16; bktr->pixfmt = oformat_meteor_to_bt( bktr->format ); bktr->pixfmt_compat = TRUE; bktr->vbiinsert = 0; bktr->vbistart = 0; bktr->vbisize = 0; bktr->vbiflags = 0; /* using the pci device id and revision id */ /* and determine the card type */ if (PCI_VENDOR(pci_id) == PCI_VENDOR_BROOKTREE) { switch (PCI_PRODUCT(pci_id)) { case PCI_PRODUCT_BROOKTREE_BT848: if (rev == 0x12) bktr->id = BROOKTREE_848A; else bktr->id = BROOKTREE_848; break; case PCI_PRODUCT_BROOKTREE_BT849: bktr->id = BROOKTREE_849A; break; case PCI_PRODUCT_BROOKTREE_BT878: bktr->id = BROOKTREE_878; break; case PCI_PRODUCT_BROOKTREE_BT879: bktr->id = 
BROOKTREE_879;
			break;
		}
	};

	bktr->clr_on_start = FALSE;

	/* defaults for the tuner section of the card */
	bktr->tflags = TUNER_INITALIZED;
	bktr->tuner.frequency = 0;
	bktr->tuner.channel = 0;
	bktr->tuner.chnlset = DEFAULT_CHNLSET;
	bktr->tuner.afc = 0;
	bktr->tuner.radio_mode = 0;
	bktr->audio_mux_select = 0;
	bktr->audio_mute_state = FALSE;
	bktr->bt848_card = -1;
	bktr->bt848_tuner = -1;
	bktr->reverse_mute = -1;
	bktr->slow_msp_audio = 0;
	bktr->msp_use_mono_source = 0;
	bktr->msp_source_selected = -1;
	bktr->audio_mux_present = 1;

#if defined(__FreeBSD__)
#ifdef BKTR_NEW_MSP34XX_DRIVER
	/* get hint on short programming of the msp34xx, so we know */
	/* if the decision about which thread to start should be overridden */
	if ( (err = resource_int_value("bktr", unit, "mspsimple",
			&(bktr->mspsimple)) ) != 0 )
		bktr->mspsimple = -1;	/* fall back to default */
#endif
#endif

	probeCard( bktr, TRUE, unit );

	/* Initialise any MSP34xx or TDA98xx audio chips */
	init_audio_devices( bktr );

#ifdef BKTR_NEW_MSP34XX_DRIVER
	/* set up the kernel thread */
	err = msp_attach( bktr );
	if ( err != 0 ) /* error doing kernel thread stuff, disable msp3400c */
		bktr->card.msp3400c = 0;
#endif
}

/* Copy the vbi lines from 'vbidata' into the circular buffer, 'vbibuffer'.
 * The circular buffer holds 'n' fixed size data blocks.
 * vbisize is the number of bytes in the circular buffer
 * vbiread is the point where we read data out of the circular buffer
 * vbiinsert is the point where we insert data into the circular buffer
 */
static void
vbidecode(bktr_ptr_t bktr)
{
	unsigned char *dest;
	unsigned int *seq_dest;

	/* Check if there is room in the buffer to insert the data. */
	if (bktr->vbisize + VBI_DATA_SIZE > VBI_BUFFER_SIZE)
		return;

	/* Copy the VBI data into the next free slot in the buffer. */
	/* 'dest' is the point in vbibuffer where we want to insert new data */
	dest = (unsigned char *)bktr->vbibuffer + bktr->vbiinsert;
	memcpy(dest, (unsigned char*)bktr->vbidata, VBI_DATA_SIZE);

	/* Write the VBI sequence number to the end of the vbi data */
	/* This is used by the AleVT teletext program */
	seq_dest = (unsigned int *)((unsigned char *)bktr->vbibuffer
			+ bktr->vbiinsert
			+ (VBI_DATA_SIZE - sizeof(bktr->vbi_sequence_number)));
	*seq_dest = bktr->vbi_sequence_number;

	/* And increase the VBI sequence number */
	/* This can wrap around */
	bktr->vbi_sequence_number++;

	/* Increment the vbiinsert pointer */
	/* This can wrap around */
	bktr->vbiinsert += VBI_DATA_SIZE;
	bktr->vbiinsert = (bktr->vbiinsert % VBI_BUFFER_SIZE);

	/* And increase the amount of vbi data in the buffer */
	bktr->vbisize = bktr->vbisize + VBI_DATA_SIZE;
}

/*
 * the common interrupt handler.
 * Returns 0 or 1 depending on whether the interrupt was handled.
 * In the OS specific section, bktr_intr() is defined which calls this
 * common interrupt handler.
 */
int
common_bktr_intr( void *arg )
{
	bktr_ptr_t bktr;
	u_long bktr_status;
	u_char dstatus;
	u_long field;
	u_long w_field;
	u_long req_field;

	bktr = (bktr_ptr_t) arg;

	/*
	 * check to see if any interrupts are unmasked on this device. If
	 * none are, then we likely got here by way of being on a PCI shared
	 * interrupt dispatch list.
	 */
	if (INL(bktr, BKTR_INT_MASK) == ALL_INTS_DISABLED)
		return 0;

	/* bail out now, before we do something we shouldn't */
	if (!(bktr->flags & METEOR_OPEN)) {
		OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED);
		OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED);
		/* return; ??
*/ } /* record and clear the INTerrupt status bits */ bktr_status = INL(bktr, BKTR_INT_STAT); OUTL(bktr, BKTR_INT_STAT, bktr_status & ~I2C_BITS); /* don't touch i2c */ /* record and clear the device status register */ dstatus = INB(bktr, BKTR_DSTATUS); OUTB(bktr, BKTR_DSTATUS, 0x00); #if defined( STATUS_SUM ) /* add any new device status or INTerrupt status bits */ status_sum |= (bktr_status & ~(BT848_INT_RSV0|BT848_INT_RSV1)); status_sum |= ((dstatus & (BT848_DSTATUS_COF|BT848_DSTATUS_LOF)) << 6); #endif /* STATUS_SUM */ /* printf( "%s: STATUS %x %x %x \n", bktr_name(bktr), dstatus, bktr_status, INL(bktr, BKTR_RISC_COUNT) ); */ /* if risc was disabled re-start process again */ /* if there was one of the following errors re-start again */ if ( !(bktr_status & BT848_INT_RISC_EN) || ((bktr_status &(/* BT848_INT_FBUS | */ /* BT848_INT_FTRGT | */ /* BT848_INT_FDSR | */ BT848_INT_PPERR | BT848_INT_RIPERR | BT848_INT_PABORT | BT848_INT_OCERR | BT848_INT_SCERR) ) != 0) || ((INB(bktr, BKTR_TDEC) == 0) && (bktr_status & TDEC_BITS)) ) { u_short tdec_save = INB(bktr, BKTR_TDEC); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); OUTB(bktr, BKTR_CAP_CTL, CAPTURE_OFF); OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); /* Reset temporal decimation counter */ OUTB(bktr, BKTR_TDEC, 0); OUTB(bktr, BKTR_TDEC, tdec_save); /* Reset to no-fields captured state */ if (bktr->flags & (METEOR_CONTIN | METEOR_SYNCAP)) { switch(bktr->flags & METEOR_ONLY_FIELDS_MASK) { case METEOR_ONLY_ODD_FIELDS: bktr->flags |= METEOR_WANT_ODD; break; case METEOR_ONLY_EVEN_FIELDS: bktr->flags |= METEOR_WANT_EVEN; break; default: bktr->flags |= METEOR_WANT_MASK; break; } } OUTL(bktr, BKTR_RISC_STRT_ADD, vtophys(bktr->dma_prog)); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_ENABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, bktr->capcontrol); OUTL(bktr, BKTR_INT_MASK, BT848_INT_MYSTERYBIT | BT848_INT_RISCI | BT848_INT_VSYNC | BT848_INT_FMTCHG); OUTB(bktr, BKTR_CAP_CTL, bktr->bktr_cap_ctl); return 1; } /* If this is not a RISC program interrupt, return */ if (!(bktr_status & BT848_INT_RISCI)) return 0; /** printf( "%s: intr status %x %x %x\n", bktr_name(bktr), bktr_status, dstatus, INL(bktr, BKTR_RISC_COUNT) ); */ /* * Disable future interrupts if a capture mode is not selected. * This can happen when we are in the process of closing or * changing capture modes, otherwise it shouldn't happen. */ if (!(bktr->flags & METEOR_CAP_MASK)) OUTB(bktr, BKTR_CAP_CTL, CAPTURE_OFF); /* Determine which field generated this interrupt */ field = ( bktr_status & BT848_INT_FIELD ) ? EVEN_F : ODD_F; /* * Process the VBI data if it is being captured. We do this once * both Odd and Even VBI data is captured. Therefore we do this * in the Even field interrupt handler. 
*/ LOCK_VBI(bktr); if ( (bktr->vbiflags & VBI_CAPTURE) &&(bktr->vbiflags & VBI_OPEN) &&(field==EVEN_F)) { /* Put VBI data into circular buffer */ vbidecode(bktr); /* If someone is blocked on reading from /dev/vbi, wake them */ if (bktr->vbi_read_blocked) { bktr->vbi_read_blocked = FALSE; wakeup(VBI_SLEEP); } /* If someone has a select() on /dev/vbi, inform them */ if (SEL_WAITING(&bktr->vbi_select)) { selwakeuppri(&bktr->vbi_select, VBIPRI); } } UNLOCK_VBI(bktr); /* * Register the completed field * (For dual-field mode, require fields from the same frame) */ switch ( bktr->flags & METEOR_WANT_MASK ) { case METEOR_WANT_ODD : w_field = ODD_F ; break; case METEOR_WANT_EVEN : w_field = EVEN_F ; break; default : w_field = (ODD_F|EVEN_F); break; } switch ( bktr->flags & METEOR_ONLY_FIELDS_MASK ) { case METEOR_ONLY_ODD_FIELDS : req_field = ODD_F ; break; case METEOR_ONLY_EVEN_FIELDS : req_field = EVEN_F ; break; default : req_field = (ODD_F|EVEN_F); break; } if (( field == EVEN_F ) && ( w_field == EVEN_F )) bktr->flags &= ~METEOR_WANT_EVEN; else if (( field == ODD_F ) && ( req_field == ODD_F ) && ( w_field == ODD_F )) bktr->flags &= ~METEOR_WANT_ODD; else if (( field == ODD_F ) && ( req_field == (ODD_F|EVEN_F) ) && ( w_field == (ODD_F|EVEN_F) )) bktr->flags &= ~METEOR_WANT_ODD; else if (( field == ODD_F ) && ( req_field == (ODD_F|EVEN_F) ) && ( w_field == ODD_F )) { bktr->flags &= ~METEOR_WANT_ODD; bktr->flags |= METEOR_WANT_EVEN; } else { /* We're out of sync. Start over. */ if (bktr->flags & (METEOR_CONTIN | METEOR_SYNCAP)) { switch(bktr->flags & METEOR_ONLY_FIELDS_MASK) { case METEOR_ONLY_ODD_FIELDS: bktr->flags |= METEOR_WANT_ODD; break; case METEOR_ONLY_EVEN_FIELDS: bktr->flags |= METEOR_WANT_EVEN; break; default: bktr->flags |= METEOR_WANT_MASK; break; } } return 1; } /* * If we have a complete frame. */ if (!(bktr->flags & METEOR_WANT_MASK)) { bktr->frames_captured++; /* * post the completion time. */ if (bktr->flags & METEOR_WANT_TS) { struct timeval *ts; if ((u_int) bktr->alloc_pages * PAGE_SIZE <= (bktr->frame_size + sizeof(struct timeval))) { ts =(struct timeval *)bktr->bigbuf + bktr->frame_size; /* doesn't work in synch mode except * for first frame */ /* XXX */ microtime(ts); } } /* * Wake up the user in single capture mode. */ if (bktr->flags & METEOR_SINGLE) { /* stop dma */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); /* disable risc, leave fifo running */ OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_ENABLED); wakeup(BKTR_SLEEP); } /* * If the user requested to be notified via signal, * let them know the frame is complete. */ if (bktr->proc != NULL) { PROC_LOCK(bktr->proc); kern_psignal( bktr->proc, bktr->signal); PROC_UNLOCK(bktr->proc); } /* * Reset the want flags if in continuous or * synchronous capture mode. */ /* * XXX NOTE (Luigi): * currently we only support 3 capture modes: odd only, even only, * odd+even interlaced (odd field first). A fourth mode (non interlaced, * either even OR odd) could provide 60 (50 for PAL) pictures per * second, but it would require this routine to toggle the desired frame * each time, and one more different DMA program for the Bt848. * As a consequence, this fourth mode is currently unsupported. 
*/ if (bktr->flags & (METEOR_CONTIN | METEOR_SYNCAP)) { switch(bktr->flags & METEOR_ONLY_FIELDS_MASK) { case METEOR_ONLY_ODD_FIELDS: bktr->flags |= METEOR_WANT_ODD; break; case METEOR_ONLY_EVEN_FIELDS: bktr->flags |= METEOR_WANT_EVEN; break; default: bktr->flags |= METEOR_WANT_MASK; break; } } } return 1; } /* * */ extern int bt848_format; /* used to set the default format, PAL or NTSC */ int video_open( bktr_ptr_t bktr ) { int frame_rate, video_format=0; if (bktr->flags & METEOR_OPEN) /* device is busy */ return( EBUSY ); bktr->flags |= METEOR_OPEN; #ifdef BT848_DUMP dump_bt848( bt848 ); #endif bktr->clr_on_start = FALSE; OUTB(bktr, BKTR_DSTATUS, 0x00); /* clear device status reg. */ OUTB(bktr, BKTR_ADC, SYNC_LEVEL); #if defined(BKTR_SYSTEM_DEFAULT) && BKTR_SYSTEM_DEFAULT == BROOKTREE_PAL video_format = 0; #else video_format = 1; #endif if (bt848_format == 0 ) video_format = 0; if (bt848_format == 1 ) video_format = 1; if (video_format == 1 ) { OUTB(bktr, BKTR_IFORM, BT848_IFORM_F_NTSCM); bktr->format_params = BT848_IFORM_F_NTSCM; } else { OUTB(bktr, BKTR_IFORM, BT848_IFORM_F_PALBDGHI); bktr->format_params = BT848_IFORM_F_PALBDGHI; } OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | format_params[bktr->format_params].iform_xtsel); /* work around for new Hauppauge 878 cards */ if ((bktr->card.card_id == CARD_HAUPPAUGE) && (bktr->id==BROOKTREE_878 || bktr->id==BROOKTREE_879) ) OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX3); else OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX1); OUTB(bktr, BKTR_ADELAY, format_params[bktr->format_params].adelay); OUTB(bktr, BKTR_BDELAY, format_params[bktr->format_params].bdelay); frame_rate = format_params[bktr->format_params].frame_rate; /* enable PLL mode using 28Mhz crystal for PAL/SECAM users */ if (bktr->xtal_pll_mode == BT848_USE_PLL) { OUTB(bktr, BKTR_TGCTRL, 0); OUTB(bktr, BKTR_PLL_F_LO, 0xf9); OUTB(bktr, BKTR_PLL_F_HI, 0xdc); OUTB(bktr, BKTR_PLL_F_XCI, 0x8e); } bktr->flags = (bktr->flags & ~METEOR_DEV_MASK) | METEOR_DEV0; bktr->max_clip_node = 0; OUTB(bktr, BKTR_COLOR_CTL, BT848_COLOR_CTL_GAMMA | BT848_COLOR_CTL_RGB_DED); OUTB(bktr, BKTR_E_HSCALE_LO, 170); OUTB(bktr, BKTR_O_HSCALE_LO, 170); OUTB(bktr, BKTR_E_DELAY_LO, 0x72); OUTB(bktr, BKTR_O_DELAY_LO, 0x72); OUTB(bktr, BKTR_E_SCLOOP, 0); OUTB(bktr, BKTR_O_SCLOOP, 0); OUTB(bktr, BKTR_VBI_PACK_SIZE, 0); OUTB(bktr, BKTR_VBI_PACK_DEL, 0); bktr->fifo_errors = 0; bktr->dma_errors = 0; bktr->frames_captured = 0; bktr->even_fields_captured = 0; bktr->odd_fields_captured = 0; bktr->proc = NULL; set_fps(bktr, frame_rate); bktr->video.addr = 0; bktr->video.width = 0; bktr->video.banksize = 0; bktr->video.ramsize = 0; bktr->pixfmt_compat = TRUE; bktr->format = METEOR_GEO_RGB16; bktr->pixfmt = oformat_meteor_to_bt( bktr->format ); bktr->capture_area_enabled = FALSE; OUTL(bktr, BKTR_INT_MASK, BT848_INT_MYSTERYBIT); /* if you take this out triton based motherboards will operate unreliably */ return( 0 ); } int vbi_open( bktr_ptr_t bktr ) { LOCK_VBI(bktr); if (bktr->vbiflags & VBI_OPEN) { /* device is busy */ UNLOCK_VBI(bktr); return( EBUSY ); } bktr->vbiflags |= VBI_OPEN; /* reset the VBI circular buffer pointers and clear the buffers */ bktr->vbiinsert = 0; bktr->vbistart = 0; bktr->vbisize = 0; bktr->vbi_sequence_number = 0; bktr->vbi_read_blocked = FALSE; bzero((caddr_t) bktr->vbibuffer, VBI_BUFFER_SIZE); bzero((caddr_t) bktr->vbidata, VBI_DATA_SIZE); UNLOCK_VBI(bktr); return( 0 ); } /* * */ int tuner_open( bktr_ptr_t bktr ) { if ( !(bktr->tflags & TUNER_INITALIZED) ) /* 
device not found */ return( ENXIO ); if ( bktr->tflags & TUNER_OPEN ) /* already open */ return( 0 ); bktr->tflags |= TUNER_OPEN; bktr->tuner.frequency = 0; bktr->tuner.channel = 0; bktr->tuner.chnlset = DEFAULT_CHNLSET; bktr->tuner.afc = 0; bktr->tuner.radio_mode = 0; /* enable drivers on the GPIO port that control the MUXes */ OUTL(bktr, BKTR_GPIO_OUT_EN, INL(bktr, BKTR_GPIO_OUT_EN) | bktr->card.gpio_mux_bits); /* unmute the audio stream */ set_audio( bktr, AUDIO_UNMUTE ); /* Initialise any audio chips, eg MSP34xx or TDA98xx */ init_audio_devices( bktr ); return( 0 ); } /* * */ int video_close( bktr_ptr_t bktr ) { bktr->flags &= ~(METEOR_OPEN | METEOR_SINGLE | METEOR_CAP_MASK | METEOR_WANT_MASK); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); OUTB(bktr, BKTR_CAP_CTL, CAPTURE_OFF); bktr->dma_prog_loaded = FALSE; OUTB(bktr, BKTR_TDEC, 0); OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); /** FIXME: is 0xf magic, wouldn't 0x00 work ??? */ OUTL(bktr, BKTR_SRESET, 0xf); OUTL(bktr, BKTR_INT_STAT, ALL_INTS_CLEARED); return( 0 ); } /* * tuner close handle, * place holder for tuner specific operations on a close. */ int tuner_close( bktr_ptr_t bktr ) { bktr->tflags &= ~TUNER_OPEN; /* mute the audio by switching the mux */ set_audio( bktr, AUDIO_MUTE ); /* disable drivers on the GPIO port that control the MUXes */ OUTL(bktr, BKTR_GPIO_OUT_EN, INL(bktr, BKTR_GPIO_OUT_EN) & ~bktr->card.gpio_mux_bits); return( 0 ); } int vbi_close( bktr_ptr_t bktr ) { LOCK_VBI(bktr); bktr->vbiflags &= ~VBI_OPEN; UNLOCK_VBI(bktr); return( 0 ); } /* * */ int video_read(bktr_ptr_t bktr, int unit, struct cdev *dev, struct uio *uio) { int status; int count; if (bktr->bigbuf == 0) /* no frame buffer allocated (ioctl failed) */ return( ENOMEM ); if (bktr->flags & METEOR_CAP_MASK) return( EIO ); /* already capturing */ OUTB(bktr, BKTR_CAP_CTL, bktr->bktr_cap_ctl); count = bktr->rows * bktr->cols * pixfmt_table[ bktr->pixfmt ].public.Bpp; if ((int) uio->uio_iov->iov_len < count) return( EINVAL ); bktr->flags &= ~(METEOR_CAP_MASK | METEOR_WANT_MASK); /* capture one frame */ start_capture(bktr, METEOR_SINGLE); /* wait for capture to complete */ OUTL(bktr, BKTR_INT_STAT, ALL_INTS_CLEARED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_ENABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, bktr->capcontrol); OUTL(bktr, BKTR_INT_MASK, BT848_INT_MYSTERYBIT | BT848_INT_RISCI | BT848_INT_VSYNC | BT848_INT_FMTCHG); status = tsleep(BKTR_SLEEP, BKTRPRI, "captur", 0); if (!status) /* successful capture */ status = uiomove((caddr_t)bktr->bigbuf, count, uio); else printf ("%s: read: tsleep error %d\n", bktr_name(bktr), status); bktr->flags &= ~(METEOR_SINGLE | METEOR_WANT_MASK); return( status ); } /* * Read VBI data from the vbi circular buffer * The buffer holds vbi data blocks which are the same size * vbiinsert is the position we will insert the next item into the buffer * vbistart is the actual position in the buffer we want to read from * vbisize is the exact number of bytes in the buffer left to read */ int vbi_read(bktr_ptr_t bktr, struct uio *uio, int ioflag) { int readsize, readsize2, start; int status; /* * XXX - vbi_read() should be protected against being re-entered * while it is unlocked for the uiomove. 
*/
	LOCK_VBI(bktr);

	while(bktr->vbisize == 0) {
		if (ioflag & FNDELAY) {
			status = EWOULDBLOCK;
			goto out;
		}

		bktr->vbi_read_blocked = TRUE;
#ifdef USE_VBIMUTEX
		if ((status = msleep(VBI_SLEEP, &bktr->vbimutex, VBIPRI, "vbi",
		    0))) {
			goto out;
		}
#else
		if ((status = tsleep(VBI_SLEEP, VBIPRI, "vbi", 0))) {
			goto out;
		}
#endif
	}

	/* Now we have some data to give to the user */

	/* We cannot read more bytes than there are in
	 * the circular buffer */
	readsize = (int)uio->uio_iov->iov_len;

	if (readsize > bktr->vbisize)
		readsize = bktr->vbisize;

	/* Check if we can read this number of bytes without having
	 * to wrap around the circular buffer */
	if((bktr->vbistart + readsize) >= VBI_BUFFER_SIZE) {
		/* We need to wrap around */
		readsize2 = VBI_BUFFER_SIZE - bktr->vbistart;
		start = bktr->vbistart;
		UNLOCK_VBI(bktr);
		status = uiomove((caddr_t)bktr->vbibuffer + start, readsize2, uio);
		if (status == 0)
			status = uiomove((caddr_t)bktr->vbibuffer,
			    (readsize - readsize2), uio);
	} else {
		UNLOCK_VBI(bktr);
		/* We do not need to wrap around */
		status = uiomove((caddr_t)bktr->vbibuffer + bktr->vbistart,
		    readsize, uio);
	}

	LOCK_VBI(bktr);

	/* Update the number of bytes left to read */
	bktr->vbisize -= readsize;

	/* Update vbistart */
	bktr->vbistart += readsize;
	bktr->vbistart = bktr->vbistart % VBI_BUFFER_SIZE; /* wrap around if needed */

out:
	UNLOCK_VBI(bktr);

	return( status );
}

/*
 * video ioctls
 */
int
video_ioctl( bktr_ptr_t bktr, int unit, ioctl_cmd_t cmd, caddr_t arg, struct thread* td )
{
	volatile u_char		c_temp;
	unsigned int		temp;
	unsigned int		temp_iform;
	unsigned int		error;
	struct meteor_geomet	*geo;
	struct meteor_counts	*counts;
	struct meteor_video	*video;
	struct bktr_capture_area *cap_area;
	vm_offset_t		buf;
	int			i;
	int			sig;
	char			char_temp;

	switch ( cmd ) {

	case BT848SCLIP: /* set clip region */
		bktr->max_clip_node = 0;
		memcpy(&bktr->clip_list, arg, sizeof(bktr->clip_list));

		for (i = 0; i < BT848_MAX_CLIP_NODE; i++) {
			if (bktr->clip_list[i].y_min == 0 &&
			    bktr->clip_list[i].y_max == 0)
				break;
		}
		bktr->max_clip_node = i;

		/* make sure that the list contains a valid clip sequence */
		/* the clip rectangles should be sorted by x, with y as the
		   second-order sort key */
		/* the clip rectangle list is terminated by an entry with
		   both y_min and y_max set to 0 */
		/* to disable clipping, set y_min and y_max to 0 in the first
		   clip rectangle.  The first clip rectangle is clip_list[0].
*/ if (bktr->max_clip_node == 0 && (bktr->clip_list[0].y_min != 0 && bktr->clip_list[0].y_max != 0)) { return EINVAL; } for (i = 0; i < BT848_MAX_CLIP_NODE - 1 ; i++) { if (bktr->clip_list[i].y_min == 0 && bktr->clip_list[i].y_max == 0) { break; } if ( bktr->clip_list[i+1].y_min != 0 && bktr->clip_list[i+1].y_max != 0 && bktr->clip_list[i].x_min > bktr->clip_list[i+1].x_min ) { bktr->max_clip_node = 0; return (EINVAL); } if (bktr->clip_list[i].x_min >= bktr->clip_list[i].x_max || bktr->clip_list[i].y_min >= bktr->clip_list[i].y_max || bktr->clip_list[i].x_min < 0 || bktr->clip_list[i].x_max < 0 || bktr->clip_list[i].y_min < 0 || bktr->clip_list[i].y_max < 0 ) { bktr->max_clip_node = 0; return (EINVAL); } } bktr->dma_prog_loaded = FALSE; break; case METEORSTATUS: /* get Bt848 status */ c_temp = INB(bktr, BKTR_DSTATUS); temp = 0; if (!(c_temp & 0x40)) temp |= METEOR_STATUS_HCLK; if (!(c_temp & 0x10)) temp |= METEOR_STATUS_FIDT; *(u_short *)arg = temp; break; case BT848SFMT: /* set input format */ temp = *(unsigned long*)arg & BT848_IFORM_FORMAT; temp_iform = INB(bktr, BKTR_IFORM); temp_iform &= ~BT848_IFORM_FORMAT; temp_iform &= ~BT848_IFORM_XTSEL; OUTB(bktr, BKTR_IFORM, (temp_iform | temp | format_params[temp].iform_xtsel)); switch( temp ) { case BT848_IFORM_F_AUTO: bktr->flags = (bktr->flags & ~METEOR_FORM_MASK) | METEOR_AUTOMODE; break; case BT848_IFORM_F_NTSCM: case BT848_IFORM_F_NTSCJ: bktr->flags = (bktr->flags & ~METEOR_FORM_MASK) | METEOR_NTSC; OUTB(bktr, BKTR_ADELAY, format_params[temp].adelay); OUTB(bktr, BKTR_BDELAY, format_params[temp].bdelay); bktr->format_params = temp; break; case BT848_IFORM_F_PALBDGHI: case BT848_IFORM_F_PALN: case BT848_IFORM_F_SECAM: case BT848_IFORM_F_RSVD: case BT848_IFORM_F_PALM: bktr->flags = (bktr->flags & ~METEOR_FORM_MASK) | METEOR_PAL; OUTB(bktr, BKTR_ADELAY, format_params[temp].adelay); OUTB(bktr, BKTR_BDELAY, format_params[temp].bdelay); bktr->format_params = temp; break; } bktr->dma_prog_loaded = FALSE; break; case METEORSFMT: /* set input format */ temp_iform = INB(bktr, BKTR_IFORM); temp_iform &= ~BT848_IFORM_FORMAT; temp_iform &= ~BT848_IFORM_XTSEL; switch(*(unsigned long *)arg & METEOR_FORM_MASK ) { case 0: /* default */ case METEOR_FMT_NTSC: bktr->flags = (bktr->flags & ~METEOR_FORM_MASK) | METEOR_NTSC; OUTB(bktr, BKTR_IFORM, temp_iform | BT848_IFORM_F_NTSCM | format_params[BT848_IFORM_F_NTSCM].iform_xtsel); OUTB(bktr, BKTR_ADELAY, format_params[BT848_IFORM_F_NTSCM].adelay); OUTB(bktr, BKTR_BDELAY, format_params[BT848_IFORM_F_NTSCM].bdelay); bktr->format_params = BT848_IFORM_F_NTSCM; break; case METEOR_FMT_PAL: bktr->flags = (bktr->flags & ~METEOR_FORM_MASK) | METEOR_PAL; OUTB(bktr, BKTR_IFORM, temp_iform | BT848_IFORM_F_PALBDGHI | format_params[BT848_IFORM_F_PALBDGHI].iform_xtsel); OUTB(bktr, BKTR_ADELAY, format_params[BT848_IFORM_F_PALBDGHI].adelay); OUTB(bktr, BKTR_BDELAY, format_params[BT848_IFORM_F_PALBDGHI].bdelay); bktr->format_params = BT848_IFORM_F_PALBDGHI; break; case METEOR_FMT_AUTOMODE: bktr->flags = (bktr->flags & ~METEOR_FORM_MASK) | METEOR_AUTOMODE; OUTB(bktr, BKTR_IFORM, temp_iform | BT848_IFORM_F_AUTO | format_params[BT848_IFORM_F_AUTO].iform_xtsel); break; default: return( EINVAL ); } bktr->dma_prog_loaded = FALSE; break; case METEORGFMT: /* get input format */ *(u_long *)arg = bktr->flags & METEOR_FORM_MASK; break; case BT848GFMT: /* get input format */ *(u_long *)arg = INB(bktr, BKTR_IFORM) & BT848_IFORM_FORMAT; break; case METEORSCOUNT: /* (re)set error counts */ counts = (struct meteor_counts *) arg; bktr->fifo_errors 
= counts->fifo_errors; bktr->dma_errors = counts->dma_errors; bktr->frames_captured = counts->frames_captured; bktr->even_fields_captured = counts->even_fields_captured; bktr->odd_fields_captured = counts->odd_fields_captured; break; case METEORGCOUNT: /* get error counts */ counts = (struct meteor_counts *) arg; counts->fifo_errors = bktr->fifo_errors; counts->dma_errors = bktr->dma_errors; counts->frames_captured = bktr->frames_captured; counts->even_fields_captured = bktr->even_fields_captured; counts->odd_fields_captured = bktr->odd_fields_captured; break; case METEORGVIDEO: video = (struct meteor_video *)arg; video->addr = bktr->video.addr; video->width = bktr->video.width; video->banksize = bktr->video.banksize; video->ramsize = bktr->video.ramsize; break; case METEORSVIDEO: video = (struct meteor_video *)arg; bktr->video.addr = video->addr; bktr->video.width = video->width; bktr->video.banksize = video->banksize; bktr->video.ramsize = video->ramsize; break; case METEORSFPS: set_fps(bktr, *(u_short *)arg); break; case METEORGFPS: *(u_short *)arg = bktr->fps; break; case METEORSHUE: /* set hue */ OUTB(bktr, BKTR_HUE, (*(u_char *) arg) & 0xff); break; case METEORGHUE: /* get hue */ *(u_char *)arg = INB(bktr, BKTR_HUE); break; case METEORSBRIG: /* set brightness */ char_temp = ( *(u_char *)arg & 0xff) - 128; OUTB(bktr, BKTR_BRIGHT, char_temp); break; case METEORGBRIG: /* get brightness */ *(u_char *)arg = INB(bktr, BKTR_BRIGHT); break; case METEORSCSAT: /* set chroma saturation */ temp = (int)*(u_char *)arg; OUTB(bktr, BKTR_SAT_U_LO, (temp << 1) & 0xff); OUTB(bktr, BKTR_SAT_V_LO, (temp << 1) & 0xff); OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) & ~(BT848_E_CONTROL_SAT_U_MSB | BT848_E_CONTROL_SAT_V_MSB)); OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) & ~(BT848_O_CONTROL_SAT_U_MSB | BT848_O_CONTROL_SAT_V_MSB)); if ( temp & BIT_SEVEN_HIGH ) { OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) | (BT848_E_CONTROL_SAT_U_MSB | BT848_E_CONTROL_SAT_V_MSB)); OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) | (BT848_O_CONTROL_SAT_U_MSB | BT848_O_CONTROL_SAT_V_MSB)); } break; case METEORGCSAT: /* get chroma saturation */ temp = (INB(bktr, BKTR_SAT_V_LO) >> 1) & 0xff; if ( INB(bktr, BKTR_E_CONTROL) & BT848_E_CONTROL_SAT_V_MSB ) temp |= BIT_SEVEN_HIGH; *(u_char *)arg = (u_char)temp; break; case METEORSCONT: /* set contrast */ temp = (int)*(u_char *)arg & 0xff; temp <<= 1; OUTB(bktr, BKTR_CONTRAST_LO, temp & 0xff); OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) & ~BT848_E_CONTROL_CON_MSB); OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) & ~BT848_O_CONTROL_CON_MSB); OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) | (((temp & 0x100) >> 6 ) & BT848_E_CONTROL_CON_MSB)); OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) | (((temp & 0x100) >> 6 ) & BT848_O_CONTROL_CON_MSB)); break; case METEORGCONT: /* get contrast */ temp = (int)INB(bktr, BKTR_CONTRAST_LO) & 0xff; temp |= ((int)INB(bktr, BKTR_O_CONTROL) & 0x04) << 6; *(u_char *)arg = (u_char)((temp >> 1) & 0xff); break; case BT848SCBUF: /* set Clear-Buffer-on-start flag */ bktr->clr_on_start = (*(int *)arg != 0); break; case BT848GCBUF: /* get Clear-Buffer-on-start flag */ *(int *)arg = (int) bktr->clr_on_start; break; case METEORSSIGNAL: sig = *(int *)arg; /* Historically, applications used METEOR_SIG_MODE_MASK * to reset signal delivery. */ if (sig == METEOR_SIG_MODE_MASK) sig = 0; if (sig < 0 || sig > _SIG_MAXSIG) return (EINVAL); bktr->signal = sig; bktr->proc = sig ? 
td->td_proc : NULL;
		break;

	case METEORGSIGNAL:
		*(int *)arg = bktr->signal;
		break;

	case METEORCAPTUR:
		temp = bktr->flags;
		switch (*(int *) arg) {
		case METEOR_CAP_SINGLE:

			if (bktr->bigbuf==0)	/* no frame buffer allocated */
				return( ENOMEM );
			/* already capturing */
			if (temp & METEOR_CAP_MASK)
				return( EIO );

			start_capture(bktr, METEOR_SINGLE);

			/* wait for capture to complete */
			OUTL(bktr, BKTR_INT_STAT, ALL_INTS_CLEARED);
			OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_ENABLED);
			OUTW(bktr, BKTR_GPIO_DMA_CTL, bktr->capcontrol);

			OUTL(bktr, BKTR_INT_MASK, BT848_INT_MYSTERYBIT |
			    BT848_INT_RISCI |
			    BT848_INT_VSYNC |
			    BT848_INT_FMTCHG);

			OUTB(bktr, BKTR_CAP_CTL, bktr->bktr_cap_ctl);
			error = tsleep(BKTR_SLEEP, BKTRPRI, "captur", hz);
			if (error && (error != ERESTART)) {
				/* Here if we didn't get a complete frame */
#ifdef DIAGNOSTIC
				printf( "%s: ioctl: tsleep error %d %x\n",
					bktr_name(bktr), error,
					INL(bktr, BKTR_RISC_COUNT));
#endif

				/* stop dma */
				OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED);

				/* disable risc, leave fifo running */
				OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_ENABLED);
			}

			bktr->flags &= ~(METEOR_SINGLE|METEOR_WANT_MASK);
			/* FIXME: should we set bt848->int_stat ??? */
			break;

		case METEOR_CAP_CONTINOUS:
			if (bktr->bigbuf==0)	/* no frame buffer allocated */
				return( ENOMEM );
			/* already capturing */
			if (temp & METEOR_CAP_MASK)
				return( EIO );

			start_capture(bktr, METEOR_CONTIN);

			/* Clear the interrupt status register */
			OUTL(bktr, BKTR_INT_STAT, INL(bktr, BKTR_INT_STAT));

			OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_ENABLED);
			OUTW(bktr, BKTR_GPIO_DMA_CTL, bktr->capcontrol);
			OUTB(bktr, BKTR_CAP_CTL, bktr->bktr_cap_ctl);

			OUTL(bktr, BKTR_INT_MASK, BT848_INT_MYSTERYBIT |
			    BT848_INT_RISCI |
			    BT848_INT_VSYNC |
			    BT848_INT_FMTCHG);

#ifdef BT848_DUMP
			dump_bt848( bktr );
#endif
			break;

		case METEOR_CAP_STOP_CONT:
			if (bktr->flags & METEOR_CONTIN) {
				/* turn off capture */
				OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED);
				OUTB(bktr, BKTR_CAP_CTL, CAPTURE_OFF);
				OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED);
				bktr->flags &=
					~(METEOR_CONTIN | METEOR_WANT_MASK);
			}
		}
		break;

	case METEORSETGEO:
		/* can't change parameters while capturing */
		if (bktr->flags & METEOR_CAP_MASK)
			return( EBUSY );

		geo = (struct meteor_geomet *) arg;
		error = 0;

		/* Either even only or odd only may be requested; requesting
		   both at once is an error */
		if ((geo->oformat & METEOR_GEO_ODD_ONLY) &&
			(geo->oformat & METEOR_GEO_EVEN_ONLY)) {
			printf( "%s: ioctl: Geometry odd or even only.\n",
				bktr_name(bktr));
			return( EINVAL );
		}

		/* set/clear even/odd flags */
		if (geo->oformat & METEOR_GEO_ODD_ONLY)
			bktr->flags |= METEOR_ONLY_ODD_FIELDS;
		else
			bktr->flags &= ~METEOR_ONLY_ODD_FIELDS;
		if (geo->oformat & METEOR_GEO_EVEN_ONLY)
			bktr->flags |= METEOR_ONLY_EVEN_FIELDS;
		else
			bktr->flags &= ~METEOR_ONLY_EVEN_FIELDS;

		if (geo->columns <= 0) {
			printf(
			"%s: ioctl: %d: columns must be greater than zero.\n",
				bktr_name(bktr), geo->columns);
			error = EINVAL;
		}
		else if ((geo->columns & 0x3fe) != geo->columns) {
			printf(
			"%s: ioctl: %d: columns too large or not even.\n",
				bktr_name(bktr), geo->columns);
			error = EINVAL;
		}
		if (geo->rows <= 0) {
			printf(
			"%s: ioctl: %d: rows must be greater than zero.\n",
				bktr_name(bktr), geo->rows);
			error = EINVAL;
		}
		else if (((geo->rows & 0x7fe) != geo->rows) ||
			((geo->oformat & METEOR_GEO_FIELD_MASK) &&
				((geo->rows & 0x3fe) != geo->rows)) ) {
			printf(
			"%s: ioctl: %d: rows too large or not even.\n",
				bktr_name(bktr), geo->rows);
			error = EINVAL;
		}
		if (geo->frames > 32) {
			printf("%s: ioctl: too many frames.\n",
				bktr_name(bktr));
			error = EINVAL;
		}
		if (error)
			return( error );

		bktr->dma_prog_loaded = FALSE;
		OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED);

		OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED);
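		/*
		 * Sizing sketch for the allocation below (the worked numbers
		 * assume 4 KB pages): the buffer holds rows * columns *
		 * frames pixels at a worst case of 2 bytes per pixel,
		 * doubled again for RGB24 (up to 4 Bpp), plus one extra page
		 * for the shared meteor_mem structure when more than one
		 * frame is captured.  E.g. 640x480, 2 frames, default depth:
		 *
		 *	640 * 480 * 2 * 2  = 1228800 bytes
		 *	+ PAGE_SIZE (4096) = 1232896 bytes
		 *	btoc(1232896)      = 301 pages
		 */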
		if ((temp=(geo->rows * geo->columns * geo->frames * 2))) {
			if (geo->oformat & METEOR_GEO_RGB24)
				temp = temp * 2;

			/* meteor_mem structure for SYNC Capture */
			if (geo->frames > 1)
				temp += PAGE_SIZE;

			temp = btoc(temp);
			if ((int) temp > bktr->alloc_pages
			    && bktr->video.addr == 0) {

/*****************************/
/* *** OS Dependent code *** */
/*****************************/
#if defined(__NetBSD__) || defined(__OpenBSD__)
				bus_dmamap_t dmamap;

				buf = get_bktr_mem(bktr, &dmamap,
				    temp * PAGE_SIZE);
				if (buf != 0) {
					free_bktr_mem(bktr, bktr->dm_mem,
					    bktr->bigbuf);
					bktr->dm_mem = dmamap;
#else
				buf = get_bktr_mem(unit, temp*PAGE_SIZE);
				if (buf != 0) {
					contigfree(
					    (void *)(uintptr_t)bktr->bigbuf,
					    (bktr->alloc_pages * PAGE_SIZE),
					    M_DEVBUF);
#endif

					bktr->bigbuf = buf;
					bktr->alloc_pages = temp;
					if (bootverbose)
						printf(
						"%s: ioctl: Allocating %d bytes\n",
						bktr_name(bktr),
						(int)(temp*PAGE_SIZE));
				}
				else
					error = ENOMEM;
			}
		}

		if (error)
			return error;

		bktr->rows = geo->rows;
		bktr->cols = geo->columns;
		bktr->frames = geo->frames;

		/* Pixel format (if in meteor pixfmt compatibility mode) */
		if ( bktr->pixfmt_compat ) {
			bktr->format = METEOR_GEO_YUV_422;
			switch (geo->oformat & METEOR_GEO_OUTPUT_MASK) {
			case 0:			/* default */
			case METEOR_GEO_RGB16:
				bktr->format = METEOR_GEO_RGB16;
				break;
			case METEOR_GEO_RGB24:
				bktr->format = METEOR_GEO_RGB24;
				break;
			case METEOR_GEO_YUV_422:
				bktr->format = METEOR_GEO_YUV_422;
				if (geo->oformat & METEOR_GEO_YUV_12)
					bktr->format = METEOR_GEO_YUV_12;
				break;
			case METEOR_GEO_YUV_PACKED:
				bktr->format = METEOR_GEO_YUV_PACKED;
				break;
			}
			bktr->pixfmt = oformat_meteor_to_bt( bktr->format );
		}

		if (bktr->flags & METEOR_CAP_MASK) {

			if (bktr->flags & (METEOR_CONTIN|METEOR_SYNCAP)) {
				switch(bktr->flags & METEOR_ONLY_FIELDS_MASK) {
				case METEOR_ONLY_ODD_FIELDS:
					bktr->flags |= METEOR_WANT_ODD;
					break;
				case METEOR_ONLY_EVEN_FIELDS:
					bktr->flags |= METEOR_WANT_EVEN;
					break;
				default:
					bktr->flags |= METEOR_WANT_MASK;
					break;
				}

				start_capture(bktr, METEOR_CONTIN);
				OUTL(bktr, BKTR_INT_STAT, INL(bktr, BKTR_INT_STAT));
				OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_ENABLED);
				OUTW(bktr, BKTR_GPIO_DMA_CTL, bktr->capcontrol);
				OUTL(bktr, BKTR_INT_MASK, BT848_INT_MYSTERYBIT |
				    BT848_INT_VSYNC |
				    BT848_INT_FMTCHG);
			}
		}
		break;
		/* end of METEORSETGEO */
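	/*
	 * A minimal userland sketch of the METEORSETGEO call handled above
	 * (the file descriptor and values are hypothetical, not part of the
	 * driver):
	 *
	 *	struct meteor_geomet g;
	 *
	 *	g.columns = 640;		   even, at most 1022
	 *	g.rows    = 480;		   even, at most 2046
	 *					   (1022 in field-only modes)
	 *	g.frames  = 1;			   at most 32
	 *	g.oformat = METEOR_GEO_RGB16;
	 *	if (ioctl(fd, METEORSETGEO, &g) < 0)
	 *		err(1, "METEORSETGEO");
	 */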
	/* FIXME.  The Capture Area currently has the following restrictions:

	   GENERAL:
		y_offset may need to be even in interlaced modes

	   RGB24 - Interlaced mode:
		x_size must be greater than or equal to 1.666*METEORSETGEO width (cols)
		y_size must be greater than or equal to METEORSETGEO height (rows)

	   RGB24 - Even Only (or Odd Only) mode:
		x_size must be greater than or equal to 1.666*METEORSETGEO width (cols)
		y_size must be greater than or equal to 2*METEORSETGEO height (rows)

	   YUV12 - Interlaced mode:
		x_size must be greater than or equal to METEORSETGEO width (cols)
		y_size must be greater than or equal to METEORSETGEO height (rows)

	   YUV12 - Even Only (or Odd Only) mode:
		x_size must be greater than or equal to METEORSETGEO width (cols)
		y_size must be greater than or equal to 2*METEORSETGEO height (rows)
	*/

	case BT848_SCAPAREA: /* set capture area of each video frame */
		/* can't change parameters while capturing */
		if (bktr->flags & METEOR_CAP_MASK)
			return( EBUSY );

		cap_area = (struct bktr_capture_area *) arg;
		bktr->capture_area_x_offset = cap_area->x_offset;
		bktr->capture_area_y_offset = cap_area->y_offset;
		bktr->capture_area_x_size   = cap_area->x_size;
		bktr->capture_area_y_size   = cap_area->y_size;
		bktr->capture_area_enabled  = TRUE;

		bktr->dma_prog_loaded = FALSE;
		break;

	case BT848_GCAPAREA: /* get capture area of each video frame */
		cap_area = (struct bktr_capture_area *) arg;
		if (bktr->capture_area_enabled == FALSE) {
			cap_area->x_offset = 0;
			cap_area->y_offset = 0;
			cap_area->x_size   = format_params[
				bktr->format_params].scaled_hactive;
			cap_area->y_size   = format_params[
				bktr->format_params].vactive;
		} else {
			cap_area->x_offset = bktr->capture_area_x_offset;
			cap_area->y_offset = bktr->capture_area_y_offset;
			cap_area->x_size   = bktr->capture_area_x_size;
			cap_area->y_size   = bktr->capture_area_y_size;
		}
		break;

	default:
		return common_ioctl( bktr, cmd, arg );
	}

	return( 0 );
}

/*
 * tuner ioctls
 */
int
tuner_ioctl( bktr_ptr_t bktr, int unit, ioctl_cmd_t cmd, caddr_t arg, struct thread* td )
{
	int		tmp_int;
	int		temp, temp1;
	int		offset;
	int		count;
	u_char		*buf;
	u_long		par;
	u_char		write;
	int		i2c_addr;
	int		i2c_port;
	u_long		data;

	switch ( cmd ) {

	case REMOTE_GETKEY:
		/* Read the last key pressed by the Remote Control */
		if (bktr->remote_control == 0)
			return (EINVAL);
		remote_read(bktr, (struct bktr_remote *)arg);
		break;

#if defined( TUNER_AFC )
	case TVTUNER_SETAFC:
		bktr->tuner.afc = (*(int *)arg != 0);
		break;

	case TVTUNER_GETAFC:
		*(int *)arg = bktr->tuner.afc;
		/* XXX Perhaps use another bit to indicate AFC success? */
		break;
#endif /* TUNER_AFC */

	case TVTUNER_SETCHNL:
		temp_mute( bktr, TRUE );
		temp = tv_channel( bktr, (int)*(unsigned long *)arg );
		if ( temp < 0 ) {
			temp_mute( bktr, FALSE );
			return( EINVAL );
		}
		*(unsigned long *)arg = temp;

		/* after every channel change, we must restart the MSP34xx */
		/* audio chip to reselect NICAM STEREO or MONO audio */
		if ( bktr->card.msp3400c )
			msp_autodetect( bktr );

		/* after every channel change, we must restart the DPL35xx */
		if ( bktr->card.dpl3518a )
			dpl_autodetect( bktr );

		temp_mute( bktr, FALSE );
		break;

	case TVTUNER_GETCHNL:
		*(unsigned long *)arg = bktr->tuner.channel;
		break;

	case TVTUNER_SETTYPE:
		temp = *(unsigned long *)arg;
		if ( (temp < CHNLSET_MIN) || (temp > CHNLSET_MAX) )
			return( EINVAL );
		bktr->tuner.chnlset = temp;
		break;

	case TVTUNER_GETTYPE:
		*(unsigned long *)arg = bktr->tuner.chnlset;
		break;

	case TVTUNER_GETSTATUS:
		temp = get_tuner_status( bktr );
		*(unsigned long *)arg = temp & 0xff;
		break;

	case TVTUNER_SETFREQ:
		temp_mute( bktr, TRUE );
		temp = tv_freq( bktr, (int)*(unsigned long *)arg, TV_FREQUENCY);
		temp_mute( bktr, FALSE );
		if ( temp < 0 ) {
			temp_mute( bktr, FALSE );
			return( EINVAL );
		}
		*(unsigned long *)arg = temp;

		/* after every channel change, we must restart the MSP34xx */
		/* audio chip to reselect NICAM STEREO or MONO audio */
		if ( bktr->card.msp3400c )
			msp_autodetect( bktr );

		/* after every channel change, we must restart the DPL35xx */
		if ( bktr->card.dpl3518a )
			dpl_autodetect( bktr );
		temp_mute( bktr, FALSE );
		break;

	case TVTUNER_GETFREQ:
		*(unsigned long *)arg = bktr->tuner.frequency;
		break;

	case TVTUNER_GETCHNLSET:
		return tuner_getchnlset((struct bktr_chnlset *)arg);

	case BT848_SAUDIO:	/* set audio channel */
		if ( set_audio( bktr, *(int*)arg ) < 0 )
			return( EIO );
		break;

	/* hue is a 2's complement number, -90' to +89.3' in 0.7' steps */
	case BT848_SHUE:	/* set hue */
		OUTB(bktr, BKTR_HUE, (u_char)(*(int*)arg & 0xff));
		break;

	case BT848_GHUE:	/* get hue */
		*(int*)arg = (signed char)(INB(bktr, BKTR_HUE) & 0xff);
		break;

	/* brightness is a 2's complement #, -50% to +49.6% in 0.39% steps */
	case BT848_SBRIG:	/* set brightness */
		OUTB(bktr, BKTR_BRIGHT, (u_char)(*(int *)arg & 0xff));
		break;

	case BT848_GBRIG:	/* get brightness */
		*(int *)arg = (signed char)(INB(bktr, BKTR_BRIGHT) & 0xff);
		break;

	/* */
	case BT848_SCSAT:	/* set chroma saturation */
		tmp_int = *(int*)arg;

		temp = INB(bktr, BKTR_E_CONTROL);
		temp1 = INB(bktr, BKTR_O_CONTROL);
		if ( tmp_int & BIT_EIGHT_HIGH ) {
			temp |= (BT848_E_CONTROL_SAT_U_MSB |
				 BT848_E_CONTROL_SAT_V_MSB);
			temp1 |= (BT848_O_CONTROL_SAT_U_MSB |
				  BT848_O_CONTROL_SAT_V_MSB);
		}
		else {
			temp &= ~(BT848_E_CONTROL_SAT_U_MSB |
				  BT848_E_CONTROL_SAT_V_MSB);
			temp1 &= ~(BT848_O_CONTROL_SAT_U_MSB |
				   BT848_O_CONTROL_SAT_V_MSB);
		}

		OUTB(bktr, BKTR_SAT_U_LO, (u_char)(tmp_int & 0xff));
		OUTB(bktr, BKTR_SAT_V_LO, (u_char)(tmp_int & 0xff));
		OUTB(bktr, BKTR_E_CONTROL, temp);
		OUTB(bktr, BKTR_O_CONTROL, temp1);
		break;

	case BT848_GCSAT:	/* get chroma saturation */
		tmp_int = (int)(INB(bktr, BKTR_SAT_V_LO) & 0xff);
		if ( INB(bktr, BKTR_E_CONTROL) & BT848_E_CONTROL_SAT_V_MSB )
			tmp_int |= BIT_EIGHT_HIGH;
		*(int*)arg = tmp_int;
		break;

	/* */
	case BT848_SVSAT:	/* set chroma V saturation */
		tmp_int = *(int*)arg;

		temp = INB(bktr, BKTR_E_CONTROL);
		temp1 = INB(bktr, BKTR_O_CONTROL);
		if ( tmp_int & BIT_EIGHT_HIGH) {
			temp |= BT848_E_CONTROL_SAT_V_MSB;
			temp1 |= BT848_O_CONTROL_SAT_V_MSB;
		}
		else {
			temp &= ~BT848_E_CONTROL_SAT_V_MSB;
			temp1 &= ~BT848_O_CONTROL_SAT_V_MSB;
		}

		OUTB(bktr, BKTR_SAT_V_LO, (u_char)(tmp_int & 0xff));
		OUTB(bktr, BKTR_E_CONTROL, temp);
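		/*
		 * The saturation value is 9 bits wide: the low 8 bits go to
		 * SAT_V_LO and bit 8 (BIT_EIGHT_HIGH) to the SAT_V_MSB bit
		 * of the control register.  The even- and odd-field control
		 * registers must be programmed identically:
		 */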
OUTB(bktr, BKTR_O_CONTROL, temp1); break; case BT848_GVSAT: /* get chroma V saturation */ tmp_int = (int)INB(bktr, BKTR_SAT_V_LO) & 0xff; if ( INB(bktr, BKTR_E_CONTROL) & BT848_E_CONTROL_SAT_V_MSB ) tmp_int |= BIT_EIGHT_HIGH; *(int*)arg = tmp_int; break; /* */ case BT848_SUSAT: /* set chroma U saturation */ tmp_int = *(int*)arg; temp = INB(bktr, BKTR_E_CONTROL); temp1 = INB(bktr, BKTR_O_CONTROL); if ( tmp_int & BIT_EIGHT_HIGH ) { temp |= BT848_E_CONTROL_SAT_U_MSB; temp1 |= BT848_O_CONTROL_SAT_U_MSB; } else { temp &= ~BT848_E_CONTROL_SAT_U_MSB; temp1 &= ~BT848_O_CONTROL_SAT_U_MSB; } OUTB(bktr, BKTR_SAT_U_LO, (u_char)(tmp_int & 0xff)); OUTB(bktr, BKTR_E_CONTROL, temp); OUTB(bktr, BKTR_O_CONTROL, temp1); break; case BT848_GUSAT: /* get chroma U saturation */ tmp_int = (int)INB(bktr, BKTR_SAT_U_LO) & 0xff; if ( INB(bktr, BKTR_E_CONTROL) & BT848_E_CONTROL_SAT_U_MSB ) tmp_int |= BIT_EIGHT_HIGH; *(int*)arg = tmp_int; break; /* lr 970528 luma notch etc - 3 high bits of e_control/o_control */ case BT848_SLNOTCH: /* set luma notch */ tmp_int = (*(int *)arg & 0x7) << 5 ; OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) & ~0xe0); OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) & ~0xe0); OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) | tmp_int); OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) | tmp_int); break; case BT848_GLNOTCH: /* get luma notch */ *(int *)arg = (int) ( (INB(bktr, BKTR_E_CONTROL) & 0xe0) >> 5) ; break; /* */ case BT848_SCONT: /* set contrast */ tmp_int = *(int*)arg; temp = INB(bktr, BKTR_E_CONTROL); temp1 = INB(bktr, BKTR_O_CONTROL); if ( tmp_int & BIT_EIGHT_HIGH ) { temp |= BT848_E_CONTROL_CON_MSB; temp1 |= BT848_O_CONTROL_CON_MSB; } else { temp &= ~BT848_E_CONTROL_CON_MSB; temp1 &= ~BT848_O_CONTROL_CON_MSB; } OUTB(bktr, BKTR_CONTRAST_LO, (u_char)(tmp_int & 0xff)); OUTB(bktr, BKTR_E_CONTROL, temp); OUTB(bktr, BKTR_O_CONTROL, temp1); break; case BT848_GCONT: /* get contrast */ tmp_int = (int)INB(bktr, BKTR_CONTRAST_LO) & 0xff; if ( INB(bktr, BKTR_E_CONTROL) & BT848_E_CONTROL_CON_MSB ) tmp_int |= BIT_EIGHT_HIGH; *(int*)arg = tmp_int; break; /* FIXME: SCBARS and CCBARS require a valid int * */ /* argument to succeed, but its not used; consider */ /* using the arg to store the on/off state so */ /* there's only one ioctl() needed to turn cbars on/off */ case BT848_SCBARS: /* set colorbar output */ OUTB(bktr, BKTR_COLOR_CTL, INB(bktr, BKTR_COLOR_CTL) | BT848_COLOR_CTL_COLOR_BARS); break; case BT848_CCBARS: /* clear colorbar output */ OUTB(bktr, BKTR_COLOR_CTL, INB(bktr, BKTR_COLOR_CTL) & ~(BT848_COLOR_CTL_COLOR_BARS)); break; case BT848_GAUDIO: /* get audio channel */ temp = bktr->audio_mux_select; if ( bktr->audio_mute_state == TRUE ) temp |= AUDIO_MUTE; *(int*)arg = temp; break; case BT848_SBTSC: /* set audio channel */ if ( set_BTSC( bktr, *(int*)arg ) < 0 ) return( EIO ); break; case BT848_WEEPROM: /* write eeprom */ offset = (((struct eeProm *)arg)->offset); count = (((struct eeProm *)arg)->count); buf = &(((struct eeProm *)arg)->bytes[ 0 ]); if ( writeEEProm( bktr, offset, count, buf ) < 0 ) return( EIO ); break; case BT848_REEPROM: /* read eeprom */ offset = (((struct eeProm *)arg)->offset); count = (((struct eeProm *)arg)->count); buf = &(((struct eeProm *)arg)->bytes[ 0 ]); if ( readEEProm( bktr, offset, count, buf ) < 0 ) return( EIO ); break; case BT848_SIGNATURE: offset = (((struct eeProm *)arg)->offset); count = (((struct eeProm *)arg)->count); buf = &(((struct eeProm *)arg)->bytes[ 0 ]); if ( signCard( bktr, offset, count, buf ) < 0 ) return( EIO ); 
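	/*
	 * The three EEPROM ioctls above share one calling convention: the
	 * caller fills in a struct eeProm with an offset, a byte count and
	 * a data array.  A hypothetical userland sketch (fd is not part of
	 * the driver):
	 *
	 *	struct eeProm ep;
	 *
	 *	ep.offset = 0;
	 *	ep.count  = 64;
	 *	if (ioctl(fd, BT848_REEPROM, &ep) == 0)
	 *		;	   ep.bytes[] now holds the first 64 bytes
	 */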
break; /* Ioctl's for direct gpio access */ #ifdef BKTR_GPIO_ACCESS case BT848_GPIO_GET_EN: *(int*)arg = INL(bktr, BKTR_GPIO_OUT_EN); break; case BT848_GPIO_SET_EN: OUTL(bktr, BKTR_GPIO_OUT_EN, *(int*)arg); break; case BT848_GPIO_GET_DATA: *(int*)arg = INL(bktr, BKTR_GPIO_DATA); break; case BT848_GPIO_SET_DATA: OUTL(bktr, BKTR_GPIO_DATA, *(int*)arg); break; #endif /* BKTR_GPIO_ACCESS */ /* Ioctl's for running the tuner device in radio mode */ case RADIO_GETMODE: *(unsigned char *)arg = bktr->tuner.radio_mode; break; case RADIO_SETMODE: bktr->tuner.radio_mode = *(unsigned char *)arg; break; case RADIO_GETFREQ: *(unsigned long *)arg = bktr->tuner.frequency; break; case RADIO_SETFREQ: /* The argument to this ioctl is NOT freq*16. It is ** freq*100. */ temp=(int)*(unsigned long *)arg; #ifdef BKTR_RADIO_DEBUG printf("%s: arg=%d temp=%d\n", bktr_name(bktr), (int)*(unsigned long *)arg, temp); #endif #ifndef BKTR_RADIO_NOFREQCHECK /* According to the spec. sheet the band: 87.5MHz-108MHz */ /* is supported. */ if(temp<8750 || temp>10800) { printf("%s: Radio frequency out of range\n", bktr_name(bktr)); return(EINVAL); } #endif temp_mute( bktr, TRUE ); temp = tv_freq( bktr, temp, FM_RADIO_FREQUENCY ); temp_mute( bktr, FALSE ); #ifdef BKTR_RADIO_DEBUG if(temp) printf("%s: tv_freq returned: %d\n", bktr_name(bktr), temp); #endif if ( temp < 0 ) return( EINVAL ); *(unsigned long *)arg = temp; break; /* Luigi's I2CWR ioctl */ case BT848_I2CWR: par = *(u_long *)arg; write = (par >> 24) & 0xff ; i2c_addr = (par >> 16) & 0xff ; i2c_port = (par >> 8) & 0xff ; data = (par) & 0xff ; if (write) { i2cWrite( bktr, i2c_addr, i2c_port, data); } else { data = i2cRead( bktr, i2c_addr); } *(u_long *)arg = (par & 0xffffff00) | ( data & 0xff ); break; #ifdef BT848_MSP_READ /* I2C ioctls to allow userland access to the MSP chip */ case BT848_MSP_READ: { struct bktr_msp_control *msp; msp = (struct bktr_msp_control *) arg; msp->data = msp_dpl_read(bktr, bktr->msp_addr, msp->function, msp->address); break; } case BT848_MSP_WRITE: { struct bktr_msp_control *msp; msp = (struct bktr_msp_control *) arg; msp_dpl_write(bktr, bktr->msp_addr, msp->function, msp->address, msp->data ); break; } case BT848_MSP_RESET: msp_dpl_reset(bktr, bktr->msp_addr); break; #endif default: return common_ioctl( bktr, cmd, arg ); } return( 0 ); } /* * common ioctls */ static int common_ioctl( bktr_ptr_t bktr, ioctl_cmd_t cmd, caddr_t arg ) { int pixfmt; unsigned int temp; struct meteor_pixfmt *pf_pub; switch (cmd) { case METEORSINPUT: /* set input device */ /*Bt848 has 3 MUX Inputs. 
Bt848A/849A/878/879 has 4 MUX Inputs*/
	/* On the original bt848 boards, */
	/*   Tuner is MUX0, RCA is MUX1, S-Video is MUX2 */
	/* On the Hauppauge bt878 boards, */
	/*   Tuner is MUX0, RCA is MUX3 */
	/* Unfortunately the Meteor driver codes DEV_RCA as DEV_0, so we */
	/* stick with this system in our Meteor Emulation */

		switch(*(unsigned long *)arg & METEOR_DEV_MASK) {

		/* this is the RCA video input */
		case 0:		/* default */
		case METEOR_INPUT_DEV0:
		  /* METEOR_INPUT_DEV_RCA: */
			bktr->flags = (bktr->flags & ~METEOR_DEV_MASK)
				| METEOR_DEV0;
			OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) & ~BT848_IFORM_MUXSEL);

			/* work around for new Hauppauge 878 cards */
			if ((bktr->card.card_id == CARD_HAUPPAUGE) &&
				(bktr->id==BROOKTREE_878 || bktr->id==BROOKTREE_879) )
				OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX3);
			else
				OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX1);

			OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) & ~BT848_E_CONTROL_COMP);
			OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) & ~BT848_O_CONTROL_COMP);
			set_audio( bktr, AUDIO_EXTERN );
			break;

		/* this is the tuner input */
		case METEOR_INPUT_DEV1:
			bktr->flags = (bktr->flags & ~METEOR_DEV_MASK)
				| METEOR_DEV1;
			OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) & ~BT848_IFORM_MUXSEL);
			OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX0);
			OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) & ~BT848_E_CONTROL_COMP);
			OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) & ~BT848_O_CONTROL_COMP);
			set_audio( bktr, AUDIO_TUNER );
			break;

		/* this is the S-VHS input, but with a composite camera */
		case METEOR_INPUT_DEV2:
			bktr->flags = (bktr->flags & ~METEOR_DEV_MASK)
				| METEOR_DEV2;
			OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) & ~BT848_IFORM_MUXSEL);
			OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX2);
			OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) & ~BT848_E_CONTROL_COMP);
			OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) & ~BT848_O_CONTROL_COMP);
			set_audio( bktr, AUDIO_EXTERN );
			break;

		/* this is the S-VHS input */
		case METEOR_INPUT_DEV_SVIDEO:
			bktr->flags = (bktr->flags & ~METEOR_DEV_MASK)
				| METEOR_DEV_SVIDEO;
			OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) & ~BT848_IFORM_MUXSEL);
			OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX2);
			OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) | BT848_E_CONTROL_COMP);
			OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) | BT848_O_CONTROL_COMP);
			set_audio( bktr, AUDIO_EXTERN );
			break;

		case METEOR_INPUT_DEV3:
			if ((bktr->id == BROOKTREE_848A) ||
			    (bktr->id == BROOKTREE_849A) ||
			    (bktr->id == BROOKTREE_878) ||
			    (bktr->id == BROOKTREE_879) ) {
				bktr->flags = (bktr->flags & ~METEOR_DEV_MASK)
					| METEOR_DEV3;
				OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) & ~BT848_IFORM_MUXSEL);

				/* work around for new Hauppauge 878 cards */
				if ((bktr->card.card_id == CARD_HAUPPAUGE) &&
					(bktr->id==BROOKTREE_878 || bktr->id==BROOKTREE_879) )
					OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX1);
				else
					OUTB(bktr, BKTR_IFORM, INB(bktr, BKTR_IFORM) | BT848_IFORM_M_MUX3);

				OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) & ~BT848_E_CONTROL_COMP);
				OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) & ~BT848_O_CONTROL_COMP);
				set_audio( bktr, AUDIO_EXTERN );

				break;
			}

		default:
			return( EINVAL );
		}
		break;

	case METEORGINPUT:	/* get input device */
		*(u_long *)arg = bktr->flags & METEOR_DEV_MASK;
		break;

	case METEORSACTPIXFMT:
		if (( *(int *)arg < 0 ) ||
		    ( *(int *)arg >= PIXFMT_TABLE_SIZE ))
			return( EINVAL );

		bktr->pixfmt = *(int *)arg;
		OUTB(bktr, BKTR_COLOR_CTL, (INB(bktr, BKTR_COLOR_CTL) & 0xf0)
		     | pixfmt_swap_flags( bktr->pixfmt ));
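		/*
		 * Selecting an explicit pixel format leaves Meteor
		 * compatibility mode; from here on METEORSETGEO no longer
		 * derives bktr->pixfmt from its oformat argument:
		 */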
		bktr->pixfmt_compat = FALSE;
		break;

	case METEORGACTPIXFMT:
		*(int *)arg = bktr->pixfmt;
		break;

	case METEORGSUPPIXFMT:
		pf_pub = (struct meteor_pixfmt *)arg;
		pixfmt = pf_pub->index;

		if (( pixfmt < 0 ) || ( pixfmt >= PIXFMT_TABLE_SIZE ))
			return( EINVAL );

		memcpy( pf_pub, &pixfmt_table[ pixfmt ].public,
			sizeof( *pf_pub ) );

		/* Patch in our format index */
		pf_pub->index = pixfmt;
		break;

#if defined( STATUS_SUM )
	case BT848_GSTATUS:	/* reap status */
		{
		DECLARE_INTR_MASK(s);
		DISABLE_INTR(s);
		temp = status_sum;
		status_sum = 0;
		ENABLE_INTR(s);
		*(u_int*)arg = temp;
		break;
		}
#endif /* STATUS_SUM */

	default:
		return( ENOTTY );
	}

	return( 0 );
}

/******************************************************************************
 * bt848 RISC programming routines:
 */

/*
 *
 */
#ifdef BT848_DEBUG
static int
dump_bt848( bktr_ptr_t bktr )
{
	int	r[60]={
			   4,    8, 0xc, 0x8c, 0x10, 0x90, 0x14, 0x94,
			0x18, 0x98, 0x1c, 0x9c, 0x20, 0xa0, 0x24, 0xa4,
			0x28, 0x2c, 0xac, 0x30, 0x34, 0x38, 0x3c, 0x40,
			0xc0, 0x48, 0x4c, 0xcc, 0x50, 0xd0, 0xd4, 0x60,
			0x64, 0x68, 0x6c, 0xec, 0xd8, 0xdc, 0xe0, 0xe4,
			0, 0, 0, 0
		   };
	int	i;

	for (i = 0; i < 40; i+=4) {
		printf("%s: Reg:value : \t%x:%x \t%x:%x \t %x:%x \t %x:%x\n",
		       bktr_name(bktr),
		       r[i], INL(bktr, r[i]),
		       r[i+1], INL(bktr, r[i+1]),
		       r[i+2], INL(bktr, r[i+2]),
		       r[i+3], INL(bktr, r[i+3]));
	}

	printf("%s: INT STAT %x \n", bktr_name(bktr),
	       INL(bktr, BKTR_INT_STAT));
	printf("%s: Reg INT_MASK %x \n", bktr_name(bktr),
	       INL(bktr, BKTR_INT_MASK));
	printf("%s: Reg GPIO_DMA_CTL %x \n", bktr_name(bktr),
	       INW(bktr, BKTR_GPIO_DMA_CTL));
	return( 0 );
}
#endif

/*
 * build write instruction
 */
#define BKTR_FM1      0x6	/* packed data to follow */
#define BKTR_FM3      0xe	/* planar data to follow */
#define BKTR_VRE      0x4	/* Marks the end of the even field */
#define BKTR_VRO      0xC	/* Marks the end of the odd field */
#define BKTR_PXV      0x0	/* valid word (never used) */
#define BKTR_EOL      0x1	/* last dword, 4 bytes */
#define BKTR_SOL      0x2	/* first dword */

#define OP_WRITE      (0x1 << 28)
#define OP_SKIP       (0x2 << 28)
#define OP_WRITEC     (0x5 << 28)
#define OP_JUMP       (0x7 << 28)
#define OP_SYNC       (0x8 << 28)
#define OP_WRITE123   (0x9 << 28)
#define OP_WRITES123  (0xb << 28)
#define OP_SOL        (1 << 27)	/* first instr for scanline */
#define OP_EOL        (1 << 26)

#define BKTR_RESYNC   (1 << 15)
#define BKTR_GEN_IRQ  (1 << 24)

/*
 * The RISC status bits can be set/cleared in the RISC programs
 * and tested in the Interrupt Handler
 */
#define BKTR_SET_RISC_STATUS_BIT0 (1 << 16)
#define BKTR_SET_RISC_STATUS_BIT1 (1 << 17)
#define BKTR_SET_RISC_STATUS_BIT2 (1 << 18)
#define BKTR_SET_RISC_STATUS_BIT3 (1 << 19)

#define BKTR_CLEAR_RISC_STATUS_BIT0 (1 << 20)
#define BKTR_CLEAR_RISC_STATUS_BIT1 (1 << 21)
#define BKTR_CLEAR_RISC_STATUS_BIT2 (1 << 22)
#define BKTR_CLEAR_RISC_STATUS_BIT3 (1 << 23)

#define BKTR_TEST_RISC_STATUS_BIT0 (1 << 28)
#define BKTR_TEST_RISC_STATUS_BIT1 (1 << 29)
#define BKTR_TEST_RISC_STATUS_BIT2 (1 << 30)
-#define BKTR_TEST_RISC_STATUS_BIT3 (1 << 31)
+#define BKTR_TEST_RISC_STATUS_BIT3 (1U << 31)

static bool_t notclipped (bktr_reg_t * bktr, int x, int width) {
	int i;
	bktr_clip_t * clip_node;

	bktr->clip_start = -1;
	bktr->last_y = 0;
	bktr->y = 0;
	bktr->y2 = width;
	bktr->line_length = width;
	bktr->yclip = -1;
	bktr->yclip2 = -1;
	bktr->current_col = 0;

	if (bktr->max_clip_node == 0 )
		return TRUE;

	clip_node = (bktr_clip_t *) &bktr->clip_list[0];

	for (i = 0; i < bktr->max_clip_node; i++ ) {
		clip_node = (bktr_clip_t *) &bktr->clip_list[i];
		if (x >= clip_node->x_min && x <=
clip_node->x_max ) { bktr->clip_start = i; return FALSE; } } return TRUE; } static bool_t getline(bktr_reg_t *bktr, int x ) { int i, j; bktr_clip_t * clip_node ; if (bktr->line_length == 0 || bktr->current_col >= bktr->line_length) return FALSE; bktr->y = min(bktr->last_y, bktr->line_length); bktr->y2 = bktr->line_length; bktr->yclip = bktr->yclip2 = -1; for (i = bktr->clip_start; i < bktr->max_clip_node; i++ ) { clip_node = (bktr_clip_t *) &bktr->clip_list[i]; if (x >= clip_node->x_min && x <= clip_node->x_max) { if (bktr->last_y <= clip_node->y_min) { bktr->y = min(bktr->last_y, bktr->line_length); bktr->y2 = min(clip_node->y_min, bktr->line_length); bktr->yclip = min(clip_node->y_min, bktr->line_length); bktr->yclip2 = min(clip_node->y_max, bktr->line_length); bktr->last_y = bktr->yclip2; bktr->clip_start = i; for (j = i+1; j < bktr->max_clip_node; j++ ) { clip_node = (bktr_clip_t *) &bktr->clip_list[j]; if (x >= clip_node->x_min && x <= clip_node->x_max) { if (bktr->last_y >= clip_node->y_min) { bktr->yclip2 = min(clip_node->y_max, bktr->line_length); bktr->last_y = bktr->yclip2; bktr->clip_start = j; } } else break ; } return TRUE; } } } if (bktr->current_col <= bktr->line_length) { bktr->current_col = bktr->line_length; return TRUE; } return FALSE; } static bool_t split(bktr_reg_t * bktr, volatile uint32_t **dma_prog, int width , u_long operation, int pixel_width, volatile u_char ** target_buffer, int cols ) { u_long flag, flag2; struct meteor_pixfmt *pf = &pixfmt_table[ bktr->pixfmt ].public; u_int skip, start_skip; /* For RGB24, we need to align the component in FIFO Byte Lane 0 */ /* to the 1st byte in the mem dword containing our start addr. */ /* BTW, we know this pixfmt's 1st byte is Blue; thus the start addr */ /* must be Blue. */ start_skip = 0; if (( pf->type == METEOR_PIXTYPE_RGB ) && ( pf->Bpp == 3 )) switch ( ((uintptr_t) (volatile void *) *target_buffer) % 4 ) { case 2 : start_skip = 4 ; break; case 1 : start_skip = 8 ; break; } if ((width * pixel_width) < DMA_BT848_SPLIT ) { if ( width == cols) { flag = OP_SOL | OP_EOL; } else if (bktr->current_col == 0 ) { flag = OP_SOL; } else if (bktr->current_col == cols) { flag = OP_EOL; } else flag = 0; skip = 0; if (( flag & OP_SOL ) && ( start_skip > 0 )) { *(*dma_prog)++ = OP_SKIP | OP_SOL | start_skip; flag &= ~OP_SOL; skip = start_skip; } *(*dma_prog)++ = operation | flag | (width * pixel_width - skip); if (operation != OP_SKIP ) *(*dma_prog)++ = (uintptr_t) (volatile void *) *target_buffer; *target_buffer += width * pixel_width; bktr->current_col += width; } else { if (bktr->current_col == 0 && width == cols) { flag = OP_SOL ; flag2 = OP_EOL; } else if (bktr->current_col == 0 ) { flag = OP_SOL; flag2 = 0; } else if (bktr->current_col >= cols) { flag = 0; flag2 = OP_EOL; } else { flag = 0; flag2 = 0; } skip = 0; if (( flag & OP_SOL ) && ( start_skip > 0 )) { *(*dma_prog)++ = OP_SKIP | OP_SOL | start_skip; flag &= ~OP_SOL; skip = start_skip; } *(*dma_prog)++ = operation | flag | (width * pixel_width / 2 - skip); if (operation != OP_SKIP ) *(*dma_prog)++ = (uintptr_t) (volatile void *) *target_buffer ; *target_buffer += (width * pixel_width / 2) ; if ( operation == OP_WRITE ) operation = OP_WRITEC; *(*dma_prog)++ = operation | flag2 | (width * pixel_width / 2); *target_buffer += (width * pixel_width / 2) ; bktr->current_col += width; } return TRUE; } /* * Generate the RISC instructions to capture both VBI and video images */ static void rgb_vbi_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ) { int i; 
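	/*
	 * Shape of the RISC program built below (one full interlaced pass;
	 * the leading VRE sync runs once, the final JUMP returns to
	 * loop_point):
	 *
	 *	SYNC  VRE|RESYNC	wait for the start of the odd field
	 *  loop_point:
	 *	SYNC  FM1		packed-data mode
	 *	WRITE vbidata		odd-field VBI lines
	 *	WRITE/SKIP image	odd field (i_flag 2 or 3)
	 *	SYNC  VRO|RESYNC|IRQ	end of odd field
	 *	SYNC  FM1
	 *	WRITE vbidata		even-field VBI lines
	 *	WRITE/SKIP image	even field (i_flag 1 or 3)
	 *	SYNC  VRE|RESYNC|IRQ	end of even field
	 *	JUMP  loop_point
	 */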
volatile uint32_t target_buffer, buffer, target,width; volatile uint32_t pitch; volatile uint32_t *dma_prog; /* DMA prog is an array of 32 bit RISC instructions */ volatile uint32_t *loop_point; struct meteor_pixfmt_internal *pf_int = &pixfmt_table[ bktr->pixfmt ]; u_int Bpp = pf_int->public.Bpp; unsigned int vbisamples; /* VBI samples per line */ unsigned int vbilines; /* VBI lines per field */ unsigned int num_dwords; /* DWORDS per line */ vbisamples = format_params[bktr->format_params].vbi_num_samples; vbilines = format_params[bktr->format_params].vbi_num_lines; num_dwords = vbisamples/4; OUTB(bktr, BKTR_COLOR_FMT, pf_int->color_fmt); OUTB(bktr, BKTR_ADC, SYNC_LEVEL); OUTB(bktr, BKTR_VBI_PACK_SIZE, ((num_dwords)) & 0xff); OUTB(bktr, BKTR_VBI_PACK_DEL, ((num_dwords)>> 8) & 0x01); /* no hdelay */ /* no ext frame */ OUTB(bktr, BKTR_OFORM, 0x00); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) | 0x40); /* set chroma comb */ OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) | 0x40); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) & ~0x80); /* clear Ycomb */ OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) & ~0x80); /* disable gamma correction removal */ OUTB(bktr, BKTR_COLOR_CTL, INB(bktr, BKTR_COLOR_CTL) | BT848_COLOR_CTL_GAMMA); if (cols > 385 ) { OUTB(bktr, BKTR_E_VTC, 0); OUTB(bktr, BKTR_O_VTC, 0); } else { OUTB(bktr, BKTR_E_VTC, 1); OUTB(bktr, BKTR_O_VTC, 1); } bktr->capcontrol = 3 << 2 | 3; dma_prog = (uint32_t *) bktr->dma_prog; /* Construct Write */ if (bktr->video.addr) { target_buffer = (u_long) bktr->video.addr; pitch = bktr->video.width; } else { target_buffer = (u_long) vtophys(bktr->bigbuf); pitch = cols*Bpp; } buffer = target_buffer; /* Wait for the VRE sync marking the end of the Even and * the start of the Odd field. Resync here. 
*/ *dma_prog++ = OP_SYNC | BKTR_RESYNC |BKTR_VRE; *dma_prog++ = 0; loop_point = dma_prog; /* store the VBI data */ /* look for sync with packed data */ *dma_prog++ = OP_SYNC | BKTR_FM1; *dma_prog++ = 0; for(i = 0; i < vbilines; i++) { *dma_prog++ = OP_WRITE | OP_SOL | OP_EOL | vbisamples; *dma_prog++ = (u_long) vtophys((caddr_t)bktr->vbidata + (i * VBI_LINE_SIZE)); } if ( (i_flag == 2/*Odd*/) || (i_flag==3) /*interlaced*/ ) { /* store the Odd field video image */ /* look for sync with packed data */ *dma_prog++ = OP_SYNC | BKTR_FM1; *dma_prog++ = 0; /* NULL WORD */ width = cols; for (i = 0; i < (rows/interlace); i++) { target = target_buffer; if ( notclipped(bktr, i, width)) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } else { while(getline(bktr, i)) { if (bktr->y != bktr->y2 ) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } if (bktr->yclip != bktr->yclip2 ) { split(bktr,(volatile uint32_t **) &dma_prog, bktr->yclip2 - bktr->yclip, OP_SKIP, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } } } target_buffer += interlace * pitch; } } /* end if */ /* Grab the Even field */ /* Look for the VRO, end of Odd field, marker */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_RESYNC | BKTR_VRO; *dma_prog++ = 0; /* NULL WORD */ /* store the VBI data */ /* look for sync with packed data */ *dma_prog++ = OP_SYNC | BKTR_FM1; *dma_prog++ = 0; for(i = 0; i < vbilines; i++) { *dma_prog++ = OP_WRITE | OP_SOL | OP_EOL | vbisamples; *dma_prog++ = (u_long) vtophys((caddr_t)bktr->vbidata + ((i+MAX_VBI_LINES) * VBI_LINE_SIZE)); } /* store the video image */ if (i_flag == 1) /*Even Only*/ target_buffer = buffer; if (i_flag == 3) /*interlaced*/ target_buffer = buffer+pitch; if ((i_flag == 1) /*Even Only*/ || (i_flag==3) /*interlaced*/) { /* look for sync with packed data */ *dma_prog++ = OP_SYNC | BKTR_FM1; *dma_prog++ = 0; /* NULL WORD */ width = cols; for (i = 0; i < (rows/interlace); i++) { target = target_buffer; if ( notclipped(bktr, i, width)) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } else { while(getline(bktr, i)) { if (bktr->y != bktr->y2 ) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } if (bktr->yclip != bktr->yclip2 ) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->yclip2 - bktr->yclip, OP_SKIP, Bpp, (volatile u_char **)(uintptr_t) &target, cols); } } } target_buffer += interlace * pitch; } } /* Look for end of 'Even Field' */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_RESYNC | BKTR_VRE; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog++ = (u_long ) vtophys(loop_point) ; *dma_prog++ = 0; /* NULL WORD */ } static void rgb_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ) { int i; volatile uint32_t target_buffer, buffer, target,width; volatile uint32_t pitch; volatile uint32_t *dma_prog; struct meteor_pixfmt_internal *pf_int = &pixfmt_table[ bktr->pixfmt ]; u_int Bpp = pf_int->public.Bpp; OUTB(bktr, BKTR_COLOR_FMT, pf_int->color_fmt); OUTB(bktr, BKTR_VBI_PACK_SIZE, 0); OUTB(bktr, BKTR_VBI_PACK_DEL, 0); OUTB(bktr, BKTR_ADC, SYNC_LEVEL); OUTB(bktr, BKTR_OFORM, 0x00); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) | 0x40); /* set chroma comb */ OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) | 0x40); 
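	/*
	 * VSCALE_HI doubles as a control register: bit 7 is the luma (Y)
	 * comb enable, bit 6 the chroma comb enable, bit 5 selects
	 * interlaced scaling, and bits 4:0 hold the top bits of the 13-bit
	 * vertical scale value (see build_dma_prog() below).
	 */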
OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) & ~0x80); /* clear Ycomb */ OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) & ~0x80); /* disable gamma correction removal */ OUTB(bktr, BKTR_COLOR_CTL, INB(bktr, BKTR_COLOR_CTL) | BT848_COLOR_CTL_GAMMA); if (cols > 385 ) { OUTB(bktr, BKTR_E_VTC, 0); OUTB(bktr, BKTR_O_VTC, 0); } else { OUTB(bktr, BKTR_E_VTC, 1); OUTB(bktr, BKTR_O_VTC, 1); } bktr->capcontrol = 3 << 2 | 3; dma_prog = (uint32_t *) bktr->dma_prog; /* Construct Write */ if (bktr->video.addr) { target_buffer = (uint32_t) bktr->video.addr; pitch = bktr->video.width; } else { target_buffer = (uint32_t) vtophys(bktr->bigbuf); pitch = cols*Bpp; } buffer = target_buffer; /* contruct sync : for video packet format */ *dma_prog++ = OP_SYNC | BKTR_RESYNC | BKTR_FM1; /* sync, mode indicator packed data */ *dma_prog++ = 0; /* NULL WORD */ width = cols; for (i = 0; i < (rows/interlace); i++) { target = target_buffer; if ( notclipped(bktr, i, width)) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } else { while(getline(bktr, i)) { if (bktr->y != bktr->y2 ) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } if (bktr->yclip != bktr->yclip2 ) { split(bktr,(volatile uint32_t **) &dma_prog, bktr->yclip2 - bktr->yclip, OP_SKIP, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } } } target_buffer += interlace * pitch; } switch (i_flag) { case 1: /* sync vre */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_VRO; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t ) vtophys(bktr->dma_prog); return; case 2: /* sync vro */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_VRE; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t ) vtophys(bktr->dma_prog); return; case 3: /* sync vro */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_RESYNC | BKTR_VRO; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; ; *dma_prog = (uint32_t ) vtophys(bktr->odd_dma_prog); break; } if (interlace == 2) { target_buffer = buffer + pitch; dma_prog = (uint32_t *) bktr->odd_dma_prog; /* sync vre IRQ bit */ *dma_prog++ = OP_SYNC | BKTR_RESYNC | BKTR_FM1; *dma_prog++ = 0; /* NULL WORD */ width = cols; for (i = 0; i < (rows/interlace); i++) { target = target_buffer; if ( notclipped(bktr, i, width)) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } else { while(getline(bktr, i)) { if (bktr->y != bktr->y2 ) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->y2 - bktr->y, OP_WRITE, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } if (bktr->yclip != bktr->yclip2 ) { split(bktr, (volatile uint32_t **) &dma_prog, bktr->yclip2 - bktr->yclip, OP_SKIP, Bpp, (volatile u_char **)(uintptr_t)&target, cols); } } } target_buffer += interlace * pitch; } } /* sync vre IRQ bit */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_RESYNC | BKTR_VRE; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog++ = (uint32_t ) vtophys(bktr->dma_prog) ; *dma_prog++ = 0; /* NULL WORD */ } /* * */ static void yuvpack_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ) { int i; volatile unsigned int inst; volatile unsigned int inst3; volatile uint32_t target_buffer, buffer; volatile uint32_t *dma_prog; struct meteor_pixfmt_internal *pf_int = &pixfmt_table[ bktr->pixfmt ]; int b; OUTB(bktr, BKTR_COLOR_FMT, 
pf_int->color_fmt); OUTB(bktr, BKTR_E_SCLOOP, INB(bktr, BKTR_E_SCLOOP) | BT848_E_SCLOOP_CAGC); /* enable chroma comb */ OUTB(bktr, BKTR_O_SCLOOP, INB(bktr, BKTR_O_SCLOOP) | BT848_O_SCLOOP_CAGC); OUTB(bktr, BKTR_COLOR_CTL, INB(bktr, BKTR_COLOR_CTL) | BT848_COLOR_CTL_RGB_DED | BT848_COLOR_CTL_GAMMA); OUTB(bktr, BKTR_ADC, SYNC_LEVEL); bktr->capcontrol = 1 << 6 | 1 << 4 | 1 << 2 | 3; bktr->capcontrol = 3 << 2 | 3; dma_prog = (uint32_t *) bktr->dma_prog; /* Construct Write */ /* write , sol, eol */ inst = OP_WRITE | OP_SOL | (cols); /* write , sol, eol */ inst3 = OP_WRITE | OP_EOL | (cols); if (bktr->video.addr) target_buffer = (uint32_t) bktr->video.addr; else target_buffer = (uint32_t) vtophys(bktr->bigbuf); buffer = target_buffer; /* contruct sync : for video packet format */ /* sync, mode indicator packed data */ *dma_prog++ = OP_SYNC | BKTR_RESYNC | BKTR_FM1; *dma_prog++ = 0; /* NULL WORD */ b = cols; for (i = 0; i < (rows/interlace); i++) { *dma_prog++ = inst; *dma_prog++ = target_buffer; *dma_prog++ = inst3; *dma_prog++ = target_buffer + b; target_buffer += interlace*(cols * 2); } switch (i_flag) { case 1: /* sync vre */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_VRE; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); return; case 2: /* sync vro */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_VRO; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); return; case 3: /* sync vro */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_RESYNC | BKTR_VRO; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog = (uint32_t) vtophys(bktr->odd_dma_prog); break; } if (interlace == 2) { target_buffer = (uint32_t) buffer + cols*2; dma_prog = (uint32_t *) bktr->odd_dma_prog; /* sync vre */ *dma_prog++ = OP_SYNC | BKTR_RESYNC | BKTR_FM1; *dma_prog++ = 0; /* NULL WORD */ for (i = 0; i < (rows/interlace) ; i++) { *dma_prog++ = inst; *dma_prog++ = target_buffer; *dma_prog++ = inst3; *dma_prog++ = target_buffer + b; target_buffer += interlace * ( cols*2); } } /* sync vro IRQ bit */ *dma_prog++ = OP_SYNC | BKTR_GEN_IRQ | BKTR_RESYNC | BKTR_VRE; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); *dma_prog++ = 0; /* NULL WORD */ } /* * */ static void yuv422_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ){ int i; volatile unsigned int inst; volatile uint32_t target_buffer, t1, buffer; volatile uint32_t *dma_prog; struct meteor_pixfmt_internal *pf_int = &pixfmt_table[ bktr->pixfmt ]; OUTB(bktr, BKTR_COLOR_FMT, pf_int->color_fmt); dma_prog = (uint32_t*) bktr->dma_prog; bktr->capcontrol = 1 << 6 | 1 << 4 | 3; OUTB(bktr, BKTR_ADC, SYNC_LEVEL); OUTB(bktr, BKTR_OFORM, 0x00); OUTB(bktr, BKTR_E_CONTROL, INB(bktr, BKTR_E_CONTROL) | BT848_E_CONTROL_LDEC); /* disable luma decimation */ OUTB(bktr, BKTR_O_CONTROL, INB(bktr, BKTR_O_CONTROL) | BT848_O_CONTROL_LDEC); OUTB(bktr, BKTR_E_SCLOOP, INB(bktr, BKTR_E_SCLOOP) | BT848_E_SCLOOP_CAGC); /* chroma agc enable */ OUTB(bktr, BKTR_O_SCLOOP, INB(bktr, BKTR_O_SCLOOP) | BT848_O_SCLOOP_CAGC); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) & ~0x80); /* clear Ycomb */ OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) & ~0x80); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) | 0x40); /* set chroma comb */ OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) | 0x40); /* disable gamma correction 
removal */ OUTB(bktr, BKTR_COLOR_CTL, INB(bktr, BKTR_COLOR_CTL) | BT848_COLOR_CTL_GAMMA); /* Construct Write */ inst = OP_WRITE123 | OP_SOL | OP_EOL | (cols); if (bktr->video.addr) target_buffer = (uint32_t) bktr->video.addr; else target_buffer = (uint32_t) vtophys(bktr->bigbuf); buffer = target_buffer; t1 = buffer; /* contruct sync : for video packet format */ *dma_prog++ = OP_SYNC | 1 << 15 | BKTR_FM3; /*sync, mode indicator packed data*/ *dma_prog++ = 0; /* NULL WORD */ for (i = 0; i < (rows/interlace ) ; i++) { *dma_prog++ = inst; *dma_prog++ = cols/2 | cols/2 << 16; *dma_prog++ = target_buffer; *dma_prog++ = t1 + (cols*rows) + i*cols/2 * interlace; *dma_prog++ = t1 + (cols*rows) + (cols*rows/2) + i*cols/2 * interlace; target_buffer += interlace*cols; } switch (i_flag) { case 1: *dma_prog++ = OP_SYNC | 1 << 24 | BKTR_VRE; /*sync vre*/ *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); return; case 2: *dma_prog++ = OP_SYNC | 1 << 24 | BKTR_VRO; /*sync vre*/ *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); return; case 3: *dma_prog++ = OP_SYNC | 1 << 24 | 1 << 15 | BKTR_VRO; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog = (uint32_t) vtophys(bktr->odd_dma_prog); break; } if (interlace == 2) { dma_prog = (uint32_t *) bktr->odd_dma_prog; target_buffer = (uint32_t) buffer + cols; t1 = buffer + cols/2; *dma_prog++ = OP_SYNC | 1 << 15 | BKTR_FM3; *dma_prog++ = 0; /* NULL WORD */ for (i = 0; i < (rows/interlace ) ; i++) { *dma_prog++ = inst; *dma_prog++ = cols/2 | cols/2 << 16; *dma_prog++ = target_buffer; *dma_prog++ = t1 + (cols*rows) + i*cols/2 * interlace; *dma_prog++ = t1 + (cols*rows) + (cols*rows/2) + i*cols/2 * interlace; target_buffer += interlace*cols; } } *dma_prog++ = OP_SYNC | 1 << 24 | 1 << 15 | BKTR_VRE; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog) ; *dma_prog++ = 0; /* NULL WORD */ } /* * */ static void yuv12_prog( bktr_ptr_t bktr, char i_flag, int cols, int rows, int interlace ){ int i; volatile unsigned int inst; volatile unsigned int inst1; volatile uint32_t target_buffer, t1, buffer; volatile uint32_t *dma_prog; struct meteor_pixfmt_internal *pf_int = &pixfmt_table[ bktr->pixfmt ]; OUTB(bktr, BKTR_COLOR_FMT, pf_int->color_fmt); dma_prog = (uint32_t *) bktr->dma_prog; bktr->capcontrol = 1 << 6 | 1 << 4 | 3; OUTB(bktr, BKTR_ADC, SYNC_LEVEL); OUTB(bktr, BKTR_OFORM, 0x0); /* Construct Write */ inst = OP_WRITE123 | OP_SOL | OP_EOL | (cols); inst1 = OP_WRITES123 | OP_SOL | OP_EOL | (cols); if (bktr->video.addr) target_buffer = (uint32_t) bktr->video.addr; else target_buffer = (uint32_t) vtophys(bktr->bigbuf); buffer = target_buffer; t1 = buffer; *dma_prog++ = OP_SYNC | 1 << 15 | BKTR_FM3; /*sync, mode indicator packed data*/ *dma_prog++ = 0; /* NULL WORD */ for (i = 0; i < (rows/interlace )/2 ; i++) { *dma_prog++ = inst; *dma_prog++ = cols/2 | (cols/2 << 16); *dma_prog++ = target_buffer; *dma_prog++ = t1 + (cols*rows) + i*cols/2 * interlace; *dma_prog++ = t1 + (cols*rows) + (cols*rows/4) + i*cols/2 * interlace; target_buffer += interlace*cols; *dma_prog++ = inst1; *dma_prog++ = cols/2 | (cols/2 << 16); *dma_prog++ = target_buffer; target_buffer += interlace*cols; } switch (i_flag) { case 1: *dma_prog++ = OP_SYNC | 1 << 24 | BKTR_VRE; /*sync vre*/ *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); return; case 2: *dma_prog++ = OP_SYNC | 
1 << 24 | BKTR_VRO; /*sync vro*/ *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); return; case 3: *dma_prog++ = OP_SYNC | 1 << 24 | 1 << 15 | BKTR_VRO; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP ; *dma_prog = (uint32_t) vtophys(bktr->odd_dma_prog); break; } if (interlace == 2) { dma_prog = (uint32_t *) bktr->odd_dma_prog; target_buffer = (uint32_t) buffer + cols; t1 = buffer + cols/2; *dma_prog++ = OP_SYNC | 1 << 15 | BKTR_FM3; *dma_prog++ = 0; /* NULL WORD */ for (i = 0; i < ((rows/interlace )/2 ) ; i++) { *dma_prog++ = inst; *dma_prog++ = cols/2 | (cols/2 << 16); *dma_prog++ = target_buffer; *dma_prog++ = t1 + (cols*rows) + i*cols/2 * interlace; *dma_prog++ = t1 + (cols*rows) + (cols*rows/4) + i*cols/2 * interlace; target_buffer += interlace*cols; *dma_prog++ = inst1; *dma_prog++ = cols/2 | (cols/2 << 16); *dma_prog++ = target_buffer; target_buffer += interlace*cols; } } *dma_prog++ = OP_SYNC | 1 << 24 | 1 << 15 | BKTR_VRE; *dma_prog++ = 0; /* NULL WORD */ *dma_prog++ = OP_JUMP; *dma_prog++ = (uint32_t) vtophys(bktr->dma_prog); *dma_prog++ = 0; /* NULL WORD */ } /* * */ static void build_dma_prog( bktr_ptr_t bktr, char i_flag ) { int rows, cols, interlace; int tmp_int; unsigned int temp; struct format_params *fp; struct meteor_pixfmt_internal *pf_int = &pixfmt_table[ bktr->pixfmt ]; fp = &format_params[bktr->format_params]; OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); /* disable FIFO & RISC, leave other bits alone */ OUTW(bktr, BKTR_GPIO_DMA_CTL, INW(bktr, BKTR_GPIO_DMA_CTL) & ~FIFO_RISC_ENABLED); /* set video parameters */ if (bktr->capture_area_enabled) temp = ((quad_t ) fp->htotal* (quad_t) bktr->capture_area_x_size * 4096 / fp->scaled_htotal / bktr->cols) - 4096; else temp = ((quad_t ) fp->htotal* (quad_t) fp->scaled_hactive * 4096 / fp->scaled_htotal / bktr->cols) - 4096; /* printf("%s: HSCALE value is %d\n", bktr_name(bktr), temp); */ OUTB(bktr, BKTR_E_HSCALE_LO, temp & 0xff); OUTB(bktr, BKTR_O_HSCALE_LO, temp & 0xff); OUTB(bktr, BKTR_E_HSCALE_HI, (temp >> 8) & 0xff); OUTB(bktr, BKTR_O_HSCALE_HI, (temp >> 8) & 0xff); /* horizontal active */ temp = bktr->cols; /* printf("%s: HACTIVE value is %d\n", bktr_name(bktr), temp); */ OUTB(bktr, BKTR_E_HACTIVE_LO, temp & 0xff); OUTB(bktr, BKTR_O_HACTIVE_LO, temp & 0xff); OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) & ~0x3); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) & ~0x3); OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) | ((temp >> 8) & 0x3)); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) | ((temp >> 8) & 0x3)); /* horizontal delay */ if (bktr->capture_area_enabled) temp = ( (fp->hdelay* fp->scaled_hactive + bktr->capture_area_x_offset* fp->scaled_htotal) * bktr->cols) / (bktr->capture_area_x_size * fp->hactive); else temp = (fp->hdelay * bktr->cols) / fp->hactive; temp = temp & 0x3fe; /* printf("%s: HDELAY value is %d\n", bktr_name(bktr), temp); */ OUTB(bktr, BKTR_E_DELAY_LO, temp & 0xff); OUTB(bktr, BKTR_O_DELAY_LO, temp & 0xff); OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) & ~0xc); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) & ~0xc); OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) | ((temp >> 6) & 0xc)); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) | ((temp >> 6) & 0xc)); /* vertical scale */ if (bktr->capture_area_enabled) { if (bktr->flags & METEOR_ONLY_ODD_FIELDS || bktr->flags & METEOR_ONLY_EVEN_FIELDS) tmp_int = 65536 - (((bktr->capture_area_y_size * 256 + (bktr->rows/2)) / bktr->rows) - 512); else { tmp_int = 65536 - (((bktr->capture_area_y_size * 512 
+ (bktr->rows / 2)) / bktr->rows) - 512); } } else { if (bktr->flags & METEOR_ONLY_ODD_FIELDS || bktr->flags & METEOR_ONLY_EVEN_FIELDS) tmp_int = 65536 - (((fp->vactive * 256 + (bktr->rows/2)) / bktr->rows) - 512); else { tmp_int = 65536 - (((fp->vactive * 512 + (bktr->rows / 2)) / bktr->rows) - 512); } } tmp_int &= 0x1fff; /* printf("%s: VSCALE value is %d\n", bktr_name(bktr), tmp_int); */ OUTB(bktr, BKTR_E_VSCALE_LO, tmp_int & 0xff); OUTB(bktr, BKTR_O_VSCALE_LO, tmp_int & 0xff); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) & ~0x1f); OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) & ~0x1f); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) | ((tmp_int >> 8) & 0x1f)); OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) | ((tmp_int >> 8) & 0x1f)); /* vertical active */ if (bktr->capture_area_enabled) temp = bktr->capture_area_y_size; else temp = fp->vactive; /* printf("%s: VACTIVE is %d\n", bktr_name(bktr), temp); */ OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) & ~0x30); OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) | ((temp >> 4) & 0x30)); OUTB(bktr, BKTR_E_VACTIVE_LO, temp & 0xff); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) & ~0x30); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) | ((temp >> 4) & 0x30)); OUTB(bktr, BKTR_O_VACTIVE_LO, temp & 0xff); /* vertical delay */ if (bktr->capture_area_enabled) temp = fp->vdelay + (bktr->capture_area_y_offset); else temp = fp->vdelay; /* printf("%s: VDELAY is %d\n", bktr_name(bktr), temp); */ OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) & ~0xC0); OUTB(bktr, BKTR_E_CROP, INB(bktr, BKTR_E_CROP) | ((temp >> 2) & 0xC0)); OUTB(bktr, BKTR_E_VDELAY_LO, temp & 0xff); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) & ~0xC0); OUTB(bktr, BKTR_O_CROP, INB(bktr, BKTR_O_CROP) | ((temp >> 2) & 0xC0)); OUTB(bktr, BKTR_O_VDELAY_LO, temp & 0xff); /* end of video params */ if ((bktr->xtal_pll_mode == BT848_USE_PLL) && (fp->iform_xtsel==BT848_IFORM_X_XT1)) { OUTB(bktr, BKTR_TGCTRL, BT848_TGCTRL_TGCKI_PLL); /* Select PLL mode */ } else { OUTB(bktr, BKTR_TGCTRL, BT848_TGCTRL_TGCKI_XTAL); /* Select Normal xtal 0/xtal 1 mode */ } /* capture control */ switch (i_flag) { case 1: bktr->bktr_cap_ctl = (BT848_CAP_CTL_DITH_FRAME | BT848_CAP_CTL_EVEN); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) & ~0x20); OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) & ~0x20); interlace = 1; break; case 2: bktr->bktr_cap_ctl = (BT848_CAP_CTL_DITH_FRAME | BT848_CAP_CTL_ODD); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) & ~0x20); OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) & ~0x20); interlace = 1; break; default: bktr->bktr_cap_ctl = (BT848_CAP_CTL_DITH_FRAME | BT848_CAP_CTL_EVEN | BT848_CAP_CTL_ODD); OUTB(bktr, BKTR_E_VSCALE_HI, INB(bktr, BKTR_E_VSCALE_HI) | 0x20); OUTB(bktr, BKTR_O_VSCALE_HI, INB(bktr, BKTR_O_VSCALE_HI) | 0x20); interlace = 2; break; } OUTL(bktr, BKTR_RISC_STRT_ADD, vtophys(bktr->dma_prog)); rows = bktr->rows; cols = bktr->cols; bktr->vbiflags &= ~VBI_CAPTURE; /* default - no vbi capture */ /* RGB Grabs. If /dev/vbi is already open, or we are a PAL/SECAM */ /* user, then use the rgb_vbi RISC program. 
*/ /* Otherwise, use the normal rgb RISC program */ if (pf_int->public.type == METEOR_PIXTYPE_RGB) { if ( (bktr->vbiflags & VBI_OPEN) ||(bktr->format_params == BT848_IFORM_F_PALBDGHI) ||(bktr->format_params == BT848_IFORM_F_SECAM) ){ bktr->bktr_cap_ctl |= BT848_CAP_CTL_VBI_EVEN | BT848_CAP_CTL_VBI_ODD; bktr->vbiflags |= VBI_CAPTURE; rgb_vbi_prog(bktr, i_flag, cols, rows, interlace); return; } else { rgb_prog(bktr, i_flag, cols, rows, interlace); return; } } if ( pf_int->public.type == METEOR_PIXTYPE_YUV ) { yuv422_prog(bktr, i_flag, cols, rows, interlace); OUTB(bktr, BKTR_COLOR_CTL, (INB(bktr, BKTR_COLOR_CTL) & 0xf0) | pixfmt_swap_flags( bktr->pixfmt )); return; } if ( pf_int->public.type == METEOR_PIXTYPE_YUV_PACKED ) { yuvpack_prog(bktr, i_flag, cols, rows, interlace); OUTB(bktr, BKTR_COLOR_CTL, (INB(bktr, BKTR_COLOR_CTL) & 0xf0) | pixfmt_swap_flags( bktr->pixfmt )); return; } if ( pf_int->public.type == METEOR_PIXTYPE_YUV_12 ) { yuv12_prog(bktr, i_flag, cols, rows, interlace); OUTB(bktr, BKTR_COLOR_CTL, (INB(bktr, BKTR_COLOR_CTL) & 0xf0) | pixfmt_swap_flags( bktr->pixfmt )); return; } return; } /****************************************************************************** * video & video capture specific routines: */ /* * */ static void start_capture( bktr_ptr_t bktr, unsigned type ) { u_char i_flag; struct format_params *fp; fp = &format_params[bktr->format_params]; /* If requested, clear out capture buf first */ if (bktr->clr_on_start && (bktr->video.addr == 0)) { bzero((caddr_t)bktr->bigbuf, (size_t)bktr->rows * bktr->cols * bktr->frames * pixfmt_table[ bktr->pixfmt ].public.Bpp); } OUTB(bktr, BKTR_DSTATUS, 0); OUTL(bktr, BKTR_INT_STAT, INL(bktr, BKTR_INT_STAT)); bktr->flags |= type; bktr->flags &= ~METEOR_WANT_MASK; switch(bktr->flags & METEOR_ONLY_FIELDS_MASK) { case METEOR_ONLY_EVEN_FIELDS: bktr->flags |= METEOR_WANT_EVEN; i_flag = 1; break; case METEOR_ONLY_ODD_FIELDS: bktr->flags |= METEOR_WANT_ODD; i_flag = 2; break; default: bktr->flags |= METEOR_WANT_MASK; i_flag = 3; break; } /* TDEC is only valid for continuous captures */ if ( type == METEOR_SINGLE ) { u_short fps_save = bktr->fps; set_fps(bktr, fp->frame_rate); bktr->fps = fps_save; } else set_fps(bktr, bktr->fps); if (bktr->dma_prog_loaded == FALSE) { build_dma_prog(bktr, i_flag); bktr->dma_prog_loaded = TRUE; } OUTL(bktr, BKTR_RISC_STRT_ADD, vtophys(bktr->dma_prog)); } /* * */ static void set_fps( bktr_ptr_t bktr, u_short fps ) { struct format_params *fp; int i_flag; fp = &format_params[bktr->format_params]; switch(bktr->flags & METEOR_ONLY_FIELDS_MASK) { case METEOR_ONLY_EVEN_FIELDS: bktr->flags |= METEOR_WANT_EVEN; i_flag = 1; break; case METEOR_ONLY_ODD_FIELDS: bktr->flags |= METEOR_WANT_ODD; i_flag = 1; break; default: bktr->flags |= METEOR_WANT_MASK; i_flag = 2; break; } OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); OUTL(bktr, BKTR_INT_STAT, ALL_INTS_CLEARED); bktr->fps = fps; OUTB(bktr, BKTR_TDEC, 0); if (fps < fp->frame_rate) OUTB(bktr, BKTR_TDEC, i_flag*(fp->frame_rate - fps) & 0x3f); else OUTB(bktr, BKTR_TDEC, 0); return; } /* * Given a pixfmt index, compute the bt848 swap_flags necessary to * achieve the specified swapping. * Note that without bt swapping, 2Bpp and 3Bpp modes are written * byte-swapped, and 4Bpp modes are byte and word swapped (see Table 6 * and read R->L). * Note also that for 3Bpp, we may additionally need to do some creative * SKIPing to align the FIFO bytelines with the target buffer (see split()). * This is abstracted here: e.g. 
no swaps = RGBA; byte & short swap = ABGR
 * as one would expect.
 */
static u_int pixfmt_swap_flags( int pixfmt )
{
	struct meteor_pixfmt *pf = &pixfmt_table[ pixfmt ].public;
	u_int	swapf = 0;

	switch ( pf->Bpp ) {
	case 2 : swapf = ( pf->swap_bytes ? 0 : BSWAP );
		 break;

	case 3 : /* no swaps supported for 3bpp - makes no sense w/ bt848 */
		 break;

	case 4 : if ( pf->swap_bytes )
			swapf = pf->swap_shorts ? 0 : WSWAP;
		 else
			swapf = pf->swap_shorts ? BSWAP : (BSWAP | WSWAP);
		 break;
	}
	return swapf;
}

/*
 * Converts meteor-defined pixel formats (e.g. METEOR_GEO_RGB16) into
 * our pixfmt_table indices.
 */
static int oformat_meteor_to_bt( u_long format )
{
	int	i;
	struct meteor_pixfmt *pf1, *pf2;

	/* Find format in compatibility table */
	for ( i = 0; i < METEOR_PIXFMT_TABLE_SIZE; i++ )
		if ( meteor_pixfmt_table[i].meteor_format == format )
			break;

	if ( i >= METEOR_PIXFMT_TABLE_SIZE )
		return -1;
	pf1 = &meteor_pixfmt_table[i].public;

	/* Match it with an entry in master pixel format table */
	for ( i = 0; i < PIXFMT_TABLE_SIZE; i++ ) {
		pf2 = &pixfmt_table[i].public;

		if (( pf1->type == pf2->type ) &&
		    ( pf1->Bpp == pf2->Bpp ) &&
		    !bcmp( pf1->masks, pf2->masks, sizeof( pf1->masks )) &&
		    ( pf1->swap_bytes == pf2->swap_bytes ) &&
		    ( pf1->swap_shorts == pf2->swap_shorts ))
			break;
	}
	if ( i >= PIXFMT_TABLE_SIZE )
		return -1;

	return i;
}

/******************************************************************************
 * i2c primitives:
 */

/* */
#define I2CBITTIME		(0x5<<4)	/* 5 * 0.48uS */
#define I2CBITTIME_878		(1 << 7)
#define I2C_READ		0x01
#define I2C_COMMAND		(I2CBITTIME |			\
				 BT848_DATA_CTL_I2CSCL |	\
				 BT848_DATA_CTL_I2CSDA)
#define I2C_COMMAND_878		(I2CBITTIME_878 |		\
				 BT848_DATA_CTL_I2CSCL |	\
				 BT848_DATA_CTL_I2CSDA)

/* Select between old i2c code and new iicbus / smbus code */
#if defined(BKTR_USE_FREEBSD_SMBUS)

/*
 * The hardware interface is actually SMB commands
 */
int i2cWrite( bktr_ptr_t bktr, int addr, int byte1, int byte2 )
{
	char cmd;

	if (bktr->id == BROOKTREE_848  ||
	    bktr->id == BROOKTREE_848A ||
	    bktr->id == BROOKTREE_849A)
		cmd = I2C_COMMAND;
	else
		cmd = I2C_COMMAND_878;

	if (byte2 != -1) {
		if (smbus_writew(bktr->i2c_sc.smbus, addr, cmd,
		    (short)(((byte2 & 0xff) << 8) | (byte1 & 0xff))))
			return (-1);
	} else {
		if (smbus_writeb(bktr->i2c_sc.smbus, addr, cmd,
		    (char)(byte1 & 0xff)))
			return (-1);
	}

	/* return OK */
	return( 0 );
}

int i2cRead( bktr_ptr_t bktr, int addr )
{
	char result;
	char cmd;

	if (bktr->id == BROOKTREE_848  ||
	    bktr->id == BROOKTREE_848A ||
	    bktr->id == BROOKTREE_849A)
		cmd = I2C_COMMAND;
	else
		cmd = I2C_COMMAND_878;

	if (smbus_readb(bktr->i2c_sc.smbus, addr, cmd, &result))
		return (-1);

	return ((int)((unsigned char)result));
}

#define IICBUS(bktr) ((bktr)->i2c_sc.iicbb)

/* The MSP34xx and DPL35xx Audio chips require i2c bus writes of up */
/* to 5 bytes which the bt848 automated i2c bus controller cannot handle */
/* Therefore we need low level control of the i2c bus hardware */

/* Write to the MSP or DPL registers */
void msp_dpl_write(bktr_ptr_t bktr, int i2c_addr, unsigned char dev,
    unsigned int addr, unsigned int data)
{
	unsigned char addr_l, addr_h, data_h, data_l ;

	addr_h = (addr >>8) & 0xff;
	addr_l = addr & 0xff;
	data_h = (data >>8) & 0xff;
	data_l = data & 0xff;
	iicbus_start(IICBUS(bktr), i2c_addr, 0 /* no timeout?
*/); iicbus_write_byte(IICBUS(bktr), dev, 0); iicbus_write_byte(IICBUS(bktr), addr_h, 0); iicbus_write_byte(IICBUS(bktr), addr_l, 0); iicbus_write_byte(IICBUS(bktr), data_h, 0); iicbus_write_byte(IICBUS(bktr), data_l, 0); iicbus_stop(IICBUS(bktr)); return; } /* Read from the MSP or DPL registers */ unsigned int msp_dpl_read(bktr_ptr_t bktr, int i2c_addr, unsigned char dev, unsigned int addr) { unsigned int data; unsigned char addr_l, addr_h, dev_r; int read; u_char data_read[2]; addr_h = (addr >>8) & 0xff; addr_l = addr & 0xff; dev_r = dev+1; /* XXX errors ignored */ iicbus_start(IICBUS(bktr), i2c_addr, 0 /* no timeout? */); iicbus_write_byte(IICBUS(bktr), dev_r, 0); iicbus_write_byte(IICBUS(bktr), addr_h, 0); iicbus_write_byte(IICBUS(bktr), addr_l, 0); iicbus_repeated_start(IICBUS(bktr), i2c_addr +1, 0 /* no timeout? */); iicbus_read(IICBUS(bktr), data_read, 2, &read, IIC_LAST_READ, 0); iicbus_stop(IICBUS(bktr)); data = (data_read[0]<<8) | data_read[1]; return (data); } /* Reset the MSP or DPL chip */ /* The user can block the reset (which is handy if you initialise the * MSP and/or DPL audio in another operating system first (eg in Windows) */ void msp_dpl_reset( bktr_ptr_t bktr, int i2c_addr ) { #ifndef BKTR_NO_MSP_RESET /* put into reset mode */ iicbus_start(IICBUS(bktr), i2c_addr, 0 /* no timeout? */); iicbus_write_byte(IICBUS(bktr), 0x00, 0); iicbus_write_byte(IICBUS(bktr), 0x80, 0); iicbus_write_byte(IICBUS(bktr), 0x00, 0); iicbus_stop(IICBUS(bktr)); /* put back to operational mode */ iicbus_start(IICBUS(bktr), i2c_addr, 0 /* no timeout? */); iicbus_write_byte(IICBUS(bktr), 0x00, 0); iicbus_write_byte(IICBUS(bktr), 0x00, 0); iicbus_write_byte(IICBUS(bktr), 0x00, 0); iicbus_stop(IICBUS(bktr)); #endif return; } static void remote_read(bktr_ptr_t bktr, struct bktr_remote *remote) { int read; /* XXX errors ignored */ iicbus_start(IICBUS(bktr), bktr->remote_control_addr, 0 /* no timeout? 
*/); iicbus_read(IICBUS(bktr), remote->data, 3, &read, IIC_LAST_READ, 0); iicbus_stop(IICBUS(bktr)); return; } #else /* defined(BKTR_USE_FREEBSD_SMBUS) */ /* * Program the i2c bus directly */ int i2cWrite( bktr_ptr_t bktr, int addr, int byte1, int byte2 ) { u_long x; u_long data; /* clear status bits */ OUTL(bktr, BKTR_INT_STAT, BT848_INT_RACK | BT848_INT_I2CDONE); /* build the command datum */ if (bktr->id == BROOKTREE_848 || bktr->id == BROOKTREE_848A || bktr->id == BROOKTREE_849A) { data = ((addr & 0xff) << 24) | ((byte1 & 0xff) << 16) | I2C_COMMAND; } else { data = ((addr & 0xff) << 24) | ((byte1 & 0xff) << 16) | I2C_COMMAND_878; } if ( byte2 != -1 ) { data |= ((byte2 & 0xff) << 8); data |= BT848_DATA_CTL_I2CW3B; } /* write the address and data */ OUTL(bktr, BKTR_I2C_DATA_CTL, data); /* wait for completion */ for ( x = 0x7fffffff; x; --x ) { /* safety valve */ if ( INL(bktr, BKTR_INT_STAT) & BT848_INT_I2CDONE ) break; } /* check for ACK */ if ( !x || !(INL(bktr, BKTR_INT_STAT) & BT848_INT_RACK) ) return( -1 ); /* return OK */ return( 0 ); } /* * */ int i2cRead( bktr_ptr_t bktr, int addr ) { u_long x; /* clear status bits */ OUTL(bktr, BKTR_INT_STAT, BT848_INT_RACK | BT848_INT_I2CDONE); /* write the READ address */ /* The Bt878 and Bt879 differed on the treatment of i2c commands */ if (bktr->id == BROOKTREE_848 || bktr->id == BROOKTREE_848A || bktr->id == BROOKTREE_849A) { OUTL(bktr, BKTR_I2C_DATA_CTL, ((addr & 0xff) << 24) | I2C_COMMAND); } else { OUTL(bktr, BKTR_I2C_DATA_CTL, ((addr & 0xff) << 24) | I2C_COMMAND_878); } /* wait for completion */ for ( x = 0x7fffffff; x; --x ) { /* safety valve */ if ( INL(bktr, BKTR_INT_STAT) & BT848_INT_I2CDONE ) break; } /* check for ACK */ if ( !x || !(INL(bktr, BKTR_INT_STAT) & BT848_INT_RACK) ) return( -1 ); /* it was a read */ return( (INL(bktr, BKTR_I2C_DATA_CTL) >> 8) & 0xff ); } /* The MSP34xx Audio chip require i2c bus writes of up to 5 bytes which the */ /* bt848 automated i2c bus controller cannot handle */ /* Therefore we need low level control of the i2c bus hardware */ /* Idea for the following functions are from elsewhere in this driver and */ /* from the Linux BTTV i2c driver by Gerd Knorr */ #define BITD 40 static void i2c_start( bktr_ptr_t bktr) { OUTL(bktr, BKTR_I2C_DATA_CTL, 1); DELAY( BITD ); /* release data */ OUTL(bktr, BKTR_I2C_DATA_CTL, 3); DELAY( BITD ); /* release clock */ OUTL(bktr, BKTR_I2C_DATA_CTL, 2); DELAY( BITD ); /* lower data */ OUTL(bktr, BKTR_I2C_DATA_CTL, 0); DELAY( BITD ); /* lower clock */ } static void i2c_stop( bktr_ptr_t bktr) { OUTL(bktr, BKTR_I2C_DATA_CTL, 0); DELAY( BITD ); /* lower clock & data */ OUTL(bktr, BKTR_I2C_DATA_CTL, 2); DELAY( BITD ); /* release clock */ OUTL(bktr, BKTR_I2C_DATA_CTL, 3); DELAY( BITD ); /* release data */ } static int i2c_write_byte( bktr_ptr_t bktr, unsigned char data) { int x; int status; /* write out the byte */ for ( x = 7; x >= 0; --x ) { if ( data & (1<= 0; --x ) { OUTL(bktr, BKTR_I2C_DATA_CTL, 3); DELAY( BITD ); /* strobe clock */ bit = INL(bktr, BKTR_I2C_DATA_CTL) & 1; /* read the data bit */ if ( bit ) byte |= (1<>8) & 0xff; addr_l = addr & 0xff; data_h = (data >>8) & 0xff; data_l = data & 0xff; i2c_start(bktr); i2c_write_byte(bktr, msp_w_addr); i2c_write_byte(bktr, dev); i2c_write_byte(bktr, addr_h); i2c_write_byte(bktr, addr_l); i2c_write_byte(bktr, data_h); i2c_write_byte(bktr, data_l); i2c_stop(bktr); } /* Read from the MSP or DPL registers */ unsigned int msp_dpl_read(bktr_ptr_t bktr, int i2c_addr, unsigned char dev, unsigned int addr){ unsigned int data; unsigned 
char addr_l, addr_h, data_1, data_2, dev_r ; addr_h = (addr >>8) & 0xff; addr_l = addr & 0xff; dev_r = dev+1; i2c_start(bktr); i2c_write_byte(bktr,i2c_addr); i2c_write_byte(bktr,dev_r); i2c_write_byte(bktr,addr_h); i2c_write_byte(bktr,addr_l); i2c_start(bktr); i2c_write_byte(bktr,i2c_addr+1); i2c_read_byte(bktr,&data_1, 0); i2c_read_byte(bktr,&data_2, 1); i2c_stop(bktr); data = (data_1<<8) | data_2; return data; } /* Reset the MSP or DPL chip */ /* The user can block the reset (which is handy if you initialise the * MSP audio in another operating system first (eg in Windows) */ void msp_dpl_reset( bktr_ptr_t bktr, int i2c_addr ) { #ifndef BKTR_NO_MSP_RESET /* put into reset mode */ i2c_start(bktr); i2c_write_byte(bktr, i2c_addr); i2c_write_byte(bktr, 0x00); i2c_write_byte(bktr, 0x80); i2c_write_byte(bktr, 0x00); i2c_stop(bktr); /* put back to operational mode */ i2c_start(bktr); i2c_write_byte(bktr, i2c_addr); i2c_write_byte(bktr, 0x00); i2c_write_byte(bktr, 0x00); i2c_write_byte(bktr, 0x00); i2c_stop(bktr); #endif return; } static void remote_read(bktr_ptr_t bktr, struct bktr_remote *remote) { /* XXX errors ignored */ i2c_start(bktr); i2c_write_byte(bktr,bktr->remote_control_addr); i2c_read_byte(bktr,&(remote->data[0]), 0); i2c_read_byte(bktr,&(remote->data[1]), 0); i2c_read_byte(bktr,&(remote->data[2]), 0); i2c_stop(bktr); return; } #endif /* defined(BKTR_USE_FREEBSD_SMBUS) */ #if defined( I2C_SOFTWARE_PROBE ) /* * we are keeping this around for any parts that we need to probe * but that CANNOT be probed via an i2c read. * this is necessary because the hardware i2c mechanism * cannot be programmed for 1 byte writes. * currently there are no known i2c parts that we need to probe * and that cannot be safely read. */ static int i2cProbe( bktr_ptr_t bktr, int addr ); #define BITD 40 #define EXTRA_START /* * probe for an I2C device at addr. */ static int i2cProbe( bktr_ptr_t bktr, int addr ) { int x, status; /* the START */ #if defined( EXTRA_START ) OUTL(bktr, BKTR_I2C_DATA_CTL, 1); DELAY( BITD ); /* release data */ OUTL(bktr, BKTR_I2C_DATA_CTL, 3); DELAY( BITD ); /* release clock */ #endif /* EXTRA_START */ OUTL(bktr, BKTR_I2C_DATA_CTL, 2); DELAY( BITD ); /* lower data */ OUTL(bktr, BKTR_I2C_DATA_CTL, 0); DELAY( BITD ); /* lower clock */ /* write addr */ for ( x = 7; x >= 0; --x ) { if ( addr & (1< __FBSDID("$FreeBSD$"); #ifndef _I915_REG_H_ #define _I915_REG_H_ /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. 
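 *
 * A rough, hypothetical sketch of decoding these fields with the masks
 * below (assuming "dev" is the host bridge's device_t and the standard
 * pci_read_config(9) accessor is used):
 *
 *	uint16_t gmch_ctrl, gms;
 *
 *	gmch_ctrl = pci_read_config(dev, INTEL_GMCH_CTRL, 2);
 *	if (gmch_ctrl & INTEL_GMCH_ENABLED)
 *		gms = gmch_ctrl & INTEL_GMCH_GMS_MASK;
 *
 * where gms is then compared against the INTEL_*_GMCH_GMS_STOLEN_* values.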
*/ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_ENABLED 0x4 #define INTEL_GMCH_MEM_MASK 0x1 #define INTEL_GMCH_MEM_64M 0x1 #define INTEL_GMCH_MEM_128M 0 #define INTEL_GMCH_GMS_MASK (0xf << 4) #define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) #define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) #define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) #define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) #define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) #define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) #define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) #define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) #define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4) #define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4) #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) /* PCI config space */ #define HPLLCC 0xc0 /* 855 only */ #define GC_CLOCK_CONTROL_MASK (3 << 0) #define GC_CLOCK_133_200 (0 << 0) #define GC_CLOCK_100_200 (1 << 0) #define GC_CLOCK_100_133 (2 << 0) #define GC_CLOCK_166_250 (3 << 0) #define GCFGC 0xf0 /* 915+ only */ #define GC_LOW_FREQUENCY_ENABLE (1 << 7) #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) #define GC_DISPLAY_CLOCK_MASK (7 << 4) #define LBB 0xf4 /* VGA stuff */ #define VGA_ST01_MDA 0x3ba #define VGA_ST01_CGA 0x3da #define VGA_MSR_WRITE 0x3c2 #define VGA_MSR_READ 0x3cc #define VGA_MSR_MEM_EN (1<<1) #define VGA_MSR_CGA_MODE (1<<0) #define VGA_SR_INDEX 0x3c4 #define VGA_SR_DATA 0x3c5 #define VGA_AR_INDEX 0x3c0 #define VGA_AR_VID_EN (1<<5) #define VGA_AR_DATA_WRITE 0x3c0 #define VGA_AR_DATA_READ 0x3c1 #define VGA_GR_INDEX 0x3ce #define VGA_GR_DATA 0x3cf /* GR05 */ #define VGA_GR_MEM_READ_MODE_SHIFT 3 #define VGA_GR_MEM_READ_MODE_PLANE 1 /* GR06 */ #define VGA_GR_MEM_MODE_MASK 0xc #define VGA_GR_MEM_MODE_SHIFT 2 #define VGA_GR_MEM_A0000_AFFFF 0 #define VGA_GR_MEM_A0000_BFFFF 1 #define VGA_GR_MEM_B0000_B7FFF 2 #define VGA_GR_MEM_B0000_BFFFF 3 #define VGA_DACMASK 0x3c6 #define VGA_DACRX 0x3c7 #define VGA_DACWX 0x3c8 #define VGA_DACDATA 0x3c9 #define VGA_CR_INDEX_MDA 0x3b4 #define VGA_CR_DATA_MDA 0x3b5 #define VGA_CR_INDEX_CGA 0x3d4 #define VGA_CR_DATA_CGA 0x3d5 /* * Memory interface instructions used by the kernel */ #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) #define MI_NOOP MI_INSTR(0, 0) #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) #define MI_FLUSH MI_INSTR(0x04, 0) #define MI_READ_FLUSH (1 << 0) #define MI_EXE_FLUSH (1 << 1) #define MI_NO_WRITE_FLUSH (1 << 2) #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) #define MI_REPORT_HEAD MI_INSTR(0x07, 0) #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX_SHIFT 2 #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) /* * 3D instructions used by the kernel */ #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) #define GFX_OP_RASTER_RULES 
((0x3<<29)|(0x7<<24)) #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) #define SC_ENABLE_MASK (0x1<<0) #define SC_ENABLE (0x1<<0) #define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) #define SCI_YMIN_MASK (0xffff<<16) #define SCI_XMIN_MASK (0xffff<<0) #define SCI_YMAX_MASK (0xffff<<16) #define SCI_XMAX_MASK (0xffff<<0) #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) #define BLT_DEPTH_8 (0<<24) #define BLT_DEPTH_16_565 (1<<24) #define BLT_DEPTH_16_1555 (2<<24) #define BLT_DEPTH_32 (3<<24) #define BLT_ROP_GXCOPY (0xcc<<16) #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) #define ASYNC_FLIP (1<<22) #define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_B (1<<20) /* * Fence registers */ #define FENCE_REG_830_0 0x2000 #define FENCE_REG_945_8 0x3000 #define I830_FENCE_START_MASK 0x07f80000 #define I830_FENCE_TILING_Y_SHIFT 12 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) #define I915_FENCE_START_MASK 0x0ff00000 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) #define FENCE_REG_965_0 0x03000 #define I965_FENCE_PITCH_SHIFT 2 #define I965_FENCE_TILING_Y_SHIFT 1 #define I965_FENCE_REG_VALID (1<<0) /* * Instruction and interrupt control regs */ #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 #define PRB0_START 0x02038 #define PRB0_CTL 0x0203c #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC #define RING_NR_PAGES 0x001FF000 #define RING_REPORT_MASK 0x00000006 #define RING_REPORT_64K 0x00000002 #define RING_REPORT_128K 0x00000004 #define RING_NO_REPORT 0x00000000 #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define IPEIR 0x02088 #define NOPID 0x02094 #define HWSTAM 0x02098 #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 #define IMR 0x020a8 #define ISR 0x020ac #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) #define I915_DISPLAY_PORT_INTERRUPT (1<<17) #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) #define I915_HWB_OOM_INTERRUPT (1<<13) #define I915_SYNC_STATUS_INTERRUPT (1<<12) #define 
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) #define I915_DEBUG_INTERRUPT (1<<2) #define I915_USER_INTERRUPT (1<<1) #define I915_ASLE_INTERRUPT (1<<0) #define EIR 0x020b0 #define EMR 0x020b4 #define ESR 0x020b8 #define INSTPM 0x020c0 #define ACTHD 0x020c8 #define FW_BLC 0x020d8 #define FW_BLC_SELF 0x020e0 /* 915+ only */ #define MI_ARB_STATE 0x020e4 /* 915+ only */ #define CACHE_MODE_0 0x02120 /* 915+ only */ #define CM0_MASK_SHIFT 16 #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) #define CM0_DEPTH_EVICT_DISABLE (1<<4) #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) #define CM0_RC_OP_FLUSH_DISABLE (1<<0) #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ /* * Framebuffer compression (915+ only) */ #define FBC_CFB_BASE 0x03200 /* 4k page aligned */ #define FBC_LL_BASE 0x03204 /* 4k page aligned */ #define FBC_CONTROL 0x03208 #define FBC_CTL_EN (1<<31) #define FBC_CTL_PERIODIC (1<<30) #define FBC_CTL_INTERVAL_SHIFT (16) #define FBC_CTL_UNCOMPRESSIBLE (1<<14) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO (1<<0) #define FBC_COMMAND 0x0320c #define FBC_CMD_COMPRESS (1<<0) #define FBC_STATUS 0x03210 #define FBC_STAT_COMPRESSING (1<<31) #define FBC_STAT_COMPRESSED (1<<30) #define FBC_STAT_MODIFIED (1<<29) #define FBC_STAT_CURRENT_LINE (1<<0) #define FBC_CONTROL2 0x03214 #define FBC_CTL_FENCE_DBL (0<<4) #define FBC_CTL_IDLE_IMM (0<<2) #define FBC_CTL_IDLE_FULL (1<<2) #define FBC_CTL_IDLE_LINE (2<<2) #define FBC_CTL_IDLE_DEBUG (3<<2) #define FBC_CTL_CPU_FENCE (1<<1) #define FBC_CTL_PLANEA (0<<0) #define FBC_CTL_PLANEB (1<<0) #define FBC_FENCE_OFF 0x0321b #define FBC_LL_SIZE (1536) /* * GPIO regs */ #define GPIOA 0x5010 #define GPIOB 0x5014 #define GPIOC 0x5018 #define GPIOD 0x501c #define GPIOE 0x5020 #define GPIOF 0x5024 #define GPIOG 0x5028 #define GPIOH 0x502c # define GPIO_CLOCK_DIR_MASK (1 << 0) # define GPIO_CLOCK_DIR_IN (0 << 1) # define GPIO_CLOCK_DIR_OUT (1 << 1) # define GPIO_CLOCK_VAL_MASK (1 << 2) # define GPIO_CLOCK_VAL_OUT (1 << 3) # define GPIO_CLOCK_VAL_IN (1 << 4) # define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) # define GPIO_DATA_DIR_MASK (1 << 8) # define GPIO_DATA_DIR_IN (0 << 9) # define GPIO_DATA_DIR_OUT (1 << 9) # define GPIO_DATA_VAL_MASK (1 << 10) # define GPIO_DATA_VAL_OUT (1 << 11) # define GPIO_DATA_VAL_IN (1 << 12) # define GPIO_DATA_PULLUP_DISABLE (1 << 13) /* * Clock control & power management */ #define VGA0 0x6000 #define VGA1 0x6004 #define VGA_PD 0x6010 #define VGA0_PD_P2_DIV_4 (1 << 7) #define VGA0_PD_P1_DIV_2 (1 << 5) #define VGA0_PD_P1_SHIFT 0 #define VGA0_PD_P1_MASK (0x1f << 0) #define VGA1_PD_P2_DIV_4 (1 << 15) #define VGA1_PD_P1_DIV_2 (1 << 13) #define VGA1_PD_P1_SHIFT 8 #define VGA1_PD_P1_MASK (0x1f << 8) #define DPLL_A 0x06014 #define DPLL_B 0x06018 -#define DPLL_VCO_ENABLE (1 << 31) +#define DPLL_VCO_ENABLE (1U << 31) #define DPLL_DVO_HIGH_SPEED (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) #define DPLL_VGA_MODE_DIS (1 << 28) #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ #define DPLL_MODE_MASK (3 << 26) #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ #define 
DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ #define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) #define I915_CRC_ERROR_ENABLE (1UL<<29) #define I915_CRC_DONE_ENABLE (1UL<<28) #define I915_GMBUS_EVENT_ENABLE (1UL<<27) #define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) #define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) #define I915_DPST_EVENT_ENABLE (1UL<<23) #define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) #define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) #define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) #define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ #define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) #define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) #define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) #define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) #define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) #define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) #define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) #define I915_DPST_EVENT_STATUS (1UL<<7) #define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) #define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) #define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) #define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ #define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) #define I915_OVERLAY_UPDATED_STATUS (1UL<<0) #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 #define SR01 1 #define SR01_SCREEN_OFF (1<<5) #define PPCR 0x61204 #define PPCR_ON (1<<0) #define DVOB 0x61140 #define DVOB_ON (1<<31) #define DVOC 0x61160 #define DVOC_ON (1<<31) #define LVDS 0x61180 #define LVDS_ON (1<<31) #define ADPA 0x61100 #define ADPA_DPMS_MASK (~(3<<10)) #define ADPA_DPMS_ON (0<<10) #define ADPA_DPMS_SUSPEND (1<<10) #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) #define RING_TAIL 0x00 #define TAIL_ADDR 0x001FFFF8 #define RING_HEAD 0x04 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC #define RING_START 0x08 #define START_ADDR 0xFFFFF000 #define RING_LEN 0x0C #define RING_NR_PAGES 0x001FF000 #define RING_REPORT_MASK 0x00000006 #define RING_REPORT_64K 0x00000002 #define RING_REPORT_128K 0x00000004 #define RING_NO_REPORT 0x00000000 #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 /* Scratch pad debug 0 reg: */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 /* * The i830 generation, in LVDS mode, defines P1 as the bit number set within * this field (only one bit may be set). */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 #define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 /* i830, required in DVO non-gang */ #define PLL_P2_DIVIDE_BY_4 (1 << 23) #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ #define PLL_REF_INPUT_DREFCLK (0 << 13) #define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ #define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) #define PLL_REF_INPUT_MASK (3 << 13) #define PLL_LOAD_PULSE_PHASE_SHIFT 9 /* * Parallel to Serial Load Pulse phase selection. * Selects the phase for the 10X DPLL clock for the PCIe * digital display port. The range is 4 to 13; 10 or more * is just a flip delay. 
The default is 6.
 */
#define PLL_LOAD_PULSE_PHASE_MASK	(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
#define DISPLAY_RATE_SELECT_FPA1	(1 << 8)
/*
 * SDVO multiplier for 945G/GM. Not used on 965.
 */
#define SDVO_MULTIPLIER_MASK		0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES	4
#define SDVO_MULTIPLIER_SHIFT_VGA	0
#define DPLL_A_MD	0x0601c	/* 965+ only */
/*
 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
 *
 * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
 */
#define DPLL_MD_UDI_DIVIDER_MASK	0x3f000000
#define DPLL_MD_UDI_DIVIDER_SHIFT	24
/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
#define DPLL_MD_VGA_UDI_DIVIDER_MASK	0x003f0000
#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT	16
/*
 * SDVO/UDI pixel multiplier.
 *
 * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
 * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
 * modes, the bus rate would be below the limits, so SDVO allows for stuffing
 * dummy bytes in the datastream at an increased clock rate, with both sides of
 * the link knowing how many bytes are fill.
 *
 * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
 * rate to 130 MHz to get a bus rate of 1.30 GHz.  The DPLL clock rate would be
 * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
 * through an SDVO command.
 *
 * This register field has values of multiplication factor minus 1, with
 * a maximum multiplier of 5 for SDVO.
 */
#define DPLL_MD_UDI_MULTIPLIER_MASK	0x00003f00
#define DPLL_MD_UDI_MULTIPLIER_SHIFT	8
/*
 * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
 * This is best set to the default value (3) or the CRT won't work.  No,
 * I don't entirely understand what this does...
 */
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define DPLL_B_MD	0x06020	/* 965+ only */
#define FPA0	0x06040
#define FPA1	0x06044
#define FPB0	0x06048
#define FPB1	0x0604c
#define FP_N_DIV_MASK		0x003f0000
#define FP_N_IGD_DIV_MASK	0x00ff0000
#define FP_N_DIV_SHIFT		16
#define FP_M1_DIV_MASK		0x00003f00
#define FP_M1_DIV_SHIFT		8
#define FP_M2_DIV_MASK		0x0000003f
#define FP_M2_IGD_DIV_MASK	0x000000ff
#define FP_M2_DIV_SHIFT		0
#define DPLL_TEST	0x606c
#define DPLLB_TEST_SDVO_DIV_1		(0 << 22)
#define DPLLB_TEST_SDVO_DIV_2		(1 << 22)
#define DPLLB_TEST_SDVO_DIV_4		(2 << 22)
#define DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
#define DPLLB_TEST_N_BYPASS		(1 << 19)
#define DPLLB_TEST_M_BYPASS		(1 << 18)
#define DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
#define DPLLA_TEST_N_BYPASS		(1 << 3)
#define DPLLA_TEST_M_BYPASS		(1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
#define D_STATE		0x6104
#define CG_2D_DIS	0x6200
#define CG_3D_DIS	0x6204
/*
 * Palette regs
 */
#define PALETTE_A	0x0a000
#define PALETTE_B	0x0a800
/* MCH MMIO space */
/*
 * MCHBAR mirror.
 *
 * This mirrors the MCHBAR MMIO space whose location is determined by
 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
 * every way.  It is not accessible from the CP register read instructions.
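 * For example, the DCC register below lives at offset 0x200 within MCHBAR
 * itself and so appears here at MCHBAR_MIRROR_BASE + 0x200 = 0x10200.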
* */ #define MCHBAR_MIRROR_BASE 0x10000 /** 915-945 and GM965 MCH register controlling DRAM channel access */ #define DCC 0x10200 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) #define DCC_ADDRESSING_MODE_MASK (3 << 0) #define DCC_CHANNEL_XOR_DISABLE (1 << 10) #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) /** 965 MCH register controlling DRAM channel configuration */ #define C0DRB3 0x10206 #define C1DRB3 0x10606 /** GM965 GM45 render standby register */ #define MCHBAR_RENDER_STANDBY 0x111B8 #define PEG_BAND_GAP_DATA 0x14d68 /* * Overlay regs */ #define OVADD 0x30000 #define DOVSTA 0x30008 #define OC_BUF (0x3<<20) #define OGAMC5 0x30010 #define OGAMC4 0x30014 #define OGAMC3 0x30018 #define OGAMC2 0x3001c #define OGAMC1 0x30020 #define OGAMC0 0x30024 /* * Display engine regs */ /* Pipe A timing regs */ #define HTOTAL_A 0x60000 #define HBLANK_A 0x60004 #define HSYNC_A 0x60008 #define VTOTAL_A 0x6000c #define VBLANK_A 0x60010 #define VSYNC_A 0x60014 #define PIPEASRC 0x6001c #define BCLRPAT_A 0x60020 /* Pipe B timing regs */ #define HTOTAL_B 0x61000 #define HBLANK_B 0x61004 #define HSYNC_B 0x61008 #define VTOTAL_B 0x6100c #define VBLANK_B 0x61010 #define VSYNC_B 0x61014 #define PIPEBSRC 0x6101c #define BCLRPAT_B 0x61020 /* VGA port control */ #define ADPA 0x61100 #define ADPA_DAC_ENABLE (1<<31) #define ADPA_DAC_DISABLE 0 #define ADPA_PIPE_SELECT_MASK (1<<30) #define ADPA_PIPE_A_SELECT 0 #define ADPA_PIPE_B_SELECT (1<<30) #define ADPA_USE_VGA_HVPOLARITY (1<<15) #define ADPA_SETS_HVPOLARITY 0 #define ADPA_VSYNC_CNTL_DISABLE (1<<11) #define ADPA_VSYNC_CNTL_ENABLE 0 #define ADPA_HSYNC_CNTL_DISABLE (1<<10) #define ADPA_HSYNC_CNTL_ENABLE 0 #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) #define ADPA_VSYNC_ACTIVE_LOW 0 #define ADPA_HSYNC_ACTIVE_HIGH (1<<3) #define ADPA_HSYNC_ACTIVE_LOW 0 #define ADPA_DPMS_MASK (~(3<<10)) #define ADPA_DPMS_ON (0<<10) #define ADPA_DPMS_SUSPEND (1<<10) #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) /* Hotplug control (945+ only) */ #define PORT_HOTPLUG_EN 0x61110 #define HDMIB_HOTPLUG_INT_EN (1 << 29) #define HDMIC_HOTPLUG_INT_EN (1 << 28) #define HDMID_HOTPLUG_INT_EN (1 << 27) #define SDVOB_HOTPLUG_INT_EN (1 << 26) #define SDVOC_HOTPLUG_INT_EN (1 << 25) #define TV_HOTPLUG_INT_EN (1 << 18) #define CRT_HOTPLUG_INT_EN (1 << 9) #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) #define PORT_HOTPLUG_STAT 0x61114 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) #define HDMIC_HOTPLUG_INT_STATUS (1 << 28) #define HDMID_HOTPLUG_INT_STATUS (1 << 27) #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) #define SDVOC_HOTPLUG_INT_STATUS (1 << 7) #define SDVOB_HOTPLUG_INT_STATUS (1 << 6) /* SDVO port control */ #define SDVOB 0x61140 #define SDVOC 0x61160 -#define SDVO_ENABLE (1 << 31) +#define SDVO_ENABLE (1U << 31) #define SDVO_PIPE_B_SELECT (1 << 30) #define SDVO_STALL_SELECT (1 << 29) #define SDVO_INTERRUPT_ENABLE (1 << 26) /** * 915G/GM SDVO pixel multiplier. * * Programmed value is multiplier - 1, up to 5x. 
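 * For example, a 2x pixel multiplier is programmed as 1, and the maximum
 * 5x multiplier as 4.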
 *
 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
 */
#define SDVO_PORT_MULTIPLY_MASK		(7 << 23)
#define SDVO_PORT_MULTIPLY_SHIFT	23
#define SDVO_PHASE_SELECT_MASK		(15 << 19)
#define SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
#define SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
#define SDVOC_GANG_MODE			(1 << 16)
#define SDVO_ENCODING_SDVO		(0x0 << 10)
#define SDVO_ENCODING_HDMI		(0x2 << 10)
/** Required for HDMI operation */
#define SDVO_NULL_PACKETS_DURING_VSYNC	(1 << 9)
#define SDVO_BORDER_ENABLE		(1 << 7)
#define SDVO_AUDIO_ENABLE		(1 << 6)
/** New with 965, default is to be set */
#define SDVO_VSYNC_ACTIVE_HIGH		(1 << 4)
/** New with 965, default is to be set */
#define SDVO_HSYNC_ACTIVE_HIGH		(1 << 3)
#define SDVOB_PCIE_CONCURRENCY		(1 << 3)
#define SDVO_DETECTED			(1 << 2)
/* Bits to be preserved when writing */
#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))

/* DVO port control */
#define DVOA		0x61120
#define DVOB		0x61140
#define DVOC		0x61160
-#define DVO_ENABLE			(1 << 31)
+#define DVO_ENABLE			(1U << 31)
#define DVO_PIPE_B_SELECT		(1 << 30)
#define DVO_PIPE_STALL_UNUSED		(0 << 28)
#define DVO_PIPE_STALL			(1 << 28)
#define DVO_PIPE_STALL_TV		(2 << 28)
#define DVO_PIPE_STALL_MASK		(3 << 28)
#define DVO_USE_VGA_SYNC		(1 << 15)
#define DVO_DATA_ORDER_I740		(0 << 14)
#define DVO_DATA_ORDER_FP		(1 << 14)
#define DVO_VSYNC_DISABLE		(1 << 11)
#define DVO_HSYNC_DISABLE		(1 << 10)
#define DVO_VSYNC_TRISTATE		(1 << 9)
#define DVO_HSYNC_TRISTATE		(1 << 8)
#define DVO_BORDER_ENABLE		(1 << 7)
#define DVO_DATA_ORDER_GBRG		(1 << 6)
#define DVO_DATA_ORDER_RGGB		(0 << 6)
#define DVO_DATA_ORDER_GBRG_ERRATA	(0 << 6)
#define DVO_DATA_ORDER_RGGB_ERRATA	(1 << 6)
#define DVO_VSYNC_ACTIVE_HIGH		(1 << 4)
#define DVO_HSYNC_ACTIVE_HIGH		(1 << 3)
#define DVO_BLANK_ACTIVE_HIGH		(1 << 2)
#define DVO_OUTPUT_CSTATE_PIXELS	(1 << 1)	/* SDG only */
#define DVO_OUTPUT_SOURCE_SIZE_PIXELS	(1 << 0)	/* SDG only */
#define DVO_PRESERVE_MASK		(0x7<<24)
#define DVOA_SRCDIM	0x61124
#define DVOB_SRCDIM	0x61144
#define DVOC_SRCDIM	0x61164
#define DVO_SRCDIM_HORIZONTAL_SHIFT	12
#define DVO_SRCDIM_VERTICAL_SHIFT	0

/* LVDS port control */
#define LVDS		0x61180
/*
 * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
 * the DPLL semantics change when the LVDS is assigned to that pipe.
 */
-#define LVDS_PORT_EN			(1 << 31)
+#define LVDS_PORT_EN			(1U << 31)
/* Selects pipe B for LVDS data.  Must be set on pre-965. */
#define LVDS_PIPEB_SELECT		(1 << 30)
/*
 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
 * pixel.
 */
#define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
#define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
#define LVDS_A0A2_CLKA_POWER_UP		(3 << 8)
/*
 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
 * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
 * on.
 */
#define LVDS_A3_POWER_MASK		(3 << 6)
#define LVDS_A3_POWER_DOWN		(0 << 6)
#define LVDS_A3_POWER_UP		(3 << 6)
/*
 * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
 * is set.
 */
#define LVDS_CLKB_POWER_MASK		(3 << 4)
#define LVDS_CLKB_POWER_DOWN		(0 << 4)
#define LVDS_CLKB_POWER_UP		(3 << 4)
/*
 * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
 * setting for whether we are in dual-channel mode.  The B3 pair will
 * additionally only be powered up when LVDS_A3_POWER_UP is set.
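 * (So, presumably, a dual-channel 24-bit panel wants LVDS_A0A2_CLKA_POWER_UP,
 * LVDS_A3_POWER_UP, LVDS_CLKB_POWER_UP and LVDS_B0B3_POWER_UP all set.)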
*/ #define LVDS_B0B3_POWER_MASK (3 << 2) #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) /* Panel power sequencing */ #define PP_STATUS 0x61200 -#define PP_ON (1 << 31) +#define PP_ON (1U << 31) /* * Indicates that all dependencies of the panel are on: * * - PLL enabled * - pipe enabled * - LVDS/DVOB/DVOC on */ #define PP_READY (1 << 30) #define PP_SEQUENCE_NONE (0 << 28) #define PP_SEQUENCE_ON (1 << 28) #define PP_SEQUENCE_OFF (2 << 28) #define PP_SEQUENCE_MASK 0x30000000 #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 #define PP_OFF_DELAYS 0x6120c #define PP_DIVISOR 0x61210 /* Panel fitting */ #define PFIT_CONTROL 0x61230 -#define PFIT_ENABLE (1 << 31) +#define PFIT_ENABLE (1U << 31) #define PFIT_PIPE_MASK (3 << 29) #define PFIT_PIPE_SHIFT 29 #define VERT_INTERP_DISABLE (0 << 10) #define VERT_INTERP_BILINEAR (1 << 10) #define VERT_INTERP_MASK (3 << 10) #define VERT_AUTO_SCALE (1 << 9) #define HORIZ_INTERP_DISABLE (0 << 6) #define HORIZ_INTERP_BILINEAR (1 << 6) #define HORIZ_INTERP_MASK (3 << 6) #define HORIZ_AUTO_SCALE (1 << 5) #define PANEL_8TO6_DITHER_ENABLE (1 << 3) #define PFIT_PGM_RATIOS 0x61234 #define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 #define PFIT_AUTO_RATIOS 0x61238 /* Backlight control */ #define BLC_PWM_CTL 0x61254 #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) #define BLC_PWM_CTL2 0x61250 /* 965+ only */ #define BLM_COMBINATION_MODE (1 << 30) /* * This is the most significant 15 bits of the number of backlight cycles in a * complete cycle of the modulated backlight control. * * The actual value is this field multiplied by two. */ #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) #define BLM_LEGACY_MODE (1 << 16) /* * This is the number of cycles out of the backlight modulation cycle for which * the backlight is on. * * This field must be no greater than the number of cycles in the complete * backlight modulation cycle. */ #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) /* TV port control */ #define TV_CTL 0x68000 /** Enables the TV encoder */ -# define TV_ENC_ENABLE (1 << 31) +# define TV_ENC_ENABLE (1U << 31) /** Sources the TV encoder input from pipe B instead of A. */ # define TV_ENC_PIPEB_SELECT (1 << 30) /** Outputs composite video (DAC A only) */ # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) /** Outputs SVideo video (DAC B/C) */ # define TV_ENC_OUTPUT_SVIDEO (1 << 28) /** Outputs Component video (DAC A/B/C) */ # define TV_ENC_OUTPUT_COMPONENT (2 << 28) /** Outputs Composite and SVideo (DAC A/B/C) */ # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) # define TV_TRILEVEL_SYNC (1 << 21) /** Enables slow sync generation (945GM only) */ # define TV_SLOW_SYNC (1 << 20) /** Selects 4x oversampling for 480i and 576p */ # define TV_OVERSAMPLE_4X (0 << 18) /** Selects 2x oversampling for 720p and 1080i */ # define TV_OVERSAMPLE_2X (1 << 18) /** Selects no oversampling for 1080p */ # define TV_OVERSAMPLE_NONE (2 << 18) /** Selects 8x oversampling */ # define TV_OVERSAMPLE_8X (3 << 18) /** Selects progressive mode rather than interlaced */ # define TV_PROGRESSIVE (1 << 17) /** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ # define TV_PAL_BURST (1 << 16) /** Field for setting delay of Y compared to C */ # define TV_YC_SKEW_MASK (7 << 12) /** Enables a fix for 480p/576p standard definition modes on the 915GM only */ # define TV_ENC_SDP_FIX (1 << 11) /** * Enables a fix for the 915GM only. * * Not sure what it does. 
*/ # define TV_ENC_C0_FIX (1 << 10) /** Bits that must be preserved by software */ # define TV_CTL_SAVE ((3 << 8) | (3 << 6)) # define TV_FUSE_STATE_MASK (3 << 4) /** Read-only state that reports all features enabled */ # define TV_FUSE_STATE_ENABLED (0 << 4) /** Read-only state that reports that Macrovision is disabled in hardware*/ # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) /** Read-only state that reports that TV-out is disabled in hardware. */ # define TV_FUSE_STATE_DISABLED (2 << 4) /** Normal operation */ # define TV_TEST_MODE_NORMAL (0 << 0) /** Encoder test pattern 1 - combo pattern */ # define TV_TEST_MODE_PATTERN_1 (1 << 0) /** Encoder test pattern 2 - full screen vertical 75% color bars */ # define TV_TEST_MODE_PATTERN_2 (2 << 0) /** Encoder test pattern 3 - full screen horizontal 75% color bars */ # define TV_TEST_MODE_PATTERN_3 (3 << 0) /** Encoder test pattern 4 - random noise */ # define TV_TEST_MODE_PATTERN_4 (4 << 0) /** Encoder test pattern 5 - linear color ramps */ # define TV_TEST_MODE_PATTERN_5 (5 << 0) /** * This test mode forces the DACs to 50% of full output. * * This is used for load detection in combination with TVDAC_SENSE_MASK */ # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) # define TV_TEST_MODE_MASK (7 << 0) #define TV_DAC 0x68004 /** * Reports that DAC state change logic has reported change (RO). * * This gets cleared when TV_DAC_STATE_EN is cleared */ -# define TVDAC_STATE_CHG (1 << 31) +# define TVDAC_STATE_CHG (1U << 31) # define TVDAC_SENSE_MASK (7 << 28) /** Reports that DAC A voltage is above the detect threshold */ # define TVDAC_A_SENSE (1 << 30) /** Reports that DAC B voltage is above the detect threshold */ # define TVDAC_B_SENSE (1 << 29) /** Reports that DAC C voltage is above the detect threshold */ # define TVDAC_C_SENSE (1 << 28) /** * Enables DAC state detection logic, for load-based TV detection. * * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set * to off, for load detection to work. */ # define TVDAC_STATE_CHG_EN (1 << 27) /** Sets the DAC A sense value to high */ # define TVDAC_A_SENSE_CTL (1 << 26) /** Sets the DAC B sense value to high */ # define TVDAC_B_SENSE_CTL (1 << 25) /** Sets the DAC C sense value to high */ # define TVDAC_C_SENSE_CTL (1 << 24) /** Overrides the ENC_ENABLE and DAC voltage levels */ # define DAC_CTL_OVERRIDE (1 << 7) /** Sets the slew rate. Must be preserved in software */ # define ENC_TVDAC_SLEW_FAST (1 << 6) # define DAC_A_1_3_V (0 << 4) # define DAC_A_1_1_V (1 << 4) # define DAC_A_0_7_V (2 << 4) # define DAC_A_OFF (3 << 4) # define DAC_B_1_3_V (0 << 2) # define DAC_B_1_1_V (1 << 2) # define DAC_B_0_7_V (2 << 2) # define DAC_B_OFF (3 << 2) # define DAC_C_1_3_V (0 << 0) # define DAC_C_1_1_V (1 << 0) # define DAC_C_0_7_V (2 << 0) # define DAC_C_OFF (3 << 0) /** * CSC coefficients are stored in a floating point format with 9 bits of * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with * -1 (0x3) being the only legal negative value. */ #define TV_CSC_Y 0x68010 # define TV_RY_MASK 0x07ff0000 # define TV_RY_SHIFT 16 # define TV_GY_MASK 0x00000fff # define TV_GY_SHIFT 0 #define TV_CSC_Y2 0x68014 # define TV_BY_MASK 0x07ff0000 # define TV_BY_SHIFT 16 /** * Y attenuation for component video. * * Stored in 1.9 fixed point. 
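 * (1.9 fixed point means one integer bit and nine fraction bits, so unity
 * gain is represented as 1 << 9 = 0x200.)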
*/ # define TV_AY_MASK 0x000003ff # define TV_AY_SHIFT 0 #define TV_CSC_U 0x68018 # define TV_RU_MASK 0x07ff0000 # define TV_RU_SHIFT 16 # define TV_GU_MASK 0x000007ff # define TV_GU_SHIFT 0 #define TV_CSC_U2 0x6801c # define TV_BU_MASK 0x07ff0000 # define TV_BU_SHIFT 16 /** * U attenuation for component video. * * Stored in 1.9 fixed point. */ # define TV_AU_MASK 0x000003ff # define TV_AU_SHIFT 0 #define TV_CSC_V 0x68020 # define TV_RV_MASK 0x0fff0000 # define TV_RV_SHIFT 16 # define TV_GV_MASK 0x000007ff # define TV_GV_SHIFT 0 #define TV_CSC_V2 0x68024 # define TV_BV_MASK 0x07ff0000 # define TV_BV_SHIFT 16 /** * V attenuation for component video. * * Stored in 1.9 fixed point. */ # define TV_AV_MASK 0x000007ff # define TV_AV_SHIFT 0 #define TV_CLR_KNOBS 0x68028 /** 2s-complement brightness adjustment */ # define TV_BRIGHTNESS_MASK 0xff000000 # define TV_BRIGHTNESS_SHIFT 24 /** Contrast adjustment, as a 2.6 unsigned floating point number */ # define TV_CONTRAST_MASK 0x00ff0000 # define TV_CONTRAST_SHIFT 16 /** Saturation adjustment, as a 2.6 unsigned floating point number */ # define TV_SATURATION_MASK 0x0000ff00 # define TV_SATURATION_SHIFT 8 /** Hue adjustment, as an integer phase angle in degrees */ # define TV_HUE_MASK 0x000000ff # define TV_HUE_SHIFT 0 #define TV_CLR_LEVEL 0x6802c /** Controls the DAC level for black */ # define TV_BLACK_LEVEL_MASK 0x01ff0000 # define TV_BLACK_LEVEL_SHIFT 16 /** Controls the DAC level for blanking */ # define TV_BLANK_LEVEL_MASK 0x000001ff # define TV_BLANK_LEVEL_SHIFT 0 #define TV_H_CTL_1 0x68030 /** Number of pixels in the hsync. */ # define TV_HSYNC_END_MASK 0x1fff0000 # define TV_HSYNC_END_SHIFT 16 /** Total number of pixels minus one in the line (display and blanking). */ # define TV_HTOTAL_MASK 0x00001fff # define TV_HTOTAL_SHIFT 0 #define TV_H_CTL_2 0x68034 /** Enables the colorburst (needed for non-component color) */ -# define TV_BURST_ENA (1 << 31) +# define TV_BURST_ENA (1U << 31) /** Offset of the colorburst from the start of hsync, in pixels minus one. */ # define TV_HBURST_START_SHIFT 16 # define TV_HBURST_START_MASK 0x1fff0000 /** Length of the colorburst */ # define TV_HBURST_LEN_SHIFT 0 # define TV_HBURST_LEN_MASK 0x0001fff #define TV_H_CTL_3 0x68038 /** End of hblank, measured in pixels minus one from start of hsync */ # define TV_HBLANK_END_SHIFT 16 # define TV_HBLANK_END_MASK 0x1fff0000 /** Start of hblank, measured in pixels minus one from start of hsync */ # define TV_HBLANK_START_SHIFT 0 # define TV_HBLANK_START_MASK 0x0001fff #define TV_V_CTL_1 0x6803c /** XXX */ # define TV_NBR_END_SHIFT 16 # define TV_NBR_END_MASK 0x07ff0000 /** XXX */ # define TV_VI_END_F1_SHIFT 8 # define TV_VI_END_F1_MASK 0x00003f00 /** XXX */ # define TV_VI_END_F2_SHIFT 0 # define TV_VI_END_F2_MASK 0x0000003f #define TV_V_CTL_2 0x68040 /** Length of vsync, in half lines */ # define TV_VSYNC_LEN_MASK 0x07ff0000 # define TV_VSYNC_LEN_SHIFT 16 /** Offset of the start of vsync in field 1, measured in one less than the * number of half lines. */ # define TV_VSYNC_START_F1_MASK 0x00007f00 # define TV_VSYNC_START_F1_SHIFT 8 /** * Offset of the start of vsync in field 2, measured in one less than the * number of half lines. 
 */
# define TV_VSYNC_START_F2_MASK		0x0000007f
# define TV_VSYNC_START_F2_SHIFT	0

#define TV_V_CTL_3	0x68044
/** Enables generation of the equalization signal */
-# define TV_EQUAL_ENA			(1 << 31)
+# define TV_EQUAL_ENA			(1U << 31)
/** Length of the equalization pulse, in half lines */
# define TV_VEQ_LEN_MASK		0x007f0000
# define TV_VEQ_LEN_SHIFT		16
/** Offset of the start of equalization in field 1, measured in one less than
 * the number of half lines.
 */
# define TV_VEQ_START_F1_MASK		0x0007f00
# define TV_VEQ_START_F1_SHIFT		8
/**
 * Offset of the start of equalization in field 2, measured in one less than
 * the number of half lines.
 */
# define TV_VEQ_START_F2_MASK		0x000007f
# define TV_VEQ_START_F2_SHIFT		0

#define TV_V_CTL_4	0x68048
/**
 * Offset to start of vertical colorburst, measured in one less than the
 * number of lines from vertical start.
 */
# define TV_VBURST_START_F1_MASK	0x003f0000
# define TV_VBURST_START_F1_SHIFT	16
/**
 * Offset to the end of vertical colorburst, measured in one less than the
 * number of lines from the start of NBR.
 */
# define TV_VBURST_END_F1_MASK		0x000000ff
# define TV_VBURST_END_F1_SHIFT		0

#define TV_V_CTL_5	0x6804c
/**
 * Offset to start of vertical colorburst, measured in one less than the
 * number of lines from vertical start.
 */
# define TV_VBURST_START_F2_MASK	0x003f0000
# define TV_VBURST_START_F2_SHIFT	16
/**
 * Offset to the end of vertical colorburst, measured in one less than the
 * number of lines from the start of NBR.
 */
# define TV_VBURST_END_F2_MASK		0x000000ff
# define TV_VBURST_END_F2_SHIFT		0

#define TV_V_CTL_6	0x68050
/**
 * Offset to start of vertical colorburst, measured in one less than the
 * number of lines from vertical start.
 */
# define TV_VBURST_START_F3_MASK	0x003f0000
# define TV_VBURST_START_F3_SHIFT	16
/**
 * Offset to the end of vertical colorburst, measured in one less than the
 * number of lines from the start of NBR.
 */
# define TV_VBURST_END_F3_MASK		0x000000ff
# define TV_VBURST_END_F3_SHIFT		0

#define TV_V_CTL_7	0x68054
/**
 * Offset to start of vertical colorburst, measured in one less than the
 * number of lines from vertical start.
 */
# define TV_VBURST_START_F4_MASK	0x003f0000
# define TV_VBURST_START_F4_SHIFT	16
/**
 * Offset to the end of vertical colorburst, measured in one less than the
 * number of lines from the start of NBR.
 */
# define TV_VBURST_END_F4_MASK		0x000000ff
# define TV_VBURST_END_F4_SHIFT		0

#define TV_SC_CTL_1	0x68060
/** Turns on the first subcarrier phase generation DDA */
-# define TV_SC_DDA1_EN			(1 << 31)
+# define TV_SC_DDA1_EN			(1U << 31)
/** Turns on the second subcarrier phase generation DDA */
# define TV_SC_DDA2_EN			(1 << 30)
/** Turns on the third subcarrier phase generation DDA */
# define TV_SC_DDA3_EN			(1 << 29)
/** Sets the subcarrier DDA to reset frequency every other field */
# define TV_SC_RESET_EVERY_2		(0 << 24)
/** Sets the subcarrier DDA to reset frequency every fourth field */
# define TV_SC_RESET_EVERY_4		(1 << 24)
/** Sets the subcarrier DDA to reset frequency every eighth field */
# define TV_SC_RESET_EVERY_8		(2 << 24)
/** Sets the subcarrier DDA to never reset the frequency */
# define TV_SC_RESET_NEVER		(3 << 24)
/** Sets the peak amplitude of the colorburst. */
# define TV_BURST_LEVEL_MASK		0x00ff0000
# define TV_BURST_LEVEL_SHIFT		16
/** Sets the increment of the first subcarrier phase generation DDA */
# define TV_SCDDA1_INC_MASK		0x00000fff
# define TV_SCDDA1_INC_SHIFT		0

#define TV_SC_CTL_2	0x68064
/** Sets the rollover for the second subcarrier phase generation DDA */
# define TV_SCDDA2_SIZE_MASK		0x7fff0000
# define TV_SCDDA2_SIZE_SHIFT		16
/** Sets the increment of the second subcarrier phase generation DDA */
# define TV_SCDDA2_INC_MASK		0x00007fff
# define TV_SCDDA2_INC_SHIFT		0

#define TV_SC_CTL_3	0x68068
/** Sets the rollover for the third subcarrier phase generation DDA */
# define TV_SCDDA3_SIZE_MASK		0x7fff0000
# define TV_SCDDA3_SIZE_SHIFT		16
/** Sets the increment of the third subcarrier phase generation DDA */
# define TV_SCDDA3_INC_MASK		0x00007fff
# define TV_SCDDA3_INC_SHIFT		0

#define TV_WIN_POS	0x68070
/** X coordinate of the display from the start of horizontal active */
# define TV_XPOS_MASK			0x1fff0000
# define TV_XPOS_SHIFT			16
/** Y coordinate of the display from the start of vertical active (NBR) */
# define TV_YPOS_MASK			0x00000fff
# define TV_YPOS_SHIFT			0

#define TV_WIN_SIZE	0x68074
/** Horizontal size of the display window, measured in pixels */
# define TV_XSIZE_MASK			0x1fff0000
# define TV_XSIZE_SHIFT			16
/**
 * Vertical size of the display window, measured in pixels.
 *
 * Must be even for interlaced modes.
 */
# define TV_YSIZE_MASK			0x00000fff
# define TV_YSIZE_SHIFT			0

#define TV_FILTER_CTL_1	0x68080
/**
 * Enables automatic scaling calculation.
 *
 * If set, the rest of the registers are ignored, and the calculated values can
 * be read back from the register.
 */
-# define TV_AUTO_SCALE			(1 << 31)
+# define TV_AUTO_SCALE			(1U << 31)
/**
 * Disables the vertical filter.
 *
 * This is required on modes more than 1024 pixels wide.
 */
# define TV_V_FILTER_BYPASS		(1 << 29)
/** Enables adaptive vertical filtering */
# define TV_VADAPT			(1 << 28)
# define TV_VADAPT_MODE_MASK		(3 << 26)
/** Selects the least adaptive vertical filtering mode */
# define TV_VADAPT_MODE_LEAST		(0 << 26)
/** Selects the moderately adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MODERATE	(1 << 26)
/** Selects the most adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MOST		(3 << 26)
/**
 * Sets the horizontal scaling factor.
 *
 * This should be the fractional part of the horizontal scaling factor divided
 * by the oversampling rate.
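 * (Worked example of the formula given just below: a 720-pixel source scaled
 * to a 640-pixel destination with 4x oversampling gives
 * (720 - 1) / ((4 * 640) - 1), roughly 0.281.)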
TV_HSCALE should be less than 1, and set to: * * (src width - 1) / ((oversample * dest width) - 1) */ # define TV_HSCALE_FRAC_MASK 0x00003fff # define TV_HSCALE_FRAC_SHIFT 0 #define TV_FILTER_CTL_2 0x68084 /** * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) */ # define TV_VSCALE_INT_MASK 0x00038000 # define TV_VSCALE_INT_SHIFT 15 /** * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. * * \sa TV_VSCALE_INT_MASK */ # define TV_VSCALE_FRAC_MASK 0x00007fff # define TV_VSCALE_FRAC_SHIFT 0 #define TV_FILTER_CTL_3 0x68088 /** * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) * * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. */ # define TV_VSCALE_IP_INT_MASK 0x00038000 # define TV_VSCALE_IP_INT_SHIFT 15 /** * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. * * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. * * \sa TV_VSCALE_IP_INT_MASK */ # define TV_VSCALE_IP_FRAC_MASK 0x00007fff # define TV_VSCALE_IP_FRAC_SHIFT 0 #define TV_CC_CONTROL 0x68090 -# define TV_CC_ENABLE (1 << 31) +# define TV_CC_ENABLE (1U << 31) /** * Specifies which field to send the CC data in. * * CC data is usually sent in field 0. */ # define TV_CC_FID_MASK (1 << 27) # define TV_CC_FID_SHIFT 27 /** Sets the horizontal position of the CC data. Usually 135. */ # define TV_CC_HOFF_MASK 0x03ff0000 # define TV_CC_HOFF_SHIFT 16 /** Sets the vertical position of the CC data. Usually 21 */ # define TV_CC_LINE_MASK 0x0000003f # define TV_CC_LINE_SHIFT 0 #define TV_CC_DATA 0x68094 -# define TV_CC_RDY (1 << 31) +# define TV_CC_RDY (1U << 31) /** Second word of CC data to be transmitted. */ # define TV_CC_DATA_2_MASK 0x007f0000 # define TV_CC_DATA_2_SHIFT 16 /** First word of CC data to be transmitted. 
*/ # define TV_CC_DATA_1_MASK 0x0000007f # define TV_CC_DATA_1_SHIFT 0 #define TV_H_LUMA_0 0x68100 #define TV_H_LUMA_59 0x681ec #define TV_H_CHROMA_0 0x68200 #define TV_H_CHROMA_59 0x682ec #define TV_V_LUMA_0 0x68300 #define TV_V_LUMA_42 0x683a8 #define TV_V_CHROMA_0 0x68400 #define TV_V_CHROMA_42 0x684a8 /* Display & cursor control */ /* Pipe A */ #define PIPEADSL 0x70000 #define PIPEACONF 0x70008 #define PIPEACONF_ENABLE (1<<31) #define PIPEACONF_DISABLE 0 #define PIPEACONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) #define PIPEACONF_SINGLE_WIDE 0 #define PIPEACONF_PIPE_UNLOCKED 0 #define PIPEACONF_PIPE_LOCKED (1<<25) #define PIPEACONF_PALETTE 0 #define PIPEACONF_GAMMA (1<<24) #define PIPECONF_FORCE_BORDER (1<<25) #define PIPECONF_PROGRESSIVE (0 << 21) #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) #define PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) #define PIPE_CRC_DONE_ENABLE (1UL<<28) #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) #define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) #define PIPE_DPST_EVENT_ENABLE (1UL<<23) #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) #define PIPE_DPST_EVENT_STATUS (1UL<<7) #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) #define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) #define DSPARB_BSTART_SHIFT 0 /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. 
The following code * should work: * * do { * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> * PIPE_FRAME_HIGH_SHIFT); * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> * PIPE_FRAME_LOW_SHIFT); * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> * PIPE_FRAME_HIGH_SHIFT); * } while (high1 != high2); * frame = (high1 << 8) | low1; */ #define PIPEAFRAMEHIGH 0x70040 #define PIPE_FRAME_HIGH_MASK 0x0000ffff #define PIPE_FRAME_HIGH_SHIFT 0 #define PIPEAFRAMEPIXEL 0x70044 #define PIPE_FRAME_LOW_MASK 0xff000000 #define PIPE_FRAME_LOW_SHIFT 24 #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 /* GM45+ just has to be different */ #define PIPEA_FRMCOUNT_GM45 0x70040 #define PIPEA_FLIPCOUNT_GM45 0x70044 /* Cursor A & B regs */ #define CURACNTR 0x70080 #define CURSOR_MODE_DISABLE 0x00 #define CURSOR_MODE_64_32B_AX 0x07 #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) #define MCURSOR_GAMMA_ENABLE (1 << 26) #define CURABASE 0x70084 #define CURAPOS 0x70088 #define CURSOR_POS_MASK 0x007FF #define CURSOR_POS_SIGN 0x8000 #define CURSOR_X_SHIFT 0 #define CURSOR_Y_SHIFT 16 #define CURBCNTR 0x700c0 #define CURBBASE 0x700c4 #define CURBPOS 0x700c8 /* Display A control */ #define DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) #define DISPLAY_PLANE_DISABLE 0 #define DISPPLANE_GAMMA_ENABLE (1<<30) #define DISPPLANE_GAMMA_DISABLE 0 #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) #define DISPPLANE_8BPP (0x2<<26) #define DISPPLANE_15_16BPP (0x4<<26) #define DISPPLANE_16BPP (0x5<<26) #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) #define DISPPLANE_32BPP (0x7<<26) #define DISPPLANE_STEREO_ENABLE (1<<25) #define DISPPLANE_STEREO_DISABLE 0 #define DISPPLANE_SEL_PIPE_MASK (1<<24) #define DISPPLANE_SEL_PIPE_A 0 #define DISPPLANE_SEL_PIPE_B (1<<24) #define DISPPLANE_SRC_KEY_ENABLE (1<<22) #define DISPPLANE_SRC_KEY_DISABLE 0 #define DISPPLANE_LINE_DOUBLE (1<<20) #define DISPPLANE_NO_LINE_DOUBLE 0 #define DISPPLANE_STEREO_POLARITY_FIRST 0 #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) #define DSPAADDR 0x70184 #define DSPASTRIDE 0x70188 #define DSPAPOS 0x7018C /* reserved */ #define DSPASIZE 0x70190 #define DSPASURF 0x7019C /* 965+ only */ #define DSPATILEOFF 0x701A4 /* 965+ only */ /* VBIOS flags */ #define SWF00 0x71410 #define SWF01 0x71414 #define SWF02 0x71418 #define SWF03 0x7141c #define SWF04 0x71420 #define SWF05 0x71424 #define SWF06 0x71428 #define SWF10 0x70410 #define SWF11 0x70414 #define SWF14 0x71420 #define SWF30 0x72414 #define SWF31 0x72418 #define SWF32 0x7241c /* Pipe B */ #define PIPEBDSL 0x71000 #define PIPEBCONF 0x71008 #define PIPEBSTAT 0x71024 #define PIPEBFRAMEHIGH 0x71040 #define PIPEBFRAMEPIXEL 0x71044 #define PIPEB_FRMCOUNT_GM45 0x71040 #define PIPEB_FLIPCOUNT_GM45 0x71044 /* Display B control */ #define DSPBCNTR 0x71180 #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) #define DISPPLANE_ALPHA_TRANS_DISABLE 0 #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) #define DSPBADDR 0x71184 #define DSPBSTRIDE 0x71188 #define DSPBPOS 0x7118C #define DSPBSIZE 0x71190 #define DSPBSURF 0x7119C #define DSPBTILEOFF 0x711A4 /* VBIOS regs */ #define VGACNTRL 0x71400 -# define VGA_DISP_DISABLE (1 << 31) +# define VGA_DISP_DISABLE (1U << 31) # define VGA_2X_MODE (1 << 30) # define VGA_PIPE_B_SELECT (1 << 29) #endif /* _I915_REG_H_ */ Index: head/sys/dev/drm/mach64_drv.h =================================================================== --- head/sys/dev/drm/mach64_drv.h (revision 258779) +++ head/sys/dev/drm/mach64_drv.h (revision
258780) @@ -1,863 +1,863 @@ /* mach64_drv.h -- Private header for mach64 driver -*- linux-c -*- * Created: Fri Nov 24 22:07:58 2000 by gareth@valinux.com */ /*- * Copyright 2000 Gareth Hughes * Copyright 2002 Frank C. Earl * Copyright 2002-2003 Leif Delgass * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes * Frank C. Earl * Leif Delgass * José Fonseca */ #include __FBSDID("$FreeBSD$"); #ifndef __MACH64_DRV_H__ #define __MACH64_DRV_H__ /* General customization: */ #define DRIVER_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca" #define DRIVER_NAME "mach64" #define DRIVER_DESC "DRM module for the ATI Rage Pro" #define DRIVER_DATE "20060718" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 /* FIXME: remove these when not needed */ /* Development driver options */ #define MACH64_EXTRA_CHECKING 0 /* Extra sanity checks for DMA/freelist management */ #define MACH64_VERBOSE 0 /* Verbose debugging output */ typedef struct drm_mach64_freelist { struct list_head list; /* List pointers for free_list, placeholders, or pending list */ struct drm_buf *buf; /* Pointer to the buffer */ int discard; /* This flag is set when we're done (re)using a buffer */ u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */ } drm_mach64_freelist_t; typedef struct drm_mach64_descriptor_ring { void *start; /* write pointer (cpu address) to start of descriptor ring */ u32 start_addr; /* bus address of beginning of descriptor ring */ int size; /* size of ring in bytes */ u32 head_addr; /* bus address of descriptor ring head */ u32 head; /* dword offset of descriptor ring head */ u32 tail; /* dword offset of descriptor ring tail */ u32 tail_mask; /* mask used to wrap ring */ int space; /* number of free bytes in ring */ } drm_mach64_descriptor_ring_t; typedef struct drm_mach64_private { drm_mach64_sarea_t *sarea_priv; int is_pci; drm_mach64_dma_mode_t driver_mode; /* Async DMA, sync DMA, or MMIO */ int usec_timeout; /* Timeout for the wait functions */ drm_mach64_descriptor_ring_t ring; /* DMA descriptor table (ring buffer) */ int ring_running; /* Is bus mastering enabled */ struct list_head free_list; /* Free-list head */ struct list_head placeholders; /* Placeholder list for buffers held by clients */ struct list_head pending; /* Buffers pending completion */ u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES]; /* dword ring offsets of most recent frame swaps */ unsigned int fb_bpp; unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch; unsigned int depth_bpp; unsigned int depth_offset, depth_pitch; atomic_t vbl_received; /**< Number of vblanks received. */ u32 front_offset_pitch; u32 back_offset_pitch; u32 depth_offset_pitch; drm_local_map_t *sarea; drm_local_map_t *fb; drm_local_map_t *mmio; drm_local_map_t *ring_map; drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */ drm_local_map_t *agp_textures; } drm_mach64_private_t; extern struct drm_ioctl_desc mach64_ioctls[]; extern int mach64_max_ioctl; /* mach64_dma.c */ extern int mach64_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_dma_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_dma_flush(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_dma_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void mach64_driver_lastclose(struct drm_device * dev); extern int mach64_init_freelist(struct drm_device * dev); extern void mach64_destroy_freelist(struct drm_device * dev); extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv); extern int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_buf); extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries); extern int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv); extern int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n); extern int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv); extern int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv); extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv); extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv); extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv); extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv, drm_mach64_freelist_t *_entry); extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv, drm_mach64_freelist_t *_entry); extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv); extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv); extern int mach64_do_cleanup_dma(struct drm_device * dev); /* mach64_state.c */ extern int mach64_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_get_param(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mach64_driver_load(struct drm_device * dev, unsigned long flags); extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc); extern int mach64_enable_vblank(struct drm_device *dev, int crtc); extern void mach64_disable_vblank(struct drm_device *dev, int crtc); extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS); extern void mach64_driver_irq_preinstall(struct drm_device *dev); extern int mach64_driver_irq_postinstall(struct drm_device *dev); extern void mach64_driver_irq_uninstall(struct drm_device *dev); /* ================================================================ * Registers */ #define MACH64_AGP_BASE 0x0148 #define MACH64_AGP_CNTL 0x014c #define MACH64_ALPHA_TST_CNTL 0x0550 
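The motivation behind every hunk in this revision, including the MACH64_LAST_DESCRIPTOR change just below, is the same: with a 32-bit int, `1 << 31` shifts a one into the sign bit, which is undefined behaviour in C99 and in practice yields a negative value that sign-extends when the constant is widened or compared against an unsigned type. `1U << 31` keeps the constant unsigned, so it is exactly 0x80000000 under all promotions. A minimal standalone sketch of the difference (written for illustration only; it is not part of any of these headers):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* With 32-bit int, (1 << 31) is a signed constant (typically
	 * INT_MIN); converting it to 64 bits sign-extends the value. */
	uint64_t s = (uint64_t)(1 << 31);	/* 0xffffffff80000000 */
	uint64_t u = (uint64_t)(1U << 31);	/* 0x0000000080000000 */

	printf("signed:   %#018jx\n", (uintmax_t)s);
	printf("unsigned: %#018jx\n", (uintmax_t)u);
	return (0);
}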
#define MACH64_DSP_CONFIG 0x0420 #define MACH64_DSP_ON_OFF 0x0424 #define MACH64_EXT_MEM_CNTL 0x04ac #define MACH64_GEN_TEST_CNTL 0x04d0 #define MACH64_HW_DEBUG 0x047c #define MACH64_MEM_ADDR_CONFIG 0x0434 #define MACH64_MEM_BUF_CNTL 0x042c #define MACH64_MEM_CNTL 0x04b0 #define MACH64_BM_ADDR 0x0648 #define MACH64_BM_COMMAND 0x0188 #define MACH64_BM_DATA 0x0648 #define MACH64_BM_FRAME_BUF_OFFSET 0x0180 #define MACH64_BM_GUI_TABLE 0x01b8 #define MACH64_BM_GUI_TABLE_CMD 0x064c # define MACH64_CIRCULAR_BUF_SIZE_16KB (0 << 0) # define MACH64_CIRCULAR_BUF_SIZE_32KB (1 << 0) # define MACH64_CIRCULAR_BUF_SIZE_64KB (2 << 0) # define MACH64_CIRCULAR_BUF_SIZE_128KB (3 << 0) -# define MACH64_LAST_DESCRIPTOR (1 << 31) +# define MACH64_LAST_DESCRIPTOR (1U << 31) #define MACH64_BM_HOSTDATA 0x0644 #define MACH64_BM_STATUS 0x018c #define MACH64_BM_SYSTEM_MEM_ADDR 0x0184 #define MACH64_BM_SYSTEM_TABLE 0x01bc #define MACH64_BUS_CNTL 0x04a0 # define MACH64_BUS_MSTR_RESET (1 << 1) # define MACH64_BUS_APER_REG_DIS (1 << 4) # define MACH64_BUS_FLUSH_BUF (1 << 2) # define MACH64_BUS_MASTER_DIS (1 << 6) # define MACH64_BUS_EXT_REG_EN (1 << 27) #define MACH64_CLR_CMP_CLR 0x0700 #define MACH64_CLR_CMP_CNTL 0x0708 #define MACH64_CLR_CMP_MASK 0x0704 #define MACH64_CONFIG_CHIP_ID 0x04e0 #define MACH64_CONFIG_CNTL 0x04dc #define MACH64_CONFIG_STAT0 0x04e4 #define MACH64_CONFIG_STAT1 0x0494 #define MACH64_CONFIG_STAT2 0x0498 #define MACH64_CONTEXT_LOAD_CNTL 0x072c #define MACH64_CONTEXT_MASK 0x0720 #define MACH64_COMPOSITE_SHADOW_ID 0x0798 #define MACH64_CRC_SIG 0x04e8 #define MACH64_CUSTOM_MACRO_CNTL 0x04d4 #define MACH64_DP_BKGD_CLR 0x06c0 #define MACH64_DP_FOG_CLR 0x06c4 #define MACH64_DP_FGRD_BKGD_CLR 0x06e0 #define MACH64_DP_FRGD_CLR 0x06c4 #define MACH64_DP_FGRD_CLR_MIX 0x06dc #define MACH64_DP_MIX 0x06d4 # define BKGD_MIX_NOT_D (0 << 0) # define BKGD_MIX_ZERO (1 << 0) # define BKGD_MIX_ONE (2 << 0) # define MACH64_BKGD_MIX_D (3 << 0) # define BKGD_MIX_NOT_S (4 << 0) # define BKGD_MIX_D_XOR_S (5 << 0) # define BKGD_MIX_NOT_D_XOR_S (6 << 0) # define MACH64_BKGD_MIX_S (7 << 0) # define BKGD_MIX_NOT_D_OR_NOT_S (8 << 0) # define BKGD_MIX_D_OR_NOT_S (9 << 0) # define BKGD_MIX_NOT_D_OR_S (10 << 0) # define BKGD_MIX_D_OR_S (11 << 0) # define BKGD_MIX_D_AND_S (12 << 0) # define BKGD_MIX_NOT_D_AND_S (13 << 0) # define BKGD_MIX_D_AND_NOT_S (14 << 0) # define BKGD_MIX_NOT_D_AND_NOT_S (15 << 0) # define BKGD_MIX_D_PLUS_S_DIV2 (23 << 0) # define FRGD_MIX_NOT_D (0 << 16) # define FRGD_MIX_ZERO (1 << 16) # define FRGD_MIX_ONE (2 << 16) # define FRGD_MIX_D (3 << 16) # define FRGD_MIX_NOT_S (4 << 16) # define FRGD_MIX_D_XOR_S (5 << 16) # define FRGD_MIX_NOT_D_XOR_S (6 << 16) # define MACH64_FRGD_MIX_S (7 << 16) # define FRGD_MIX_NOT_D_OR_NOT_S (8 << 16) # define FRGD_MIX_D_OR_NOT_S (9 << 16) # define FRGD_MIX_NOT_D_OR_S (10 << 16) # define FRGD_MIX_D_OR_S (11 << 16) # define FRGD_MIX_D_AND_S (12 << 16) # define FRGD_MIX_NOT_D_AND_S (13 << 16) # define FRGD_MIX_D_AND_NOT_S (14 << 16) # define FRGD_MIX_NOT_D_AND_NOT_S (15 << 16) # define FRGD_MIX_D_PLUS_S_DIV2 (23 << 16) #define MACH64_DP_PIX_WIDTH 0x06d0 # define MACH64_HOST_TRIPLE_ENABLE (1 << 13) # define MACH64_BYTE_ORDER_MSB_TO_LSB (0 << 24) # define MACH64_BYTE_ORDER_LSB_TO_MSB (1 << 24) #define MACH64_DP_SRC 0x06d8 # define MACH64_BKGD_SRC_BKGD_CLR (0 << 0) # define MACH64_BKGD_SRC_FRGD_CLR (1 << 0) # define MACH64_BKGD_SRC_HOST (2 << 0) # define MACH64_BKGD_SRC_BLIT (3 << 0) # define MACH64_BKGD_SRC_PATTERN (4 << 0) # define MACH64_BKGD_SRC_3D (5 << 0) # define 
MACH64_FRGD_SRC_BKGD_CLR (0 << 8) # define MACH64_FRGD_SRC_FRGD_CLR (1 << 8) # define MACH64_FRGD_SRC_HOST (2 << 8) # define MACH64_FRGD_SRC_BLIT (3 << 8) # define MACH64_FRGD_SRC_PATTERN (4 << 8) # define MACH64_FRGD_SRC_3D (5 << 8) # define MACH64_MONO_SRC_ONE (0 << 16) # define MACH64_MONO_SRC_PATTERN (1 << 16) # define MACH64_MONO_SRC_HOST (2 << 16) # define MACH64_MONO_SRC_BLIT (3 << 16) #define MACH64_DP_WRITE_MASK 0x06c8 #define MACH64_DST_CNTL 0x0530 # define MACH64_DST_X_RIGHT_TO_LEFT (0 << 0) # define MACH64_DST_X_LEFT_TO_RIGHT (1 << 0) # define MACH64_DST_Y_BOTTOM_TO_TOP (0 << 1) # define MACH64_DST_Y_TOP_TO_BOTTOM (1 << 1) # define MACH64_DST_X_MAJOR (0 << 2) # define MACH64_DST_Y_MAJOR (1 << 2) # define MACH64_DST_X_TILE (1 << 3) # define MACH64_DST_Y_TILE (1 << 4) # define MACH64_DST_LAST_PEL (1 << 5) # define MACH64_DST_POLYGON_ENABLE (1 << 6) # define MACH64_DST_24_ROTATION_ENABLE (1 << 7) #define MACH64_DST_HEIGHT_WIDTH 0x0518 #define MACH64_DST_OFF_PITCH 0x0500 #define MACH64_DST_WIDTH_HEIGHT 0x06ec #define MACH64_DST_X_Y 0x06e8 #define MACH64_DST_Y_X 0x050c #define MACH64_FIFO_STAT 0x0710 # define MACH64_FIFO_SLOT_MASK 0x0000ffff -# define MACH64_FIFO_ERR (1 << 31) +# define MACH64_FIFO_ERR (1U << 31) #define MACH64_GEN_TEST_CNTL 0x04d0 # define MACH64_GUI_ENGINE_ENABLE (1 << 8) #define MACH64_GUI_CMDFIFO_DEBUG 0x0170 #define MACH64_GUI_CMDFIFO_DATA 0x0174 #define MACH64_GUI_CNTL 0x0178 # define MACH64_CMDFIFO_SIZE_MASK 0x00000003ul # define MACH64_CMDFIFO_SIZE_192 0x00000000ul # define MACH64_CMDFIFO_SIZE_128 0x00000001ul # define MACH64_CMDFIFO_SIZE_64 0x00000002ul #define MACH64_GUI_STAT 0x0738 # define MACH64_GUI_ACTIVE (1 << 0) #define MACH64_GUI_TRAJ_CNTL 0x0730 #define MACH64_HOST_CNTL 0x0640 #define MACH64_HOST_DATA0 0x0600 #define MACH64_ONE_OVER_AREA 0x029c #define MACH64_ONE_OVER_AREA_UC 0x0300 #define MACH64_PAT_REG0 0x0680 #define MACH64_PAT_REG1 0x0684 #define MACH64_SC_LEFT 0x06a0 #define MACH64_SC_RIGHT 0x06a4 #define MACH64_SC_LEFT_RIGHT 0x06a8 #define MACH64_SC_TOP 0x06ac #define MACH64_SC_BOTTOM 0x06b0 #define MACH64_SC_TOP_BOTTOM 0x06b4 #define MACH64_SCALE_3D_CNTL 0x05fc #define MACH64_SCRATCH_REG0 0x0480 #define MACH64_SCRATCH_REG1 0x0484 #define MACH64_SECONDARY_TEX_OFF 0x0778 #define MACH64_SETUP_CNTL 0x0304 #define MACH64_SRC_CNTL 0x05b4 # define MACH64_SRC_BM_ENABLE (1 << 8) # define MACH64_SRC_BM_SYNC (1 << 9) # define MACH64_SRC_BM_OP_FRAME_TO_SYSTEM (0 << 10) # define MACH64_SRC_BM_OP_SYSTEM_TO_FRAME (1 << 10) # define MACH64_SRC_BM_OP_REG_TO_SYSTEM (2 << 10) # define MACH64_SRC_BM_OP_SYSTEM_TO_REG (3 << 10) #define MACH64_SRC_HEIGHT1 0x0594 #define MACH64_SRC_HEIGHT2 0x05ac #define MACH64_SRC_HEIGHT1_WIDTH1 0x0598 #define MACH64_SRC_HEIGHT2_WIDTH2 0x05b0 #define MACH64_SRC_OFF_PITCH 0x0580 #define MACH64_SRC_WIDTH1 0x0590 #define MACH64_SRC_Y_X 0x058c #define MACH64_TEX_0_OFF 0x05c0 #define MACH64_TEX_CNTL 0x0774 #define MACH64_TEX_SIZE_PITCH 0x0770 #define MACH64_TIMER_CONFIG 0x0428 #define MACH64_VERTEX_1_ARGB 0x0254 #define MACH64_VERTEX_1_S 0x0240 #define MACH64_VERTEX_1_SECONDARY_S 0x0328 #define MACH64_VERTEX_1_SECONDARY_T 0x032c #define MACH64_VERTEX_1_SECONDARY_W 0x0330 #define MACH64_VERTEX_1_SPEC_ARGB 0x024c #define MACH64_VERTEX_1_T 0x0244 #define MACH64_VERTEX_1_W 0x0248 #define MACH64_VERTEX_1_X_Y 0x0258 #define MACH64_VERTEX_1_Z 0x0250 #define MACH64_VERTEX_2_ARGB 0x0274 #define MACH64_VERTEX_2_S 0x0260 #define MACH64_VERTEX_2_SECONDARY_S 0x0334 #define MACH64_VERTEX_2_SECONDARY_T 0x0338 #define MACH64_VERTEX_2_SECONDARY_W 
0x033c #define MACH64_VERTEX_2_SPEC_ARGB 0x026c #define MACH64_VERTEX_2_T 0x0264 #define MACH64_VERTEX_2_W 0x0268 #define MACH64_VERTEX_2_X_Y 0x0278 #define MACH64_VERTEX_2_Z 0x0270 #define MACH64_VERTEX_3_ARGB 0x0294 #define MACH64_VERTEX_3_S 0x0280 #define MACH64_VERTEX_3_SECONDARY_S 0x02a0 #define MACH64_VERTEX_3_SECONDARY_T 0x02a4 #define MACH64_VERTEX_3_SECONDARY_W 0x02a8 #define MACH64_VERTEX_3_SPEC_ARGB 0x028c #define MACH64_VERTEX_3_T 0x0284 #define MACH64_VERTEX_3_W 0x0288 #define MACH64_VERTEX_3_X_Y 0x0298 #define MACH64_VERTEX_3_Z 0x0290 #define MACH64_Z_CNTL 0x054c #define MACH64_Z_OFF_PITCH 0x0548 #define MACH64_CRTC_VLINE_CRNT_VLINE 0x0410 # define MACH64_CRTC_VLINE_MASK 0x000007ff # define MACH64_CRTC_CRNT_VLINE_MASK 0x07ff0000 #define MACH64_CRTC_OFF_PITCH 0x0414 #define MACH64_CRTC_INT_CNTL 0x0418 # define MACH64_CRTC_VBLANK (1 << 0) # define MACH64_CRTC_VBLANK_INT_EN (1 << 1) # define MACH64_CRTC_VBLANK_INT (1 << 2) # define MACH64_CRTC_VLINE_INT_EN (1 << 3) # define MACH64_CRTC_VLINE_INT (1 << 4) # define MACH64_CRTC_VLINE_SYNC (1 << 5) /* 0=even, 1=odd */ # define MACH64_CRTC_FRAME (1 << 6) /* 0=even, 1=odd */ # define MACH64_CRTC_SNAPSHOT_INT_EN (1 << 7) # define MACH64_CRTC_SNAPSHOT_INT (1 << 8) # define MACH64_CRTC_I2C_INT_EN (1 << 9) # define MACH64_CRTC_I2C_INT (1 << 10) # define MACH64_CRTC2_VBLANK (1 << 11) /* LT Pro */ # define MACH64_CRTC2_VBLANK_INT_EN (1 << 12) /* LT Pro */ # define MACH64_CRTC2_VBLANK_INT (1 << 13) /* LT Pro */ # define MACH64_CRTC2_VLINE_INT_EN (1 << 14) /* LT Pro */ # define MACH64_CRTC2_VLINE_INT (1 << 15) /* LT Pro */ # define MACH64_CRTC_CAPBUF0_INT_EN (1 << 16) # define MACH64_CRTC_CAPBUF0_INT (1 << 17) # define MACH64_CRTC_CAPBUF1_INT_EN (1 << 18) # define MACH64_CRTC_CAPBUF1_INT (1 << 19) # define MACH64_CRTC_OVERLAY_EOF_INT_EN (1 << 20) # define MACH64_CRTC_OVERLAY_EOF_INT (1 << 21) # define MACH64_CRTC_ONESHOT_CAP_INT_EN (1 << 22) # define MACH64_CRTC_ONESHOT_CAP_INT (1 << 23) # define MACH64_CRTC_BUSMASTER_EOL_INT_EN (1 << 24) # define MACH64_CRTC_BUSMASTER_EOL_INT (1 << 25) # define MACH64_CRTC_GP_INT_EN (1 << 26) # define MACH64_CRTC_GP_INT (1 << 27) # define MACH64_CRTC2_VLINE_SYNC (1 << 28) /* LT Pro */ /* 0=even, 1=odd */ # define MACH64_CRTC_SNAPSHOT2_INT_EN (1 << 29) /* LT Pro */ # define MACH64_CRTC_SNAPSHOT2_INT (1 << 30) /* LT Pro */ -# define MACH64_CRTC_VBLANK2_INT (1 << 31) +# define MACH64_CRTC_VBLANK2_INT (1U << 31) # define MACH64_CRTC_INT_ENS \ ( \ MACH64_CRTC_VBLANK_INT_EN | \ MACH64_CRTC_VLINE_INT_EN | \ MACH64_CRTC_SNAPSHOT_INT_EN | \ MACH64_CRTC_I2C_INT_EN | \ MACH64_CRTC2_VBLANK_INT_EN | \ MACH64_CRTC2_VLINE_INT_EN | \ MACH64_CRTC_CAPBUF0_INT_EN | \ MACH64_CRTC_CAPBUF1_INT_EN | \ MACH64_CRTC_OVERLAY_EOF_INT_EN | \ MACH64_CRTC_ONESHOT_CAP_INT_EN | \ MACH64_CRTC_BUSMASTER_EOL_INT_EN | \ MACH64_CRTC_GP_INT_EN | \ MACH64_CRTC_SNAPSHOT2_INT_EN | \ 0 \ ) # define MACH64_CRTC_INT_ACKS \ ( \ MACH64_CRTC_VBLANK_INT | \ MACH64_CRTC_VLINE_INT | \ MACH64_CRTC_SNAPSHOT_INT | \ MACH64_CRTC_I2C_INT | \ MACH64_CRTC2_VBLANK_INT | \ MACH64_CRTC2_VLINE_INT | \ MACH64_CRTC_CAPBUF0_INT | \ MACH64_CRTC_CAPBUF1_INT | \ MACH64_CRTC_OVERLAY_EOF_INT | \ MACH64_CRTC_ONESHOT_CAP_INT | \ MACH64_CRTC_BUSMASTER_EOL_INT | \ MACH64_CRTC_GP_INT | \ MACH64_CRTC_SNAPSHOT2_INT | \ MACH64_CRTC_VBLANK2_INT | \ 0 \ ) #define MACH64_DATATYPE_CI8 2 #define MACH64_DATATYPE_ARGB1555 3 #define MACH64_DATATYPE_RGB565 4 #define MACH64_DATATYPE_ARGB8888 6 #define MACH64_DATATYPE_RGB332 7 #define MACH64_DATATYPE_Y8 8 #define MACH64_DATATYPE_RGB8 9 #define 
MACH64_DATATYPE_VYUY422 11 #define MACH64_DATATYPE_YVYU422 12 #define MACH64_DATATYPE_AYUV444 14 #define MACH64_DATATYPE_ARGB4444 15 #define MACH64_READ(reg) DRM_READ32(dev_priv->mmio, (reg) ) #define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) ) #define DWMREG0 0x0400 #define DWMREG0_END 0x07ff #define DWMREG1 0x0000 #define DWMREG1_END 0x03ff #define ISREG0(r) (((r) >= DWMREG0) && ((r) <= DWMREG0_END)) #define DMAREG0(r) (((r) - DWMREG0) >> 2) #define DMAREG1(r) ((((r) - DWMREG1) >> 2 ) | 0x0100) #define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r)) #define MMREG0 0x0000 #define MMREG0_END 0x00ff #define ISMMREG0(r) (((r) >= MMREG0) && ((r) <= MMREG0_END)) #define MMSELECT0(r) (((r) << 2) + DWMREG0) #define MMSELECT1(r) (((((r) & 0xff) << 2) + DWMREG1)) #define MMSELECT(r) (ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r)) /* ================================================================ * DMA constants */ /* DMA descriptor field indices: * The descriptor fields are loaded into the read-only * BM_* system bus master registers during a bus-master operation */ #define MACH64_DMA_FRAME_BUF_OFFSET 0 /* BM_FRAME_BUF_OFFSET */ #define MACH64_DMA_SYS_MEM_ADDR 1 /* BM_SYSTEM_MEM_ADDR */ #define MACH64_DMA_COMMAND 2 /* BM_COMMAND */ #define MACH64_DMA_RESERVED 3 /* BM_STATUS */ /* BM_COMMAND descriptor field flags */ #define MACH64_DMA_HOLD_OFFSET (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */ #define MACH64_DMA_EOL (1<<31) /* End of descriptor list flag */ #define MACH64_DMA_CHUNKSIZE 0x1000 /* 4kB per DMA descriptor */ #define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */ /* ================================================================ * Ring operations * * Since the Mach64 bus master engine requires polling, these functions end * up being called frequently, hence being inline. 
*/ static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv) { drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ring->head_addr, ring->head, ring->tail, ring->space); if (mach64_do_wait_for_idle(dev_priv) < 0) { mach64_do_engine_reset(dev_priv); } if (dev_priv->driver_mode != MACH64_MODE_MMIO) { /* enable bus mastering and block 1 registers */ MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL) & ~MACH64_BUS_MASTER_DIS) | MACH64_BUS_EXT_REG_EN); mach64_do_wait_for_idle(dev_priv); } /* reset descriptor table ring head */ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); dev_priv->ring_running = 1; } static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv, drm_mach64_descriptor_ring_t * ring) { DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ring->head_addr, ring->head, ring->tail, ring->space); /* reset descriptor table ring head */ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); if (dev_priv->driver_mode == MACH64_MODE_MMIO) { mach64_do_dispatch_pseudo_dma(dev_priv); } else { /* enable GUI bus mastering, and sync the bus master to the GUI */ MACH64_WRITE(MACH64_SRC_CNTL, MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC | MACH64_SRC_BM_OP_SYSTEM_TO_REG); /* kick off the transfer */ MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0); if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) { if ((mach64_do_wait_for_idle(dev_priv)) < 0) { DRM_ERROR("idle failed, resetting engine\n"); mach64_dump_engine_info(dev_priv); mach64_do_engine_reset(dev_priv); return; } mach64_do_release_used_buffers(dev_priv); } } } /** * Poll the ring head and make sure the bus master is alive. * * Mach64's bus master engine will stop if there are no more entries to process. * This function polls the engine for the last processed entry and calls * mach64_ring_resume if there is an unprocessed entry. * * Note also that, since we update the ring tail while the bus master engine is * in operation, it is possible that the last tail update was too late to be * processed, and the bus master engine stops at the previous tail position. * Therefore it is important to call this function frequently. 
*/ static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv, drm_mach64_descriptor_ring_t * ring) { DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ring->head_addr, ring->head, ring->tail, ring->space); if (!dev_priv->ring_running) { mach64_ring_start(dev_priv); if (ring->head != ring->tail) { mach64_ring_resume(dev_priv, ring); } } else { /* GUI_ACTIVE must be read before BM_GUI_TABLE to * correctly determine the ring head */ int gui_active = MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE; ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0; if (gui_active) { /* If not idle, BM_GUI_TABLE points one descriptor * past the current head */ if (ring->head_addr == ring->start_addr) { ring->head_addr += ring->size; } ring->head_addr -= 4 * sizeof(u32); } if (ring->head_addr < ring->start_addr || ring->head_addr >= ring->start_addr + ring->size) { DRM_ERROR("bad ring head address: 0x%08x\n", ring->head_addr); mach64_dump_ring_info(dev_priv); mach64_do_engine_reset(dev_priv); return; } ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32); if (!gui_active && ring->head != ring->tail) { mach64_ring_resume(dev_priv, ring); } } } static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv) { DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", dev_priv->ring.head_addr, dev_priv->ring.head, dev_priv->ring.tail, dev_priv->ring.space); /* restore previous SRC_CNTL to disable busmastering */ mach64_do_wait_for_fifo(dev_priv, 1); MACH64_WRITE(MACH64_SRC_CNTL, 0); /* disable busmastering but keep the block 1 registers enabled */ mach64_do_wait_for_idle(dev_priv); MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL) | MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN); dev_priv->ring_running = 0; } static __inline__ void mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv) { drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; DRM_DEBUG("\n"); mach64_ring_tick(dev_priv, ring); ring->space = (ring->head - ring->tail) * sizeof(u32); if (ring->space <= 0) { ring->space += ring->size; } } /* ================================================================ * DMA macros * * Mach64's ring buffer doesn't take register writes directly. These * have to be written indirectly in DMA buffers. These macros simplify * the task of setting up a buffer, writing commands to it, and * queuing the buffer in the ring. */ #define DMALOCALS \ drm_mach64_freelist_t *_entry = NULL; \ struct drm_buf *_buf = NULL; \ u32 *_buf_wptr; int _outcount #define GETBUFPTR( __buf ) \ ((dev_priv->is_pci) ? \ ((u32 *)(__buf)->address) : \ ((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset))) #define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address) #define GETRINGOFFSET() (_entry->ring_ofs) static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t * dev_priv, drm_mach64_freelist_t ** entry, struct drm_buf * buf) { struct list_head *ptr; #if MACH64_EXTRA_CHECKING if (list_empty(&dev_priv->pending)) { DRM_ERROR("Empty pending list in \n"); return -EINVAL; } #endif ptr = dev_priv->pending.prev; *entry = list_entry(ptr, drm_mach64_freelist_t, list); while ((*entry)->buf != buf) { if (ptr == &dev_priv->pending) { return -EFAULT; } ptr = ptr->prev; *entry = list_entry(ptr, drm_mach64_freelist_t, list); } return 0; } #define DMASETPTR( _p ) \ do { \ _buf = (_p); \ _outcount = 0; \ _buf_wptr = GETBUFPTR( _buf ); \ } while(0) /* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? 
*/ #define DMAGETPTR( file_priv, dev_priv, n ) \ do { \ if ( MACH64_VERBOSE ) { \ DRM_INFO( "DMAGETPTR( %d )\n", (n) ); \ } \ _buf = mach64_freelist_get( dev_priv ); \ if (_buf == NULL) { \ DRM_ERROR("couldn't get buffer in DMAGETPTR\n"); \ return -EAGAIN; \ } \ if (_buf->pending) { \ DRM_ERROR("pending buf in DMAGETPTR\n"); \ return -EFAULT; \ } \ _buf->file_priv = file_priv; \ _outcount = 0; \ \ _buf_wptr = GETBUFPTR( _buf ); \ } while (0) #define DMAOUTREG( reg, val ) \ do { \ if ( MACH64_VERBOSE ) { \ DRM_INFO( " DMAOUTREG( 0x%x = 0x%08x )\n", \ reg, val ); \ } \ _buf_wptr[_outcount++] = cpu_to_le32(DMAREG(reg)); \ _buf_wptr[_outcount++] = cpu_to_le32((val)); \ _buf->used += 8; \ } while (0) #define DMAADVANCE( dev_priv, _discard ) \ do { \ struct list_head *ptr; \ int ret; \ \ if ( MACH64_VERBOSE ) { \ DRM_INFO( "DMAADVANCE() in \n" ); \ } \ \ if (_buf->used <= 0) { \ DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \ _buf->idx ); \ return -EFAULT; \ } \ if (_buf->pending) { \ /* This is a reused buffer, so we need to find it in the pending list */ \ if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \ DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx ); \ return ret; \ } \ if (_entry->discard) { \ DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \ return -EFAULT; \ } \ } else { \ if (list_empty(&dev_priv->placeholders)) { \ DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \ return -EFAULT; \ } \ ptr = dev_priv->placeholders.next; \ list_del(ptr); \ _entry = list_entry(ptr, drm_mach64_freelist_t, list); \ _buf->pending = 1; \ _entry->buf = _buf; \ list_add_tail(ptr, &dev_priv->pending); \ } \ _entry->discard = (_discard); \ if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \ return ret; \ } while (0) #define DMADISCARDBUF() \ do { \ if (_entry == NULL) { \ int ret; \ if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \ DRM_ERROR( "couldn't find pending buf %d\n", \ _buf->idx ); \ return ret; \ } \ } \ _entry->discard = 1; \ } while(0) #define DMAADVANCEHOSTDATA( dev_priv ) \ do { \ struct list_head *ptr; \ int ret; \ \ if ( MACH64_VERBOSE ) { \ DRM_INFO( "DMAADVANCEHOSTDATA() in \n" ); \ } \ \ if (_buf->used <= 0) { \ DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx ); \ return -EFAULT; \ } \ if (list_empty(&dev_priv->placeholders)) { \ DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \ return -EFAULT; \ } \ \ ptr = dev_priv->placeholders.next; \ list_del(ptr); \ _entry = list_entry(ptr, drm_mach64_freelist_t, list); \ _entry->buf = _buf; \ _entry->buf->pending = 1; \ list_add_tail(ptr, &dev_priv->pending); \ _entry->discard = 1; \ if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \ return ret; \ } while (0) #endif /* __MACH64_DRV_H__ */ Index: head/sys/dev/drm/mga_drv.h =================================================================== --- head/sys/dev/drm/mga_drv.h (revision 258779) +++ head/sys/dev/drm/mga_drv.h (revision 258780) @@ -1,694 +1,694 @@ /* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved.
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes */ #include __FBSDID("$FreeBSD$"); #ifndef __MGA_DRV_H__ #define __MGA_DRV_H__ /* General customization: */ #define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." #define DRIVER_NAME "mga" #define DRIVER_DESC "Matrox G200/G400" #define DRIVER_DATE "20060319" #define DRIVER_MAJOR 3 #define DRIVER_MINOR 2 #define DRIVER_PATCHLEVEL 2 typedef struct drm_mga_primary_buffer { u8 *start; u8 *end; int size; u32 tail; int space; volatile long wrapped; volatile u32 *status; u32 last_flush; u32 last_wrap; u32 high_mark; } drm_mga_primary_buffer_t; typedef struct drm_mga_freelist { struct drm_mga_freelist *next; struct drm_mga_freelist *prev; drm_mga_age_t age; struct drm_buf *buf; } drm_mga_freelist_t; typedef struct { drm_mga_freelist_t *list_entry; int discard; int dispatched; } drm_mga_buf_priv_t; typedef struct drm_mga_private { drm_mga_primary_buffer_t prim; drm_mga_sarea_t *sarea_priv; drm_mga_freelist_t *head; drm_mga_freelist_t *tail; unsigned int warp_pipe; unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES]; int chipset; int usec_timeout; /** * If set, the new DMA initialization sequence was used. This is * primarily used to select how the driver should uninitialize its * internal DMA structures. */ int used_new_dma_init; /** * If AGP memory is used for DMA buffers, this will be the value * \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer). */ u32 dma_access; /** * If AGP memory is used for DMA buffers, this will be the value * \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI * transfer). */ u32 wagp_enable; /** * \name MMIO region parameters. * * \sa drm_mga_private_t::mmio */ /*@{*/ u32 mmio_base; /**< Bus address of base of MMIO. */ u32 mmio_size; /**< Size of the MMIO region. */ /*@}*/ u32 clear_cmd; u32 maccess; atomic_t vbl_received; /**< Number of vblanks received.
*/ wait_queue_head_t fence_queue; atomic_t last_fence_retired; u32 next_fence_to_post; unsigned int fb_cpp; unsigned int front_offset; unsigned int front_pitch; unsigned int back_offset; unsigned int back_pitch; unsigned int depth_cpp; unsigned int depth_offset; unsigned int depth_pitch; unsigned int texture_offset; unsigned int texture_size; drm_local_map_t *sarea; drm_local_map_t *mmio; drm_local_map_t *status; drm_local_map_t *warp; drm_local_map_t *primary; drm_local_map_t *agp_textures; unsigned long agp_handle; unsigned int agp_size; } drm_mga_private_t; extern struct drm_ioctl_desc mga_ioctls[]; extern int mga_max_ioctl; /* mga_dma.c */ extern int mga_dma_bootstrap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_dma_flush(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_dma_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_dma_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_driver_load(struct drm_device *dev, unsigned long flags); extern int mga_driver_unload(struct drm_device * dev); extern void mga_driver_lastclose(struct drm_device * dev); extern int mga_driver_dma_quiescent(struct drm_device * dev); extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf); /* mga_warp.c */ extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv); extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); extern int mga_warp_init(drm_mga_private_t * dev_priv); /* mga_irq.c */ extern int mga_enable_vblank(struct drm_device *dev, int crtc); extern void mga_disable_vblank(struct drm_device *dev, int crtc); extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); extern void mga_driver_irq_preinstall(struct drm_device * dev); extern int mga_driver_irq_postinstall(struct drm_device * dev); extern void mga_driver_irq_uninstall(struct drm_device * dev); extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() #if defined(__linux__) && defined(__alpha__) #define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle)) #define MGA_ADDR( reg ) (MGA_BASE(reg) + reg) #define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg ) #define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg ) #define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg))) #define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg))) #define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0) #define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0) static inline u32 _MGA_READ(u32 * addr) { DRM_MEMORYBARRIER(); return *(volatile u32 *)addr; } #else #define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg)) #define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg)) #define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val)) 
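As a usage note: MGA_READ and MGA_WRITE behave like ordinary MMIO accessors, with an extra memory barrier on Linux/alpha as defined above. A hypothetical interrupt-acknowledge helper, sketched here only to show the idiom (the function name is invented, and the status/clear bits it uses are defined further down in this header):

/* Sketch only, not part of the driver: acknowledge a pending SOFTRAP. */
static __inline__ void
mga_ack_softrap(drm_mga_private_t * dev_priv)
{
	/* Read the pending-interrupt bits... */
	u32 status = MGA_READ(MGA_STATUS);

	/* ...and clear a pending SOFTRAP by writing its clear bit. */
	if (status & MGA_SOFTRAPEN)
		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
}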
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val)) #endif #define DWGREG0 0x1c00 #define DWGREG0_END 0x1dff #define DWGREG1 0x2c00 #define DWGREG1_END 0x2dff #define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END) #define DMAREG0(r) (u8)((r - DWGREG0) >> 2) #define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80) #define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r)) /* ================================================================ * Helper macros... */ #define MGA_EMIT_STATE( dev_priv, dirty ) \ do { \ if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \ if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \ mga_g400_emit_state( dev_priv ); \ } else { \ mga_g200_emit_state( dev_priv ); \ } \ } \ } while (0) #define WRAP_TEST_WITH_RETURN( dev_priv ) \ do { \ if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ if ( mga_is_idle( dev_priv ) ) { \ mga_do_dma_wrap_end( dev_priv ); \ } else if ( dev_priv->prim.space < \ dev_priv->prim.high_mark ) { \ if ( MGA_DMA_DEBUG ) \ DRM_INFO( "wrap...\n"); \ return -EBUSY; \ } \ } \ } while (0) #define WRAP_WAIT_WITH_RETURN( dev_priv ) \ do { \ if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ if ( MGA_DMA_DEBUG ) \ DRM_INFO( "wrap...\n"); \ return -EBUSY; \ } \ mga_do_dma_wrap_end( dev_priv ); \ } \ } while (0) /* ================================================================ * Primary DMA command stream */ #define MGA_VERBOSE 0 #define DMA_LOCALS unsigned int write; volatile u8 *prim; #define DMA_BLOCK_SIZE (5 * sizeof(u32)) #define BEGIN_DMA( n ) \ do { \ if ( MGA_VERBOSE ) { \ DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \ DRM_INFO( " space=0x%x req=0x%zx\n", \ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \ } \ prim = dev_priv->prim.start; \ write = dev_priv->prim.tail; \ } while (0) #define BEGIN_DMA_WRAP() \ do { \ if ( MGA_VERBOSE ) { \ DRM_INFO( "BEGIN_DMA()\n" ); \ DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \ } \ prim = dev_priv->prim.start; \ write = dev_priv->prim.tail; \ } while (0) #define ADVANCE_DMA() \ do { \ dev_priv->prim.tail = write; \ if ( MGA_VERBOSE ) { \ DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \ write, dev_priv->prim.space ); \ } \ } while (0) #define FLUSH_DMA() \ do { \ if ( 0 ) { \ DRM_INFO( "\n" ); \ DRM_INFO( " tail=0x%06x head=0x%06lx\n", \ dev_priv->prim.tail, \ MGA_READ( MGA_PRIMADDRESS ) - \ dev_priv->primary->offset ); \ } \ if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \ if ( dev_priv->prim.space < \ dev_priv->prim.high_mark ) { \ mga_do_dma_wrap_start( dev_priv ); \ } else { \ mga_do_dma_flush( dev_priv ); \ } \ } \ } while (0) /* Never use this, always use DMA_BLOCK(...) for primary DMA output. */ #define DMA_WRITE( offset, val ) \ do { \ if ( MGA_VERBOSE ) { \ DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04zx\n", \ (u32)(val), write + (offset) * sizeof(u32) ); \ } \ *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \ } while (0) #define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \ do { \ DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \ (DMAREG( reg1 ) << 8) | \ (DMAREG( reg2 ) << 16) | \ (DMAREG( reg3 ) << 24)) ); \ DMA_WRITE( 1, val0 ); \ DMA_WRITE( 2, val1 ); \ DMA_WRITE( 3, val2 ); \ DMA_WRITE( 4, val3 ); \ write += DMA_BLOCK_SIZE; \ } while (0) /* Buffer aging via primary DMA stream head pointer.
*/ #define SET_AGE( age, h, w ) \ do { \ (age)->head = h; \ (age)->wrap = w; \ } while (0) #define TEST_AGE( age, h, w ) ( (age)->wrap < w || \ ( (age)->wrap == w && \ (age)->head < h ) ) #define AGE_BUFFER( buf_priv ) \ do { \ drm_mga_freelist_t *entry = (buf_priv)->list_entry; \ if ( (buf_priv)->dispatched ) { \ entry->age.head = (dev_priv->prim.tail + \ dev_priv->primary->offset); \ entry->age.wrap = dev_priv->sarea_priv->last_wrap; \ } else { \ entry->age.head = 0; \ entry->age.wrap = 0; \ } \ } while (0) #define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \ MGA_DWGENGSTS | \ MGA_ENDPRDMASTS) #define MGA_DMA_IDLE_MASK (MGA_SOFTRAPEN | \ MGA_ENDPRDMASTS) #define MGA_DMA_DEBUG 0 /* A reduced set of the mga registers. */ #define MGA_CRTC_INDEX 0x1fd4 #define MGA_CRTC_DATA 0x1fd5 /* CRTC11 */ #define MGA_VINTCLR (1 << 4) #define MGA_VINTEN (1 << 5) #define MGA_ALPHACTRL 0x2c7c #define MGA_AR0 0x1c60 #define MGA_AR1 0x1c64 #define MGA_AR2 0x1c68 #define MGA_AR3 0x1c6c #define MGA_AR4 0x1c70 #define MGA_AR5 0x1c74 #define MGA_AR6 0x1c78 #define MGA_CXBNDRY 0x1c80 #define MGA_CXLEFT 0x1ca0 #define MGA_CXRIGHT 0x1ca4 #define MGA_DMAPAD 0x1c54 #define MGA_DSTORG 0x2cb8 #define MGA_DWGCTL 0x1c00 # define MGA_OPCOD_MASK (15 << 0) # define MGA_OPCOD_TRAP (4 << 0) # define MGA_OPCOD_TEXTURE_TRAP (6 << 0) # define MGA_OPCOD_BITBLT (8 << 0) # define MGA_OPCOD_ILOAD (9 << 0) # define MGA_ATYPE_MASK (7 << 4) # define MGA_ATYPE_RPL (0 << 4) # define MGA_ATYPE_RSTR (1 << 4) # define MGA_ATYPE_ZI (3 << 4) # define MGA_ATYPE_BLK (4 << 4) # define MGA_ATYPE_I (7 << 4) # define MGA_LINEAR (1 << 7) # define MGA_ZMODE_MASK (7 << 8) # define MGA_ZMODE_NOZCMP (0 << 8) # define MGA_ZMODE_ZE (2 << 8) # define MGA_ZMODE_ZNE (3 << 8) # define MGA_ZMODE_ZLT (4 << 8) # define MGA_ZMODE_ZLTE (5 << 8) # define MGA_ZMODE_ZGT (6 << 8) # define MGA_ZMODE_ZGTE (7 << 8) # define MGA_SOLID (1 << 11) # define MGA_ARZERO (1 << 12) # define MGA_SGNZERO (1 << 13) # define MGA_SHIFTZERO (1 << 14) # define MGA_BOP_MASK (15 << 16) # define MGA_BOP_ZERO (0 << 16) # define MGA_BOP_DST (10 << 16) # define MGA_BOP_SRC (12 << 16) # define MGA_BOP_ONE (15 << 16) # define MGA_TRANS_SHIFT 20 # define MGA_TRANS_MASK (15 << 20) # define MGA_BLTMOD_MASK (15 << 25) # define MGA_BLTMOD_BMONOLEF (0 << 25) # define MGA_BLTMOD_BMONOWF (4 << 25) # define MGA_BLTMOD_PLAN (1 << 25) # define MGA_BLTMOD_BFCOL (2 << 25) # define MGA_BLTMOD_BU32BGR (3 << 25) # define MGA_BLTMOD_BU32RGB (7 << 25) # define MGA_BLTMOD_BU24BGR (11 << 25) # define MGA_BLTMOD_BU24RGB (15 << 25) # define MGA_PATTERN (1 << 29) # define MGA_TRANSC (1 << 30) -# define MGA_CLIPDIS (1 << 31) +# define MGA_CLIPDIS (1U << 31) #define MGA_DWGSYNC 0x2c4c #define MGA_FCOL 0x1c24 #define MGA_FIFOSTATUS 0x1e10 #define MGA_FOGCOL 0x1cf4 #define MGA_FXBNDRY 0x1c84 #define MGA_FXLEFT 0x1ca8 #define MGA_FXRIGHT 0x1cac #define MGA_ICLEAR 0x1e18 # define MGA_SOFTRAPICLR (1 << 0) # define MGA_VLINEICLR (1 << 5) #define MGA_IEN 0x1e1c # define MGA_SOFTRAPIEN (1 << 0) # define MGA_VLINEIEN (1 << 5) #define MGA_LEN 0x1c5c #define MGA_MACCESS 0x1c04 #define MGA_PITCH 0x1c8c #define MGA_PLNWT 0x1c1c #define MGA_PRIMADDRESS 0x1e58 # define MGA_DMA_GENERAL (0 << 0) # define MGA_DMA_BLIT (1 << 0) # define MGA_DMA_VECTOR (2 << 0) # define MGA_DMA_VERTEX (3 << 0) #define MGA_PRIMEND 0x1e5c # define MGA_PRIMNOSTART (1 << 0) # define MGA_PAGPXFER (1 << 1) #define MGA_PRIMPTR 0x1e50 # define MGA_PRIMPTREN0 (1 << 0) # define MGA_PRIMPTREN1 (1 << 1) #define MGA_RST 0x1e40 # define MGA_SOFTRESET (1 << 0) # define 
MGA_SOFTEXTRST (1 << 1) #define MGA_SECADDRESS 0x2c40 #define MGA_SECEND 0x2c44 #define MGA_SETUPADDRESS 0x2cd0 #define MGA_SETUPEND 0x2cd4 #define MGA_SGN 0x1c58 #define MGA_SOFTRAP 0x2c48 #define MGA_SRCORG 0x2cb4 # define MGA_SRMMAP_MASK (1 << 0) # define MGA_SRCMAP_FB (0 << 0) # define MGA_SRCMAP_SYSMEM (1 << 0) # define MGA_SRCACC_MASK (1 << 1) # define MGA_SRCACC_PCI (0 << 1) # define MGA_SRCACC_AGP (1 << 1) #define MGA_STATUS 0x1e14 # define MGA_SOFTRAPEN (1 << 0) # define MGA_VSYNCPEN (1 << 4) # define MGA_VLINEPEN (1 << 5) # define MGA_DWGENGSTS (1 << 16) # define MGA_ENDPRDMASTS (1 << 17) #define MGA_STENCIL 0x2cc8 #define MGA_STENCILCTL 0x2ccc #define MGA_TDUALSTAGE0 0x2cf8 #define MGA_TDUALSTAGE1 0x2cfc #define MGA_TEXBORDERCOL 0x2c5c #define MGA_TEXCTL 0x2c30 #define MGA_TEXCTL2 0x2c3c # define MGA_DUALTEX (1 << 7) # define MGA_G400_TC2_MAGIC (1 << 15) -# define MGA_MAP1_ENABLE (1 << 31) +# define MGA_MAP1_ENABLE (1U << 31) #define MGA_TEXFILTER 0x2c58 #define MGA_TEXHEIGHT 0x2c2c #define MGA_TEXORG 0x2c24 # define MGA_TEXORGMAP_MASK (1 << 0) # define MGA_TEXORGMAP_FB (0 << 0) # define MGA_TEXORGMAP_SYSMEM (1 << 0) # define MGA_TEXORGACC_MASK (1 << 1) # define MGA_TEXORGACC_PCI (0 << 1) # define MGA_TEXORGACC_AGP (1 << 1) #define MGA_TEXORG1 0x2ca4 #define MGA_TEXORG2 0x2ca8 #define MGA_TEXORG3 0x2cac #define MGA_TEXORG4 0x2cb0 #define MGA_TEXTRANS 0x2c34 #define MGA_TEXTRANSHIGH 0x2c38 #define MGA_TEXWIDTH 0x2c28 #define MGA_WACCEPTSEQ 0x1dd4 #define MGA_WCODEADDR 0x1e6c #define MGA_WFLAG 0x1dc4 #define MGA_WFLAG1 0x1de0 #define MGA_WFLAGNB 0x1e64 #define MGA_WFLAGNB1 0x1e08 #define MGA_WGETMSB 0x1dc8 #define MGA_WIADDR 0x1dc0 #define MGA_WIADDR2 0x1dd8 # define MGA_WMODE_SUSPEND (0 << 0) # define MGA_WMODE_RESUME (1 << 0) # define MGA_WMODE_JUMP (2 << 0) # define MGA_WMODE_START (3 << 0) # define MGA_WAGP_ENABLE (1 << 2) #define MGA_WMISC 0x1e70 # define MGA_WUCODECACHE_ENABLE (1 << 0) # define MGA_WMASTER_ENABLE (1 << 1) # define MGA_WCACHEFLUSH_ENABLE (1 << 3) #define MGA_WVRTXSZ 0x1dcc #define MGA_YBOT 0x1c9c #define MGA_YDST 0x1c90 #define MGA_YDSTLEN 0x1c88 #define MGA_YDSTORG 0x1c94 #define MGA_YTOP 0x1c98 #define MGA_ZORG 0x1c0c /* This finishes the current batch of commands */ #define MGA_EXEC 0x0100 /* AGP PLL encoding (for G200 only). 
*/ #define MGA_AGP_PLL 0x1e4c # define MGA_AGP2XPLL_DISABLE (0 << 0) # define MGA_AGP2XPLL_ENABLE (1 << 0) /* Warp registers */ #define MGA_WR0 0x2d00 #define MGA_WR1 0x2d04 #define MGA_WR2 0x2d08 #define MGA_WR3 0x2d0c #define MGA_WR4 0x2d10 #define MGA_WR5 0x2d14 #define MGA_WR6 0x2d18 #define MGA_WR7 0x2d1c #define MGA_WR8 0x2d20 #define MGA_WR9 0x2d24 #define MGA_WR10 0x2d28 #define MGA_WR11 0x2d2c #define MGA_WR12 0x2d30 #define MGA_WR13 0x2d34 #define MGA_WR14 0x2d38 #define MGA_WR15 0x2d3c #define MGA_WR16 0x2d40 #define MGA_WR17 0x2d44 #define MGA_WR18 0x2d48 #define MGA_WR19 0x2d4c #define MGA_WR20 0x2d50 #define MGA_WR21 0x2d54 #define MGA_WR22 0x2d58 #define MGA_WR23 0x2d5c #define MGA_WR24 0x2d60 #define MGA_WR25 0x2d64 #define MGA_WR26 0x2d68 #define MGA_WR27 0x2d6c #define MGA_WR28 0x2d70 #define MGA_WR29 0x2d74 #define MGA_WR30 0x2d78 #define MGA_WR31 0x2d7c #define MGA_WR32 0x2d80 #define MGA_WR33 0x2d84 #define MGA_WR34 0x2d88 #define MGA_WR35 0x2d8c #define MGA_WR36 0x2d90 #define MGA_WR37 0x2d94 #define MGA_WR38 0x2d98 #define MGA_WR39 0x2d9c #define MGA_WR40 0x2da0 #define MGA_WR41 0x2da4 #define MGA_WR42 0x2da8 #define MGA_WR43 0x2dac #define MGA_WR44 0x2db0 #define MGA_WR45 0x2db4 #define MGA_WR46 0x2db8 #define MGA_WR47 0x2dbc #define MGA_WR48 0x2dc0 #define MGA_WR49 0x2dc4 #define MGA_WR50 0x2dc8 #define MGA_WR51 0x2dcc #define MGA_WR52 0x2dd0 #define MGA_WR53 0x2dd4 #define MGA_WR54 0x2dd8 #define MGA_WR55 0x2ddc #define MGA_WR56 0x2de0 #define MGA_WR57 0x2de4 #define MGA_WR58 0x2de8 #define MGA_WR59 0x2dec #define MGA_WR60 0x2df0 #define MGA_WR61 0x2df4 #define MGA_WR62 0x2df8 #define MGA_WR63 0x2dfc # define MGA_G400_WR_MAGIC (1 << 6) # define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */ #define MGA_ILOAD_ALIGN 64 #define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1) #define MGA_DWGCTL_FLUSH (MGA_OPCOD_TEXTURE_TRAP | \ MGA_ATYPE_I | \ MGA_ZMODE_NOZCMP | \ MGA_ARZERO | \ MGA_SGNZERO | \ MGA_BOP_SRC | \ (15 << MGA_TRANS_SHIFT)) #define MGA_DWGCTL_CLEAR (MGA_OPCOD_TRAP | \ MGA_ZMODE_NOZCMP | \ MGA_SOLID | \ MGA_ARZERO | \ MGA_SGNZERO | \ MGA_SHIFTZERO | \ MGA_BOP_SRC | \ (0 << MGA_TRANS_SHIFT) | \ MGA_BLTMOD_BMONOLEF | \ MGA_TRANSC | \ MGA_CLIPDIS) #define MGA_DWGCTL_COPY (MGA_OPCOD_BITBLT | \ MGA_ATYPE_RPL | \ MGA_SGNZERO | \ MGA_SHIFTZERO | \ MGA_BOP_SRC | \ (0 << MGA_TRANS_SHIFT) | \ MGA_BLTMOD_BFCOL | \ MGA_CLIPDIS) /* Simple idle test. */ static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv) { u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; return (status == MGA_ENDPRDMASTS); } #endif Index: head/sys/dev/drm/r128_drv.h =================================================================== --- head/sys/dev/drm/r128_drv.h (revision 258779) +++ head/sys/dev/drm/r128_drv.h (revision 258780) @@ -1,529 +1,529 @@ /* r128_drv.h -- Private header for r128 driver -*- linux-c -*- * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Kevin E. Martin * Gareth Hughes * Michel Dänzer */ #include __FBSDID("$FreeBSD$"); #ifndef __R128_DRV_H__ #define __R128_DRV_H__ /* General customization: */ #define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." #define DRIVER_NAME "r128" #define DRIVER_DESC "ATI Rage 128" #define DRIVER_DATE "20030725" /* Interface history: * * ?? - ?? * 2.4 - Add support for ycbcr textures (no new ioctls) * 2.5 - Add FLIP ioctl, disable FULLSCREEN. */ #define DRIVER_MAJOR 2 #define DRIVER_MINOR 5 #define DRIVER_PATCHLEVEL 0 #define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR ) typedef struct drm_r128_freelist { unsigned int age; struct drm_buf *buf; struct drm_r128_freelist *next; struct drm_r128_freelist *prev; } drm_r128_freelist_t; typedef struct drm_r128_ring_buffer { u32 *start; u32 *end; int size; int size_l2qw; u32 tail; u32 tail_mask; int space; int high_mark; } drm_r128_ring_buffer_t; typedef struct drm_r128_private { drm_r128_ring_buffer_t ring; drm_r128_sarea_t *sarea_priv; int cce_mode; int cce_fifo_size; int cce_running; drm_r128_freelist_t *head; drm_r128_freelist_t *tail; int usec_timeout; int is_pci; unsigned long cce_buffers_offset; atomic_t idle_count; int page_flipping; int current_page; u32 crtc_offset; u32 crtc_offset_cntl; atomic_t vbl_received; u32 color_fmt; unsigned int front_offset; unsigned int front_pitch; unsigned int back_offset; unsigned int back_pitch; u32 depth_fmt; unsigned int depth_offset; unsigned int depth_pitch; unsigned int span_offset; u32 front_pitch_offset_c; u32 back_pitch_offset_c; u32 depth_pitch_offset_c; u32 span_pitch_offset_c; drm_local_map_t *sarea; drm_local_map_t *mmio; drm_local_map_t *cce_ring; drm_local_map_t *ring_rptr; drm_local_map_t *agp_textures; struct drm_ati_pcigart_info gart_info; } drm_r128_private_t; typedef struct drm_r128_buf_priv { u32 age; int prim; int discard; int dispatched; drm_r128_freelist_t *list_entry; } drm_r128_buf_priv_t; extern struct drm_ioctl_desc r128_ioctls[]; extern int r128_max_ioctl; /* r128_cce.c */ extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int
r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void r128_freelist_reset(struct drm_device * dev); extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n); extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); extern int r128_do_cleanup_cce(struct drm_device * dev); extern int r128_enable_vblank(struct drm_device *dev, int crtc); extern void r128_disable_vblank(struct drm_device *dev, int crtc); extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); extern void r128_driver_irq_preinstall(struct drm_device * dev); extern int r128_driver_irq_postinstall(struct drm_device * dev); extern void r128_driver_irq_uninstall(struct drm_device * dev); extern void r128_driver_lastclose(struct drm_device * dev); extern int r128_driver_load(struct drm_device * dev, unsigned long flags); extern void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv); extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* Register definitions, register access macros and drmAddMap constants * for Rage 128 kernel driver. */ #define R128_AUX_SC_CNTL 0x1660 # define R128_AUX1_SC_EN (1 << 0) # define R128_AUX1_SC_MODE_OR (0 << 1) # define R128_AUX1_SC_MODE_NAND (1 << 1) # define R128_AUX2_SC_EN (1 << 2) # define R128_AUX2_SC_MODE_OR (0 << 3) # define R128_AUX2_SC_MODE_NAND (1 << 3) # define R128_AUX3_SC_EN (1 << 4) # define R128_AUX3_SC_MODE_OR (0 << 5) # define R128_AUX3_SC_MODE_NAND (1 << 5) #define R128_AUX1_SC_LEFT 0x1664 #define R128_AUX1_SC_RIGHT 0x1668 #define R128_AUX1_SC_TOP 0x166c #define R128_AUX1_SC_BOTTOM 0x1670 #define R128_AUX2_SC_LEFT 0x1674 #define R128_AUX2_SC_RIGHT 0x1678 #define R128_AUX2_SC_TOP 0x167c #define R128_AUX2_SC_BOTTOM 0x1680 #define R128_AUX3_SC_LEFT 0x1684 #define R128_AUX3_SC_RIGHT 0x1688 #define R128_AUX3_SC_TOP 0x168c #define R128_AUX3_SC_BOTTOM 0x1690 #define R128_BRUSH_DATA0 0x1480 #define R128_BUS_CNTL 0x0030 # define R128_BUS_MASTER_DIS (1 << 6) #define R128_CLOCK_CNTL_INDEX 0x0008 #define R128_CLOCK_CNTL_DATA 0x000c # define R128_PLL_WR_EN (1 << 7) #define R128_CONSTANT_COLOR_C 0x1d34 #define R128_CRTC_OFFSET 0x0224 #define R128_CRTC_OFFSET_CNTL 0x0228 # define R128_CRTC_OFFSET_FLIP_CNTL (1 << 16) #define R128_DP_GUI_MASTER_CNTL 0x146c # define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) # define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) # define R128_GMC_BRUSH_SOLID_COLOR (13 << 4) # define R128_GMC_BRUSH_NONE (15 << 4) # define R128_GMC_DST_16BPP (4 << 8) # define R128_GMC_DST_24BPP (5 << 8) # define R128_GMC_DST_32BPP (6 << 8) # define R128_GMC_DST_DATATYPE_SHIFT 8 # define R128_GMC_SRC_DATATYPE_COLOR (3 << 12) # define R128_DP_SRC_SOURCE_MEMORY (2 << 24) # define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24) # define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28) # define R128_GMC_AUX_CLIP_DIS (1 << 29) # define R128_GMC_WR_MSK_DIS (1 << 30) # define R128_ROP3_S 0x00cc0000 # define R128_ROP3_P 0x00f00000 #define R128_DP_WRITE_MASK 0x16cc #define R128_DST_PITCH_OFFSET_C 0x1c80 -# define R128_DST_TILE (1 << 31) +# define R128_DST_TILE (1U << 31) #define R128_GEN_INT_CNTL 0x0040 # define R128_CRTC_VBLANK_INT_EN (1 << 0) #define R128_GEN_INT_STATUS 0x0044 # 
define R128_CRTC_VBLANK_INT (1 << 0) # define R128_CRTC_VBLANK_INT_AK (1 << 0) #define R128_GEN_RESET_CNTL 0x00f0 # define R128_SOFT_RESET_GUI (1 << 0) #define R128_GUI_SCRATCH_REG0 0x15e0 #define R128_GUI_SCRATCH_REG1 0x15e4 #define R128_GUI_SCRATCH_REG2 0x15e8 #define R128_GUI_SCRATCH_REG3 0x15ec #define R128_GUI_SCRATCH_REG4 0x15f0 #define R128_GUI_SCRATCH_REG5 0x15f4 #define R128_GUI_STAT 0x1740 # define R128_GUI_FIFOCNT_MASK 0x0fff -# define R128_GUI_ACTIVE (1 << 31) +# define R128_GUI_ACTIVE (1U << 31) #define R128_MCLK_CNTL 0x000f # define R128_FORCE_GCP (1 << 16) # define R128_FORCE_PIPE3D_CP (1 << 17) # define R128_FORCE_RCP (1 << 18) #define R128_PC_GUI_CTLSTAT 0x1748 #define R128_PC_NGUI_CTLSTAT 0x0184 # define R128_PC_FLUSH_GUI (3 << 0) # define R128_PC_RI_GUI (1 << 2) # define R128_PC_FLUSH_ALL 0x00ff -# define R128_PC_BUSY (1 << 31) +# define R128_PC_BUSY (1U << 31) #define R128_PCI_GART_PAGE 0x017c #define R128_PRIM_TEX_CNTL_C 0x1cb0 #define R128_SCALE_3D_CNTL 0x1a00 #define R128_SEC_TEX_CNTL_C 0x1d00 #define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c #define R128_SETUP_CNTL 0x1bc4 #define R128_STEN_REF_MASK_C 0x1d40 #define R128_TEX_CNTL_C 0x1c9c # define R128_TEX_CACHE_FLUSH (1 << 23) #define R128_WAIT_UNTIL 0x1720 # define R128_EVENT_CRTC_OFFSET (1 << 0) #define R128_WINDOW_XY_OFFSET 0x1bcc /* CCE registers */ #define R128_PM4_BUFFER_OFFSET 0x0700 #define R128_PM4_BUFFER_CNTL 0x0704 # define R128_PM4_MASK (15 << 28) # define R128_PM4_NONPM4 (0 << 28) # define R128_PM4_192PIO (1 << 28) # define R128_PM4_192BM (2 << 28) # define R128_PM4_128PIO_64INDBM (3 << 28) # define R128_PM4_128BM_64INDBM (4 << 28) # define R128_PM4_64PIO_128INDBM (5 << 28) # define R128_PM4_64BM_128INDBM (6 << 28) # define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28) # define R128_PM4_64BM_64VCBM_64INDBM (8 << 28) # define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28) # define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27) #define R128_PM4_BUFFER_WM_CNTL 0x0708 # define R128_WMA_SHIFT 0 # define R128_WMB_SHIFT 8 # define R128_WMC_SHIFT 16 # define R128_WB_WM_SHIFT 24 #define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c #define R128_PM4_BUFFER_DL_RPTR 0x0710 #define R128_PM4_BUFFER_DL_WPTR 0x0714 -# define R128_PM4_BUFFER_DL_DONE (1 << 31) +# define R128_PM4_BUFFER_DL_DONE (1U << 31) #define R128_PM4_VC_FPU_SETUP 0x071c #define R128_PM4_IW_INDOFF 0x0738 #define R128_PM4_IW_INDSIZE 0x073c #define R128_PM4_STAT 0x07b8 # define R128_PM4_FIFOCNT_MASK 0x0fff # define R128_PM4_BUSY (1 << 16) -# define R128_PM4_GUI_ACTIVE (1 << 31) +# define R128_PM4_GUI_ACTIVE (1U << 31) #define R128_PM4_MICROCODE_ADDR 0x07d4 #define R128_PM4_MICROCODE_RADDR 0x07d8 #define R128_PM4_MICROCODE_DATAH 0x07dc #define R128_PM4_MICROCODE_DATAL 0x07e0 #define R128_PM4_BUFFER_ADDR 0x07f0 #define R128_PM4_MICRO_CNTL 0x07fc # define R128_PM4_MICRO_FREERUN (1 << 30) #define R128_PM4_FIFO_DATA_EVEN 0x1000 #define R128_PM4_FIFO_DATA_ODD 0x1004 /* CCE command packets */ #define R128_CCE_PACKET0 0x00000000 #define R128_CCE_PACKET1 0x40000000 #define R128_CCE_PACKET2 0x80000000 #define R128_CCE_PACKET3 0xC0000000 # define R128_CNTL_HOSTDATA_BLT 0x00009400 # define R128_CNTL_PAINT_MULTI 0x00009A00 # define R128_CNTL_BITBLT_MULTI 0x00009B00 # define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300 #define R128_CCE_PACKET_MASK 0xC0000000 #define R128_CCE_PACKET_COUNT_MASK 0x3fff0000 #define R128_CCE_PACKET0_REG_MASK 0x000007ff #define R128_CCE_PACKET1_REG0_MASK 0x000007ff #define R128_CCE_PACKET1_REG1_MASK 0x003ff800 #define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000 #define 
R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001 #define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002 #define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003 #define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004 #define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005 #define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006 #define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007 #define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010 #define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020 #define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030 #define R128_CCE_VC_CNTL_NUM_SHIFT 16 #define R128_DATATYPE_VQ 0 #define R128_DATATYPE_CI4 1 #define R128_DATATYPE_CI8 2 #define R128_DATATYPE_ARGB1555 3 #define R128_DATATYPE_RGB565 4 #define R128_DATATYPE_RGB888 5 #define R128_DATATYPE_ARGB8888 6 #define R128_DATATYPE_RGB332 7 #define R128_DATATYPE_Y8 8 #define R128_DATATYPE_RGB8 9 #define R128_DATATYPE_CI16 10 #define R128_DATATYPE_YVYU422 11 #define R128_DATATYPE_VYUY422 12 #define R128_DATATYPE_AYUV444 14 #define R128_DATATYPE_ARGB4444 15 /* Constants */ #define R128_AGP_OFFSET 0x02000000 #define R128_WATERMARK_L 16 #define R128_WATERMARK_M 8 #define R128_WATERMARK_N 8 #define R128_WATERMARK_K 128 #define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0 #define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1 #define R128_MAX_VB_AGE 0x7fffffff #define R128_MAX_VB_VERTS (0xffff) #define R128_RING_HIGH_MARK 128 #define R128_PERFORMANCE_BOXES 0 #define R128_PCIGART_TABLE_SIZE 32768 #define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) #define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) #define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) #define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) #define R128_WRITE_PLL(addr,val) \ do { \ R128_WRITE8(R128_CLOCK_CNTL_INDEX, \ ((addr) & 0x1f) | R128_PLL_WR_EN); \ R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \ } while (0) #define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \ ((n) << 16) | ((reg) >> 2)) #define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \ (((reg1) >> 2) << 11) | ((reg0) >> 2)) #define CCE_PACKET2() (R128_CCE_PACKET2) #define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \ (pkt) | ((n) << 16)) static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv) { drm_r128_ring_buffer_t *ring = &dev_priv->ring; ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); if (ring->space <= 0) ring->space += ring->size; } /* ================================================================ * Misc helper macros */ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ do { \ drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ if ( ring->space < ring->high_mark ) { \ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ r128_update_ring_snapshot( dev_priv ); \ if ( ring->space >= ring->high_mark ) \ goto __ring_space_done; \ DRM_UDELAY(1); \ } \ DRM_ERROR( "ring space check failed!\n" ); \ return -EBUSY; \ } \ __ring_space_done: \ ; \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ do { \ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \ if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \ int __ret = r128_do_cce_idle( dev_priv ); \ if ( __ret ) return __ret; \ sarea_priv->last_dispatch = 0; \ r128_freelist_reset( dev ); \ } \ } while (0) #define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \ OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \ OUT_RING( R128_EVENT_CRTC_OFFSET ); \ } while (0) /* ================================================================ * Ring 
control */ #define R128_VERBOSE 0 #define RING_LOCALS \ int write, _nr; unsigned int tail_mask; volatile u32 *ring; #define BEGIN_RING( n ) do { \ if ( R128_VERBOSE ) { \ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ } \ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ COMMIT_RING(); \ r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \ } \ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ring = dev_priv->ring.start; \ write = dev_priv->ring.tail; \ tail_mask = dev_priv->ring.tail_mask; \ } while (0) /* You can set this to zero if you want. If the card locks up, you'll * need to keep this set. It works around a bug in early revs of the * Rage 128 chipset, where the CCE would read 32 dwords past the end of * the ring buffer before wrapping around. */ #define R128_BROKEN_CCE 1 #define ADVANCE_RING() do { \ if ( R128_VERBOSE ) { \ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ write, dev_priv->ring.tail ); \ } \ if ( R128_BROKEN_CCE && write < 32 ) { \ memcpy( dev_priv->ring.end, \ dev_priv->ring.start, \ write * sizeof(u32) ); \ } \ if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \ DRM_ERROR( \ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ((dev_priv->ring.tail + _nr) & tail_mask), \ write, __LINE__); \ } else \ dev_priv->ring.tail = write; \ } while (0) #define COMMIT_RING() do { \ if ( R128_VERBOSE ) { \ DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \ dev_priv->ring.tail ); \ } \ DRM_MEMORYBARRIER(); \ R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \ R128_READ( R128_PM4_BUFFER_DL_WPTR ); \ } while (0) #define OUT_RING( x ) do { \ if ( R128_VERBOSE ) { \ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ (unsigned int)(x), write ); \ } \ ring[write++] = cpu_to_le32( x ); \ write &= tail_mask; \ } while (0) #endif /* __R128_DRV_H__ */ Index: head/sys/dev/drm/r300_reg.h =================================================================== --- head/sys/dev/drm/r300_reg.h (revision 258779) +++ head/sys/dev/drm/r300_reg.h (revision 258780) @@ -1,1786 +1,1786 @@ /************************************************************************** Copyright (C) 2004-2005 Nicolai Haehnle et al. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation on the rights to use, copy, modify, merge, publish, distribute, sub license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************/ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); /* *INDENT-OFF* */ #ifndef _R300_REG_H #define _R300_REG_H #define R300_MC_INIT_MISC_LAT_TIMER 0x180 # define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0 # define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4 # define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8 # define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12 # define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16 # define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20 # define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24 # define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28 #define R300_MC_INIT_GFX_LAT_TIMER 0x154 # define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0 # define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4 # define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8 # define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12 # define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16 # define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20 # define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24 # define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28 /* * This file contains registers and constants for the R300. They have been * found mostly by examining command buffers captured using glxtest, as well * as by extrapolating some known registers and constants from the R200. * I am fairly certain that they are correct unless stated otherwise * in comments. */ #define R300_SE_VPORT_XSCALE 0x1D98 #define R300_SE_VPORT_XOFFSET 0x1D9C #define R300_SE_VPORT_YSCALE 0x1DA0 #define R300_SE_VPORT_YOFFSET 0x1DA4 #define R300_SE_VPORT_ZSCALE 0x1DA8 #define R300_SE_VPORT_ZOFFSET 0x1DAC /* * Vertex Array Processing (VAP) Control * Stolen from r200 code from Christoph Brill (It's a guess!) */ #define R300_VAP_CNTL 0x2080 /* This register is written directly and also starts data section * in many 3d CP_PACKET3's */ #define R300_VAP_VF_CNTL 0x2084 # define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0 # define R300_VAP_VF_CNTL__PRIM_NONE (0<<0) # define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0) # define R300_VAP_VF_CNTL__PRIM_LINES (2<<0) # define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0) # define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0) # define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0) # define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0) # define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0) # define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0) # define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0) # define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0) # define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4 /* State based - direct writes to registers trigger vertex generation */ # define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4) # define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4) # define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4) # define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4) /* I don't think I saw these three used.. */ # define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6 # define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9 # define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10 /* index size - when not set the indices are assumed to be 16 bit */ # define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11) /* number of vertices */ # define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
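/* A hypothetical illustration (a sketch added here, not part of the original header nor taken from fglrx): composing the VF_CNTL word for an indexed triangle draw of `count' vertices from the bits above. */ static __inline__ u32 r300_example_vf_cntl_tri_indexed(u32 count) { return (R300_VAP_VF_CNTL__PRIM_TRIANGLES | R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT)); } /* e.g. a count of 12 would describe four triangles */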
/* BEGIN: Wild guesses */ #define R300_VAP_OUTPUT_VTX_FMT_0 0x2090 # define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0) # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1) # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */ # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */ # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */ # define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */ #define R300_VAP_OUTPUT_VTX_FMT_1 0x2094 /* each of the following is 3 bits wide, specifies number of components */ # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21 /* END: Wild guesses */ #define R300_SE_VTE_CNTL 0x20b0 # define R300_VPORT_X_SCALE_ENA 0x00000001 # define R300_VPORT_X_OFFSET_ENA 0x00000002 # define R300_VPORT_Y_SCALE_ENA 0x00000004 # define R300_VPORT_Y_OFFSET_ENA 0x00000008 # define R300_VPORT_Z_SCALE_ENA 0x00000010 # define R300_VPORT_Z_OFFSET_ENA 0x00000020 # define R300_VTX_XY_FMT 0x00000100 # define R300_VTX_Z_FMT 0x00000200 # define R300_VTX_W0_FMT 0x00000400 # define R300_VTX_W0_NORMALIZE 0x00000800 # define R300_VTX_ST_DENORMALIZED 0x00001000 /* BEGIN: Vertex data assembly - lots of uncertainties */ /* gap */ #define R300_VAP_CNTL_STATUS 0x2140 # define R300_VC_NO_SWAP (0 << 0) # define R300_VC_16BIT_SWAP (1 << 0) # define R300_VC_32BIT_SWAP (2 << 0) # define R300_VAP_TCL_BYPASS (1 << 8) /* gap */ /* Where do we get our vertex data? * * Vertex data comes either from immediate mode registers or from * vertex arrays. * There appears to be no mixed mode (though we can force the pitch of * vertex arrays to 0, effectively reusing the same element over and over * again). * * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure * if these registers influence vertex array processing. * * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3. * * In both cases, vertex attributes are then passed through INPUT_ROUTE. * * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data * into the vertex processor's input registers. * The first word routes the first input, the second word the second, etc. * The corresponding input is routed into the register with the given index. * The list is ended by a word with INPUT_ROUTE_END set. * * Always set COMPONENTS_4 in immediate mode.
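* * For example (a guess assembled from the route bits defined below, not observed in a captured stream): a four-component float attribute routed to vertex input register 0 would use the route word R300_INPUT_ROUTE_COMPONENTS_4 | (0 << R300_VAP_INPUT_ROUTE_IDX_SHIFT) | R300_INPUT_ROUTE_FLOAT, with R300_VAP_INPUT_ROUTE_END also set on the last word of the list.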
*/ #define R300_VAP_INPUT_ROUTE_0_0 0x2150 # define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0) # define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0) # define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0) # define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0) # define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */ # define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8 # define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */ # define R300_VAP_INPUT_ROUTE_END (1 << 13) # define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */ # define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */ # define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */ # define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */ #define R300_VAP_INPUT_ROUTE_0_1 0x2154 #define R300_VAP_INPUT_ROUTE_0_2 0x2158 #define R300_VAP_INPUT_ROUTE_0_3 0x215C #define R300_VAP_INPUT_ROUTE_0_4 0x2160 #define R300_VAP_INPUT_ROUTE_0_5 0x2164 #define R300_VAP_INPUT_ROUTE_0_6 0x2168 #define R300_VAP_INPUT_ROUTE_0_7 0x216C /* gap */ /* Notes: * - always set up to produce at least two attributes: * if vertex program uses only position, fglrx will set normal, too * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal. */ #define R300_VAP_INPUT_CNTL_0 0x2180 # define R300_INPUT_CNTL_0_COLOR 0x00000001 #define R300_VAP_INPUT_CNTL_1 0x2184 # define R300_INPUT_CNTL_POS 0x00000001 # define R300_INPUT_CNTL_NORMAL 0x00000002 # define R300_INPUT_CNTL_COLOR 0x00000004 # define R300_INPUT_CNTL_TC0 0x00000400 # define R300_INPUT_CNTL_TC1 0x00000800 # define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */ # define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */ # define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */ # define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */ # define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */ # define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */ /* gap */ /* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0 * are set to a swizzling bit pattern, other words are 0. * * In immediate mode, the pattern is always set to xyzw. In vertex array * mode, the swizzling pattern is e.g. used to set zw components in texture * coordinates with only two components. */ #define R300_VAP_INPUT_ROUTE_1_0 0x21E0 # define R300_INPUT_ROUTE_SELECT_X 0 # define R300_INPUT_ROUTE_SELECT_Y 1 # define R300_INPUT_ROUTE_SELECT_Z 2 # define R300_INPUT_ROUTE_SELECT_W 3 # define R300_INPUT_ROUTE_SELECT_ZERO 4 # define R300_INPUT_ROUTE_SELECT_ONE 5 # define R300_INPUT_ROUTE_SELECT_MASK 7 # define R300_INPUT_ROUTE_X_SHIFT 0 # define R300_INPUT_ROUTE_Y_SHIFT 3 # define R300_INPUT_ROUTE_Z_SHIFT 6 # define R300_INPUT_ROUTE_W_SHIFT 9 # define R300_INPUT_ROUTE_ENABLE (15 << 12) #define R300_VAP_INPUT_ROUTE_1_1 0x21E4 #define R300_VAP_INPUT_ROUTE_1_2 0x21E8 #define R300_VAP_INPUT_ROUTE_1_3 0x21EC #define R300_VAP_INPUT_ROUTE_1_4 0x21F0 #define R300_VAP_INPUT_ROUTE_1_5 0x21F4 #define R300_VAP_INPUT_ROUTE_1_6 0x21F8 #define R300_VAP_INPUT_ROUTE_1_7 0x21FC /* END: Vertex data assembly */ /* gap */ /* BEGIN: Upload vertex program and data */ /* * The programmable vertex shader unit has a memory bank of unknown size * that can be written to in 16 byte units by writing the address into * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
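* * For instance (a sketch inferred from the description above, not from a captured command stream): uploading a vertex program would mean writing R300_PVS_UPLOAD_PROGRAM to R300_VAP_PVS_UPLOAD_ADDRESS, then writing the program's instruction DWORDs, four per instruction, to R300_VAP_PVS_UPLOAD_DATA.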
* * Pointers into the memory bank are always in multiples of 16 bytes. * * The memory bank is divided into areas with fixed meaning. * * Starting at address UPLOAD_PROGRAM: Vertex program instructions. * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB), * whereas the difference between known addresses suggests size 512. * * Starting at address UPLOAD_PARAMETERS: Vertex program parameters. * Native reported limits and the VPI layout suggest size 256, whereas the * difference between known addresses suggests size 512. * * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the * floating point pointsize. The exact purpose of this state is uncertain, * as there is also the R300_RE_POINTSIZE register. * * Multiple vertex programs and parameter sets can be loaded at once, * which could explain the size discrepancy. */ #define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200 # define R300_PVS_UPLOAD_PROGRAM 0x00000000 # define R300_PVS_UPLOAD_PARAMETERS 0x00000200 # define R300_PVS_UPLOAD_POINTSIZE 0x00000406 /* gap */ #define R300_VAP_PVS_UPLOAD_DATA 0x2208 /* END: Upload vertex program and data */ /* gap */ /* I do not know the purpose of this register. However, I do know that * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL * for normal rendering. */ #define R300_VAP_UNKNOWN_221C 0x221C # define R300_221C_NORMAL 0x00000000 # define R300_221C_CLEAR 0x0001C000 /* These seem to be per-pixel and per-vertex X and Y clipping planes. The first * plane is per-pixel and the second plane is per-vertex. * * This was determined by experimentation alone but I believe it is correct. * * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest. */ #define R300_VAP_CLIP_X_0 0x2220 #define R300_VAP_CLIP_X_1 0x2224 #define R300_VAP_CLIP_Y_0 0x2228 #define R300_VAP_CLIP_Y_1 0x2230 /* gap */ /* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between * rendering commands and overwriting vertex program parameters. * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and * avoids bugs caused by still running shaders reading bad data from memory. */ #define R300_VAP_PVS_STATE_FLUSH_REG 0x2284 /* Absolutely no clue what this register is about. */ #define R300_VAP_UNKNOWN_2288 0x2288 # define R300_2288_R300 0x00750000 /* -- nh */ # define R300_2288_RV350 0x0000FFFF /* -- Vladimir */ /* gap */ /* Addresses are relative to the vertex program instruction area of the * memory bank. PROGRAM_END points to the last instruction of the active * program. * * The meaning of the two UNKNOWN fields is obviously not known. However, * experiments so far have shown that both *must* point to an instruction * inside the vertex program, otherwise the GPU locks up. * * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last * write to position takes place. * * Most likely this is used to ignore the rest of the program in cases * where a group of verts aren't visible. For some reason this "section" * sometimes accepts other instructions that have no relationship to * position calculations. */ #define R300_VAP_PVS_CNTL_1 0x22D0 # define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 # define R300_PVS_CNTL_1_POS_END_SHIFT 10 # define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 /* Addresses are relative to the vertex program parameters area.
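* For example, a program whose 8 constant vectors sit at the very start of that area would presumably set (0 << R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT) | (8 << R300_PVS_CNTL_2_PARAM_COUNT_SHIFT) in the register below.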
*/ #define R300_VAP_PVS_CNTL_2 0x22D4 # define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 # define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 #define R300_VAP_PVS_CNTL_3 0x22D8 # define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10 # define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0 /* The entire range from 0x2300 to 0x24AC inclusive seems to be used for * immediate vertices */ #define R300_VAP_VTX_COLOR_R 0x2464 #define R300_VAP_VTX_COLOR_G 0x2468 #define R300_VAP_VTX_COLOR_B 0x246C #define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */ #define R300_VAP_VTX_POS_0_Y_1 0x2494 #define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */ #define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */ #define R300_VAP_VTX_POS_0_Y_2 0x24A4 #define R300_VAP_VTX_POS_0_Z_2 0x24A8 /* write 0 to indicate end of packet? */ #define R300_VAP_VTX_END_OF_PKT 0x24AC
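/* A hypothetical immediate-mode sequence, inferred from the register names above rather than from a captured stream: write the packed color to R300_VAP_VTX_COLOR_PKD, the position to R300_VAP_VTX_POS_0_X_2, _Y_2 and _Z_2, then write 0 to R300_VAP_VTX_END_OF_PKT to terminate the packet. */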
/* gap */ /* These are values from r300_reg/r300_reg.h - they are known to be correct * and are here so we can use one register file instead of several * - Vladimir */ #define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000 # define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5) # define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16) #define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004 /* each of the following is 3 bits wide, specifies number of components */ # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21 /* UNK30 seems to enable point to quad transformation on textures * (or something closely related to that). * This bit is rather fatal at the moment due to missing support on the * pixel shader side */ #define R300_GB_ENABLE 0x4008 # define R300_GB_POINT_STUFF_ENABLE (1<<0) # define R300_GB_LINE_STUFF_ENABLE (1<<1) # define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2) # define R300_GB_STENCIL_AUTO_ENABLE (1<<4) # define R300_GB_UNK31 (1<<31) /* each of the following is 2 bits wide */ #define R300_GB_TEX_REPLICATE 0 #define R300_GB_TEX_ST 1 #define R300_GB_TEX_STR 2 # define R300_GB_TEX0_SOURCE_SHIFT 16 # define R300_GB_TEX1_SOURCE_SHIFT 18 # define R300_GB_TEX2_SOURCE_SHIFT 20 # define R300_GB_TEX3_SOURCE_SHIFT 22 # define R300_GB_TEX4_SOURCE_SHIFT 24 # define R300_GB_TEX5_SOURCE_SHIFT 26 # define R300_GB_TEX6_SOURCE_SHIFT 28 # define R300_GB_TEX7_SOURCE_SHIFT 30 /* MSPOS - positions for multisample antialiasing (?) */ #define R300_GB_MSPOS0 0x4010 /* shifts - each of the fields is 4 bits */ # define R300_GB_MSPOS0__MS_X0_SHIFT 0 # define R300_GB_MSPOS0__MS_Y0_SHIFT 4 # define R300_GB_MSPOS0__MS_X1_SHIFT 8 # define R300_GB_MSPOS0__MS_Y1_SHIFT 12 # define R300_GB_MSPOS0__MS_X2_SHIFT 16 # define R300_GB_MSPOS0__MS_Y2_SHIFT 20 # define R300_GB_MSPOS0__MSBD0_Y 24 # define R300_GB_MSPOS0__MSBD0_X 28 #define R300_GB_MSPOS1 0x4014 # define R300_GB_MSPOS1__MS_X3_SHIFT 0 # define R300_GB_MSPOS1__MS_Y3_SHIFT 4 # define R300_GB_MSPOS1__MS_X4_SHIFT 8 # define R300_GB_MSPOS1__MS_Y4_SHIFT 12 # define R300_GB_MSPOS1__MS_X5_SHIFT 16 # define R300_GB_MSPOS1__MS_Y5_SHIFT 20 # define R300_GB_MSPOS1__MSBD1 24 #define R300_GB_TILE_CONFIG 0x4018 # define R300_GB_TILE_ENABLE (1<<0) # define R300_GB_TILE_PIPE_COUNT_RV300 0 # define R300_GB_TILE_PIPE_COUNT_R300 (3<<1) # define R300_GB_TILE_PIPE_COUNT_R420 (7<<1) # define R300_GB_TILE_PIPE_COUNT_RV410 (3<<1) # define R300_GB_TILE_SIZE_8 0 # define R300_GB_TILE_SIZE_16 (1<<4) # define R300_GB_TILE_SIZE_32 (2<<4) # define R300_GB_SUPER_SIZE_1 (0<<6) # define R300_GB_SUPER_SIZE_2 (1<<6) # define R300_GB_SUPER_SIZE_4 (2<<6) # define R300_GB_SUPER_SIZE_8 (3<<6) # define R300_GB_SUPER_SIZE_16 (4<<6) # define R300_GB_SUPER_SIZE_32 (5<<6) # define R300_GB_SUPER_SIZE_64 (6<<6) # define R300_GB_SUPER_SIZE_128 (7<<6) # define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */ # define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */ # define R300_GB_SUPER_TILE_A 0 # define R300_GB_SUPER_TILE_B (1<<15) # define R300_GB_SUBPIXEL_1_12 0 # define R300_GB_SUBPIXEL_1_16 (1<<16) #define R300_GB_FIFO_SIZE 0x4024 /* each of the following is 2 bits wide */ #define R300_GB_FIFO_SIZE_32 0 #define R300_GB_FIFO_SIZE_64 1 #define R300_GB_FIFO_SIZE_128 2 #define R300_GB_FIFO_SIZE_256 3 # define R300_SC_IFIFO_SIZE_SHIFT 0 # define R300_SC_TZFIFO_SIZE_SHIFT 2 # define R300_SC_BFIFO_SIZE_SHIFT 4 # define R300_US_OFIFO_SIZE_SHIFT 12 # define R300_US_WFIFO_SIZE_SHIFT 14 /* the following use the same constants as above, but the meaning is times 2 * (i.e. instead of 32 words it means 64) */ # define R300_RS_TFIFO_SIZE_SHIFT 6 # define R300_RS_CFIFO_SIZE_SHIFT 8 # define R300_US_RAM_SIZE_SHIFT 10 /* watermarks, 3 bits wide */ # define R300_RS_HIGHWATER_COL_SHIFT 16 # define R300_RS_HIGHWATER_TEX_SHIFT 19 # define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */ # define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24 #define R300_GB_SELECT 0x401C # define R300_GB_FOG_SELECT_C0A 0 # define R300_GB_FOG_SELECT_C1A 1 # define R300_GB_FOG_SELECT_C2A 2 # define R300_GB_FOG_SELECT_C3A 3 # define R300_GB_FOG_SELECT_1_1_W 4 # define R300_GB_FOG_SELECT_Z 5 # define R300_GB_DEPTH_SELECT_Z 0 # define R300_GB_DEPTH_SELECT_1_1_W (1<<3) # define R300_GB_W_SELECT_1_W 0 # define R300_GB_W_SELECT_1 (1<<4) #define R300_GB_AA_CONFIG 0x4020 # define R300_AA_DISABLE 0x00 # define R300_AA_ENABLE 0x01 # define R300_AA_SUBSAMPLES_2 0 # define R300_AA_SUBSAMPLES_3 (1<<1) # define R300_AA_SUBSAMPLES_4 (2<<1) # define R300_AA_SUBSAMPLES_6 (3<<1) /* gap */ /* Zero to flush caches. */ #define R300_TX_INVALTAGS 0x4100 #define R300_TX_FLUSH 0x0 /* The upper enable bits are guessed, based on fglrx reported limits.
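* (For instance, a hypothetical state emit with two texture units active would write R300_TX_ENABLE_0 | R300_TX_ENABLE_1 here, and 0 to disable all units.)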
*/ #define R300_TX_ENABLE 0x4104 # define R300_TX_ENABLE_0 (1 << 0) # define R300_TX_ENABLE_1 (1 << 1) # define R300_TX_ENABLE_2 (1 << 2) # define R300_TX_ENABLE_3 (1 << 3) # define R300_TX_ENABLE_4 (1 << 4) # define R300_TX_ENABLE_5 (1 << 5) # define R300_TX_ENABLE_6 (1 << 6) # define R300_TX_ENABLE_7 (1 << 7) # define R300_TX_ENABLE_8 (1 << 8) # define R300_TX_ENABLE_9 (1 << 9) # define R300_TX_ENABLE_10 (1 << 10) # define R300_TX_ENABLE_11 (1 << 11) # define R300_TX_ENABLE_12 (1 << 12) # define R300_TX_ENABLE_13 (1 << 13) # define R300_TX_ENABLE_14 (1 << 14) # define R300_TX_ENABLE_15 (1 << 15) /* The pointsize is given in multiples of 6. The pointsize can be * enormous: Clear() renders a single point that fills the entire * framebuffer. */ #define R300_RE_POINTSIZE 0x421C # define R300_POINTSIZE_Y_SHIFT 0 # define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */ # define R300_POINTSIZE_X_SHIFT 16 # define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */ # define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6) /* The line width is given in multiples of 6. * In default mode lines are classified as vertical lines. * HO: horizontal * VE: vertical or horizontal * HO & VE: no classification */ #define R300_RE_LINE_CNT 0x4234 # define R300_LINESIZE_SHIFT 0 # define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */ # define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6) # define R300_LINE_CNT_HO (1 << 16) # define R300_LINE_CNT_VE (1 << 17) /* Some sort of scale or clamp value for texcoordless textures. */ #define R300_RE_UNK4238 0x4238 /* Something shade related */ #define R300_RE_SHADE 0x4274 #define R300_RE_SHADE_MODEL 0x4278 # define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa # define R300_RE_SHADE_MODEL_FLAT 0x39595 /* Dangerous */ #define R300_RE_POLYGON_MODE 0x4288 # define R300_PM_ENABLED (1 << 0) # define R300_PM_FRONT_POINT (0 << 0) # define R300_PM_BACK_POINT (0 << 0) # define R300_PM_FRONT_LINE (1 << 4) # define R300_PM_FRONT_FILL (1 << 5) # define R300_PM_BACK_LINE (1 << 7) # define R300_PM_BACK_FILL (1 << 8) /* Fog parameters */ #define R300_RE_FOG_SCALE 0x4294 #define R300_RE_FOG_START 0x4298 /* Not sure why there are duplicates of the factor and constant values. * My best guess so far is that there are separate zbiases for test and write. * Ordering might be wrong. * Some of the tests indicate that fgl has a fallback implementation of zbias * via pixel shaders. */ #define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */ #define R300_RE_ZBIAS_T_FACTOR 0x42A4 #define R300_RE_ZBIAS_T_CONSTANT 0x42A8 #define R300_RE_ZBIAS_W_FACTOR 0x42AC #define R300_RE_ZBIAS_W_CONSTANT 0x42B0 /* This register needs to be set to (1<<1) for RV350 to correctly * perform depth test (see --vb-triangles in r300_demo) * Don't know about other chips. - Vladimir * This is set to 3 when GL_POLYGON_OFFSET_FILL is on. * My guess is that there are two bits for each zbias primitive * (FILL, LINE, POINT). * One to enable depth test and one for depth write. * Yet this doesn't explain why depth writes work ... */ #define R300_RE_OCCLUSION_CNTL 0x42B4 # define R300_OCCLUSION_ON (1<<1) #define R300_RE_CULL_CNTL 0x42B8 # define R300_CULL_FRONT (1 << 0) # define R300_CULL_BACK (1 << 1) # define R300_FRONT_FACE_CCW (0 << 2) # define R300_FRONT_FACE_CW (1 << 2) /* BEGIN: Rasterization / Interpolators - many guesses */ /* 0_UNKNOWN_18 has always been set except for clear operations. * TC_CNT is the number of incoming texture coordinate sets (i.e.
it depends * on the vertex program, *not* the fragment program) */ #define R300_RS_CNTL_0 0x4300 # define R300_RS_CNTL_TC_CNT_SHIFT 2 # define R300_RS_CNTL_TC_CNT_MASK (7 << 2) /* number of color interpolators used */ # define R300_RS_CNTL_CI_CNT_SHIFT 7 # define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18) /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */ #define R300_RS_CNTL_1 0x4304 /* gap */ /* Only used for texture coordinates. * Use the source field to route texture coordinate input from the * vertex program to the desired interpolator. Note that the source * field is relative to the outputs the vertex program *actually* * writes. If a vertex program only writes texcoord[1], this will * be source index 0. * Set INTERP_USED on all interpolators that produce data used by * the fragment program. INTERP_USED looks like a swizzling mask, * but I haven't seen it used that way. * * Note: The _UNKNOWN constants are always set in their respective * register. I don't know if this is necessary. */ #define R300_RS_INTERP_0 0x4310 #define R300_RS_INTERP_1 0x4314 # define R300_RS_INTERP_1_UNKNOWN 0x40 #define R300_RS_INTERP_2 0x4318 # define R300_RS_INTERP_2_UNKNOWN 0x80 #define R300_RS_INTERP_3 0x431C # define R300_RS_INTERP_3_UNKNOWN 0xC0 #define R300_RS_INTERP_4 0x4320 #define R300_RS_INTERP_5 0x4324 #define R300_RS_INTERP_6 0x4328 #define R300_RS_INTERP_7 0x432C # define R300_RS_INTERP_SRC_SHIFT 2 # define R300_RS_INTERP_SRC_MASK (7 << 2) # define R300_RS_INTERP_USED 0x00D10000 /* These DWORDs control how vertex data is routed into fragment program * registers, after interpolators. */ #define R300_RS_ROUTE_0 0x4330 #define R300_RS_ROUTE_1 0x4334 #define R300_RS_ROUTE_2 0x4338 #define R300_RS_ROUTE_3 0x433C /* GUESS */ #define R300_RS_ROUTE_4 0x4340 /* GUESS */ #define R300_RS_ROUTE_5 0x4344 /* GUESS */ #define R300_RS_ROUTE_6 0x4348 /* GUESS */ #define R300_RS_ROUTE_7 0x434C /* GUESS */ # define R300_RS_ROUTE_SOURCE_INTERP_0 0 # define R300_RS_ROUTE_SOURCE_INTERP_1 1 # define R300_RS_ROUTE_SOURCE_INTERP_2 2 # define R300_RS_ROUTE_SOURCE_INTERP_3 3 # define R300_RS_ROUTE_SOURCE_INTERP_4 4 # define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */ # define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */ # define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */ # define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */ # define R300_RS_ROUTE_DEST_SHIFT 6 # define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */ /* Special handling for color: When the fragment program uses color, * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the * color register index. * * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state. * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly * correct or not. - Oliver. 
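* * A guessed illustration: routing the primary color to fragment program register 2 would set R300_RS_ROUTE_0_COLOR and put 2 into the R300_RS_ROUTE_0_COLOR_DEST field defined below.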
*/ # define R300_RS_ROUTE_0_COLOR (1 << 14) # define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17 # define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */ /* As above, but for secondary color */ # define R300_RS_ROUTE_1_COLOR1 (1 << 14) # define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17 # define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17) # define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11) /* END: Rasterization / Interpolators - many guesses */ /* Hierarchical Z Enable */ #define R300_SC_HYPERZ 0x43a4 # define R300_SC_HYPERZ_DISABLE (0 << 0) # define R300_SC_HYPERZ_ENABLE (1 << 0) # define R300_SC_HYPERZ_MIN (0 << 1) # define R300_SC_HYPERZ_MAX (1 << 1) # define R300_SC_HYPERZ_ADJ_256 (0 << 2) # define R300_SC_HYPERZ_ADJ_128 (1 << 2) # define R300_SC_HYPERZ_ADJ_64 (2 << 2) # define R300_SC_HYPERZ_ADJ_32 (3 << 2) # define R300_SC_HYPERZ_ADJ_16 (4 << 2) # define R300_SC_HYPERZ_ADJ_8 (5 << 2) # define R300_SC_HYPERZ_ADJ_4 (6 << 2) # define R300_SC_HYPERZ_ADJ_2 (7 << 2) # define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5) # define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5) # define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6) # define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6) #define R300_SC_EDGERULE 0x43a8 /* BEGIN: Scissors and cliprects */ /* There are four clipping rectangles. Their corner coordinates are inclusive. * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending * on whether the pixel is inside cliprects 0-3, respectively. For example, * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned * the number 3 (binary 0011). * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set, * the pixel is rasterized. * * In addition to this, there is a scissors rectangle. Only pixels inside the * scissors rectangle are drawn. (coordinates are inclusive) * * For some reason, the top-left corner of the framebuffer is at (1440, 1440) * for the purpose of clipping and scissors. */ #define R300_RE_CLIPRECT_TL_0 0x43B0 #define R300_RE_CLIPRECT_BR_0 0x43B4 #define R300_RE_CLIPRECT_TL_1 0x43B8 #define R300_RE_CLIPRECT_BR_1 0x43BC #define R300_RE_CLIPRECT_TL_2 0x43C0 #define R300_RE_CLIPRECT_BR_2 0x43C4 #define R300_RE_CLIPRECT_TL_3 0x43C8 #define R300_RE_CLIPRECT_BR_3 0x43CC # define R300_CLIPRECT_OFFSET 1440 # define R300_CLIPRECT_MASK 0x1FFF # define R300_CLIPRECT_X_SHIFT 0 # define R300_CLIPRECT_X_MASK (0x1FFF << 0) # define R300_CLIPRECT_Y_SHIFT 13 # define R300_CLIPRECT_Y_MASK (0x1FFF << 13) #define R300_RE_CLIPRECT_CNTL 0x43D0 # define R300_CLIP_OUT (1 << 0) # define R300_CLIP_0 (1 << 1) # define R300_CLIP_1 (1 << 2) # define R300_CLIP_10 (1 << 3) # define R300_CLIP_2 (1 << 4) # define R300_CLIP_20 (1 << 5) # define R300_CLIP_21 (1 << 6) # define R300_CLIP_210 (1 << 7) # define R300_CLIP_3 (1 << 8) # define R300_CLIP_30 (1 << 9) # define R300_CLIP_31 (1 << 10) # define R300_CLIP_310 (1 << 11) # define R300_CLIP_32 (1 << 12) # define R300_CLIP_320 (1 << 13) # define R300_CLIP_321 (1 << 14) # define R300_CLIP_3210 (1 << 15) /* gap */ #define R300_RE_SCISSORS_TL 0x43E0 #define R300_RE_SCISSORS_BR 0x43E4 # define R300_SCISSORS_OFFSET 1440 # define R300_SCISSORS_X_SHIFT 0 # define R300_SCISSORS_X_MASK (0x1FFF << 0) # define R300_SCISSORS_Y_SHIFT 13 # define R300_SCISSORS_Y_MASK (0x1FFF << 13) /* END: Scissors and cliprects */
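/* A worked instance of the cliprect numbering above (illustration only): a pixel inside cliprects 0 and 1 but outside 2 and 3 gets the number 3 (binary 0011), so it is rasterized iff R300_CLIP_10 is set in RE_CLIPRECT_CNTL. */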
/* BEGIN: Texture specification */ /* * The texture specification dwords are grouped by meaning and not by texture * unit. This means that e.g. the offset for texture image unit N is found in * register TX_OFFSET_0 + (4*N) */ #define R300_TX_FILTER_0 0x4400 # define R300_TX_REPEAT 0 # define R300_TX_MIRRORED 1 # define R300_TX_CLAMP 4 # define R300_TX_CLAMP_TO_EDGE 2 # define R300_TX_CLAMP_TO_BORDER 6 # define R300_TX_WRAP_S_SHIFT 0 # define R300_TX_WRAP_S_MASK (7 << 0) # define R300_TX_WRAP_T_SHIFT 3 # define R300_TX_WRAP_T_MASK (7 << 3) # define R300_TX_WRAP_Q_SHIFT 6 # define R300_TX_WRAP_Q_MASK (7 << 6) # define R300_TX_MAG_FILTER_NEAREST (1 << 9) # define R300_TX_MAG_FILTER_LINEAR (2 << 9) # define R300_TX_MAG_FILTER_MASK (3 << 9) # define R300_TX_MIN_FILTER_NEAREST (1 << 11) # define R300_TX_MIN_FILTER_LINEAR (2 << 11) # define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11) # define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11) # define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11) # define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11) /* NOTE: NEAREST doesn't seem to exist. * I'm not setting MAG_FILTER_MASK and (3 << 11) on for all * anisotropy modes because that would void the selected mag filter */ # define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13) # define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13) # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13) # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13) # define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) ) # define R300_TX_MAX_ANISO_1_TO_1 (0 << 21) # define R300_TX_MAX_ANISO_2_TO_1 (2 << 21) # define R300_TX_MAX_ANISO_4_TO_1 (4 << 21) # define R300_TX_MAX_ANISO_8_TO_1 (6 << 21) # define R300_TX_MAX_ANISO_16_TO_1 (8 << 21) # define R300_TX_MAX_ANISO_MASK (14 << 21) #define R300_TX_FILTER1_0 0x4440 # define R300_CHROMA_KEY_MODE_DISABLE 0 # define R300_CHROMA_KEY_FORCE 1 # define R300_CHROMA_KEY_BLEND 2 # define R300_MC_ROUND_NORMAL (0<<2) # define R300_MC_ROUND_MPEG4 (1<<2) # define R300_LOD_BIAS_MASK 0x1fff # define R300_EDGE_ANISO_EDGE_DIAG (0<<13) # define R300_EDGE_ANISO_EDGE_ONLY (1<<13) # define R300_MC_COORD_TRUNCATE_DISABLE (0<<14) # define R300_MC_COORD_TRUNCATE_MPEG (1<<14) # define R300_TX_TRI_PERF_0_8 (0<<15) # define R300_TX_TRI_PERF_1_8 (1<<15) # define R300_TX_TRI_PERF_1_4 (2<<15) # define R300_TX_TRI_PERF_3_8 (3<<15) # define R300_ANISO_THRESHOLD_MASK (7<<17) #define R300_TX_SIZE_0 0x4480 # define R300_TX_WIDTHMASK_SHIFT 0 # define R300_TX_WIDTHMASK_MASK (2047 << 0) # define R300_TX_HEIGHTMASK_SHIFT 11 # define R300_TX_HEIGHTMASK_MASK (2047 << 11) # define R300_TX_UNK23 (1 << 23) # define R300_TX_MAX_MIP_LEVEL_SHIFT 26 # define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26) # define R300_TX_SIZE_PROJECTED (1<<30) # define R300_TX_SIZE_TXPITCH_EN (1<<31) #define R300_TX_FORMAT_0 0x44C0 /* The interpretation of the format word by Wladimir van der Laan */ /* The X, Y, Z and W refer to the layout of the components.
They are given meanings as R, G, B and Alpha by the swizzle specification */ # define R300_TX_FORMAT_X8 0x0 # define R300_TX_FORMAT_X16 0x1 # define R300_TX_FORMAT_Y4X4 0x2 # define R300_TX_FORMAT_Y8X8 0x3 # define R300_TX_FORMAT_Y16X16 0x4 # define R300_TX_FORMAT_Z3Y3X2 0x5 # define R300_TX_FORMAT_Z5Y6X5 0x6 # define R300_TX_FORMAT_Z6Y5X5 0x7 # define R300_TX_FORMAT_Z11Y11X10 0x8 # define R300_TX_FORMAT_Z10Y11X11 0x9 # define R300_TX_FORMAT_W4Z4Y4X4 0xA # define R300_TX_FORMAT_W1Z5Y5X5 0xB # define R300_TX_FORMAT_W8Z8Y8X8 0xC # define R300_TX_FORMAT_W2Z10Y10X10 0xD # define R300_TX_FORMAT_W16Z16Y16X16 0xE # define R300_TX_FORMAT_DXT1 0xF # define R300_TX_FORMAT_DXT3 0x10 # define R300_TX_FORMAT_DXT5 0x11 # define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */ # define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */ # define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ # define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ /* 0x16 - some 16 bit green format.. ?? */ # define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */ # define R300_TX_FORMAT_CUBIC_MAP (1 << 26) /* gap */ /* Floating point formats */ /* Note - hardware supports both 16 and 32 bit floating point */ # define R300_TX_FORMAT_FL_I16 0x18 # define R300_TX_FORMAT_FL_I16A16 0x19 # define R300_TX_FORMAT_FL_R16G16B16A16 0x1A # define R300_TX_FORMAT_FL_I32 0x1B # define R300_TX_FORMAT_FL_I32A32 0x1C # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D /* alpha modes, convenience mostly */ /* if you have alpha, pick constant appropriate to the number of channels * (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */ # define R300_TX_FORMAT_ALPHA_1CH 0x000 # define R300_TX_FORMAT_ALPHA_2CH 0x200 # define R300_TX_FORMAT_ALPHA_4CH 0x600 # define R300_TX_FORMAT_ALPHA_NONE 0xA00 /* Swizzling */ /* constants */ # define R300_TX_FORMAT_X 0 # define R300_TX_FORMAT_Y 1 # define R300_TX_FORMAT_Z 2 # define R300_TX_FORMAT_W 3 # define R300_TX_FORMAT_ZERO 4 # define R300_TX_FORMAT_ONE 5 /* 2.0*Z, everything above 1.0 is set to 0.0 */ # define R300_TX_FORMAT_CUT_Z 6 /* 2.0*W, everything above 1.0 is set to 0.0 */ # define R300_TX_FORMAT_CUT_W 7 # define R300_TX_FORMAT_B_SHIFT 18 # define R300_TX_FORMAT_G_SHIFT 15 # define R300_TX_FORMAT_R_SHIFT 12 # define R300_TX_FORMAT_A_SHIFT 9 /* Convenience macro to take care of layout and swizzling */ # define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \ ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \ | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \ | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \ | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \ | (R300_TX_FORMAT_##FMT) )
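/* A usage sketch (an illustration, not from the original header): a plain ARGB8888-style texture read straight through could be described as R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8), routing X into blue, Y into green, Z into red and W into alpha. */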
/* gap */ /* Notes: * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1 * - CMP: If ARG2 < 0, return ARG1, else return ARG0 * - FLR: use FRC+MAD * - XPD: use MAD+MAD * - SGE, SLT: use MAD+CMP * - RSQ: use ABS modifier for argument * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation * (e.g. RCP) into color register * - apparently, there's no quick DST operation * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2" * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0" * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1" * * Operand selection * First stage selects three sources from the available registers and * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha). * fglrx sorts the three source fields: Registers before constants, * lower indices before higher indices; I do not know whether this is * necessary. * * fglrx fills unused sources with "read constant 0" * According to specs, you cannot select more than two different constants. * * Second stage selects the operands from the sources. This is defined in * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants * zero and one. * Swizzling and negation happens in this stage, as well. * * Important: Color and alpha seem to be mostly separate, i.e. their source * selection appears to be fully independent (the register storage is probably * physically split into a color and an alpha section). * However (because of the apparent physical split), there is some interaction * WRT swizzling. If, for example, you want to load an R component into an * Alpha operand, this R component is taken from a *color* source, not from * an alpha source. The corresponding register doesn't even have to appear in * the alpha sources list. (I hope this all makes sense to you) * * Destination selection * The destination register index is in FPI1 (color) and FPI3 (alpha) * together with enable bits. * There are separate enable bits for writing into temporary registers * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_* * /DSTA_OUTPUT). You can write to both at once, or not write at all (the * same index must be used for both). * * Note: There is a special form for LRP * - Argument order is the same as in ARB_fragment_program. * - Operation is MAD * - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP * - Set FPI0/FPI2_SPECIAL_LRP * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */ #define R300_PFS_INSTR1_0 0x46C0 # define R300_FPI1_SRC0C_SHIFT 0 # define R300_FPI1_SRC0C_MASK (31 << 0) # define R300_FPI1_SRC0C_CONST (1 << 5) # define R300_FPI1_SRC1C_SHIFT 6 # define R300_FPI1_SRC1C_MASK (31 << 6) # define R300_FPI1_SRC1C_CONST (1 << 11) # define R300_FPI1_SRC2C_SHIFT 12 # define R300_FPI1_SRC2C_MASK (31 << 12) # define R300_FPI1_SRC2C_CONST (1 << 17) # define R300_FPI1_SRC_MASK 0x0003ffff # define R300_FPI1_DSTC_SHIFT 18 # define R300_FPI1_DSTC_MASK (31 << 18) # define R300_FPI1_DSTC_REG_MASK_SHIFT 23 # define R300_FPI1_DSTC_REG_X (1 << 23) # define R300_FPI1_DSTC_REG_Y (1 << 24) # define R300_FPI1_DSTC_REG_Z (1 << 25) # define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26 # define R300_FPI1_DSTC_OUTPUT_X (1 << 26) # define R300_FPI1_DSTC_OUTPUT_Y (1 << 27) # define R300_FPI1_DSTC_OUTPUT_Z (1 << 28) #define R300_PFS_INSTR3_0 0x47C0 # define R300_FPI3_SRC0A_SHIFT 0 # define R300_FPI3_SRC0A_MASK (31 << 0) # define R300_FPI3_SRC0A_CONST (1 << 5) # define R300_FPI3_SRC1A_SHIFT 6 # define R300_FPI3_SRC1A_MASK (31 << 6) # define R300_FPI3_SRC1A_CONST (1 << 11) # define R300_FPI3_SRC2A_SHIFT 12 # define R300_FPI3_SRC2A_MASK (31 << 12) # define R300_FPI3_SRC2A_CONST (1 << 17) # define R300_FPI3_SRC_MASK 0x0003ffff # define R300_FPI3_DSTA_SHIFT 18 # define R300_FPI3_DSTA_MASK (31 << 18) # define R300_FPI3_DSTA_REG (1 << 23) # define R300_FPI3_DSTA_OUTPUT (1 << 24) # define R300_FPI3_DSTA_DEPTH (1 << 27) #define R300_PFS_INSTR0_0 0x48C0 # define R300_FPI0_ARGC_SRC0C_XYZ 0 # define R300_FPI0_ARGC_SRC0C_XXX 1 # define R300_FPI0_ARGC_SRC0C_YYY 2 # define R300_FPI0_ARGC_SRC0C_ZZZ 3 # define R300_FPI0_ARGC_SRC1C_XYZ 4 # define R300_FPI0_ARGC_SRC1C_XXX 5 # define R300_FPI0_ARGC_SRC1C_YYY 6 # define R300_FPI0_ARGC_SRC1C_ZZZ 7 # define R300_FPI0_ARGC_SRC2C_XYZ 8 # define R300_FPI0_ARGC_SRC2C_XXX 9 # define R300_FPI0_ARGC_SRC2C_YYY 10 # define R300_FPI0_ARGC_SRC2C_ZZZ 11 # define R300_FPI0_ARGC_SRC0A 12 # define R300_FPI0_ARGC_SRC1A 13 # define R300_FPI0_ARGC_SRC2A 14 # define R300_FPI0_ARGC_SRC1C_LRP 15 # define R300_FPI0_ARGC_ZERO 20 # define R300_FPI0_ARGC_ONE 21 /* GUESS */ # define R300_FPI0_ARGC_HALF 22 # define R300_FPI0_ARGC_SRC0C_YZX 23 # define R300_FPI0_ARGC_SRC1C_YZX 24 # define R300_FPI0_ARGC_SRC2C_YZX 25 # define
R300_FPI0_ARGC_SRC0C_ZXY 26 # define R300_FPI0_ARGC_SRC1C_ZXY 27 # define R300_FPI0_ARGC_SRC2C_ZXY 28 # define R300_FPI0_ARGC_SRC0CA_WZY 29 # define R300_FPI0_ARGC_SRC1CA_WZY 30 # define R300_FPI0_ARGC_SRC2CA_WZY 31 # define R300_FPI0_ARG0C_SHIFT 0 # define R300_FPI0_ARG0C_MASK (31 << 0) # define R300_FPI0_ARG0C_NEG (1 << 5) # define R300_FPI0_ARG0C_ABS (1 << 6) # define R300_FPI0_ARG1C_SHIFT 7 # define R300_FPI0_ARG1C_MASK (31 << 7) # define R300_FPI0_ARG1C_NEG (1 << 12) # define R300_FPI0_ARG1C_ABS (1 << 13) # define R300_FPI0_ARG2C_SHIFT 14 # define R300_FPI0_ARG2C_MASK (31 << 14) # define R300_FPI0_ARG2C_NEG (1 << 19) # define R300_FPI0_ARG2C_ABS (1 << 20) # define R300_FPI0_SPECIAL_LRP (1 << 21) # define R300_FPI0_OUTC_MAD (0 << 23) # define R300_FPI0_OUTC_DP3 (1 << 23) # define R300_FPI0_OUTC_DP4 (2 << 23) # define R300_FPI0_OUTC_MIN (4 << 23) # define R300_FPI0_OUTC_MAX (5 << 23) # define R300_FPI0_OUTC_CMPH (7 << 23) # define R300_FPI0_OUTC_CMP (8 << 23) # define R300_FPI0_OUTC_FRC (9 << 23) # define R300_FPI0_OUTC_REPL_ALPHA (10 << 23) # define R300_FPI0_OUTC_SAT (1 << 30) -# define R300_FPI0_INSERT_NOP (1 << 31) +# define R300_FPI0_INSERT_NOP (1U << 31) #define R300_PFS_INSTR2_0 0x49C0 # define R300_FPI2_ARGA_SRC0C_X 0 # define R300_FPI2_ARGA_SRC0C_Y 1 # define R300_FPI2_ARGA_SRC0C_Z 2 # define R300_FPI2_ARGA_SRC1C_X 3 # define R300_FPI2_ARGA_SRC1C_Y 4 # define R300_FPI2_ARGA_SRC1C_Z 5 # define R300_FPI2_ARGA_SRC2C_X 6 # define R300_FPI2_ARGA_SRC2C_Y 7 # define R300_FPI2_ARGA_SRC2C_Z 8 # define R300_FPI2_ARGA_SRC0A 9 # define R300_FPI2_ARGA_SRC1A 10 # define R300_FPI2_ARGA_SRC2A 11 # define R300_FPI2_ARGA_SRC1A_LRP 15 # define R300_FPI2_ARGA_ZERO 16 # define R300_FPI2_ARGA_ONE 17 /* GUESS */ # define R300_FPI2_ARGA_HALF 18 # define R300_FPI2_ARG0A_SHIFT 0 # define R300_FPI2_ARG0A_MASK (31 << 0) # define R300_FPI2_ARG0A_NEG (1 << 5) /* GUESS */ # define R300_FPI2_ARG0A_ABS (1 << 6) # define R300_FPI2_ARG1A_SHIFT 7 # define R300_FPI2_ARG1A_MASK (31 << 7) # define R300_FPI2_ARG1A_NEG (1 << 12) /* GUESS */ # define R300_FPI2_ARG1A_ABS (1 << 13) # define R300_FPI2_ARG2A_SHIFT 14 # define R300_FPI2_ARG2A_MASK (31 << 14) # define R300_FPI2_ARG2A_NEG (1 << 19) /* GUESS */ # define R300_FPI2_ARG2A_ABS (1 << 20) # define R300_FPI2_SPECIAL_LRP (1 << 21) # define R300_FPI2_OUTA_MAD (0 << 23) # define R300_FPI2_OUTA_DP4 (1 << 23) # define R300_FPI2_OUTA_MIN (2 << 23) # define R300_FPI2_OUTA_MAX (3 << 23) # define R300_FPI2_OUTA_CMP (6 << 23) # define R300_FPI2_OUTA_FRC (7 << 23) # define R300_FPI2_OUTA_EX2 (8 << 23) # define R300_FPI2_OUTA_LG2 (9 << 23) # define R300_FPI2_OUTA_RCP (10 << 23) # define R300_FPI2_OUTA_RSQ (11 << 23) # define R300_FPI2_OUTA_SAT (1 << 30) -# define R300_FPI2_UNKNOWN_31 (1 << 31) +# define R300_FPI2_UNKNOWN_31 (1U << 31) /* END: Fragment program instruction set */ /* Fog state and color */ #define R300_RE_FOG_STATE 0x4BC0 # define R300_FOG_ENABLE (1 << 0) # define R300_FOG_MODE_LINEAR (0 << 1) # define R300_FOG_MODE_EXP (1 << 1) # define R300_FOG_MODE_EXP2 (2 << 1) # define R300_FOG_MODE_MASK (3 << 1) #define R300_FOG_COLOR_R 0x4BC8 #define R300_FOG_COLOR_G 0x4BCC #define R300_FOG_COLOR_B 0x4BD0 #define R300_PP_ALPHA_TEST 0x4BD4 # define R300_REF_ALPHA_MASK 0x000000ff # define R300_ALPHA_TEST_FAIL (0 << 8) # define R300_ALPHA_TEST_LESS (1 << 8) # define R300_ALPHA_TEST_LEQUAL (3 << 8) # define R300_ALPHA_TEST_EQUAL (2 << 8) # define R300_ALPHA_TEST_GEQUAL (6 << 8) # define R300_ALPHA_TEST_GREATER (4 << 8) # define R300_ALPHA_TEST_NEQUAL (5 << 8) # define 
R300_ALPHA_TEST_PASS (7 << 8) # define R300_ALPHA_TEST_OP_MASK (7 << 8) # define R300_ALPHA_TEST_ENABLE (1 << 11) /* gap */ /* Fragment program parameters in 7.16 floating point */ #define R300_PFS_PARAM_0_X 0x4C00 #define R300_PFS_PARAM_0_Y 0x4C04 #define R300_PFS_PARAM_0_Z 0x4C08 #define R300_PFS_PARAM_0_W 0x4C0C /* GUESS: PARAM_31 is last, based on native limits reported by fglrx */ #define R300_PFS_PARAM_31_X 0x4DF0 #define R300_PFS_PARAM_31_Y 0x4DF4 #define R300_PFS_PARAM_31_Z 0x4DF8 #define R300_PFS_PARAM_31_W 0x4DFC /* Notes: * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in * the application * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND * are set to the same * function (both registers are always set up completely in any case) * - Most blend flags are simply copied from R200 and not tested yet */ #define R300_RB3D_CBLEND 0x4E04 #define R300_RB3D_ABLEND 0x4E08 /* the following only appear in CBLEND */ # define R300_BLEND_ENABLE (1 << 0) # define R300_BLEND_UNKNOWN (3 << 1) # define R300_BLEND_NO_SEPARATE (1 << 3) /* the following are shared between CBLEND and ABLEND */ # define R300_FCN_MASK (3 << 12) # define R300_COMB_FCN_ADD_CLAMP (0 << 12) # define R300_COMB_FCN_ADD_NOCLAMP (1 << 12) # define R300_COMB_FCN_SUB_CLAMP (2 << 12) # define R300_COMB_FCN_SUB_NOCLAMP (3 << 12) # define R300_COMB_FCN_MIN (4 << 12) # define R300_COMB_FCN_MAX (5 << 12) # define R300_COMB_FCN_RSUB_CLAMP (6 << 12) # define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12) # define R300_BLEND_GL_ZERO (32) # define R300_BLEND_GL_ONE (33) # define R300_BLEND_GL_SRC_COLOR (34) # define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35) # define R300_BLEND_GL_DST_COLOR (36) # define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37) # define R300_BLEND_GL_SRC_ALPHA (38) # define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39) # define R300_BLEND_GL_DST_ALPHA (40) # define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41) # define R300_BLEND_GL_SRC_ALPHA_SATURATE (42) # define R300_BLEND_GL_CONST_COLOR (43) # define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44) # define R300_BLEND_GL_CONST_ALPHA (45) # define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46) # define R300_BLEND_MASK (63) # define R300_SRC_BLEND_SHIFT (16) # define R300_DST_BLEND_SHIFT (24) #define R300_RB3D_BLEND_COLOR 0x4E10 #define R300_RB3D_COLORMASK 0x4E0C # define R300_COLORMASK0_B (1<<0) # define R300_COLORMASK0_G (1<<1) # define R300_COLORMASK0_R (1<<2) # define R300_COLORMASK0_A (1<<3) /* gap */ #define R300_RB3D_COLOROFFSET0 0x4E28 # define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */ #define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */ #define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */ #define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */ /* gap */ /* Bit 16: Larger tiles * Bit 17: 4x2 tiles * Bit 18: Extremely weird tile like, but some pixels duplicated? */ #define R300_RB3D_COLORPITCH0 0x4E38 # define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ # define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ # define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ # define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ # define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ # define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ # define R300_COLOR_FORMAT_RGB565 (2 << 22) # define R300_COLOR_FORMAT_ARGB8888 (3 << 22) #define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */ #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ #define R300_RB3D_AARESOLVE_CTL 0x4E88 /* gap */ /* Guess by Vladimir. 
* Set to 0A before 3D operations, set to 02 afterwards. */ /*#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C*/ # define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002 # define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A /* gap */ /* There seems to be no "write only" setting, so use Z-test = ALWAYS * for this. * Bit (1<<8) is the "test" bit. so plain write is 6 - vd */ #define R300_ZB_CNTL 0x4F00 # define R300_STENCIL_ENABLE (1 << 0) # define R300_Z_ENABLE (1 << 1) # define R300_Z_WRITE_ENABLE (1 << 2) # define R300_Z_SIGNED_COMPARE (1 << 3) # define R300_STENCIL_FRONT_BACK (1 << 4) #define R300_ZB_ZSTENCILCNTL 0x4f04 /* functions */ # define R300_ZS_NEVER 0 # define R300_ZS_LESS 1 # define R300_ZS_LEQUAL 2 # define R300_ZS_EQUAL 3 # define R300_ZS_GEQUAL 4 # define R300_ZS_GREATER 5 # define R300_ZS_NOTEQUAL 6 # define R300_ZS_ALWAYS 7 # define R300_ZS_MASK 7 /* operations */ # define R300_ZS_KEEP 0 # define R300_ZS_ZERO 1 # define R300_ZS_REPLACE 2 # define R300_ZS_INCR 3 # define R300_ZS_DECR 4 # define R300_ZS_INVERT 5 # define R300_ZS_INCR_WRAP 6 # define R300_ZS_DECR_WRAP 7 # define R300_Z_FUNC_SHIFT 0 /* front and back refer to operations done for front and back faces, i.e. separate stencil function support */ # define R300_S_FRONT_FUNC_SHIFT 3 # define R300_S_FRONT_SFAIL_OP_SHIFT 6 # define R300_S_FRONT_ZPASS_OP_SHIFT 9 # define R300_S_FRONT_ZFAIL_OP_SHIFT 12 # define R300_S_BACK_FUNC_SHIFT 15 # define R300_S_BACK_SFAIL_OP_SHIFT 18 # define R300_S_BACK_ZPASS_OP_SHIFT 21 # define R300_S_BACK_ZFAIL_OP_SHIFT 24 #define R300_ZB_STENCILREFMASK 0x4f08 # define R300_STENCILREF_SHIFT 0 # define R300_STENCILREF_MASK 0x000000ff # define R300_STENCILMASK_SHIFT 8 # define R300_STENCILMASK_MASK 0x0000ff00 # define R300_STENCILWRITEMASK_SHIFT 16 # define R300_STENCILWRITEMASK_MASK 0x00ff0000 /* gap */ #define R300_ZB_FORMAT 0x4f10 # define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0) # define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0) # define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0) /* reserved up to (15 << 0) */ # define R300_INVERT_13E3_LEADING_ONES (0 << 4) # define R300_INVERT_13E3_LEADING_ZEROS (1 << 4) #define R300_ZB_ZTOP 0x4F14 # define R300_ZTOP_DISABLE (0 << 0) # define R300_ZTOP_ENABLE (1 << 0) /* gap */ #define R300_ZB_ZCACHE_CTLSTAT 0x4f18 # define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0) # define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0) # define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1) # define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1) # define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31) -# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31) +# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1U << 31) #define R300_ZB_BW_CNTL 0x4f1c # define R300_HIZ_DISABLE (0 << 0) # define R300_HIZ_ENABLE (1 << 0) # define R300_HIZ_MIN (0 << 1) # define R300_HIZ_MAX (1 << 1) # define R300_FAST_FILL_DISABLE (0 << 2) # define R300_FAST_FILL_ENABLE (1 << 2) # define R300_RD_COMP_DISABLE (0 << 3) # define R300_RD_COMP_ENABLE (1 << 3) # define R300_WR_COMP_DISABLE (0 << 4) # define R300_WR_COMP_ENABLE (1 << 4) # define R300_ZB_CB_CLEAR_RMW (0 << 5) # define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5) # define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6) # define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6) # define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7) # define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7) # define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8) # define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8) # define R500_BMASK_ENABLE (0 << 10) # define R500_BMASK_DISABLE (1 << 10) # define 
R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11) # define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11) # define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12) # define R500_HIZ_FP_EXP_BITS_1 (1 << 12) # define R500_HIZ_FP_EXP_BITS_2 (2 << 12) # define R500_HIZ_FP_EXP_BITS_3 (3 << 12) # define R500_HIZ_FP_EXP_BITS_4 (4 << 12) # define R500_HIZ_FP_EXP_BITS_5 (5 << 12) # define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15) # define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15) # define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16) # define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16) # define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17) # define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17) # define R500_PEQ_PACKING_DISABLE (0 << 18) # define R500_PEQ_PACKING_ENABLE (1 << 18) # define R500_COVERED_PTR_MASKING_DISABLE (0 << 18) # define R500_COVERED_PTR_MASKING_ENABLE (1 << 18) /* gap */ /* Z Buffer Address Offset. * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles. */ #define R300_ZB_DEPTHOFFSET 0x4f20 /* Z Buffer Pitch and Endian Control */ #define R300_ZB_DEPTHPITCH 0x4f24 # define R300_DEPTHPITCH_MASK 0x00003FFC # define R300_DEPTHMACROTILE_DISABLE (0 << 16) # define R300_DEPTHMACROTILE_ENABLE (1 << 16) # define R300_DEPTHMICROTILE_LINEAR (0 << 17) # define R300_DEPTHMICROTILE_TILED (1 << 17) # define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17) # define R300_DEPTHENDIAN_NO_SWAP (0 << 18) # define R300_DEPTHENDIAN_WORD_SWAP (1 << 18) # define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18) # define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18) /* Z Buffer Clear Value */ #define R300_ZB_DEPTHCLEARVALUE 0x4f28 #define R300_ZB_ZMASK_OFFSET 0x4f30 #define R300_ZB_ZMASK_PITCH 0x4f34 #define R300_ZB_ZMASK_WRINDEX 0x4f38 #define R300_ZB_ZMASK_DWORD 0x4f3c #define R300_ZB_ZMASK_RDINDEX 0x4f40 /* Hierarchical Z Memory Offset */ #define R300_ZB_HIZ_OFFSET 0x4f44 /* Hierarchical Z Write Index */ #define R300_ZB_HIZ_WRINDEX 0x4f48 /* Hierarchical Z Data */ #define R300_ZB_HIZ_DWORD 0x4f4c /* Hierarchical Z Read Index */ #define R300_ZB_HIZ_RDINDEX 0x4f50 /* Hierarchical Z Pitch */ #define R300_ZB_HIZ_PITCH 0x4f54 /* Z Buffer Z Pass Counter Data */ #define R300_ZB_ZPASS_DATA 0x4f58 /* Z Buffer Z Pass Counter Address */ #define R300_ZB_ZPASS_ADDR 0x4f5c /* Depth buffer X and Y coordinate offset */ #define R300_ZB_DEPTHXY_OFFSET 0x4f60 # define R300_DEPTHX_OFFSET_SHIFT 1 # define R300_DEPTHX_OFFSET_MASK 0x000007FE # define R300_DEPTHY_OFFSET_SHIFT 17 # define R300_DEPTHY_OFFSET_MASK 0x07FE0000 /* Sets the fifo sizes */ #define R500_ZB_FIFO_SIZE 0x4fd0 # define R500_OP_FIFO_SIZE_FULL (0 << 0) # define R500_OP_FIFO_SIZE_HALF (1 << 0) # define R500_OP_FIFO_SIZE_QUATER (2 << 0) # define R500_OP_FIFO_SIZE_EIGTHS (4 << 0) /* Stencil Reference Value and Mask for backfacing quads */ /* R300_ZB_STENCILREFMASK handles front face */ #define R500_ZB_STENCILREFMASK_BF 0x4fd4 # define R500_STENCILREF_SHIFT 0 # define R500_STENCILREF_MASK 0x000000ff # define R500_STENCILMASK_SHIFT 8 # define R500_STENCILMASK_MASK 0x0000ff00 # define R500_STENCILWRITEMASK_SHIFT 16 # define R500_STENCILWRITEMASK_MASK 0x00ff0000 /* BEGIN: Vertex program instruction set */ /* Every instruction is four dwords long: * DWORD 0: output and opcode * DWORD 1: first argument * DWORD 2: second argument * DWORD 3: third argument * * Notes: * - ABS r, a is implemented as MAX r, a, -a * - MOV is implemented as ADD to zero * - XPD is implemented as MUL + MAD * - FLR is implemented as FRC + ADD * - apparently, fglrx tries to schedule instructions so that 
there is at * least one instruction between the write to a temporary and the first * read from said temporary; however, violations of this scheduling are * allowed * - register indices seem to be unrelated to OpenGL aliasing to * conventional state * - only one attribute and one parameter can be loaded at a time; however, * the same attribute/parameter can be used for more than one argument * - the second software argument for POW is the third hardware argument * (no idea why) * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2 * * There is some magic surrounding LIT: * The single argument is replicated across all three inputs, but swizzled: * First argument: xyzy * Second argument: xyzx * Third argument: xyzw * Whenever the result is used later in the fragment program, fglrx forces * x and w to be 1.0 in the input selection; I don't know whether this is * strictly necessary */ #define R300_VPI_OUT_OP_DOT (1 << 0) #define R300_VPI_OUT_OP_MUL (2 << 0) #define R300_VPI_OUT_OP_ADD (3 << 0) #define R300_VPI_OUT_OP_MAD (4 << 0) #define R300_VPI_OUT_OP_DST (5 << 0) #define R300_VPI_OUT_OP_FRC (6 << 0) #define R300_VPI_OUT_OP_MAX (7 << 0) #define R300_VPI_OUT_OP_MIN (8 << 0) #define R300_VPI_OUT_OP_SGE (9 << 0) #define R300_VPI_OUT_OP_SLT (10 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */ #define R300_VPI_OUT_OP_UNK12 (12 << 0) #define R300_VPI_OUT_OP_ARL (13 << 0) #define R300_VPI_OUT_OP_EXP (65 << 0) #define R300_VPI_OUT_OP_LOG (66 << 0) /* Used in fog computations, scalar(scalar) */ #define R300_VPI_OUT_OP_UNK67 (67 << 0) #define R300_VPI_OUT_OP_LIT (68 << 0) #define R300_VPI_OUT_OP_POW (69 << 0) #define R300_VPI_OUT_OP_RCP (70 << 0) #define R300_VPI_OUT_OP_RSQ (72 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */ #define R300_VPI_OUT_OP_UNK73 (73 << 0) #define R300_VPI_OUT_OP_EX2 (75 << 0) #define R300_VPI_OUT_OP_LG2 (76 << 0) #define R300_VPI_OUT_OP_MAD_2 (128 << 0) /* all temps, vector(scalar, vector, vector) */ #define R300_VPI_OUT_OP_UNK129 (129 << 0) #define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8) #define R300_VPI_OUT_REG_CLASS_ADDR (1 << 8) #define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8) #define R300_VPI_OUT_REG_CLASS_MASK (31 << 8) #define R300_VPI_OUT_REG_INDEX_SHIFT 13 /* GUESS based on fglrx native limits */ #define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) #define R300_VPI_OUT_WRITE_X (1 << 20) #define R300_VPI_OUT_WRITE_Y (1 << 21) #define R300_VPI_OUT_WRITE_Z (1 << 22) #define R300_VPI_OUT_WRITE_W (1 << 23) #define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0) #define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0) #define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0) #define R300_VPI_IN_REG_CLASS_NONE (9 << 0) #define R300_VPI_IN_REG_CLASS_MASK (31 << 0) #define R300_VPI_IN_REG_INDEX_SHIFT 5 /* GUESS based on fglrx native limits */ #define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* The R300 can select components from the input register arbitrarily.
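* As a hypothetical illustration (built only from the select codes and shifts defined below, not taken from fglrx traces): an identity swizzle for one source operand would be encoded as (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) | (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) | (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) | (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT).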
* Use the following constants, shifted by the component shift you * want to select */ #define R300_VPI_IN_SELECT_X 0 #define R300_VPI_IN_SELECT_Y 1 #define R300_VPI_IN_SELECT_Z 2 #define R300_VPI_IN_SELECT_W 3 #define R300_VPI_IN_SELECT_ZERO 4 #define R300_VPI_IN_SELECT_ONE 5 #define R300_VPI_IN_SELECT_MASK 7 #define R300_VPI_IN_X_SHIFT 13 #define R300_VPI_IN_Y_SHIFT 16 #define R300_VPI_IN_Z_SHIFT 19 #define R300_VPI_IN_W_SHIFT 22 #define R300_VPI_IN_NEG_X (1 << 25) #define R300_VPI_IN_NEG_Y (1 << 26) #define R300_VPI_IN_NEG_Z (1 << 27) #define R300_VPI_IN_NEG_W (1 << 28) /* END: Vertex program instruction set */ /* BEGIN: Packet 3 commands */ /* A primitive emission dword. */ #define R300_PRIM_TYPE_NONE (0 << 0) #define R300_PRIM_TYPE_POINT (1 << 0) #define R300_PRIM_TYPE_LINE (2 << 0) #define R300_PRIM_TYPE_LINE_STRIP (3 << 0) #define R300_PRIM_TYPE_TRI_LIST (4 << 0) #define R300_PRIM_TYPE_TRI_FAN (5 << 0) #define R300_PRIM_TYPE_TRI_STRIP (6 << 0) #define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0) #define R300_PRIM_TYPE_RECT_LIST (8 << 0) #define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) /* GUESS (based on r200) */ #define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) #define R300_PRIM_TYPE_LINE_LOOP (12 << 0) #define R300_PRIM_TYPE_QUADS (13 << 0) #define R300_PRIM_TYPE_QUAD_STRIP (14 << 0) #define R300_PRIM_TYPE_POLYGON (15 << 0) #define R300_PRIM_TYPE_MASK 0xF #define R300_PRIM_WALK_IND (1 << 4) #define R300_PRIM_WALK_LIST (2 << 4) #define R300_PRIM_WALK_RING (3 << 4) #define R300_PRIM_WALK_MASK (3 << 4) /* GUESS (based on r200) */ #define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) #define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) #define R300_PRIM_NUM_VERTICES_SHIFT 16 #define R300_PRIM_NUM_VERTICES_MASK 0xffff /* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR. * Two parameter dwords: * 0. The first parameter always appears to be 0 * 1. The second parameter is a standard primitive emission dword. */ #define R300_PACKET3_3D_DRAW_VBUF 0x00002800 /* Specify the full set of vertex arrays as (address, stride). * The first parameter is the number of vertex arrays specified. * The rest of the command is a variable-length list of blocks, where * each block is three dwords long and specifies two arrays. * The first dword of a block is split into two words, the less significant * word refers to the first array, the more significant word to the second * array in the block. * The low byte of each word contains the size of an array entry in dwords, * the high byte contains the stride of the array. * The second dword of a block contains the pointer to the first array, * the third dword of a block contains the pointer to the second array. * Note that if the total number of arrays is odd, the third dword of * the last block is omitted.
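* As an illustrative sketch (hypothetical operands, not from the original header): a block describing array A (entry size 3 dwords, stride 3 dwords) and array B (entry size 1 dword, stride 1 dword) would be packed as dword 0 = (3 | (3 << 8)) | ((1 | (1 << 8)) << 16), dword 1 = gpu_addr_A, dword 2 = gpu_addr_B, where gpu_addr_A and gpu_addr_B are placeholder names for the two array pointers.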
*/ #define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00 #define R300_PACKET3_INDX_BUFFER 0x00003300 # define R300_EB_UNK1_SHIFT 24 # define R300_EB_UNK1 (0x80<<24) # define R300_EB_UNK2 0x0810 #define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400 #define R300_PACKET3_3D_DRAW_INDX_2 0x00003600 /* END: Packet 3 commands */ /* Color formats for 2d packets */ #define R300_CP_COLOR_FORMAT_CI8 2 #define R300_CP_COLOR_FORMAT_ARGB1555 3 #define R300_CP_COLOR_FORMAT_RGB565 4 #define R300_CP_COLOR_FORMAT_ARGB8888 6 #define R300_CP_COLOR_FORMAT_RGB332 7 #define R300_CP_COLOR_FORMAT_RGB8 9 #define R300_CP_COLOR_FORMAT_ARGB4444 15 /* * CP type-3 packets */ #define R300_CP_CMD_BITBLT_MULTI 0xC0009B00 #define R500_VAP_INDEX_OFFSET 0x208c #define R500_GA_US_VECTOR_INDEX 0x4250 #define R500_GA_US_VECTOR_DATA 0x4254 #define R500_RS_IP_0 0x4074 #define R500_RS_INST_0 0x4320 #define R500_US_CONFIG 0x4600 #define R500_US_FC_CTRL 0x4624 #define R500_US_CODE_ADDR 0x4630 #define R500_RB3D_COLOR_CLEAR_VALUE_AR 0x46c0 #define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8 #define R300_SU_REG_DEST 0x42c8 #define RV530_FG_ZBREG_DEST 0x4be8 #define R300_ZB_ZPASS_DATA 0x4f58 #define R300_ZB_ZPASS_ADDR 0x4f5c #endif /* _R300_REG_H */ /* *INDENT-ON* */ Index: head/sys/dev/drm/r600_blit.c =================================================================== --- head/sys/dev/drm/r600_blit.c (revision 258779) +++ head/sys/dev/drm/r600_blit.c (revision 258780) @@ -1,1999 +1,1999 @@ /*- * Copyright 2009 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Alex Deucher */ #include __FBSDID("$FreeBSD$"); #include "dev/drm/drmP.h" #include "dev/drm/drm.h" #include "dev/drm/radeon_drm.h" #include "dev/drm/radeon_drv.h" static u32 r6xx_default_state[] = { 0xc0002400, 0x00000000, 0xc0012800, 0x80000000, 0x80000000, 0xc0004600, 0x00000016, 0xc0016800, 0x00000010, 0x00028000, 0xc0016800, 0x00000010, 0x00008000, 0xc0016800, 0x00000542, 0x07000003, 0xc0016800, 0x000005c5, 0x00000000, 0xc0016800, 0x00000363, 0x00000000, 0xc0016800, 0x0000060c, 0x82000000, 0xc0016800, 0x0000060e, 0x01020204, 0xc0016f00, 0x00000000, 0x00000000, 0xc0016f00, 0x00000001, 0x00000000, 0xc0096900, 0x0000022a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x00000004, 0x00000000, 0xc0016900, 0x0000000a, 0x00000000, 0xc0016900, 0x0000000b, 0x00000000, 0xc0016900, 0x0000010c, 0x00000000, 0xc0016900, 0x0000010d, 0x00000000, 0xc0016900, 0x00000200, 0x00000000, 0xc0016900, 0x00000343, 0x00000060, 0xc0016900, 0x00000344, 0x00000040, 0xc0016900, 0x00000351, 0x0000aa00, 0xc0016900, 0x00000104, 0x00000000, 0xc0016900, 0x0000010e, 0x00000000, 0xc0046900, 0x00000105, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0036900, 0x00000109, 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x0000030c, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x00000048, 0x3f800000, 0x00000000, 0x3f800000, 0x3f800000, 0xc0016900, 0x0000008e, 0x0000000f, 0xc0016900, 0x00000080, 0x00000000, 0xc0016900, 0x00000083, 0x0000ffff, 0xc0016900, 0x00000084, 0x00000000, 0xc0016900, 0x00000085, 0x20002000, 0xc0016900, 0x00000086, 0x00000000, 0xc0016900, 0x00000087, 0x20002000, 0xc0016900, 0x00000088, 0x00000000, 0xc0016900, 0x00000089, 0x20002000, 0xc0016900, 0x0000008a, 0x00000000, 0xc0016900, 0x0000008b, 0x20002000, 0xc0016900, 0x0000008c, 0x00000000, 0xc0016900, 0x00000094, 0x80000000, 0xc0016900, 0x00000095, 0x20002000, 0xc0026900, 0x000000b4, 0x00000000, 0x3f800000, 0xc0016900, 0x00000096, 0x80000000, 0xc0016900, 0x00000097, 0x20002000, 0xc0026900, 0x000000b6, 0x00000000, 0x3f800000, 0xc0016900, 0x00000098, 0x80000000, 0xc0016900, 0x00000099, 0x20002000, 0xc0026900, 0x000000b8, 0x00000000, 0x3f800000, 0xc0016900, 0x0000009a, 0x80000000, 0xc0016900, 0x0000009b, 0x20002000, 0xc0026900, 0x000000ba, 0x00000000, 0x3f800000, 0xc0016900, 0x0000009c, 0x80000000, 0xc0016900, 0x0000009d, 0x20002000, 0xc0026900, 0x000000bc, 0x00000000, 0x3f800000, 0xc0016900, 0x0000009e, 0x80000000, 0xc0016900, 0x0000009f, 0x20002000, 0xc0026900, 0x000000be, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a0, 0x80000000, 0xc0016900, 0x000000a1, 0x20002000, 0xc0026900, 0x000000c0, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a2, 0x80000000, 0xc0016900, 0x000000a3, 0x20002000, 0xc0026900, 0x000000c2, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a4, 0x80000000, 0xc0016900, 0x000000a5, 0x20002000, 0xc0026900, 0x000000c4, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a6, 0x80000000, 0xc0016900, 0x000000a7, 0x20002000, 0xc0026900, 0x000000c6, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a8, 0x80000000, 0xc0016900, 0x000000a9, 0x20002000, 0xc0026900, 0x000000c8, 0x00000000, 0x3f800000, 0xc0016900, 0x000000aa, 0x80000000, 0xc0016900, 0x000000ab, 0x20002000, 0xc0026900, 0x000000ca, 0x00000000, 0x3f800000, 0xc0016900, 0x000000ac, 0x80000000, 0xc0016900, 0x000000ad, 0x20002000, 0xc0026900, 0x000000cc, 0x00000000, 0x3f800000, 0xc0016900, 0x000000ae, 0x80000000, 0xc0016900, 0x000000af, 0x20002000, 0xc0026900, 0x000000ce, 0x00000000, 0x3f800000, 0xc0016900, 
0x000000b0, 0x80000000, 0xc0016900, 0x000000b1, 0x20002000, 0xc0026900, 0x000000d0, 0x00000000, 0x3f800000, 0xc0016900, 0x000000b2, 0x80000000, 0xc0016900, 0x000000b3, 0x20002000, 0xc0026900, 0x000000d2, 0x00000000, 0x3f800000, 0xc0016900, 0x00000293, 0x00004010, 0xc0016900, 0x00000300, 0x00000000, 0xc0016900, 0x00000301, 0x00000000, 0xc0016900, 0x00000312, 0xffffffff, 0xc0016900, 0x00000307, 0x00000000, 0xc0016900, 0x00000308, 0x00000000, 0xc0016900, 0x00000283, 0x00000000, 0xc0016900, 0x00000292, 0x00000000, 0xc0066900, 0x0000010f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x00000206, 0x00000000, 0xc0016900, 0x00000207, 0x00000000, 0xc0016900, 0x00000208, 0x00000000, 0xc0046900, 0x00000303, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0xc0016900, 0x00000205, 0x00000004, 0xc0016900, 0x00000280, 0x00000000, 0xc0016900, 0x00000281, 0x00000000, 0xc0016900, 0x0000037e, 0x00000000, 0xc0016900, 0x00000382, 0x00000000, 0xc0016900, 0x00000380, 0x00000000, 0xc0016900, 0x00000383, 0x00000000, 0xc0016900, 0x00000381, 0x00000000, 0xc0016900, 0x00000282, 0x00000008, 0xc0016900, 0x00000302, 0x0000002d, 0xc0016900, 0x0000037f, 0x00000000, 0xc0016900, 0x000001b2, 0x00000000, 0xc0016900, 0x000001b6, 0x00000000, 0xc0016900, 0x000001b7, 0x00000000, 0xc0016900, 0x000001b8, 0x00000000, 0xc0016900, 0x000001b9, 0x00000000, 0xc0016900, 0x00000225, 0x00000000, 0xc0016900, 0x00000229, 0x00000000, 0xc0016900, 0x00000237, 0x00000000, 0xc0016900, 0x00000100, 0x00000800, 0xc0016900, 0x00000101, 0x00000000, 0xc0016900, 0x00000102, 0x00000000, 0xc0016900, 0x000002a8, 0x00000000, 0xc0016900, 0x000002a9, 0x00000000, 0xc0016900, 0x00000103, 0x00000000, 0xc0016900, 0x00000284, 0x00000000, 0xc0016900, 0x00000290, 0x00000000, 0xc0016900, 0x00000285, 0x00000000, 0xc0016900, 0x00000286, 0x00000000, 0xc0016900, 0x00000287, 0x00000000, 0xc0016900, 0x00000288, 0x00000000, 0xc0016900, 0x00000289, 0x00000000, 0xc0016900, 0x0000028a, 0x00000000, 0xc0016900, 0x0000028b, 0x00000000, 0xc0016900, 0x0000028c, 0x00000000, 0xc0016900, 0x0000028d, 0x00000000, 0xc0016900, 0x0000028e, 0x00000000, 0xc0016900, 0x0000028f, 0x00000000, 0xc0016900, 0x000002a1, 0x00000000, 0xc0016900, 0x000002a5, 0x00000000, 0xc0016900, 0x000002ac, 0x00000000, 0xc0016900, 0x000002ad, 0x00000000, 0xc0016900, 0x000002ae, 0x00000000, 0xc0016900, 0x000002c8, 0x00000000, 0xc0016900, 0x00000206, 0x00000100, 0xc0016900, 0x00000204, 0x00010000, 0xc0036e00, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0xc0016900, 0x0000008f, 0x0000000f, 0xc0016900, 0x000001e8, 0x00000001, 0xc0016900, 0x00000202, 0x00cc0000, 0xc0016900, 0x00000205, 0x00000244, 0xc0016900, 0x00000203, 0x00000210, 0xc0016900, 0x000001b1, 0x00000000, 0xc0016900, 0x00000185, 0x00000000, 0xc0016900, 0x000001b3, 0x00000001, 0xc0016900, 0x000001b4, 0x00000000, 0xc0016900, 0x00000191, 0x00000b00, 0xc0016900, 0x000001b5, 0x00000000, }; static u32 r7xx_default_state[] = { 0xc0012800, 0x80000000, 0x80000000, 0xc0004600, 0x00000016, 0xc0016800, 0x00000010, 0x00028000, 0xc0016800, 0x00000010, 0x00008000, 0xc0016800, 0x00000542, 0x07000002, 0xc0016800, 0x000005c5, 0x00000000, 0xc0016800, 0x00000363, 0x00004000, 0xc0016800, 0x0000060c, 0x00000000, 0xc0016800, 0x0000060e, 0x00420204, 0xc0016f00, 0x00000000, 0x00000000, 0xc0016f00, 0x00000001, 0x00000000, 0xc0096900, 0x0000022a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x00000004, 0x00000000, 0xc0016900, 0x0000000a, 0x00000000, 0xc0016900, 0x0000000b, 
0x00000000, 0xc0016900, 0x0000010c, 0x00000000, 0xc0016900, 0x0000010d, 0x00000000, 0xc0016900, 0x00000200, 0x00000000, 0xc0016900, 0x00000343, 0x00000060, 0xc0016900, 0x00000344, 0x00000000, 0xc0016900, 0x00000351, 0x0000aa00, 0xc0016900, 0x00000104, 0x00000000, 0xc0016900, 0x0000010e, 0x00000000, 0xc0046900, 0x00000105, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x0000030c, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x0000008e, 0x0000000f, 0xc0016900, 0x00000080, 0x00000000, 0xc0016900, 0x00000083, 0x0000ffff, 0xc0016900, 0x00000084, 0x00000000, 0xc0016900, 0x00000085, 0x20002000, 0xc0016900, 0x00000086, 0x00000000, 0xc0016900, 0x00000087, 0x20002000, 0xc0016900, 0x00000088, 0x00000000, 0xc0016900, 0x00000089, 0x20002000, 0xc0016900, 0x0000008a, 0x00000000, 0xc0016900, 0x0000008b, 0x20002000, 0xc0016900, 0x0000008c, 0xaaaaaaaa, 0xc0016900, 0x00000094, 0x80000000, 0xc0016900, 0x00000095, 0x20002000, 0xc0026900, 0x000000b4, 0x00000000, 0x3f800000, 0xc0016900, 0x00000096, 0x80000000, 0xc0016900, 0x00000097, 0x20002000, 0xc0026900, 0x000000b6, 0x00000000, 0x3f800000, 0xc0016900, 0x00000098, 0x80000000, 0xc0016900, 0x00000099, 0x20002000, 0xc0026900, 0x000000b8, 0x00000000, 0x3f800000, 0xc0016900, 0x0000009a, 0x80000000, 0xc0016900, 0x0000009b, 0x20002000, 0xc0026900, 0x000000ba, 0x00000000, 0x3f800000, 0xc0016900, 0x0000009c, 0x80000000, 0xc0016900, 0x0000009d, 0x20002000, 0xc0026900, 0x000000bc, 0x00000000, 0x3f800000, 0xc0016900, 0x0000009e, 0x80000000, 0xc0016900, 0x0000009f, 0x20002000, 0xc0026900, 0x000000be, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a0, 0x80000000, 0xc0016900, 0x000000a1, 0x20002000, 0xc0026900, 0x000000c0, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a2, 0x80000000, 0xc0016900, 0x000000a3, 0x20002000, 0xc0026900, 0x000000c2, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a4, 0x80000000, 0xc0016900, 0x000000a5, 0x20002000, 0xc0026900, 0x000000c4, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a6, 0x80000000, 0xc0016900, 0x000000a7, 0x20002000, 0xc0026900, 0x000000c6, 0x00000000, 0x3f800000, 0xc0016900, 0x000000a8, 0x80000000, 0xc0016900, 0x000000a9, 0x20002000, 0xc0026900, 0x000000c8, 0x00000000, 0x3f800000, 0xc0016900, 0x000000aa, 0x80000000, 0xc0016900, 0x000000ab, 0x20002000, 0xc0026900, 0x000000ca, 0x00000000, 0x3f800000, 0xc0016900, 0x000000ac, 0x80000000, 0xc0016900, 0x000000ad, 0x20002000, 0xc0026900, 0x000000cc, 0x00000000, 0x3f800000, 0xc0016900, 0x000000ae, 0x80000000, 0xc0016900, 0x000000af, 0x20002000, 0xc0026900, 0x000000ce, 0x00000000, 0x3f800000, 0xc0016900, 0x000000b0, 0x80000000, 0xc0016900, 0x000000b1, 0x20002000, 0xc0026900, 0x000000d0, 0x00000000, 0x3f800000, 0xc0016900, 0x000000b2, 0x80000000, 0xc0016900, 0x000000b3, 0x20002000, 0xc0026900, 0x000000d2, 0x00000000, 0x3f800000, 0xc0016900, 0x00000293, 0x00514000, 0xc0016900, 0x00000300, 0x00000000, 0xc0016900, 0x00000301, 0x00000000, 0xc0016900, 0x00000312, 0xffffffff, 0xc0016900, 0x00000307, 0x00000000, 0xc0016900, 0x00000308, 0x00000000, 0xc0016900, 0x00000283, 0x00000000, 0xc0016900, 0x00000292, 0x00000000, 0xc0066900, 0x0000010f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x00000206, 0x00000000, 0xc0016900, 0x00000207, 0x00000000, 0xc0016900, 0x00000208, 0x00000000, 0xc0046900, 0x00000303, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0xc0016900, 0x00000205, 0x00000004, 0xc0016900, 0x00000280, 0x00000000, 0xc0016900, 0x00000281, 0x00000000, 0xc0016900, 0x0000037e, 0x00000000, 0xc0016900, 0x00000382, 0x00000000, 0xc0016900, 
0x00000380, 0x00000000, 0xc0016900, 0x00000383, 0x00000000, 0xc0016900, 0x00000381, 0x00000000, 0xc0016900, 0x00000282, 0x00000008, 0xc0016900, 0x00000302, 0x0000002d, 0xc0016900, 0x0000037f, 0x00000000, 0xc0016900, 0x000001b2, 0x00000001, 0xc0016900, 0x000001b6, 0x00000000, 0xc0016900, 0x000001b7, 0x00000000, 0xc0016900, 0x000001b8, 0x00000000, 0xc0016900, 0x000001b9, 0x00000000, 0xc0016900, 0x00000225, 0x00000000, 0xc0016900, 0x00000229, 0x00000000, 0xc0016900, 0x00000237, 0x00000000, 0xc0016900, 0x00000100, 0x00000800, 0xc0016900, 0x00000101, 0x00000000, 0xc0016900, 0x00000102, 0x00000000, 0xc0016900, 0x000002a8, 0x00000000, 0xc0016900, 0x000002a9, 0x00000000, 0xc0016900, 0x00000103, 0x00000000, 0xc0016900, 0x00000284, 0x00000000, 0xc0016900, 0x00000290, 0x00000000, 0xc0016900, 0x00000285, 0x00000000, 0xc0016900, 0x00000286, 0x00000000, 0xc0016900, 0x00000287, 0x00000000, 0xc0016900, 0x00000288, 0x00000000, 0xc0016900, 0x00000289, 0x00000000, 0xc0016900, 0x0000028a, 0x00000000, 0xc0016900, 0x0000028b, 0x00000000, 0xc0016900, 0x0000028c, 0x00000000, 0xc0016900, 0x0000028d, 0x00000000, 0xc0016900, 0x0000028e, 0x00000000, 0xc0016900, 0x0000028f, 0x00000000, 0xc0016900, 0x000002a1, 0x00000000, 0xc0016900, 0x000002a5, 0x00000000, 0xc0016900, 0x000002ac, 0x00000000, 0xc0016900, 0x000002ad, 0x00000000, 0xc0016900, 0x000002ae, 0x00000000, 0xc0016900, 0x000002c8, 0x00000000, 0xc0016900, 0x00000206, 0x00000100, 0xc0016900, 0x00000204, 0x00010000, 0xc0036e00, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0xc0016900, 0x0000008f, 0x0000000f, 0xc0016900, 0x000001e8, 0x00000001, 0xc0016900, 0x00000202, 0x00cc0000, 0xc0016900, 0x00000205, 0x00000244, 0xc0016900, 0x00000203, 0x00000210, 0xc0016900, 0x000001b1, 0x00000000, 0xc0016900, 0x00000185, 0x00000000, 0xc0016900, 0x000001b3, 0x00000001, 0xc0016900, 0x000001b4, 0x00000000, 0xc0016900, 0x00000191, 0x00000b00, 0xc0016900, 0x000001b5, 0x00000000, }; /* same for r6xx/r7xx */ static u32 r6xx_vs[] = { 0x00000004, 0x81000000, 0x0000203c, 0x94000b08, 0x00004000, 0x14200b1a, 0x00000000, 0x00000000, 0x3c000000, 0x68cd1000, 0x00080000, 0x00000000, }; static u32 r6xx_ps[] = { 0x00000002, 0x80800000, 0x00000000, 0x94200688, 0x00000010, 0x000d1000, 0xb0800000, 0x00000000, }; #define DI_PT_RECTLIST 0x11 #define DI_INDEX_SIZE_16_BIT 0x0 #define DI_SRC_SEL_AUTO_INDEX 0x2 #define FMT_8 1 #define FMT_5_6_5 8 #define FMT_8_8_8_8 0x1a #define COLOR_8 1 #define COLOR_5_6_5 8 #define COLOR_8_8_8_8 0x1a #define R600_CB0_DEST_BASE_ENA (1 << 6) #define R600_TC_ACTION_ENA (1 << 23) #define R600_VC_ACTION_ENA (1 << 24) #define R600_CB_ACTION_ENA (1 << 25) #define R600_DB_ACTION_ENA (1 << 26) #define R600_SH_ACTION_ENA (1 << 27) #define R600_SMX_ACTION_ENA (1 << 28) #define R600_CB_COLOR0_SIZE 0x28060 #define R600_CB_COLOR0_VIEW 0x28080 #define R600_CB_COLOR0_INFO 0x280a0 #define R600_CB_COLOR0_TILE 0x280c0 #define R600_CB_COLOR0_FRAG 0x280e0 #define R600_CB_COLOR0_MASK 0x28100 #define R600_SQ_PGM_START_VS 0x28858 #define R600_SQ_PGM_RESOURCES_VS 0x28868 #define R600_SQ_PGM_CF_OFFSET_VS 0x288d0 #define R600_SQ_PGM_START_PS 0x28840 #define R600_SQ_PGM_RESOURCES_PS 0x28850 #define R600_SQ_PGM_EXPORTS_PS 0x28854 #define R600_SQ_PGM_CF_OFFSET_PS 0x288cc #define R600_VGT_PRIMITIVE_TYPE 0x8958 #define R600_PA_SC_SCREEN_SCISSOR_TL 0x28030 #define R600_PA_SC_GENERIC_SCISSOR_TL 0x28240 #define R600_PA_SC_WINDOW_SCISSOR_TL 0x28204 #define R600_SQ_TEX_VTX_INVALID_TEXTURE 0x0 #define R600_SQ_TEX_VTX_INVALID_BUFFER 0x1 #define R600_SQ_TEX_VTX_VALID_TEXTURE 0x2 #define 
R600_SQ_TEX_VTX_VALID_BUFFER 0x3 /* packet 3 type offsets */ #define R600_SET_CONFIG_REG_OFFSET 0x00008000 #define R600_SET_CONFIG_REG_END 0x0000ac00 #define R600_SET_CONTEXT_REG_OFFSET 0x00028000 #define R600_SET_CONTEXT_REG_END 0x00029000 #define R600_SET_ALU_CONST_OFFSET 0x00030000 #define R600_SET_ALU_CONST_END 0x00032000 #define R600_SET_RESOURCE_OFFSET 0x00038000 #define R600_SET_RESOURCE_END 0x0003c000 #define R600_SET_SAMPLER_OFFSET 0x0003c000 #define R600_SET_SAMPLER_END 0x0003cff0 #define R600_SET_CTL_CONST_OFFSET 0x0003cff0 #define R600_SET_CTL_CONST_END 0x0003e200 #define R600_SET_LOOP_CONST_OFFSET 0x0003e200 #define R600_SET_LOOP_CONST_END 0x0003e380 #define R600_SET_BOOL_CONST_OFFSET 0x0003e380 #define R600_SET_BOOL_CONST_END 0x00040000 /* Packet 3 types */ #define R600_IT_INDIRECT_BUFFER_END 0x00001700 #define R600_IT_SET_PREDICATION 0x00002000 #define R600_IT_REG_RMW 0x00002100 #define R600_IT_COND_EXEC 0x00002200 #define R600_IT_PRED_EXEC 0x00002300 #define R600_IT_START_3D_CMDBUF 0x00002400 #define R600_IT_DRAW_INDEX_2 0x00002700 #define R600_IT_CONTEXT_CONTROL 0x00002800 #define R600_IT_DRAW_INDEX_IMMD_BE 0x00002900 #define R600_IT_INDEX_TYPE 0x00002A00 #define R600_IT_DRAW_INDEX 0x00002B00 #define R600_IT_DRAW_INDEX_AUTO 0x00002D00 #define R600_IT_DRAW_INDEX_IMMD 0x00002E00 #define R600_IT_NUM_INSTANCES 0x00002F00 #define R600_IT_STRMOUT_BUFFER_UPDATE 0x00003400 #define R600_IT_INDIRECT_BUFFER_MP 0x00003800 #define R600_IT_MEM_SEMAPHORE 0x00003900 #define R600_IT_MPEG_INDEX 0x00003A00 #define R600_IT_WAIT_REG_MEM 0x00003C00 #define R600_IT_MEM_WRITE 0x00003D00 #define R600_IT_INDIRECT_BUFFER 0x00003200 #define R600_IT_CP_INTERRUPT 0x00004000 #define R600_IT_SURFACE_SYNC 0x00004300 #define R600_IT_ME_INITIALIZE 0x00004400 #define R600_IT_COND_WRITE 0x00004500 #define R600_IT_EVENT_WRITE 0x00004600 #define R600_IT_EVENT_WRITE_EOP 0x00004700 #define R600_IT_ONE_REG_WRITE 0x00005700 #define R600_IT_SET_CONFIG_REG 0x00006800 #define R600_IT_SET_CONTEXT_REG 0x00006900 #define R600_IT_SET_ALU_CONST 0x00006A00 #define R600_IT_SET_BOOL_CONST 0x00006B00 #define R600_IT_SET_LOOP_CONST 0x00006C00 #define R600_IT_SET_RESOURCE 0x00006D00 #define R600_IT_SET_SAMPLER 0x00006E00 #define R600_IT_SET_CTL_CONST 0x00006F00 #define R600_IT_SURFACE_BASE_UPDATE 0x00007300 static inline void set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr) { u32 cb_color_info; int pitch, slice; RING_LOCALS; DRM_DEBUG("\n"); h = (h + 7) & ~7; if (h < 8) h = 8; cb_color_info = ((format << 2) | (1 << 27)); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) && ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) { BEGIN_RING(21 + 2); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0)); OUT_RING(2 << 0); } else { BEGIN_RING(21); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); } OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((pitch << 0) | (slice << 10)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2); 
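/* cb_color_info, built above as (format << 2) | (1 << 27), supplies the render target format; the CB_COLOR0_TILE, _FRAG and _MASK context registers written next are simply zeroed for this blit target. */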
OUT_RING(cb_color_info); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); ADVANCE_RING(); } static inline void cp_set_surface_sync(drm_radeon_private_t *dev_priv, u32 sync_type, u32 size, u64 mc_addr) { u32 cp_coher_size; RING_LOCALS; DRM_DEBUG("\n"); if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); BEGIN_RING(5); OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3)); OUT_RING(sync_type); OUT_RING(cp_coher_size); OUT_RING((mc_addr >> 8)); OUT_RING(10); /* poll interval */ ADVANCE_RING(); } static inline void set_shaders(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u64 gpu_addr; int shader_size, i; u32 *vs, *ps; uint32_t sq_pgm_resources; RING_LOCALS; DRM_DEBUG("\n"); /* load shaders */ vs = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset); ps = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + 256); shader_size = sizeof(r6xx_vs) / 4; for (i= 0; i < shader_size; i++) vs[i] = r6xx_vs[i]; shader_size = sizeof(r6xx_ps) / 4; for (i= 0; i < shader_size; i++) ps[i] = r6xx_ps[i]; dev_priv->blit_vb->used = 512; gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset; /* setup shader regs */ sq_pgm_resources = (1 << 0); BEGIN_RING(9 + 12); /* VS */ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(sq_pgm_resources); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); /* PS */ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((gpu_addr + 256) >> 8); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(sq_pgm_resources | (1 << 28)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(2); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); ADVANCE_RING(); cp_set_surface_sync(dev_priv, R600_SH_ACTION_ENA, 512, gpu_addr); } static inline void set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) { uint32_t sq_vtx_constant_word2; RING_LOCALS; DRM_DEBUG("\n"); sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); BEGIN_RING(9); OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); OUT_RING(0x460); OUT_RING(gpu_addr & 0xffffffff); OUT_RING(48 - 1); OUT_RING(sq_vtx_constant_word2); OUT_RING(1 << 0); OUT_RING(0); OUT_RING(0); OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30); ADVANCE_RING(); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)) cp_set_surface_sync(dev_priv, 
R600_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(dev_priv, R600_VC_ACTION_ENA, 48, gpu_addr); } static inline void set_tex_resource(drm_radeon_private_t *dev_priv, int format, int w, int h, int pitch, u64 gpu_addr) { uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; RING_LOCALS; DRM_DEBUG("\n"); if (h < 1) h = 1; sq_tex_resource_word0 = (1 << 0); sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | ((w - 1) << 19)); sq_tex_resource_word1 = (format << 26); sq_tex_resource_word1 |= ((h - 1) << 0); sq_tex_resource_word4 = ((1 << 14) | (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25)); BEGIN_RING(9); OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); OUT_RING(0); OUT_RING(sq_tex_resource_word0); OUT_RING(sq_tex_resource_word1); OUT_RING(gpu_addr >> 8); OUT_RING(gpu_addr >> 8); OUT_RING(sq_tex_resource_word4); OUT_RING(0); OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30); ADVANCE_RING(); } static inline void set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(12); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((x1 << 0) | (y1 << 16)); OUT_RING((x2 << 0) | (y2 << 16)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); - OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31)); + OUT_RING((x1 << 0) | (y1 << 16) | (1U << 31)); OUT_RING((x2 << 0) | (y2 << 16)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); - OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31)); + OUT_RING((x1 << 0) | (y1 << 16) | (1U << 31)); OUT_RING((x2 << 0) | (y2 << 16)); ADVANCE_RING(); } static inline void draw_auto(drm_radeon_private_t *dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(10); OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(DI_PT_RECTLIST); OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); OUT_RING(DI_INDEX_SIZE_16_BIT); OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); OUT_RING(1); OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1)); OUT_RING(3); OUT_RING(DI_SRC_SEL_AUTO_INDEX); ADVANCE_RING(); COMMIT_RING(); } static inline void set_default_state(drm_radeon_private_t *dev_priv) { int default_state_dw, i; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; RING_LOCALS; switch ((dev_priv->flags & RADEON_FAMILY_MASK)) { case CHIP_R600: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV630: case CHIP_RV635: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 40; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV610: case CHIP_RV620: case CHIP_RS780: case CHIP_RS880: 
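/* RV610/RV620/RS780/RS880 share the resource split of the default case below */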
default: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV670: num_ps_gprs = 144; num_vs_gprs = 40; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV770: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 256; num_vs_stack_entries = 256; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV730: case CHIP_RV740: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV710: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 48; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; } if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)) sq_config = 0; else sq_config = R600_VC_ENABLE; sq_config |= (R600_DX9_CONSTS | R600_ALU_INST_PREFER_VECTOR | R600_PS_PRIO(0) | R600_VS_PRIO(1) | R600_GS_PRIO(2) | R600_ES_PRIO(3)); sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) | R600_NUM_VS_GPRS(num_vs_gprs) | R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) | R600_NUM_ES_GPRS(num_es_gprs)); sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) | R600_NUM_VS_THREADS(num_vs_threads) | R600_NUM_GS_THREADS(num_gs_threads) | R600_NUM_ES_THREADS(num_es_threads)); sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries)); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { default_state_dw = sizeof(r7xx_default_state) / 4; BEGIN_RING(default_state_dw + 10); for (i = 0; i < default_state_dw; i++) OUT_RING(r7xx_default_state[i]); } else { default_state_dw = sizeof(r6xx_default_state) / 4; BEGIN_RING(default_state_dw + 10); for (i = 0; i < default_state_dw; i++) OUT_RING(r6xx_default_state[i]); } OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT); /* SQ config */ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6)); OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(sq_config); OUT_RING(sq_gpr_resource_mgmt_1); OUT_RING(sq_gpr_resource_mgmt_2); OUT_RING(sq_thread_resource_mgmt); OUT_RING(sq_stack_resource_mgmt_1); OUT_RING(sq_stack_resource_mgmt_2); ADVANCE_RING(); } static inline uint32_t i2f(uint32_t input) { u32 result, i, exponent, 
fraction; if ((input & 0x3fff) == 0) result = 0; /* 0 is a special case */ else { exponent = 140; /* exponent biased by 127 */ fraction = (input & 0x3fff) << 10; /* cheat and only handle numbers below 2^15 */ for (i = 0; i < 14; i++) { if (fraction & 0x800000) break; else { fraction = fraction << 1; /* keep shifting left until top bit = 1 */ exponent = exponent - 1; } } result = exponent << 23 | (fraction & 0x7fffff); /* mask off top bit; assumed 1 */ } return result; } int r600_prepare_blit_copy(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); dev_priv->blit_vb = radeon_freelist_get(dev); if (!dev_priv->blit_vb) { DRM_ERROR("Unable to allocate vertex buffer for blit\n"); return -EAGAIN; } set_default_state(dev_priv); set_shaders(dev); return 0; } void r600_done_blit_copy(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(5); OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT); /* wait for 3D idle clean */ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN); ADVANCE_RING(); COMMIT_RING(); dev_priv->blit_vb->used = 0; radeon_cp_discard_buffer(dev, dev_priv->blit_vb); } void r600_blit_copy(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int size_bytes) { drm_radeon_private_t *dev_priv = dev->dev_private; int max_bytes; u64 vb_addr; u32 *vb; vb = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + dev_priv->blit_vb->used); DRM_DEBUG("src=0x%016jx, dst=0x%016jx, size=%d\n", src_gpu_addr, dst_gpu_addr, size_bytes); if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { max_bytes = 8192; while (size_bytes) { int cur_size = size_bytes; int src_x = src_gpu_addr & 255; int dst_x = dst_gpu_addr & 255; int h = 1; src_gpu_addr = src_gpu_addr & ~255; dst_gpu_addr = dst_gpu_addr & ~255; if (!src_x && !dst_x) { h = (cur_size / max_bytes); if (h > 8192) h = 8192; if (h == 0) h = 1; else cur_size = max_bytes; } else { if (cur_size > max_bytes) cur_size = max_bytes; if (cur_size > (max_bytes - dst_x)) cur_size = (max_bytes - dst_x); if (cur_size > (max_bytes - src_x)) cur_size = (max_bytes - src_x); } if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { dev_priv->blit_vb->used = 0; radeon_cp_discard_buffer(dev, dev_priv->blit_vb); dev_priv->blit_vb = radeon_freelist_get(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); vb = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + dev_priv->blit_vb->used); } vb[0] = i2f(dst_x); vb[1] = 0; vb[2] = i2f(src_x); vb[3] = 0; vb[4] = i2f(dst_x); vb[5] = i2f(h); vb[6] = i2f(src_x); vb[7] = i2f(h); vb[8] = i2f(dst_x + cur_size); vb[9] = i2f(h); vb[10] = i2f(src_x + cur_size); vb[11] = i2f(h); /* src */ set_tex_resource(dev_priv, FMT_8, src_x + cur_size, h, src_x + cur_size, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst */ set_render_target(dev_priv, COLOR_8, dst_x + cur_size, h, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, cur_size * h,
dst_gpu_addr); vb += 12; dev_priv->blit_vb->used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } } else { max_bytes = 8192 * 4; while (size_bytes) { int cur_size = size_bytes; int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; src_gpu_addr = src_gpu_addr & ~255; dst_gpu_addr = dst_gpu_addr & ~255; if (!src_x && !dst_x) { h = (cur_size / max_bytes); if (h > 8192) h = 8192; if (h == 0) h = 1; else cur_size = max_bytes; } else { if (cur_size > max_bytes) cur_size = max_bytes; if (cur_size > (max_bytes - dst_x)) cur_size = (max_bytes - dst_x); if (cur_size > (max_bytes - src_x)) cur_size = (max_bytes - src_x); } if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { dev_priv->blit_vb->used = 0; radeon_cp_discard_buffer(dev, dev_priv->blit_vb); dev_priv->blit_vb = radeon_freelist_get(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); vb = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + dev_priv->blit_vb->used); } vb[0] = i2f(dst_x / 4); vb[1] = 0; vb[2] = i2f(src_x / 4); vb[3] = 0; vb[4] = i2f(dst_x / 4); vb[5] = i2f(h); vb[6] = i2f(src_x / 4); vb[7] = i2f(h); vb[8] = i2f((dst_x + cur_size) / 4); vb[9] = i2f(h); vb[10] = i2f((src_x + cur_size) / 4); vb[11] = i2f(h); /* src */ set_tex_resource(dev_priv, FMT_8_8_8_8, (src_x + cur_size) / 4, h, (src_x + cur_size) / 4, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst */ set_render_target(dev_priv, COLOR_8_8_8_8, (dst_x + cur_size) / 4, h, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, cur_size * h, dst_gpu_addr); vb += 12; dev_priv->blit_vb->used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } } } void r600_blit_swap(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int sx, int sy, int dx, int dy, int w, int h, int src_pitch, int dst_pitch, int cpp) { drm_radeon_private_t *dev_priv = dev->dev_private; int cb_format, tex_format; int sx2, sy2, dx2, dy2; u64 vb_addr; u32 *vb; if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { dev_priv->blit_vb->used = 0; radeon_cp_discard_buffer(dev, dev_priv->blit_vb); dev_priv->blit_vb = radeon_freelist_get(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); } vb = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + dev_priv->blit_vb->used); sx2 = sx + w; sy2 = sy + h; dx2 = dx + w; dy2 = dy + h; vb[0] = i2f(dx); vb[1] = i2f(dy); vb[2] = i2f(sx); vb[3] = i2f(sy); vb[4] = i2f(dx); vb[5] = i2f(dy2); vb[6] = i2f(sx); vb[7] = i2f(sy2); vb[8] = i2f(dx2); vb[9] = i2f(dy2); vb[10] = i2f(sx2); vb[11] = i2f(sy2); switch(cpp) { case 4: cb_format = COLOR_8_8_8_8; tex_format = FMT_8_8_8_8; break; case 2: cb_format = COLOR_5_6_5; tex_format = FMT_5_6_5; break; default: cb_format = COLOR_8; tex_format = FMT_8; break; } /* src */ set_tex_resource(dev_priv, tex_format, src_pitch / cpp, sy2, src_pitch / cpp, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr); /* dst */ set_render_target(dev_priv, cb_format, dst_pitch / cpp, dy2, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, dx, 
dy, dx2, dy2); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, dst_pitch * dy2, dst_gpu_addr); dev_priv->blit_vb->used += 12 * 4; } Index: head/sys/dev/drm/radeon_cp.c =================================================================== --- head/sys/dev/drm/radeon_cp.c (revision 258779) +++ head/sys/dev/drm/radeon_cp.c (revision 258780) @@ -1,2130 +1,2130 @@ /*- * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * Copyright 2007 Advanced Micro Devices, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Kevin E. 
Martin * Gareth Hughes */ #include __FBSDID("$FreeBSD$"); #include "dev/drm/drmP.h" #include "dev/drm/drm.h" #include "dev/drm/drm_sarea.h" #include "dev/drm/radeon_drm.h" #include "dev/drm/radeon_drv.h" #include "dev/drm/r300_reg.h" #include "dev/drm/radeon_microcode.h" #define RADEON_FIFO_DEBUG 0 static int radeon_do_cleanup_cp(struct drm_device * dev); static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off) { u32 val; if (dev_priv->flags & RADEON_IS_AGP) { val = DRM_READ32(dev_priv->ring_rptr, off); } else { val = *(((volatile u32 *) dev_priv->ring_rptr->virtual) + (off / sizeof(u32))); val = le32_to_cpu(val); } return val; } u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv) { if (dev_priv->writeback_works) return radeon_read_ring_rptr(dev_priv, 0); else { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_CP_RB_RPTR); else return RADEON_READ(RADEON_CP_RB_RPTR); } } void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val) { if (dev_priv->flags & RADEON_IS_AGP) DRM_WRITE32(dev_priv->ring_rptr, off, val); else *(((volatile u32 *) dev_priv->ring_rptr->virtual) + (off / sizeof(u32))) = cpu_to_le32(val); } void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val) { radeon_write_ring_rptr(dev_priv, 0, val); } u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index) { if (dev_priv->writeback_works) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(index)); else return radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(index)); } else { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_SCRATCH_REG0 + 4*index); else return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index); } } u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr) { u32 ret; if (addr < 0x10000) ret = DRM_READ32(dev_priv->mmio, addr); else { DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr); ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA); } return ret; } static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); ret = RADEON_READ(R520_MC_IND_DATA); RADEON_WRITE(R520_MC_IND_INDEX, 0); return ret; } static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); ret = RADEON_READ(RS480_NB_MC_DATA); RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); return ret; } static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); ret = RADEON_READ(RS690_MC_DATA); RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); return ret; } static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); ret = RADEON_READ(RS600_MC_DATA); return ret; } static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) return RS690_READ_MCIND(dev_priv, addr); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) return RS600_READ_MCIND(dev_priv, addr); else return RS480_READ_MCIND(dev_priv, addr); } u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) return RADEON_READ(R700_MC_VM_FB_LOCATION); else if 
((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_MC_VM_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); else return RADEON_READ(RADEON_MC_FB_LOCATION); } static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); else RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); } void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) { /* R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */ RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */ RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); else RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); } void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) { u32 agp_base_hi = upper_32_bits(agp_base); u32 agp_base_lo = agp_base & 0xffffffff; u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff; /* R6xx/R7xx must be aligned to a 4MB boundary */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
{ RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo); RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); } else { RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); } } void radeon_enable_bm(struct drm_radeon_private *dev_priv) { u32 tmp; /* Turn on bus mastering */ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { /* rs600/rs690/rs740 */ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); } /* PCIE cards appears to not need this */ } static int RADEON_READ_PLL(struct drm_device * dev, int addr) { drm_radeon_private_t *dev_priv = dev->dev_private; RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); return RADEON_READ(RADEON_CLOCK_CNTL_DATA); } static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) { RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); return RADEON_READ(RADEON_PCIE_DATA); } #if RADEON_FIFO_DEBUG static void radeon_status(drm_radeon_private_t * dev_priv) { printk("%s:\n", __func__); printk("RBBM_STATUS = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); printk("CP_RB_RTPR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR)); printk("CP_RB_WTPR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR)); printk("AIC_CNTL = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_CNTL)); printk("AIC_STAT = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_STAT)); printk("AIC_PT_BASE = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE)); printk("TLB_ADDR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR)); printk("TLB_DATA = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA)); } #endif /* ================================================================ * Engine, FIFO control */ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) { u32 tmp; int i; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT); tmp |= RADEON_RB3D_DC_FLUSH_ALL; RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp); for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) & RADEON_RB3D_DC_BUSY)) { return 0; } DRM_UDELAY(1); } } else { /* don't flush or purge cache here or lockup */ return 0; } #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) { int i; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; for (i = 0; i < dev_priv->usec_timeout; i++) { int slots = (RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK); if (slots >= entries) 
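/*
 * The PLL and PCIE read helpers above use the classic index/data pair:
 * write a register index into one MMIO cell, then read the payload from
 * its data companion. A sketch of the shared pattern, with a hypothetical
 * helper name:
 */
#if 0
static u32 radeon_read_indexed(drm_radeon_private_t *dev_priv,
    int index_reg, int data_reg, int index_mask, int addr)
{
	RADEON_WRITE8(index_reg, addr & index_mask);	/* select register */
	return RADEON_READ(data_reg);			/* fetch contents */
}
/* RADEON_READ_PLL(dev, addr) corresponds to radeon_read_indexed(dev_priv,
 * RADEON_CLOCK_CNTL_INDEX, RADEON_CLOCK_CNTL_DATA, 0x1f, addr) */
#endif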
return 0; DRM_UDELAY(1); } DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n", RADEON_READ(RADEON_RBBM_STATUS), RADEON_READ(R300_VAP_CNTL_STATUS)); #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) { int i, ret; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ret = radeon_do_wait_for_fifo(dev_priv, 64); if (ret) return ret; for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)) { radeon_do_pixcache_flush(dev_priv); return 0; } DRM_UDELAY(1); } DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n", RADEON_READ(RADEON_RBBM_STATUS), RADEON_READ(R300_VAP_CNTL_STATUS)); #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static void radeon_init_pipes(drm_radeon_private_t *dev_priv) { uint32_t gb_tile_config, gb_pipe_sel = 0; if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2); if ((z_pipe_sel & 3) == 3) dev_priv->num_z_pipes = 2; else dev_priv->num_z_pipes = 1; } else dev_priv->num_z_pipes = 1; /* RS4xx/RS6xx/R4xx/R5xx */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; } else { /* R3xx */ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { dev_priv->num_gb_pipes = 2; } else { /* R3Vxx */ dev_priv->num_gb_pipes = 1; } } DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes); gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); switch (dev_priv->num_gb_pipes) { case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; default: case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; } if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); } RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); radeon_do_wait_for_idle(dev_priv); RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG); RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) | R300_DC_AUTOFLUSH_ENABLE | R300_DC_DC_DISABLE_IGNORE_PE)); } /* ================================================================ * CP control, initialization */ /* Load the microcode for the CP */ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv) { const u32 (*cp)[2]; int i; DRM_DEBUG("\n"); switch (dev_priv->flags & RADEON_FAMILY_MASK) { case CHIP_R100: case CHIP_RV100: case CHIP_RV200: case CHIP_RS100: case CHIP_RS200: DRM_INFO("Loading R100 Microcode\n"); cp = R100_cp_microcode; break; case CHIP_R200: case CHIP_RV250: case CHIP_RV280: case CHIP_RS300: DRM_INFO("Loading R200 Microcode\n"); cp = R200_cp_microcode; break; case CHIP_R300: case CHIP_R350: case CHIP_RV350: case CHIP_RV380: case CHIP_RS400: case CHIP_RS480: DRM_INFO("Loading R300 Microcode\n"); cp = R300_cp_microcode; break; case CHIP_R420: case CHIP_R423: case CHIP_RV410: DRM_INFO("Loading R400 Microcode\n"); cp = R420_cp_microcode; break; case CHIP_RS690: case CHIP_RS740: DRM_INFO("Loading RS690/RS740 Microcode\n"); cp = RS690_cp_microcode; break; case CHIP_RS600: 
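/*
 * The lone hunk in this file sits in radeon_do_cp_flush() just below and
 * rewrites (1 << 31) as (1U << 31) inside an #if 0 block. Left-shifting 1
 * into the sign bit of a 32-bit int is undefined behaviour in C (the
 * result is not representable), so the well-defined unsigned form is
 * used. Before/after sketch:
 */
#if 0
	u32 tmp;

	tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);  /* UB: signed overflow */
	tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1U << 31); /* well defined */
#endif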
DRM_INFO("Loading RS600 Microcode\n"); cp = RS600_cp_microcode; break; case CHIP_RV515: case CHIP_R520: case CHIP_RV530: case CHIP_R580: case CHIP_RV560: case CHIP_RV570: DRM_INFO("Loading R500 Microcode\n"); cp = R520_cp_microcode; break; default: return; } radeon_do_wait_for_idle(dev_priv); RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); for (i = 0; i != 256; i++) { RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, cp[i][1]); RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, cp[i][0]); } } /* Flush any pending commands to the CP. This should only be used just * prior to a wait for idle, as it informs the engine that the command * stream is ending. */ static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) { DRM_DEBUG("\n"); #if 0 u32 tmp; - tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); + tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1U << 31); RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); #endif } /* Wait for the CP to go idle. */ int radeon_do_cp_idle(drm_radeon_private_t * dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(6); RADEON_PURGE_CACHE(); RADEON_PURGE_ZCACHE(); RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); COMMIT_RING(); return radeon_do_wait_for_idle(dev_priv); } /* Start the Command Processor. */ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); radeon_do_wait_for_idle(dev_priv); RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode); dev_priv->cp_running = 1; BEGIN_RING(8); /* isync can only be written through cp on r5xx write it here */ OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI); RADEON_PURGE_CACHE(); RADEON_PURGE_ZCACHE(); RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); COMMIT_RING(); dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; } /* Reset the Command Processor. This will not flush any pending * commands, so you must wait for the CP command stream to complete * before calling this routine. */ static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) { u32 cur_read_ptr; DRM_DEBUG("\n"); cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); SET_RING_HEAD(dev_priv, cur_read_ptr); dev_priv->ring.tail = cur_read_ptr; } /* Stop the Command Processor. This will not flush any pending * commands, so you must flush the command stream and wait for the CP * to go idle before calling this routine. */ static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) { DRM_DEBUG("\n"); RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); dev_priv->cp_running = 0; } /* Reset the engine. This will stop the CP if it is running. 
*/ static int radeon_do_engine_reset(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset; DRM_DEBUG("\n"); radeon_do_pixcache_flush(dev_priv); if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { /* may need something similar for newer chips */ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | RADEON_FORCEON_MCLKA | RADEON_FORCEON_MCLKB | RADEON_FORCEON_YCLKA | RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC | RADEON_FORCEON_AIC)); } rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | RADEON_SOFT_RESET_CP | RADEON_SOFT_RESET_HI | RADEON_SOFT_RESET_SE | RADEON_SOFT_RESET_RE | RADEON_SOFT_RESET_PP | RADEON_SOFT_RESET_E2 | RADEON_SOFT_RESET_RB)); RADEON_READ(RADEON_RBBM_SOFT_RESET); RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & ~(RADEON_SOFT_RESET_CP | RADEON_SOFT_RESET_HI | RADEON_SOFT_RESET_SE | RADEON_SOFT_RESET_RE | RADEON_SOFT_RESET_PP | RADEON_SOFT_RESET_E2 | RADEON_SOFT_RESET_RB))); RADEON_READ(RADEON_RBBM_SOFT_RESET); if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); } /* setup the raster pipes */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) radeon_init_pipes(dev_priv); /* Reset the CP ring */ radeon_do_cp_reset(dev_priv); /* The CP is no longer running after an engine reset */ dev_priv->cp_running = 0; /* Reset any pending vertex, indirect buffers */ radeon_freelist_reset(dev); return 0; } static void radeon_cp_init_ring_buffer(struct drm_device * dev, drm_radeon_private_t *dev_priv, struct drm_file *file_priv) { u32 ring_start, cur_read_ptr; /* Initialize the memory controller. With new memory map, the fb location * is not changed, it should have been properly initialized already. 
Part * of the problem is that the code below is bogus, assuming the GART is * always appended to the fb which is not necessarily the case */ if (!dev_priv->new_memmap) radeon_write_fb_location(dev_priv, ((dev_priv->gart_vm_start - 1) & 0xffff0000) | (dev_priv->fb_location >> 16)); #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { radeon_write_agp_base(dev_priv, dev->agp->base); radeon_write_agp_location(dev_priv, (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 0xffff0000) | (dev_priv->gart_vm_start >> 16))); ring_start = (dev_priv->cp_ring->offset - dev->agp->base + dev_priv->gart_vm_start); } else #endif ring_start = (dev_priv->cp_ring->offset - dev->sg->vaddr + dev_priv->gart_vm_start); RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); /* Set the write pointer delay */ RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0); /* Initialize the ring buffer's read and write pointers */ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); SET_RING_HEAD(dev_priv, cur_read_ptr); dev_priv->ring.tail = cur_read_ptr; #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset - dev->agp->base + dev_priv->gart_vm_start); } else #endif { RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset - dev->sg->vaddr + dev_priv->gart_vm_start); } /* Set ring buffer size */ #ifdef __BIG_ENDIAN RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_BUF_SWAP_32BIT | (dev_priv->ring.fetch_size_l2ow << 18) | (dev_priv->ring.rptr_update_l2qw << 8) | dev_priv->ring.size_l2qw); #else RADEON_WRITE(RADEON_CP_RB_CNTL, (dev_priv->ring.fetch_size_l2ow << 18) | (dev_priv->ring.rptr_update_l2qw << 8) | dev_priv->ring.size_l2qw); #endif /* Initialize the scratch register pointer. This will cause * the scratch register values to be written out to memory * whenever they are updated. 
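 *
 * (A worked example of the CP_RB_CNTL packing just above, assuming a
 * 1 MB ring: size_l2qw = drm_order(0x100000 / 8) = 17, rptr_update_l2qw
 * = drm_order(4096 / 8) = 9 and fetch_size_l2ow = drm_order(32 / 16) = 1,
 * so the register value is (1 << 18) | (9 << 8) | 17, with
 * RADEON_BUF_SWAP_32BIT OR'd in on big-endian hosts.)
 *
 * RADEON_SCRATCH_UMSK is a per-register writeback enable: the 0x7
 * written below restricts memory updates to scratch registers 0-2,
 * which back the last_frame, last_dispatch and last_clear ages.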
* * We simply put this behind the ring read pointer, this works * with PCI GART as well as (whatever kind of) AGP GART */ RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) + RADEON_SCRATCH_REG_OFFSET); RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); radeon_enable_bm(dev_priv); radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0); RADEON_WRITE(RADEON_LAST_FRAME_REG, 0); radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0); RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0); radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0); RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); /* reset sarea copies of these */ if (dev_priv->sarea_priv) { dev_priv->sarea_priv->last_frame = 0; dev_priv->sarea_priv->last_dispatch = 0; dev_priv->sarea_priv->last_clear = 0; } radeon_do_wait_for_idle(dev_priv); /* Sync everything up */ RADEON_WRITE(RADEON_ISYNC_CNTL, (RADEON_ISYNC_ANY2D_IDLE3D | RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI)); } static void radeon_test_writeback(drm_radeon_private_t * dev_priv) { u32 tmp; /* Start with assuming that writeback doesn't work */ dev_priv->writeback_works = 0; /* Writeback doesn't seem to work everywhere, test it here and possibly * enable it if it appears to work */ radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0); RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { u32 val; val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1)); if (val == 0xdeadbeef) break; DRM_UDELAY(1); } if (tmp < dev_priv->usec_timeout) { dev_priv->writeback_works = 1; DRM_INFO("writeback test succeeded in %d usecs\n", tmp); } else { dev_priv->writeback_works = 0; DRM_INFO("writeback test failed\n"); } if (radeon_no_wb == 1) { dev_priv->writeback_works = 0; DRM_INFO("writeback forced off\n"); } if (!dev_priv->writeback_works) { /* Disable writeback to avoid unnecessary bus master transfer */ RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE); RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); } } /* Enable or disable IGP GART on the chip */ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) { u32 temp; if (on) { DRM_DEBUG("programming igp gart %08X %08lX %08X\n", dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr, dev_priv->gart_size); temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN)); else IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | RS480_VA_SIZE_32MB)); temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | RS480_TLB_ENABLE | RS480_GTW_LAC_EN | RS480_1LEVEL_GART)); temp = dev_priv->gart_info.bus_addr & 0xfffff000; temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4; IGP_WRITE_MCIND(RS480_GART_BASE, temp); temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL); IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS)); radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); dev_priv->gart_size = 32*1024*1024; temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 0xffff0000) | (dev_priv->gart_vm_start >> 16)); radeon_write_agp_location(dev_priv, temp); temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); 
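/*
 * A sketch (hypothetical helper, not in this driver) of the
 * RS480_GART_BASE packing used above: the low 32 bits are kept
 * 4 KB page aligned, and address bits [39:32] of a >4 GB table base
 * fold into register bits [11:4].
 */
#if 0
static u32 rs480_pack_gart_base(u64 bus_addr)
{
	u32 v = (u32)bus_addr & 0xfffff000;		/* page-aligned low word */
	v |= (u32)((bus_addr >> 32) & 0xff) << 4;	/* address bits [39:32] */
	return v;
}
#endif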
IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | RS480_VA_SIZE_32MB)); do { temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) break; DRM_UDELAY(1); } while (1); IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE); do { temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) break; DRM_UDELAY(1); } while (1); IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); } else { IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0); } } /* Enable or disable IGP GART on the chip */ static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on) { u32 temp; int i; if (on) { DRM_DEBUG("programming igp gart %08X %08lX %08X\n", dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr, dev_priv->gart_size); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); for (i = 0; i < 19; i++) IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i, (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | RS600_SYSTEM_ACCESS_MODE_IN_SYS | RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH | RS600_EFFECTIVE_L1_CACHE_SIZE(3) | RS600_ENABLE_FRAGMENT_PROCESSING | RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); /* disable all other contexts */ for (i = 1; i < 8; i++) IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); /* setup the page table aperture */ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, dev_priv->gart_info.bus_addr); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, dev_priv->gart_vm_start); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1)); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); /* setup the system aperture */ IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start); IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1)); /* enable page tables */ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT)); temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1); IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES)); /* invalidate the cache */ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); } else { IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0); temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1); temp &= ~RS600_ENABLE_PAGE_TABLES; IGP_WRITE_MCIND(RS600_MC_CNTL1, temp); } } static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) { u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); if (on) { DRM_DEBUG("programming pcie %08X %08lX %08X\n", dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr, dev_priv->gart_size); RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, dev_priv->gart_vm_start); RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, dev_priv->gart_info.bus_addr); RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, dev_priv->gart_vm_start); 
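/*
 * The RS600 path above invalidates the GART L1 TLBs and L2 cache by
 * pulsing the invalidate bits low-high-low in MC_PT0_CNTL, with a final
 * read to post the writes. Compressed:
 */
#if 0
	temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
	temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);		/* de-assert */
	IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp |
	    RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);		/* de-assert */
	(void)IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);	/* posting read */
#endif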
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, dev_priv->gart_vm_start + dev_priv->gart_size - 1); radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, RADEON_PCIE_TX_GART_EN); } else { RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); } } /* Enable or disable PCI GART on the chip */ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) { u32 tmp; if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) || (dev_priv->flags & RADEON_IS_IGPGART)) { radeon_set_igpgart(dev_priv, on); return; } if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { rs600_set_igpgart(dev_priv, on); return; } if (dev_priv->flags & RADEON_IS_PCIE) { radeon_set_pciegart(dev_priv, on); return; } tmp = RADEON_READ(RADEON_AIC_CNTL); if (on) { RADEON_WRITE(RADEON_AIC_CNTL, tmp | RADEON_PCIGART_TRANSLATE_EN); /* set PCI GART page-table base address */ RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr); /* set address range for PCI address translate */ RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start); RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start + dev_priv->gart_size - 1); /* Turn off AGP aperture -- is this required for PCI GART? */ radeon_write_agp_location(dev_priv, 0xffffffc0); RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ } else { RADEON_WRITE(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); } } static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv) { struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info; struct radeon_virt_surface *vp; int i; for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) { if (!dev_priv->virt_surfaces[i].file_priv || dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV) break; } if (i >= 2 * RADEON_MAX_SURFACES) return -ENOMEM; vp = &dev_priv->virt_surfaces[i]; for (i = 0; i < RADEON_MAX_SURFACES; i++) { struct radeon_surface *sp = &dev_priv->surfaces[i]; if (sp->refcount) continue; vp->surface_index = i; vp->lower = gart_info->bus_addr; vp->upper = vp->lower + gart_info->table_size; vp->flags = 0; vp->file_priv = PCIGART_FILE_PRIV; sp->refcount = 1; sp->lower = vp->lower; sp->upper = vp->upper; sp->flags = 0; RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags); RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower); RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper); return 0; } return -ENOMEM; } static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); /* if we require new memory map but we don't have it fail */ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { DRM_DEBUG("Forcing AGP card to PCI mode\n"); dev_priv->flags &= ~RADEON_IS_AGP; } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) && !init->is_pci) { DRM_DEBUG("Restoring AGP flag\n"); dev_priv->flags |= RADEON_IS_AGP; } if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { DRM_ERROR("PCI GART memory not allocated!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev_priv->usec_timeout = init->usec_timeout; if (dev_priv->usec_timeout < 1 || dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { DRM_DEBUG("TIMEOUT 
problem!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } /* Enable vblank on CRTC1 for older X servers */ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; switch(init->func) { case RADEON_INIT_R200_CP: dev_priv->microcode_version = UCODE_R200; break; case RADEON_INIT_R300_CP: dev_priv->microcode_version = UCODE_R300; break; default: dev_priv->microcode_version = UCODE_R100; } dev_priv->do_boxes = 0; dev_priv->cp_mode = init->cp_mode; /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring * read pointer. */ if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); radeon_do_cleanup_cp(dev); return -EINVAL; } switch (init->fb_bpp) { case 16: dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; break; case 32: default: dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; break; } dev_priv->front_offset = init->front_offset; dev_priv->front_pitch = init->front_pitch; dev_priv->back_offset = init->back_offset; dev_priv->back_pitch = init->back_pitch; switch (init->depth_bpp) { case 16: dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; break; case 32: default: dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; break; } dev_priv->depth_offset = init->depth_offset; dev_priv->depth_pitch = init->depth_pitch; /* Hardware state for depth clears. Remove this if/when we no * longer clear the depth buffer with a 3D rectangle. Hard-code * all values to prevent unwanted 3D state from slipping through * and screwing with the clear operation. */ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | (dev_priv->color_fmt << 10) | (dev_priv->microcode_version == UCODE_R100 ? RADEON_ZBLOCK16 : 0)); dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | RADEON_Z_TEST_ALWAYS | RADEON_STENCIL_TEST_ALWAYS | RADEON_STENCIL_S_FAIL_REPLACE | RADEON_STENCIL_ZPASS_REPLACE | RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE); dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | RADEON_BFACE_SOLID | RADEON_FFACE_SOLID | RADEON_FLAT_SHADE_VTX_LAST | RADEON_DIFFUSE_SHADE_FLAT | RADEON_ALPHA_SHADE_FLAT | RADEON_SPECULAR_SHADE_FLAT | RADEON_FOG_SHADE_FLAT | RADEON_VTX_PIX_CENTER_OGL | RADEON_ROUND_MODE_TRUNC | RADEON_ROUND_PREC_8TH_PIX); dev_priv->ring_offset = init->ring_offset; dev_priv->ring_rptr_offset = init->ring_rptr_offset; dev_priv->buffers_offset = init->buffers_offset; dev_priv->gart_textures_offset = init->gart_textures_offset; dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); if (!dev_priv->cp_ring) { DRM_ERROR("could not find cp ring region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); if (!dev_priv->ring_rptr) { DRM_ERROR("could not find ring read pointer!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } if (init->gart_textures_offset) { dev_priv->gart_textures = drm_core_findmap(dev, init->gart_textures_offset); if (!dev_priv->gart_textures) { DRM_ERROR("could not find GART texture region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } } dev_priv->sarea_priv = 
(drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset); #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { drm_core_ioremap_wc(dev_priv->cp_ring, dev); drm_core_ioremap_wc(dev_priv->ring_rptr, dev); drm_core_ioremap_wc(dev->agp_buffer_map, dev); if (!dev_priv->cp_ring->virtual || !dev_priv->ring_rptr->virtual || !dev->agp_buffer_map->virtual) { DRM_ERROR("could not find ioremap agp regions!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } } else #endif { dev_priv->cp_ring->virtual = (void *)(unsigned long)dev_priv->cp_ring->offset; dev_priv->ring_rptr->virtual = (void *)(unsigned long)dev_priv->ring_rptr->offset; dev->agp_buffer_map->virtual = (void *)(unsigned long)dev->agp_buffer_map->offset; DRM_DEBUG("dev_priv->cp_ring->virtual %p\n", dev_priv->cp_ring->virtual); DRM_DEBUG("dev_priv->ring_rptr->virtual %p\n", dev_priv->ring_rptr->virtual); DRM_DEBUG("dev->agp_buffer_map->virtual %p\n", dev->agp_buffer_map->virtual); } dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; dev_priv->fb_size = ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) - dev_priv->fb_location; dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | ((dev_priv->front_offset + dev_priv->fb_location) >> 10)); dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) | ((dev_priv->back_offset + dev_priv->fb_location) >> 10)); dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) | ((dev_priv->depth_offset + dev_priv->fb_location) >> 10)); dev_priv->gart_size = init->gart_size; /* New let's set the memory map ... */ if (dev_priv->new_memmap) { u32 base = 0; DRM_INFO("Setting GART location based on new memory map\n"); /* If using AGP, try to locate the AGP aperture at the same * location in the card and on the bus, though we have to * align it down. 
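 *
 * (The align-down is to a 4 MB boundary: gart_vm_start = base &
 * 0xffc00000u, per the code below. For reference, the framebuffer decode
 * above unpacks RADEON_MC_FB_LOCATION as two 16-bit fields, bottom half
 * = base >> 16, top half bounding the aperture in 64 KB units; e.g. a
 * register value of 0xd7ffd000 decodes to fb_location 0xd0000000 and
 * fb_size 0x08000000, i.e. 128 MB.)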
*/ #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { base = dev->agp->base; /* Check if valid */ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", dev->agp->base); base = 0; } } #endif /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ if (base == 0) { base = dev_priv->fb_location + dev_priv->fb_size; if (base < dev_priv->fb_location || ((base + dev_priv->gart_size) & 0xfffffffful) < base) base = dev_priv->fb_location - dev_priv->gart_size; } dev_priv->gart_vm_start = base & 0xffc00000u; if (dev_priv->gart_vm_start != base) DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", base, dev_priv->gart_vm_start); } else { DRM_INFO("Setting GART location based on old memory map\n"); dev_priv->gart_vm_start = dev_priv->fb_location + RADEON_READ(RADEON_CONFIG_APER_SIZE); } #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset - dev->agp->base + dev_priv->gart_vm_start); else #endif dev_priv->gart_buffers_offset = dev->agp_buffer_map->offset - dev->sg->vaddr + dev_priv->gart_vm_start; DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start); DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n", dev_priv->gart_buffers_offset); dev_priv->ring.start = (u32 *) dev_priv->cp_ring->virtual; dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->virtual + init->ring_size / sizeof(u32)); dev_priv->ring.size = init->ring_size; dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); dev_priv->ring.fetch_size = /* init->fetch_size */ 32; dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); } else #endif { u32 sctrl; int ret; dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); /* if we have an offset set from userspace */ if (dev_priv->pcigart_offset_set) { dev_priv->gart_info.bus_addr = dev_priv->pcigart_offset + dev_priv->fb_location; dev_priv->gart_info.mapping.offset = dev_priv->pcigart_offset + dev_priv->fb_aper_offset; dev_priv->gart_info.mapping.size = dev_priv->gart_info.table_size; drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = dev_priv->gart_info.mapping.virtual; if (dev_priv->flags & RADEON_IS_PCIE) dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; else dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB; DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", dev_priv->gart_info.addr, dev_priv->pcigart_offset); } else { if (dev_priv->flags & RADEON_IS_IGPGART) dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; else dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; dev_priv->gart_info.addr = NULL; dev_priv->gart_info.bus_addr = 0; if (dev_priv->flags & RADEON_IS_PCIE) { DRM_ERROR ("Cannot use PCI Express without GART in FB memory\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } } sctrl = RADEON_READ(RADEON_SURFACE_CNTL); RADEON_WRITE(RADEON_SURFACE_CNTL, 0); if ((dev_priv->flags & 
RADEON_FAMILY_MASK) == CHIP_RS600) ret = r600_page_table_init(dev); else ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info); RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl); if (!ret) { DRM_ERROR("failed to init PCI GART!\n"); radeon_do_cleanup_cp(dev); return -ENOMEM; } ret = radeon_setup_pcigart_surface(dev_priv); if (ret) { DRM_ERROR("failed to setup GART surface!\n"); if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) r600_page_table_cleanup(dev, &dev_priv->gart_info); else drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info); radeon_do_cleanup_cp(dev); return ret; } /* Turn on PCI GART */ radeon_set_pcigart(dev_priv, 1); } radeon_cp_load_microcode(dev_priv); radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); dev_priv->last_buf = 0; radeon_do_engine_reset(dev); radeon_test_writeback(dev_priv); return 0; } static int radeon_do_cleanup_cp(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. */ if (dev->irq_enabled) drm_irq_uninstall(dev); #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { if (dev_priv->cp_ring != NULL) { drm_core_ioremapfree(dev_priv->cp_ring, dev); dev_priv->cp_ring = NULL; } if (dev_priv->ring_rptr != NULL) { drm_core_ioremapfree(dev_priv->ring_rptr, dev); dev_priv->ring_rptr = NULL; } if (dev->agp_buffer_map != NULL) { drm_core_ioremapfree(dev->agp_buffer_map, dev); dev->agp_buffer_map = NULL; } } else #endif { if (dev_priv->gart_info.bus_addr) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) r600_page_table_cleanup(dev, &dev_priv->gart_info); else { if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) DRM_ERROR("failed to cleanup PCI GART!\n"); } } if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = 0; } } /* only clear to the start of flags */ memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); return 0; } /* This code will reinit the Radeon CP hardware after a resume from disc. * AFAIK, it would be very difficult to pickle the state at suspend time, so * here we make sure that all Radeon hardware initialisation is re-done without * affecting running applications. * * Charl P. 
Botha */ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; if (!dev_priv) { DRM_ERROR("Called with no initialization\n"); return -EINVAL; } DRM_DEBUG("Starting radeon_do_resume_cp()\n"); #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); } else #endif { /* Turn on PCI GART */ radeon_set_pcigart(dev_priv, 1); } radeon_cp_load_microcode(dev_priv); radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); radeon_do_engine_reset(dev); radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); DRM_DEBUG("radeon_do_resume_cp() complete\n"); return 0; } int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_init_t *init = data; LOCK_TEST_WITH_RETURN(dev, file_priv); if (init->func == RADEON_INIT_R300_CP) r300_init_reg_flags(dev); switch (init->func) { case RADEON_INIT_CP: case RADEON_INIT_R200_CP: case RADEON_INIT_R300_CP: return radeon_do_init_cp(dev, init, file_priv); case RADEON_INIT_R600_CP: return r600_do_init_cp(dev, init, file_priv); case RADEON_CLEANUP_CP: if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return r600_do_cleanup_cp(dev); else return radeon_do_cleanup_cp(dev); } return -EINVAL; } int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if (dev_priv->cp_running) { DRM_DEBUG("while CP running\n"); return 0; } if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) { DRM_DEBUG("called with bogus CP mode (%d)\n", dev_priv->cp_mode); return 0; } if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_cp_start(dev_priv); else radeon_do_cp_start(dev_priv); return 0; } /* Stop the CP. The engine must have been idled before calling this * routine. */ int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_cp_stop_t *stop = data; int ret; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv->cp_running) return 0; /* Flush any pending CP commands. This ensures any outstanding * commands are exectuted by the engine before we turn it off. */ if (stop->flush) { radeon_do_cp_flush(dev_priv); } /* If we fail to make the engine go idle, we return an error * code so that the DRM ioctl wrapper can try again. */ if (stop->idle) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) ret = r600_do_cp_idle(dev_priv); else ret = radeon_do_cp_idle(dev_priv); if (ret) return ret; } /* Finally, we can turn off the CP. If the engine isn't idle, * we will get some dropped triangles as they won't be fully * rendered before the CP is shut down. 
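 *
 * The teardown order the stop ioctl above follows:
 *
 *	radeon_do_cp_flush(dev_priv);	(only if stop->flush)
 *	radeon_do_cp_idle(dev_priv);	(only if stop->idle; may return -EBUSY)
 *	radeon_do_cp_stop(dev_priv);	(r600_do_cp_stop on R600+, below)
 *	radeon_do_engine_reset(dev);	(unconditional, below)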
*/ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_cp_stop(dev_priv); else radeon_do_cp_stop(dev_priv); /* Reset the engine */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_engine_reset(dev); else radeon_do_engine_reset(dev); return 0; } void radeon_do_release(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; int i, ret; if (dev_priv) { if (dev_priv->cp_running) { /* Stop the cp */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { while ((ret = r600_do_cp_idle(dev_priv)) != 0) { DRM_DEBUG("radeon_do_cp_idle %d\n", ret); mtx_sleep(&ret, &dev->dev_lock, 0, "rdnrel", 1); } } else { while ((ret = radeon_do_cp_idle(dev_priv)) != 0) { DRM_DEBUG("radeon_do_cp_idle %d\n", ret); mtx_sleep(&ret, &dev->dev_lock, 0, "rdnrel", 1); } } if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { r600_do_cp_stop(dev_priv); r600_do_engine_reset(dev); } else { radeon_do_cp_stop(dev_priv); radeon_do_engine_reset(dev); } } if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) { /* Disable *all* interrupts */ if (dev_priv->mmio) /* remove this after permanent addmaps */ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); if (dev_priv->mmio) { /* remove all surfaces */ for (i = 0; i < RADEON_MAX_SURFACES; i++) { RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0); RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, 0); RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, 0); } } } /* Free memory heap structures */ radeon_mem_takedown(&(dev_priv->gart_heap)); radeon_mem_takedown(&(dev_priv->fb_heap)); /* deallocate kernel resources */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_cleanup_cp(dev); else radeon_do_cleanup_cp(dev); } } /* Just reset the CP ring. Called as part of an X Server engine reset. */ int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_DEBUG("called before init done\n"); return -EINVAL; } if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_cp_reset(dev_priv); else radeon_do_cp_reset(dev_priv); /* The CP is no longer running after an engine reset */ dev_priv->cp_running = 0; return 0; } int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return r600_do_cp_idle(dev_priv); else return radeon_do_cp_idle(dev_priv); } /* Added by Charl P. Botha to call radeon_do_resume_cp(). 
*/ int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return r600_do_resume_cp(dev, file_priv); else return radeon_do_resume_cp(dev, file_priv); } int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return r600_do_engine_reset(dev); else return radeon_do_engine_reset(dev); } /* ================================================================ * Fullscreen mode */ /* KW: Deprecated to say the least: */ int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) { return 0; } /* ================================================================ * Freelist management */ /* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through * bufs until freelist code is used. Note this hides a problem with * the scratch register * (used to keep track of last buffer * completed) being written to before * the last buffer has actually * completed rendering. * * KW: It's also a good way to find free buffers quickly. * * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't * sleep. However, bugs in older versions of radeon_accel.c mean that * we essentially have to do this, else old clients will break. * * However, it does leave open a potential deadlock where all the * buffers are held by other clients, which can't release them because * they can't get the lock. */ struct drm_buf *radeon_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; struct drm_buf *buf; int i, t; int start; if (++dev_priv->last_buf >= dma->buf_count) dev_priv->last_buf = 0; start = dev_priv->last_buf; for (t = 0; t < dev_priv->usec_timeout; t++) { u32 done_age = GET_SCRATCH(dev_priv, 1); DRM_DEBUG("done_age = %d\n", done_age); for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[start]; buf_priv = buf->dev_private; if (buf->file_priv == NULL || (buf->pending && buf_priv->age <= done_age)) { dev_priv->stats.requested_bufs++; buf->pending = 0; return buf; } if (++start >= dma->buf_count) start = 0; } if (t) { DRM_UDELAY(1); dev_priv->stats.freelist_loops++; } } return NULL; } void radeon_freelist_reset(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; int i; dev_priv->last_buf = 0; for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; buf_priv->age = 0; } } /* ================================================================ * CP command submission */ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) { drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; u32 last_head = GET_RING_HEAD(dev_priv); for (i = 0; i < dev_priv->usec_timeout; i++) { u32 head = GET_RING_HEAD(dev_priv); ring->space = (head - ring->tail) * sizeof(u32); if (ring->space <= 0) ring->space += ring->size; if (ring->space > n) return 0; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; if (head != last_head) i = 0; last_head = head; DRM_UDELAY(1); } /* FIXME: This return value is ignored in the BEGIN_RING macro! 
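 *
 * (The space computation above works in bytes, with head and tail in
 * dwords: space = (head - tail) * sizeof(u32), wrapped by adding
 * ring->size when non-positive. E.g. with a 64 KB ring, head = 100 and
 * tail = 300 give (100 - 300) * 4 = -800, +65536 = 64736 bytes free;
 * success requires space strictly greater than the caller's n.)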
*/ #if RADEON_FIFO_DEBUG radeon_status(dev_priv); DRM_ERROR("failed!\n"); #endif return -EBUSY; } static int radeon_cp_get_buffers(struct drm_device *dev, struct drm_file *file_priv, struct drm_dma * d) { int i; struct drm_buf *buf; for (i = d->granted_count; i < d->request_count; i++) { buf = radeon_freelist_get(dev); if (!buf) return -EBUSY; /* NOTE: broken client */ buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) return -EFAULT; d->granted_count++; } return 0; } int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int ret = 0; struct drm_dma *d = data; LOCK_TEST_WITH_RETURN(dev, file_priv); /* Please don't send us buffers. */ if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. */ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } d->granted_count = 0; if (d->request_count) { ret = radeon_cp_get_buffers(dev, file_priv, d); } return ret; } int radeon_driver_load(struct drm_device *dev, unsigned long flags) { drm_radeon_private_t *dev_priv; int ret = 0; dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) return -ENOMEM; memset(dev_priv, 0, sizeof(drm_radeon_private_t)); dev->dev_private = (void *)dev_priv; dev_priv->flags = flags; switch (flags & RADEON_FAMILY_MASK) { case CHIP_R100: case CHIP_RV200: case CHIP_R200: case CHIP_R300: case CHIP_R350: case CHIP_R420: case CHIP_R423: case CHIP_RV410: case CHIP_RV515: case CHIP_R520: case CHIP_RV570: case CHIP_R580: dev_priv->flags |= RADEON_HAS_HIERZ; break; default: /* all other chips have no hierarchical z buffer */ break; } if (drm_device_is_agp(dev)) dev_priv->flags |= RADEON_IS_AGP; else if (drm_device_is_pcie(dev)) dev_priv->flags |= RADEON_IS_PCIE; else dev_priv->flags |= RADEON_IS_PCI; mtx_init(&dev_priv->cs.cs_mutex, "cs_mtx", NULL, MTX_DEF); ret = drm_addmap(dev, drm_get_resource_start(dev, 2), drm_get_resource_len(dev, 2), _DRM_REGISTERS, _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); if (ret != 0) goto error; ret = drm_vblank_init(dev, 2); if (ret != 0) goto error; dev->max_vblank_count = 0x001fffff; DRM_DEBUG("%s card detected\n", ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); return ret; error: radeon_driver_unload(dev); return ret; } /* Create mappings for registers and framebuffer so userland doesn't necessarily * have to find them. 
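 *
 * (BAR usage per driver_load above and firstopen below: PCI resource 2
 * is the MMIO register block, added as _DRM_REGISTERS with
 * _DRM_READ_ONLY | _DRM_DRIVER; PCI resource 0 is the framebuffer
 * aperture, added as _DRM_FRAME_BUFFER with _DRM_WRITE_COMBINING.)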
*/ int radeon_driver_firstopen(struct drm_device *dev) { int ret; drm_local_map_t *map; drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); ret = drm_addmap(dev, dev_priv->fb_aper_offset, drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map); if (ret != 0) return ret; return 0; } int radeon_driver_unload(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); drm_rmmap(dev, dev_priv->mmio); mtx_destroy(&dev_priv->cs.cs_mutex); drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); dev->dev_private = NULL; return 0; } void radeon_commit_ring(drm_radeon_private_t *dev_priv) { int i; u32 *ring; int tail_aligned; /* check if the ring is padded out to 16-dword alignment */ tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN - 1); if (tail_aligned) { int num_p2 = RADEON_RING_ALIGN - tail_aligned; ring = dev_priv->ring.start; /* pad with some CP_PACKET2 */ for (i = 0; i < num_p2; i++) ring[dev_priv->ring.tail + i] = CP_PACKET2(); dev_priv->ring.tail += i; dev_priv->ring.space -= num_p2 * sizeof(u32); } dev_priv->ring.tail &= dev_priv->ring.tail_mask; DRM_MEMORYBARRIER(); GET_RING_HEAD( dev_priv ); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail); /* read from PCI bus to ensure correct posting */ RADEON_READ(R600_CP_RB_RPTR); } else { RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail); /* read from PCI bus to ensure correct posting */ RADEON_READ(RADEON_CP_RB_RPTR); } } Index: head/sys/dev/drm/radeon_drv.h =================================================================== --- head/sys/dev/drm/radeon_drv.h (revision 258779) +++ head/sys/dev/drm/radeon_drv.h (revision 258780) @@ -1,2128 +1,2128 @@ /* radeon_drv.h -- Private header for radeon driver -*- linux-c -*- * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Kevin E. Martin * Gareth Hughes */ #include __FBSDID("$FreeBSD$"); #ifndef __RADEON_DRV_H__ #define __RADEON_DRV_H__ /* General customization: */ #define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." #define DRIVER_NAME "radeon" #define DRIVER_DESC "ATI Radeon" #define DRIVER_DATE "20080613" /* Interface history: * * 1.1 - ?? 
* 1.2 - Add vertex2 ioctl (keith) * - Add stencil capability to clear ioctl (gareth, keith) * - Increase MAX_TEXTURE_LEVELS (brian) * 1.3 - Add cmdbuf ioctl (keith) * - Add support for new radeon packets (keith) * - Add getparam ioctl (keith) * - Add flip-buffers ioctl, deprecate fullscreen foo (keith). * 1.4 - Add scratch registers to get_param ioctl. * 1.5 - Add r200 packets to cmdbuf ioctl * - Add r200 function to init ioctl * - Add 'scalar2' instruction to cmdbuf * 1.6 - Add static GART memory manager * Add irq handler (won't be turned on unless X server knows to) * Add irq ioctls and irq_active getparam. * Add wait command for cmdbuf ioctl * Add GART offset query for getparam * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5] * and R200_PP_CUBIC_OFFSET_F1_[0..5]. * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) * 1.8 - Remove need to call cleanup ioctls on last client exit (keith) * Add 'GET' queries for starting additional clients on different VT's. * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl. * Add texture rectangle support for r100. * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which * clients use to tell the DRM where they think the framebuffer is * located in the card's address space * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color * and GL_EXT_blend_[func|equation]_separate on r200 * 1.12- Add R300 CP microcode support - this just loads the CP on r300 * (No 3D support yet - just microcode loading). * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters * - Add hyperz support, add hyperz flags to clear ioctl. * 1.14- Add support for color tiling * - Add R100/R200 surface allocation/free support * 1.15- Add support for texture micro tiling * - Add support for r100 cube maps * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear * texture filtering on r200 * 1.17- Add initial support for R300 (3D). 
* 1.18- Add support for GL_ATI_fragment_shader, new packets * R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) * 1.19- Add support for gart table in FB memory and PCIE r300 * 1.20- Add support for r300 texrect * 1.21- Add support for card type getparam * 1.22- Add support for texture cache flushes (R300_TX_CNTL) * 1.23- Add new radeon memory map work from benh * 1.24- Add general-purpose packet for manipulating scratch registers (r300) * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL, * new packet type) * 1.26- Add support for variable size PCI(E) gart aperture * 1.27- Add support for IGP GART * 1.28- Add support for VBL on CRTC2 * 1.29- R500 3D cmd buffer support * 1.30- Add support for occlusion queries * 1.31- Add support for num Z pipes from GET_PARAM */ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 31 #define DRIVER_PATCHLEVEL 0 /* * Radeon chip families */ enum radeon_family { CHIP_R100, CHIP_RV100, CHIP_RS100, CHIP_RV200, CHIP_RS200, CHIP_R200, CHIP_RV250, CHIP_RS300, CHIP_RV280, CHIP_R300, CHIP_R350, CHIP_RV350, CHIP_RV380, CHIP_R420, CHIP_R423, CHIP_RV410, CHIP_RS400, CHIP_RS480, CHIP_RS600, CHIP_RS690, CHIP_RS740, CHIP_RV515, CHIP_R520, CHIP_RV530, CHIP_RV560, CHIP_RV570, CHIP_R580, CHIP_R600, CHIP_RV610, CHIP_RV630, CHIP_RV670, CHIP_RV620, CHIP_RV635, CHIP_RS780, CHIP_RS880, CHIP_RV770, CHIP_RV730, CHIP_RV710, CHIP_RV740, CHIP_LAST, }; enum radeon_cp_microcode_version { UCODE_R100, UCODE_R200, UCODE_R300, }; /* * Chip flags */ enum radeon_chip_flags { RADEON_FAMILY_MASK = 0x0000ffffUL, RADEON_FLAGS_MASK = 0xffff0000UL, RADEON_IS_MOBILITY = 0x00010000UL, RADEON_IS_IGP = 0x00020000UL, RADEON_SINGLE_CRTC = 0x00040000UL, RADEON_IS_AGP = 0x00080000UL, RADEON_HAS_HIERZ = 0x00100000UL, RADEON_IS_PCIE = 0x00200000UL, RADEON_NEW_MEMMAP = 0x00400000UL, RADEON_IS_PCI = 0x00800000UL, RADEON_IS_IGPGART = 0x01000000UL, }; typedef struct drm_radeon_freelist { unsigned int age; struct drm_buf *buf; struct drm_radeon_freelist *next; struct drm_radeon_freelist *prev; } drm_radeon_freelist_t; typedef struct drm_radeon_ring_buffer { u32 *start; u32 *end; int size; int size_l2qw; int rptr_update; /* Double Words */ int rptr_update_l2qw; /* log2 Quad Words */ int fetch_size; /* Double Words */ int fetch_size_l2ow; /* log2 Oct Words */ u32 tail; u32 tail_mask; int space; int high_mark; } drm_radeon_ring_buffer_t; typedef struct drm_radeon_depth_clear_t { u32 rb3d_cntl; u32 rb3d_zstencilcntl; u32 se_cntl; } drm_radeon_depth_clear_t; struct drm_radeon_driver_file_fields { int64_t radeon_fb_delta; }; struct mem_block { struct mem_block *next; struct mem_block *prev; int start; int size; struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ }; struct radeon_surface { int refcount; u32 lower; u32 upper; u32 flags; }; struct radeon_virt_surface { int surface_index; u32 lower; u32 upper; u32 flags; struct drm_file *file_priv; #define PCIGART_FILE_PRIV ((void *) -1L) }; struct drm_radeon_kernel_chunk { uint32_t chunk_id; uint32_t length_dw; uint32_t __user *chunk_data; uint32_t *kdata; }; struct drm_radeon_cs_parser { struct drm_device *dev; struct drm_file *file_priv; uint32_t num_chunks; struct drm_radeon_kernel_chunk *chunks; int ib_index; int reloc_index; uint32_t card_offset; void *ib; }; /* command submission struct */ struct drm_radeon_cs_priv { struct mtx cs_mutex; uint32_t id_wcnt; uint32_t id_scnt; uint32_t id_last_wcnt; uint32_t 
id_last_scnt; int (*parse)(struct drm_radeon_cs_parser *parser); void (*id_emit)(struct drm_radeon_cs_parser *parser, uint32_t *id); uint32_t (*id_last_get)(struct drm_device *dev); /* this ib handling callback are for hidding memory manager drm * from memory manager less drm, free have to emit ib discard * sequence into the ring */ int (*ib_get)(struct drm_radeon_cs_parser *parser); uint32_t (*ib_get_ptr)(struct drm_device *dev, void *ib); void (*ib_free)(struct drm_radeon_cs_parser *parser, int error); /* do a relocation either MM or non-MM */ int (*relocate)(struct drm_radeon_cs_parser *parser, uint32_t *reloc, uint64_t *offset); }; #define RADEON_FLUSH_EMITED (1 << 0) #define RADEON_PURGE_EMITED (1 << 1) typedef struct drm_radeon_private { drm_radeon_ring_buffer_t ring; drm_radeon_sarea_t *sarea_priv; u32 fb_location; u32 fb_size; int new_memmap; int gart_size; u32 gart_vm_start; unsigned long gart_buffers_offset; int cp_mode; int cp_running; drm_radeon_freelist_t *head; drm_radeon_freelist_t *tail; int last_buf; int writeback_works; int usec_timeout; int microcode_version; struct { u32 boxes; int freelist_timeouts; int freelist_loops; int requested_bufs; int last_frame_reads; int last_clear_reads; int clears; int texture_uploads; } stats; int do_boxes; int page_flipping; u32 color_fmt; unsigned int front_offset; unsigned int front_pitch; unsigned int back_offset; unsigned int back_pitch; u32 depth_fmt; unsigned int depth_offset; unsigned int depth_pitch; u32 front_pitch_offset; u32 back_pitch_offset; u32 depth_pitch_offset; drm_radeon_depth_clear_t depth_clear; unsigned long ring_offset; unsigned long ring_rptr_offset; unsigned long buffers_offset; unsigned long gart_textures_offset; drm_local_map_t *sarea; drm_local_map_t *cp_ring; drm_local_map_t *ring_rptr; drm_local_map_t *gart_textures; struct mem_block *gart_heap; struct mem_block *fb_heap; /* SW interrupt */ wait_queue_head_t swi_queue; atomic_t swi_emitted; int vblank_crtc; uint32_t irq_enable_reg; int irq_enabled; uint32_t r500_disp_irq_reg; struct radeon_surface surfaces[RADEON_MAX_SURFACES]; struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES]; unsigned long pcigart_offset; unsigned int pcigart_offset_set; struct drm_ati_pcigart_info gart_info; u32 scratch_ages[5]; /* starting from here on, data is preserved accross an open */ uint32_t flags; /* see radeon_chip_flags */ unsigned long fb_aper_offset; int num_gb_pipes; int num_z_pipes; int track_flush; drm_local_map_t *mmio; /* r6xx/r7xx pipe/shader config */ int r600_max_pipes; int r600_max_tile_pipes; int r600_max_simds; int r600_max_backends; int r600_max_gprs; int r600_max_threads; int r600_max_stack_entries; int r600_max_hw_contexts; int r600_max_gs_threads; int r600_sx_max_export_size; int r600_sx_max_export_pos_size; int r600_sx_max_export_smx_size; int r600_sq_num_cf_insts; int r700_sx_num_of_sets; int r700_sc_prim_fifo_size; int r700_sc_hiz_tile_fifo_size; int r700_sc_earlyz_tile_fifo_fize; /* r6xx/r7xx drm blit vertex buffer */ struct drm_buf *blit_vb; /* CS */ struct drm_radeon_cs_priv cs; struct drm_buf *cs_buf; } drm_radeon_private_t; typedef struct drm_radeon_buf_priv { u32 age; } drm_radeon_buf_priv_t; typedef struct drm_radeon_kcmd_buffer { int bufsz; char *buf; int nbox; struct drm_clip_rect __user *boxes; } drm_radeon_kcmd_buffer_t; extern int radeon_no_wb; extern struct drm_ioctl_desc radeon_ioctls[]; extern int radeon_max_ioctl; extern u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv); extern void 
extern u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv);
extern void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val);

#define GET_RING_HEAD(dev_priv)		radeon_get_ring_head(dev_priv)
#define SET_RING_HEAD(dev_priv, val)	radeon_set_ring_head(dev_priv, val)

/* Check whether the given hardware address is inside the framebuffer or the
 * GART area. */
static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
					  u64 off)
{
	u64 fb_start = dev_priv->fb_location;
	u64 fb_end = fb_start + dev_priv->fb_size - 1;
	u64 gart_start = dev_priv->gart_vm_start;
	u64 gart_end = gart_start + dev_priv->gart_size - 1;

	return ((off >= fb_start && off <= fb_end) ||
	    (off >= gart_start && off <= gart_end));
}

/* radeon_cp.c */
extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);

extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);

extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);

extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);

extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
extern int radeon_presetup(struct drm_device *dev);
extern int radeon_driver_postcleanup(struct drm_device *dev);

extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_mem_takedown(struct mem_block **heap);
extern void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap);
extern void radeon_enable_bm(struct drm_radeon_private *dev_priv);
extern u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off);
extern void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val);

/* radeon_irq.c */
extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_do_release(struct drm_device * dev);
extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void
radeon_driver_irq_preinstall(struct drm_device * dev); extern int radeon_driver_irq_postinstall(struct drm_device *dev); extern void radeon_driver_irq_uninstall(struct drm_device * dev); extern void radeon_enable_interrupt(struct drm_device *dev); extern int radeon_vblank_crtc_get(struct drm_device *dev); extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); extern int radeon_driver_unload(struct drm_device *dev); extern int radeon_driver_firstopen(struct drm_device *dev); extern void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); extern void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv); extern void radeon_driver_lastclose(struct drm_device * dev); extern int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv); extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* r300_cmdbuf.c */ extern void r300_init_reg_flags(struct drm_device *dev); extern int r300_do_cp_cmdbuf(struct drm_device *dev, struct drm_file *file_priv, drm_radeon_kcmd_buffer_t *cmdbuf); /* r600_cp.c */ extern int r600_do_engine_reset(struct drm_device *dev); extern int r600_do_cleanup_cp(struct drm_device *dev); extern int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, struct drm_file *file_priv); extern int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv); extern int r600_do_cp_idle(drm_radeon_private_t *dev_priv); extern void r600_do_cp_start(drm_radeon_private_t *dev_priv); extern void r600_do_cp_reset(drm_radeon_private_t *dev_priv); extern void r600_do_cp_stop(drm_radeon_private_t *dev_priv); extern int r600_cp_dispatch_indirect(struct drm_device *dev, struct drm_buf *buf, int start, int end); extern int r600_page_table_init(struct drm_device *dev); extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); extern void r600_cp_dispatch_swap(struct drm_device * dev); extern int r600_cp_dispatch_texture(struct drm_device * dev, struct drm_file *file_priv, drm_radeon_texture_t * tex, drm_radeon_tex_image_t * image); /* r600_blit.c */ extern int r600_prepare_blit_copy(struct drm_device *dev); extern void r600_done_blit_copy(struct drm_device *dev); extern void r600_blit_copy(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int size_bytes); extern void r600_blit_swap(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int sx, int sy, int dx, int dy, int w, int h, int src_pitch, int dst_pitch, int cpp); /* radeon_state.c */ extern void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf); /* radeon_cs.c */ extern int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv); extern int r600_cs_init(struct drm_device *dev); /* Flags for stats.boxes */ #define RADEON_BOX_DMA_IDLE 0x1 #define RADEON_BOX_RING_FULL 0x2 #define RADEON_BOX_FLIP 0x4 #define RADEON_BOX_WAIT_IDLE 0x8 #define RADEON_BOX_TEXTURE_LOAD 0x10 /* Register definitions, register access macros and drmAddMap constants * for Radeon kernel driver. 
*/ #define RADEON_MM_INDEX 0x0000 #define RADEON_MM_DATA 0x0004 #define RADEON_AGP_COMMAND 0x0f60 #define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ # define RADEON_AGP_ENABLE (1<<8) #define RADEON_AUX_SCISSOR_CNTL 0x26f0 # define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) # define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) # define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26) # define RADEON_SCISSOR_0_ENABLE (1 << 28) # define RADEON_SCISSOR_1_ENABLE (1 << 29) # define RADEON_SCISSOR_2_ENABLE (1 << 30) /* * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx) * don't have an explicit bus mastering disable bit. It's handled * by the PCI D-states. PMI_BM_DIS disables D-state bus master * handling, not bus mastering itself. */ #define RADEON_BUS_CNTL 0x0030 /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ # define RADEON_BUS_MASTER_DIS (1 << 6) /* rs600/rs690/rs740 */ # define RS600_BUS_MASTER_DIS (1 << 14) # define RS600_MSI_REARM (1 << 20) /* see RS400_MSI_REARM in AIC_CNTL for rs480 */ #define RADEON_BUS_CNTL1 0x0034 # define RADEON_PMI_BM_DIS (1 << 2) # define RADEON_PMI_INT_DIS (1 << 3) #define RV370_BUS_CNTL 0x004c # define RV370_PMI_BM_DIS (1 << 5) # define RV370_PMI_INT_DIS (1 << 6) #define RADEON_MSI_REARM_EN 0x0160 /* rv370/rv380, rv410, r423/r430/r480, r5xx */ # define RV370_MSI_REARM_EN (1 << 0) #define RADEON_CLOCK_CNTL_DATA 0x000c # define RADEON_PLL_WR_EN (1 << 7) #define RADEON_CLOCK_CNTL_INDEX 0x0008 #define RADEON_CONFIG_APER_SIZE 0x0108 #define RADEON_CONFIG_MEMSIZE 0x00f8 #define RADEON_CRTC_OFFSET 0x0224 #define RADEON_CRTC_OFFSET_CNTL 0x0228 # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) #define RADEON_CRTC2_OFFSET 0x0324 #define RADEON_CRTC2_OFFSET_CNTL 0x0328 #define RADEON_PCIE_INDEX 0x0030 #define RADEON_PCIE_DATA 0x0034 #define RADEON_PCIE_TX_GART_CNTL 0x10 # define RADEON_PCIE_TX_GART_EN (1 << 0) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1) # define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3) # define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3) # define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5) # define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8) #define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11 #define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12 #define RADEON_PCIE_TX_GART_BASE 0x13 #define RADEON_PCIE_TX_GART_START_LO 0x14 #define RADEON_PCIE_TX_GART_START_HI 0x15 #define RADEON_PCIE_TX_GART_END_LO 0x16 #define RADEON_PCIE_TX_GART_END_HI 0x17 #define RS480_NB_MC_INDEX 0x168 # define RS480_NB_MC_IND_WR_EN (1 << 8) #define RS480_NB_MC_DATA 0x16c #define RS690_MC_INDEX 0x78 # define RS690_MC_INDEX_MASK 0x1ff # define RS690_MC_INDEX_WR_EN (1 << 9) # define RS690_MC_INDEX_WR_ACK 0x7f #define RS690_MC_DATA 0x7c /* MC indirect registers */ #define RS480_MC_MISC_CNTL 0x18 # define RS480_DISABLE_GTW (1 << 1) /* switch between MCIND GART and MM GART registers. 
0 = mmgart, 1 = mcind gart */ # define RS480_GART_INDEX_REG_EN (1 << 12) # define RS690_BLOCK_GFX_D3_EN (1 << 14) #define RS480_K8_FB_LOCATION 0x1e #define RS480_GART_FEATURE_ID 0x2b # define RS480_HANG_EN (1 << 11) # define RS480_TLB_ENABLE (1 << 18) # define RS480_P2P_ENABLE (1 << 19) # define RS480_GTW_LAC_EN (1 << 25) # define RS480_2LEVEL_GART (0 << 30) # define RS480_1LEVEL_GART (1 << 30) -# define RS480_PDC_EN (1 << 31) +# define RS480_PDC_EN (1U << 31) #define RS480_GART_BASE 0x2c #define RS480_GART_CACHE_CNTRL 0x2e # define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */ #define RS480_AGP_ADDRESS_SPACE_SIZE 0x38 # define RS480_GART_EN (1 << 0) # define RS480_VA_SIZE_32MB (0 << 1) # define RS480_VA_SIZE_64MB (1 << 1) # define RS480_VA_SIZE_128MB (2 << 1) # define RS480_VA_SIZE_256MB (3 << 1) # define RS480_VA_SIZE_512MB (4 << 1) # define RS480_VA_SIZE_1GB (5 << 1) # define RS480_VA_SIZE_2GB (6 << 1) #define RS480_AGP_MODE_CNTL 0x39 # define RS480_POST_GART_Q_SIZE (1 << 18) # define RS480_NONGART_SNOOP (1 << 19) # define RS480_AGP_RD_BUF_SIZE (1 << 20) # define RS480_REQ_TYPE_SNOOP_SHIFT 22 # define RS480_REQ_TYPE_SNOOP_MASK 0x3 # define RS480_REQ_TYPE_SNOOP_DIS (1 << 24) #define RS480_MC_MISC_UMA_CNTL 0x5f #define RS480_MC_MCLK_CNTL 0x7a #define RS480_MC_UMA_DUALCH_CNTL 0x86 #define RS690_MC_FB_LOCATION 0x100 #define RS690_MC_AGP_LOCATION 0x101 #define RS690_MC_AGP_BASE 0x102 #define RS690_MC_AGP_BASE_2 0x103 #define RS600_MC_INDEX 0x70 # define RS600_MC_ADDR_MASK 0xffff # define RS600_MC_IND_SEQ_RBS_0 (1 << 16) # define RS600_MC_IND_SEQ_RBS_1 (1 << 17) # define RS600_MC_IND_SEQ_RBS_2 (1 << 18) # define RS600_MC_IND_SEQ_RBS_3 (1 << 19) # define RS600_MC_IND_AIC_RBS (1 << 20) # define RS600_MC_IND_CITF_ARB0 (1 << 21) # define RS600_MC_IND_CITF_ARB1 (1 << 22) # define RS600_MC_IND_WR_EN (1 << 23) #define RS600_MC_DATA 0x74 #define RS600_MC_STATUS 0x0 # define RS600_MC_IDLE (1 << 1) #define RS600_MC_FB_LOCATION 0x4 #define RS600_MC_AGP_LOCATION 0x5 #define RS600_AGP_BASE 0x6 #define RS600_AGP_BASE_2 0x7 #define RS600_MC_CNTL1 0x9 # define RS600_ENABLE_PAGE_TABLES (1 << 26) #define RS600_MC_PT0_CNTL 0x100 # define RS600_ENABLE_PT (1 << 0) # define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15) # define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21) # define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28) # define RS600_INVALIDATE_L2_CACHE (1 << 29) #define RS600_MC_PT0_CONTEXT0_CNTL 0x102 # define RS600_ENABLE_PAGE_TABLE (1 << 0) # define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1) #define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112 #define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114 #define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c #define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c #define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c #define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c #define RS600_MC_PT0_CLIENT0_CNTL 0x16c # define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0) # define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1) # define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8) # define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8) # define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8) # define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8) # define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8) # define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10) # define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10) # define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11) # define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14) # define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15) # define 
RS600_INVALIDATE_L1_TLB (1 << 20) #define R520_MC_IND_INDEX 0x70 #define R520_MC_IND_WR_EN (1 << 24) #define R520_MC_IND_DATA 0x74 #define RV515_MC_FB_LOCATION 0x01 #define RV515_MC_AGP_LOCATION 0x02 #define RV515_MC_AGP_BASE 0x03 #define RV515_MC_AGP_BASE_2 0x04 #define R520_MC_FB_LOCATION 0x04 #define R520_MC_AGP_LOCATION 0x05 #define R520_MC_AGP_BASE 0x06 #define R520_MC_AGP_BASE_2 0x07 #define RADEON_MPP_TB_CONFIG 0x01c0 #define RADEON_MEM_CNTL 0x0140 #define RADEON_MEM_SDRAM_MODE_REG 0x0158 #define RADEON_AGP_BASE_2 0x015c /* r200+ only */ #define RS480_AGP_BASE_2 0x0164 #define RADEON_AGP_BASE 0x0170 /* pipe config regs */ #define R400_GB_PIPE_SELECT 0x402c #define RV530_GB_PIPE_SELECT2 0x4124 #define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ #define R300_GB_TILE_CONFIG 0x4018 # define R300_ENABLE_TILING (1 << 0) # define R300_PIPE_COUNT_RV350 (0 << 1) # define R300_PIPE_COUNT_R300 (3 << 1) # define R300_PIPE_COUNT_R420_3P (6 << 1) # define R300_PIPE_COUNT_R420 (7 << 1) # define R300_TILE_SIZE_8 (0 << 4) # define R300_TILE_SIZE_16 (1 << 4) # define R300_TILE_SIZE_32 (2 << 4) # define R300_SUBPIXEL_1_12 (0 << 16) # define R300_SUBPIXEL_1_16 (1 << 16) #define R300_DST_PIPE_CONFIG 0x170c -# define R300_PIPE_AUTO_CONFIG (1 << 31) +# define R300_PIPE_AUTO_CONFIG (1U << 31) #define R300_RB2D_DSTCACHE_MODE 0x3428 # define R300_DC_AUTOFLUSH_ENABLE (1 << 8) # define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17) #define RADEON_RB3D_COLOROFFSET 0x1c40 #define RADEON_RB3D_COLORPITCH 0x1c48 #define RADEON_SRC_X_Y 0x1590 #define RADEON_DP_GUI_MASTER_CNTL 0x146c # define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) # define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) # define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4) # define RADEON_GMC_BRUSH_NONE (15 << 4) # define RADEON_GMC_DST_16BPP (4 << 8) # define RADEON_GMC_DST_24BPP (5 << 8) # define RADEON_GMC_DST_32BPP (6 << 8) # define RADEON_GMC_DST_DATATYPE_SHIFT 8 # define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12) # define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24) # define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24) # define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28) # define RADEON_GMC_WR_MSK_DIS (1 << 30) # define RADEON_ROP3_S 0x00cc0000 # define RADEON_ROP3_P 0x00f00000 #define RADEON_DP_WRITE_MASK 0x16cc #define RADEON_SRC_PITCH_OFFSET 0x1428 #define RADEON_DST_PITCH_OFFSET 0x142c #define RADEON_DST_PITCH_OFFSET_C 0x1c80 # define RADEON_DST_TILE_LINEAR (0 << 30) # define RADEON_DST_TILE_MACRO (1 << 30) # define RADEON_DST_TILE_MICRO (2U << 30) # define RADEON_DST_TILE_BOTH (3U << 30) #define RADEON_SCRATCH_REG0 0x15e0 #define RADEON_SCRATCH_REG1 0x15e4 #define RADEON_SCRATCH_REG2 0x15e8 #define RADEON_SCRATCH_REG3 0x15ec #define RADEON_SCRATCH_REG4 0x15f0 #define RADEON_SCRATCH_REG5 0x15f4 #define RADEON_SCRATCH_UMSK 0x0770 #define RADEON_SCRATCH_ADDR 0x0774 #define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x)) extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); #define GET_SCRATCH(dev_priv, x) radeon_get_scratch(dev_priv, x) #define R600_SCRATCH_REG0 0x8500 #define R600_SCRATCH_REG1 0x8504 #define R600_SCRATCH_REG2 0x8508 #define R600_SCRATCH_REG3 0x850c #define R600_SCRATCH_REG4 0x8510 #define R600_SCRATCH_REG5 0x8514 #define R600_SCRATCH_REG6 0x8518 #define R600_SCRATCH_REG7 0x851c #define R600_SCRATCH_UMSK 0x8540 #define R600_SCRATCH_ADDR 0x8544 #define R600_SCRATCHOFF(x) (R600_SCRATCH_REG_OFFSET + 4*(x)) #define RADEON_GEN_INT_CNTL 0x0040 # define RADEON_CRTC_VBLANK_MASK (1 << 0) # define RADEON_CRTC2_VBLANK_MASK (1 << 9) # define 
RADEON_GUI_IDLE_INT_ENABLE (1 << 19) # define RADEON_SW_INT_ENABLE (1 << 25) #define RADEON_GEN_INT_STATUS 0x0044 # define RADEON_CRTC_VBLANK_STAT (1 << 0) # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) # define RADEON_CRTC2_VBLANK_STAT (1 << 9) # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) # define RADEON_SW_INT_FIRE (1 << 26) # define R500_DISPLAY_INT_STATUS (1 << 0) #define RADEON_HOST_PATH_CNTL 0x0130 # define RADEON_HDP_SOFT_RESET (1 << 26) # define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) # define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28) #define RADEON_ISYNC_CNTL 0x1724 # define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0) # define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1) # define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2) # define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3) # define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) # define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) #define RADEON_RBBM_GUICNTL 0x172c # define RADEON_HOST_DATA_SWAP_NONE (0 << 0) # define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) # define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) # define RADEON_HOST_DATA_SWAP_HDW (3 << 0) #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_FB_LOCATION 0x0148 #define RADEON_MCLK_CNTL 0x0012 # define RADEON_FORCEON_MCLKA (1 << 16) # define RADEON_FORCEON_MCLKB (1 << 17) # define RADEON_FORCEON_YCLKA (1 << 18) # define RADEON_FORCEON_YCLKB (1 << 19) # define RADEON_FORCEON_MC (1 << 20) # define RADEON_FORCEON_AIC (1 << 21) #define RADEON_PP_BORDER_COLOR_0 0x1d40 #define RADEON_PP_BORDER_COLOR_1 0x1d44 #define RADEON_PP_BORDER_COLOR_2 0x1d48 #define RADEON_PP_CNTL 0x1c38 # define RADEON_SCISSOR_ENABLE (1 << 1) #define RADEON_PP_LUM_MATRIX 0x1d00 #define RADEON_PP_MISC 0x1c14 #define RADEON_PP_ROT_MATRIX_0 0x1d58 #define RADEON_PP_TXFILTER_0 0x1c54 #define RADEON_PP_TXOFFSET_0 0x1c5c #define RADEON_PP_TXFILTER_1 0x1c6c #define RADEON_PP_TXFILTER_2 0x1c84 #define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */ #define R300_DSTCACHE_CTLSTAT 0x1714 # define R300_RB2D_DC_FLUSH (3 << 0) # define R300_RB2D_DC_FREE (3 << 2) # define R300_RB2D_DC_FLUSH_ALL 0xf -# define R300_RB2D_DC_BUSY (1 << 31) +# define R300_RB2D_DC_BUSY (1U << 31) #define RADEON_RB3D_CNTL 0x1c3c # define RADEON_ALPHA_BLEND_ENABLE (1 << 0) # define RADEON_PLANE_MASK_ENABLE (1 << 1) # define RADEON_DITHER_ENABLE (1 << 2) # define RADEON_ROUND_ENABLE (1 << 3) # define RADEON_SCALE_DITHER_ENABLE (1 << 4) # define RADEON_DITHER_INIT (1 << 5) # define RADEON_ROP_ENABLE (1 << 6) # define RADEON_STENCIL_ENABLE (1 << 7) # define RADEON_Z_ENABLE (1 << 8) # define RADEON_ZBLOCK16 (1 << 15) #define RADEON_RB3D_DEPTHOFFSET 0x1c24 #define RADEON_RB3D_DEPTHCLEARVALUE 0x3230 #define RADEON_RB3D_DEPTHPITCH 0x1c28 #define RADEON_RB3D_PLANEMASK 0x1d84 #define RADEON_RB3D_STENCILREFMASK 0x1d7c #define RADEON_RB3D_ZCACHE_MODE 0x3250 #define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254 # define RADEON_RB3D_ZC_FLUSH (1 << 0) # define RADEON_RB3D_ZC_FREE (1 << 2) # define RADEON_RB3D_ZC_FLUSH_ALL 0x5 -# define RADEON_RB3D_ZC_BUSY (1 << 31) +# define RADEON_RB3D_ZC_BUSY (1U << 31) #define R300_ZB_ZCACHE_CTLSTAT 0x4f18 # define R300_ZC_FLUSH (1 << 0) # define R300_ZC_FREE (1 << 1) -# define R300_ZC_BUSY (1 << 31) +# define R300_ZC_BUSY (1U << 31) #define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c # define RADEON_RB3D_DC_FLUSH (3 << 0) # define RADEON_RB3D_DC_FREE (3 << 2) # define RADEON_RB3D_DC_FLUSH_ALL 0xf -# define RADEON_RB3D_DC_BUSY (1 << 31) +# define 
RADEON_RB3D_DC_BUSY (1U << 31) #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c # define R300_RB3D_DC_FLUSH (2 << 0) # define R300_RB3D_DC_FREE (2 << 2) # define R300_RB3D_DC_FINISH (1 << 4) #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) # define RADEON_Z_HIERARCHY_ENABLE (1 << 8) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) # define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) # define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) # define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) # define RADEON_Z_COMPRESSION_ENABLE (1 << 28) # define RADEON_FORCE_Z_DIRTY (1 << 29) # define RADEON_Z_WRITE_ENABLE (1 << 30) -# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) +# define RADEON_Z_DECOMPRESSION_ENABLE (1U << 31) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) # define RADEON_SOFT_RESET_HI (1 << 1) # define RADEON_SOFT_RESET_SE (1 << 2) # define RADEON_SOFT_RESET_RE (1 << 3) # define RADEON_SOFT_RESET_PP (1 << 4) # define RADEON_SOFT_RESET_E2 (1 << 5) # define RADEON_SOFT_RESET_RB (1 << 6) # define RADEON_SOFT_RESET_HDP (1 << 7) /* * 6:0 Available slots in the FIFO * 8 Host Interface active * 9 CP request active * 10 FIFO request active * 11 Host Interface retry active * 12 CP retry active * 13 FIFO retry active * 14 FIFO pipeline busy * 15 Event engine busy * 16 CP command stream busy * 17 2D engine busy * 18 2D portion of render backend busy * 20 3D setup engine busy * 26 GA engine busy * 27 CBA 2D engine busy * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or * command stream queue not empty or Ring Buffer not empty */ #define RADEON_RBBM_STATUS 0x0e40 /* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */ /* #define RADEON_RBBM_STATUS 0x1740 */ /* bits 6:0 are dword slots available in the cmd fifo */ # define RADEON_RBBM_FIFOCNT_MASK 0x007f # define RADEON_HIRQ_ON_RBB (1 << 8) # define RADEON_CPRQ_ON_RBB (1 << 9) # define RADEON_CFRQ_ON_RBB (1 << 10) # define RADEON_HIRQ_IN_RTBUF (1 << 11) # define RADEON_CPRQ_IN_RTBUF (1 << 12) # define RADEON_CFRQ_IN_RTBUF (1 << 13) # define RADEON_PIPE_BUSY (1 << 14) # define RADEON_ENG_EV_BUSY (1 << 15) # define RADEON_CP_CMDSTRM_BUSY (1 << 16) # define RADEON_E2_BUSY (1 << 17) # define RADEON_RB2D_BUSY (1 << 18) # define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */ # define RADEON_VAP_BUSY (1 << 20) # define RADEON_RE_BUSY (1 << 21) /* not used on r300 */ # define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */ # define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */ # define RADEON_PB_BUSY (1 << 24) /* not used on r300 */ # define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */ # define RADEON_GA_BUSY (1 << 26) # define RADEON_CBA2D_BUSY (1 << 27) -# define RADEON_RBBM_ACTIVE (1 << 31) +# define RADEON_RBBM_ACTIVE (1U << 31) #define RADEON_RE_LINE_PATTERN 0x1cd0 #define RADEON_RE_MISC 0x26c4 #define RADEON_RE_TOP_LEFT 0x26c0 #define RADEON_RE_WIDTH_HEIGHT 0x1c44 #define RADEON_RE_STIPPLE_ADDR 0x1cc8 #define RADEON_RE_STIPPLE_DATA 0x1ccc #define RADEON_SCISSOR_TL_0 0x1cd8 #define RADEON_SCISSOR_BR_0 0x1cdc #define RADEON_SCISSOR_TL_1 0x1ce0 #define RADEON_SCISSOR_BR_1 0x1ce4 #define RADEON_SCISSOR_TL_2 0x1ce8 #define RADEON_SCISSOR_BR_2 0x1cec #define RADEON_SE_COORD_FMT 0x1c50 #define RADEON_SE_CNTL 0x1c4c # define RADEON_FFACE_CULL_CW (0 << 0) # define RADEON_BFACE_SOLID (3 << 1) # define RADEON_FFACE_SOLID (3 << 3) # define RADEON_FLAT_SHADE_VTX_LAST (3 << 6) # define RADEON_DIFFUSE_SHADE_FLAT (1 << 8) # define 
RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8) # define RADEON_ALPHA_SHADE_FLAT (1 << 10) # define RADEON_ALPHA_SHADE_GOURAUD (2 << 10) # define RADEON_SPECULAR_SHADE_FLAT (1 << 12) # define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12) # define RADEON_FOG_SHADE_FLAT (1 << 14) # define RADEON_FOG_SHADE_GOURAUD (2 << 14) # define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24) # define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25) # define RADEON_VTX_PIX_CENTER_OGL (1 << 27) # define RADEON_ROUND_MODE_TRUNC (0 << 28) # define RADEON_ROUND_PREC_8TH_PIX (1 << 30) #define RADEON_SE_CNTL_STATUS 0x2140 #define RADEON_SE_LINE_WIDTH 0x1db8 #define RADEON_SE_VPORT_XSCALE 0x1d98 #define RADEON_SE_ZBIAS_FACTOR 0x1db0 #define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 #define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 #define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200 # define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16 # define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28 #define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204 #define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208 # define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16 #define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C #define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 #define RADEON_SURFACE_ACCESS_CLR 0x0bfc #define RADEON_SURFACE_CNTL 0x0b00 # define RADEON_SURF_TRANSLATION_DIS (1 << 8) # define RADEON_NONSURF_AP0_SWP_MASK (3 << 20) # define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20) # define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20) # define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20) # define RADEON_NONSURF_AP1_SWP_MASK (3 << 22) # define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22) # define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22) # define RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22) #define RADEON_SURFACE0_INFO 0x0b0c # define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0) # define RADEON_SURF_TILE_MODE_MASK (3 << 16) # define RADEON_SURF_TILE_MODE_MACRO (0 << 16) # define RADEON_SURF_TILE_MODE_MICRO (1 << 16) # define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16) # define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16) #define RADEON_SURFACE0_LOWER_BOUND 0x0b04 #define RADEON_SURFACE0_UPPER_BOUND 0x0b08 # define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0) #define RADEON_SURFACE1_INFO 0x0b1c #define RADEON_SURFACE1_LOWER_BOUND 0x0b14 #define RADEON_SURFACE1_UPPER_BOUND 0x0b18 #define RADEON_SURFACE2_INFO 0x0b2c #define RADEON_SURFACE2_LOWER_BOUND 0x0b24 #define RADEON_SURFACE2_UPPER_BOUND 0x0b28 #define RADEON_SURFACE3_INFO 0x0b3c #define RADEON_SURFACE3_LOWER_BOUND 0x0b34 #define RADEON_SURFACE3_UPPER_BOUND 0x0b38 #define RADEON_SURFACE4_INFO 0x0b4c #define RADEON_SURFACE4_LOWER_BOUND 0x0b44 #define RADEON_SURFACE4_UPPER_BOUND 0x0b48 #define RADEON_SURFACE5_INFO 0x0b5c #define RADEON_SURFACE5_LOWER_BOUND 0x0b54 #define RADEON_SURFACE5_UPPER_BOUND 0x0b58 #define RADEON_SURFACE6_INFO 0x0b6c #define RADEON_SURFACE6_LOWER_BOUND 0x0b64 #define RADEON_SURFACE6_UPPER_BOUND 0x0b68 #define RADEON_SURFACE7_INFO 0x0b7c #define RADEON_SURFACE7_LOWER_BOUND 0x0b74 #define RADEON_SURFACE7_UPPER_BOUND 0x0b78 #define RADEON_SW_SEMAPHORE 0x013c #define RADEON_WAIT_UNTIL 0x1720 # define RADEON_WAIT_CRTC_PFLIP (1 << 0) # define RADEON_WAIT_2D_IDLE (1 << 14) # define RADEON_WAIT_3D_IDLE (1 << 15) # define RADEON_WAIT_2D_IDLECLEAN (1 << 16) # define RADEON_WAIT_3D_IDLECLEAN (1 << 17) # define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) #define RADEON_RB3D_ZMASKOFFSET 0x3234 #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c # define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) # define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) /* CP registers */ #define RADEON_CP_ME_RAM_ADDR 0x07d4 #define RADEON_CP_ME_RAM_RADDR 
0x07d8 #define RADEON_CP_ME_RAM_DATAH 0x07dc #define RADEON_CP_ME_RAM_DATAL 0x07e0 #define RADEON_CP_RB_BASE 0x0700 #define RADEON_CP_RB_CNTL 0x0704 # define RADEON_BUF_SWAP_32BIT (2 << 16) # define RADEON_RB_NO_UPDATE (1 << 27) -# define RADEON_RB_RPTR_WR_ENA (1 << 31) +# define RADEON_RB_RPTR_WR_ENA (1U << 31) #define RADEON_CP_RB_RPTR_ADDR 0x070c #define RADEON_CP_RB_RPTR 0x0710 #define RADEON_CP_RB_WPTR 0x0714 #define RADEON_CP_RB_WPTR_DELAY 0x0718 # define RADEON_PRE_WRITE_TIMER_SHIFT 0 # define RADEON_PRE_WRITE_LIMIT_SHIFT 23 #define RADEON_CP_IB_BASE 0x0738 #define RADEON_CP_CSQ_CNTL 0x0740 # define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0) # define RADEON_CSQ_PRIDIS_INDDIS (0 << 28) # define RADEON_CSQ_PRIPIO_INDDIS (1 << 28) # define RADEON_CSQ_PRIBM_INDDIS (2 << 28) # define RADEON_CSQ_PRIPIO_INDBM (3 << 28) # define RADEON_CSQ_PRIBM_INDBM (4 << 28) # define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) #define RADEON_AIC_CNTL 0x01d0 # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) # define RS400_MSI_REARM (1 << 3) #define RADEON_AIC_STAT 0x01d4 #define RADEON_AIC_PT_BASE 0x01d8 #define RADEON_AIC_LO_ADDR 0x01dc #define RADEON_AIC_HI_ADDR 0x01e0 #define RADEON_AIC_TLB_ADDR 0x01e4 #define RADEON_AIC_TLB_DATA 0x01e8 /* CP command packets */ #define RADEON_CP_PACKET0 0x00000000 # define RADEON_ONE_REG_WR (1 << 15) #define RADEON_CP_PACKET1 0x40000000 #define RADEON_CP_PACKET2 0x80000000 #define RADEON_CP_PACKET3 0xC0000000 # define RADEON_CP_NOP 0x00001000 # define RADEON_CP_NEXT_CHAR 0x00001900 # define RADEON_CP_PLY_NEXTSCAN 0x00001D00 # define RADEON_CP_SET_SCISSORS 0x00001E00 /* GEN_INDX_PRIM is unsupported starting with R300 */ # define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 # define RADEON_WAIT_FOR_IDLE 0x00002600 # define RADEON_3D_DRAW_VBUF 0x00002800 # define RADEON_3D_DRAW_IMMD 0x00002900 # define RADEON_3D_DRAW_INDX 0x00002A00 # define RADEON_CP_LOAD_PALETTE 0x00002C00 # define RADEON_3D_LOAD_VBPNTR 0x00002F00 # define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 # define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 # define RADEON_3D_CLEAR_ZMASK 0x00003200 # define RADEON_CP_INDX_BUFFER 0x00003300 # define RADEON_CP_3D_DRAW_VBUF_2 0x00003400 # define RADEON_CP_3D_DRAW_IMMD_2 0x00003500 # define RADEON_CP_3D_DRAW_INDX_2 0x00003600 # define RADEON_3D_CLEAR_HIZ 0x00003700 # define RADEON_CP_3D_CLEAR_CMASK 0x00003802 # define RADEON_CNTL_HOSTDATA_BLT 0x00009400 # define RADEON_CNTL_PAINT_MULTI 0x00009A00 # define RADEON_CNTL_BITBLT_MULTI 0x00009B00 # define RADEON_CNTL_SET_SCISSORS 0xC0001E00 # define R600_IT_INDIRECT_BUFFER 0x00003200 # define R600_IT_ME_INITIALIZE 0x00004400 # define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) # define R600_IT_EVENT_WRITE 0x00004600 # define R600_IT_SET_CONFIG_REG 0x00006800 # define R600_SET_CONFIG_REG_OFFSET 0x00008000 # define R600_SET_CONFIG_REG_END 0x0000ac00 #define RADEON_CP_PACKET_MASK 0xC0000000 #define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 #define RADEON_CP_PACKET0_REG_MASK 0x000007ff #define RADEON_CP_PACKET1_REG0_MASK 0x000007ff #define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 -#define RADEON_VTX_Z_PRESENT (1 << 31) +#define RADEON_VTX_Z_PRESENT (1U << 31) #define RADEON_VTX_PKCOLOR_PRESENT (1 << 3) #define RADEON_PRIM_TYPE_NONE (0 << 0) #define RADEON_PRIM_TYPE_POINT (1 << 0) #define RADEON_PRIM_TYPE_LINE (2 << 0) #define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0) #define RADEON_PRIM_TYPE_TRI_LIST (4 << 0) #define RADEON_PRIM_TYPE_TRI_FAN (5 << 0) #define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0) #define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0) #define 
RADEON_PRIM_TYPE_RECT_LIST (8 << 0) #define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) #define RADEON_PRIM_TYPE_MASK 0xf #define RADEON_PRIM_WALK_IND (1 << 4) #define RADEON_PRIM_WALK_LIST (2 << 4) #define RADEON_PRIM_WALK_RING (3 << 4) #define RADEON_COLOR_ORDER_BGRA (0 << 6) #define RADEON_COLOR_ORDER_RGBA (1 << 6) #define RADEON_MAOS_ENABLE (1 << 7) #define RADEON_VTX_FMT_R128_MODE (0 << 8) #define RADEON_VTX_FMT_RADEON_MODE (1 << 8) #define RADEON_NUM_VERTICES_SHIFT 16 #define RADEON_COLOR_FORMAT_CI8 2 #define RADEON_COLOR_FORMAT_ARGB1555 3 #define RADEON_COLOR_FORMAT_RGB565 4 #define RADEON_COLOR_FORMAT_ARGB8888 6 #define RADEON_COLOR_FORMAT_RGB332 7 #define RADEON_COLOR_FORMAT_RGB8 9 #define RADEON_COLOR_FORMAT_ARGB4444 15 #define RADEON_TXFORMAT_I8 0 #define RADEON_TXFORMAT_AI88 1 #define RADEON_TXFORMAT_RGB332 2 #define RADEON_TXFORMAT_ARGB1555 3 #define RADEON_TXFORMAT_RGB565 4 #define RADEON_TXFORMAT_ARGB4444 5 #define RADEON_TXFORMAT_ARGB8888 6 #define RADEON_TXFORMAT_RGBA8888 7 #define RADEON_TXFORMAT_Y8 8 #define RADEON_TXFORMAT_VYUY422 10 #define RADEON_TXFORMAT_YVYU422 11 #define RADEON_TXFORMAT_DXT1 12 #define RADEON_TXFORMAT_DXT23 14 #define RADEON_TXFORMAT_DXT45 15 #define R200_PP_TXCBLEND_0 0x2f00 #define R200_PP_TXCBLEND_1 0x2f10 #define R200_PP_TXCBLEND_2 0x2f20 #define R200_PP_TXCBLEND_3 0x2f30 #define R200_PP_TXCBLEND_4 0x2f40 #define R200_PP_TXCBLEND_5 0x2f50 #define R200_PP_TXCBLEND_6 0x2f60 #define R200_PP_TXCBLEND_7 0x2f70 #define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268 #define R200_PP_TFACTOR_0 0x2ee0 #define R200_SE_VTX_FMT_0 0x2088 #define R200_SE_VAP_CNTL 0x2080 #define R200_SE_TCL_MATRIX_SEL_0 0x2230 #define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8 #define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0 #define R200_PP_TXFILTER_5 0x2ca0 #define R200_PP_TXFILTER_4 0x2c80 #define R200_PP_TXFILTER_3 0x2c60 #define R200_PP_TXFILTER_2 0x2c40 #define R200_PP_TXFILTER_1 0x2c20 #define R200_PP_TXFILTER_0 0x2c00 #define R200_PP_TXOFFSET_5 0x2d78 #define R200_PP_TXOFFSET_4 0x2d60 #define R200_PP_TXOFFSET_3 0x2d48 #define R200_PP_TXOFFSET_2 0x2d30 #define R200_PP_TXOFFSET_1 0x2d18 #define R200_PP_TXOFFSET_0 0x2d00 #define R200_PP_CUBIC_FACES_0 0x2c18 #define R200_PP_CUBIC_FACES_1 0x2c38 #define R200_PP_CUBIC_FACES_2 0x2c58 #define R200_PP_CUBIC_FACES_3 0x2c78 #define R200_PP_CUBIC_FACES_4 0x2c98 #define R200_PP_CUBIC_FACES_5 0x2cb8 #define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 #define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 #define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c #define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 #define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 #define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c #define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 #define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 #define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 #define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c #define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 #define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 #define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c #define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 #define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 #define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c #define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 #define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 #define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 #define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c #define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 #define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 #define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c #define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 #define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 #define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c #define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 #define 
R200_PP_CUBIC_OFFSET_F3_5 0x2d84 #define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 #define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c #define R200_RE_AUX_SCISSOR_CNTL 0x26f0 #define R200_SE_VTE_CNTL 0x20b0 #define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 #define R200_PP_TAM_DEBUG3 0x2d9c #define R200_PP_CNTL_X 0x2cc4 #define R200_SE_VAP_CNTL_STATUS 0x2140 #define R200_RE_SCISSOR_TL_0 0x1cd8 #define R200_RE_SCISSOR_TL_1 0x1ce0 #define R200_RE_SCISSOR_TL_2 0x1ce8 #define R200_RB3D_DEPTHXY_OFFSET 0x1d60 #define R200_RE_AUX_SCISSOR_CNTL 0x26f0 #define R200_SE_VTX_STATE_CNTL 0x2180 #define R200_RE_POINTSIZE 0x2648 #define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254 #define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */ #define RADEON_PP_TEX_SIZE_1 0x1d0c #define RADEON_PP_TEX_SIZE_2 0x1d14 #define RADEON_PP_CUBIC_FACES_0 0x1d24 #define RADEON_PP_CUBIC_FACES_1 0x1d28 #define RADEON_PP_CUBIC_FACES_2 0x1d2c #define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */ #define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00 #define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14 #define RADEON_SE_TCL_STATE_FLUSH 0x2284 #define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001 #define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000 #define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012 #define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100 #define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200 #define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001 #define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002 #define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b #define R200_3D_DRAW_IMMD_2 0xC0003500 #define R200_SE_VTX_FMT_1 0x208c #define R200_RE_CNTL 0x1c50 #define R200_RB3D_BLENDCOLOR 0x3218 #define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4 #define R200_PP_TRI_PERF 0x2cf8 #define R200_PP_AFS_0 0x2f80 #define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ #define R200_VAP_PVS_CNTL_1 0x22D0 #define RADEON_CRTC_CRNT_FRAME 0x0214 #define RADEON_CRTC2_CRNT_FRAME 0x0314 #define R500_D1CRTC_STATUS 0x609c #define R500_D2CRTC_STATUS 0x689c #define R500_CRTC_V_BLANK (1<<0) #define R500_D1CRTC_FRAME_COUNT 0x60a4 #define R500_D2CRTC_FRAME_COUNT 0x68a4 #define R500_D1MODE_V_COUNTER 0x6530 #define R500_D2MODE_V_COUNTER 0x6d30 #define R500_D1MODE_VBLANK_STATUS 0x6534 #define R500_D2MODE_VBLANK_STATUS 0x6d34 #define R500_VBLANK_OCCURED (1<<0) #define R500_VBLANK_ACK (1<<4) #define R500_VBLANK_STAT (1<<12) #define R500_VBLANK_INT (1<<16) #define R500_DxMODE_INT_MASK 0x6540 #define R500_D1MODE_INT_MASK (1<<0) #define R500_D2MODE_INT_MASK (1<<8) #define R500_DISP_INTERRUPT_STATUS 0x7edc #define R500_D1_VBLANK_INTERRUPT (1 << 4) #define R500_D2_VBLANK_INTERRUPT (1 << 5) /* R6xx/R7xx registers */ #define R600_MC_VM_FB_LOCATION 0x2180 #define R600_MC_VM_AGP_TOP 0x2184 #define R600_MC_VM_AGP_BOT 0x2188 #define R600_MC_VM_AGP_BASE 0x218c #define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190 #define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194 #define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198 #define R700_MC_VM_FB_LOCATION 0x2024 #define R700_MC_VM_AGP_TOP 0x2028 #define R700_MC_VM_AGP_BOT 0x202c #define R700_MC_VM_AGP_BASE 0x2030 #define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 #define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c #define R600_MCD_RD_A_CNTL 0x219c #define R600_MCD_RD_B_CNTL 0x21a0 #define R600_MCD_WR_A_CNTL 0x21a4 #define R600_MCD_WR_B_CNTL 0x21a8 #define R600_MCD_RD_SYS_CNTL 0x2200 #define R600_MCD_WR_SYS_CNTL 0x2214 #define R600_MCD_RD_GFX_CNTL 0x21fc #define R600_MCD_RD_HDP_CNTL 0x2204 #define R600_MCD_RD_PDMA_CNTL 0x2208 #define 
R600_MCD_RD_SEM_CNTL 0x220c #define R600_MCD_WR_GFX_CNTL 0x2210 #define R600_MCD_WR_HDP_CNTL 0x2218 #define R600_MCD_WR_PDMA_CNTL 0x221c #define R600_MCD_WR_SEM_CNTL 0x2220 # define R600_MCD_L1_TLB (1 << 0) # define R600_MCD_L1_FRAG_PROC (1 << 1) # define R600_MCD_L1_STRICT_ORDERING (1 << 2) # define R600_MCD_SYSTEM_ACCESS_MODE_MASK (3 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS (2 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6) # define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8) # define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8) # define R600_MCD_SEMAPHORE_MODE (1 << 10) # define R600_MCD_WAIT_L2_QUERY (1 << 11) # define R600_MCD_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 12) # define R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15) #define R700_MC_VM_MD_L1_TLB0_CNTL 0x2654 #define R700_MC_VM_MD_L1_TLB1_CNTL 0x2658 #define R700_MC_VM_MD_L1_TLB2_CNTL 0x265c #define R700_MC_VM_MB_L1_TLB0_CNTL 0x2234 #define R700_MC_VM_MB_L1_TLB1_CNTL 0x2238 #define R700_MC_VM_MB_L1_TLB2_CNTL 0x223c #define R700_MC_VM_MB_L1_TLB3_CNTL 0x2240 # define R700_ENABLE_L1_TLB (1 << 0) # define R700_ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) # define R700_SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) # define R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) # define R700_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 15) # define R700_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 18) #define R700_MC_ARB_RAMCFG 0x2760 # define R700_NOOFBANK_SHIFT 0 # define R700_NOOFBANK_MASK 0x3 # define R700_NOOFRANK_SHIFT 2 # define R700_NOOFRANK_MASK 0x1 # define R700_NOOFROWS_SHIFT 3 # define R700_NOOFROWS_MASK 0x7 # define R700_NOOFCOLS_SHIFT 6 # define R700_NOOFCOLS_MASK 0x3 # define R700_CHANSIZE_SHIFT 8 # define R700_CHANSIZE_MASK 0x1 # define R700_BURSTLENGTH_SHIFT 9 # define R700_BURSTLENGTH_MASK 0x1 #define R600_RAMCFG 0x2408 # define R600_NOOFBANK_SHIFT 0 # define R600_NOOFBANK_MASK 0x1 # define R600_NOOFRANK_SHIFT 1 # define R600_NOOFRANK_MASK 0x1 # define R600_NOOFROWS_SHIFT 2 # define R600_NOOFROWS_MASK 0x7 # define R600_NOOFCOLS_SHIFT 5 # define R600_NOOFCOLS_MASK 0x3 # define R600_CHANSIZE_SHIFT 7 # define R600_CHANSIZE_MASK 0x1 # define R600_BURSTLENGTH_SHIFT 8 # define R600_BURSTLENGTH_MASK 0x1 #define R600_VM_L2_CNTL 0x1400 # define R600_VM_L2_CACHE_EN (1 << 0) # define R600_VM_L2_FRAG_PROC (1 << 1) # define R600_VM_ENABLE_PTE_CACHE_LRU_W (1 << 9) # define R600_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 13) # define R700_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 14) #define R600_VM_L2_CNTL2 0x1404 # define R600_VM_L2_CNTL2_INVALIDATE_ALL_L1_TLBS (1 << 0) # define R600_VM_L2_CNTL2_INVALIDATE_L2_CACHE (1 << 1) #define R600_VM_L2_CNTL3 0x1408 # define R600_VM_L2_CNTL3_BANK_SELECT_0(x) ((x) << 0) # define R600_VM_L2_CNTL3_BANK_SELECT_1(x) ((x) << 5) # define R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 10) # define R700_VM_L2_CNTL3_BANK_SELECT(x) ((x) << 0) # define R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 6) #define R600_VM_L2_STATUS 0x140c #define R600_VM_CONTEXT0_CNTL 0x1410 # define R600_VM_ENABLE_CONTEXT (1 << 0) # define R600_VM_PAGE_TABLE_DEPTH_FLAT (0 << 1) #define R600_VM_CONTEXT0_CNTL2 0x1430 #define R600_VM_CONTEXT0_REQUEST_RESPONSE 0x1470 #define R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490 #define R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14b0 #define R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574 #define R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594 #define R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR 
0x15b4 #define R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c #define R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c #define R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157c #define R600_HDP_HOST_PATH_CNTL 0x2c00 #define R600_GRBM_CNTL 0x8000 # define R600_GRBM_READ_TIMEOUT(x) ((x) << 0) #define R600_GRBM_STATUS 0x8010 # define R600_CMDFIFO_AVAIL_MASK 0x1f # define R700_CMDFIFO_AVAIL_MASK 0xf -# define R600_GUI_ACTIVE (1 << 31) +# define R600_GUI_ACTIVE (1U << 31) #define R600_GRBM_STATUS2 0x8014 #define R600_GRBM_SOFT_RESET 0x8020 # define R600_SOFT_RESET_CP (1 << 0) #define R600_WAIT_UNTIL 0x8040 #define R600_CP_SEM_WAIT_TIMER 0x85bc #define R600_CP_ME_CNTL 0x86d8 # define R600_CP_ME_HALT (1 << 28) #define R600_CP_QUEUE_THRESHOLDS 0x8760 # define R600_ROQ_IB1_START(x) ((x) << 0) # define R600_ROQ_IB2_START(x) ((x) << 8) #define R600_CP_MEQ_THRESHOLDS 0x8764 # define R700_STQ_SPLIT(x) ((x) << 0) # define R600_MEQ_END(x) ((x) << 16) # define R600_ROQ_END(x) ((x) << 24) #define R600_CP_PERFMON_CNTL 0x87fc #define R600_CP_RB_BASE 0xc100 #define R600_CP_RB_CNTL 0xc104 # define R600_RB_BUFSZ(x) ((x) << 0) # define R600_RB_BLKSZ(x) ((x) << 8) # define R600_RB_NO_UPDATE (1 << 27) -# define R600_RB_RPTR_WR_ENA (1 << 31) +# define R600_RB_RPTR_WR_ENA (1U << 31) #define R600_CP_RB_RPTR_WR 0xc108 #define R600_CP_RB_RPTR_ADDR 0xc10c #define R600_CP_RB_RPTR_ADDR_HI 0xc110 #define R600_CP_RB_WPTR 0xc114 #define R600_CP_RB_WPTR_ADDR 0xc118 #define R600_CP_RB_WPTR_ADDR_HI 0xc11c #define R600_CP_RB_RPTR 0x8700 #define R600_CP_RB_WPTR_DELAY 0x8704 #define R600_CP_PFP_UCODE_ADDR 0xc150 #define R600_CP_PFP_UCODE_DATA 0xc154 #define R600_CP_ME_RAM_RADDR 0xc158 #define R600_CP_ME_RAM_WADDR 0xc15c #define R600_CP_ME_RAM_DATA 0xc160 #define R600_CP_DEBUG 0xc1fc #define R600_PA_CL_ENHANCE 0x8a14 # define R600_CLIP_VTX_REORDER_ENA (1 << 0) # define R600_NUM_CLIP_SEQ(x) ((x) << 1) #define R600_PA_SC_LINE_STIPPLE_STATE 0x8b10 #define R600_PA_SC_MULTI_CHIP_CNTL 0x8b20 #define R700_PA_SC_FORCE_EOV_MAX_CNTS 0x8b24 # define R700_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) # define R700_FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) #define R600_PA_SC_AA_SAMPLE_LOCS_2S 0x8b40 #define R600_PA_SC_AA_SAMPLE_LOCS_4S 0x8b44 #define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8b48 #define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8b4c # define R600_S0_X(x) ((x) << 0) # define R600_S0_Y(x) ((x) << 4) # define R600_S1_X(x) ((x) << 8) # define R600_S1_Y(x) ((x) << 12) # define R600_S2_X(x) ((x) << 16) # define R600_S2_Y(x) ((x) << 20) # define R600_S3_X(x) ((x) << 24) # define R600_S3_Y(x) ((x) << 28) # define R600_S4_X(x) ((x) << 0) # define R600_S4_Y(x) ((x) << 4) # define R600_S5_X(x) ((x) << 8) # define R600_S5_Y(x) ((x) << 12) # define R600_S6_X(x) ((x) << 16) # define R600_S6_Y(x) ((x) << 20) # define R600_S7_X(x) ((x) << 24) # define R600_S7_Y(x) ((x) << 28) #define R600_PA_SC_FIFO_SIZE 0x8bd0 # define R600_SC_PRIM_FIFO_SIZE(x) ((x) << 0) # define R600_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 8) # define R600_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 16) #define R700_PA_SC_FIFO_SIZE_R7XX 0x8bcc # define R700_SC_PRIM_FIFO_SIZE(x) ((x) << 0) # define R700_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) # define R700_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) #define R600_PA_SC_ENHANCE 0x8bf0 # define R600_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) # define R600_FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12) #define R600_PA_SC_CLIPRECT_RULE 0x2820c #define R700_PA_SC_EDGERULE 0x28230 #define R600_PA_SC_LINE_STIPPLE 0x28a0c #define R600_PA_SC_MODE_CNTL 0x28a4c #define R600_PA_SC_AA_CONFIG 0x28c04 #define 
R600_SX_EXPORT_BUFFER_SIZES 0x900c # define R600_COLOR_BUFFER_SIZE(x) ((x) << 0) # define R600_POSITION_BUFFER_SIZE(x) ((x) << 8) # define R600_SMX_BUFFER_SIZE(x) ((x) << 16) #define R600_SX_DEBUG_1 0x9054 # define R600_SMX_EVENT_RELEASE (1 << 0) # define R600_ENABLE_NEW_SMX_ADDRESS (1 << 16) #define R700_SX_DEBUG_1 0x9058 # define R700_ENABLE_NEW_SMX_ADDRESS (1 << 16) #define R600_SX_MISC 0x28350 #define R600_DB_DEBUG 0x9830 -# define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) +# define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE (1U << 31) #define R600_DB_WATERMARKS 0x9838 # define R600_DEPTH_FREE(x) ((x) << 0) # define R600_DEPTH_FLUSH(x) ((x) << 5) # define R600_DEPTH_PENDING_FREE(x) ((x) << 15) # define R600_DEPTH_CACHELINE_FREE(x) ((x) << 20) #define R700_DB_DEBUG3 0x98b0 # define R700_DB_CLK_OFF_DELAY(x) ((x) << 11) #define RV700_DB_DEBUG4 0x9b8c # define RV700_DISABLE_TILE_COVERED_FOR_PS_ITER (1 << 6) #define R600_VGT_CACHE_INVALIDATION 0x88c4 # define R600_CACHE_INVALIDATION(x) ((x) << 0) # define R600_VC_ONLY 0 # define R600_TC_ONLY 1 # define R600_VC_AND_TC 2 # define R700_AUTO_INVLD_EN(x) ((x) << 6) # define R700_NO_AUTO 0 # define R700_ES_AUTO 1 # define R700_GS_AUTO 2 # define R700_ES_AND_GS_AUTO 3 #define R600_VGT_GS_PER_ES 0x88c8 #define R600_VGT_ES_PER_GS 0x88cc #define R600_VGT_GS_PER_VS 0x88e8 #define R600_VGT_GS_VERTEX_REUSE 0x88d4 #define R600_VGT_NUM_INSTANCES 0x8974 #define R600_VGT_STRMOUT_EN 0x28ab0 #define R600_VGT_EVENT_INITIATOR 0x28a90 # define R600_CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) #define R600_VGT_VERTEX_REUSE_BLOCK_CNTL 0x28c58 # define R600_VTX_REUSE_DEPTH_MASK 0xff #define R600_VGT_OUT_DEALLOC_CNTL 0x28c5c # define R600_DEALLOC_DIST_MASK 0x7f #define R600_CB_COLOR0_BASE 0x28040 #define R600_CB_COLOR1_BASE 0x28044 #define R600_CB_COLOR2_BASE 0x28048 #define R600_CB_COLOR3_BASE 0x2804c #define R600_CB_COLOR4_BASE 0x28050 #define R600_CB_COLOR5_BASE 0x28054 #define R600_CB_COLOR6_BASE 0x28058 #define R600_CB_COLOR7_BASE 0x2805c #define R600_CB_COLOR7_FRAG 0x280fc #define R600_TC_CNTL 0x9608 # define R600_TC_L2_SIZE(x) ((x) << 5) # define R600_L2_DISABLE_LATE_HIT (1 << 9) #define R600_ARB_POP 0x2418 # define R600_ENABLE_TC128 (1 << 30) #define R600_ARB_GDEC_RD_CNTL 0x246c #define R600_TA_CNTL_AUX 0x9508 # define R600_DISABLE_CUBE_WRAP (1 << 0) # define R600_DISABLE_CUBE_ANISO (1 << 1) # define R700_GETLOD_SELECT(x) ((x) << 2) # define R600_SYNC_GRADIENT (1 << 24) # define R600_SYNC_WALKER (1 << 25) # define R600_SYNC_ALIGNER (1 << 26) # define R600_BILINEAR_PRECISION_6_BIT (0 << 31) -# define R600_BILINEAR_PRECISION_8_BIT (1 << 31) +# define R600_BILINEAR_PRECISION_8_BIT (1U << 31) #define R700_TCP_CNTL 0x9610 #define R600_SMX_DC_CTL0 0xa020 # define R700_USE_HASH_FUNCTION (1 << 0) # define R700_CACHE_DEPTH(x) ((x) << 1) # define R700_FLUSH_ALL_ON_EVENT (1 << 10) # define R700_STALL_ON_EVENT (1 << 11) #define R700_SMX_EVENT_CTL 0xa02c # define R700_ES_FLUSH_CTL(x) ((x) << 0) # define R700_GS_FLUSH_CTL(x) ((x) << 3) # define R700_ACK_FLUSH_CTL(x) ((x) << 6) # define R700_SYNC_FLUSH_CTL (1 << 8) #define R600_SQ_CONFIG 0x8c00 # define R600_VC_ENABLE (1 << 0) # define R600_EXPORT_SRC_C (1 << 1) # define R600_DX9_CONSTS (1 << 2) # define R600_ALU_INST_PREFER_VECTOR (1 << 3) # define R600_DX10_CLAMP (1 << 4) # define R600_CLAUSE_SEQ_PRIO(x) ((x) << 8) # define R600_PS_PRIO(x) ((x) << 24) # define R600_VS_PRIO(x) ((x) << 26) # define R600_GS_PRIO(x) ((x) << 28) # define R600_ES_PRIO(x) ((x) << 30) #define R600_SQ_GPR_RESOURCE_MGMT_1 0x8c04 # define R600_NUM_PS_GPRS(x) 
((x) << 0) # define R600_NUM_VS_GPRS(x) ((x) << 16) # define R700_DYN_GPR_ENABLE (1 << 27) # define R600_NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) #define R600_SQ_GPR_RESOURCE_MGMT_2 0x8c08 # define R600_NUM_GS_GPRS(x) ((x) << 0) # define R600_NUM_ES_GPRS(x) ((x) << 16) #define R600_SQ_THREAD_RESOURCE_MGMT 0x8c0c # define R600_NUM_PS_THREADS(x) ((x) << 0) # define R600_NUM_VS_THREADS(x) ((x) << 8) # define R600_NUM_GS_THREADS(x) ((x) << 16) # define R600_NUM_ES_THREADS(x) ((x) << 24) #define R600_SQ_STACK_RESOURCE_MGMT_1 0x8c10 # define R600_NUM_PS_STACK_ENTRIES(x) ((x) << 0) # define R600_NUM_VS_STACK_ENTRIES(x) ((x) << 16) #define R600_SQ_STACK_RESOURCE_MGMT_2 0x8c14 # define R600_NUM_GS_STACK_ENTRIES(x) ((x) << 0) # define R600_NUM_ES_STACK_ENTRIES(x) ((x) << 16) #define R600_SQ_MS_FIFO_SIZES 0x8cf0 # define R600_CACHE_FIFO_SIZE(x) ((x) << 0) # define R600_FETCH_FIFO_HIWATER(x) ((x) << 8) # define R600_DONE_FIFO_HIWATER(x) ((x) << 16) # define R600_ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_0 0x8db0 # define R700_SIMDA_RING0(x) ((x) << 0) # define R700_SIMDA_RING1(x) ((x) << 8) # define R700_SIMDB_RING0(x) ((x) << 16) # define R700_SIMDB_RING1(x) ((x) << 24) #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_1 0x8db4 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_2 0x8db8 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_3 0x8dbc #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_4 0x8dc0 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_5 0x8dc4 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_6 0x8dc8 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_7 0x8dcc #define R600_SPI_PS_IN_CONTROL_0 0x286cc # define R600_NUM_INTERP(x) ((x) << 0) # define R600_POSITION_ENA (1 << 8) # define R600_POSITION_CENTROID (1 << 9) # define R600_POSITION_ADDR(x) ((x) << 10) # define R600_PARAM_GEN(x) ((x) << 15) # define R600_PARAM_GEN_ADDR(x) ((x) << 19) # define R600_BARYC_SAMPLE_CNTL(x) ((x) << 26) # define R600_PERSP_GRADIENT_ENA (1 << 28) # define R600_LINEAR_GRADIENT_ENA (1 << 29) # define R600_POSITION_SAMPLE (1 << 30) -# define R600_BARYC_AT_SAMPLE_ENA (1 << 31) +# define R600_BARYC_AT_SAMPLE_ENA (1U << 31) #define R600_SPI_PS_IN_CONTROL_1 0x286d0 # define R600_GEN_INDEX_PIX (1 << 0) # define R600_GEN_INDEX_PIX_ADDR(x) ((x) << 1) # define R600_FRONT_FACE_ENA (1 << 8) # define R600_FRONT_FACE_CHAN(x) ((x) << 9) # define R600_FRONT_FACE_ALL_BITS (1 << 11) # define R600_FRONT_FACE_ADDR(x) ((x) << 12) # define R600_FOG_ADDR(x) ((x) << 17) # define R600_FIXED_PT_POSITION_ENA (1 << 24) # define R600_FIXED_PT_POSITION_ADDR(x) ((x) << 25) # define R700_POSITION_ULC (1 << 30) #define R600_SPI_INPUT_Z 0x286d8 #define R600_SPI_CONFIG_CNTL 0x9100 # define R600_GPR_WRITE_PRIORITY(x) ((x) << 0) # define R600_DISABLE_INTERP_1 (1 << 5) #define R600_SPI_CONFIG_CNTL_1 0x913c # define R600_VTX_DONE_DELAY(x) ((x) << 0) # define R600_INTERP_ONE_PRIM_PER_ROW (1 << 4) #define R600_GB_TILING_CONFIG 0x98f0 # define R600_PIPE_TILING(x) ((x) << 1) # define R600_BANK_TILING(x) ((x) << 4) # define R600_GROUP_SIZE(x) ((x) << 6) # define R600_ROW_TILING(x) ((x) << 8) # define R600_BANK_SWAPS(x) ((x) << 11) # define R600_SAMPLE_SPLIT(x) ((x) << 14) # define R600_BACKEND_MAP(x) ((x) << 16) #define R600_DCP_TILING_CONFIG 0x6ca0 #define R600_HDP_TILING_CONFIG 0x2f3c #define R600_CC_RB_BACKEND_DISABLE 0x98f4 #define R700_CC_SYS_RB_BACKEND_DISABLE 0x3f88 # define R600_BACKEND_DISABLE(x) ((x) << 16) #define R600_CC_GC_SHADER_PIPE_CONFIG 0x8950 #define R600_GC_USER_SHADER_PIPE_CONFIG 0x8954 # define R600_INACTIVE_QD_PIPES(x) ((x) << 8) # define R600_INACTIVE_QD_PIPES_MASK (0xff << 8) # define 
R600_INACTIVE_SIMDS(x) ((x) << 16)
# define R600_INACTIVE_SIMDS_MASK (0xff << 16)

#define R700_CGTS_SYS_TCC_DISABLE 0x3f90
#define R700_CGTS_USER_SYS_TCC_DISABLE 0x3f94
#define R700_CGTS_TCC_DISABLE 0x9148
#define R700_CGTS_USER_TCC_DISABLE 0x914c

/* Constants */
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */

#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0
#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1
#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2
#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3
#define RADEON_LAST_DISPATCH 1

#define R600_LAST_FRAME_REG R600_SCRATCH_REG0
#define R600_LAST_DISPATCH_REG R600_SCRATCH_REG1
#define R600_LAST_CLEAR_REG R600_SCRATCH_REG2
#define R600_LAST_SWI_REG R600_SCRATCH_REG3

#define RADEON_MAX_VB_AGE 0x7fffffff
#define RADEON_MAX_VB_VERTS (0xffff)

#define RADEON_RING_HIGH_MARK 128

#define RADEON_PCIGART_TABLE_SIZE (32*1024)

#define RADEON_READ(reg)	DRM_READ32( dev_priv->mmio, (reg) )
#define RADEON_WRITE(reg, val)						\
do {									\
	if ((reg) < 0x10000) {						\
		DRM_WRITE32(dev_priv->mmio, (reg), (val));		\
	} else {							\
		DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, (reg));	\
		DRM_WRITE32(dev_priv->mmio, RADEON_MM_DATA, (val));	\
	}								\
} while (0)
#define RADEON_READ8(reg)	DRM_READ8( dev_priv->mmio, (reg) )
#define RADEON_WRITE8(reg,val)	DRM_WRITE8( dev_priv->mmio, (reg), (val) )

#define RADEON_WRITE_PLL(addr, val)					\
do {									\
	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX,				\
		      ((addr) & 0x1f) | RADEON_PLL_WR_EN );		\
	RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val));			\
} while (0)

#define RADEON_WRITE_PCIE(addr, val)					\
do {									\
	RADEON_WRITE8(RADEON_PCIE_INDEX,				\
		      ((addr) & 0xff));					\
	RADEON_WRITE(RADEON_PCIE_DATA, (val));				\
} while (0)

#define R500_WRITE_MCIND(addr, val)					\
do {									\
	RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));	\
	RADEON_WRITE(R520_MC_IND_DATA, (val));				\
	RADEON_WRITE(R520_MC_IND_INDEX, 0);				\
} while (0)

#define RS480_WRITE_MCIND(addr, val)					\
do {									\
	RADEON_WRITE(RS480_NB_MC_INDEX,					\
		     ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);		\
	RADEON_WRITE(RS480_NB_MC_DATA, (val));				\
	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);				\
} while (0)

#define RS690_WRITE_MCIND(addr, val)					\
do {									\
	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));	\
	RADEON_WRITE(RS690_MC_DATA, val);				\
	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);		\
} while (0)

#define RS600_WRITE_MCIND(addr, val)					\
do {									\
	RADEON_WRITE(RS600_MC_INDEX, RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | ((addr) & RS600_MC_ADDR_MASK));	\
	RADEON_WRITE(RS600_MC_DATA, val);				\
} while (0)

#define IGP_WRITE_MCIND(addr, val)					\
do {									\
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||	\
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))	\
		RS690_WRITE_MCIND(addr, val);				\
	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)	\
		RS600_WRITE_MCIND(addr, val);				\
	else								\
		RS480_WRITE_MCIND(addr, val);				\
} while (0)

#define CP_PACKET0( reg, n )						\
	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE( reg, n )					\
	(RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET1( reg0, reg1 )					\
	(RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
#define CP_PACKET2()							\
	(RADEON_CP_PACKET2)
#define CP_PACKET3( pkt, n )						\
	(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
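A worked example of the type-0 packet encoding may help here; the arithmetic below follows directly from the CP_PACKET* and RADEON_CP_PACKET* defines in this header (editor's illustration only):

/*
 * CP_PACKET0(reg, n) packs "write n+1 dwords starting at register reg"
 * into one 32-bit header: bits 31:30 are 00 (RADEON_CP_PACKET0), bits
 * 29:16 hold n (RADEON_CP_PACKET_COUNT_MASK) and bits 10:0 hold reg >> 2
 * (RADEON_CP_PACKET0_REG_MASK).  For instance:
 *
 *	CP_PACKET0(RADEON_WAIT_UNTIL, 0)
 *	    == RADEON_CP_PACKET0 | (0 << 16) | (0x1720 >> 2)
 *	    == 0x000005c8
 *
 * i.e. "write one dword at RADEON_WAIT_UNTIL", which is exactly how the
 * engine-control helpers below use it.
 */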
/* ================================================================
 * Engine control helper macros
 */

#define RADEON_WAIT_UNTIL_2D_IDLE() do {				\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)	\
		OUT_RING( CP_PACKET0( R600_WAIT_UNTIL, 0 ) );		\
	else								\
		OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );		\
	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
		   RADEON_WAIT_HOST_IDLECLEAN) );			\
} while (0)

#define RADEON_WAIT_UNTIL_3D_IDLE() do {				\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)	\
		OUT_RING( CP_PACKET0( R600_WAIT_UNTIL, 0 ) );		\
	else								\
		OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );		\
	OUT_RING( (RADEON_WAIT_3D_IDLECLEAN |				\
		   RADEON_WAIT_HOST_IDLECLEAN) );			\
} while (0)

#define RADEON_WAIT_UNTIL_IDLE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)	\
		OUT_RING( CP_PACKET0( R600_WAIT_UNTIL, 0 ) );		\
	else								\
		OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );		\
	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
		   RADEON_WAIT_3D_IDLECLEAN |				\
		   RADEON_WAIT_HOST_IDLECLEAN) );			\
} while (0)

#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {				\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)	\
		OUT_RING( CP_PACKET0( R600_WAIT_UNTIL, 0 ) );		\
	else								\
		OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );		\
	OUT_RING( RADEON_WAIT_CRTC_PFLIP );				\
} while (0)

#define RADEON_FLUSH_CACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
	} else {							\
		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_RB3D_DC_FLUSH);				\
	}								\
} while (0)

#define RADEON_PURGE_CACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);	\
	} else {							\
		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);	\
	}								\
} while (0)

#define RADEON_FLUSH_ZCACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_ZC_FLUSH);				\
	} else {							\
		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_ZC_FLUSH);				\
	}								\
} while (0)

#define RADEON_PURGE_ZCACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);	\
	} else {							\
		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);			\
	}								\
} while (0)
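As a usage sketch (hypothetical; the surrounding dispatch function is not shown), note that each helper above emits exactly two dwords, a type-0 header plus the flush bits, so a caller flushing both caches reserves four. BEGIN_RING(), ADVANCE_RING() and COMMIT_RING() are defined further below:

	/* Editor's illustration only: bracket state changes with the
	 * flush helpers inside some dispatch function. */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();	/* picks R100- or R300-class register */
	RADEON_FLUSH_ZCACHE();
	ADVANCE_RING();
	COMMIT_RING();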
*/ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ do { \ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \ u32 head = GET_RING_HEAD( dev_priv ); \ if (head == dev_priv->ring.tail) \ dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \ } \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ do { \ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ int __ret; \ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) \ __ret = r600_do_cp_idle(dev_priv); \ else \ __ret = radeon_do_cp_idle(dev_priv); \ if ( __ret ) return __ret; \ sarea_priv->last_dispatch = 0; \ radeon_freelist_reset( dev ); \ } \ } while (0) #define RADEON_DISPATCH_AGE( age ) do { \ OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \ OUT_RING( age ); \ } while (0) #define RADEON_FRAME_AGE( age ) do { \ OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \ OUT_RING( age ); \ } while (0) #define RADEON_CLEAR_AGE( age ) do { \ OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \ OUT_RING( age ); \ } while (0) #define R600_DISPATCH_AGE(age) do { \ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \ OUT_RING((R600_LAST_DISPATCH_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \ OUT_RING(age); \ } while (0) #define R600_FRAME_AGE(age) do { \ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \ OUT_RING((R600_LAST_FRAME_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \ OUT_RING(age); \ } while (0) #define R600_CLEAR_AGE(age) do { \ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \ OUT_RING((R600_LAST_CLEAR_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \ OUT_RING(age); \ } while (0) /* ================================================================ * Ring control */ #define RADEON_VERBOSE 0 #define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring; #define RADEON_RING_ALIGN 16 #define BEGIN_RING( n ) do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ } \ _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN - 1)); \ _align_nr += n; \ if ( dev_priv->ring.space <= (_align_nr) * sizeof(u32) ) { \ COMMIT_RING(); \ radeon_wait_ring( dev_priv, (_align_nr) * sizeof(u32) ); \ } \ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ring = dev_priv->ring.start; \ write = dev_priv->ring.tail; \ mask = dev_priv->ring.tail_mask; \ } while (0) #define ADVANCE_RING() do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ write, dev_priv->ring.tail ); \ } \ if (((dev_priv->ring.tail + _nr) & mask) != write) { \ DRM_ERROR( \ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ((dev_priv->ring.tail + _nr) & mask), \ write, __LINE__); \ } else \ dev_priv->ring.tail = write; \ } while (0) extern void radeon_commit_ring(drm_radeon_private_t *dev_priv); #define COMMIT_RING() do { \ radeon_commit_ring(dev_priv); \ } while(0) #define OUT_RING( x ) do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ (unsigned int)(x), write ); \ } \ ring[write++] = (x); \ write &= mask; \ } while (0) #define OUT_RING_REG( reg, val ) do { \ OUT_RING( CP_PACKET0( reg, 0 ) ); \ OUT_RING( val ); \ } while (0) #define OUT_RING_TABLE( tab, sz ) do { \ int _size = (sz); \ int *_tab = (int *)(tab); \ \ if (write + _size > mask) { \ int _i = (mask+1) - write; \ _size -= _i; \ while (_i > 0 ) { \ *(int *)(ring + write) = *_tab++; \ write++; \ _i--; \ } \ write = 0; \ _tab += _i; \ } \ while (_size > 0) { \ *(ring + write) = *_tab++; \ write++; \ _size--; \ } \ write &= mask; \ } while (0) #endif 
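A minimal usage sketch of how these ring macros compose (the function name and the choice of packet are hypothetical; the macros, including RADEON_CLEAR_AGE above, are the ones defined in this header):

	static void radeon_emit_clear_age(drm_radeon_private_t *dev_priv, u32 age)
	{
		RING_LOCALS;		/* declares write, _nr, _align_nr, mask, ring */

		BEGIN_RING(2);		/* reserve 2 dwords, stalling if the ring is full */
		RADEON_CLEAR_AGE(age);	/* CP_PACKET0(RADEON_LAST_CLEAR_REG, 0) + age */
		ADVANCE_RING();		/* publish the new tail after the mismatch check */
		COMMIT_RING();		/* radeon_commit_ring() makes it visible to the CP */
	}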
/* __RADEON_DRV_H__ */ Index: head/sys/dev/drm/via_irq.c =================================================================== --- head/sys/dev/drm/via_irq.c (revision 258779) +++ head/sys/dev/drm/via_irq.c (revision 258780) @@ -1,392 +1,392 @@ /* via_irq.c * * Copyright 2004 BEAM Ltd. * Copyright 2002 Tungsten Graphics, Inc. * Copyright 2005 Thomas Hellstrom. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Terry Barnaby * Keith Whitwell * Thomas Hellstrom * * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank * interrupt, as well as an infrastructure to handle other interrupts of the chip. * The refresh rate is also calculated for video playback sync purposes. */ #include __FBSDID("$FreeBSD$"); #include "dev/drm/drmP.h" #include "dev/drm/drm.h" #include "dev/drm/via_drm.h" #include "dev/drm/via_drv.h" #define VIA_REG_INTERRUPT 0x200 /* VIA_REG_INTERRUPT */ -#define VIA_IRQ_GLOBAL (1 << 31) +#define VIA_IRQ_GLOBAL (1U << 31) #define VIA_IRQ_VBLANK_ENABLE (1 << 19) #define VIA_IRQ_VBLANK_PENDING (1 << 3) #define VIA_IRQ_HQV0_ENABLE (1 << 11) #define VIA_IRQ_HQV1_ENABLE (1 << 25) #define VIA_IRQ_HQV0_PENDING (1 << 9) #define VIA_IRQ_HQV1_PENDING (1 << 10) #define VIA_IRQ_DMA0_DD_ENABLE (1 << 20) #define VIA_IRQ_DMA0_TD_ENABLE (1 << 21) #define VIA_IRQ_DMA1_DD_ENABLE (1 << 22) #define VIA_IRQ_DMA1_TD_ENABLE (1 << 23) #define VIA_IRQ_DMA0_DD_PENDING (1 << 4) #define VIA_IRQ_DMA0_TD_PENDING (1 << 5) #define VIA_IRQ_DMA1_DD_PENDING (1 << 6) #define VIA_IRQ_DMA1_TD_PENDING (1 << 7) /* * Device-specific IRQs go here. This type might need to be extended with * the register if there are multiple IRQ control registers. * Currently we activate the HQV interrupts of Unichrome Pro group A. 
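* Each maskarray_t entry is laid out as { irq_enable_mask, irq_pending_mask, status_reg, status_mask, status_value }: the first two fields index VIA_REG_INTERRUPT, while a non-zero status_reg lets via_driver_irq_wait() poll (VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4] instead of sleeping on the raw sequence counter (layout inferred from the uses below).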
*/ static maskarray_t via_pro_group_a_irqs[] = { {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 0x00000000 }, {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 0x00000000 }, {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, }; static int via_num_pro_group_a = DRM_ARRAY_SIZE(via_pro_group_a_irqs); static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; static maskarray_t via_unichrome_irqs[] = { {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} }; static int via_num_unichrome = DRM_ARRAY_SIZE(via_unichrome_irqs); static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; static unsigned time_diff(struct timeval *now, struct timeval *then) { return (now->tv_usec >= then->tv_usec) ? now->tv_usec - then->tv_usec : 1000000 - (then->tv_usec - now->tv_usec); } u32 via_get_vblank_counter(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; if (crtc != 0) return 0; return atomic_read(&dev_priv->vbl_received); } irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; int handled = 0; struct timeval cur_vblank; drm_via_irq_t *cur_irq = dev_priv->via_irqs; int i; status = VIA_READ(VIA_REG_INTERRUPT); if (status & VIA_IRQ_VBLANK_PENDING) { atomic_inc(&dev_priv->vbl_received); if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { microtime(&cur_vblank); if (dev_priv->last_vblank_valid) { dev_priv->usec_per_vblank = time_diff(&cur_vblank, &dev_priv->last_vblank) >> 4; } dev_priv->last_vblank = cur_vblank; dev_priv->last_vblank_valid = 1; } if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", dev_priv->usec_per_vblank); } drm_handle_vblank(dev, 0); handled = 1; } for (i = 0; i < dev_priv->num_irqs; ++i) { if (status & cur_irq->pending_mask) { atomic_inc(&cur_irq->irq_received); DRM_WAKEUP(&cur_irq->irq_queue); handled = 1; if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { via_dmablit_handler(dev, 0, 1); } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) { via_dmablit_handler(dev, 1, 1); } } cur_irq++; } /* Acknowledge interrupts */ VIA_WRITE(VIA_REG_INTERRUPT, status); if (handled) return IRQ_HANDLED; else return IRQ_NONE; } static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv) { u32 status; if (dev_priv) { /* Acknowledge interrupts */ status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status | dev_priv->irq_pending_mask); } } int via_enable_vblank(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; u32 status; if (crtc != 0) { DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); return -EINVAL; } status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); return 0; } void via_disable_vblank(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); if (crtc != 0) DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); } static int
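/*
 * In the sequence-based branch of the wait below, the predicate
 * (cur_irq_sequence - *sequence) <= (1 << 23) is the usual wrap-safe test
 * for "the counter has reached the requested sequence": once the target is
 * passed, the unsigned difference is small, while a target still ahead
 * yields a huge value, so the comparison survives 32-bit counter wraparound.
 */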
via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned int cur_irq_sequence; drm_via_irq_t *cur_irq; int ret = 0; maskarray_t *masks; int real_irq; DRM_DEBUG("\n"); if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } if (irq >= drm_via_irq_num) { DRM_ERROR("Trying to wait on unknown irq %d\n", irq); return -EINVAL; } real_irq = dev_priv->irq_map[irq]; if (real_irq < 0) { DRM_ERROR("Video IRQ %d not available on this hardware.\n", irq); return -EINVAL; } masks = dev_priv->irq_masks; cur_irq = dev_priv->via_irqs + real_irq; if (masks[real_irq][2] && !force_sequence) { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, ((VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4])); cur_irq_sequence = atomic_read(&cur_irq->irq_received); } else { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, (((cur_irq_sequence = atomic_read(&cur_irq->irq_received)) - *sequence) <= (1 << 23))); } *sequence = cur_irq_sequence; return ret; } /* * drm_dma.h hooks */ void via_driver_irq_preinstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; drm_via_irq_t *cur_irq; int i; DRM_DEBUG("dev_priv: %p\n", dev_priv); if (dev_priv) { cur_irq = dev_priv->via_irqs; dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; if (dev_priv->chipset == VIA_PRO_GROUP_A || dev_priv->chipset == VIA_DX9_0) { dev_priv->irq_masks = via_pro_group_a_irqs; dev_priv->num_irqs = via_num_pro_group_a; dev_priv->irq_map = via_irqmap_pro_group_a; } else { dev_priv->irq_masks = via_unichrome_irqs; dev_priv->num_irqs = via_num_unichrome; dev_priv->irq_map = via_irqmap_unichrome; } for (i = 0; i < dev_priv->num_irqs; ++i) { atomic_set(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; cur_irq->pending_mask = dev_priv->irq_masks[i][1]; DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); dev_priv->irq_enable_mask |= cur_irq->enable_mask; dev_priv->irq_pending_mask |= cur_irq->pending_mask; cur_irq++; DRM_DEBUG("Initializing IRQ %d\n", i); } dev_priv->last_vblank_valid = 0; /* Clear VSync interrupt regs */ status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status & ~(dev_priv->irq_enable_mask)); /* Clear bits if they're already high */ viadrv_acknowledge_irqs(dev_priv); } } int via_driver_irq_postinstall(struct drm_device *dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; DRM_DEBUG("via_driver_irq_postinstall\n"); if (!dev_priv) return -EINVAL; status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL | dev_priv->irq_enable_mask); /* Some magic, oh for some data sheets ! */ VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); return 0; } void via_driver_irq_uninstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; DRM_DEBUG("\n"); if (dev_priv) { /* Some more magic, oh for some data sheets ! 
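* (A guess, since those data sheets never showed up: 0x83d4/0x83d5 look like the standard VGA CRTC index/data pair 0x3d4/0x3d5 mirrored into MMIO, with index 0x11 being the Vertical Retrace End register whose bits 4-5 control clearing and enabling the vertical-retrace interrupt -- which would explain the | 0x30 and & ~0x30 dance in the vblank enable/disable paths above.)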
*/ VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status & ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask)); } } int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_irqwait_t *irqwait = data; struct timeval now; int ret = 0; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_via_irq_t *cur_irq = dev_priv->via_irqs; int force_sequence; if (irqwait->request.irq >= dev_priv->num_irqs) { DRM_ERROR("Trying to wait on unknown irq %d\n", irqwait->request.irq); return -EINVAL; } cur_irq += irqwait->request.irq; switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { case VIA_IRQ_RELATIVE: irqwait->request.sequence += atomic_read(&cur_irq->irq_received); irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; case VIA_IRQ_ABSOLUTE: break; default: return -EINVAL; } if (irqwait->request.type & VIA_IRQ_SIGNAL) { DRM_ERROR("Signals on Via IRQs not implemented yet.\n"); return -EINVAL; } force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE); ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence, &irqwait->request.sequence); microtime(&now); irqwait->reply.tval_sec = now.tv_sec; irqwait->reply.tval_usec = now.tv_usec; return ret; } Index: head/sys/dev/drm2/i915/i915_reg.h =================================================================== --- head/sys/dev/drm2/i915/i915_reg.h (revision 258779) +++ head/sys/dev/drm2/i915/i915_reg.h (revision 258780) @@ -1,3876 +1,3876 @@ /* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #ifndef _I915_REG_H_ #define _I915_REG_H_ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. * This is all handled in the intel-gtt.ko module. i915.ko only * cares about the vga bit for the vga arbiter.
*/ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_VGA_DISABLE (1 << 1) /* PCI config space */ #define HPLLCC 0xc0 /* 855 only */ #define GC_CLOCK_CONTROL_MASK (0xf << 0) #define GC_CLOCK_133_200 (0 << 0) #define GC_CLOCK_100_200 (1 << 0) #define GC_CLOCK_100_133 (2 << 0) #define GC_CLOCK_166_250 (3 << 0) #define GCFGC2 0xda #define GCFGC 0xf0 /* 915+ only */ #define GC_LOW_FREQUENCY_ENABLE (1 << 7) #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) #define GC_DISPLAY_CLOCK_MASK (7 << 4) #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) #define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) #define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) #define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) #define I965_GC_RENDER_CLOCK_MASK (0xf << 0) #define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) #define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) #define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) #define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) #define I945_GC_RENDER_CLOCK_MASK (7 << 0) #define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) #define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) #define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) #define I915_GC_RENDER_CLOCK_MASK (7 << 0) #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) #define LBB 0xf4 /* Graphics reset regs */ #define I965_GDRST 0xc0 /* PCI config register */ #define ILK_GDSR 0x2ca4 /* MCHBAR offset */ #define GRDOM_FULL (0<<2) #define GRDOM_RENDER (1<<2) #define GRDOM_MEDIA (3<<2) #define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ #define GEN6_MBC_SNPCR_SHIFT 21 #define GEN6_MBC_SNPCR_MASK (3<<21) #define GEN6_MBC_SNPCR_MAX (0<<21) #define GEN6_MBC_SNPCR_MED (1<<21) #define GEN6_MBC_SNPCR_LOW (2<<21) #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ #define GEN6_MBCTL 0x0907c #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) #define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) #define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) #define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) #define GEN6_GDRST 0x941c #define GEN6_GRDOM_FULL (1 << 0) #define GEN6_GRDOM_RENDER (1 << 1) #define GEN6_GRDOM_MEDIA (1 << 2) #define GEN6_GRDOM_BLT (1 << 3) /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_PDE_VALID (1 << 0) #define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */ /* gen6+ has bit 11-4 for physical addr bit 39-32 */ #define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) #define GEN6_PTE_VALID (1 << 0) #define GEN6_PTE_UNCACHED (1 << 1) #define GEN6_PTE_CACHE_LLC (2 << 1) #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) #define GEN6_PTE_CACHE_BITS (3 << 1) #define GEN6_PTE_GFDT (1 << 3) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) #define PP_DIR_DCLV_2G 0xffffffff #define GAM_ECOCHK 0x4090 #define ECOCHK_SNB_BIT (1<<10) #define ECOCHK_PPGTT_CACHE64B (0x3<<3) #define ECOCHK_PPGTT_CACHE4B (0x0<<3) /* VGA stuff */ #define VGA_ST01_MDA 0x3ba #define VGA_ST01_CGA 0x3da #define VGA_MSR_WRITE 0x3c2 #define VGA_MSR_READ 0x3cc #define VGA_MSR_MEM_EN (1<<1) #define VGA_MSR_CGA_MODE (1<<0) #define VGA_SR_INDEX 0x3c4 #define VGA_SR_DATA 0x3c5 #define VGA_AR_INDEX 0x3c0 #define VGA_AR_VID_EN (1<<5) #define 
VGA_AR_DATA_WRITE 0x3c0 #define VGA_AR_DATA_READ 0x3c1 #define VGA_GR_INDEX 0x3ce #define VGA_GR_DATA 0x3cf /* GR05 */ #define VGA_GR_MEM_READ_MODE_SHIFT 3 #define VGA_GR_MEM_READ_MODE_PLANE 1 /* GR06 */ #define VGA_GR_MEM_MODE_MASK 0xc #define VGA_GR_MEM_MODE_SHIFT 2 #define VGA_GR_MEM_A0000_AFFFF 0 #define VGA_GR_MEM_A0000_BFFFF 1 #define VGA_GR_MEM_B0000_B7FFF 2 #define VGA_GR_MEM_B0000_BFFFF 3 #define VGA_DACMASK 0x3c6 #define VGA_DACRX 0x3c7 #define VGA_DACWX 0x3c8 #define VGA_DACDATA 0x3c9 #define VGA_CR_INDEX_MDA 0x3b4 #define VGA_CR_DATA_MDA 0x3b5 #define VGA_CR_INDEX_CGA 0x3d4 #define VGA_CR_DATA_CGA 0x3d5 /* * Memory interface instructions used by the kernel */ #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) #define MI_NOOP MI_INSTR(0, 0) #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) #define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) #define MI_FLUSH MI_INSTR(0x04, 0) #define MI_READ_FLUSH (1 << 0) #define MI_EXE_FLUSH (1 << 1) #define MI_NO_WRITE_FLUSH (1 << 2) #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) #define MI_SUSPEND_FLUSH_EN (1<<0) #define MI_REPORT_HEAD MI_INSTR(0x07, 0) #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) #define MI_OVERLAY_CONTINUE (0x0<<21) #define MI_OVERLAY_ON (0x1<<21) #define MI_OVERLAY_OFF (0x2<<21) #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) #define MI_SET_CONTEXT MI_INSTR(0x18, 0) #define MI_MM_SPACE_GTT (1<<8) #define MI_MM_SPACE_PHYSICAL (0<<8) #define MI_SAVE_EXT_STATE_EN (1<<3) #define MI_RESTORE_EXT_STATE_EN (1<<2) #define MI_FORCE_RESTORE (1<<1) #define MI_RESTORE_INHIBIT (1<<0) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX_SHIFT 2 /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw * simply ignores the register load under certain conditions. * - One can actually load arbitrarily many arbitrary registers: Simply issue x * address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
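* A hedged illustration, assuming the BEGIN_LP_RING/OUT_RING emit helpers
* this driver generation provides elsewhere: loading one register obeys both
* rules above -- a leading MI_NOOP, and a DWORD length of 2*x - 1 = 1
* encoded by MI_LOAD_REGISTER_IMM(1), defined just below (the register and
* value here are arbitrary examples, both defined later in this file):
*
*	BEGIN_LP_RING(4);
*	OUT_RING(MI_NOOP);
*	OUT_RING(MI_LOAD_REGISTER_IMM(1));
*	OUT_RING(INSTPM);
*	OUT_RING(GFX_MODE_ENABLE(INSTPM_FORCE_ORDERING));
*	ADVANCE_LP_RING();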
*/ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ #define MI_INVALIDATE_TLB (1<<18) #define MI_INVALIDATE_BSD (1<<7) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_UPDATE (1<<21) #define MI_SEMAPHORE_COMPARE (1<<20) #define MI_SEMAPHORE_REGISTER (1<<18) #define MI_SEMAPHORE_SYNC_RV (2<<16) #define MI_SEMAPHORE_SYNC_RB (0<<16) #define MI_SEMAPHORE_SYNC_VR (0<<16) #define MI_SEMAPHORE_SYNC_VB (2<<16) #define MI_SEMAPHORE_SYNC_BR (2<<16) #define MI_SEMAPHORE_SYNC_BV (0<<16) #define MI_SEMAPHORE_SYNC_INVALID (1<<0) /* * 3D instructions used by the kernel */ #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) #define SC_ENABLE_MASK (0x1<<0) #define SC_ENABLE (0x1<<0) #define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) #define SCI_YMIN_MASK (0xffff<<16) #define SCI_XMIN_MASK (0xffff<<0) #define SCI_YMAX_MASK (0xffff<<16) #define SCI_XMAX_MASK (0xffff<<0) #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) #define BLT_DEPTH_8 (0<<24) #define BLT_DEPTH_16_565 (1<<24) #define BLT_DEPTH_16_1555 (2<<24) #define BLT_DEPTH_32 (3<<24) #define BLT_ROP_GXCOPY (0xcc<<16) #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) #define ASYNC_FLIP (1<<22) #define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_B (1<<20) #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) #define PIPE_CONTROL_CS_STALL (1<<20) #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) #define PIPE_CONTROL_WRITE_FLUSH (1<<12) #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) #define PIPE_CONTROL_NOTIFY (1<<8) #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) #define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ /* * Reset registers */ #define DEBUG_RESET_I830 0x6070 #define DEBUG_RESET_FULL (1<<7) #define 
DEBUG_RESET_RENDER (1<<8) #define DEBUG_RESET_DISPLAY (1<<9) /* * Fence registers */ #define FENCE_REG_830_0 0x2000 #define FENCE_REG_945_8 0x3000 #define I830_FENCE_START_MASK 0x07f80000 #define I830_FENCE_TILING_Y_SHIFT 12 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) #define I915_FENCE_MAX_PITCH_VAL 4 #define I830_FENCE_MAX_PITCH_VAL 6 #define I830_FENCE_MAX_SIZE_VAL (1<<8) #define I915_FENCE_START_MASK 0x0ff00000 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) #define FENCE_REG_965_0 0x03000 #define I965_FENCE_PITCH_SHIFT 2 #define I965_FENCE_TILING_Y_SHIFT 1 #define I965_FENCE_REG_VALID (1<<0) #define I965_FENCE_MAX_PITCH_VAL 0x0400 #define FENCE_REG_SANDYBRIDGE_0 0x100000 #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 /* control register for cpu gtt access */ #define TILECTL 0x101000 #define TILECTL_SWZCTL (1 << 0) #define TILECTL_TLB_PREFETCH_DIS (1 << 2) #define TILECTL_BACKSNOOP_DIS (1 << 3) /* * Instruction and interrupt control regs */ #define PGTBL_ER 0x02024 #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 #define BLT_RING_BASE 0x22000 #define RING_TAIL(base) ((base)+0x30) #define RING_HEAD(base) ((base)+0x34) #define RING_START(base) ((base)+0x38) #define RING_CTL(base) ((base)+0x3c) #define RING_SYNC_0(base) ((base)+0x40) #define RING_SYNC_1(base) ((base)+0x44) #define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) #define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) #define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) #define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) #define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) #define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) #define RING_MAX_IDLE(base) ((base)+0x54) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) #define ARB_MODE 0x04030 #define ARB_MODE_SWIZZLE_SNB (1<<4) #define ARB_MODE_SWIZZLE_IVB (1<<5) #define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x) #define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x) #define RENDER_HWS_PGA_GEN7 (0x04080) #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) #define DONE_REG 0x40b0 #define BSD_HWS_PGA_GEN7 (0x04180) #define BLT_HWS_PGA_GEN7 (0x04280) #define RING_ACTHD(base) ((base)+0x74) #define RING_NOPID(base) ((base)+0x94) #define RING_IMR(base) ((base)+0xa8) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC #define RING_NR_PAGES 0x001FF000 #define RING_REPORT_MASK 0x00000006 #define RING_REPORT_64K 0x00000002 #define RING_REPORT_128K 0x00000004 #define RING_NO_REPORT 0x00000000 #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ #if 0 #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 #define PRB0_START 0x02038 #define PRB0_CTL 0x0203c #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ #endif #define IPEIR_I965 0x02064 #define IPEHR_I965 0x02068 #define INSTDONE_I965 0x0206c #define RING_IPEIR(base) ((base)+0x64) #define RING_IPEHR(base) ((base)+0x68) #define RING_INSTDONE(base) ((base)+0x6c) #define RING_INSTPS(base) ((base)+0x70) #define RING_DMA_FADD(base) ((base)+0x78) #define RING_INSTPM(base) ((base)+0xc0) #define INSTPS 
0x02070 /* 965+ only */ #define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA 0x2088 /* 965GM+ only */ #define PWRCTX_EN (1<<0) #define IPEIR 0x02088 #define IPEHR 0x0208c #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 #define ERROR_GEN6 0x040a0 /* GM45+ chicken bits -- debug workaround bits that may be required * for various sorts of correct behavior. The top 16 bits of each are * the enables for writing to the corresponding low bit. */ #define _3D_CHICKEN 0x02084 #define _3D_CHICKEN2 0x0208c /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the * particular danger of not doing so is not specified. */ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 0x02090 #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 12) #define GFX_MODE 0x02520 #define GFX_MODE_GEN7 0x0229c #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) #define GFX_RUN_LIST_ENABLE (1<<15) #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) #define GFX_SURFACE_FAULT_ENABLE (1<<12) #define GFX_REPLAY_MODE (1<<11) #define GFX_PSMI_GRANULARITY (1<<10) #define GFX_PPGTT_ENABLE (1<<9) #define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit)) #define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0)) #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 #define IMR 0x020a8 #define ISR 0x020ac #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) #define I915_DISPLAY_PORT_INTERRUPT (1<<17) #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ #define I915_HWB_OOM_INTERRUPT (1<<13) #define I915_SYNC_STATUS_INTERRUPT (1<<12) #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) #define I915_DEBUG_INTERRUPT (1<<2) #define I915_USER_INTERRUPT (1<<1) #define I915_ASLE_INTERRUPT (1<<0) #define I915_BSD_USER_INTERRUPT (1<<25) #define EIR 0x020b0 #define EMR 0x020b4 #define ESR 0x020b8 #define GM45_ERROR_PAGE_TABLE (1<<5) #define GM45_ERROR_MEM_PRIV (1<<4) #define I915_ERROR_PAGE_TABLE (1<<4) #define GM45_ERROR_CP_PRIV (1<<3) #define I915_ERROR_MEMORY_REFRESH (1<<1) #define I915_ERROR_INSTRUCTION (1<<0) #define INSTPM 0x020c0 #define INSTPM_SELF_EN (1<<12) /* 915GM only */ #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts will not assert AGPBUSY# and will only be delivered when out of C3. 
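* INSTPM itself follows the masked-register convention described for the
* chicken bits above: the top 16 bits are per-bit write enables, so no
* read-modify-write is needed. Using the GFX_MODE helpers defined nearby,
* e.g. I915_WRITE(INSTPM, GFX_MODE_ENABLE(INSTPM_FORCE_ORDERING)) would set
* bit 7 while leaving every other bit alone (a sketch, assuming the driver's
* usual I915_WRITE accessor).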
*/ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ #define ACTHD 0x020c8 #define FW_BLC 0x020d8 #define FW_BLC2 0x020dc #define FW_BLC_SELF 0x020e0 /* 915+ only */ #define FW_BLC_SELF_EN_MASK (1<<31) #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ #define FW_BLC_SELF_EN (1<<15) /* 945 only */ #define MM_BURST_LENGTH 0x00700000 #define MM_FIFO_WATERMARK 0x0001F000 #define LM_BURST_LENGTH 0x00000700 #define LM_FIFO_WATERMARK 0x0000001F #define MI_ARB_STATE 0x020e4 /* 915+ only */ #define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */ /* Make render/texture TLB fetches lower priority than associated data * fetches. This is not turned on by default */ #define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) /* Isoch request wait on GTT enable (Display A/B/C streams). * Make isoch requests stall on the TLB update. May cause * display underruns (test mode only) */ #define MI_ARB_ISOCH_WAIT_GTT (1 << 14) /* Block grant count for isoch requests when block count is * set to a finite value. */ #define MI_ARB_BLOCK_GRANT_MASK (3 << 12) #define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ #define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ #define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ #define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ /* Enable render writes to complete in C2/C3/C4 power states. * If this isn't enabled, render writes are prevented in low * power states. That seems bad to me. */ #define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) /* This acknowledges an async flip immediately instead * of waiting for 2 TLB fetches. */ #define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) /* Enables non-sequential data reads through arbiter */ #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) /* Disable FSB snooping of cacheable write cycles from binner/render * command stream */ #define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) /* Arbiter time slice for non-isoch streams */ #define MI_ARB_TIME_SLICE_MASK (7 << 5) #define MI_ARB_TIME_SLICE_1 (0 << 5) #define MI_ARB_TIME_SLICE_2 (1 << 5) #define MI_ARB_TIME_SLICE_4 (2 << 5) #define MI_ARB_TIME_SLICE_6 (3 << 5) #define MI_ARB_TIME_SLICE_8 (4 << 5) #define MI_ARB_TIME_SLICE_10 (5 << 5) #define MI_ARB_TIME_SLICE_14 (6 << 5) #define MI_ARB_TIME_SLICE_16 (7 << 5) /* Low priority grace period page size */ #define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ #define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) /* Disable display A/B trickle feed */ #define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* Set display plane priority */ #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ #define CACHE_MODE_0 0x02120 /* 915+ only */ #define CM0_MASK_SHIFT 16 #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) #define CM0_DEPTH_EVICT_DISABLE (1<<4) #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) #define CM0_RC_OP_FLUSH_DISABLE (1<<0) #define BB_ADDR 0x02140 /* 8 bytes */ #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ #define ECOSKPD 0x021d0 #define ECO_GATING_CX_ONLY (1<<3) #define ECO_FLIP_DONE (1<<0) /* GEN6 interrupt control */ #define GEN6_RENDER_HWSTAM 0x2098 #define GEN6_RENDER_IMR 0x20a8 #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) #define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) #define
GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) #define GEN6_RENDER_SYNC_STATUS (1 << 2) #define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) #define GEN6_RENDER_USER_INTERRUPT (1 << 0) #define GEN6_BLITTER_HWSTAM 0x22098 #define GEN6_BLITTER_IMR 0x220a8 #define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) #define GEN6_BLITTER_SYNC_STATUS (1 << 24) #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) #define GEN6_BLITTER_ECOSKPD 0x221d0 #define GEN6_BLITTER_LOCK_SHIFT 16 #define GEN6_BLITTER_FBC_NOTIFY (1<<3) #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) #define GEN6_BSD_HWSTAM 0x12098 #define GEN6_BSD_IMR 0x120a8 #define GEN6_BSD_USER_INTERRUPT (1 << 12) #define GEN6_BSD_RNCID 0x12198 /* * Framebuffer compression (915+ only) */ #define FBC_CFB_BASE 0x03200 /* 4k page aligned */ #define FBC_LL_BASE 0x03204 /* 4k page aligned */ #define FBC_CONTROL 0x03208 #define FBC_CTL_EN (1<<31) #define FBC_CTL_PERIODIC (1<<30) #define FBC_CTL_INTERVAL_SHIFT (16) #define FBC_CTL_UNCOMPRESSIBLE (1<<14) #define FBC_CTL_C3_IDLE (1<<13) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO (1<<0) #define FBC_COMMAND 0x0320c #define FBC_CMD_COMPRESS (1<<0) #define FBC_STATUS 0x03210 #define FBC_STAT_COMPRESSING (1<<31) #define FBC_STAT_COMPRESSED (1<<30) #define FBC_STAT_MODIFIED (1<<29) #define FBC_STAT_CURRENT_LINE (1<<0) #define FBC_CONTROL2 0x03214 #define FBC_CTL_FENCE_DBL (0<<4) #define FBC_CTL_IDLE_IMM (0<<2) #define FBC_CTL_IDLE_FULL (1<<2) #define FBC_CTL_IDLE_LINE (2<<2) #define FBC_CTL_IDLE_DEBUG (3<<2) #define FBC_CTL_CPU_FENCE (1<<1) #define FBC_CTL_PLANEA (0<<0) #define FBC_CTL_PLANEB (1<<0) #define FBC_FENCE_OFF 0x0321b #define FBC_TAG 0x03300 #define FBC_LL_SIZE (1536) /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE 0x3200 #define DPFC_CONTROL 0x3208 #define DPFC_CTL_EN (1<<31) #define DPFC_CTL_PLANEA (0<<30) #define DPFC_CTL_PLANEB (1<<30) #define DPFC_CTL_FENCE_EN (1<<29) #define DPFC_CTL_PERSISTENT_MODE (1<<25) #define DPFC_SR_EN (1<<10) #define DPFC_CTL_LIMIT_1X (0<<6) #define DPFC_CTL_LIMIT_2X (1<<6) #define DPFC_CTL_LIMIT_4X (2<<6) #define DPFC_RECOMP_CTL 0x320c #define DPFC_RECOMP_STALL_EN (1<<27) #define DPFC_RECOMP_STALL_WM_SHIFT (16) #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) #define DPFC_STATUS 0x3210 #define DPFC_INVAL_SEG_SHIFT (16) #define DPFC_INVAL_SEG_MASK (0x07ff0000) #define DPFC_COMP_SEG_SHIFT (0) #define DPFC_COMP_SEG_MASK (0x000003ff) #define DPFC_STATUS2 0x3214 #define DPFC_FENCE_YOFF 0x3218 #define DPFC_CHICKEN 0x3224 #define DPFC_HT_MODIFY (1<<31) /* Framebuffer compression for Ironlake */ #define ILK_DPFC_CB_BASE 0x43200 #define ILK_DPFC_CONTROL 0x43208 /* The bit 28-8 is reserved */ #define DPFC_RESERVED (0x1FFFFF00) #define ILK_DPFC_RECOMP_CTL 0x4320c #define ILK_DPFC_STATUS 0x43210 #define ILK_DPFC_FENCE_YOFF 0x43218 #define ILK_DPFC_CHICKEN 0x43224 #define ILK_FBC_RT_BASE 0x2128 #define ILK_FBC_RT_VALID (1<<0) #define ILK_DISPLAY_CHICKEN1 0x42000 #define ILK_FBCQ_DIS (1<<22) #define ILK_PABSTRETCH_DIS (1<<21) /* * Framebuffer compression for Sandybridge * * The following two registers are of type GTTMMADR */ #define SNB_DPFC_CTL_SA 0x100100 
#define SNB_CPU_FENCE_ENABLE (1<<29) #define DPFC_CPU_FENCE_OFFSET 0x100104 /* * GPIO regs */ #define GPIOA 0x5010 #define GPIOB 0x5014 #define GPIOC 0x5018 #define GPIOD 0x501c #define GPIOE 0x5020 #define GPIOF 0x5024 #define GPIOG 0x5028 #define GPIOH 0x502c # define GPIO_CLOCK_DIR_MASK (1 << 0) # define GPIO_CLOCK_DIR_IN (0 << 1) # define GPIO_CLOCK_DIR_OUT (1 << 1) # define GPIO_CLOCK_VAL_MASK (1 << 2) # define GPIO_CLOCK_VAL_OUT (1 << 3) # define GPIO_CLOCK_VAL_IN (1 << 4) # define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) # define GPIO_DATA_DIR_MASK (1 << 8) # define GPIO_DATA_DIR_IN (0 << 9) # define GPIO_DATA_DIR_OUT (1 << 9) # define GPIO_DATA_VAL_MASK (1 << 10) # define GPIO_DATA_VAL_OUT (1 << 11) # define GPIO_DATA_VAL_IN (1 << 12) # define GPIO_DATA_PULLUP_DISABLE (1 << 13) #define GMBUS0 0x5100 /* clock/port select */ #define GMBUS_RATE_100KHZ (0<<8) #define GMBUS_RATE_50KHZ (1<<8) #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ #define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ #define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ #define GMBUS_PORT_DISABLED 0 #define GMBUS_PORT_SSC 1 #define GMBUS_PORT_VGADDC 2 #define GMBUS_PORT_PANEL 3 #define GMBUS_PORT_DPC 4 /* HDMIC */ #define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ /* 6 reserved */ #define GMBUS_PORT_DPD 7 /* HDMID */ #define GMBUS_NUM_PORTS 8 #define GMBUS1 0x5104 /* command/status */ #define GMBUS_SW_CLR_INT (1<<31) #define GMBUS_SW_RDY (1<<30) #define GMBUS_ENT (1<<29) /* enable timeout */ #define GMBUS_CYCLE_NONE (0<<25) #define GMBUS_CYCLE_WAIT (1<<25) #define GMBUS_CYCLE_INDEX (2<<25) #define GMBUS_CYCLE_STOP (4<<25) #define GMBUS_BYTE_COUNT_SHIFT 16 #define GMBUS_SLAVE_INDEX_SHIFT 8 #define GMBUS_SLAVE_ADDR_SHIFT 1 #define GMBUS_SLAVE_READ (1<<0) #define GMBUS_SLAVE_WRITE (0<<0) #define GMBUS2 0x5108 /* status */ #define GMBUS_INUSE (1<<15) #define GMBUS_HW_WAIT_PHASE (1<<14) #define GMBUS_STALL_TIMEOUT (1<<13) #define GMBUS_INT (1<<12) #define GMBUS_HW_RDY (1<<11) #define GMBUS_SATOER (1<<10) #define GMBUS_ACTIVE (1<<9) #define GMBUS3 0x510c /* data buffer bytes 3-0 */ #define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) #define GMBUS_NAK_EN (1<<3) #define GMBUS_IDLE_EN (1<<2) #define GMBUS_HW_WAIT_EN (1<<1) #define GMBUS_HW_RDY_EN (1<<0) #define GMBUS5 0x5120 /* byte index */ #define GMBUS_2BYTE_INDEX_EN (1<<31) /* * Clock control & power management */ #define VGA0 0x6000 #define VGA1 0x6004 #define VGA_PD 0x6010 #define VGA0_PD_P2_DIV_4 (1 << 7) #define VGA0_PD_P1_DIV_2 (1 << 5) #define VGA0_PD_P1_SHIFT 0 #define VGA0_PD_P1_MASK (0x1f << 0) #define VGA1_PD_P2_DIV_4 (1 << 15) #define VGA1_PD_P1_DIV_2 (1 << 13) #define VGA1_PD_P1_SHIFT 8 #define VGA1_PD_P1_MASK (0x1f << 8) #define _DPLL_A 0x06014 #define _DPLL_B 0x06018 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) -#define DPLL_VCO_ENABLE (1 << 31) +#define DPLL_VCO_ENABLE (1U << 31) #define DPLL_DVO_HIGH_SPEED (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) #define DPLL_VGA_MODE_DIS (1 << 28) #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ #define DPLL_MODE_MASK (3 << 26) #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ #define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define 
DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 #define SR01 1 #define SR01_SCREEN_OFF (1<<5) #define PPCR 0x61204 #define PPCR_ON (1<<0) #define DVOB 0x61140 #define DVOB_ON (1<<31) #define DVOC 0x61160 #define DVOC_ON (1<<31) #define LVDS 0x61180 #define LVDS_ON (1<<31) /* Scratch pad debug 0 reg: */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 /* * The i830 generation, in LVDS mode, defines P1 as the bit number set within * this field (only one bit may be set). */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 #define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15 /* i830, required in DVO non-gang */ #define PLL_P2_DIVIDE_BY_4 (1 << 23) #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ #define PLL_REF_INPUT_DREFCLK (0 << 13) #define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ #define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) #define PLL_REF_INPUT_MASK (3 << 13) #define PLL_LOAD_PULSE_PHASE_SHIFT 9 /* Ironlake */ # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) # define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) # define DPLL_FPA1_P1_POST_DIV_SHIFT 0 # define DPLL_FPA1_P1_POST_DIV_MASK 0xff /* * Parallel to Serial Load Pulse phase selection. * Selects the phase for the 10X DPLL clock for the PCIe * digital display port. The range is 4 to 13; 10 or more * is just a flip delay. The default is 6 */ #define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) #define DISPLAY_RATE_SELECT_FPA1 (1 << 8) /* * SDVO multiplier for 945G/GM. Not used on 965. */ #define SDVO_MULTIPLIER_MASK 0x000000ff #define SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 #define _DPLL_A_MD 0x0601c /* 965+ only */ /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. * * Value is pixels minus 1. Must be set to 1 pixel for SDVO. */ #define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 #define DPLL_MD_UDI_DIVIDER_SHIFT 24 /* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ #define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 #define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 /* * SDVO/UDI pixel multiplier. * * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus * clock rate is 10 times the DPLL clock. At low resolution/refresh rate * modes, the bus rate would be below the limits, so SDVO allows for stuffing * dummy bytes in the datastream at an increased clock rate, with both sides of * the link knowing how many bytes are fill. * * So, for a mode with a dotclock of 65Mhz, we would want to double the clock * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be * set to 130Mhz, and the SDVO multiplier set to 2x in this register and * through an SDVO command. * * This register field has values of multiplication factor minus 1, with * a maximum multiplier of 5 for SDVO. */ #define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 #define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 /* * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. * This best be set to the default value (3) or the CRT won't work. No, * I don't entirely understand what this does... 
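* (Working the SDVO multiplier arithmetic above through: with the bus at 10x
* the DPLL clock, a multiplier m is legal when 1 GHz <= dotclock * m * 10 <=
* 2 GHz, so the 65 MHz example needs m = 2 for a 1.3 GHz bus, a 25 MHz mode
* would need m = 4 for 1.0 GHz, and anything from 100 MHz up runs at m = 1;
* the register field then stores m - 1.)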
*/ #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 #define _DPLL_B_MD 0x06020 /* 965+ only */ #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) #define _FPA0 0x06040 #define _FPA1 0x06044 #define _FPB0 0x06048 #define _FPB1 0x0604c #define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) #define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) #define FP_N_DIV_MASK 0x003f0000 #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 #define FP_M1_DIV_MASK 0x00003f00 #define FP_M1_DIV_SHIFT 8 #define FP_M2_DIV_MASK 0x0000003f #define FP_M2_PINEVIEW_DIV_MASK 0x000000ff #define FP_M2_DIV_SHIFT 0 #define DPLL_TEST 0x606c #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) #define DPLLB_TEST_SDVO_DIV_2 (1 << 22) #define DPLLB_TEST_SDVO_DIV_4 (2 << 22) #define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) #define DPLLB_TEST_N_BYPASS (1 << 19) #define DPLLB_TEST_M_BYPASS (1 << 18) #define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) #define DPLLA_TEST_N_BYPASS (1 << 3) #define DPLLA_TEST_M_BYPASS (1 << 2) #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) #define D_STATE 0x6104 #define DSTATE_GFX_RESET_I830 (1<<6) #define DSTATE_PLL_D3_OFF (1<<3) #define DSTATE_GFX_CLOCK_GATING (1<<1) #define DSTATE_DOT_CLOCK_GATING (1<<0) #define DSPCLK_GATE_D 0x6200 # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ # define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */ # define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */ # define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */ # define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */ # define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */ # define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */ # define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */ # define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */ # define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */ # define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */ # define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */ # define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */ # define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */ # define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */ # define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */ # define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */ # define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) # define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10) # define DCUNIT_CLOCK_GATE_DISABLE (1 << 9) # define DPUNIT_CLOCK_GATE_DISABLE (1 << 8) # define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */ # define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */ # define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */ # define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5) # define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4) /** * This bit must be set on the 830 to prevent hangs when turning off the * overlay scaler. 
*/ # define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3) # define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2) # define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1) # define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ # define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ #define RENCLK_GATE_D1 0x6204 # define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ # define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ # define PC_FE_CLOCK_GATE_DISABLE (1 << 11) # define PC_BE_CLOCK_GATE_DISABLE (1 << 10) # define WINDOWER_CLOCK_GATE_DISABLE (1 << 9) # define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8) # define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7) # define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6) # define MAG_CLOCK_GATE_DISABLE (1 << 5) /** This bit must be unset on 855,865 */ # define MECI_CLOCK_GATE_DISABLE (1 << 4) # define DCMP_CLOCK_GATE_DISABLE (1 << 3) # define MEC_CLOCK_GATE_DISABLE (1 << 2) # define MECO_CLOCK_GATE_DISABLE (1 << 1) /** This bit must be set on 855,865. */ # define SV_CLOCK_GATE_DISABLE (1 << 0) # define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16) # define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15) # define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14) # define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13) # define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12) # define I915_WM_CLOCK_GATE_DISABLE (1 << 11) # define I915_IZ_CLOCK_GATE_DISABLE (1 << 10) # define I915_PI_CLOCK_GATE_DISABLE (1 << 9) # define I915_DI_CLOCK_GATE_DISABLE (1 << 8) # define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7) # define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6) # define I915_SC_CLOCK_GATE_DISABLE (1 << 5) # define I915_FL_CLOCK_GATE_DISABLE (1 << 4) # define I915_DM_CLOCK_GATE_DISABLE (1 << 3) # define I915_PS_CLOCK_GATE_DISABLE (1 << 2) # define I915_CC_CLOCK_GATE_DISABLE (1 << 1) # define I915_BY_CLOCK_GATE_DISABLE (1 << 0) # define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30) /** This bit must always be set on 965G/965GM */ # define I965_RCC_CLOCK_GATE_DISABLE (1 << 29) # define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28) # define I965_DAP_CLOCK_GATE_DISABLE (1 << 27) # define I965_ROC_CLOCK_GATE_DISABLE (1 << 26) # define I965_GW_CLOCK_GATE_DISABLE (1 << 25) # define I965_TD_CLOCK_GATE_DISABLE (1 << 24) /** This bit must always be set on 965G */ # define I965_ISC_CLOCK_GATE_DISABLE (1 << 23) # define I965_IC_CLOCK_GATE_DISABLE (1 << 22) # define I965_EU_CLOCK_GATE_DISABLE (1 << 21) # define I965_IF_CLOCK_GATE_DISABLE (1 << 20) # define I965_TC_CLOCK_GATE_DISABLE (1 << 19) # define I965_SO_CLOCK_GATE_DISABLE (1 << 17) # define I965_FBC_CLOCK_GATE_DISABLE (1 << 16) # define I965_MARI_CLOCK_GATE_DISABLE (1 << 15) # define I965_MASF_CLOCK_GATE_DISABLE (1 << 14) # define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13) # define I965_EM_CLOCK_GATE_DISABLE (1 << 12) # define I965_UC_CLOCK_GATE_DISABLE (1 << 11) # define I965_SI_CLOCK_GATE_DISABLE (1 << 6) # define I965_MT_CLOCK_GATE_DISABLE (1 << 5) # define I965_PL_CLOCK_GATE_DISABLE (1 << 4) # define I965_DG_CLOCK_GATE_DISABLE (1 << 3) # define I965_QC_CLOCK_GATE_DISABLE (1 << 2) # define I965_FT_CLOCK_GATE_DISABLE (1 << 1) # define I965_DM_CLOCK_GATE_DISABLE (1 << 0) #define RENCLK_GATE_D2 0x6208 #define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) #define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) #define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) #define RAMCLK_GATE_D 0x6210 /* CRL only */ #define DEUC 0x6214 /* CRL only */ /* * Palette regs */ #define _PALETTE_A 0x0a000 #define _PALETTE_B 0x0a800 #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) /* MCH MMIO space */ /* * MCHBAR mirror. 
* * This mirrors the MCHBAR MMIO space whose location is determined by * device 0 function 0's pci config register 0x44 or 0x48 and matches it in * every way. It is not accessible from the CP register read instructions. * */ #define MCHBAR_MIRROR_BASE 0x10000 #define MCHBAR_MIRROR_BASE_SNB 0x140000 /** 915-945 and GM965 MCH register controlling DRAM channel access */ #define DCC 0x10200 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) #define DCC_ADDRESSING_MODE_MASK (3 << 0) #define DCC_CHANNEL_XOR_DISABLE (1 << 10) #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) /** Pineview MCH register contains DDR3 setting */ #define CSHRDDR3CTL 0x101a8 #define CSHRDDR3CTL_DDR3 (1 << 2) /** 965 MCH register controlling DRAM channel configuration */ #define C0DRB3 0x10206 #define C1DRB3 0x10606 /** snb MCH registers for reading the DRAM channel configuration */ #define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) #define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) #define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) #define MAD_DIMM_ECC_MASK (0x3 << 24) #define MAD_DIMM_ECC_OFF (0x0 << 24) #define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) #define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) #define MAD_DIMM_ECC_ON (0x3 << 24) #define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) #define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) #define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ #define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ #define MAD_DIMM_B_DUAL_RANK (0x1 << 18) #define MAD_DIMM_A_DUAL_RANK (0x1 << 17) #define MAD_DIMM_A_SELECT (0x1 << 16) /* DIMM sizes are in multiples of 256mb. */ #define MAD_DIMM_B_SIZE_SHIFT 8 #define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) #define MAD_DIMM_A_SIZE_SHIFT 0 #define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) /* Clocking configuration register */ #define CLKCFG 0x10c00 #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ #define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ /* Note, below two are guess */ #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ #define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ #define CLKCFG_FSB_MASK (7 << 0) #define CLKCFG_MEM_533 (1 << 4) #define CLKCFG_MEM_667 (2 << 4) #define CLKCFG_MEM_800 (3 << 4) #define CLKCFG_MEM_MASK (7 << 4) #define TSC1 0x11001 #define TSE (1<<0) #define I915_TR1 0x11006 #define TSFS 0x11020 #define TSFS_SLOPE_MASK 0x0000ff00 #define TSFS_SLOPE_SHIFT 8 #define TSFS_INTR_MASK 0x000000ff #define CRSTANDVID 0x11100 #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ #define PXVFREQ_PX_MASK 0x7f000000 #define PXVFREQ_PX_SHIFT 24 #define VIDFREQ_BASE 0x11110 #define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ #define VIDFREQ2 0x11114 #define VIDFREQ3 0x11118 #define VIDFREQ4 0x1111c #define VIDFREQ_P0_MASK 0x1f000000 #define VIDFREQ_P0_SHIFT 24 #define VIDFREQ_P0_CSCLK_MASK 0x00f00000 #define VIDFREQ_P0_CSCLK_SHIFT 20 #define VIDFREQ_P0_CRCLK_MASK 0x000f0000 #define VIDFREQ_P0_CRCLK_SHIFT 16 #define VIDFREQ_P1_MASK 0x00001f00 #define VIDFREQ_P1_SHIFT 8 #define VIDFREQ_P1_CSCLK_MASK 0x000000f0 #define VIDFREQ_P1_CSCLK_SHIFT 4 #define VIDFREQ_P1_CRCLK_MASK 0x0000000f #define INTTOEXT_BASE_ILK 0x11300 #define INTTOEXT_BASE 0x11120 /* 
INTTOEXT1-8 (0x1113c) */ #define INTTOEXT_MAP3_SHIFT 24 #define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) #define INTTOEXT_MAP2_SHIFT 16 #define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) #define INTTOEXT_MAP1_SHIFT 8 #define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) #define INTTOEXT_MAP0_SHIFT 0 #define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) #define MEMSWCTL 0x11170 /* Ironlake only */ #define MEMCTL_CMD_MASK 0xe000 #define MEMCTL_CMD_SHIFT 13 #define MEMCTL_CMD_RCLK_OFF 0 #define MEMCTL_CMD_RCLK_ON 1 #define MEMCTL_CMD_CHFREQ 2 #define MEMCTL_CMD_CHVID 3 #define MEMCTL_CMD_VMMOFF 4 #define MEMCTL_CMD_VMMON 5 #define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears when command complete */ #define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ #define MEMCTL_FREQ_SHIFT 8 #define MEMCTL_SFCAVM (1<<7) #define MEMCTL_TGT_VID_MASK 0x007f #define MEMIHYST 0x1117c #define MEMINTREN 0x11180 /* 16 bits */ #define MEMINT_RSEXIT_EN (1<<8) #define MEMINT_CX_SUPR_EN (1<<7) #define MEMINT_CONT_BUSY_EN (1<<6) #define MEMINT_AVG_BUSY_EN (1<<5) #define MEMINT_EVAL_CHG_EN (1<<4) #define MEMINT_MON_IDLE_EN (1<<3) #define MEMINT_UP_EVAL_EN (1<<2) #define MEMINT_DOWN_EVAL_EN (1<<1) #define MEMINT_SW_CMD_EN (1<<0) #define MEMINTRSTR 0x11182 /* 16 bits */ #define MEM_RSEXIT_MASK 0xc000 #define MEM_RSEXIT_SHIFT 14 #define MEM_CONT_BUSY_MASK 0x3000 #define MEM_CONT_BUSY_SHIFT 12 #define MEM_AVG_BUSY_MASK 0x0c00 #define MEM_AVG_BUSY_SHIFT 10 #define MEM_EVAL_CHG_MASK 0x0300 #define MEM_EVAL_BUSY_SHIFT 8 #define MEM_MON_IDLE_MASK 0x00c0 #define MEM_MON_IDLE_SHIFT 6 #define MEM_UP_EVAL_MASK 0x0030 #define MEM_UP_EVAL_SHIFT 4 #define MEM_DOWN_EVAL_MASK 0x000c #define MEM_DOWN_EVAL_SHIFT 2 #define MEM_SW_CMD_MASK 0x0003 #define MEM_INT_STEER_GFX 0 #define MEM_INT_STEER_CMR 1 #define MEM_INT_STEER_SMI 2 #define MEM_INT_STEER_SCI 3 #define MEMINTRSTS 0x11184 #define MEMINT_RSEXIT (1<<7) #define MEMINT_CONT_BUSY (1<<6) #define MEMINT_AVG_BUSY (1<<5) #define MEMINT_EVAL_CHG (1<<4) #define MEMINT_MON_IDLE (1<<3) #define MEMINT_UP_EVAL (1<<2) #define MEMINT_DOWN_EVAL (1<<1) #define MEMINT_SW_CMD (1<<0) #define MEMMODECTL 0x11190 #define MEMMODE_BOOST_EN (1<<31) #define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ #define MEMMODE_BOOST_FREQ_SHIFT 24 #define MEMMODE_IDLE_MODE_MASK 0x00030000 #define MEMMODE_IDLE_MODE_SHIFT 16 #define MEMMODE_IDLE_MODE_EVAL 0 #define MEMMODE_IDLE_MODE_CONT 1 #define MEMMODE_HWIDLE_EN (1<<15) #define MEMMODE_SWMODE_EN (1<<14) #define MEMMODE_RCLK_GATE (1<<13) #define MEMMODE_HW_UPDATE (1<<12) #define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ #define MEMMODE_FSTART_SHIFT 8 #define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ #define MEMMODE_FMAX_SHIFT 4 #define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ #define RCBMAXAVG 0x1119c #define MEMSWCTL2 0x1119e /* Cantiga only */ #define SWMEMCMD_RENDER_OFF (0 << 13) #define SWMEMCMD_RENDER_ON (1 << 13) #define SWMEMCMD_SWFREQ (2 << 13) #define SWMEMCMD_TARVID (3 << 13) #define SWMEMCMD_VRM_OFF (4 << 13) #define SWMEMCMD_VRM_ON (5 << 13) #define CMDSTS (1<<12) #define SFCAVM (1<<11) #define SWFREQ_MASK 0x0380 /* P0-7 */ #define SWFREQ_SHIFT 7 #define TARVID_MASK 0x001f #define MEMSTAT_CTG 0x111a0 #define RCBMINAVG 0x111a0 #define RCUPEI 0x111b0 #define RCDNEI 0x111b4 #define RSTDBYCTL 0x111b8 #define RS1EN (1<<31) #define RS2EN (1<<30) #define RS3EN (1<<29) #define D3RS3EN (1<<28) /* Display D3 implies RS3 */ #define SWPROMORSX (1<<27) /* RSx promotion timers
ignored */ #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ #define RSX_STATUS_MASK (7<<20) #define RSX_STATUS_ON (0<<20) #define RSX_STATUS_RC1 (1<<20) #define RSX_STATUS_RC1E (2<<20) #define RSX_STATUS_RS1 (3<<20) #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ #define RSX_STATUS_RSVD2 (7<<20) #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ #define JRSC (1<<17) /* rsx coupled to cpu c-state */ #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ #define RS1CONTSAV_MASK (3<<14) #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ #define RS1CONTSAV_RSVD (1<<14) #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ #define NORMSLEXLAT_MASK (3<<12) #define SLOW_RS123 (0<<12) #define SLOW_RS23 (1<<12) #define SLOW_RS3 (2<<12) #define NORMAL_RS123 (3<<12) #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ #define RS_CSTATE_MASK (3<<4) #define RS_CSTATE_C367_RS1 (0<<4) #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) #define RS_CSTATE_RSVD (2<<4) #define RS_CSTATE_C367_RS2 (3<<4) #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ #define VIDCTL 0x111c0 #define VIDSTS 0x111c8 #define VIDSTART 0x111cc /* 8 bits */ #define MEMSTAT_ILK 0x111f8 #define MEMSTAT_VID_MASK 0x7f00 #define MEMSTAT_VID_SHIFT 8 #define MEMSTAT_PSTATE_MASK 0x00f8 #define MEMSTAT_PSTATE_SHIFT 3 #define MEMSTAT_MON_ACTV (1<<2) #define MEMSTAT_SRC_CTL_MASK 0x0003 #define MEMSTAT_SRC_CTL_CORE 0 #define MEMSTAT_SRC_CTL_TRB 1 #define MEMSTAT_SRC_CTL_THM 2 #define MEMSTAT_SRC_CTL_STDBY 3 #define RCPREVBSYTUPAVG 0x113b8 #define RCPREVBSYTDNAVG 0x113bc #define PMMISC 0x11214 #define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ #define SDEW 0x1124c #define CSIEW0 0x11250 #define CSIEW1 0x11254 #define CSIEW2 0x11258 #define PEW 0x1125c #define DEW 0x11270 #define MCHAFE 0x112c0 #define CSIEC 0x112e0 #define DMIEC 0x112e4 #define DDREC 0x112e8 #define PEG0EC 0x112ec #define PEG1EC 0x112f0 #define GFXEC 0x112f4 #define RPPREVBSYTUPAVG 0x113b8 #define RPPREVBSYTDNAVG 0x113bc #define ECR 0x11600 #define ECR_GPFE (1<<31) #define ECR_IMONE (1<<30) #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ #define OGW0 0x11608 #define OGW1 0x1160c #define EG0 0x11610 #define EG1 0x11614 #define EG2 0x11618 #define EG3 0x1161c #define EG4 0x11620 #define EG5 0x11624 #define EG6 0x11628 #define EG7 0x1162c #define PXW 0x11664 #define PXWL 0x11680 #define LCFUSE02 0x116c0 #define LCFUSE_HIV_MASK 0x000000ff #define CSIPLL0 0x12c10 #define DDRMPLL1 0X12c20 #define PEG_BAND_GAP_DATA 0x14d68 #define GEN6_GT_PERF_STATUS 0x145948 #define GEN6_RP_STATE_LIMITS 0x145994 #define GEN6_RP_STATE_CAP 0x145998 /* * Logical Context regs */ #define CCID 0x2180 #define CCID_EN (1<<0) /* * Overlay regs */ #define OVADD 0x30000 #define DOVSTA 
0x30008 #define OC_BUF (0x3<<20) #define OGAMC5 0x30010 #define OGAMC4 0x30014 #define OGAMC3 0x30018 #define OGAMC2 0x3001c #define OGAMC1 0x30020 #define OGAMC0 0x30024 /* * Display engine regs */ /* Pipe A timing regs */ #define _HTOTAL_A 0x60000 #define _HBLANK_A 0x60004 #define _HSYNC_A 0x60008 #define _VTOTAL_A 0x6000c #define _VBLANK_A 0x60010 #define _VSYNC_A 0x60014 #define _PIPEASRC 0x6001c #define _BCLRPAT_A 0x60020 #define _VSYNCSHIFT_A 0x60028 /* Pipe B timing regs */ #define _HTOTAL_B 0x61000 #define _HBLANK_B 0x61004 #define _HSYNC_B 0x61008 #define _VTOTAL_B 0x6100c #define _VBLANK_B 0x61010 #define _VSYNC_B 0x61014 #define _PIPEBSRC 0x6101c #define _BCLRPAT_B 0x61020 #define _VSYNCSHIFT_B 0x61028 #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) #define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) #define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) #define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) /* VGA port control */ #define ADPA 0x61100 #define ADPA_DAC_ENABLE (1<<31) #define ADPA_DAC_DISABLE 0 #define ADPA_PIPE_SELECT_MASK (1<<30) #define ADPA_PIPE_A_SELECT 0 #define ADPA_PIPE_B_SELECT (1<<30) #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) #define ADPA_USE_VGA_HVPOLARITY (1<<15) #define ADPA_SETS_HVPOLARITY 0 #define ADPA_VSYNC_CNTL_DISABLE (1<<11) #define ADPA_VSYNC_CNTL_ENABLE 0 #define ADPA_HSYNC_CNTL_DISABLE (1<<10) #define ADPA_HSYNC_CNTL_ENABLE 0 #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) #define ADPA_VSYNC_ACTIVE_LOW 0 #define ADPA_HSYNC_ACTIVE_HIGH (1<<3) #define ADPA_HSYNC_ACTIVE_LOW 0 #define ADPA_DPMS_MASK (~(3<<10)) #define ADPA_DPMS_ON (0<<10) #define ADPA_DPMS_SUSPEND (1<<10) #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) /* Hotplug control (945+ only) */ #define PORT_HOTPLUG_EN 0x61110 #define HDMIB_HOTPLUG_INT_EN (1 << 29) #define DPB_HOTPLUG_INT_EN (1 << 29) #define HDMIC_HOTPLUG_INT_EN (1 << 28) #define DPC_HOTPLUG_INT_EN (1 << 28) #define HDMID_HOTPLUG_INT_EN (1 << 27) #define DPD_HOTPLUG_INT_EN (1 << 27) #define SDVOB_HOTPLUG_INT_EN (1 << 26) #define SDVOC_HOTPLUG_INT_EN (1 << 25) #define TV_HOTPLUG_INT_EN (1 << 18) #define CRT_HOTPLUG_INT_EN (1 << 9) #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) /* must use period 64 on GM45 according to docs */ #define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) #define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) #define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) #define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) #define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) #define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define PORT_HOTPLUG_STAT 0x61114 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) #define DPB_HOTPLUG_INT_STATUS (1 << 29) #define HDMIC_HOTPLUG_INT_STATUS (1 << 28) #define DPC_HOTPLUG_INT_STATUS (1 << 28) #define HDMID_HOTPLUG_INT_STATUS (1 << 27) #define DPD_HOTPLUG_INT_STATUS (1 << 27) #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) 
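/*
 * Why (1U << 31) rather than (1 << 31) in the register masks below: shifting
 * a 1 into the sign bit of a 32-bit signed int is undefined behavior in C99
 * (6.5.7p4), and the negative value it yields in practice also sign-extends
 * when widened to 64 bits.  A minimal sketch, with illustrative names only:
 *
 *	uint64_t bad  = 1 << 31;	yields 0xffffffff80000000 in practice
 *	uint64_t good = 1U << 31;	yields 0x0000000080000000 as intended
 */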
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8) #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) #define SDVOC_HOTPLUG_INT_STATUS (1 << 7) #define SDVOB_HOTPLUG_INT_STATUS (1 << 6) /* SDVO port control */ #define SDVOB 0x61140 #define SDVOC 0x61160 -#define SDVO_ENABLE (1 << 31) +#define SDVO_ENABLE (1U << 31) #define SDVO_PIPE_B_SELECT (1 << 30) #define SDVO_STALL_SELECT (1 << 29) #define SDVO_INTERRUPT_ENABLE (1 << 26) /** * 915G/GM SDVO pixel multiplier. * * Programmed value is multiplier - 1, up to 5x. * * \sa DPLL_MD_UDI_MULTIPLIER_MASK */ #define SDVO_PORT_MULTIPLY_MASK (7 << 23) #define SDVO_PORT_MULTIPLY_SHIFT 23 #define SDVO_PHASE_SELECT_MASK (15 << 19) #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) #define SDVOC_GANG_MODE (1 << 16) #define SDVO_ENCODING_SDVO (0x0 << 10) #define SDVO_ENCODING_HDMI (0x2 << 10) /** Required for HDMI operation */ #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) #define SDVO_COLOR_RANGE_16_235 (1 << 8) #define SDVO_BORDER_ENABLE (1 << 7) #define SDVO_AUDIO_ENABLE (1 << 6) /** New with 965, default is to be set */ #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) /** New with 965, default is to be set */ #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) #define SDVOB_PCIE_CONCURRENCY (1 << 3) #define SDVO_DETECTED (1 << 2) /* Bits to be preserved when writing */ #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) #define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) /* DVO port control */ #define DVOA 0x61120 #define DVOB 0x61140 #define DVOC 0x61160 -#define DVO_ENABLE (1 << 31) +#define DVO_ENABLE (1U << 31) #define DVO_PIPE_B_SELECT (1 << 30) #define DVO_PIPE_STALL_UNUSED (0 << 28) #define DVO_PIPE_STALL (1 << 28) #define DVO_PIPE_STALL_TV (2 << 28) #define DVO_PIPE_STALL_MASK (3 << 28) #define DVO_USE_VGA_SYNC (1 << 15) #define DVO_DATA_ORDER_I740 (0 << 14) #define DVO_DATA_ORDER_FP (1 << 14) #define DVO_VSYNC_DISABLE (1 << 11) #define DVO_HSYNC_DISABLE (1 << 10) #define DVO_VSYNC_TRISTATE (1 << 9) #define DVO_HSYNC_TRISTATE (1 << 8) #define DVO_BORDER_ENABLE (1 << 7) #define DVO_DATA_ORDER_GBRG (1 << 6) #define DVO_DATA_ORDER_RGGB (0 << 6) #define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) #define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) #define DVO_VSYNC_ACTIVE_HIGH (1 << 4) #define DVO_HSYNC_ACTIVE_HIGH (1 << 3) #define DVO_BLANK_ACTIVE_HIGH (1 << 2) #define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ #define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ #define DVO_PRESERVE_MASK (0x7<<24) #define DVOA_SRCDIM 0x61124 #define DVOB_SRCDIM 0x61144 #define DVOC_SRCDIM 0x61164 #define DVO_SRCDIM_HORIZONTAL_SHIFT 12 #define DVO_SRCDIM_VERTICAL_SHIFT 0 /* LVDS port control */ #define LVDS 0x61180 /* * Enables the LVDS port. This bit must be set before DPLLs are enabled, as * the DPLL semantics change when the LVDS is assigned to that pipe. */ -#define LVDS_PORT_EN (1 << 31) +#define LVDS_PORT_EN (1U << 31) /* Selects pipe B for LVDS data. Must be set on pre-965. */ #define LVDS_PIPEB_SELECT (1 << 30) #define LVDS_PIPE_MASK (1 << 30) #define LVDS_PIPE(pipe) ((pipe) << 30) /* LVDS dithering flag on 965/g4x platform */ #define LVDS_ENABLE_DITHER (1 << 25) /* LVDS sync polarity flags. Set to invert (i.e. negative) */ #define LVDS_VSYNC_POLARITY (1 << 21) #define LVDS_HSYNC_POLARITY (1 << 20) /* Enable border for unscaled (or aspect-scaled) display */ #define LVDS_BORDER_ENABLE (1 << 15) /* * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per * pixel.
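 * (Each of A0-A2 carries six bits per pixel of R, G and B respectively;
 * panels wired for 24-bit color additionally need the A3 pair defined below
 * for the extra LSBs.)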
*/ #define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) #define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) #define LVDS_A0A2_CLKA_POWER_UP (3 << 8) /* * Controls the A3 data pair, which contains the additional LSBs for 24 bit * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be * on. */ #define LVDS_A3_POWER_MASK (3 << 6) #define LVDS_A3_POWER_DOWN (0 << 6) #define LVDS_A3_POWER_UP (3 << 6) /* * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP * is set. */ #define LVDS_CLKB_POWER_MASK (3 << 4) #define LVDS_CLKB_POWER_DOWN (0 << 4) #define LVDS_CLKB_POWER_UP (3 << 4) /* * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 * setting for whether we are in dual-channel mode. The B3 pair will * additionally only be powered up when LVDS_A3_POWER_UP is set. */ #define LVDS_B0B3_POWER_MASK (3 << 2) #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) /* Video Data Island Packet control */ #define VIDEO_DIP_DATA 0x61178 #define VIDEO_DIP_CTL 0x61170 -#define VIDEO_DIP_ENABLE (1 << 31) +#define VIDEO_DIP_ENABLE (1U << 31) #define VIDEO_DIP_PORT_B (1 << 29) #define VIDEO_DIP_PORT_C (2 << 29) #define VIDEO_DIP_ENABLE_AVI (1 << 21) #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) #define VIDEO_DIP_ENABLE_SPD (8 << 21) #define VIDEO_DIP_SELECT_MASK (3 << 19) #define VIDEO_DIP_SELECT_AVI (0 << 19) #define VIDEO_DIP_SELECT_VENDOR (1 << 19) #define VIDEO_DIP_SELECT_SPD (3 << 19) #define VIDEO_DIP_FREQ_ONCE (0 << 16) #define VIDEO_DIP_FREQ_VSYNC (1 << 16) #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) /* Panel power sequencing */ #define PP_STATUS 0x61200 -#define PP_ON (1 << 31) +#define PP_ON (1U << 31) /* * Indicates that all dependencies of the panel are on: * * - PLL enabled * - pipe enabled * - LVDS/DVOB/DVOC on */ #define PP_READY (1 << 30) #define PP_SEQUENCE_NONE (0 << 28) #define PP_SEQUENCE_POWER_UP (1 << 28) #define PP_SEQUENCE_POWER_DOWN (2 << 28) #define PP_SEQUENCE_MASK (3 << 28) #define PP_SEQUENCE_SHIFT 28 #define PP_CYCLE_DELAY_ACTIVE (1 << 27) #define PP_SEQUENCE_STATE_MASK 0x0000000f #define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) #define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) #define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) #define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) #define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) #define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) #define PP_SEQUENCE_STATE_RESET (0xf << 0) #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 #define PP_OFF_DELAYS 0x6120c #define PP_DIVISOR 0x61210 /* Panel fitting */ #define PFIT_CONTROL 0x61230 -#define PFIT_ENABLE (1 << 31) +#define PFIT_ENABLE (1U << 31) #define PFIT_PIPE_MASK (3 << 29) #define PFIT_PIPE_SHIFT 29 #define VERT_INTERP_DISABLE (0 << 10) #define VERT_INTERP_BILINEAR (1 << 10) #define VERT_INTERP_MASK (3 << 10) #define VERT_AUTO_SCALE (1 << 9) #define HORIZ_INTERP_DISABLE (0 << 6) #define HORIZ_INTERP_BILINEAR (1 << 6) #define HORIZ_INTERP_MASK (3 << 6) #define HORIZ_AUTO_SCALE (1 << 5) #define PANEL_8TO6_DITHER_ENABLE (1 << 3) #define PFIT_FILTER_FUZZY (0 << 24) #define PFIT_SCALING_AUTO (0 << 26) #define PFIT_SCALING_PROGRAMMED (1 << 26) #define PFIT_SCALING_PILLAR (2 << 26) #define PFIT_SCALING_LETTER (3 << 26) #define PFIT_PGM_RATIOS 0x61234 #define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 /* Pre-965 */ #define PFIT_VERT_SCALE_SHIFT 20 #define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_HORIZ_SCALE_SHIFT 4 
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 /* 965+ */ #define PFIT_VERT_SCALE_SHIFT_965 16 #define PFIT_VERT_SCALE_MASK_965 0x1fff0000 #define PFIT_HORIZ_SCALE_SHIFT_965 0 #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff #define PFIT_AUTO_RATIOS 0x61238 /* Backlight control */ #define BLC_PWM_CTL 0x61254 #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) #define BLC_PWM_CTL2 0x61250 /* 965+ only */ #define BLM_COMBINATION_MODE (1 << 30) /* * This is the most significant 15 bits of the number of backlight cycles in a * complete cycle of the modulated backlight control. * * The actual value is this field multiplied by two. */ #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) #define BLM_LEGACY_MODE (1 << 16) /* * This is the number of cycles out of the backlight modulation cycle for which * the backlight is on. * * This field must be no greater than the number of cycles in the complete * backlight modulation cycle. */ #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) #define BLC_HIST_CTL 0x61260 /* TV port control */ #define TV_CTL 0x68000 /** Enables the TV encoder */ -# define TV_ENC_ENABLE (1 << 31) +# define TV_ENC_ENABLE (1U << 31) /** Sources the TV encoder input from pipe B instead of A. */ # define TV_ENC_PIPEB_SELECT (1 << 30) /** Outputs composite video (DAC A only) */ # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) /** Outputs SVideo video (DAC B/C) */ # define TV_ENC_OUTPUT_SVIDEO (1 << 28) /** Outputs Component video (DAC A/B/C) */ # define TV_ENC_OUTPUT_COMPONENT (2 << 28) /** Outputs Composite and SVideo (DAC A/B/C) */ # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) # define TV_TRILEVEL_SYNC (1 << 21) /** Enables slow sync generation (945GM only) */ # define TV_SLOW_SYNC (1 << 20) /** Selects 4x oversampling for 480i and 576p */ # define TV_OVERSAMPLE_4X (0 << 18) /** Selects 2x oversampling for 720p and 1080i */ # define TV_OVERSAMPLE_2X (1 << 18) /** Selects no oversampling for 1080p */ # define TV_OVERSAMPLE_NONE (2 << 18) /** Selects 8x oversampling */ # define TV_OVERSAMPLE_8X (3 << 18) /** Selects progressive mode rather than interlaced */ # define TV_PROGRESSIVE (1 << 17) /** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ # define TV_PAL_BURST (1 << 16) /** Field for setting delay of Y compared to C */ # define TV_YC_SKEW_MASK (7 << 12) /** Enables a fix for 480p/576p standard definition modes on the 915GM only */ # define TV_ENC_SDP_FIX (1 << 11) /** * Enables a fix for the 915GM only. * * Not sure what it does. */ # define TV_ENC_C0_FIX (1 << 10) /** Bits that must be preserved by software */ # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) # define TV_FUSE_STATE_MASK (3 << 4) /** Read-only state that reports all features enabled */ # define TV_FUSE_STATE_ENABLED (0 << 4) /** Read-only state that reports that Macrovision is disabled in hardware*/ # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) /** Read-only state that reports that TV-out is disabled in hardware. 
*/ # define TV_FUSE_STATE_DISABLED (2 << 4) /** Normal operation */ # define TV_TEST_MODE_NORMAL (0 << 0) /** Encoder test pattern 1 - combo pattern */ # define TV_TEST_MODE_PATTERN_1 (1 << 0) /** Encoder test pattern 2 - full screen vertical 75% color bars */ # define TV_TEST_MODE_PATTERN_2 (2 << 0) /** Encoder test pattern 3 - full screen horizontal 75% color bars */ # define TV_TEST_MODE_PATTERN_3 (3 << 0) /** Encoder test pattern 4 - random noise */ # define TV_TEST_MODE_PATTERN_4 (4 << 0) /** Encoder test pattern 5 - linear color ramps */ # define TV_TEST_MODE_PATTERN_5 (5 << 0) /** * This test mode forces the DACs to 50% of full output. * * This is used for load detection in combination with TVDAC_SENSE_MASK */ # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) # define TV_TEST_MODE_MASK (7 << 0) #define TV_DAC 0x68004 # define TV_DAC_SAVE 0x00ffff00 /** * Reports that DAC state change logic has reported change (RO). * * This gets cleared when TV_DAC_STATE_EN is cleared */ -# define TVDAC_STATE_CHG (1 << 31) +# define TVDAC_STATE_CHG (1U << 31) # define TVDAC_SENSE_MASK (7 << 28) /** Reports that DAC A voltage is above the detect threshold */ # define TVDAC_A_SENSE (1 << 30) /** Reports that DAC B voltage is above the detect threshold */ # define TVDAC_B_SENSE (1 << 29) /** Reports that DAC C voltage is above the detect threshold */ # define TVDAC_C_SENSE (1 << 28) /** * Enables DAC state detection logic, for load-based TV detection. * * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set * to off, for load detection to work. */ # define TVDAC_STATE_CHG_EN (1 << 27) /** Sets the DAC A sense value to high */ # define TVDAC_A_SENSE_CTL (1 << 26) /** Sets the DAC B sense value to high */ # define TVDAC_B_SENSE_CTL (1 << 25) /** Sets the DAC C sense value to high */ # define TVDAC_C_SENSE_CTL (1 << 24) /** Overrides the ENC_ENABLE and DAC voltage levels */ # define DAC_CTL_OVERRIDE (1 << 7) /** Sets the slew rate. Must be preserved in software */ # define ENC_TVDAC_SLEW_FAST (1 << 6) # define DAC_A_1_3_V (0 << 4) # define DAC_A_1_1_V (1 << 4) # define DAC_A_0_7_V (2 << 4) # define DAC_A_MASK (3 << 4) # define DAC_B_1_3_V (0 << 2) # define DAC_B_1_1_V (1 << 2) # define DAC_B_0_7_V (2 << 2) # define DAC_B_MASK (3 << 2) # define DAC_C_1_3_V (0 << 0) # define DAC_C_1_1_V (1 << 0) # define DAC_C_0_7_V (2 << 0) # define DAC_C_MASK (3 << 0) /** * CSC coefficients are stored in a floating point format with 9 bits of * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with * -1 (0x3) being the only legal negative value. */ #define TV_CSC_Y 0x68010 # define TV_RY_MASK 0x07ff0000 # define TV_RY_SHIFT 16 # define TV_GY_MASK 0x00000fff # define TV_GY_SHIFT 0 #define TV_CSC_Y2 0x68014 # define TV_BY_MASK 0x07ff0000 # define TV_BY_SHIFT 16 /** * Y attenuation for component video. * * Stored in 1.9 fixed point. */ # define TV_AY_MASK 0x000003ff # define TV_AY_SHIFT 0 #define TV_CSC_U 0x68018 # define TV_RU_MASK 0x07ff0000 # define TV_RU_SHIFT 16 # define TV_GU_MASK 0x000007ff # define TV_GU_SHIFT 0 #define TV_CSC_U2 0x6801c # define TV_BU_MASK 0x07ff0000 # define TV_BU_SHIFT 16 /** * U attenuation for component video. * * Stored in 1.9 fixed point. 
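 * In 1.9 fixed point, unity gain (1.0) is programmed as 1 << 9 = 0x200, so
 * the field expresses gains below 2.0 in steps of 1/512.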
*/ # define TV_AU_MASK 0x000003ff # define TV_AU_SHIFT 0 #define TV_CSC_V 0x68020 # define TV_RV_MASK 0x0fff0000 # define TV_RV_SHIFT 16 # define TV_GV_MASK 0x000007ff # define TV_GV_SHIFT 0 #define TV_CSC_V2 0x68024 # define TV_BV_MASK 0x07ff0000 # define TV_BV_SHIFT 16 /** * V attenuation for component video. * * Stored in 1.9 fixed point. */ # define TV_AV_MASK 0x000007ff # define TV_AV_SHIFT 0 #define TV_CLR_KNOBS 0x68028 /** 2s-complement brightness adjustment */ # define TV_BRIGHTNESS_MASK 0xff000000 # define TV_BRIGHTNESS_SHIFT 24 /** Contrast adjustment, as a 2.6 unsigned floating point number */ # define TV_CONTRAST_MASK 0x00ff0000 # define TV_CONTRAST_SHIFT 16 /** Saturation adjustment, as a 2.6 unsigned floating point number */ # define TV_SATURATION_MASK 0x0000ff00 # define TV_SATURATION_SHIFT 8 /** Hue adjustment, as an integer phase angle in degrees */ # define TV_HUE_MASK 0x000000ff # define TV_HUE_SHIFT 0 #define TV_CLR_LEVEL 0x6802c /** Controls the DAC level for black */ # define TV_BLACK_LEVEL_MASK 0x01ff0000 # define TV_BLACK_LEVEL_SHIFT 16 /** Controls the DAC level for blanking */ # define TV_BLANK_LEVEL_MASK 0x000001ff # define TV_BLANK_LEVEL_SHIFT 0 #define TV_H_CTL_1 0x68030 /** Number of pixels in the hsync. */ # define TV_HSYNC_END_MASK 0x1fff0000 # define TV_HSYNC_END_SHIFT 16 /** Total number of pixels minus one in the line (display and blanking). */ # define TV_HTOTAL_MASK 0x00001fff # define TV_HTOTAL_SHIFT 0 #define TV_H_CTL_2 0x68034 /** Enables the colorburst (needed for non-component color) */ -# define TV_BURST_ENA (1 << 31) +# define TV_BURST_ENA (1U << 31) /** Offset of the colorburst from the start of hsync, in pixels minus one. */ # define TV_HBURST_START_SHIFT 16 # define TV_HBURST_START_MASK 0x1fff0000 /** Length of the colorburst */ # define TV_HBURST_LEN_SHIFT 0 # define TV_HBURST_LEN_MASK 0x0001fff #define TV_H_CTL_3 0x68038 /** End of hblank, measured in pixels minus one from start of hsync */ # define TV_HBLANK_END_SHIFT 16 # define TV_HBLANK_END_MASK 0x1fff0000 /** Start of hblank, measured in pixels minus one from start of hsync */ # define TV_HBLANK_START_SHIFT 0 # define TV_HBLANK_START_MASK 0x0001fff #define TV_V_CTL_1 0x6803c /** XXX */ # define TV_NBR_END_SHIFT 16 # define TV_NBR_END_MASK 0x07ff0000 /** XXX */ # define TV_VI_END_F1_SHIFT 8 # define TV_VI_END_F1_MASK 0x00003f00 /** XXX */ # define TV_VI_END_F2_SHIFT 0 # define TV_VI_END_F2_MASK 0x0000003f #define TV_V_CTL_2 0x68040 /** Length of vsync, in half lines */ # define TV_VSYNC_LEN_MASK 0x07ff0000 # define TV_VSYNC_LEN_SHIFT 16 /** Offset of the start of vsync in field 1, measured in one less than the * number of half lines. */ # define TV_VSYNC_START_F1_MASK 0x00007f00 # define TV_VSYNC_START_F1_SHIFT 8 /** * Offset of the start of vsync in field 2, measured in one less than the * number of half lines. */ # define TV_VSYNC_START_F2_MASK 0x0000007f # define TV_VSYNC_START_F2_SHIFT 0 #define TV_V_CTL_3 0x68044 /** Enables generation of the equalization signal */ -# define TV_EQUAL_ENA (1 << 31) +# define TV_EQUAL_ENA (1U << 31) /** Length of vsync, in half lines */ # define TV_VEQ_LEN_MASK 0x007f0000 # define TV_VEQ_LEN_SHIFT 16 /** Offset of the start of equalization in field 1, measured in one less than * the number of half lines. */ # define TV_VEQ_START_F1_MASK 0x0007f00 # define TV_VEQ_START_F1_SHIFT 8 /** * Offset of the start of equalization in field 2, measured in one less than * the number of half lines. 
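 * For example, equalization starting on the seventh half line is programmed
 * as 7 - 1 = 6 in this field.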
*/ # define TV_VEQ_START_F2_MASK 0x000007f # define TV_VEQ_START_F2_SHIFT 0 #define TV_V_CTL_4 0x68048 /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F1_MASK 0x003f0000 # define TV_VBURST_START_F1_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. */ # define TV_VBURST_END_F1_MASK 0x000000ff # define TV_VBURST_END_F1_SHIFT 0 #define TV_V_CTL_5 0x6804c /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F2_MASK 0x003f0000 # define TV_VBURST_START_F2_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. */ # define TV_VBURST_END_F2_MASK 0x000000ff # define TV_VBURST_END_F2_SHIFT 0 #define TV_V_CTL_6 0x68050 /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F3_MASK 0x003f0000 # define TV_VBURST_START_F3_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. */ # define TV_VBURST_END_F3_MASK 0x000000ff # define TV_VBURST_END_F3_SHIFT 0 #define TV_V_CTL_7 0x68054 /** * Offset to start of vertical colorburst, measured in one less than the * number of lines from vertical start. */ # define TV_VBURST_START_F4_MASK 0x003f0000 # define TV_VBURST_START_F4_SHIFT 16 /** * Offset to the end of vertical colorburst, measured in one less than the * number of lines from the start of NBR. */ # define TV_VBURST_END_F4_MASK 0x000000ff # define TV_VBURST_END_F4_SHIFT 0 #define TV_SC_CTL_1 0x68060 /** Turns on the first subcarrier phase generation DDA */ -# define TV_SC_DDA1_EN (1 << 31) +# define TV_SC_DDA1_EN (1U << 31) /** Turns on the second subcarrier phase generation DDA */ # define TV_SC_DDA2_EN (1 << 30) /** Turns on the third subcarrier phase generation DDA */ # define TV_SC_DDA3_EN (1 << 29) /** Sets the subcarrier DDA to reset frequency every other field */ # define TV_SC_RESET_EVERY_2 (0 << 24) /** Sets the subcarrier DDA to reset frequency every fourth field */ # define TV_SC_RESET_EVERY_4 (1 << 24) /** Sets the subcarrier DDA to reset frequency every eighth field */ # define TV_SC_RESET_EVERY_8 (2 << 24) /** Sets the subcarrier DDA to never reset the frequency */ # define TV_SC_RESET_NEVER (3 << 24) /** Sets the peak amplitude of the colorburst.*/ # define TV_BURST_LEVEL_MASK 0x00ff0000 # define TV_BURST_LEVEL_SHIFT 16 /** Sets the increment of the first subcarrier phase generation DDA */ # define TV_SCDDA1_INC_MASK 0x00000fff # define TV_SCDDA1_INC_SHIFT 0 #define TV_SC_CTL_2 0x68064 /** Sets the rollover for the second subcarrier phase generation DDA */ # define TV_SCDDA2_SIZE_MASK 0x7fff0000 # define TV_SCDDA2_SIZE_SHIFT 16 /** Sets the increment of the second subcarrier phase generation DDA */ # define TV_SCDDA2_INC_MASK 0x00007fff # define TV_SCDDA2_INC_SHIFT 0 #define TV_SC_CTL_3 0x68068 /** Sets the rollover for the third subcarrier phase generation DDA */ # define TV_SCDDA3_SIZE_MASK 0x7fff0000 # define TV_SCDDA3_SIZE_SHIFT 16 /** Sets the increment of the third subcarrier phase generation DDA */ # define TV_SCDDA3_INC_MASK 0x00007fff # define TV_SCDDA3_INC_SHIFT 0 #define TV_WIN_POS 0x68070 /** X coordinate of the display from the start of horizontal active */ # define TV_XPOS_MASK 0x1fff0000 #
define TV_XPOS_SHIFT 16 /** Y coordinate of the display from the start of vertical active (NBR) */ # define TV_YPOS_MASK 0x00000fff # define TV_YPOS_SHIFT 0 #define TV_WIN_SIZE 0x68074 /** Horizontal size of the display window, measured in pixels*/ # define TV_XSIZE_MASK 0x1fff0000 # define TV_XSIZE_SHIFT 16 /** * Vertical size of the display window, measured in pixels. * * Must be even for interlaced modes. */ # define TV_YSIZE_MASK 0x00000fff # define TV_YSIZE_SHIFT 0 #define TV_FILTER_CTL_1 0x68080 /** * Enables automatic scaling calculation. * * If set, the rest of the registers are ignored, and the calculated values can * be read back from the register. */ -# define TV_AUTO_SCALE (1 << 31) +# define TV_AUTO_SCALE (1U << 31) /** * Disables the vertical filter. * * This is required on modes more than 1024 pixels wide */ # define TV_V_FILTER_BYPASS (1 << 29) /** Enables adaptive vertical filtering */ # define TV_VADAPT (1 << 28) # define TV_VADAPT_MODE_MASK (3 << 26) /** Selects the least adaptive vertical filtering mode */ # define TV_VADAPT_MODE_LEAST (0 << 26) /** Selects the moderately adaptive vertical filtering mode */ # define TV_VADAPT_MODE_MODERATE (1 << 26) /** Selects the most adaptive vertical filtering mode */ # define TV_VADAPT_MODE_MOST (3 << 26) /** * Sets the horizontal scaling factor. * * This should be the fractional part of the horizontal scaling factor divided * by the oversampling rate. TV_HSCALE should be less than 1, and set to: * * (src width - 1) / ((oversample * dest width) - 1) */ # define TV_HSCALE_FRAC_MASK 0x00003fff # define TV_HSCALE_FRAC_SHIFT 0 #define TV_FILTER_CTL_2 0x68084 /** * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) */ # define TV_VSCALE_INT_MASK 0x00038000 # define TV_VSCALE_INT_SHIFT 15 /** * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. * * \sa TV_VSCALE_INT_MASK */ # define TV_VSCALE_FRAC_MASK 0x00007fff # define TV_VSCALE_FRAC_SHIFT 0 #define TV_FILTER_CTL_3 0x68088 /** * Sets the integer part of the 3.15 fixed-point vertical scaling factor. * * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) * * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. */ # define TV_VSCALE_IP_INT_MASK 0x00038000 # define TV_VSCALE_IP_INT_SHIFT 15 /** * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. * * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. * * \sa TV_VSCALE_IP_INT_MASK */ # define TV_VSCALE_IP_FRAC_MASK 0x00007fff # define TV_VSCALE_IP_FRAC_SHIFT 0 #define TV_CC_CONTROL 0x68090 -# define TV_CC_ENABLE (1 << 31) +# define TV_CC_ENABLE (1U << 31) /** * Specifies which field to send the CC data in. * * CC data is usually sent in field 0. */ # define TV_CC_FID_MASK (1 << 27) # define TV_CC_FID_SHIFT 27 /** Sets the horizontal position of the CC data. Usually 135. */ # define TV_CC_HOFF_MASK 0x03ff0000 # define TV_CC_HOFF_SHIFT 16 /** Sets the vertical position of the CC data. Usually 21 */ # define TV_CC_LINE_MASK 0x0000003f # define TV_CC_LINE_SHIFT 0 #define TV_CC_DATA 0x68094 -# define TV_CC_RDY (1 << 31) +# define TV_CC_RDY (1U << 31) /** Second word of CC data to be transmitted. */ # define TV_CC_DATA_2_MASK 0x007f0000 # define TV_CC_DATA_2_SHIFT 16 /** First word of CC data to be transmitted. 
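 * As with the second word above, only the low seven bits of this field are
 * transmitted; a caption pair is handed to the hardware by writing both
 * words and setting TV_CC_RDY.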
*/ # define TV_CC_DATA_1_MASK 0x0000007f # define TV_CC_DATA_1_SHIFT 0 #define TV_H_LUMA_0 0x68100 #define TV_H_LUMA_59 0x681ec #define TV_H_CHROMA_0 0x68200 #define TV_H_CHROMA_59 0x682ec #define TV_V_LUMA_0 0x68300 #define TV_V_LUMA_42 0x683a8 #define TV_V_CHROMA_0 0x68400 #define TV_V_CHROMA_42 0x684a8 /* Display Port */ #define DP_A 0x64000 /* eDP */ #define DP_B 0x64100 #define DP_C 0x64200 #define DP_D 0x64300 -#define DP_PORT_EN (1 << 31) +#define DP_PORT_EN (1U << 31) #define DP_PIPEB_SELECT (1 << 30) #define DP_PIPE_MASK (1 << 30) /* Link training mode - select a suitable mode for each stage */ #define DP_LINK_TRAIN_PAT_1 (0 << 28) #define DP_LINK_TRAIN_PAT_2 (1 << 28) #define DP_LINK_TRAIN_PAT_IDLE (2 << 28) #define DP_LINK_TRAIN_OFF (3 << 28) #define DP_LINK_TRAIN_MASK (3 << 28) #define DP_LINK_TRAIN_SHIFT 28 /* CPT Link training mode */ #define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) #define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) #define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) #define DP_LINK_TRAIN_OFF_CPT (3 << 8) #define DP_LINK_TRAIN_MASK_CPT (7 << 8) #define DP_LINK_TRAIN_SHIFT_CPT 8 /* Signal voltages. These are mostly controlled by the other end */ #define DP_VOLTAGE_0_4 (0 << 25) #define DP_VOLTAGE_0_6 (1 << 25) #define DP_VOLTAGE_0_8 (2 << 25) #define DP_VOLTAGE_1_2 (3 << 25) #define DP_VOLTAGE_MASK (7 << 25) #define DP_VOLTAGE_SHIFT 25 /* Signal pre-emphasis levels, like voltages, the other end tells us what * they want */ #define DP_PRE_EMPHASIS_0 (0 << 22) #define DP_PRE_EMPHASIS_3_5 (1 << 22) #define DP_PRE_EMPHASIS_6 (2 << 22) #define DP_PRE_EMPHASIS_9_5 (3 << 22) #define DP_PRE_EMPHASIS_MASK (7 << 22) #define DP_PRE_EMPHASIS_SHIFT 22 /* How many wires to use. I guess 3 was too hard */ #define DP_PORT_WIDTH_1 (0 << 19) #define DP_PORT_WIDTH_2 (1 << 19) #define DP_PORT_WIDTH_4 (3 << 19) #define DP_PORT_WIDTH_MASK (7 << 19) /* Mystic DPCD version 1.1 special mode */ #define DP_ENHANCED_FRAMING (1 << 18) /* eDP */ #define DP_PLL_FREQ_270MHZ (0 << 16) #define DP_PLL_FREQ_160MHZ (1 << 16) #define DP_PLL_FREQ_MASK (3 << 16) /** locked once port is enabled */ #define DP_PORT_REVERSAL (1 << 15) /* eDP */ #define DP_PLL_ENABLE (1 << 14) /** sends the clock on lane 15 of the PEG for debug */ #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) #define DP_SCRAMBLING_DISABLE (1 << 12) #define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) /** limit RGB values to avoid confusing TVs */ #define DP_COLOR_RANGE_16_235 (1 << 8) /** Turn on the audio link */ #define DP_AUDIO_OUTPUT_ENABLE (1 << 6) /** vs and hs sync polarity */ #define DP_SYNC_VS_HIGH (1 << 4) #define DP_SYNC_HS_HIGH (1 << 3) /** A fantasy */ #define DP_DETECTED (1 << 2) /** The aux channel provides a way to talk to the * signal sink for DDC etc. 
Max packet size supported * is 20 bytes in each direction, hence the 5 fixed * data registers */ #define DPA_AUX_CH_CTL 0x64010 #define DPA_AUX_CH_DATA1 0x64014 #define DPA_AUX_CH_DATA2 0x64018 #define DPA_AUX_CH_DATA3 0x6401c #define DPA_AUX_CH_DATA4 0x64020 #define DPA_AUX_CH_DATA5 0x64024 #define DPB_AUX_CH_CTL 0x64110 #define DPB_AUX_CH_DATA1 0x64114 #define DPB_AUX_CH_DATA2 0x64118 #define DPB_AUX_CH_DATA3 0x6411c #define DPB_AUX_CH_DATA4 0x64120 #define DPB_AUX_CH_DATA5 0x64124 #define DPC_AUX_CH_CTL 0x64210 #define DPC_AUX_CH_DATA1 0x64214 #define DPC_AUX_CH_DATA2 0x64218 #define DPC_AUX_CH_DATA3 0x6421c #define DPC_AUX_CH_DATA4 0x64220 #define DPC_AUX_CH_DATA5 0x64224 #define DPD_AUX_CH_CTL 0x64310 #define DPD_AUX_CH_DATA1 0x64314 #define DPD_AUX_CH_DATA2 0x64318 #define DPD_AUX_CH_DATA3 0x6431c #define DPD_AUX_CH_DATA4 0x64320 #define DPD_AUX_CH_DATA5 0x64324 -#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) +#define DP_AUX_CH_CTL_SEND_BUSY (1U << 31) #define DP_AUX_CH_CTL_DONE (1 << 30) #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) #define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) #define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) #define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) #define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) #define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) #define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) #define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) #define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) #define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 /* * Computing GMCH M and N values for the Display Port link * * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes * * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) * * The GMCH value is used internally * * bytes_per_pixel is the number of bytes coming out of the plane, * which is after the LUTs, so we want the bytes for our color format. * For our current usage, this is always 3, one byte for R, G and B. */ #define _PIPEA_GMCH_DATA_M 0x70050 #define _PIPEB_GMCH_DATA_M 0x71050 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ #define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) #define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25 #define PIPE_GMCH_DATA_M_MASK (0xffffff) #define _PIPEA_GMCH_DATA_N 0x70054 #define _PIPEB_GMCH_DATA_N 0x71054 #define PIPE_GMCH_DATA_N_MASK (0xffffff) /* * Computing Link M and N values for the Display Port link * * Link M / N = pixel_clock / ls_clk * * (the DP spec calls pixel_clock the 'strm_clk') * * The Link value is transmitted in the Main Stream * Attributes and VB-ID. 
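 *
 * As a worked example (reading the GMCH formula above as
 * dot clock * bytes per pixel / (ls_clk * lane count)): a 148500 kHz pixel
 * clock carried at 3 bytes per pixel over four lanes of a 270000 kHz link
 * gives
 *
 *	GMCH M / N = 148500 * 3 / (270000 * 4) = 0.4125
 *	Link M / N = 148500 / 270000 = 0.55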
*/ #define _PIPEA_DP_LINK_M 0x70060 #define _PIPEB_DP_LINK_M 0x71060 #define PIPEA_DP_LINK_M_MASK (0xffffff) #define _PIPEA_DP_LINK_N 0x70064 #define _PIPEB_DP_LINK_N 0x71064 #define PIPEA_DP_LINK_N_MASK (0xffffff) #define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) #define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) #define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) #define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) /* Display & cursor control */ /* Pipe A */ #define _PIPEADSL 0x70000 #define DSL_LINEMASK 0x00000fff #define _PIPEACONF 0x70008 #define PIPECONF_ENABLE (1<<31) #define PIPECONF_DISABLE 0 #define PIPECONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) #define PIPECONF_FRAME_START_DELAY_MASK (3<<27) #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 #define PIPECONF_PIPE_LOCKED (1<<25) #define PIPECONF_PALETTE 0 #define PIPECONF_GAMMA (1<<24) #define PIPECONF_FORCE_BORDER (1<<25) #define PIPECONF_INTERLACE_MASK (7 << 21) /* Note that pre-gen3 does not support interlaced display directly. Panel * fitting must be disabled on pre-ilk for interlaced. */ #define PIPECONF_PROGRESSIVE (0 << 21) #define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ #define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ /* Ironlake and later have a complete new set of values for interlaced. PFIT * means panel fitter required, PF means progressive fetch, DBL means power * saving pixel doubling. */ #define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) #define PIPECONF_INTERLACED_ILK (3 << 21) #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ #define PIPECONF_CXSR_DOWNCLOCK (1<<16) #define PIPECONF_BPP_MASK (0x000000e0) #define PIPECONF_BPP_8 (0<<5) #define PIPECONF_BPP_10 (1<<5) #define PIPECONF_BPP_6 (2<<5) #define PIPECONF_BPP_12 (3<<5) #define PIPECONF_DITHER_EN (1<<4) #define PIPECONF_DITHER_TYPE_MASK (0x0000000c) #define PIPECONF_DITHER_TYPE_SP (0<<2) #define PIPECONF_DITHER_TYPE_ST1 (1<<2) #define PIPECONF_DITHER_TYPE_ST2 (2<<2) #define PIPECONF_DITHER_TYPE_TEMP (3<<2) #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) #define PIPE_CRC_DONE_ENABLE (1UL<<28) #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) #define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) #define PIPE_DPST_EVENT_ENABLE (1UL<<23) #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) #define PIPE_DPST_EVENT_STATUS (1UL<<7) #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) #define 
PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) #define PIPE_BPC_MASK (7 << 5) /* Ironlake */ #define PIPE_8BPC (0 << 5) #define PIPE_10BPC (1 << 5) #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) #define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) #define DSPARB_BSTART_SHIFT 0 #define DSPARB_BEND_SHIFT 9 /* on 855 */ #define DSPARB_AEND_SHIFT 0 #define DSPFW1 0x70034 #define DSPFW_SR_SHIFT 23 #define DSPFW_SR_MASK (0x1ff<<23) #define DSPFW_CURSORB_SHIFT 16 #define DSPFW_CURSORB_MASK (0x3f<<16) #define DSPFW_PLANEB_SHIFT 8 #define DSPFW_PLANEB_MASK (0x7f<<8) #define DSPFW_PLANEA_MASK (0x7f) #define DSPFW2 0x70038 #define DSPFW_CURSORA_MASK 0x00003f00 #define DSPFW_CURSORA_SHIFT 8 #define DSPFW_PLANEC_MASK (0x7f) #define DSPFW3 0x7003c #define DSPFW_HPLL_SR_EN (1<<31) #define DSPFW_CURSOR_SR_SHIFT 24 #define PINEVIEW_SELF_REFRESH_EN (1<<30) #define DSPFW_CURSOR_SR_MASK (0x3f<<24) #define DSPFW_HPLL_CURSOR_SHIFT 16 #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) #define DSPFW_HPLL_SR_MASK (0x1ff) /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 #define I915_FIFO_LINE_SIZE 64 #define I830_FIFO_LINE_SIZE 32 #define G4X_FIFO_SIZE 127 #define I965_FIFO_SIZE 512 #define I945_FIFO_SIZE 127 #define I915_FIFO_SIZE 95 #define I855GM_FIFO_SIZE 127 /* In cachelines */ #define I830_FIFO_SIZE 95 #define G4X_MAX_WM 0x3f #define I915_MAX_WM 0x3f #define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */ #define PINEVIEW_FIFO_LINE_SIZE 64 #define PINEVIEW_MAX_WM 0x1ff #define PINEVIEW_DFT_WM 0x3f #define PINEVIEW_DFT_HPLLOFF_WM 0 #define PINEVIEW_GUARD_WM 10 #define PINEVIEW_CURSOR_FIFO 64 #define PINEVIEW_CURSOR_MAX_WM 0x3f #define PINEVIEW_CURSOR_DFT_WM 0 #define PINEVIEW_CURSOR_GUARD_WM 5 #define I965_CURSOR_FIFO 64 #define I965_CURSOR_MAX_WM 32 #define I965_CURSOR_DFT_WM 8 /* define the Watermark register on Ironlake */ #define WM0_PIPEA_ILK 0x45100 #define WM0_PIPE_PLANE_MASK (0x7f<<16) #define WM0_PIPE_PLANE_SHIFT 16 #define WM0_PIPE_SPRITE_MASK (0x3f<<8) #define WM0_PIPE_SPRITE_SHIFT 8 #define WM0_PIPE_CURSOR_MASK (0x1f) #define WM0_PIPEB_ILK 0x45104 #define WM0_PIPEC_IVB 0x45200 #define WM1_LP_ILK 0x45108 #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) #define WM1_LP_FBC_MASK (0xf<<20) #define WM1_LP_FBC_SHIFT 20 #define WM1_LP_SR_MASK (0x1ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0x3f) #define WM2_LP_ILK 0x4510c #define WM2_LP_EN (1<<31) #define WM3_LP_ILK 0x45110 #define WM3_LP_EN (1<<31) #define WM1S_LP_ILK 0x45120 #define WM2S_LP_IVB 0x45124 #define WM3S_LP_IVB 0x45128 #define WM1S_LP_EN (1<<31) /* Memory latency timer register */ #define MLTR_ILK 0x11222 #define MLTR_WM1_SHIFT 0 #define MLTR_WM2_SHIFT 8 /* the unit of memory self-refresh latency time is 0.5us */ #define ILK_SRLT_MASK 0x3f #define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & 
ILK_SRLT_MASK) #define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) #define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) /* define the fifo size on Ironlake */ #define ILK_DISPLAY_FIFO 128 #define ILK_DISPLAY_MAXWM 64 #define ILK_DISPLAY_DFTWM 8 #define ILK_CURSOR_FIFO 32 #define ILK_CURSOR_MAXWM 16 #define ILK_CURSOR_DFTWM 8 #define ILK_DISPLAY_SR_FIFO 512 #define ILK_DISPLAY_MAX_SRWM 0x1ff #define ILK_DISPLAY_DFT_SRWM 0x3f #define ILK_CURSOR_SR_FIFO 64 #define ILK_CURSOR_MAX_SRWM 0x3f #define ILK_CURSOR_DFT_SRWM 8 #define ILK_FIFO_LINE_SIZE 64 /* define the WM info on Sandybridge */ #define SNB_DISPLAY_FIFO 128 #define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ #define SNB_DISPLAY_DFTWM 8 #define SNB_CURSOR_FIFO 32 #define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ #define SNB_CURSOR_DFTWM 8 #define SNB_DISPLAY_SR_FIFO 512 #define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ #define SNB_DISPLAY_DFT_SRWM 0x3f #define SNB_CURSOR_SR_FIFO 64 #define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ #define SNB_CURSOR_DFT_SRWM 8 #define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ #define SNB_FIFO_LINE_SIZE 64 /* the address where we get all kinds of latency value */ #define SSKPD 0x5d10 #define SSKPD_WM_MASK 0x3f #define SSKPD_WM0_SHIFT 0 #define SSKPD_WM1_SHIFT 8 #define SSKPD_WM2_SHIFT 16 #define SSKPD_WM3_SHIFT 24 #define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) #define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) #define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) #define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) #define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. The following code * should work: * * do { * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> * PIPE_FRAME_HIGH_SHIFT; * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> * PIPE_FRAME_LOW_SHIFT); * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> * PIPE_FRAME_HIGH_SHIFT); * } while (high1 != high2); * frame = (high1 << 8) | low1; */ #define _PIPEAFRAMEHIGH 0x70040 #define PIPE_FRAME_HIGH_MASK 0x0000ffff #define PIPE_FRAME_HIGH_SHIFT 0 #define _PIPEAFRAMEPIXEL 0x70044 #define PIPE_FRAME_LOW_MASK 0xff000000 #define PIPE_FRAME_LOW_SHIFT 24 #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 /* GM45+ just has to be different */ #define _PIPEA_FRMCOUNT_GM45 0x70040 #define _PIPEA_FLIPCOUNT_GM45 0x70044 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) /* Cursor A & B regs */ #define _CURACNTR 0x70080 /* Old style CUR*CNTR flags (desktop 8xx) */ #define CURSOR_ENABLE 0x80000000 #define CURSOR_GAMMA_ENABLE 0x40000000 #define CURSOR_STRIDE_MASK 0x30000000 #define CURSOR_FORMAT_SHIFT 24 #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) /* New style CUR*CNTR flags */ #define CURSOR_MODE 0x27 #define CURSOR_MODE_DISABLE 0x00 #define CURSOR_MODE_64_32B_AX 0x07 #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) #define MCURSOR_PIPE_SELECT (1 << 28) #define MCURSOR_PIPE_A 0x00 #define MCURSOR_PIPE_B (1 << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) #define _CURABASE 0x70084 #define _CURAPOS 0x70088 
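/*
 * The frame-counter read sequence sketched in the PIPEAFRAME comment above,
 * written out as a helper.  INREG is the hypothetical register-read accessor
 * already used by that comment; a real driver would substitute its own MMIO
 * read routine.
 */
static inline u_int32_t
pipe_a_frame_count(void)
{
	u_int32_t high1, high2, low1;

	do {
		high1 = (INREG(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
		    PIPE_FRAME_HIGH_SHIFT;
		low1 = (INREG(_PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
		    PIPE_FRAME_LOW_SHIFT;
		high2 = (INREG(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
		    PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);	/* retry if the high word ticked over */

	return ((high1 << 8) | low1);
}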
#define CURSOR_POS_MASK 0x007FF #define CURSOR_POS_SIGN 0x8000 #define CURSOR_X_SHIFT 0 #define CURSOR_Y_SHIFT 16 #define CURSIZE 0x700a0 #define _CURBCNTR 0x700c0 #define _CURBBASE 0x700c4 #define _CURBPOS 0x700c8 #define _CURBCNTR_IVB 0x71080 #define _CURBBASE_IVB 0x71084 #define _CURBPOS_IVB 0x71088 #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) #define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) #define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) /* Display A control */ #define _DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) #define DISPLAY_PLANE_DISABLE 0 #define DISPPLANE_GAMMA_ENABLE (1<<30) #define DISPPLANE_GAMMA_DISABLE 0 #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) #define DISPPLANE_8BPP (0x2<<26) #define DISPPLANE_15_16BPP (0x4<<26) #define DISPPLANE_16BPP (0x5<<26) #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) #define DISPPLANE_32BPP (0x7<<26) #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) #define DISPPLANE_STEREO_ENABLE (1<<25) #define DISPPLANE_STEREO_DISABLE 0 #define DISPPLANE_SEL_PIPE_SHIFT 24 #define DISPPLANE_SEL_PIPE_MASK (3< */ #include __FBSDID("$FreeBSD$"); #include #include #include "radeon.h" #include "radeon_asic.h" #include "evergreend.h" #include "evergreen_blit_shaders.h" #include "cayman_blit_shaders.h" #include "radeon_blit_common.h" /* emits 17 */ static void set_render_target(struct radeon_device *rdev, int format, int w, int h, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cb_color_info; int pitch, slice; h = roundup2(h, 8); if (h < 8) h = 8; cb_color_info = CB_FORMAT(format) | CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15)); radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, pitch); radeon_ring_write(ring, slice); radeon_ring_write(ring, 0); radeon_ring_write(ring, cb_color_info); radeon_ring_write(ring, 0); radeon_ring_write(ring, (w - 1) | ((h - 1) << 16)); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); } /* emits 5dw */ static void cp_set_surface_sync(struct radeon_device *rdev, u32 sync_type, u32 size, u64 mc_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cp_coher_size; if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); if (rdev->family >= CHIP_CAYMAN) { /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync * to the RB directly. For IBs, the CP programs this as part of the * surface_sync packet. 
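 * The zero written below simply clears CP_COHER_CNTL2 ahead of the
 * SURFACE_SYNC packet that follows.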
*/ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2); radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */ } radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, sync_type); radeon_ring_write(ring, cp_coher_size); radeon_ring_write(ring, mc_addr >> 8); radeon_ring_write(ring, 10); /* poll interval */ } /* emits 11dw + 1 surface sync = 16dw */ static void set_shaders(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u64 gpu_addr; /* VS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3)); radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, 2); radeon_ring_write(ring, 0); /* PS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4)); radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, 1); radeon_ring_write(ring, 0); radeon_ring_write(ring, 2); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); } /* emits 10 + 1 sync (5) = 15 */ static void set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_vtx_constant_word2, sq_vtx_constant_word3; /* high addr, stride */ sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | SQ_VTXC_STRIDE(16); #ifdef __BIG_ENDIAN sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); #endif /* xyzw swizzles */ sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) | SQ_VTCX_SEL_Y(SQ_SEL_Y) | SQ_VTCX_SEL_Z(SQ_SEL_Z) | SQ_VTCX_SEL_W(SQ_SEL_W); radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8)); radeon_ring_write(ring, 0x580); radeon_ring_write(ring, gpu_addr & 0xffffffff); radeon_ring_write(ring, 48 - 1); /* size */ radeon_ring_write(ring, sq_vtx_constant_word2); radeon_ring_write(ring, sq_vtx_constant_word3); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER)); if ((rdev->family == CHIP_CEDAR) || (rdev->family == CHIP_PALM) || (rdev->family == CHIP_SUMO) || (rdev->family == CHIP_SUMO2) || (rdev->family == CHIP_CAICOS)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(rdev, PACKET3_VC_ACTION_ENA, 48, gpu_addr); } /* emits 10 */ static void set_tex_resource(struct radeon_device *rdev, int format, int w, int h, int pitch, u64 gpu_addr, u32 size) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_tex_resource_word0, sq_tex_resource_word1; u32 sq_tex_resource_word4, sq_tex_resource_word7; if (h < 1) h = 1; sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D); sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | ((w - 1) << 18)); sq_tex_resource_word1 = ((h - 1) << 0) | TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); /* xyzw swizzles */ sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) | TEX_DST_SEL_Y(SQ_SEL_Y) | TEX_DST_SEL_Z(SQ_SEL_Z) | TEX_DST_SEL_W(SQ_SEL_W); sq_tex_resource_word7 = format | S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE); cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, size, gpu_addr); radeon_ring_write(ring, 
PACKET3(PACKET3_SET_RESOURCE, 8)); radeon_ring_write(ring, 0); radeon_ring_write(ring, sq_tex_resource_word0); radeon_ring_write(ring, sq_tex_resource_word1); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, sq_tex_resource_word4); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, sq_tex_resource_word7); } /* emits 12 */ static void set_scissors(struct radeon_device *rdev, int x1, int y1, int x2, int y2) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; /* workaround some hw bugs */ if (x2 == 0) x1 = 1; if (y2 == 0) y1 = 1; if (rdev->family >= CHIP_CAYMAN) { if ((x2 == 1) && (y2 == 1)) x2 = 2; } radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); } /* emits 10 */ static void draw_auto(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2); radeon_ring_write(ring, DI_PT_RECTLIST); radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 2) | #endif DI_INDEX_SIZE_16_BIT); radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0)); radeon_ring_write(ring, 1); radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); radeon_ring_write(ring, 3); radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX); } /* emits 39 */ static void set_default_state(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3; u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2; u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3; int num_ps_gprs, num_vs_gprs, num_temp_gprs; int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_hs_threads, num_ls_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; int num_hs_stack_entries, num_ls_stack_entries; u64 gpu_addr; int dwords; /* set clear context state */ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); radeon_ring_write(ring, 0); if (rdev->family < CHIP_CAYMAN) { switch (rdev->family) { case CHIP_CEDAR: default: num_ps_gprs = 93; num_vs_gprs = 46; num_temp_gprs = 4; num_gs_gprs = 31; num_es_gprs = 31; num_hs_gprs = 23; num_ls_gprs = 23; num_ps_threads = 96; num_vs_threads = 16; num_gs_threads = 16; num_es_threads = 16; num_hs_threads = 16; num_ls_threads = 16; num_ps_stack_entries = 42; num_vs_stack_entries = 42; 
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_REDWOOD:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_JUNIPER:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_PALM:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 96;
			num_vs_threads = 16;
			num_gs_threads = 16;
			num_es_threads = 16;
			num_hs_threads = 16;
			num_ls_threads = 16;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_SUMO:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 96;
			num_vs_threads = 25;
			num_gs_threads = 25;
			num_es_threads = 25;
			num_hs_threads = 25;
			num_ls_threads = 25;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_SUMO2:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 96;
			num_vs_threads = 25;
			num_gs_threads = 25;
			num_es_threads = 25;
			num_hs_threads = 25;
			num_ls_threads = 25;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_BARTS:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			break;
		case CHIP_TURKS:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 20;
			num_gs_threads = 20;
			num_es_threads = 20;
			num_hs_threads = 20;
			num_ls_threads = 20;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
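/*
 * Editor's aside: the cases above differ only in a handful of numbers;
 * the same data could be kept as a per-family table (sketch, struct and
 * names made up, values copied from the REDWOOD and JUNIPER cases):
 */
struct eg_sq_limits_sketch {
	int ps_threads;		/* num_ps_threads                       */
	int other_threads;	/* num_{vs,gs,es,hs,ls}_threads         */
	int stack_entries;	/* shared by all num_*_stack_entries    */
};
static const struct eg_sq_limits_sketch redwood_sketch = { 128, 20, 42 };
static const struct eg_sq_limits_sketch juniper_sketch = { 128, 20, 85 };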
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		case CHIP_CAICOS:
			num_ps_gprs = 93;
			num_vs_gprs = 46;
			num_temp_gprs = 4;
			num_gs_gprs = 31;
			num_es_gprs = 31;
			num_hs_gprs = 23;
			num_ls_gprs = 23;
			num_ps_threads = 128;
			num_vs_threads = 10;
			num_gs_threads = 10;
			num_es_threads = 10;
			num_hs_threads = 10;
			num_ls_threads = 10;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			break;
		}

		if ((rdev->family == CHIP_CEDAR) ||
		    (rdev->family == CHIP_PALM) ||
		    (rdev->family == CHIP_SUMO) ||
		    (rdev->family == CHIP_SUMO2) ||
		    (rdev->family == CHIP_CAICOS))
			sq_config = 0;
		else
			sq_config = VC_ENABLE;

		sq_config |= (EXPORT_SRC_C |
			      CS_PRIO(0) |
			      LS_PRIO(0) |
			      HS_PRIO(0) |
			      PS_PRIO(0) |
			      VS_PRIO(1) |
			      GS_PRIO(2) |
			      ES_PRIO(3));

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
					  NUM_VS_GPRS(num_vs_gprs) |
					  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
					  NUM_ES_GPRS(num_es_gprs));
		sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
					  NUM_LS_GPRS(num_ls_gprs));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
					   NUM_VS_THREADS(num_vs_threads) |
					   NUM_GS_THREADS(num_gs_threads) |
					   NUM_ES_THREADS(num_es_threads));
		sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
					     NUM_LS_THREADS(num_ls_threads));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
					    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
					    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
		sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));

		/* disable dyn gprs */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0);

		/* setup LDS */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0x10001000);

		/* SQ config */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, sq_config);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, sq_thread_resource_mgmt);
		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
	}

	/* CONTEXT_CONTROL */
	radeon_ring_write(ring, 0xc0012800);
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* SET_SAMPLER */
	radeon_ring_write(ring, 0xc0036e00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000012);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	/* emit an IB pointing at default state */
	dwords = roundup2(rdev->r600_blit.state_len, 0x10);
	gpu_addr =
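/*
 * Editor's aside: roundup2() is FreeBSD's power-of-two round-up, used
 * just above to pad the default-state indirect buffer to a 16-dword
 * boundary.  For a power-of-two n it is the usual mask trick:
 */
#define ROUNDUP2_SKETCH(x, n)	(((x) + ((n) - 1)) & ~((n) - 1))
/* e.g. ROUNDUP2_SKETCH(57, 0x10) == 64 dwords */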
		rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(ring, dwords);
}

int evergreen_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int i, r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	rdev->r600_blit.primitives.set_render_target = set_render_target;
	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
	rdev->r600_blit.primitives.set_shaders = set_shaders;
	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
	rdev->r600_blit.primitives.set_scissors = set_scissors;
	rdev->r600_blit.primitives.draw_auto = draw_auto;
	rdev->r600_blit.primitives.set_default_state = set_default_state;

	rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
	rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
	rdev->r600_blit.ring_size_common += 5; /* done copy */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */

	rdev->r600_blit.ring_size_per_loop = 74;
	if (rdev->family >= CHIP_CAYMAN)
		rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */

	rdev->r600_blit.max_dim = 16384;

	rdev->r600_blit.state_offset = 0;

	if (rdev->family < CHIP_CAYMAN)
		rdev->r600_blit.state_len = evergreen_default_size;
	else
		rdev->r600_blit.state_len = cayman_default_size;

	dwords = rdev->r600_blit.state_len;
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = roundup2(obj_size, 256);

	rdev->r600_blit.vs_offset = obj_size;
	if (rdev->family < CHIP_CAYMAN)
		obj_size += evergreen_vs_size * 4;
	else
		obj_size += cayman_vs_size * 4;
	obj_size = roundup2(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	if (rdev->family < CHIP_CAYMAN)
		obj_size += evergreen_ps_size * 4;
	else
		obj_size += cayman_ps_size * 4;
	obj_size = roundup2(obj_size, 256);

	/* pin copy shader into vram if not already initialized */
	if (!rdev->r600_blit.shader_obj) {
		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->r600_blit.shader_obj);
		if (r) {
			DRM_ERROR("evergreen failed to allocate shader\n");
			return r;
		}

		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}

	DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}

	if (rdev->family < CHIP_CAYMAN) {
		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
			    evergreen_default_state, rdev->r600_blit.state_len * 4);

		if (num_packet2s)
			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset +
				    (rdev->r600_blit.state_len * 4),
				    packet2s, num_packet2s * 4);
		for (i = 0; i < evergreen_vs_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) =
				cpu_to_le32(evergreen_vs[i]);
		for (i = 0; i < evergreen_ps_size; i++)
			*(u32 *)((unsigned long)ptr +
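/*
 * Editor's aside: the repeated roundup2(obj_size, 256) above is not
 * cosmetic -- SQ_PGM_START_VS/PS (see set_shaders()) are programmed
 * with gpu_addr >> 8, so each shader image must start on a 256-byte
 * boundary.  Layout sketch (variable names hypothetical):
 */
vs_offset = roundup2(state_bytes, 256);
ps_offset = roundup2(vs_offset + vs_bytes, 256);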
				 rdev->r600_blit.ps_offset + i * 4) =
				cpu_to_le32(evergreen_ps[i]);
	} else {
		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
			    cayman_default_state, rdev->r600_blit.state_len * 4);

		if (num_packet2s)
			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset +
				    (rdev->r600_blit.state_len * 4),
				    packet2s, num_packet2s * 4);
		for (i = 0; i < cayman_vs_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) =
				cpu_to_le32(cayman_vs[i]);
		for (i = 0; i < cayman_ps_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) =
				cpu_to_le32(cayman_ps[i]);
	}
	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
	return 0;
}

Index: head/sys/dev/drm2/radeon/evergreen_cs.c
===================================================================
--- head/sys/dev/drm2/radeon/evergreen_cs.c	(revision 258779)
+++ head/sys/dev/drm2/radeon/evergreen_cs.c	(revision 258780)
@@ -1,3727 +1,3727 @@
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
* * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include __FBSDID("$FreeBSD$"); #include #include "radeon.h" #include "radeon_asic.h" #include "evergreend.h" #include "evergreen_reg_safe.h" #include "cayman_reg_safe.h" #include "r600_cs.h" #define MAX(a,b) (((a)>(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); struct evergreen_cs_track { u32 group_size; u32 nbanks; u32 npipes; u32 row_size; /* value we track */ u32 nsamples; /* unused */ struct radeon_bo *cb_color_bo[12]; u32 cb_color_bo_offset[12]; struct radeon_bo *cb_color_fmask_bo[8]; /* unused */ struct radeon_bo *cb_color_cmask_bo[8]; /* unused */ u32 cb_color_info[12]; u32 cb_color_view[12]; u32 cb_color_pitch[12]; u32 cb_color_slice[12]; u32 cb_color_slice_idx[12]; u32 cb_color_attrib[12]; u32 cb_color_cmask_slice[8];/* unused */ u32 cb_color_fmask_slice[8];/* unused */ u32 cb_target_mask; u32 cb_shader_mask; /* unused */ u32 vgt_strmout_config; u32 vgt_strmout_buffer_config; struct radeon_bo *vgt_strmout_bo[4]; u32 vgt_strmout_bo_offset[4]; u32 vgt_strmout_size[4]; u32 db_depth_control; u32 db_depth_view; u32 db_depth_slice; u32 db_depth_size; u32 db_z_info; u32 db_z_read_offset; u32 db_z_write_offset; struct radeon_bo *db_z_read_bo; struct radeon_bo *db_z_write_bo; u32 db_s_info; u32 db_s_read_offset; u32 db_s_write_offset; struct radeon_bo *db_s_read_bo; struct radeon_bo *db_s_write_bo; bool sx_misc_kill_all_prims; bool cb_dirty; bool db_dirty; bool streamout_dirty; u32 htile_offset; u32 htile_surface; struct radeon_bo *htile_bo; }; static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) { if (tiling_flags & RADEON_TILING_MACRO) return ARRAY_2D_TILED_THIN1; else if (tiling_flags & RADEON_TILING_MICRO) return ARRAY_1D_TILED_THIN1; else return ARRAY_LINEAR_GENERAL; } static u32 evergreen_cs_get_num_banks(u32 nbanks) { switch (nbanks) { case 2: return ADDR_SURF_2_BANK; case 4: return ADDR_SURF_4_BANK; case 8: default: return ADDR_SURF_8_BANK; case 16: return ADDR_SURF_16_BANK; } } static void evergreen_cs_track_init(struct evergreen_cs_track *track) { int i; for (i = 0; i < 8; i++) { track->cb_color_fmask_bo[i] = NULL; track->cb_color_cmask_bo[i] = NULL; track->cb_color_cmask_slice[i] = 0; track->cb_color_fmask_slice[i] = 0; } for (i = 0; i < 12; i++) { track->cb_color_bo[i] = NULL; track->cb_color_bo_offset[i] = 0xFFFFFFFF; track->cb_color_info[i] = 0; track->cb_color_view[i] = 0xFFFFFFFF; track->cb_color_pitch[i] = 0; track->cb_color_slice[i] = 0xfffffff; track->cb_color_slice_idx[i] = 0; } track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->cb_dirty = true; track->db_depth_slice = 0xffffffff; track->db_depth_view = 0xFFFFC000; track->db_depth_size = 0xFFFFFFFF; track->db_depth_control = 0xFFFFFFFF; track->db_z_info = 0xFFFFFFFF; track->db_z_read_offset = 0xFFFFFFFF; track->db_z_write_offset = 0xFFFFFFFF; track->db_z_read_bo = NULL; track->db_z_write_bo = NULL; track->db_s_info = 0xFFFFFFFF; track->db_s_read_offset = 0xFFFFFFFF; track->db_s_write_offset = 0xFFFFFFFF; track->db_s_read_bo = NULL; track->db_s_write_bo = NULL; track->db_dirty = true; track->htile_bo = NULL; track->htile_offset = 0xFFFFFFFF; track->htile_surface = 0; for (i = 0; i < 4; i++) { track->vgt_strmout_size[i] = 0; track->vgt_strmout_bo[i] = NULL; track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF; } track->streamout_dirty = true; track->sx_misc_kill_all_prims = false; } struct eg_surface { /* value gathered from cs */ 
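/*
 * Editor's aside: evergreen_cs_track_init() above deliberately seeds
 * unprogrammed offsets and sizes with poison values (0xFFFFFFFF and
 * friends), so a command stream that draws without programming a state
 * register fails the later bounds checks instead of slipping through.
 * Sketch of the effect (helper hypothetical):
 */
static bool eg_poisoned_sketch(struct radeon_bo *bo)
{
	/* an untouched tracker offset, once shifted by 8, beats any bo size */
	return ((unsigned long)0xFFFFFFFF << 8) > radeon_bo_size(bo);
}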
unsigned nbx; unsigned nby; unsigned format; unsigned mode; unsigned nbanks; unsigned bankw; unsigned bankh; unsigned tsplit; unsigned mtilea; unsigned nsamples; /* output value */ unsigned bpe; unsigned layer_size; unsigned palign; unsigned halign; unsigned long base_align; }; static int evergreen_surface_check_linear(struct radeon_cs_parser *p, struct eg_surface *surf, const char *prefix) { surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples; surf->base_align = surf->bpe; surf->palign = 1; surf->halign = 1; return 0; } static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p, struct eg_surface *surf, const char *prefix) { struct evergreen_cs_track *track = p->track; unsigned palign; palign = MAX(64, track->group_size / surf->bpe); surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples; surf->base_align = track->group_size; surf->palign = palign; surf->halign = 1; if (surf->nbx & (palign - 1)) { if (prefix) { dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n", __func__, __LINE__, prefix, surf->nbx, palign); } return -EINVAL; } return 0; } static int evergreen_surface_check_1d(struct radeon_cs_parser *p, struct eg_surface *surf, const char *prefix) { struct evergreen_cs_track *track = p->track; unsigned palign; palign = track->group_size / (8 * surf->bpe * surf->nsamples); palign = MAX(8, palign); surf->layer_size = surf->nbx * surf->nby * surf->bpe; surf->base_align = track->group_size; surf->palign = palign; surf->halign = 8; if ((surf->nbx & (palign - 1))) { if (prefix) { dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n", __func__, __LINE__, prefix, surf->nbx, palign, track->group_size, surf->bpe, surf->nsamples); } return -EINVAL; } if ((surf->nby & (8 - 1))) { if (prefix) { dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n", __func__, __LINE__, prefix, surf->nby); } return -EINVAL; } return 0; } static int evergreen_surface_check_2d(struct radeon_cs_parser *p, struct eg_surface *surf, const char *prefix) { struct evergreen_cs_track *track = p->track; unsigned palign, halign, tileb, slice_pt; unsigned mtile_pr, mtile_ps, mtileb; tileb = 64 * surf->bpe * surf->nsamples; slice_pt = 1; if (tileb > surf->tsplit) { slice_pt = tileb / surf->tsplit; } tileb = tileb / slice_pt; /* macro tile width & height */ palign = (8 * surf->bankw * track->npipes) * surf->mtilea; halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; mtileb = (palign / 8) * (halign / 8) * tileb; mtile_pr = surf->nbx / palign; mtile_ps = (mtile_pr * surf->nby) / halign; surf->layer_size = mtile_ps * mtileb * slice_pt; surf->base_align = (palign / 8) * (halign / 8) * tileb; surf->palign = palign; surf->halign = halign; if ((surf->nbx & (palign - 1))) { if (prefix) { dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n", __func__, __LINE__, prefix, surf->nbx, palign); } return -EINVAL; } if ((surf->nby & (halign - 1))) { if (prefix) { dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n", __func__, __LINE__, prefix, surf->nby, halign); } return -EINVAL; } return 0; } static int evergreen_surface_check(struct radeon_cs_parser *p, struct eg_surface *surf, const char *prefix) { /* some common value computed here */ surf->bpe = r600_fmt_get_blocksize(surf->format); switch (surf->mode) { case ARRAY_LINEAR_GENERAL: return evergreen_surface_check_linear(p, surf, prefix); case ARRAY_LINEAR_ALIGNED: return evergreen_surface_check_linear_aligned(p, surf, prefix); case 
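/*
 * Editor's aside: worked example of evergreen_surface_check_2d() above,
 * assuming a Cypress-like part (npipes = 8, nbanks = 8) with
 * bankw = bankh = mtilea = 1 and a 32bpp single-sampled surface:
 *
 *	tileb  = 64 * 4 * 1            = 256 bytes per 8x8 micro tile
 *	palign = (8 * 1 * 8) * 1       = 64 pixel macro-tile width
 *	halign = (8 * 1 * 8) / 1       = 64 pixel macro-tile height
 *	mtileb = (64/8) * (64/8) * 256 = 16 KiB per macro tile
 *
 * so the pitch must be a multiple of 64 pixels and the base address a
 * multiple of 16 KiB (surf->base_align is built from the same product).
 */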
ARRAY_1D_TILED_THIN1: return evergreen_surface_check_1d(p, surf, prefix); case ARRAY_2D_TILED_THIN1: return evergreen_surface_check_2d(p, surf, prefix); default: dev_warn(p->dev, "%s:%d %s invalid array mode %d\n", __func__, __LINE__, prefix, surf->mode); return -EINVAL; } return -EINVAL; } static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p, struct eg_surface *surf, const char *prefix) { switch (surf->mode) { case ARRAY_2D_TILED_THIN1: break; case ARRAY_LINEAR_GENERAL: case ARRAY_LINEAR_ALIGNED: case ARRAY_1D_TILED_THIN1: return 0; default: dev_warn(p->dev, "%s:%d %s invalid array mode %d\n", __func__, __LINE__, prefix, surf->mode); return -EINVAL; } switch (surf->nbanks) { case 0: surf->nbanks = 2; break; case 1: surf->nbanks = 4; break; case 2: surf->nbanks = 8; break; case 3: surf->nbanks = 16; break; default: dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n", __func__, __LINE__, prefix, surf->nbanks); return -EINVAL; } switch (surf->bankw) { case 0: surf->bankw = 1; break; case 1: surf->bankw = 2; break; case 2: surf->bankw = 4; break; case 3: surf->bankw = 8; break; default: dev_warn(p->dev, "%s:%d %s invalid bankw %d\n", __func__, __LINE__, prefix, surf->bankw); return -EINVAL; } switch (surf->bankh) { case 0: surf->bankh = 1; break; case 1: surf->bankh = 2; break; case 2: surf->bankh = 4; break; case 3: surf->bankh = 8; break; default: dev_warn(p->dev, "%s:%d %s invalid bankh %d\n", __func__, __LINE__, prefix, surf->bankh); return -EINVAL; } switch (surf->mtilea) { case 0: surf->mtilea = 1; break; case 1: surf->mtilea = 2; break; case 2: surf->mtilea = 4; break; case 3: surf->mtilea = 8; break; default: dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n", __func__, __LINE__, prefix, surf->mtilea); return -EINVAL; } switch (surf->tsplit) { case 0: surf->tsplit = 64; break; case 1: surf->tsplit = 128; break; case 2: surf->tsplit = 256; break; case 3: surf->tsplit = 512; break; case 4: surf->tsplit = 1024; break; case 5: surf->tsplit = 2048; break; case 6: surf->tsplit = 4096; break; default: dev_warn(p->dev, "%s:%d %s invalid tile split %d\n", __func__, __LINE__, prefix, surf->tsplit); return -EINVAL; } return 0; } static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id) { struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; unsigned long offset; int r; mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1; pitch = track->cb_color_pitch[id]; slice = track->cb_color_slice[id]; surf.nbx = (pitch + 1) * 8; surf.nby = ((slice + 1) * 64) / surf.nbx; surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]); surf.format = G_028C70_FORMAT(track->cb_color_info[id]); surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]); surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]); surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]); surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]); surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]); surf.nsamples = 1; if (!r600_fmt_is_valid_color(surf.format)) { dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n", __func__, __LINE__, surf.format, id, track->cb_color_info[id]); return -EINVAL; } r = evergreen_surface_value_conv_check(p, &surf, "cb"); if (r) { return r; } r = evergreen_surface_check(p, &surf, "cb"); if (r) { dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, id, track->cb_color_pitch[id], track->cb_color_slice[id], 
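/*
 * Editor's aside: the switches in evergreen_surface_value_conv_check()
 * above just expand log2-encoded register fields; equivalently (sketch):
 */
surf->nbanks = 2u << surf->nbanks;	/* 0..3 -> 2, 4, 8, 16 */
surf->bankw  = 1u << surf->bankw;	/* 0..3 -> 1, 2, 4, 8  */
surf->tsplit = 64u << surf->tsplit;	/* 0..6 -> 64 .. 4096  */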
track->cb_color_attrib[id], track->cb_color_info[id]); return r; } offset = track->cb_color_bo_offset[id] << 8; if (offset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n", __func__, __LINE__, id, offset, surf.base_align); return -EINVAL; } offset += surf.layer_size * mslice; if (offset > radeon_bo_size(track->cb_color_bo[id])) { /* old ddx are broken they allocate bo with w*h*bpp but * program slice with ALIGN(h, 8), catch this and patch * command stream. */ if (!surf.mode) { volatile u32 *ib = p->ib.ptr; unsigned long tmp, nby, bsize, size, min = 0; /* find the height the ddx wants */ if (surf.nby > 8) { min = surf.nby - 8; } bsize = radeon_bo_size(track->cb_color_bo[id]); tmp = track->cb_color_bo_offset[id] << 8; for (nby = surf.nby; nby > min; nby--) { size = nby * surf.nbx * surf.bpe * surf.nsamples; if ((tmp + size * mslice) <= bsize) { break; } } if (nby > min) { surf.nby = nby; slice = ((nby * surf.nbx) / 64) - 1; if (!evergreen_surface_check(p, &surf, "cb")) { /* check if this one works */ tmp += surf.layer_size * mslice; if (tmp <= bsize) { ib[track->cb_color_slice_idx[id]] = slice; goto old_ddx_ok; } } } } dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " "offset %d, max layer %d, bo size %ld, slice %d)\n", __func__, __LINE__, id, surf.layer_size, track->cb_color_bo_offset[id] << 8, mslice, radeon_bo_size(track->cb_color_bo[id]), slice); dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n", __func__, __LINE__, surf.nbx, surf.nby, surf.mode, surf.bpe, surf.nsamples, surf.bankw, surf.bankh, surf.tsplit, surf.mtilea); return -EINVAL; } old_ddx_ok: return 0; } static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p, unsigned nbx, unsigned nby) { struct evergreen_cs_track *track = p->track; unsigned long size; if (track->htile_bo == NULL) { dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", __func__, __LINE__, track->db_z_info); return -EINVAL; } if (G_028ABC_LINEAR(track->htile_surface)) { /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */ nbx = roundup(nbx, 16 * 8); /* height is npipes htiles aligned == npipes * 8 pixel aligned */ nby = roundup(nby, track->npipes * 8); } else { /* always assume 8x8 htile */ /* align is htile align * 8, htile align vary according to * number of pipe and tile width and nby */ switch (track->npipes) { case 8: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup(nbx, 64 * 8); nby = roundup(nby, 64 * 8); break; case 4: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup(nbx, 64 * 8); nby = roundup(nby, 32 * 8); break; case 2: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup(nbx, 32 * 8); nby = roundup(nby, 32 * 8); break; case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup(nbx, 32 * 8); nby = roundup(nby, 16 * 8); break; default: dev_warn(p->dev, "%s:%d invalid num pipes %d\n", __func__, __LINE__, track->npipes); return -EINVAL; } } /* compute number of htile */ nbx = nbx >> 3; nby = nby >> 3; /* size must be aligned on npipes * 2K boundary */ size = roundup(nbx * nby * 4, track->npipes * (2 << 10)); size += track->htile_offset; if (size > radeon_bo_size(track->htile_bo)) { dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", __func__, __LINE__, radeon_bo_size(track->htile_bo), size, nbx, nby); return -EINVAL; } return 0; } static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) { struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, 
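/*
 * Editor's aside: htile sizing in evergreen_cs_track_validate_htile()
 * above, worked for a 1920x1080 tiled depth buffer on a 4-pipe part
 * (numbers illustrative only; one 4-byte htile covers 8x8 pixels):
 */
nbx  = roundup(1920, 64 * 8) >> 3;		/* 2048/8 = 256 htiles per row */
nby  = roundup(1080, 32 * 8) >> 3;		/* 1280/8 = 160 rows           */
size = roundup(256 * 160 * 4, 4 * 2048);	/* 163840 bytes                */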
mslice; unsigned long offset; int r; mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size); slice = track->db_depth_slice; surf.nbx = (pitch + 1) * 8; surf.nby = ((slice + 1) * 64) / surf.nbx; surf.mode = G_028040_ARRAY_MODE(track->db_z_info); surf.format = G_028044_FORMAT(track->db_s_info); surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info); surf.nbanks = G_028040_NUM_BANKS(track->db_z_info); surf.bankw = G_028040_BANK_WIDTH(track->db_z_info); surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info); surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info); surf.nsamples = 1; if (surf.format != 1) { dev_warn(p->dev, "%s:%d stencil invalid format %d\n", __func__, __LINE__, surf.format); return -EINVAL; } /* replace by color format so we can use same code */ surf.format = V_028C70_COLOR_8; r = evergreen_surface_value_conv_check(p, &surf, "stencil"); if (r) { return r; } r = evergreen_surface_check(p, &surf, NULL); if (r) { /* old userspace doesn't compute proper depth/stencil alignment * check that alignment against a bigger byte per elements and * only report if that alignment is wrong too. */ surf.format = V_028C70_COLOR_8_8_8_8; r = evergreen_surface_check(p, &surf, "stencil"); if (r) { dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, track->db_depth_size, track->db_depth_slice, track->db_s_info, track->db_z_info); } return r; } offset = track->db_s_read_offset << 8; if (offset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } offset += surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_read_bo)) { dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, " "offset %ld, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, (unsigned long)track->db_s_read_offset << 8, mslice, radeon_bo_size(track->db_s_read_bo)); dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, track->db_depth_size, track->db_depth_slice, track->db_s_info, track->db_z_info); return -EINVAL; } offset = track->db_s_write_offset << 8; if (offset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } offset += surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_write_bo)) { dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, " "offset %ld, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, (unsigned long)track->db_s_write_offset << 8, mslice, radeon_bo_size(track->db_s_write_bo)); return -EINVAL; } /* hyperz */ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) { r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby); if (r) { return r; } } return 0; } static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) { struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; unsigned long offset; int r; mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size); slice = track->db_depth_slice; surf.nbx = (pitch + 1) * 8; surf.nby = ((slice + 1) * 64) / surf.nbx; surf.mode = G_028040_ARRAY_MODE(track->db_z_info); surf.format = G_028040_FORMAT(track->db_z_info); surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info); surf.nbanks = G_028040_NUM_BANKS(track->db_z_info); surf.bankw = 
G_028040_BANK_WIDTH(track->db_z_info); surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info); surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info); surf.nsamples = 1; switch (surf.format) { case V_028040_Z_16: surf.format = V_028C70_COLOR_16; break; case V_028040_Z_24: case V_028040_Z_32_FLOAT: surf.format = V_028C70_COLOR_8_8_8_8; break; default: dev_warn(p->dev, "%s:%d depth invalid format %d\n", __func__, __LINE__, surf.format); return -EINVAL; } r = evergreen_surface_value_conv_check(p, &surf, "depth"); if (r) { dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, track->db_depth_size, track->db_depth_slice, track->db_z_info); return r; } r = evergreen_surface_check(p, &surf, "depth"); if (r) { dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, track->db_depth_size, track->db_depth_slice, track->db_z_info); return r; } offset = track->db_z_read_offset << 8; if (offset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } offset += surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_read_bo)) { dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, " "offset %ld, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, (unsigned long)track->db_z_read_offset << 8, mslice, radeon_bo_size(track->db_z_read_bo)); return -EINVAL; } offset = track->db_z_write_offset << 8; if (offset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } offset += surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_write_bo)) { dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, " "offset %ld, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, (unsigned long)track->db_z_write_offset << 8, mslice, radeon_bo_size(track->db_z_write_bo)); return -EINVAL; } /* hyperz */ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) { r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby); if (r) { return r; } } return 0; } static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, struct radeon_bo *texture, struct radeon_bo *mipmap, unsigned idx) { struct eg_surface surf; unsigned long toffset, moffset; unsigned dim, llevel, mslice, width, height, depth, i; u32 texdw[8]; int r; texdw[0] = radeon_get_ib_value(p, idx + 0); texdw[1] = radeon_get_ib_value(p, idx + 1); texdw[2] = radeon_get_ib_value(p, idx + 2); texdw[3] = radeon_get_ib_value(p, idx + 3); texdw[4] = radeon_get_ib_value(p, idx + 4); texdw[5] = radeon_get_ib_value(p, idx + 5); texdw[6] = radeon_get_ib_value(p, idx + 6); texdw[7] = radeon_get_ib_value(p, idx + 7); dim = G_030000_DIM(texdw[0]); llevel = G_030014_LAST_LEVEL(texdw[5]); mslice = G_030014_LAST_ARRAY(texdw[5]) + 1; width = G_030000_TEX_WIDTH(texdw[0]) + 1; height = G_030004_TEX_HEIGHT(texdw[1]) + 1; depth = G_030004_TEX_DEPTH(texdw[1]) + 1; surf.format = G_03001C_DATA_FORMAT(texdw[7]); surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8; surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx); surf.nby = r600_fmt_get_nblocksy(surf.format, height); surf.mode = G_030004_ARRAY_MODE(texdw[1]); surf.tsplit = G_030018_TILE_SPLIT(texdw[6]); surf.nbanks = G_03001C_NUM_BANKS(texdw[7]); surf.bankw = G_03001C_BANK_WIDTH(texdw[7]); surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]); surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]); surf.nsamples 
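/*
 * Editor's aside: evergreen_cs_track_validate_depth() above folds Z
 * formats onto colour formats of the same element size so one surface
 * checker serves both paths: Z_16 -> COLOR_16 (2 B/px), Z_24 and
 * Z_32_FLOAT -> COLOR_8_8_8_8 (4 B/px).  Implied element size, as a
 * sketch (helper name hypothetical):
 */
static unsigned eg_z_bpe_sketch(u32 zformat)
{
	return (zformat == V_028040_Z_16) ? 2 : 4;
}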
= 1; toffset = texdw[2] << 8; moffset = texdw[3] << 8; if (!r600_fmt_is_valid_texture(surf.format, p->family)) { dev_warn(p->dev, "%s:%d texture invalid format %d\n", __func__, __LINE__, surf.format); return -EINVAL; } switch (dim) { case V_030000_SQ_TEX_DIM_1D: case V_030000_SQ_TEX_DIM_2D: case V_030000_SQ_TEX_DIM_CUBEMAP: case V_030000_SQ_TEX_DIM_1D_ARRAY: case V_030000_SQ_TEX_DIM_2D_ARRAY: depth = 1; break; case V_030000_SQ_TEX_DIM_2D_MSAA: case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA: surf.nsamples = 1 << llevel; llevel = 0; depth = 1; break; case V_030000_SQ_TEX_DIM_3D: break; default: dev_warn(p->dev, "%s:%d texture invalid dimension %d\n", __func__, __LINE__, dim); return -EINVAL; } r = evergreen_surface_value_conv_check(p, &surf, "texture"); if (r) { return r; } /* align height */ evergreen_surface_check(p, &surf, NULL); surf.nby = roundup(surf.nby, surf.halign); r = evergreen_surface_check(p, &surf, "texture"); if (r) { dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", __func__, __LINE__, texdw[0], texdw[1], texdw[4], texdw[5], texdw[6], texdw[7]); return r; } /* check texture size */ if (toffset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n", __func__, __LINE__, toffset, surf.base_align); return -EINVAL; } if (moffset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n", __func__, __LINE__, moffset, surf.base_align); return -EINVAL; } if (dim == SQ_TEX_DIM_3D) { toffset += surf.layer_size * depth; } else { toffset += surf.layer_size * mslice; } if (toffset > radeon_bo_size(texture)) { dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, " "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n", __func__, __LINE__, surf.layer_size, (unsigned long)texdw[2] << 8, mslice, depth, radeon_bo_size(texture), surf.nbx, surf.nby); return -EINVAL; } if (!mipmap) { if (llevel) { dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n", __func__, __LINE__); return -EINVAL; } else { return 0; /* everything's ok */ } } /* check mipmap size */ for (i = 1; i <= llevel; i++) { unsigned w, h, d; w = r600_mip_minify(width, i); h = r600_mip_minify(height, i); d = r600_mip_minify(depth, i); surf.nbx = r600_fmt_get_nblocksx(surf.format, w); surf.nby = r600_fmt_get_nblocksy(surf.format, h); switch (surf.mode) { case ARRAY_2D_TILED_THIN1: if (surf.nbx < surf.palign || surf.nby < surf.halign) { surf.mode = ARRAY_1D_TILED_THIN1; } /* recompute alignment */ evergreen_surface_check(p, &surf, NULL); break; case ARRAY_LINEAR_GENERAL: case ARRAY_LINEAR_ALIGNED: case ARRAY_1D_TILED_THIN1: break; default: dev_warn(p->dev, "%s:%d invalid array mode %d\n", __func__, __LINE__, surf.mode); return -EINVAL; } surf.nbx = roundup(surf.nbx, surf.palign); surf.nby = roundup(surf.nby, surf.halign); r = evergreen_surface_check(p, &surf, "mipmap"); if (r) { return r; } if (dim == SQ_TEX_DIM_3D) { moffset += surf.layer_size * d; } else { moffset += surf.layer_size * mslice; } if (moffset > radeon_bo_size(mipmap)) { dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, " "offset %ld, coffset %ld, max layer %d, depth %d, " "bo size %ld) level0 (%d %d %d)\n", __func__, __LINE__, i, surf.layer_size, (unsigned long)texdw[3] << 8, moffset, mslice, d, radeon_bo_size(mipmap), width, height, depth); dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n", __func__, __LINE__, surf.nbx, surf.nby, surf.mode, surf.bpe, surf.nsamples, surf.bankw, surf.bankh, surf.tsplit, 
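/*
 * Editor's aside: the mipmap loop above leans on r600_mip_minify(),
 * which behaves roughly like max(1, x >> level); upstream additionally
 * rounds non-base levels, which this sketch omits:
 */
static unsigned mip_minify_sketch(unsigned x, unsigned level)
{
	unsigned v = x >> level;
	return v ? v : 1;	/* a mip level never shrinks below 1 */
}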
surf.mtilea); return -EINVAL; } } return 0; } static int evergreen_cs_track_check(struct radeon_cs_parser *p) { struct evergreen_cs_track *track = p->track; unsigned tmp, i; int r; unsigned buffer_mask = 0; /* check streamout */ if (track->streamout_dirty && track->vgt_strmout_config) { for (i = 0; i < 4; i++) { if (track->vgt_strmout_config & (1 << i)) { buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf; } } for (i = 0; i < 4; i++) { if (buffer_mask & (1 << i)) { if (track->vgt_strmout_bo[i]) { u64 offset = (u64)track->vgt_strmout_bo_offset[i] + (u64)track->vgt_strmout_size[i]; if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n", i, (uintmax_t)offset, radeon_bo_size(track->vgt_strmout_bo[i])); return -EINVAL; } } else { dev_warn(p->dev, "No buffer for streamout %d\n", i); return -EINVAL; } } } track->streamout_dirty = false; } if (track->sx_misc_kill_all_prims) return 0; /* check that we have a cb for each enabled target */ if (track->cb_dirty) { tmp = track->cb_target_mask; for (i = 0; i < 8; i++) { if ((tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ if (track->cb_color_bo[i] == NULL) { dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); return -EINVAL; } /* check cb */ r = evergreen_cs_track_validate_cb(p, i); if (r) { return r; } } } track->cb_dirty = false; } if (track->db_dirty) { /* Check stencil buffer */ if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID && G_028800_STENCIL_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_stencil(p); if (r) return r; } /* Check depth buffer */ if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID && G_028800_Z_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_depth(p); if (r) return r; } track->db_dirty = false; } return 0; } /** * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet * @parser: parser structure holding parsing context. * @pkt: where to store packet informations * * Assume that chunk_ib_index is properly set. Will return -EINVAL * if packet is bigger than remaining ib size. or if packets is unknown. **/ static int evergreen_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx) { struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; uint32_t header; if (idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", idx, ib_chunk->length_dw); return -EINVAL; } header = radeon_get_ib_value(p, idx); pkt->idx = idx; pkt->type = CP_PACKET_GET_TYPE(header); pkt->count = CP_PACKET_GET_COUNT(header); pkt->one_reg_wr = 0; switch (pkt->type) { case PACKET_TYPE0: pkt->reg = CP_PACKET0_GET_REG(header); break; case PACKET_TYPE3: pkt->opcode = CP_PACKET3_GET_OPCODE(header); break; case PACKET_TYPE2: pkt->count = -1; break; default: DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); return -EINVAL; } if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); return -EINVAL; } return 0; } /** * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3 * @parser: parser structure holding parsing context. 
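/*
 * Editor's aside: the header decode in evergreen_cs_packet_parse()
 * above assumes the PM4 layout -- bits [31:30] packet type, [29:16]
 * count minus one, and, for type-3 packets, [15:8] the opcode.
 * Sketch (macro names hypothetical):
 */
#define PKT_TYPE_SKETCH(h)	(((h) >> 30) & 0x3)
#define PKT_COUNT_SKETCH(h)	(((h) >> 16) & 0x3fff)
#define PKT3_OPCODE_SKETCH(h)	(((h) >> 8) & 0xff)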
* @data: pointer to relocation data * @offset_start: starting offset * @offset_mask: offset mask (to align start offset on) * @reloc: reloc informations * * Check next packet is relocation packet3, do bo validation and compute * GPU offset using the provided start. **/ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc) { struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_packet p3reloc; unsigned idx; int r; if (p->chunk_relocs_idx == -1) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } *cs_reloc = NULL; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); if (r) { return r; } p->idx += p3reloc.count + 2; if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { DRM_ERROR("No packet3 for relocation for packet at %d.\n", p3reloc.idx); return -EINVAL; } idx = radeon_get_ib_value(p, p3reloc.idx + 1); if (idx >= relocs_chunk->length_dw) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, relocs_chunk->length_dw); return -EINVAL; } /* FIXME: we assume reloc size is 4 dwords */ *cs_reloc = p->relocs_ptr[(idx / 4)]; return 0; } /** * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP * @p: structure holding the parser context. * * Check if the next packet is a relocation packet3. **/ static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) { struct radeon_cs_packet p3reloc; int r; r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); if (r) { return false; } if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { return false; } return true; } /** * evergreen_cs_packet_next_vline() - parse userspace VLINE packet * @parser: parser structure holding parsing context. * * Userspace sends a special sequence for VLINE waits. * PACKET0 - VLINE_START_END + value * PACKET3 - WAIT_REG_MEM poll vline status reg * RELOC (P3) - crtc_id in reloc. * * This function parses this and relocates the VLINE START END * and WAIT_REG_MEM packets to the correct crtc. * It also detects a switched off crtc and nulls out the * wait in that case. 
*/ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) { struct drm_mode_object *obj; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_cs_packet p3reloc, wait_reg_mem; int crtc_id; int r; uint32_t header, h_idx, reg, wait_reg_mem_info; volatile uint32_t *ib; ib = p->ib.ptr; /* parse the WAIT_REG_MEM */ r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx); if (r) return r; /* check its a WAIT_REG_MEM */ if (wait_reg_mem.type != PACKET_TYPE3 || wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); return -EINVAL; } wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); /* bit 4 is reg (0) or mem (1) */ if (wait_reg_mem_info & 0x10) { DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); return -EINVAL; } /* waiting for value to be equal */ if ((wait_reg_mem_info & 0x7) != 0x3) { DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); return -EINVAL; } if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); return -EINVAL; } if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); return -EINVAL; } /* jump over the NOP */ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); if (r) return r; h_idx = p->idx - 2; p->idx += wait_reg_mem.count + 2; p->idx += p3reloc.count + 2; header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); reg = CP_PACKET0_GET_REG(header); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_ERROR("cannot find crtc %d\n", crtc_id); return -EINVAL; } crtc = obj_to_crtc(obj); radeon_crtc = to_radeon_crtc(crtc); crtc_id = radeon_crtc->crtc_id; if (!crtc->enabled) { /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ ib[h_idx + 2] = PACKET2(0); ib[h_idx + 3] = PACKET2(0); ib[h_idx + 4] = PACKET2(0); ib[h_idx + 5] = PACKET2(0); ib[h_idx + 6] = PACKET2(0); ib[h_idx + 7] = PACKET2(0); ib[h_idx + 8] = PACKET2(0); } else { switch (reg) { case EVERGREEN_VLINE_START_END: header &= ~R600_CP_PACKET0_REG_MASK; header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2; ib[h_idx] = header; ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2; break; default: DRM_ERROR("unknown crtc reloc\n"); return -EINVAL; } } return 0; } static int evergreen_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) { int r; switch (reg) { case EVERGREEN_VLINE_START_END: r = evergreen_cs_packet_parse_vline(p); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); return r; } break; default: DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", reg, idx); return -EINVAL; } return 0; } static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { unsigned reg, i; unsigned idx; int r; idx = pkt->idx + 1; reg = pkt->reg; for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { r = evergreen_packet0_check(p, pkt, idx, reg); if (r) { return r; } } return 0; } /** * evergreen_cs_check_reg() - check if register is authorized or not * @parser: parser structure holding parsing context * @reg: register we are testing * @idx: index into the cs buffer * * This function will test against evergreen_reg_safe_bm and return 0 * if register is safe. 
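/*
 * Editor's aside: the bitmap test that follows packs one bit per 32-bit
 * register: word index reg >> 7 (32 registers x 4 bytes per word), bit
 * (reg >> 2) & 31.  A clear bit means "always safe", a set bit routes
 * the register into the big switch for special handling.  Sketch
 * (helper hypothetical):
 */
static bool eg_reg_needs_check_sketch(u32 reg)
{
	return (evergreen_reg_safe_bm[reg >> 7] >> ((reg >> 2) & 31)) & 1;
}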
If register is not flag as safe this function * will test it against a list of register needind special handling. */ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; struct radeon_cs_reloc *reloc; u32 last_reg; u32 m, i, tmp, *ib; int r; if (p->rdev->family >= CHIP_CAYMAN) last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm); else last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm); i = (reg >> 7); if (i >= last_reg) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } m = 1 << ((reg >> 2) & 31); if (p->rdev->family >= CHIP_CAYMAN) { if (!(cayman_reg_safe_bm[i] & m)) return 0; } else { if (!(evergreen_reg_safe_bm[i] & m)) return 0; } ib = p->ib.ptr; switch (reg) { /* force following reg to 0 in an attempt to disable out buffer * which will need us to better understand how it works to perform * security check on it (Jerome) */ case SQ_ESGS_RING_SIZE: case SQ_GSVS_RING_SIZE: case SQ_ESTMP_RING_SIZE: case SQ_GSTMP_RING_SIZE: case SQ_HSTMP_RING_SIZE: case SQ_LSTMP_RING_SIZE: case SQ_PSTMP_RING_SIZE: case SQ_VSTMP_RING_SIZE: case SQ_ESGS_RING_ITEMSIZE: case SQ_ESTMP_RING_ITEMSIZE: case SQ_GSTMP_RING_ITEMSIZE: case SQ_GSVS_RING_ITEMSIZE: case SQ_GS_VERT_ITEMSIZE: case SQ_GS_VERT_ITEMSIZE_1: case SQ_GS_VERT_ITEMSIZE_2: case SQ_GS_VERT_ITEMSIZE_3: case SQ_GSVS_RING_OFFSET_1: case SQ_GSVS_RING_OFFSET_2: case SQ_GSVS_RING_OFFSET_3: case SQ_HSTMP_RING_ITEMSIZE: case SQ_LSTMP_RING_ITEMSIZE: case SQ_PSTMP_RING_ITEMSIZE: case SQ_VSTMP_RING_ITEMSIZE: case VGT_TF_RING_SIZE: /* get value to populate the IB don't remove */ /*tmp =radeon_get_ib_value(p, idx); ib[idx] = 0;*/ break; case SQ_ESGS_RING_BASE: case SQ_GSVS_RING_BASE: case SQ_ESTMP_RING_BASE: case SQ_GSTMP_RING_BASE: case SQ_HSTMP_RING_BASE: case SQ_LSTMP_RING_BASE: case SQ_PSTMP_RING_BASE: case SQ_VSTMP_RING_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case DB_DEPTH_CONTROL: track->db_depth_control = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case CAYMAN_DB_EQAA: if (p->rdev->family < CHIP_CAYMAN) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } break; case CAYMAN_DB_DEPTH_INFO: if (p->rdev->family < CHIP_CAYMAN) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } break; case DB_Z_INFO: track->db_z_info = radeon_get_ib_value(p, idx); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] &= ~Z_ARRAY_MODE(0xf); track->db_z_info &= ~Z_ARRAY_MODE(0xf); ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; evergreen_tiling_fields(reloc->lobj.tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); ib[idx] |= DB_TILE_SPLIT(tile_split) | DB_BANK_WIDTH(bankw) | DB_BANK_HEIGHT(bankh) | DB_MACRO_TILE_ASPECT(mtaspect); } } track->db_dirty = true; break; case DB_STENCIL_INFO: track->db_s_info = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case DB_DEPTH_VIEW: track->db_depth_view = 
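/*
 * Editor's aside: the recurring
 *	ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 * idiom above patches the command stream in place: userspace writes a
 * buffer-relative base, the checker adds the bo's GPU address in the
 * same 256-byte units the register expects, and the tracker keeps the
 * resulting offset for the later bounds checks.
 */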
radeon_get_ib_value(p, idx); track->db_dirty = true; break; case DB_DEPTH_SIZE: track->db_depth_size = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case R_02805C_DB_DEPTH_SLICE: track->db_depth_slice = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case DB_Z_READ_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_z_read_offset = radeon_get_ib_value(p, idx); ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->db_z_read_bo = reloc->robj; track->db_dirty = true; break; case DB_Z_WRITE_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_z_write_offset = radeon_get_ib_value(p, idx); ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->db_z_write_bo = reloc->robj; track->db_dirty = true; break; case DB_STENCIL_READ_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_s_read_offset = radeon_get_ib_value(p, idx); ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->db_s_read_bo = reloc->robj; track->db_dirty = true; break; case DB_STENCIL_WRITE_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_s_write_offset = radeon_get_ib_value(p, idx); ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->db_s_write_bo = reloc->robj; track->db_dirty = true; break; case VGT_STRMOUT_CONFIG: track->vgt_strmout_config = radeon_get_ib_value(p, idx); track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_CONFIG: track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_BASE_0: case VGT_STRMOUT_BUFFER_BASE_1: case VGT_STRMOUT_BUFFER_BASE_2: case VGT_STRMOUT_BUFFER_BASE_3: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->vgt_strmout_bo[tmp] = reloc->robj; track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_SIZE_0: case VGT_STRMOUT_BUFFER_SIZE_1: case VGT_STRMOUT_BUFFER_SIZE_2: case VGT_STRMOUT_BUFFER_SIZE_3: tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; /* size in register is DWs, convert to bytes */ track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; track->streamout_dirty = true; break; case CP_COHER_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "missing reloc for CP_COHER_BASE " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); case CB_TARGET_MASK: track->cb_target_mask = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case CB_SHADER_MASK: track->cb_shader_mask = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case PA_SC_AA_CONFIG: if (p->rdev->family >= CHIP_CAYMAN) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; track->nsamples = 1 << tmp; break; case CAYMAN_PA_SC_AA_CONFIG: if (p->rdev->family < CHIP_CAYMAN) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = 
radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK; track->nsamples = 1 << tmp; break; case CB_COLOR0_VIEW: case CB_COLOR1_VIEW: case CB_COLOR2_VIEW: case CB_COLOR3_VIEW: case CB_COLOR4_VIEW: case CB_COLOR5_VIEW: case CB_COLOR6_VIEW: case CB_COLOR7_VIEW: tmp = (reg - CB_COLOR0_VIEW) / 0x3c; track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case CB_COLOR8_VIEW: case CB_COLOR9_VIEW: case CB_COLOR10_VIEW: case CB_COLOR11_VIEW: tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case CB_COLOR0_INFO: case CB_COLOR1_INFO: case CB_COLOR2_INFO: case CB_COLOR3_INFO: case CB_COLOR4_INFO: case CB_COLOR5_INFO: case CB_COLOR6_INFO: case CB_COLOR7_INFO: tmp = (reg - CB_COLOR0_INFO) / 0x3c; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } track->cb_dirty = true; break; case CB_COLOR8_INFO: case CB_COLOR9_INFO: case CB_COLOR10_INFO: case CB_COLOR11_INFO: tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } track->cb_dirty = true; break; case CB_COLOR0_PITCH: case CB_COLOR1_PITCH: case CB_COLOR2_PITCH: case CB_COLOR3_PITCH: case CB_COLOR4_PITCH: case CB_COLOR5_PITCH: case CB_COLOR6_PITCH: case CB_COLOR7_PITCH: tmp = (reg - CB_COLOR0_PITCH) / 0x3c; track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case CB_COLOR8_PITCH: case CB_COLOR9_PITCH: case CB_COLOR10_PITCH: case CB_COLOR11_PITCH: tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case CB_COLOR0_SLICE: case CB_COLOR1_SLICE: case CB_COLOR2_SLICE: case CB_COLOR3_SLICE: case CB_COLOR4_SLICE: case CB_COLOR5_SLICE: case CB_COLOR6_SLICE: case CB_COLOR7_SLICE: tmp = (reg - CB_COLOR0_SLICE) / 0x3c; track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); track->cb_color_slice_idx[tmp] = idx; track->cb_dirty = true; break; case CB_COLOR8_SLICE: case CB_COLOR9_SLICE: case CB_COLOR10_SLICE: case CB_COLOR11_SLICE: tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); track->cb_color_slice_idx[tmp] = idx; track->cb_dirty = true; break; case CB_COLOR0_ATTRIB: case CB_COLOR1_ATTRIB: case CB_COLOR2_ATTRIB: case CB_COLOR3_ATTRIB: case CB_COLOR4_ATTRIB: case CB_COLOR5_ATTRIB: case CB_COLOR6_ATTRIB: case CB_COLOR7_ATTRIB: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; evergreen_tiling_fields(reloc->lobj.tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx] |= 
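/*
 * Editor's aside: the divisions above recover the target index from the
 * register offset -- colour buffers 0..7 carry full register sets spaced
 * 0x3c bytes apart, while buffers 8..11 use the smaller 0x1c-stride set:
 */
id = (reg - CB_COLOR0_VIEW) / 0x3c;		/* targets 0..7  */
id = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;	/* targets 8..11 */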
CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); ib[idx] |= CB_TILE_SPLIT(tile_split) | CB_BANK_WIDTH(bankw) | CB_BANK_HEIGHT(bankh) | CB_MACRO_TILE_ASPECT(mtaspect); } } tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c); track->cb_color_attrib[tmp] = ib[idx]; track->cb_dirty = true; break; case CB_COLOR8_ATTRIB: case CB_COLOR9_ATTRIB: case CB_COLOR10_ATTRIB: case CB_COLOR11_ATTRIB: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; evergreen_tiling_fields(reloc->lobj.tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); ib[idx] |= CB_TILE_SPLIT(tile_split) | CB_BANK_WIDTH(bankw) | CB_BANK_HEIGHT(bankh) | CB_MACRO_TILE_ASPECT(mtaspect); } } tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8; track->cb_color_attrib[tmp] = ib[idx]; track->cb_dirty = true; break; case CB_COLOR0_FMASK: case CB_COLOR1_FMASK: case CB_COLOR2_FMASK: case CB_COLOR3_FMASK: case CB_COLOR4_FMASK: case CB_COLOR5_FMASK: case CB_COLOR6_FMASK: case CB_COLOR7_FMASK: tmp = (reg - CB_COLOR0_FMASK) / 0x3c; r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_fmask_bo[tmp] = reloc->robj; break; case CB_COLOR0_CMASK: case CB_COLOR1_CMASK: case CB_COLOR2_CMASK: case CB_COLOR3_CMASK: case CB_COLOR4_CMASK: case CB_COLOR5_CMASK: case CB_COLOR6_CMASK: case CB_COLOR7_CMASK: tmp = (reg - CB_COLOR0_CMASK) / 0x3c; r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_cmask_bo[tmp] = reloc->robj; break; case CB_COLOR0_FMASK_SLICE: case CB_COLOR1_FMASK_SLICE: case CB_COLOR2_FMASK_SLICE: case CB_COLOR3_FMASK_SLICE: case CB_COLOR4_FMASK_SLICE: case CB_COLOR5_FMASK_SLICE: case CB_COLOR6_FMASK_SLICE: case CB_COLOR7_FMASK_SLICE: tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c; track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx); break; case CB_COLOR0_CMASK_SLICE: case CB_COLOR1_CMASK_SLICE: case CB_COLOR2_CMASK_SLICE: case CB_COLOR3_CMASK_SLICE: case CB_COLOR4_CMASK_SLICE: case CB_COLOR5_CMASK_SLICE: case CB_COLOR6_CMASK_SLICE: case CB_COLOR7_CMASK_SLICE: tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c; track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx); break; case CB_COLOR0_BASE: case CB_COLOR1_BASE: case CB_COLOR2_BASE: case CB_COLOR3_BASE: case CB_COLOR4_BASE: case CB_COLOR5_BASE: case CB_COLOR6_BASE: case CB_COLOR7_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = (reg - CB_COLOR0_BASE) / 0x3c; track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_bo[tmp] = reloc->robj; track->cb_dirty = true; break; case CB_COLOR8_BASE: case CB_COLOR9_BASE: case CB_COLOR10_BASE: case CB_COLOR11_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) 
& 0xffffffff); track->cb_color_bo[tmp] = reloc->robj; track->cb_dirty = true; break; case DB_HTILE_DATA_BASE: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->htile_offset = radeon_get_ib_value(p, idx); ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->htile_bo = reloc->robj; track->db_dirty = true; break; case DB_HTILE_SURFACE: /* 8x8 only */ track->htile_surface = radeon_get_ib_value(p, idx); /* force 8x8 htile width and height */ ib[idx] |= 3; track->db_dirty = true; break; case CB_IMMED0_BASE: case CB_IMMED1_BASE: case CB_IMMED2_BASE: case CB_IMMED3_BASE: case CB_IMMED4_BASE: case CB_IMMED5_BASE: case CB_IMMED6_BASE: case CB_IMMED7_BASE: case CB_IMMED8_BASE: case CB_IMMED9_BASE: case CB_IMMED10_BASE: case CB_IMMED11_BASE: case SQ_PGM_START_FS: case SQ_PGM_START_ES: case SQ_PGM_START_VS: case SQ_PGM_START_GS: case SQ_PGM_START_PS: case SQ_PGM_START_HS: case SQ_PGM_START_LS: case SQ_CONST_MEM_BASE: case SQ_ALU_CONST_CACHE_GS_0: case SQ_ALU_CONST_CACHE_GS_1: case SQ_ALU_CONST_CACHE_GS_2: case SQ_ALU_CONST_CACHE_GS_3: case SQ_ALU_CONST_CACHE_GS_4: case SQ_ALU_CONST_CACHE_GS_5: case SQ_ALU_CONST_CACHE_GS_6: case SQ_ALU_CONST_CACHE_GS_7: case SQ_ALU_CONST_CACHE_GS_8: case SQ_ALU_CONST_CACHE_GS_9: case SQ_ALU_CONST_CACHE_GS_10: case SQ_ALU_CONST_CACHE_GS_11: case SQ_ALU_CONST_CACHE_GS_12: case SQ_ALU_CONST_CACHE_GS_13: case SQ_ALU_CONST_CACHE_GS_14: case SQ_ALU_CONST_CACHE_GS_15: case SQ_ALU_CONST_CACHE_PS_0: case SQ_ALU_CONST_CACHE_PS_1: case SQ_ALU_CONST_CACHE_PS_2: case SQ_ALU_CONST_CACHE_PS_3: case SQ_ALU_CONST_CACHE_PS_4: case SQ_ALU_CONST_CACHE_PS_5: case SQ_ALU_CONST_CACHE_PS_6: case SQ_ALU_CONST_CACHE_PS_7: case SQ_ALU_CONST_CACHE_PS_8: case SQ_ALU_CONST_CACHE_PS_9: case SQ_ALU_CONST_CACHE_PS_10: case SQ_ALU_CONST_CACHE_PS_11: case SQ_ALU_CONST_CACHE_PS_12: case SQ_ALU_CONST_CACHE_PS_13: case SQ_ALU_CONST_CACHE_PS_14: case SQ_ALU_CONST_CACHE_PS_15: case SQ_ALU_CONST_CACHE_VS_0: case SQ_ALU_CONST_CACHE_VS_1: case SQ_ALU_CONST_CACHE_VS_2: case SQ_ALU_CONST_CACHE_VS_3: case SQ_ALU_CONST_CACHE_VS_4: case SQ_ALU_CONST_CACHE_VS_5: case SQ_ALU_CONST_CACHE_VS_6: case SQ_ALU_CONST_CACHE_VS_7: case SQ_ALU_CONST_CACHE_VS_8: case SQ_ALU_CONST_CACHE_VS_9: case SQ_ALU_CONST_CACHE_VS_10: case SQ_ALU_CONST_CACHE_VS_11: case SQ_ALU_CONST_CACHE_VS_12: case SQ_ALU_CONST_CACHE_VS_13: case SQ_ALU_CONST_CACHE_VS_14: case SQ_ALU_CONST_CACHE_VS_15: case SQ_ALU_CONST_CACHE_HS_0: case SQ_ALU_CONST_CACHE_HS_1: case SQ_ALU_CONST_CACHE_HS_2: case SQ_ALU_CONST_CACHE_HS_3: case SQ_ALU_CONST_CACHE_HS_4: case SQ_ALU_CONST_CACHE_HS_5: case SQ_ALU_CONST_CACHE_HS_6: case SQ_ALU_CONST_CACHE_HS_7: case SQ_ALU_CONST_CACHE_HS_8: case SQ_ALU_CONST_CACHE_HS_9: case SQ_ALU_CONST_CACHE_HS_10: case SQ_ALU_CONST_CACHE_HS_11: case SQ_ALU_CONST_CACHE_HS_12: case SQ_ALU_CONST_CACHE_HS_13: case SQ_ALU_CONST_CACHE_HS_14: case SQ_ALU_CONST_CACHE_HS_15: case SQ_ALU_CONST_CACHE_LS_0: case SQ_ALU_CONST_CACHE_LS_1: case SQ_ALU_CONST_CACHE_LS_2: case SQ_ALU_CONST_CACHE_LS_3: case SQ_ALU_CONST_CACHE_LS_4: case SQ_ALU_CONST_CACHE_LS_5: case SQ_ALU_CONST_CACHE_LS_6: case SQ_ALU_CONST_CACHE_LS_7: case SQ_ALU_CONST_CACHE_LS_8: case SQ_ALU_CONST_CACHE_LS_9: case SQ_ALU_CONST_CACHE_LS_10: case SQ_ALU_CONST_CACHE_LS_11: case SQ_ALU_CONST_CACHE_LS_12: case SQ_ALU_CONST_CACHE_LS_13: case SQ_ALU_CONST_CACHE_LS_14: case SQ_ALU_CONST_CACHE_LS_15: r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " 
"0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case SX_MEMORY_EXPORT_BASE: if (p->rdev->family >= CHIP_CAYMAN) { dev_warn(p->dev, "bad SET_CONFIG_REG " "0x%04X\n", reg); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONFIG_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case CAYMAN_SX_SCATTER_EXPORT_BASE: if (p->rdev->family < CHIP_CAYMAN) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case SX_MISC: track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; break; default: dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } return 0; } static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { u32 last_reg, m, i; if (p->rdev->family >= CHIP_CAYMAN) last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm); else last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm); i = (reg >> 7); if (i >= last_reg) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return false; } m = 1 << ((reg >> 2) & 31); if (p->rdev->family >= CHIP_CAYMAN) { if (!(cayman_reg_safe_bm[i] & m)) return true; } else { if (!(evergreen_reg_safe_bm[i] & m)) return true; } dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return false; } static int evergreen_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { struct radeon_cs_reloc *reloc; struct evergreen_cs_track *track; volatile u32 *ib; unsigned idx; unsigned i; unsigned start_reg, end_reg, reg; int r; u32 idx_value; track = (struct evergreen_cs_track *)p->track; ib = p->ib.ptr; idx = pkt->idx + 1; idx_value = radeon_get_ib_value(p, idx); switch (pkt->opcode) { case PACKET3_SET_PREDICATION: { int pred_op; int tmp; uint64_t offset; if (pkt->count != 1) { DRM_ERROR("bad SET PREDICATION\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx + 1); pred_op = (tmp >> 16) & 0x7; /* for the clear predicate operation */ if (pred_op == 0) return 0; if (pred_op > 2) { DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET PREDICATION\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (idx_value & 0xfffffff0) + ((u64)(tmp & 0xff) << 32); ib[idx + 0] = offset; ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); } break; case PACKET3_CONTEXT_CONTROL: if (pkt->count != 1) { DRM_ERROR("bad CONTEXT_CONTROL\n"); return -EINVAL; } break; case PACKET3_INDEX_TYPE: case PACKET3_NUM_INSTANCES: case PACKET3_CLEAR_STATE: if (pkt->count) { DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); return -EINVAL; } break; case CAYMAN_PACKET3_DEALLOC_STATE: if (p->rdev->family < CHIP_CAYMAN) { DRM_ERROR("bad PACKET3_DEALLOC_STATE\n"); return -EINVAL; } if (pkt->count) { DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); return -EINVAL; } break; case PACKET3_INDEX_BASE: { uint64_t offset; if (pkt->count != 1) { DRM_ERROR("bad INDEX_BASE\n"); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad INDEX_BASE\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + idx_value + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); ib[idx+0] = offset; 
ib[idx+1] = upper_32_bits(offset) & 0xff; r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DRAW_INDEX: { uint64_t offset; if (pkt->count != 3) { DRM_ERROR("bad DRAW_INDEX\n"); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad DRAW_INDEX\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + idx_value + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DRAW_INDEX_2: { uint64_t offset; if (pkt->count != 4) { DRM_ERROR("bad DRAW_INDEX_2\n"); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad DRAW_INDEX_2\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + radeon_get_ib_value(p, idx+1) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DRAW_INDEX_AUTO: if (pkt->count != 1) { DRM_ERROR("bad DRAW_INDEX_AUTO\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_MULTI_AUTO: if (pkt->count != 2) { DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_IMMD: if (pkt->count < 2) { DRM_ERROR("bad DRAW_INDEX_IMMD\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_DRAW_INDEX_OFFSET: if (pkt->count != 2) { DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_DRAW_INDEX_OFFSET_2: if (pkt->count != 3) { DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_DISPATCH_DIRECT: if (pkt->count != 3) { DRM_ERROR("bad DISPATCH_DIRECT\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DISPATCH_INDIRECT: if (pkt->count != 1) { DRM_ERROR("bad DISPATCH_INDIRECT\n"); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad DISPATCH_INDIRECT\n"); return -EINVAL; } ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); r = evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { DRM_ERROR("bad WAIT_REG_MEM\n"); return -EINVAL; } /* bit 4 is reg (0) or mem (1) */ if (idx_value & 0x10) { uint64_t offset; r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad WAIT_REG_MEM\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = (ib[idx+1] & 0x3) 
| (offset & 0xfffffffc); ib[idx+2] = upper_32_bits(offset) & 0xff; } break; case PACKET3_CP_DMA: { u32 command, size, info; u64 offset, tmp; if (pkt->count != 4) { DRM_ERROR("bad CP DMA\n"); return -EINVAL; } command = radeon_get_ib_value(p, idx+4); size = command & 0x1fffff; info = radeon_get_ib_value(p, idx+1); if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */ ((((info & 0x00300000) >> 20) == 0) && (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */ ((((info & 0x60000000) >> 29) == 0) && (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ /* non mem to mem copies requires dw aligned count */ if (size % 4) { DRM_ERROR("CP DMA command requires dw count alignment\n"); return -EINVAL; } } if (command & PACKET3_CP_DMA_CMD_SAS) { /* src address space is register */ /* GDS is ok */ if (((info & 0x60000000) >> 29) != 1) { DRM_ERROR("CP DMA SAS not supported\n"); return -EINVAL; } } else { if (command & PACKET3_CP_DMA_CMD_SAIC) { DRM_ERROR("CP DMA SAIC only supported for registers\n"); return -EINVAL; } /* src address space is memory */ if (((info & 0x60000000) >> 29) == 0) { r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad CP DMA SRC\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n", (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx] = offset; ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); } else if (((info & 0x60000000) >> 29) != 2) { DRM_ERROR("bad CP DMA SRC_SEL\n"); return -EINVAL; } } if (command & PACKET3_CP_DMA_CMD_DAS) { /* dst address space is register */ /* GDS is ok */ if (((info & 0x00300000) >> 20) != 1) { DRM_ERROR("CP DMA DAS not supported\n"); return -EINVAL; } } else { /* dst address space is memory */ if (command & PACKET3_CP_DMA_CMD_DAIC) { DRM_ERROR("CP DMA DAIC only supported for registers\n"); return -EINVAL; } if (((info & 0x00300000) >> 20) == 0) { r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad CP DMA DST\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx+2) + ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n", (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+2] = offset; ib[idx+3] = upper_32_bits(offset) & 0xff; } else { DRM_ERROR("bad CP DMA DST_SEL\n"); return -EINVAL; } } break; } case PACKET3_SURFACE_SYNC: if (pkt->count != 3) { DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } /* 0xffffffff/0x0 is flush all cache flag */ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || radeon_get_ib_value(p, idx + 2) != 0) { r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_EVENT_WRITE: if (pkt->count != 2 && pkt->count != 0) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } if (pkt->count) { uint64_t offset; r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset & 
0xfffffff8; ib[idx+2] = upper_32_bits(offset) & 0xff; } break; case PACKET3_EVENT_WRITE_EOP: { uint64_t offset; if (pkt->count != 4) { DRM_ERROR("bad EVENT_WRITE_EOP\n"); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad EVENT_WRITE_EOP\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset & 0xfffffffc; ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); break; } case PACKET3_EVENT_WRITE_EOS: { uint64_t offset; if (pkt->count != 3) { DRM_ERROR("bad EVENT_WRITE_EOS\n"); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad EVENT_WRITE_EOS\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset & 0xfffffffc; ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); break; } case PACKET3_SET_CONFIG_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONFIG_REG_START) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); r = evergreen_cs_check_reg(p, reg, idx+1+i); if (r) return r; } break; case PACKET3_SET_CONTEXT_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || (start_reg >= PACKET3_SET_CONTEXT_REG_END) || (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); r = evergreen_cs_check_reg(p, reg, idx+1+i); if (r) return r; } break; case PACKET3_SET_RESOURCE: if (pkt->count % 8) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_RESOURCE_START) || (start_reg >= PACKET3_SET_RESOURCE_END) || (end_reg >= PACKET3_SET_RESOURCE_END)) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } for (i = 0; i < (pkt->count / 8); i++) { struct radeon_bo *texture, *mipmap; u32 toffset, moffset; u32 size, offset, mip_address, tex_dim; switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { case SQ_TEX_VTX_VALID_TEXTURE: /* tex base */ r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET_RESOURCE (tex)\n"); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; evergreen_tiling_fields(reloc->lobj.tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split); ib[idx+1+(i*8)+7] |= TEX_BANK_WIDTH(bankw) | TEX_BANK_HEIGHT(bankh) | MACRO_TILE_ASPECT(mtaspect) | TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); } } texture = reloc->robj; toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); /* tex mip base */ tex_dim = ib[idx+1+(i*8)+0] & 0x7; mip_address = ib[idx+1+(i*8)+3]; if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) && !mip_address && 
!evergreen_cs_packet_next_is_pkt3_nop(p)) { /* MIP_ADDRESS should point to FMASK for an MSAA texture. * It should be 0 if FMASK is disabled. */ moffset = 0; mipmap = NULL; } else { r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET_RESOURCE (tex)\n"); return -EINVAL; } moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); mipmap = reloc->robj; } r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8)); if (r) return r; ib[idx+1+(i*8)+2] += toffset; ib[idx+1+(i*8)+3] += moffset; break; case SQ_TEX_VTX_VALID_BUFFER: { uint64_t offset64; /* vtx base */ r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET_RESOURCE (vtx)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1+(i*8)+0); size = radeon_get_ib_value(p, idx+1+(i*8)+1); if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { /* force size to size of the buffer */ dev_warn(p->dev, "vbo resource seems too big for the bo\n"); ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; } offset64 = reloc->lobj.gpu_offset + offset; ib[idx+1+(i*8)+0] = offset64; ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | (upper_32_bits(offset64) & 0xff); break; } case SQ_TEX_VTX_INVALID_TEXTURE: case SQ_TEX_VTX_INVALID_BUFFER: default: DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } } break; case PACKET3_SET_ALU_CONST: /* XXX fix me ALU const buffers only */ break; case PACKET3_SET_BOOL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_BOOL_CONST_START) || (start_reg >= PACKET3_SET_BOOL_CONST_END) || (end_reg >= PACKET3_SET_BOOL_CONST_END)) { DRM_ERROR("bad SET_BOOL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_LOOP_CONST: start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_LOOP_CONST_START) || (start_reg >= PACKET3_SET_LOOP_CONST_END) || (end_reg >= PACKET3_SET_LOOP_CONST_END)) { DRM_ERROR("bad SET_LOOP_CONST\n"); return -EINVAL; } break; case PACKET3_SET_CTL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CTL_CONST_START) || (start_reg >= PACKET3_SET_CTL_CONST_END) || (end_reg >= PACKET3_SET_CTL_CONST_END)) { DRM_ERROR("bad SET_CTL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_SAMPLER: if (pkt->count % 3) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_SAMPLER_START) || (start_reg >= PACKET3_SET_SAMPLER_END) || (end_reg >= PACKET3_SET_SAMPLER_END)) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BUFFER_UPDATE: if (pkt->count != 4) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); return -EINVAL; } /* Updating memory at DST_ADDRESS. */ if (idx_value & 0x1) { u64 offset; r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } /* Reading data from SRC_ADDRESS. 
*/ if (((idx_value >> 1) & 0x3) == 2) { u64 offset; r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } break; case PACKET3_MEM_WRITE: { u64 offset; if (pkt->count != 3) { DRM_ERROR("bad MEM_WRITE (invalid count)\n"); return -EINVAL; } r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+0); offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; if (offset & 0x7) { DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; break; } case PACKET3_COPY_DW: if (pkt->count != 4) { DRM_ERROR("bad COPY_DW (invalid count)\n"); return -EINVAL; } if (idx_value & 0x1) { u64 offset; /* SRC is memory. */ r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad COPY_DW (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } else { /* SRC is a reg. */ reg = radeon_get_ib_value(p, idx+1) << 2; if (!evergreen_is_safe_reg(p, reg, idx+1)) return -EINVAL; } if (idx_value & 0x2) { u64 offset; /* DST is memory. */ r = evergreen_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } else { /* DST is a reg. 
*/ reg = radeon_get_ib_value(p, idx+3) << 2; if (!evergreen_is_safe_reg(p, reg, idx+3)) return -EINVAL; } break; case PACKET3_NOP: break; default: DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; } int evergreen_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; struct evergreen_cs_track *track; u32 tmp; int r; if (p->track == NULL) { /* initialize tracker, we are in kms */ track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (track == NULL) return -ENOMEM; evergreen_cs_track_init(track); if (p->rdev->family >= CHIP_CAYMAN) tmp = p->rdev->config.cayman.tile_config; else tmp = p->rdev->config.evergreen.tile_config; switch (tmp & 0xf) { case 0: track->npipes = 1; break; case 1: default: track->npipes = 2; break; case 2: track->npipes = 4; break; case 3: track->npipes = 8; break; } switch ((tmp & 0xf0) >> 4) { case 0: track->nbanks = 4; break; case 1: default: track->nbanks = 8; break; case 2: track->nbanks = 16; break; } switch ((tmp & 0xf00) >> 8) { case 0: track->group_size = 256; break; case 1: default: track->group_size = 512; break; } switch ((tmp & 0xf000) >> 12) { case 0: track->row_size = 1; break; case 1: default: track->row_size = 2; break; case 2: track->row_size = 4; break; } p->track = track; } do { r = evergreen_cs_packet_parse(p, &pkt, p->idx); if (r) { free(p->track, DRM_MEM_DRIVER); p->track = NULL; return r; } p->idx += pkt.count + 2; switch (pkt.type) { case PACKET_TYPE0: r = evergreen_cs_parse_packet0(p, &pkt); break; case PACKET_TYPE2: break; case PACKET_TYPE3: r = evergreen_packet3_check(p, &pkt); break; default: DRM_ERROR("Unknown packet type %d !\n", pkt.type); free(p->track, DRM_MEM_DRIVER); p->track = NULL; return -EINVAL; } if (r) { free(p->track, DRM_MEM_DRIVER); p->track = NULL; return r; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); #if 0 for (r = 0; r < p->ib.length_dw; r++) { DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); mdelay(1); } #endif free(p->track, DRM_MEM_DRIVER); p->track = NULL; return 0; } /* * DMA */ #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28) #define GET_DMA_COUNT(h) ((h) & 0x000fffff) #define GET_DMA_T(h) (((h) & 0x00800000) >> 23) #define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26) #define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20) /** * evergreen_dma_cs_parse() - parse the DMA IB * @p: parser structure holding parsing context. * * Parses the DMA IB from the CS ioctl and updates * the GPU addresses based on the reloc information and * checks for errors. (Evergreen-Cayman) * Returns 0 for success and an error on failure. 
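* Header layout, as encoded by the GET_DMA_* macros above: bits 31:28 hold the command, bit 26 the new-format flag, bit 23 the tiled flag, bits 22:20 the misc field and bits 19:0 the DW count.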
**/ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc; u32 header, cmd, count, tiled, new_cmd, misc; volatile u32 *ib = p->ib.ptr; u32 idx, idx_value; u64 src_offset, dst_offset, dst2_offset; int r; do { if (p->idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", p->idx, ib_chunk->length_dw); return -EINVAL; } idx = p->idx; header = radeon_get_ib_value(p, idx); cmd = GET_DMA_CMD(header); count = GET_DMA_COUNT(header); tiled = GET_DMA_T(header); new_cmd = GET_DMA_NEW(header); misc = GET_DMA_MISC(header); switch (cmd) { case DMA_PACKET_WRITE: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_WRITE\n"); return -EINVAL; } if (tiled) { dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); p->idx += count + 7; } else { dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; p->idx += count + 3; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n", (uintmax_t)dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_COPY: r = r600_dma_cs_next_reloc(p, &src_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_COPY\n"); return -EINVAL; } if (tiled) { idx_value = radeon_get_ib_value(p, idx + 2); if (new_cmd) { switch (misc) { case 0: /* L2T, frame to fields */ - if (idx_value & (1 << 31)) { + if (idx_value & (1U << 31)) { DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; dst2_offset = radeon_get_ib_value(p, idx+2); dst2_offset <<= 8; src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n", (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; p->idx += 10; break; case 1: /* L2T, T2L partial */ if (p->family < CHIP_CAYMAN) { DRM_ERROR("L2T, T2L Partial is cayman only !\n"); return -EINVAL; } /* detile bit */ - if (idx_value & (1 << 31)) { + if (idx_value & (1U << 31)) { /* tiled src, linear dst */ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 
ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; } else { /* linear src, tiled dst */ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } p->idx += 12; break; case 3: /* L2T, broadcast */ - if (idx_value & (1 << 31)) { + if (idx_value & (1U << 31)) { DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; dst2_offset = radeon_get_ib_value(p, idx+2); dst2_offset <<= 8; src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n", (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; p->idx += 10; break; case 4: /* L2T, T2L */ /* detile bit */ - if (idx_value & (1 << 31)) { + if (idx_value & (1U << 31)) { /* tiled src, linear dst */ src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); dst_offset = radeon_get_ib_value(p, idx+7); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; } else { /* linear src, tiled dst */ src_offset = radeon_get_ib_value(p, idx+7); src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; break; case 5: /* T2T partial */ if (p->family < CHIP_CAYMAN) { DRM_ERROR("T2T Partial is cayman only !\n"); return -EINVAL; } ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8); p->idx += 13; break; case 7: /* L2T, broadcast */ - if (idx_value & (1 << 31)) { + if (idx_value & (1U << 31)) { DRM_ERROR("bad L2T, broadcast
DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; dst2_offset = radeon_get_ib_value(p, idx+2); dst2_offset <<= 8; src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n", (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; p->idx += 10; break; default: DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc); return -EINVAL; } } else { switch (misc) { case 0: /* detile bit */ - if (idx_value & (1 << 31)) { + if (idx_value & (1U << 31)) { /* tiled src, linear dst */ src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); dst_offset = radeon_get_ib_value(p, idx+7); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; } else { /* linear src, tiled dst */ src_offset = radeon_get_ib_value(p, idx+7); src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; break; default: DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc); return -EINVAL; } } } else { if (new_cmd) { switch (misc) { case 0: /* L2L, byte */ src_offset = radeon_get_ib_value(p, idx+2); src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2L, byte src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + count, radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + count, radeon_bo_size(dst_reloc->robj)); return 
-EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; p->idx += 5; break; case 1: /* L2L, partial */ if (p->family < CHIP_CAYMAN) { DRM_ERROR("L2L Partial is cayman only !\n"); return -EINVAL; } ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; p->idx += 9; break; case 4: /* L2L, dw, broadcast */ r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; dst2_offset = radeon_get_ib_value(p, idx+2); dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32; src_offset = radeon_get_ib_value(p, idx+3); src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%ju %lu)\n", (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff; ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; p->idx += 7; break; default: DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc); return -EINVAL; } } else { /* L2L, dw */ src_offset = radeon_get_ib_value(p, idx+2); src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2L, dw src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; p->idx += 5; } } break; case DMA_PACKET_CONSTANT_FILL: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) 
<< 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n", (uintmax_t)dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; p->idx += 4; break; case DMA_PACKET_NOP: p->idx += 1; break; default: DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); #if 0 for (r = 0; r < p->ib.length_dw; r++) { DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); mdelay(1); } #endif return 0; } /* vm parser */ static bool evergreen_vm_reg_valid(u32 reg) { /* context regs are fine */ if (reg >= 0x28000) return true; /* check config regs */ switch (reg) { case WAIT_UNTIL: case GRBM_GFX_INDEX: case CP_STRMOUT_CNTL: case CP_COHER_CNTL: case CP_COHER_SIZE: case VGT_VTX_VECT_EJECT_REG: case VGT_CACHE_INVALIDATION: case VGT_GS_VERTEX_REUSE: case VGT_PRIMITIVE_TYPE: case VGT_INDEX_TYPE: case VGT_NUM_INDICES: case VGT_NUM_INSTANCES: case VGT_COMPUTE_DIM_X: case VGT_COMPUTE_DIM_Y: case VGT_COMPUTE_DIM_Z: case VGT_COMPUTE_START_X: case VGT_COMPUTE_START_Y: case VGT_COMPUTE_START_Z: case VGT_COMPUTE_INDEX: case VGT_COMPUTE_THREAD_GROUP_SIZE: case VGT_HS_OFFCHIP_PARAM: case PA_CL_ENHANCE: case PA_SU_LINE_STIPPLE_VALUE: case PA_SC_LINE_STIPPLE_STATE: case PA_SC_ENHANCE: case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ: case SQ_DYN_GPR_SIMD_LOCK_EN: case SQ_CONFIG: case SQ_GPR_RESOURCE_MGMT_1: case SQ_GLOBAL_GPR_RESOURCE_MGMT_1: case SQ_GLOBAL_GPR_RESOURCE_MGMT_2: case SQ_CONST_MEM_BASE: case SQ_STATIC_THREAD_MGMT_1: case SQ_STATIC_THREAD_MGMT_2: case SQ_STATIC_THREAD_MGMT_3: case SPI_CONFIG_CNTL: case SPI_CONFIG_CNTL_1: case TA_CNTL_AUX: case DB_DEBUG: case DB_DEBUG2: case DB_DEBUG3: case DB_DEBUG4: case DB_WATERMARKS: case TD_PS_BORDER_COLOR_INDEX: case TD_PS_BORDER_COLOR_RED: case TD_PS_BORDER_COLOR_GREEN: case TD_PS_BORDER_COLOR_BLUE: case TD_PS_BORDER_COLOR_ALPHA: case TD_VS_BORDER_COLOR_INDEX: case TD_VS_BORDER_COLOR_RED: case TD_VS_BORDER_COLOR_GREEN: case TD_VS_BORDER_COLOR_BLUE: case TD_VS_BORDER_COLOR_ALPHA: case TD_GS_BORDER_COLOR_INDEX: case TD_GS_BORDER_COLOR_RED: case TD_GS_BORDER_COLOR_GREEN: case TD_GS_BORDER_COLOR_BLUE: case TD_GS_BORDER_COLOR_ALPHA: case TD_HS_BORDER_COLOR_INDEX: case TD_HS_BORDER_COLOR_RED: case TD_HS_BORDER_COLOR_GREEN: case TD_HS_BORDER_COLOR_BLUE: case TD_HS_BORDER_COLOR_ALPHA: case TD_LS_BORDER_COLOR_INDEX: case TD_LS_BORDER_COLOR_RED: case TD_LS_BORDER_COLOR_GREEN: case TD_LS_BORDER_COLOR_BLUE: case TD_LS_BORDER_COLOR_ALPHA: case TD_CS_BORDER_COLOR_INDEX: case TD_CS_BORDER_COLOR_RED: case TD_CS_BORDER_COLOR_GREEN: case TD_CS_BORDER_COLOR_BLUE: case TD_CS_BORDER_COLOR_ALPHA: case SQ_ESGS_RING_SIZE: case SQ_GSVS_RING_SIZE: case SQ_ESTMP_RING_SIZE: case SQ_GSTMP_RING_SIZE: case SQ_HSTMP_RING_SIZE: case SQ_LSTMP_RING_SIZE: case SQ_PSTMP_RING_SIZE: case SQ_VSTMP_RING_SIZE: case SQ_ESGS_RING_ITEMSIZE: case SQ_ESTMP_RING_ITEMSIZE: case SQ_GSTMP_RING_ITEMSIZE: case SQ_GSVS_RING_ITEMSIZE: case SQ_GS_VERT_ITEMSIZE: case SQ_GS_VERT_ITEMSIZE_1: case SQ_GS_VERT_ITEMSIZE_2: case SQ_GS_VERT_ITEMSIZE_3: case SQ_GSVS_RING_OFFSET_1: case SQ_GSVS_RING_OFFSET_2: case SQ_GSVS_RING_OFFSET_3: case SQ_HSTMP_RING_ITEMSIZE: case SQ_LSTMP_RING_ITEMSIZE: case SQ_PSTMP_RING_ITEMSIZE: case SQ_VSTMP_RING_ITEMSIZE: case VGT_TF_RING_SIZE: case SQ_ESGS_RING_BASE: case SQ_GSVS_RING_BASE: case SQ_ESTMP_RING_BASE: case SQ_GSTMP_RING_BASE:
case SQ_HSTMP_RING_BASE: case SQ_LSTMP_RING_BASE: case SQ_PSTMP_RING_BASE: case SQ_VSTMP_RING_BASE: case CAYMAN_VGT_OFFCHIP_LDS_BASE: case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: return true; default: DRM_ERROR("Invalid register 0x%x in CS\n", reg); return false; } } static int evergreen_vm_packet3_check(struct radeon_device *rdev, u32 *ib, struct radeon_cs_packet *pkt) { u32 idx = pkt->idx + 1; u32 idx_value = ib[idx]; u32 start_reg, end_reg, reg, i; u32 command, info; switch (pkt->opcode) { case PACKET3_NOP: case PACKET3_SET_BASE: case PACKET3_CLEAR_STATE: case PACKET3_INDEX_BUFFER_SIZE: case PACKET3_DISPATCH_DIRECT: case PACKET3_DISPATCH_INDIRECT: case PACKET3_MODE_CONTROL: case PACKET3_SET_PREDICATION: case PACKET3_COND_EXEC: case PACKET3_PRED_EXEC: case PACKET3_DRAW_INDIRECT: case PACKET3_DRAW_INDEX_INDIRECT: case PACKET3_INDEX_BASE: case PACKET3_DRAW_INDEX_2: case PACKET3_CONTEXT_CONTROL: case PACKET3_DRAW_INDEX_OFFSET: case PACKET3_INDEX_TYPE: case PACKET3_DRAW_INDEX: case PACKET3_DRAW_INDEX_AUTO: case PACKET3_DRAW_INDEX_IMMD: case PACKET3_NUM_INSTANCES: case PACKET3_DRAW_INDEX_MULTI_AUTO: case PACKET3_STRMOUT_BUFFER_UPDATE: case PACKET3_DRAW_INDEX_OFFSET_2: case PACKET3_DRAW_INDEX_MULTI_ELEMENT: case PACKET3_MPEG_INDEX: case PACKET3_WAIT_REG_MEM: case PACKET3_MEM_WRITE: case PACKET3_SURFACE_SYNC: case PACKET3_EVENT_WRITE: case PACKET3_EVENT_WRITE_EOP: case PACKET3_EVENT_WRITE_EOS: case PACKET3_SET_CONTEXT_REG: case PACKET3_SET_BOOL_CONST: case PACKET3_SET_LOOP_CONST: case PACKET3_SET_RESOURCE: case PACKET3_SET_SAMPLER: case PACKET3_SET_CTL_CONST: case PACKET3_SET_RESOURCE_OFFSET: case PACKET3_SET_CONTEXT_REG_INDIRECT: case PACKET3_SET_RESOURCE_INDIRECT: case CAYMAN_PACKET3_DEALLOC_STATE: break; case PACKET3_COND_WRITE: if (idx_value & 0x100) { reg = ib[idx + 5] * 4; if (!evergreen_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_COPY_DW: if (idx_value & 0x2) { reg = ib[idx + 3] * 4; if (!evergreen_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_SET_CONFIG_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONFIG_REG_START) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); if (!evergreen_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_CP_DMA: command = ib[idx + 4]; info = ib[idx + 1]; if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */ ((((info & 0x00300000) >> 20) == 0) && (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */ ((((info & 0x60000000) >> 29) == 0) && (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ /* non mem to mem copies requires dw aligned count */ if ((command & 0x1fffff) % 4) { DRM_ERROR("CP DMA command requires dw count alignment\n"); return -EINVAL; } } if (command & PACKET3_CP_DMA_CMD_SAS) { /* src address space is register */ if (((info & 0x60000000) >> 29) == 0) { start_reg = idx_value << 2; if (command & PACKET3_CP_DMA_CMD_SAIC) { reg = start_reg; if (!evergreen_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad SRC register\n"); return -EINVAL; } } else { for (i = 0; i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!evergreen_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad SRC register\n"); return -EINVAL; } } } } } if (command & PACKET3_CP_DMA_CMD_DAS) { /* dst address space is register */ if (((info & 
0x00300000) >> 20) == 0) { start_reg = ib[idx + 2]; if (command & PACKET3_CP_DMA_CMD_DAIC) { reg = start_reg; if (!evergreen_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad DST register\n"); return -EINVAL; } } else { for (i = 0; i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!evergreen_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad DST register\n"); return -EINVAL; } } } } } break; default: return -EINVAL; } return 0; } int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) { int ret = 0; u32 idx = 0; struct radeon_cs_packet pkt; do { pkt.idx = idx; pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]); pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]); pkt.one_reg_wr = 0; switch (pkt.type) { case PACKET_TYPE0: dev_err(rdev->dev, "Packet0 not allowed!\n"); ret = -EINVAL; break; case PACKET_TYPE2: idx += 1; break; case PACKET_TYPE3: pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]); ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt); idx += pkt.count + 2; break; default: dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type); ret = -EINVAL; break; } if (ret) break; } while (idx < ib->length_dw); return ret; } /** * evergreen_dma_ib_parse() - parse the DMA IB for VM * @rdev: radeon_device pointer * @ib: radeon_ib pointer * * Parses the DMA IB from the VM CS ioctl and * checks for errors. (Cayman-SI) * Returns 0 for success and an error on failure. **/ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) { u32 idx = 0; u32 header, cmd, count, tiled, new_cmd, misc; do { header = ib->ptr[idx]; cmd = GET_DMA_CMD(header); count = GET_DMA_COUNT(header); tiled = GET_DMA_T(header); new_cmd = GET_DMA_NEW(header); misc = GET_DMA_MISC(header); switch (cmd) { case DMA_PACKET_WRITE: if (tiled) idx += count + 7; else idx += count + 3; break; case DMA_PACKET_COPY: if (tiled) { if (new_cmd) { switch (misc) { case 0: /* L2T, frame to fields */ idx += 10; break; case 1: /* L2T, T2L partial */ idx += 12; break; case 3: /* L2T, broadcast */ idx += 10; break; case 4: /* L2T, T2L */ idx += 9; break; case 5: /* T2T partial */ idx += 13; break; case 7: /* L2T, broadcast */ idx += 10; break; default: DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc); return -EINVAL; } } else { switch (misc) { case 0: idx += 9; break; default: DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc); return -EINVAL; } } } else { if (new_cmd) { switch (misc) { case 0: /* L2L, byte */ idx += 5; break; case 1: /* L2L, partial */ idx += 9; break; case 4: /* L2L, dw, broadcast */ idx += 7; break; default: DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc); return -EINVAL; } } else { /* L2L, dw */ idx += 5; } } break; case DMA_PACKET_CONSTANT_FILL: idx += 4; break; case DMA_PACKET_NOP: idx += 1; break; default: DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (idx < ib->length_dw); return 0; } Index: head/sys/dev/drm2/radeon/evergreend.h =================================================================== --- head/sys/dev/drm2/radeon/evergreend.h (revision 258779) +++ head/sys/dev/drm2/radeon/evergreend.h (revision 258780) @@ -1,2046 +1,2046 @@ /* * Copyright 2010 Advanced Micro Devices, Inc.
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Alex Deucher */ #include __FBSDID("$FreeBSD$"); #ifndef EVERGREEND_H #define EVERGREEND_H #define EVERGREEN_MAX_SH_GPRS 256 #define EVERGREEN_MAX_TEMP_GPRS 16 #define EVERGREEN_MAX_SH_THREADS 256 #define EVERGREEN_MAX_SH_STACK_ENTRIES 4096 #define EVERGREEN_MAX_FRC_EOV_CNT 16384 #define EVERGREEN_MAX_BACKENDS 8 #define EVERGREEN_MAX_BACKENDS_MASK 0xFF #define EVERGREEN_MAX_SIMDS 16 #define EVERGREEN_MAX_SIMDS_MASK 0xFFFF #define EVERGREEN_MAX_PIPES 8 #define EVERGREEN_MAX_PIPES_MASK 0xFF #define EVERGREEN_MAX_LDS_NUM 0xFFFF #define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003 #define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003 #define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 #define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002 #define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002 #define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002 #define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001 #define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001 #define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002 #define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002 /* Registers */ #define RCU_IND_INDEX 0x100 #define RCU_IND_DATA 0x104 #define GRBM_GFX_INDEX 0x802C #define INSTANCE_INDEX(x) ((x) << 0) #define SE_INDEX(x) ((x) << 16) #define INSTANCE_BROADCAST_WRITES (1 << 30) -#define SE_BROADCAST_WRITES (1 << 31) +#define SE_BROADCAST_WRITES (1U << 31) #define RLC_GFX_INDEX 0x3fC4 #define CC_GC_SHADER_PIPE_CONFIG 0x8950 #define WRITE_DIS (1 << 0) #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) #define GB_ADDR_CONFIG 0x98F8 #define NUM_PIPES(x) ((x) << 0) #define NUM_PIPES_MASK 0x0000000f #define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) #define BANK_INTERLEAVE_SIZE(x) ((x) << 8) #define NUM_SHADER_ENGINES(x) ((x) << 12) #define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) #define NUM_GPUS(x) ((x) << 20) #define MULTI_GPU_TILE_SIZE(x) ((x) << 24) #define ROW_SIZE(x) ((x) << 28) #define GB_BACKEND_MAP 0x98FC #define DMIF_ADDR_CONFIG 0xBD4 #define HDP_ADDR_CONFIG 0x2F48 #define HDP_MISC_CNTL 0x2F4C #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 #define GC_USER_RB_BACKEND_DISABLE 0x9B7C #define CGTS_SYS_TCC_DISABLE 0x3F90 #define CGTS_TCC_DISABLE 0x9148 #define CGTS_USER_SYS_TCC_DISABLE 0x3F94 #define CGTS_USER_TCC_DISABLE 0x914C #define CONFIG_MEMSIZE 0x5428 #define BIF_FB_EN 0x5490 #define FB_READ_EN (1 << 0) #define FB_WRITE_EN (1 << 1) #define CP_STRMOUT_CNTL 0x84FC #define CP_COHER_CNTL 0x85F0 #define CP_COHER_SIZE 0x85F4 #define CP_COHER_BASE 0x85F8 
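/* Note on the (1U << 31) spelling used for the bit-31 defines in this file:
 * with a 32-bit int, 1 << 31 shifts a signed 1 into the sign bit, which is
 * undefined behavior in C, while 1U << 31 is the well-defined mask
 * 0x80000000. A minimal sketch for illustration only (kept out of the
 * build, in the style of the #if 0 blocks above):
 */
#if 0
int bad = 1 << 31;		/* signed overflow: 2**31 > INT_MAX */
unsigned int good = 1U << 31;	/* well defined: 0x80000000 */
#endif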
#define CP_STALLED_STAT1 0x8674 #define CP_STALLED_STAT2 0x8678 #define CP_BUSY_STAT 0x867C #define CP_STAT 0x8680 #define CP_ME_CNTL 0x86D8 #define CP_ME_HALT (1 << 28) #define CP_PFP_HALT (1 << 26) #define CP_ME_RAM_DATA 0xC160 #define CP_ME_RAM_RADDR 0xC158 #define CP_ME_RAM_WADDR 0xC15C #define CP_MEQ_THRESHOLDS 0x8764 #define STQ_SPLIT(x) ((x) << 0) #define CP_PERFMON_CNTL 0x87FC #define CP_PFP_UCODE_ADDR 0xC150 #define CP_PFP_UCODE_DATA 0xC154 #define CP_QUEUE_THRESHOLDS 0x8760 #define ROQ_IB1_START(x) ((x) << 0) #define ROQ_IB2_START(x) ((x) << 8) #define CP_RB_BASE 0xC100 #define CP_RB_CNTL 0xC104 #define RB_BUFSZ(x) ((x) << 0) #define RB_BLKSZ(x) ((x) << 8) #define RB_NO_UPDATE (1 << 27) -#define RB_RPTR_WR_ENA (1 << 31) +#define RB_RPTR_WR_ENA (1U << 31) #define BUF_SWAP_32BIT (2 << 16) #define CP_RB_RPTR 0x8700 #define CP_RB_RPTR_ADDR 0xC10C #define RB_RPTR_SWAP(x) ((x) << 0) #define CP_RB_RPTR_ADDR_HI 0xC110 #define CP_RB_RPTR_WR 0xC108 #define CP_RB_WPTR 0xC114 #define CP_RB_WPTR_ADDR 0xC118 #define CP_RB_WPTR_ADDR_HI 0xC11C #define CP_RB_WPTR_DELAY 0x8704 #define CP_SEM_WAIT_TIMER 0x85BC #define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8 #define CP_DEBUG 0xC1FC /* Audio clocks */ #define DCCG_AUDIO_DTO_SOURCE 0x05ac # define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */ # define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */ #define DCCG_AUDIO_DTO0_PHASE 0x05b0 #define DCCG_AUDIO_DTO0_MODULE 0x05b4 #define DCCG_AUDIO_DTO0_LOAD 0x05b8 #define DCCG_AUDIO_DTO0_CNTL 0x05bc #define DCCG_AUDIO_DTO1_PHASE 0x05c0 #define DCCG_AUDIO_DTO1_MODULE 0x05c4 #define DCCG_AUDIO_DTO1_LOAD 0x05c8 #define DCCG_AUDIO_DTO1_CNTL 0x05cc /* DCE 4.0 AFMT */ #define HDMI_CONTROL 0x7030 # define HDMI_KEEPOUT_MODE (1 << 0) # define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */ # define HDMI_ERROR_ACK (1 << 8) # define HDMI_ERROR_MASK (1 << 9) # define HDMI_DEEP_COLOR_ENABLE (1 << 24) # define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28) # define HDMI_24BIT_DEEP_COLOR 0 # define HDMI_30BIT_DEEP_COLOR 1 # define HDMI_36BIT_DEEP_COLOR 2 #define HDMI_STATUS 0x7034 # define HDMI_ACTIVE_AVMUTE (1 << 0) # define HDMI_AUDIO_PACKET_ERROR (1 << 16) # define HDMI_VBI_PACKET_ERROR (1 << 20) #define HDMI_AUDIO_PACKET_CONTROL 0x7038 # define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4) # define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) #define HDMI_ACR_PACKET_CONTROL 0x703c # define HDMI_ACR_SEND (1 << 0) # define HDMI_ACR_CONT (1 << 1) # define HDMI_ACR_SELECT(x) (((x) & 3) << 4) # define HDMI_ACR_HW 0 # define HDMI_ACR_32 1 # define HDMI_ACR_44 2 # define HDMI_ACR_48 3 # define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ # define HDMI_ACR_AUTO_SEND (1 << 12) # define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16) # define HDMI_ACR_X1 1 # define HDMI_ACR_X2 2 # define HDMI_ACR_X4 4 -# define HDMI_ACR_AUDIO_PRIORITY (1 << 31) +# define HDMI_ACR_AUDIO_PRIORITY (1U << 31) #define HDMI_VBI_PACKET_CONTROL 0x7040 # define HDMI_NULL_SEND (1 << 0) # define HDMI_GC_SEND (1 << 4) # define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */ #define HDMI_INFOFRAME_CONTROL0 0x7044 # define HDMI_AVI_INFO_SEND (1 << 0) # define HDMI_AVI_INFO_CONT (1 << 1) # define HDMI_AUDIO_INFO_SEND (1 << 4) # define HDMI_AUDIO_INFO_CONT (1 << 5) # define HDMI_MPEG_INFO_SEND (1 << 8) # define HDMI_MPEG_INFO_CONT (1 << 9) #define HDMI_INFOFRAME_CONTROL1 0x7048 # define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) # define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) # define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) #define 
HDMI_GENERIC_PACKET_CONTROL 0x704c # define HDMI_GENERIC0_SEND (1 << 0) # define HDMI_GENERIC0_CONT (1 << 1) # define HDMI_GENERIC1_SEND (1 << 4) # define HDMI_GENERIC1_CONT (1 << 5) # define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16) # define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24) #define HDMI_GC 0x7058 # define HDMI_GC_AVMUTE (1 << 0) # define HDMI_GC_AVMUTE_CONT (1 << 2) #define AFMT_AUDIO_PACKET_CONTROL2 0x705c # define AFMT_AUDIO_LAYOUT_OVRD (1 << 0) # define AFMT_AUDIO_LAYOUT_SELECT (1 << 1) # define AFMT_60958_CS_SOURCE (1 << 4) # define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8) # define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16) #define AFMT_AVI_INFO0 0x7084 # define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define AFMT_AVI_INFO_S(x) (((x) & 3) << 8) # define AFMT_AVI_INFO_B(x) (((x) & 3) << 10) # define AFMT_AVI_INFO_A(x) (((x) & 1) << 12) # define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13) # define AFMT_AVI_INFO_Y_RGB 0 # define AFMT_AVI_INFO_Y_YCBCR422 1 # define AFMT_AVI_INFO_Y_YCBCR444 2 # define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8) # define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16) # define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20) # define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22) # define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16) # define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24) # define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26) # define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28) # define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31) # define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24) #define AFMT_AVI_INFO1 0x7088 # define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */ # define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */ # define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12) # define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14) # define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16) #define AFMT_AVI_INFO2 0x708c # define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0) # define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16) #define AFMT_AVI_INFO3 0x7090 # define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0) # define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24) #define AFMT_MPEG_INFO0 0x7094 # define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8) # define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16) # define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24) #define AFMT_MPEG_INFO1 0x7098 # define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0) # define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8) # define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12) #define AFMT_GENERIC0_HDR 0x709c #define AFMT_GENERIC0_0 0x70a0 #define AFMT_GENERIC0_1 0x70a4 #define AFMT_GENERIC0_2 0x70a8 #define AFMT_GENERIC0_3 0x70ac #define AFMT_GENERIC0_4 0x70b0 #define AFMT_GENERIC0_5 0x70b4 #define AFMT_GENERIC0_6 0x70b8 #define AFMT_GENERIC1_HDR 0x70bc #define AFMT_GENERIC1_0 0x70c0 #define AFMT_GENERIC1_1 0x70c4 #define AFMT_GENERIC1_2 0x70c8 #define AFMT_GENERIC1_3 0x70cc #define AFMT_GENERIC1_4 0x70d0 #define AFMT_GENERIC1_5 0x70d4 #define AFMT_GENERIC1_6 0x70d8 #define HDMI_ACR_32_0 0x70dc # define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12) #define HDMI_ACR_32_1 0x70e0 # define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0) #define HDMI_ACR_44_0 0x70e4 # define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12) #define HDMI_ACR_44_1 0x70e8 # define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0) #define HDMI_ACR_48_0 0x70ec # define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12) #define HDMI_ACR_48_1 0x70f0 # define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0) 
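The HDMI_ACR_CTS_* and HDMI_ACR_N_* macros above pack the 20-bit CTS and N halves of the audio clock regeneration pair. A hedged sketch of how a driver might compose the 48 kHz pair before writing HDMI_ACR_48_0/HDMI_ACR_48_1; hdmi_acr_48_pack is a hypothetical helper, not a function from this driver. N = 6144 is the HDMI-recommended value for 48 kHz, and CTS = f_TMDS * N / (128 * f_s), e.g. 74250 at a 74.25 MHz TMDS clock:

#include <stdint.h>

/* Field builders as defined in the header above. */
#define HDMI_ACR_CTS_48(x)	(((x) & 0xfffff) << 12)
#define HDMI_ACR_N_48(x)	(((x) & 0xfffff) << 0)

/* Hypothetical helper: pack the 48 kHz ACR CTS/N register values. */
static inline void hdmi_acr_48_pack(uint32_t tmds_khz,
				    uint32_t *acr0, uint32_t *acr1)
{
	const uint32_t n = 6144;	/* HDMI-recommended N for 48 kHz */
	/* 64-bit intermediate: tmds_khz * 1000 * n overflows 32 bits. */
	uint32_t cts = (uint32_t)(((uint64_t)tmds_khz * 1000 * n) /
				  (128 * 48000));

	*acr0 = HDMI_ACR_CTS_48(cts);	/* value for HDMI_ACR_48_0 */
	*acr1 = HDMI_ACR_N_48(n);	/* value for HDMI_ACR_48_1 */
}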
#define HDMI_ACR_STATUS_0 0x70f4 #define HDMI_ACR_STATUS_1 0x70f8 #define AFMT_AUDIO_INFO0 0x70fc # define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8) # define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11) # define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16) # define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24) #define AFMT_AUDIO_INFO1 0x7100 # define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0) # define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11) # define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15) # define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8) # define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16) #define AFMT_60958_0 0x7104 # define AFMT_60958_CS_A(x) (((x) & 1) << 0) # define AFMT_60958_CS_B(x) (((x) & 1) << 1) # define AFMT_60958_CS_C(x) (((x) & 1) << 2) # define AFMT_60958_CS_D(x) (((x) & 3) << 3) # define AFMT_60958_CS_MODE(x) (((x) & 3) << 6) # define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) # define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) # define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) # define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) # define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) #define AFMT_60958_1 0x7108 # define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) # define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) # define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16) # define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18) # define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) #define AFMT_AUDIO_CRC_CONTROL 0x710c # define AFMT_AUDIO_CRC_EN (1 << 0) #define AFMT_RAMP_CONTROL0 0x7110 # define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) -# define AFMT_RAMP_DATA_SIGN (1 << 31) +# define AFMT_RAMP_DATA_SIGN (1U << 31) #define AFMT_RAMP_CONTROL1 0x7114 # define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0) # define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24) #define AFMT_RAMP_CONTROL2 0x7118 # define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0) #define AFMT_RAMP_CONTROL3 0x711c # define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0) #define AFMT_60958_2 0x7120 # define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0) # define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4) # define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8) # define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12) # define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16) # define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20) #define AFMT_STATUS 0x7128 # define AFMT_AUDIO_ENABLE (1 << 4) # define AFMT_AUDIO_HBR_ENABLE (1 << 8) # define AFMT_AZ_FORMAT_WTRIG (1 << 28) # define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29) # define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30) #define AFMT_AUDIO_PACKET_CONTROL 0x712c # define AFMT_AUDIO_SAMPLE_SEND (1 << 0) # define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */ # define AFMT_AUDIO_TEST_EN (1 << 12) # define AFMT_AUDIO_CHANNEL_SWAP (1 << 24) # define AFMT_60958_CS_UPDATE (1 << 26) # define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27) # define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28) # define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) # define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) #define AFMT_VBI_PACKET_CONTROL 0x7130 # define AFMT_GENERIC0_UPDATE (1 << 2) #define AFMT_INFOFRAME_CONTROL0 0x7134 # define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */ # define AFMT_AUDIO_INFO_UPDATE (1 << 7) # define AFMT_MPEG_INFO_UPDATE (1 << 10) #define AFMT_GENERIC0_7 0x7138 /* DCE4/5 ELD audio 
interface */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */ # define MAX_CHANNELS(x) (((x) & 0x7) << 0) /* max channels minus one. 7 = 8 channels */ # define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8) # define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16) # define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */ /* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO * bit0 = 32 kHz * bit1 = 44.1 kHz * bit2 = 48 kHz * bit3 = 88.2 kHz * bit4 = 96 kHz * bit5 = 176.4 kHz * bit6 = 192 kHz */ #define AZ_HOT_PLUG_CONTROL 0x5e78 # define AZ_FORCE_CODEC_WAKE (1 << 0) # define PIN0_JACK_DETECTION_ENABLE (1 << 4) # define PIN1_JACK_DETECTION_ENABLE (1 << 5) # define PIN2_JACK_DETECTION_ENABLE (1 << 6) # define PIN3_JACK_DETECTION_ENABLE (1 << 7) # define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8) # define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9) # define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10) # define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11) # define CODEC_HOT_PLUG_ENABLE (1 << 12) # define PIN0_AUDIO_ENABLED (1 << 24) # define PIN1_AUDIO_ENABLED (1 << 25) # define PIN2_AUDIO_ENABLED (1 << 26) # define PIN3_AUDIO_ENABLED (1 << 27) -# define AUDIO_ENABLED (1 << 31) +# define AUDIO_ENABLED (1U << 31) #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES_MASK 0x0000FF00 #define INACTIVE_SIMDS(x) ((x) << 16) #define INACTIVE_SIMDS_MASK 0x00FF0000 #define GRBM_CNTL 0x8000 #define GRBM_READ_TIMEOUT(x) ((x) << 0) #define GRBM_SOFT_RESET 0x8020 #define SOFT_RESET_CP (1 << 0) #define SOFT_RESET_CB (1 << 1) #define SOFT_RESET_DB (1 << 3) #define SOFT_RESET_PA (1 << 5) #define SOFT_RESET_SC (1 << 6) #define SOFT_RESET_SPI (1 << 8) #define SOFT_RESET_SH (1 << 9) #define SOFT_RESET_SX (1 << 10) #define SOFT_RESET_TC (1 << 11) #define SOFT_RESET_TA (1 << 12) #define SOFT_RESET_VC (1 << 13) #define SOFT_RESET_VGT (1 << 14) #define GRBM_STATUS 0x8010 #define CMDFIFO_AVAIL_MASK 0x0000000F #define SRBM_RQ_PENDING (1 << 5) #define CF_RQ_PENDING (1 << 7) #define PF_RQ_PENDING (1 << 8) #define GRBM_EE_BUSY (1 << 10) #define SX_CLEAN (1 << 11) #define DB_CLEAN (1 << 12) #define CB_CLEAN (1 << 13) #define TA_BUSY (1 << 14) #define VGT_BUSY_NO_DMA (1 << 16) #define VGT_BUSY (1 << 17) #define SX_BUSY (1 << 20) #define SH_BUSY (1 << 21) #define SPI_BUSY (1 << 22) #define SC_BUSY (1 << 24) #define PA_BUSY (1 << 25) #define DB_BUSY (1 << 26) #define CP_COHERENCY_BUSY (1 << 28) #define CP_BUSY (1 << 29) #define CB_BUSY (1 << 30) -#define GUI_ACTIVE (1 << 31) +#define GUI_ACTIVE (1U << 31) #define 
GRBM_STATUS_SE0 0x8014 #define GRBM_STATUS_SE1 0x8018 #define SE_SX_CLEAN (1 << 0) #define SE_DB_CLEAN (1 << 1) #define SE_CB_CLEAN (1 << 2) #define SE_TA_BUSY (1 << 25) #define SE_SX_BUSY (1 << 26) #define SE_SPI_BUSY (1 << 27) #define SE_SH_BUSY (1 << 28) #define SE_SC_BUSY (1 << 29) #define SE_DB_BUSY (1 << 30) -#define SE_CB_BUSY (1 << 31) +#define SE_CB_BUSY (1U << 31) /* evergreen */ #define CG_THERMAL_CTRL 0x72c #define TOFFSET_MASK 0x00003FE0 #define TOFFSET_SHIFT 5 #define CG_MULT_THERMAL_STATUS 0x740 #define ASIC_T(x) ((x) << 16) #define ASIC_T_MASK 0x07FF0000 #define ASIC_T_SHIFT 16 #define CG_TS0_STATUS 0x760 #define TS0_ADC_DOUT_MASK 0x000003FF #define TS0_ADC_DOUT_SHIFT 0 /* APU */ #define CG_THERMAL_STATUS 0x678 #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_TILING_CONFIG 0x2F3C #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 #define MC_SHARED_CHREMAP 0x2008 #define MC_SHARED_BLACKOUT_CNTL 0x20ac #define BLACKOUT_MODE_MASK 0x00000007 #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000003 #define NOOFRANK_SHIFT 2 #define NOOFRANK_MASK 0x00000004 #define NOOFROWS_SHIFT 3 #define NOOFROWS_MASK 0x00000038 #define NOOFCOLS_SHIFT 6 #define NOOFCOLS_MASK 0x000000C0 #define CHANSIZE_SHIFT 8 #define CHANSIZE_MASK 0x00000100 #define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_MASK 0x00000200 #define CHANSIZE_OVERRIDE (1 << 11) #define FUS_MC_ARB_RAMCFG 0x2768 #define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 #define MC_VM_FB_LOCATION 0x2024 #define MC_FUS_VM_FB_OFFSET 0x2898 #define MC_VM_MB_L1_TLB0_CNTL 0x2234 #define MC_VM_MB_L1_TLB1_CNTL 0x2238 #define MC_VM_MB_L1_TLB2_CNTL 0x223C #define MC_VM_MB_L1_TLB3_CNTL 0x2240 #define ENABLE_L1_TLB (1 << 0) #define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) #define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) #define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) #define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) #define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) #define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) #define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15) #define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18) #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C #define MC_VM_MD_L1_TLB3_CNTL 0x2698 #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 #define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 #define PA_CL_ENHANCE 0x8A14 #define CLIP_VTX_REORDER_ENA (1 << 0) #define NUM_CLIP_SEQ(x) ((x) << 1) #define PA_SC_ENHANCE 0x8BF0 #define PA_SC_AA_CONFIG 0x28C04 #define MSAA_NUM_SAMPLES_SHIFT 0 #define MSAA_NUM_SAMPLES_MASK 0x3 #define PA_SC_CLIPRECT_RULE 0x2820C #define PA_SC_EDGERULE 0x28230 #define PA_SC_FIFO_SIZE 0x8BCC #define SC_PRIM_FIFO_SIZE(x) ((x) << 0) #define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) #define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) #define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) #define PA_SC_LINE_STIPPLE 0x28A0C #define PA_SU_LINE_STIPPLE_VALUE 0x8A60 #define PA_SC_LINE_STIPPLE_STATE 0x8B10 #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define 
SCRATCH_REG2 0x8508 #define SCRATCH_REG3 0x850C #define SCRATCH_REG4 0x8510 #define SCRATCH_REG5 0x8514 #define SCRATCH_REG6 0x8518 #define SCRATCH_REG7 0x851C #define SCRATCH_UMSK 0x8540 #define SCRATCH_ADDR 0x8544 #define SMX_SAR_CTL0 0xA008 #define SMX_DC_CTL0 0xA020 #define USE_HASH_FUNCTION (1 << 0) #define NUMBER_OF_SETS(x) ((x) << 1) #define FLUSH_ALL_ON_EVENT (1 << 10) #define STALL_ON_EVENT (1 << 11) #define SMX_EVENT_CTL 0xA02C #define ES_FLUSH_CTL(x) ((x) << 0) #define GS_FLUSH_CTL(x) ((x) << 3) #define ACK_FLUSH_CTL(x) ((x) << 6) #define SYNC_FLUSH_CTL (1 << 8) #define SPI_CONFIG_CNTL 0x9100 #define GPR_WRITE_PRIORITY(x) ((x) << 0) #define SPI_CONFIG_CNTL_1 0x913C #define VTX_DONE_DELAY(x) ((x) << 0) #define INTERP_ONE_PRIM_PER_ROW (1 << 4) #define SPI_INPUT_Z 0x286D8 #define SPI_PS_IN_CONTROL_0 0x286CC #define NUM_INTERP(x) ((x)<<0) #define POSITION_ENA (1<<8) #define POSITION_CENTROID (1<<9) #define POSITION_ADDR(x) ((x)<<10) #define PARAM_GEN(x) ((x)<<15) #define PARAM_GEN_ADDR(x) ((x)<<19) #define BARYC_SAMPLE_CNTL(x) ((x)<<26) #define PERSP_GRADIENT_ENA (1<<28) #define LINEAR_GRADIENT_ENA (1<<29) #define POSITION_SAMPLE (1<<30) #define BARYC_AT_SAMPLE_ENA (1<<31) #define SQ_CONFIG 0x8C00 #define VC_ENABLE (1 << 0) #define EXPORT_SRC_C (1 << 1) #define CS_PRIO(x) ((x) << 18) #define LS_PRIO(x) ((x) << 20) #define HS_PRIO(x) ((x) << 22) #define PS_PRIO(x) ((x) << 24) #define VS_PRIO(x) ((x) << 26) #define GS_PRIO(x) ((x) << 28) #define ES_PRIO(x) ((x) << 30) #define SQ_GPR_RESOURCE_MGMT_1 0x8C04 #define NUM_PS_GPRS(x) ((x) << 0) #define NUM_VS_GPRS(x) ((x) << 16) #define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) #define SQ_GPR_RESOURCE_MGMT_2 0x8C08 #define NUM_GS_GPRS(x) ((x) << 0) #define NUM_ES_GPRS(x) ((x) << 16) #define SQ_GPR_RESOURCE_MGMT_3 0x8C0C #define NUM_HS_GPRS(x) ((x) << 0) #define NUM_LS_GPRS(x) ((x) << 16) #define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10 #define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14 #define SQ_THREAD_RESOURCE_MGMT 0x8C18 #define NUM_PS_THREADS(x) ((x) << 0) #define NUM_VS_THREADS(x) ((x) << 8) #define NUM_GS_THREADS(x) ((x) << 16) #define NUM_ES_THREADS(x) ((x) << 24) #define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C #define NUM_HS_THREADS(x) ((x) << 0) #define NUM_LS_THREADS(x) ((x) << 8) #define SQ_STACK_RESOURCE_MGMT_1 0x8C20 #define NUM_PS_STACK_ENTRIES(x) ((x) << 0) #define NUM_VS_STACK_ENTRIES(x) ((x) << 16) #define SQ_STACK_RESOURCE_MGMT_2 0x8C24 #define NUM_GS_STACK_ENTRIES(x) ((x) << 0) #define NUM_ES_STACK_ENTRIES(x) ((x) << 16) #define SQ_STACK_RESOURCE_MGMT_3 0x8C28 #define NUM_HS_STACK_ENTRIES(x) ((x) << 0) #define NUM_LS_STACK_ENTRIES(x) ((x) << 16) #define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C #define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94 #define SQ_STATIC_THREAD_MGMT_1 0x8E20 #define SQ_STATIC_THREAD_MGMT_2 0x8E24 #define SQ_STATIC_THREAD_MGMT_3 0x8E28 #define SQ_LDS_RESOURCE_MGMT 0x8E2C #define SQ_MS_FIFO_SIZES 0x8CF0 #define CACHE_FIFO_SIZE(x) ((x) << 0) #define FETCH_FIFO_HIWATER(x) ((x) << 8) #define DONE_FIFO_HIWATER(x) ((x) << 16) #define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) #define SX_DEBUG_1 0x9058 #define ENABLE_NEW_SMX_ADDRESS (1 << 16) #define SX_EXPORT_BUFFER_SIZES 0x900C #define COLOR_BUFFER_SIZE(x) ((x) << 0) #define POSITION_BUFFER_SIZE(x) ((x) << 8) #define SMX_BUFFER_SIZE(x) ((x) << 16) #define SX_MEMORY_EXPORT_BASE 0x9010 #define SX_MISC 0x28350 #define CB_PERF_CTR0_SEL_0 0x9A20 #define CB_PERF_CTR0_SEL_1 0x9A24 #define CB_PERF_CTR1_SEL_0 0x9A28 #define CB_PERF_CTR1_SEL_1 0x9A2C #define CB_PERF_CTR2_SEL_0 0x9A30 #define 
CB_PERF_CTR2_SEL_1 0x9A34 #define CB_PERF_CTR3_SEL_0 0x9A38 #define CB_PERF_CTR3_SEL_1 0x9A3C #define TA_CNTL_AUX 0x9508 #define DISABLE_CUBE_WRAP (1 << 0) #define DISABLE_CUBE_ANISO (1 << 1) #define SYNC_GRADIENT (1 << 24) #define SYNC_WALKER (1 << 25) #define SYNC_ALIGNER (1 << 26) #define TCP_CHAN_STEER_LO 0x960c #define TCP_CHAN_STEER_HI 0x9610 #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x) << 0) #define VC_ONLY 0 #define TC_ONLY 1 #define VC_AND_TC 2 #define AUTO_INVLD_EN(x) ((x) << 6) #define NO_AUTO 0 #define ES_AUTO 1 #define GS_AUTO 2 #define ES_AND_GS_AUTO 3 #define VGT_GS_VERTEX_REUSE 0x88D4 #define VGT_NUM_INSTANCES 0x8974 #define VGT_OUT_DEALLOC_CNTL 0x28C5C #define DEALLOC_DIST_MASK 0x0000007F #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 #define VTX_REUSE_DEPTH_MASK 0x000000FF #define VM_CONTEXT0_CNTL 0x1410 #define ENABLE_CONTEXT (1 << 0) #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) #define VM_CONTEXT1_CNTL 0x1414 #define VM_CONTEXT1_CNTL2 0x1434 #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 #define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 #define REQUEST_TYPE(x) (((x) & 0xf) << 0) #define RESPONSE_TYPE_MASK 0x000000F0 #define RESPONSE_TYPE_SHIFT 4 #define VM_L2_CNTL 0x1400 #define ENABLE_L2_CACHE (1 << 0) #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) #define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) #define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14) #define VM_L2_CNTL2 0x1404 #define INVALIDATE_ALL_L1_TLBS (1 << 0) #define INVALIDATE_L2_CACHE (1 << 1) #define VM_L2_CNTL3 0x1408 #define BANK_SELECT(x) ((x) << 0) #define CACHE_UPDATE_MODE(x) ((x) << 6) #define VM_L2_STATUS 0x140C #define L2_BUSY (1 << 0) #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC #define WAIT_UNTIL 0x8040 #define SRBM_STATUS 0x0E50 #define SRBM_SOFT_RESET 0x0E60 #define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6 #define SOFT_RESET_BIF (1 << 1) #define SOFT_RESET_CG (1 << 2) #define SOFT_RESET_DC (1 << 5) #define SOFT_RESET_GRBM (1 << 8) #define SOFT_RESET_HDP (1 << 9) #define SOFT_RESET_IH (1 << 10) #define SOFT_RESET_MC (1 << 11) #define SOFT_RESET_RLC (1 << 13) #define SOFT_RESET_ROM (1 << 14) #define SOFT_RESET_SEM (1 << 15) #define SOFT_RESET_VMC (1 << 17) #define SOFT_RESET_DMA (1 << 20) #define SOFT_RESET_TST (1 << 21) #define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) /* display watermarks */ #define DC_LB_MEMORY_SPLIT 0x6b0c #define PRIORITY_A_CNT 0x6b18 #define PRIORITY_MARK_MASK 0x7fff #define PRIORITY_OFF (1 << 16) #define PRIORITY_ALWAYS_ON (1 << 20) #define PRIORITY_B_CNT 0x6b1c #define PIPE0_ARBITRATION_CONTROL3 0x0bf0 # define LATENCY_WATERMARK_MASK(x) ((x) << 16) #define PIPE0_LATENCY_CONTROL 0x0bf4 # define LATENCY_LOW_WATERMARK(x) ((x) << 0) # define LATENCY_HIGH_WATERMARK(x) ((x) << 16) #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ # define IH_RB_FULL_DRAIN_ENABLE (1 << 6) # define IH_WPTR_WRITEBACK_ENABLE (1 << 8) # define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ # define IH_WPTR_OVERFLOW_ENABLE (1 << 16) -# define IH_WPTR_OVERFLOW_CLEAR (1 << 31) +# define IH_WPTR_OVERFLOW_CLEAR (1U << 31) #define IH_RB_BASE 0x3e04 #define IH_RB_RPTR 0x3e08 #define IH_RB_WPTR 0x3e0c # define RB_OVERFLOW (1 << 0) # define WPTR_OFFSET_MASK 
0x3fffc #define IH_RB_WPTR_ADDR_HI 0x3e10 #define IH_RB_WPTR_ADDR_LO 0x3e14 #define IH_CNTL 0x3e18 # define ENABLE_INTR (1 << 0) # define IH_MC_SWAP(x) ((x) << 1) # define IH_MC_SWAP_NONE 0 # define IH_MC_SWAP_16BIT 1 # define IH_MC_SWAP_32BIT 2 # define IH_MC_SWAP_64BIT 3 # define RPTR_REARM (1 << 4) # define MC_WRREQ_CREDIT(x) ((x) << 15) # define MC_WR_CLEAN_CNT(x) ((x) << 20) #define CP_INT_CNTL 0xc124 # define CNTX_BUSY_INT_ENABLE (1 << 19) # define CNTX_EMPTY_INT_ENABLE (1 << 20) # define SCRATCH_INT_ENABLE (1 << 25) # define TIME_STAMP_INT_ENABLE (1 << 26) # define IB2_INT_ENABLE (1 << 29) # define IB1_INT_ENABLE (1 << 30) -# define RB_INT_ENABLE (1 << 31) +# define RB_INT_ENABLE (1U << 31) #define CP_INT_STATUS 0xc128 # define SCRATCH_INT_STAT (1 << 25) # define TIME_STAMP_INT_STAT (1 << 26) # define IB2_INT_STAT (1 << 29) # define IB1_INT_STAT (1 << 30) -# define RB_INT_STAT (1 << 31) +# define RB_INT_STAT (1U << 31) #define GRBM_INT_CNTL 0x8060 # define RDERR_INT_ENABLE (1 << 0) # define GUI_IDLE_INT_ENABLE (1 << 19) /* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ #define CRTC_STATUS_FRAME_COUNT 0x6e98 /* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */ #define VLINE_STATUS 0x6bb8 # define VLINE_OCCURRED (1 << 0) # define VLINE_ACK (1 << 4) # define VLINE_STAT (1 << 12) # define VLINE_INTERRUPT (1 << 16) # define VLINE_INTERRUPT_TYPE (1 << 17) /* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */ #define VBLANK_STATUS 0x6bbc # define VBLANK_OCCURRED (1 << 0) # define VBLANK_ACK (1 << 4) # define VBLANK_STAT (1 << 12) # define VBLANK_INTERRUPT (1 << 16) # define VBLANK_INTERRUPT_TYPE (1 << 17) /* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */ #define INT_MASK 0x6b40 # define VBLANK_INT_MASK (1 << 0) # define VLINE_INT_MASK (1 << 4) #define DISP_INTERRUPT_STATUS 0x60f4 # define LB_D1_VLINE_INTERRUPT (1 << 2) # define LB_D1_VBLANK_INTERRUPT (1 << 3) # define DC_HPD1_INTERRUPT (1 << 17) # define DC_HPD1_RX_INTERRUPT (1 << 18) # define DACA_AUTODETECT_INTERRUPT (1 << 22) # define DACB_AUTODETECT_INTERRUPT (1 << 23) # define DC_I2C_SW_DONE_INTERRUPT (1 << 24) # define DC_I2C_HW_DONE_INTERRUPT (1 << 25) #define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8 # define LB_D2_VLINE_INTERRUPT (1 << 2) # define LB_D2_VBLANK_INTERRUPT (1 << 3) # define DC_HPD2_INTERRUPT (1 << 17) # define DC_HPD2_RX_INTERRUPT (1 << 18) # define DISP_TIMER_INTERRUPT (1 << 24) #define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc # define LB_D3_VLINE_INTERRUPT (1 << 2) # define LB_D3_VBLANK_INTERRUPT (1 << 3) # define DC_HPD3_INTERRUPT (1 << 17) # define DC_HPD3_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100 # define LB_D4_VLINE_INTERRUPT (1 << 2) # define LB_D4_VBLANK_INTERRUPT (1 << 3) # define DC_HPD4_INTERRUPT (1 << 17) # define DC_HPD4_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c # define LB_D5_VLINE_INTERRUPT (1 << 2) # define LB_D5_VBLANK_INTERRUPT (1 << 3) # define DC_HPD5_INTERRUPT (1 << 17) # define DC_HPD5_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150 # define LB_D6_VLINE_INTERRUPT (1 << 2) # define LB_D6_VBLANK_INTERRUPT (1 << 3) # define DC_HPD6_INTERRUPT (1 << 17) # define DC_HPD6_RX_INTERRUPT (1 << 18) /* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ #define GRPH_INT_STATUS 0x6858 # define GRPH_PFLIP_INT_OCCURRED (1 << 0) # define GRPH_PFLIP_INT_CLEAR (1 << 8) /* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ #define GRPH_INT_CONTROL 0x685c # define GRPH_PFLIP_INT_MASK (1 << 0) # define 
GRPH_PFLIP_INT_TYPE (1 << 8) #define DACA_AUTODETECT_INT_CONTROL 0x66c8 #define DACB_AUTODETECT_INT_CONTROL 0x67c8 #define DC_HPD1_INT_STATUS 0x601c #define DC_HPD2_INT_STATUS 0x6028 #define DC_HPD3_INT_STATUS 0x6034 #define DC_HPD4_INT_STATUS 0x6040 #define DC_HPD5_INT_STATUS 0x604c #define DC_HPD6_INT_STATUS 0x6058 # define DC_HPDx_INT_STATUS (1 << 0) # define DC_HPDx_SENSE (1 << 1) # define DC_HPDx_RX_INT_STATUS (1 << 8) #define DC_HPD1_INT_CONTROL 0x6020 #define DC_HPD2_INT_CONTROL 0x602c #define DC_HPD3_INT_CONTROL 0x6038 #define DC_HPD4_INT_CONTROL 0x6044 #define DC_HPD5_INT_CONTROL 0x6050 #define DC_HPD6_INT_CONTROL 0x605c # define DC_HPDx_INT_ACK (1 << 0) # define DC_HPDx_INT_POLARITY (1 << 8) # define DC_HPDx_INT_EN (1 << 16) # define DC_HPDx_RX_INT_ACK (1 << 20) # define DC_HPDx_RX_INT_EN (1 << 24) #define DC_HPD1_CONTROL 0x6024 #define DC_HPD2_CONTROL 0x6030 #define DC_HPD3_CONTROL 0x603c #define DC_HPD4_CONTROL 0x6048 #define DC_HPD5_CONTROL 0x6054 #define DC_HPD6_CONTROL 0x6060 # define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) # define DC_HPDx_EN (1 << 28) /* ASYNC DMA */ #define DMA_RB_RPTR 0xd008 #define DMA_RB_WPTR 0xd00c #define DMA_CNTL 0xd02c # define TRAP_ENABLE (1 << 0) # define SEM_INCOMPLETE_INT_ENABLE (1 << 1) # define SEM_WAIT_INT_ENABLE (1 << 2) # define DATA_SWAP_ENABLE (1 << 3) # define FENCE_SWAP_ENABLE (1 << 4) # define CTXEMPTY_INT_ENABLE (1 << 28) #define DMA_TILING_CONFIG 0xD0B8 #define CAYMAN_DMA1_CNTL 0xd82c /* async DMA packets */ #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ (((t) & 0x1) << 23) | \ (((s) & 0x1) << 22) | \ (((n) & 0xFFFFF) << 0)) /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 #define DMA_PACKET_INDIRECT_BUFFER 0x4 #define DMA_PACKET_SEMAPHORE 0x5 #define DMA_PACKET_FENCE 0x6 #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_SRBM_WRITE 0x9 #define DMA_PACKET_CONSTANT_FILL 0xd #define DMA_PACKET_NOP 0xf /* PCIE link stuff */ #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ # define LC_LINK_WIDTH_SHIFT 0 # define LC_LINK_WIDTH_MASK 0x7 # define LC_LINK_WIDTH_X0 0 # define LC_LINK_WIDTH_X1 1 # define LC_LINK_WIDTH_X2 2 # define LC_LINK_WIDTH_X4 3 # define LC_LINK_WIDTH_X8 4 # define LC_LINK_WIDTH_X16 6 # define LC_LINK_WIDTH_RD_SHIFT 4 # define LC_LINK_WIDTH_RD_MASK 0x70 # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) # define LC_RECONFIG_NOW (1 << 8) # define LC_RENEGOTIATION_SUPPORT (1 << 9) # define LC_RENEGOTIATE_EN (1 << 10) # define LC_SHORT_RECONFIG_EN (1 << 11) # define LC_UPCONFIGURE_SUPPORT (1 << 12) # define LC_UPCONFIGURE_DIS (1 << 13) #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ # define LC_GEN2_EN_STRAP (1 << 0) # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 # define LC_CURRENT_DATA_RATE (1 << 11) # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) #define MM_CFGREGS_CNTL 0x544c # define MM_WR_TO_CFG_EN (1 << 3) #define LINK_CNTL2 0x88 /* F0 */ # define TARGET_LINK_SPEED_MASK (0xf << 0) # define SELECTABLE_DEEMPHASIS (1 << 6) /* * PM4 */ #define PACKET_TYPE0 0 #define PACKET_TYPE1 1 #define PACKET_TYPE2 2 #define PACKET_TYPE3 3 #define 
CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ (((reg) >> 2) & 0xFFFF) | \ ((n) & 0x3FFF) << 16) #define CP_PACKET2 0x80000000 #define PACKET2_PAD_SHIFT 0 #define PACKET2_PAD_MASK (0x3fffffff << 0) #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ (((op) & 0xFF) << 8) | \ ((n) & 0x3FFF) << 16) /* Packet 3 types */ #define PACKET3_NOP 0x10 #define PACKET3_SET_BASE 0x11 #define PACKET3_CLEAR_STATE 0x12 #define PACKET3_INDEX_BUFFER_SIZE 0x13 #define PACKET3_DISPATCH_DIRECT 0x15 #define PACKET3_DISPATCH_INDIRECT 0x16 #define PACKET3_INDIRECT_BUFFER_END 0x17 #define PACKET3_MODE_CONTROL 0x18 #define PACKET3_SET_PREDICATION 0x20 #define PACKET3_REG_RMW 0x21 #define PACKET3_COND_EXEC 0x22 #define PACKET3_PRED_EXEC 0x23 #define PACKET3_DRAW_INDIRECT 0x24 #define PACKET3_DRAW_INDEX_INDIRECT 0x25 #define PACKET3_INDEX_BASE 0x26 #define PACKET3_DRAW_INDEX_2 0x27 #define PACKET3_CONTEXT_CONTROL 0x28 #define PACKET3_DRAW_INDEX_OFFSET 0x29 #define PACKET3_INDEX_TYPE 0x2A #define PACKET3_DRAW_INDEX 0x2B #define PACKET3_DRAW_INDEX_AUTO 0x2D #define PACKET3_DRAW_INDEX_IMMD 0x2E #define PACKET3_NUM_INSTANCES 0x2F #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 #define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 #define PACKET3_MEM_SEMAPHORE 0x39 #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_COPY_DW 0x3B #define PACKET3_WAIT_REG_MEM 0x3C #define PACKET3_MEM_WRITE 0x3D #define PACKET3_INDIRECT_BUFFER 0x32 #define PACKET3_CP_DMA 0x41 /* 1. header * 2. SRC_ADDR_LO or DATA [31:0] * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] | * SRC_ADDR_HI [7:0] * 4. DST_ADDR_LO [31:0] * 5. DST_ADDR_HI [7:0] * 6. 
COMMAND [29:22] | BYTE_COUNT [20:0] */ # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) /* 0 - SRC_ADDR * 1 - GDS */ # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) /* 0 - ME * 1 - PFP */ # define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29) /* 0 - SRC_ADDR * 1 - GDS * 2 - DATA */ -# define PACKET3_CP_DMA_CP_SYNC (1 << 31) +# define PACKET3_CP_DMA_CP_SYNC (1U << 31) /* COMMAND */ # define PACKET3_CP_DMA_DIS_WC (1 << 21) # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) /* 0 - none * 1 - 8 in 16 * 2 - 8 in 32 * 3 - 8 in 64 */ # define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24) /* 0 - none * 1 - 8 in 16 * 2 - 8 in 32 * 3 - 8 in 64 */ # define PACKET3_CP_DMA_CMD_SAS (1 << 26) /* 0 - memory * 1 - register */ # define PACKET3_CP_DMA_CMD_DAS (1 << 27) /* 0 - memory * 1 - register */ # define PACKET3_CP_DMA_CMD_SAIC (1 << 28) # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) # define PACKET3_DB_DEST_BASE_ENA (1 << 14) # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) # define PACKET3_CB11_DEST_BASE_ENA (1 << 18) # define PACKET3_FULL_CACHE_ENA (1 << 20) # define PACKET3_TC_ACTION_ENA (1 << 23) # define PACKET3_VC_ACTION_ENA (1 << 24) # define PACKET3_CB_ACTION_ENA (1 << 25) # define PACKET3_DB_ACTION_ENA (1 << 26) # define PACKET3_SH_ACTION_ENA (1 << 27) # define PACKET3_SX_ACTION_ENA (1 << 28) #define PACKET3_ME_INITIALIZE 0x44 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 #define PACKET3_EVENT_WRITE 0x46 #define PACKET3_EVENT_WRITE_EOP 0x47 #define PACKET3_EVENT_WRITE_EOS 0x48 #define PACKET3_PREAMBLE_CNTL 0x4A # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) #define PACKET3_RB_OFFSET 0x4B #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D #define PACKET3_ALU_PS_CONST_UPDATE 0x4E #define PACKET3_ALU_VS_CONST_UPDATE 0x4F #define PACKET3_ONE_REG_WRITE 0x57 #define PACKET3_SET_CONFIG_REG 0x68 #define PACKET3_SET_CONFIG_REG_START 0x00008000 #define PACKET3_SET_CONFIG_REG_END 0x0000ac00 #define PACKET3_SET_CONTEXT_REG 0x69 #define PACKET3_SET_CONTEXT_REG_START 0x00028000 #define PACKET3_SET_CONTEXT_REG_END 0x00029000 #define PACKET3_SET_ALU_CONST 0x6A /* alu const buffers only; no reg file */ #define PACKET3_SET_BOOL_CONST 0x6B #define PACKET3_SET_BOOL_CONST_START 0x0003a500 #define PACKET3_SET_BOOL_CONST_END 0x0003a518 #define PACKET3_SET_LOOP_CONST 0x6C #define PACKET3_SET_LOOP_CONST_START 0x0003a200 #define PACKET3_SET_LOOP_CONST_END 0x0003a500 #define PACKET3_SET_RESOURCE 0x6D #define PACKET3_SET_RESOURCE_START 0x00030000 #define PACKET3_SET_RESOURCE_END 0x00038000 #define PACKET3_SET_SAMPLER 0x6E #define PACKET3_SET_SAMPLER_START 0x0003c000 #define PACKET3_SET_SAMPLER_END 0x0003c600 #define PACKET3_SET_CTL_CONST 0x6F #define PACKET3_SET_CTL_CONST_START 0x0003cff0 #define PACKET3_SET_CTL_CONST_END 0x0003ff0c #define PACKET3_SET_RESOURCE_OFFSET 0x70 #define PACKET3_SET_ALU_CONST_VS 0x71 #define PACKET3_SET_ALU_CONST_DI 0x72 #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 #define PACKET3_SET_RESOURCE_INDIRECT 
0x74 #define PACKET3_SET_APPEND_CNT 0x75 #define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c #define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30) #define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3) #define SQ_TEX_VTX_INVALID_TEXTURE 0x0 #define SQ_TEX_VTX_INVALID_BUFFER 0x1 #define SQ_TEX_VTX_VALID_TEXTURE 0x2 #define SQ_TEX_VTX_VALID_BUFFER 0x3 #define VGT_VTX_VECT_EJECT_REG 0x88b0 #define SQ_CONST_MEM_BASE 0x8df8 #define SQ_ESGS_RING_BASE 0x8c40 #define SQ_ESGS_RING_SIZE 0x8c44 #define SQ_GSVS_RING_BASE 0x8c48 #define SQ_GSVS_RING_SIZE 0x8c4c #define SQ_ESTMP_RING_BASE 0x8c50 #define SQ_ESTMP_RING_SIZE 0x8c54 #define SQ_GSTMP_RING_BASE 0x8c58 #define SQ_GSTMP_RING_SIZE 0x8c5c #define SQ_VSTMP_RING_BASE 0x8c60 #define SQ_VSTMP_RING_SIZE 0x8c64 #define SQ_PSTMP_RING_BASE 0x8c68 #define SQ_PSTMP_RING_SIZE 0x8c6c #define SQ_LSTMP_RING_BASE 0x8e10 #define SQ_LSTMP_RING_SIZE 0x8e14 #define SQ_HSTMP_RING_BASE 0x8e18 #define SQ_HSTMP_RING_SIZE 0x8e1c #define VGT_TF_RING_SIZE 0x8988 #define SQ_ESGS_RING_ITEMSIZE 0x28900 #define SQ_GSVS_RING_ITEMSIZE 0x28904 #define SQ_ESTMP_RING_ITEMSIZE 0x28908 #define SQ_GSTMP_RING_ITEMSIZE 0x2890c #define SQ_VSTMP_RING_ITEMSIZE 0x28910 #define SQ_PSTMP_RING_ITEMSIZE 0x28914 #define SQ_LSTMP_RING_ITEMSIZE 0x28830 #define SQ_HSTMP_RING_ITEMSIZE 0x28834 #define SQ_GS_VERT_ITEMSIZE 0x2891c #define SQ_GS_VERT_ITEMSIZE_1 0x28920 #define SQ_GS_VERT_ITEMSIZE_2 0x28924 #define SQ_GS_VERT_ITEMSIZE_3 0x28928 #define SQ_GSVS_RING_OFFSET_1 0x2892c #define SQ_GSVS_RING_OFFSET_2 0x28930 #define SQ_GSVS_RING_OFFSET_3 0x28934 #define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140 #define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80 #define SQ_ALU_CONST_CACHE_PS_0 0x28940 #define SQ_ALU_CONST_CACHE_PS_1 0x28944 #define SQ_ALU_CONST_CACHE_PS_2 0x28948 #define SQ_ALU_CONST_CACHE_PS_3 0x2894c #define SQ_ALU_CONST_CACHE_PS_4 0x28950 #define SQ_ALU_CONST_CACHE_PS_5 0x28954 #define SQ_ALU_CONST_CACHE_PS_6 0x28958 #define SQ_ALU_CONST_CACHE_PS_7 0x2895c #define SQ_ALU_CONST_CACHE_PS_8 0x28960 #define SQ_ALU_CONST_CACHE_PS_9 0x28964 #define SQ_ALU_CONST_CACHE_PS_10 0x28968 #define SQ_ALU_CONST_CACHE_PS_11 0x2896c #define SQ_ALU_CONST_CACHE_PS_12 0x28970 #define SQ_ALU_CONST_CACHE_PS_13 0x28974 #define SQ_ALU_CONST_CACHE_PS_14 0x28978 #define SQ_ALU_CONST_CACHE_PS_15 0x2897c #define SQ_ALU_CONST_CACHE_VS_0 0x28980 #define SQ_ALU_CONST_CACHE_VS_1 0x28984 #define SQ_ALU_CONST_CACHE_VS_2 0x28988 #define SQ_ALU_CONST_CACHE_VS_3 0x2898c #define SQ_ALU_CONST_CACHE_VS_4 0x28990 #define SQ_ALU_CONST_CACHE_VS_5 0x28994 #define SQ_ALU_CONST_CACHE_VS_6 0x28998 #define SQ_ALU_CONST_CACHE_VS_7 0x2899c #define SQ_ALU_CONST_CACHE_VS_8 0x289a0 #define SQ_ALU_CONST_CACHE_VS_9 0x289a4 #define SQ_ALU_CONST_CACHE_VS_10 0x289a8 #define SQ_ALU_CONST_CACHE_VS_11 0x289ac #define SQ_ALU_CONST_CACHE_VS_12 0x289b0 #define SQ_ALU_CONST_CACHE_VS_13 0x289b4 #define SQ_ALU_CONST_CACHE_VS_14 0x289b8 #define SQ_ALU_CONST_CACHE_VS_15 0x289bc #define SQ_ALU_CONST_CACHE_GS_0 0x289c0 #define SQ_ALU_CONST_CACHE_GS_1 0x289c4 #define SQ_ALU_CONST_CACHE_GS_2 0x289c8 #define SQ_ALU_CONST_CACHE_GS_3 0x289cc #define SQ_ALU_CONST_CACHE_GS_4 0x289d0 #define SQ_ALU_CONST_CACHE_GS_5 0x289d4 #define SQ_ALU_CONST_CACHE_GS_6 0x289d8 #define SQ_ALU_CONST_CACHE_GS_7 0x289dc #define SQ_ALU_CONST_CACHE_GS_8 0x289e0 #define SQ_ALU_CONST_CACHE_GS_9 0x289e4 #define SQ_ALU_CONST_CACHE_GS_10 0x289e8 #define SQ_ALU_CONST_CACHE_GS_11 0x289ec #define SQ_ALU_CONST_CACHE_GS_12 0x289f0 #define SQ_ALU_CONST_CACHE_GS_13 0x289f4 #define SQ_ALU_CONST_CACHE_GS_14 0x289f8 #define 
SQ_ALU_CONST_CACHE_GS_15 0x289fc #define SQ_ALU_CONST_CACHE_HS_0 0x28f00 #define SQ_ALU_CONST_CACHE_HS_1 0x28f04 #define SQ_ALU_CONST_CACHE_HS_2 0x28f08 #define SQ_ALU_CONST_CACHE_HS_3 0x28f0c #define SQ_ALU_CONST_CACHE_HS_4 0x28f10 #define SQ_ALU_CONST_CACHE_HS_5 0x28f14 #define SQ_ALU_CONST_CACHE_HS_6 0x28f18 #define SQ_ALU_CONST_CACHE_HS_7 0x28f1c #define SQ_ALU_CONST_CACHE_HS_8 0x28f20 #define SQ_ALU_CONST_CACHE_HS_9 0x28f24 #define SQ_ALU_CONST_CACHE_HS_10 0x28f28 #define SQ_ALU_CONST_CACHE_HS_11 0x28f2c #define SQ_ALU_CONST_CACHE_HS_12 0x28f30 #define SQ_ALU_CONST_CACHE_HS_13 0x28f34 #define SQ_ALU_CONST_CACHE_HS_14 0x28f38 #define SQ_ALU_CONST_CACHE_HS_15 0x28f3c #define SQ_ALU_CONST_CACHE_LS_0 0x28f40 #define SQ_ALU_CONST_CACHE_LS_1 0x28f44 #define SQ_ALU_CONST_CACHE_LS_2 0x28f48 #define SQ_ALU_CONST_CACHE_LS_3 0x28f4c #define SQ_ALU_CONST_CACHE_LS_4 0x28f50 #define SQ_ALU_CONST_CACHE_LS_5 0x28f54 #define SQ_ALU_CONST_CACHE_LS_6 0x28f58 #define SQ_ALU_CONST_CACHE_LS_7 0x28f5c #define SQ_ALU_CONST_CACHE_LS_8 0x28f60 #define SQ_ALU_CONST_CACHE_LS_9 0x28f64 #define SQ_ALU_CONST_CACHE_LS_10 0x28f68 #define SQ_ALU_CONST_CACHE_LS_11 0x28f6c #define SQ_ALU_CONST_CACHE_LS_12 0x28f70 #define SQ_ALU_CONST_CACHE_LS_13 0x28f74 #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c #define PA_SC_SCREEN_SCISSOR_TL 0x28030 #define PA_SC_GENERIC_SCISSOR_TL 0x28240 #define PA_SC_WINDOW_SCISSOR_TL 0x28204 #define VGT_PRIMITIVE_TYPE 0x8958 #define VGT_INDEX_TYPE 0x895C #define VGT_NUM_INDICES 0x8970 #define VGT_COMPUTE_DIM_X 0x8990 #define VGT_COMPUTE_DIM_Y 0x8994 #define VGT_COMPUTE_DIM_Z 0x8998 #define VGT_COMPUTE_START_X 0x899C #define VGT_COMPUTE_START_Y 0x89A0 #define VGT_COMPUTE_START_Z 0x89A4 #define VGT_COMPUTE_INDEX 0x89A8 #define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC #define VGT_HS_OFFCHIP_PARAM 0x89B0 #define DB_DEBUG 0x9830 #define DB_DEBUG2 0x9834 #define DB_DEBUG3 0x9838 #define DB_DEBUG4 0x983C #define DB_WATERMARKS 0x9854 #define DB_DEPTH_CONTROL 0x28800 #define R_028800_DB_DEPTH_CONTROL 0x028800 #define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0) #define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1) #define C_028800_STENCIL_ENABLE 0xFFFFFFFE #define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1) #define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1) #define C_028800_Z_ENABLE 0xFFFFFFFD #define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2) #define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1) #define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB #define S_028800_ZFUNC(x) (((x) & 0x7) << 4) #define G_028800_ZFUNC(x) (((x) >> 4) & 0x7) #define C_028800_ZFUNC 0xFFFFFF8F #define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7) #define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1) #define C_028800_BACKFACE_ENABLE 0xFFFFFF7F #define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8) #define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7) #define C_028800_STENCILFUNC 0xFFFFF8FF #define V_028800_STENCILFUNC_NEVER 0x00000000 #define V_028800_STENCILFUNC_LESS 0x00000001 #define V_028800_STENCILFUNC_EQUAL 0x00000002 #define V_028800_STENCILFUNC_LEQUAL 0x00000003 #define V_028800_STENCILFUNC_GREATER 0x00000004 #define V_028800_STENCILFUNC_NOTEQUAL 0x00000005 #define V_028800_STENCILFUNC_GEQUAL 0x00000006 #define V_028800_STENCILFUNC_ALWAYS 0x00000007 #define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11) #define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7) #define C_028800_STENCILFAIL 0xFFFFC7FF #define V_028800_STENCIL_KEEP 0x00000000 #define V_028800_STENCIL_ZERO 0x00000001 #define V_028800_STENCIL_REPLACE 
0x00000002 #define V_028800_STENCIL_INCR 0x00000003 #define V_028800_STENCIL_DECR 0x00000004 #define V_028800_STENCIL_INVERT 0x00000005 #define V_028800_STENCIL_INCR_WRAP 0x00000006 #define V_028800_STENCIL_DECR_WRAP 0x00000007 #define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14) #define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7) #define C_028800_STENCILZPASS 0xFFFE3FFF #define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17) #define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7) #define C_028800_STENCILZFAIL 0xFFF1FFFF #define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20) #define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7) #define C_028800_STENCILFUNC_BF 0xFF8FFFFF #define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23) #define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7) #define C_028800_STENCILFAIL_BF 0xFC7FFFFF #define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26) #define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7) #define C_028800_STENCILZPASS_BF 0xE3FFFFFF #define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29) #define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7) #define C_028800_STENCILZFAIL_BF 0x1FFFFFFF #define DB_DEPTH_VIEW 0x28008 #define R_028008_DB_DEPTH_VIEW 0x00028008 #define S_028008_SLICE_START(x) (((x) & 0x7FF) << 0) #define G_028008_SLICE_START(x) (((x) >> 0) & 0x7FF) #define C_028008_SLICE_START 0xFFFFF800 #define S_028008_SLICE_MAX(x) (((x) & 0x7FF) << 13) #define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF) #define C_028008_SLICE_MAX 0xFF001FFF #define DB_HTILE_DATA_BASE 0x28014 #define DB_HTILE_SURFACE 0x28abc #define S_028ABC_HTILE_WIDTH(x) (((x) & 0x1) << 0) #define G_028ABC_HTILE_WIDTH(x) (((x) >> 0) & 0x1) #define C_028ABC_HTILE_WIDTH 0xFFFFFFFE #define S_028ABC_HTILE_HEIGHT(x) (((x) & 0x1) << 1) #define G_028ABC_HTILE_HEIGHT(x) (((x) >> 1) & 0x1) #define C_028ABC_HTILE_HEIGHT 0xFFFFFFFD #define G_028ABC_LINEAR(x) (((x) >> 2) & 0x1) #define DB_Z_INFO 0x28040 # define Z_ARRAY_MODE(x) ((x) << 4) # define DB_TILE_SPLIT(x) (((x) & 0x7) << 8) # define DB_NUM_BANKS(x) (((x) & 0x3) << 12) # define DB_BANK_WIDTH(x) (((x) & 0x3) << 16) # define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20) # define DB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24) #define R_028040_DB_Z_INFO 0x028040 #define S_028040_FORMAT(x) (((x) & 0x3) << 0) #define G_028040_FORMAT(x) (((x) >> 0) & 0x3) #define C_028040_FORMAT 0xFFFFFFFC #define V_028040_Z_INVALID 0x00000000 #define V_028040_Z_16 0x00000001 #define V_028040_Z_24 0x00000002 #define V_028040_Z_32_FLOAT 0x00000003 #define S_028040_ARRAY_MODE(x) (((x) & 0xF) << 4) #define G_028040_ARRAY_MODE(x) (((x) >> 4) & 0xF) #define C_028040_ARRAY_MODE 0xFFFFFF0F #define S_028040_READ_SIZE(x) (((x) & 0x1) << 28) #define G_028040_READ_SIZE(x) (((x) >> 28) & 0x1) #define C_028040_READ_SIZE 0xEFFFFFFF #define S_028040_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 29) #define G_028040_TILE_SURFACE_ENABLE(x) (((x) >> 29) & 0x1) #define C_028040_TILE_SURFACE_ENABLE 0xDFFFFFFF #define S_028040_ZRANGE_PRECISION(x) (((x) & 0x1) << 31) #define G_028040_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1) #define C_028040_ZRANGE_PRECISION 0x7FFFFFFF #define S_028040_TILE_SPLIT(x) (((x) & 0x7) << 8) #define G_028040_TILE_SPLIT(x) (((x) >> 8) & 0x7) #define S_028040_NUM_BANKS(x) (((x) & 0x3) << 12) #define G_028040_NUM_BANKS(x) (((x) >> 12) & 0x3) #define S_028040_BANK_WIDTH(x) (((x) & 0x3) << 16) #define G_028040_BANK_WIDTH(x) (((x) >> 16) & 0x3) #define S_028040_BANK_HEIGHT(x) (((x) & 0x3) << 20) #define G_028040_BANK_HEIGHT(x) (((x) >> 20) & 0x3) #define S_028040_MACRO_TILE_ASPECT(x) 
(((x) & 0x3) << 24) #define G_028040_MACRO_TILE_ASPECT(x) (((x) >> 24) & 0x3) #define DB_STENCIL_INFO 0x28044 #define R_028044_DB_STENCIL_INFO 0x028044 #define S_028044_FORMAT(x) (((x) & 0x1) << 0) #define G_028044_FORMAT(x) (((x) >> 0) & 0x1) #define C_028044_FORMAT 0xFFFFFFFE #define V_028044_STENCIL_INVALID 0 #define V_028044_STENCIL_8 1 #define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7) #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c #define DB_Z_WRITE_BASE 0x28050 #define DB_STENCIL_WRITE_BASE 0x28054 #define DB_DEPTH_SIZE 0x28058 #define R_028058_DB_DEPTH_SIZE 0x028058 #define S_028058_PITCH_TILE_MAX(x) (((x) & 0x7FF) << 0) #define G_028058_PITCH_TILE_MAX(x) (((x) >> 0) & 0x7FF) #define C_028058_PITCH_TILE_MAX 0xFFFFF800 #define S_028058_HEIGHT_TILE_MAX(x) (((x) & 0x7FF) << 11) #define G_028058_HEIGHT_TILE_MAX(x) (((x) >> 11) & 0x7FF) #define C_028058_HEIGHT_TILE_MAX 0xFFC007FF #define R_02805C_DB_DEPTH_SLICE 0x02805C #define S_02805C_SLICE_TILE_MAX(x) (((x) & 0x3FFFFF) << 0) #define G_02805C_SLICE_TILE_MAX(x) (((x) >> 0) & 0x3FFFFF) #define C_02805C_SLICE_TILE_MAX 0xFFC00000 #define SQ_PGM_START_PS 0x28840 #define SQ_PGM_START_VS 0x2885c #define SQ_PGM_START_GS 0x28874 #define SQ_PGM_START_ES 0x2888c #define SQ_PGM_START_FS 0x288a4 #define SQ_PGM_START_HS 0x288b8 #define SQ_PGM_START_LS 0x288d0 #define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8 #define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8 #define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8 #define VGT_STRMOUT_BUFFER_BASE_3 0x28B08 #define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0 #define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0 #define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0 #define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00 #define VGT_STRMOUT_CONFIG 0x28b94 #define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 #define CB_TARGET_MASK 0x28238 #define CB_SHADER_MASK 0x2823c #define GDS_ADDR_BASE 0x28720 #define CB_IMMED0_BASE 0x28b9c #define CB_IMMED1_BASE 0x28ba0 #define CB_IMMED2_BASE 0x28ba4 #define CB_IMMED3_BASE 0x28ba8 #define CB_IMMED4_BASE 0x28bac #define CB_IMMED5_BASE 0x28bb0 #define CB_IMMED6_BASE 0x28bb4 #define CB_IMMED7_BASE 0x28bb8 #define CB_IMMED8_BASE 0x28bbc #define CB_IMMED9_BASE 0x28bc0 #define CB_IMMED10_BASE 0x28bc4 #define CB_IMMED11_BASE 0x28bc8 /* all 12 CB blocks have these regs */ #define CB_COLOR0_BASE 0x28c60 #define CB_COLOR0_PITCH 0x28c64 #define CB_COLOR0_SLICE 0x28c68 #define CB_COLOR0_VIEW 0x28c6c #define R_028C6C_CB_COLOR0_VIEW 0x00028C6C #define S_028C6C_SLICE_START(x) (((x) & 0x7FF) << 0) #define G_028C6C_SLICE_START(x) (((x) >> 0) & 0x7FF) #define C_028C6C_SLICE_START 0xFFFFF800 #define S_028C6C_SLICE_MAX(x) (((x) & 0x7FF) << 13) #define G_028C6C_SLICE_MAX(x) (((x) >> 13) & 0x7FF) #define C_028C6C_SLICE_MAX 0xFF001FFF #define R_028C70_CB_COLOR0_INFO 0x028C70 #define S_028C70_ENDIAN(x) (((x) & 0x3) << 0) #define G_028C70_ENDIAN(x) (((x) >> 0) & 0x3) #define C_028C70_ENDIAN 0xFFFFFFFC #define S_028C70_FORMAT(x) (((x) & 0x3F) << 2) #define G_028C70_FORMAT(x) (((x) >> 2) & 0x3F) #define C_028C70_FORMAT 0xFFFFFF03 #define V_028C70_COLOR_INVALID 0x00000000 #define V_028C70_COLOR_8 0x00000001 #define V_028C70_COLOR_4_4 0x00000002 #define V_028C70_COLOR_3_3_2 0x00000003 #define V_028C70_COLOR_16 0x00000005 #define V_028C70_COLOR_16_FLOAT 0x00000006 #define V_028C70_COLOR_8_8 0x00000007 #define V_028C70_COLOR_5_6_5 0x00000008 #define V_028C70_COLOR_6_5_5 0x00000009 #define V_028C70_COLOR_1_5_5_5 0x0000000A #define V_028C70_COLOR_4_4_4_4 0x0000000B #define V_028C70_COLOR_5_5_5_1 0x0000000C #define V_028C70_COLOR_32 0x0000000D #define 
V_028C70_COLOR_32_FLOAT 0x0000000E #define V_028C70_COLOR_16_16 0x0000000F #define V_028C70_COLOR_16_16_FLOAT 0x00000010 #define V_028C70_COLOR_8_24 0x00000011 #define V_028C70_COLOR_8_24_FLOAT 0x00000012 #define V_028C70_COLOR_24_8 0x00000013 #define V_028C70_COLOR_24_8_FLOAT 0x00000014 #define V_028C70_COLOR_10_11_11 0x00000015 #define V_028C70_COLOR_10_11_11_FLOAT 0x00000016 #define V_028C70_COLOR_11_11_10 0x00000017 #define V_028C70_COLOR_11_11_10_FLOAT 0x00000018 #define V_028C70_COLOR_2_10_10_10 0x00000019 #define V_028C70_COLOR_8_8_8_8 0x0000001A #define V_028C70_COLOR_10_10_10_2 0x0000001B #define V_028C70_COLOR_X24_8_32_FLOAT 0x0000001C #define V_028C70_COLOR_32_32 0x0000001D #define V_028C70_COLOR_32_32_FLOAT 0x0000001E #define V_028C70_COLOR_16_16_16_16 0x0000001F #define V_028C70_COLOR_16_16_16_16_FLOAT 0x00000020 #define V_028C70_COLOR_32_32_32_32 0x00000022 #define V_028C70_COLOR_32_32_32_32_FLOAT 0x00000023 #define V_028C70_COLOR_32_32_32_FLOAT 0x00000030 #define S_028C70_ARRAY_MODE(x) (((x) & 0xF) << 8) #define G_028C70_ARRAY_MODE(x) (((x) >> 8) & 0xF) #define C_028C70_ARRAY_MODE 0xFFFFF0FF #define V_028C70_ARRAY_LINEAR_GENERAL 0x00000000 #define V_028C70_ARRAY_LINEAR_ALIGNED 0x00000001 #define V_028C70_ARRAY_1D_TILED_THIN1 0x00000002 #define V_028C70_ARRAY_2D_TILED_THIN1 0x00000004 #define S_028C70_NUMBER_TYPE(x) (((x) & 0x7) << 12) #define G_028C70_NUMBER_TYPE(x) (((x) >> 12) & 0x7) #define C_028C70_NUMBER_TYPE 0xFFFF8FFF #define V_028C70_NUMBER_UNORM 0x00000000 #define V_028C70_NUMBER_SNORM 0x00000001 #define V_028C70_NUMBER_USCALED 0x00000002 #define V_028C70_NUMBER_SSCALED 0x00000003 #define V_028C70_NUMBER_UINT 0x00000004 #define V_028C70_NUMBER_SINT 0x00000005 #define V_028C70_NUMBER_SRGB 0x00000006 #define V_028C70_NUMBER_FLOAT 0x00000007 #define S_028C70_COMP_SWAP(x) (((x) & 0x3) << 15) #define G_028C70_COMP_SWAP(x) (((x) >> 15) & 0x3) #define C_028C70_COMP_SWAP 0xFFFE7FFF #define V_028C70_SWAP_STD 0x00000000 #define V_028C70_SWAP_ALT 0x00000001 #define V_028C70_SWAP_STD_REV 0x00000002 #define V_028C70_SWAP_ALT_REV 0x00000003 #define S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17) #define G_028C70_FAST_CLEAR(x) (((x) >> 17) & 0x1) #define C_028C70_FAST_CLEAR 0xFFFDFFFF #define S_028C70_COMPRESSION(x) (((x) & 0x3) << 18) #define G_028C70_COMPRESSION(x) (((x) >> 18) & 0x3) #define C_028C70_COMPRESSION 0xFFF3FFFF #define S_028C70_BLEND_CLAMP(x) (((x) & 0x1) << 19) #define G_028C70_BLEND_CLAMP(x) (((x) >> 19) & 0x1) #define C_028C70_BLEND_CLAMP 0xFFF7FFFF #define S_028C70_BLEND_BYPASS(x) (((x) & 0x1) << 20) #define G_028C70_BLEND_BYPASS(x) (((x) >> 20) & 0x1) #define C_028C70_BLEND_BYPASS 0xFFEFFFFF #define S_028C70_SIMPLE_FLOAT(x) (((x) & 0x1) << 21) #define G_028C70_SIMPLE_FLOAT(x) (((x) >> 21) & 0x1) #define C_028C70_SIMPLE_FLOAT 0xFFDFFFFF #define S_028C70_ROUND_MODE(x) (((x) & 0x1) << 22) #define G_028C70_ROUND_MODE(x) (((x) >> 22) & 0x1) #define C_028C70_ROUND_MODE 0xFFBFFFFF #define S_028C70_TILE_COMPACT(x) (((x) & 0x1) << 23) #define G_028C70_TILE_COMPACT(x) (((x) >> 23) & 0x1) #define C_028C70_TILE_COMPACT 0xFF7FFFFF #define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24) #define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3) #define C_028C70_SOURCE_FORMAT 0xFCFFFFFF #define V_028C70_EXPORT_4C_32BPC 0x0 #define V_028C70_EXPORT_4C_16BPC 0x1 #define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */ #define S_028C70_RAT(x) (((x) & 0x1) << 26) #define G_028C70_RAT(x) (((x) >> 26) & 0x1) #define C_028C70_RAT 0xFBFFFFFF #define S_028C70_RESOURCE_TYPE(x) (((x) & 0x7) << 27) 
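The S_/G_/C_ triplets in this header follow one convention: S_* shifts a field value into place, G_* extracts it, and C_* is the complement mask that clears the field. A minimal sketch of the resulting read-modify-write idiom using the CB_COLOR0_INFO FORMAT field from above; cb_color0_info_set_format is a hypothetical helper for illustration:

#include <stdint.h>

/* Field helpers as defined above. */
#define S_028C70_FORMAT(x)	(((x) & 0x3F) << 2)
#define G_028C70_FORMAT(x)	(((x) >> 2) & 0x3F)
#define C_028C70_FORMAT		0xFFFFFF03
#define V_028C70_COLOR_8_8_8_8	0x0000001A

/* Clear the FORMAT field with the C_ mask, then OR in the new value,
 * leaving every neighboring field of the register untouched. */
static inline uint32_t cb_color0_info_set_format(uint32_t info, uint32_t fmt)
{
	return (info & C_028C70_FORMAT) | S_028C70_FORMAT(fmt);
}

/* Usage: info = cb_color0_info_set_format(info, V_028C70_COLOR_8_8_8_8);
 * afterwards G_028C70_FORMAT(info) recovers the value just stored. */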
#define G_028C70_RESOURCE_TYPE(x) (((x) >> 27) & 0x7) #define C_028C70_RESOURCE_TYPE 0xC7FFFFFF #define CB_COLOR0_INFO 0x28c70 # define CB_FORMAT(x) ((x) << 2) # define CB_ARRAY_MODE(x) ((x) << 8) # define ARRAY_LINEAR_GENERAL 0 # define ARRAY_LINEAR_ALIGNED 1 # define ARRAY_1D_TILED_THIN1 2 # define ARRAY_2D_TILED_THIN1 4 # define CB_SOURCE_FORMAT(x) ((x) << 24) # define CB_SF_EXPORT_FULL 0 # define CB_SF_EXPORT_NORM 1 #define R_028C74_CB_COLOR0_ATTRIB 0x028C74 #define S_028C74_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 4) #define G_028C74_NON_DISP_TILING_ORDER(x) (((x) >> 4) & 0x1) #define C_028C74_NON_DISP_TILING_ORDER 0xFFFFFFEF #define S_028C74_TILE_SPLIT(x) (((x) & 0xf) << 5) #define G_028C74_TILE_SPLIT(x) (((x) >> 5) & 0xf) #define S_028C74_NUM_BANKS(x) (((x) & 0x3) << 10) #define G_028C74_NUM_BANKS(x) (((x) >> 10) & 0x3) #define S_028C74_BANK_WIDTH(x) (((x) & 0x3) << 13) #define G_028C74_BANK_WIDTH(x) (((x) >> 13) & 0x3) #define S_028C74_BANK_HEIGHT(x) (((x) & 0x3) << 16) #define G_028C74_BANK_HEIGHT(x) (((x) >> 16) & 0x3) #define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19) #define G_028C74_MACRO_TILE_ASPECT(x) (((x) >> 19) & 0x3) #define CB_COLOR0_ATTRIB 0x28c74 # define CB_TILE_SPLIT(x) (((x) & 0x7) << 5) # define ADDR_SURF_TILE_SPLIT_64B 0 # define ADDR_SURF_TILE_SPLIT_128B 1 # define ADDR_SURF_TILE_SPLIT_256B 2 # define ADDR_SURF_TILE_SPLIT_512B 3 # define ADDR_SURF_TILE_SPLIT_1KB 4 # define ADDR_SURF_TILE_SPLIT_2KB 5 # define ADDR_SURF_TILE_SPLIT_4KB 6 # define CB_NUM_BANKS(x) (((x) & 0x3) << 10) # define ADDR_SURF_2_BANK 0 # define ADDR_SURF_4_BANK 1 # define ADDR_SURF_8_BANK 2 # define ADDR_SURF_16_BANK 3 # define CB_BANK_WIDTH(x) (((x) & 0x3) << 13) # define ADDR_SURF_BANK_WIDTH_1 0 # define ADDR_SURF_BANK_WIDTH_2 1 # define ADDR_SURF_BANK_WIDTH_4 2 # define ADDR_SURF_BANK_WIDTH_8 3 # define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16) # define ADDR_SURF_BANK_HEIGHT_1 0 # define ADDR_SURF_BANK_HEIGHT_2 1 # define ADDR_SURF_BANK_HEIGHT_4 2 # define ADDR_SURF_BANK_HEIGHT_8 3 # define CB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19) #define CB_COLOR0_DIM 0x28c78 /* only CB0-7 blocks have these regs */ #define CB_COLOR0_CMASK 0x28c7c #define CB_COLOR0_CMASK_SLICE 0x28c80 #define CB_COLOR0_FMASK 0x28c84 #define CB_COLOR0_FMASK_SLICE 0x28c88 #define CB_COLOR0_CLEAR_WORD0 0x28c8c #define CB_COLOR0_CLEAR_WORD1 0x28c90 #define CB_COLOR0_CLEAR_WORD2 0x28c94 #define CB_COLOR0_CLEAR_WORD3 0x28c98 #define CB_COLOR1_BASE 0x28c9c #define CB_COLOR2_BASE 0x28cd8 #define CB_COLOR3_BASE 0x28d14 #define CB_COLOR4_BASE 0x28d50 #define CB_COLOR5_BASE 0x28d8c #define CB_COLOR6_BASE 0x28dc8 #define CB_COLOR7_BASE 0x28e04 #define CB_COLOR8_BASE 0x28e40 #define CB_COLOR9_BASE 0x28e5c #define CB_COLOR10_BASE 0x28e78 #define CB_COLOR11_BASE 0x28e94 #define CB_COLOR1_PITCH 0x28ca0 #define CB_COLOR2_PITCH 0x28cdc #define CB_COLOR3_PITCH 0x28d18 #define CB_COLOR4_PITCH 0x28d54 #define CB_COLOR5_PITCH 0x28d90 #define CB_COLOR6_PITCH 0x28dcc #define CB_COLOR7_PITCH 0x28e08 #define CB_COLOR8_PITCH 0x28e44 #define CB_COLOR9_PITCH 0x28e60 #define CB_COLOR10_PITCH 0x28e7c #define CB_COLOR11_PITCH 0x28e98 #define CB_COLOR1_SLICE 0x28ca4 #define CB_COLOR2_SLICE 0x28ce0 #define CB_COLOR3_SLICE 0x28d1c #define CB_COLOR4_SLICE 0x28d58 #define CB_COLOR5_SLICE 0x28d94 #define CB_COLOR6_SLICE 0x28dd0 #define CB_COLOR7_SLICE 0x28e0c #define CB_COLOR8_SLICE 0x28e48 #define CB_COLOR9_SLICE 0x28e64 #define CB_COLOR10_SLICE 0x28e80 #define CB_COLOR11_SLICE 0x28e9c #define CB_COLOR1_VIEW 0x28ca8 #define CB_COLOR2_VIEW 0x28ce4 
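Reading the CB_COLORn_BASE offsets listed above, the per-block stride can be derived: CB0-CB7 carry the full register set including CMASK/FMASK/CLEAR_WORD (fifteen registers, 0x3c bytes apart), while CB8-CB11 carry only the seven common registers (0x1c bytes apart). A hedged sketch of an offset helper built on that observation; cb_colorn_base is hypothetical, not a function from this driver:

#include <stdint.h>

#define CB_COLOR0_BASE	0x28c60
#define CB_COLOR8_BASE	0x28e40

/* Per the offsets above: 0x3c stride for CB0-7, 0x1c for CB8-11,
 * e.g. n = 7 yields 0x28e04 and n = 11 yields 0x28e94. */
static inline uint32_t cb_colorn_base(unsigned int n)
{
	return (n < 8) ? CB_COLOR0_BASE + n * 0x3c
		       : CB_COLOR8_BASE + (n - 8) * 0x1c;
}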
#define CB_COLOR3_VIEW 0x28d20 #define CB_COLOR4_VIEW 0x28d5c #define CB_COLOR5_VIEW 0x28d98 #define CB_COLOR6_VIEW 0x28dd4 #define CB_COLOR7_VIEW 0x28e10 #define CB_COLOR8_VIEW 0x28e4c #define CB_COLOR9_VIEW 0x28e68 #define CB_COLOR10_VIEW 0x28e84 #define CB_COLOR11_VIEW 0x28ea0 #define CB_COLOR1_INFO 0x28cac #define CB_COLOR2_INFO 0x28ce8 #define CB_COLOR3_INFO 0x28d24 #define CB_COLOR4_INFO 0x28d60 #define CB_COLOR5_INFO 0x28d9c #define CB_COLOR6_INFO 0x28dd8 #define CB_COLOR7_INFO 0x28e14 #define CB_COLOR8_INFO 0x28e50 #define CB_COLOR9_INFO 0x28e6c #define CB_COLOR10_INFO 0x28e88 #define CB_COLOR11_INFO 0x28ea4 #define CB_COLOR1_ATTRIB 0x28cb0 #define CB_COLOR2_ATTRIB 0x28cec #define CB_COLOR3_ATTRIB 0x28d28 #define CB_COLOR4_ATTRIB 0x28d64 #define CB_COLOR5_ATTRIB 0x28da0 #define CB_COLOR6_ATTRIB 0x28ddc #define CB_COLOR7_ATTRIB 0x28e18 #define CB_COLOR8_ATTRIB 0x28e54 #define CB_COLOR9_ATTRIB 0x28e70 #define CB_COLOR10_ATTRIB 0x28e8c #define CB_COLOR11_ATTRIB 0x28ea8 #define CB_COLOR1_DIM 0x28cb4 #define CB_COLOR2_DIM 0x28cf0 #define CB_COLOR3_DIM 0x28d2c #define CB_COLOR4_DIM 0x28d68 #define CB_COLOR5_DIM 0x28da4 #define CB_COLOR6_DIM 0x28de0 #define CB_COLOR7_DIM 0x28e1c #define CB_COLOR8_DIM 0x28e58 #define CB_COLOR9_DIM 0x28e74 #define CB_COLOR10_DIM 0x28e90 #define CB_COLOR11_DIM 0x28eac #define CB_COLOR1_CMASK 0x28cb8 #define CB_COLOR2_CMASK 0x28cf4 #define CB_COLOR3_CMASK 0x28d30 #define CB_COLOR4_CMASK 0x28d6c #define CB_COLOR5_CMASK 0x28da8 #define CB_COLOR6_CMASK 0x28de4 #define CB_COLOR7_CMASK 0x28e20 #define CB_COLOR1_CMASK_SLICE 0x28cbc #define CB_COLOR2_CMASK_SLICE 0x28cf8 #define CB_COLOR3_CMASK_SLICE 0x28d34 #define CB_COLOR4_CMASK_SLICE 0x28d70 #define CB_COLOR5_CMASK_SLICE 0x28dac #define CB_COLOR6_CMASK_SLICE 0x28de8 #define CB_COLOR7_CMASK_SLICE 0x28e24 #define CB_COLOR1_FMASK 0x28cc0 #define CB_COLOR2_FMASK 0x28cfc #define CB_COLOR3_FMASK 0x28d38 #define CB_COLOR4_FMASK 0x28d74 #define CB_COLOR5_FMASK 0x28db0 #define CB_COLOR6_FMASK 0x28dec #define CB_COLOR7_FMASK 0x28e28 #define CB_COLOR1_FMASK_SLICE 0x28cc4 #define CB_COLOR2_FMASK_SLICE 0x28d00 #define CB_COLOR3_FMASK_SLICE 0x28d3c #define CB_COLOR4_FMASK_SLICE 0x28d78 #define CB_COLOR5_FMASK_SLICE 0x28db4 #define CB_COLOR6_FMASK_SLICE 0x28df0 #define CB_COLOR7_FMASK_SLICE 0x28e2c #define CB_COLOR1_CLEAR_WORD0 0x28cc8 #define CB_COLOR2_CLEAR_WORD0 0x28d04 #define CB_COLOR3_CLEAR_WORD0 0x28d40 #define CB_COLOR4_CLEAR_WORD0 0x28d7c #define CB_COLOR5_CLEAR_WORD0 0x28db8 #define CB_COLOR6_CLEAR_WORD0 0x28df4 #define CB_COLOR7_CLEAR_WORD0 0x28e30 #define CB_COLOR1_CLEAR_WORD1 0x28ccc #define CB_COLOR2_CLEAR_WORD1 0x28d08 #define CB_COLOR3_CLEAR_WORD1 0x28d44 #define CB_COLOR4_CLEAR_WORD1 0x28d80 #define CB_COLOR5_CLEAR_WORD1 0x28dbc #define CB_COLOR6_CLEAR_WORD1 0x28df8 #define CB_COLOR7_CLEAR_WORD1 0x28e34 #define CB_COLOR1_CLEAR_WORD2 0x28cd0 #define CB_COLOR2_CLEAR_WORD2 0x28d0c #define CB_COLOR3_CLEAR_WORD2 0x28d48 #define CB_COLOR4_CLEAR_WORD2 0x28d84 #define CB_COLOR5_CLEAR_WORD2 0x28dc0 #define CB_COLOR6_CLEAR_WORD2 0x28dfc #define CB_COLOR7_CLEAR_WORD2 0x28e38 #define CB_COLOR1_CLEAR_WORD3 0x28cd4 #define CB_COLOR2_CLEAR_WORD3 0x28d10 #define CB_COLOR3_CLEAR_WORD3 0x28d4c #define CB_COLOR4_CLEAR_WORD3 0x28d88 #define CB_COLOR5_CLEAR_WORD3 0x28dc4 #define CB_COLOR6_CLEAR_WORD3 0x28e00 #define CB_COLOR7_CLEAR_WORD3 0x28e3c #define SQ_TEX_RESOURCE_WORD0_0 0x30000 # define TEX_DIM(x) ((x) << 0) # define SQ_TEX_DIM_1D 0 # define SQ_TEX_DIM_2D 1 # define SQ_TEX_DIM_3D 2 # define SQ_TEX_DIM_CUBEMAP 3 # define 
SQ_TEX_DIM_1D_ARRAY 4 # define SQ_TEX_DIM_2D_ARRAY 5 # define SQ_TEX_DIM_2D_MSAA 6 # define SQ_TEX_DIM_2D_ARRAY_MSAA 7 #define SQ_TEX_RESOURCE_WORD1_0 0x30004 # define TEX_ARRAY_MODE(x) ((x) << 28) #define SQ_TEX_RESOURCE_WORD2_0 0x30008 #define SQ_TEX_RESOURCE_WORD3_0 0x3000C #define SQ_TEX_RESOURCE_WORD4_0 0x30010 # define TEX_DST_SEL_X(x) ((x) << 16) # define TEX_DST_SEL_Y(x) ((x) << 19) # define TEX_DST_SEL_Z(x) ((x) << 22) # define TEX_DST_SEL_W(x) ((x) << 25) # define SQ_SEL_X 0 # define SQ_SEL_Y 1 # define SQ_SEL_Z 2 # define SQ_SEL_W 3 # define SQ_SEL_0 4 # define SQ_SEL_1 5 #define SQ_TEX_RESOURCE_WORD5_0 0x30014 #define SQ_TEX_RESOURCE_WORD6_0 0x30018 # define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29) #define SQ_TEX_RESOURCE_WORD7_0 0x3001c # define MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6) # define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8) # define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10) # define TEX_NUM_BANKS(x) (((x) & 0x3) << 16) #define R_030000_SQ_TEX_RESOURCE_WORD0_0 0x030000 #define S_030000_DIM(x) (((x) & 0x7) << 0) #define G_030000_DIM(x) (((x) >> 0) & 0x7) #define C_030000_DIM 0xFFFFFFF8 #define V_030000_SQ_TEX_DIM_1D 0x00000000 #define V_030000_SQ_TEX_DIM_2D 0x00000001 #define V_030000_SQ_TEX_DIM_3D 0x00000002 #define V_030000_SQ_TEX_DIM_CUBEMAP 0x00000003 #define V_030000_SQ_TEX_DIM_1D_ARRAY 0x00000004 #define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005 #define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006 #define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007 #define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5) #define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1) #define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF #define S_030000_PITCH(x) (((x) & 0xFFF) << 6) #define G_030000_PITCH(x) (((x) >> 6) & 0xFFF) #define C_030000_PITCH 0xFFFC003F #define S_030000_TEX_WIDTH(x) (((x) & 0x3FFF) << 18) #define G_030000_TEX_WIDTH(x) (((x) >> 18) & 0x3FFF) #define C_030000_TEX_WIDTH 0x0003FFFF #define R_030004_SQ_TEX_RESOURCE_WORD1_0 0x030004 #define S_030004_TEX_HEIGHT(x) (((x) & 0x3FFF) << 0) #define G_030004_TEX_HEIGHT(x) (((x) >> 0) & 0x3FFF) #define C_030004_TEX_HEIGHT 0xFFFFC000 #define S_030004_TEX_DEPTH(x) (((x) & 0x1FFF) << 14) #define G_030004_TEX_DEPTH(x) (((x) >> 14) & 0x1FFF) #define C_030004_TEX_DEPTH 0xF8003FFF #define S_030004_ARRAY_MODE(x) (((x) & 0xF) << 28) #define G_030004_ARRAY_MODE(x) (((x) >> 28) & 0xF) #define C_030004_ARRAY_MODE 0x0FFFFFFF #define R_030008_SQ_TEX_RESOURCE_WORD2_0 0x030008 #define S_030008_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) #define G_030008_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) #define C_030008_BASE_ADDRESS 0x00000000 #define R_03000C_SQ_TEX_RESOURCE_WORD3_0 0x03000C #define S_03000C_MIP_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) #define G_03000C_MIP_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) #define C_03000C_MIP_ADDRESS 0x00000000 #define R_030010_SQ_TEX_RESOURCE_WORD4_0 0x030010 #define S_030010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) #define G_030010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) #define C_030010_FORMAT_COMP_X 0xFFFFFFFC #define V_030010_SQ_FORMAT_COMP_UNSIGNED 0x00000000 #define V_030010_SQ_FORMAT_COMP_SIGNED 0x00000001 #define V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED 0x00000002 #define S_030010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2) #define G_030010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3) #define C_030010_FORMAT_COMP_Y 0xFFFFFFF3 #define S_030010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4) #define G_030010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3) #define C_030010_FORMAT_COMP_Z 0xFFFFFFCF #define S_030010_FORMAT_COMP_W(x) (((x) & 0x3) << 6) #define 
G_030010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3) #define C_030010_FORMAT_COMP_W 0xFFFFFF3F #define S_030010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8) #define G_030010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3) #define C_030010_NUM_FORMAT_ALL 0xFFFFFCFF #define V_030010_SQ_NUM_FORMAT_NORM 0x00000000 #define V_030010_SQ_NUM_FORMAT_INT 0x00000001 #define V_030010_SQ_NUM_FORMAT_SCALED 0x00000002 #define S_030010_SRF_MODE_ALL(x) (((x) & 0x1) << 10) #define G_030010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1) #define C_030010_SRF_MODE_ALL 0xFFFFFBFF #define V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE 0x00000000 #define V_030010_SRF_MODE_NO_ZERO 0x00000001 #define S_030010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11) #define G_030010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1) #define C_030010_FORCE_DEGAMMA 0xFFFFF7FF #define S_030010_ENDIAN_SWAP(x) (((x) & 0x3) << 12) #define G_030010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3) #define C_030010_ENDIAN_SWAP 0xFFFFCFFF #define S_030010_DST_SEL_X(x) (((x) & 0x7) << 16) #define G_030010_DST_SEL_X(x) (((x) >> 16) & 0x7) #define C_030010_DST_SEL_X 0xFFF8FFFF #define V_030010_SQ_SEL_X 0x00000000 #define V_030010_SQ_SEL_Y 0x00000001 #define V_030010_SQ_SEL_Z 0x00000002 #define V_030010_SQ_SEL_W 0x00000003 #define V_030010_SQ_SEL_0 0x00000004 #define V_030010_SQ_SEL_1 0x00000005 #define S_030010_DST_SEL_Y(x) (((x) & 0x7) << 19) #define G_030010_DST_SEL_Y(x) (((x) >> 19) & 0x7) #define C_030010_DST_SEL_Y 0xFFC7FFFF #define S_030010_DST_SEL_Z(x) (((x) & 0x7) << 22) #define G_030010_DST_SEL_Z(x) (((x) >> 22) & 0x7) #define C_030010_DST_SEL_Z 0xFE3FFFFF #define S_030010_DST_SEL_W(x) (((x) & 0x7) << 25) #define G_030010_DST_SEL_W(x) (((x) >> 25) & 0x7) #define C_030010_DST_SEL_W 0xF1FFFFFF #define S_030010_BASE_LEVEL(x) (((x) & 0xF) << 28) #define G_030010_BASE_LEVEL(x) (((x) >> 28) & 0xF) #define C_030010_BASE_LEVEL 0x0FFFFFFF #define R_030014_SQ_TEX_RESOURCE_WORD5_0 0x030014 #define S_030014_LAST_LEVEL(x) (((x) & 0xF) << 0) #define G_030014_LAST_LEVEL(x) (((x) >> 0) & 0xF) #define C_030014_LAST_LEVEL 0xFFFFFFF0 #define S_030014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4) #define G_030014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF) #define C_030014_BASE_ARRAY 0xFFFE000F #define S_030014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17) #define G_030014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF) #define C_030014_LAST_ARRAY 0xC001FFFF #define R_030018_SQ_TEX_RESOURCE_WORD6_0 0x030018 #define S_030018_MAX_ANISO(x) (((x) & 0x7) << 0) #define G_030018_MAX_ANISO(x) (((x) >> 0) & 0x7) #define C_030018_MAX_ANISO 0xFFFFFFF8 #define S_030018_PERF_MODULATION(x) (((x) & 0x7) << 3) #define G_030018_PERF_MODULATION(x) (((x) >> 3) & 0x7) #define C_030018_PERF_MODULATION 0xFFFFFFC7 #define S_030018_INTERLACED(x) (((x) & 0x1) << 6) #define G_030018_INTERLACED(x) (((x) >> 6) & 0x1) #define C_030018_INTERLACED 0xFFFFFFBF #define S_030018_TILE_SPLIT(x) (((x) & 0x7) << 29) #define G_030018_TILE_SPLIT(x) (((x) >> 29) & 0x7) #define R_03001C_SQ_TEX_RESOURCE_WORD7_0 0x03001C #define S_03001C_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6) #define G_03001C_MACRO_TILE_ASPECT(x) (((x) >> 6) & 0x3) #define S_03001C_BANK_WIDTH(x) (((x) & 0x3) << 8) #define G_03001C_BANK_WIDTH(x) (((x) >> 8) & 0x3) #define S_03001C_BANK_HEIGHT(x) (((x) & 0x3) << 10) #define G_03001C_BANK_HEIGHT(x) (((x) >> 10) & 0x3) #define S_03001C_NUM_BANKS(x) (((x) & 0x3) << 16) #define G_03001C_NUM_BANKS(x) (((x) >> 16) & 0x3) #define S_03001C_TYPE(x) (((x) & 0x3) << 30) #define G_03001C_TYPE(x) (((x) >> 30) & 0x3) #define C_03001C_TYPE 0x3FFFFFFF #define V_03001C_SQ_TEX_VTX_INVALID_TEXTURE 0x00000000 #define 
V_03001C_SQ_TEX_VTX_INVALID_BUFFER 0x00000001 #define V_03001C_SQ_TEX_VTX_VALID_TEXTURE 0x00000002 #define V_03001C_SQ_TEX_VTX_VALID_BUFFER 0x00000003 #define S_03001C_DATA_FORMAT(x) (((x) & 0x3F) << 0) #define G_03001C_DATA_FORMAT(x) (((x) >> 0) & 0x3F) #define C_03001C_DATA_FORMAT 0xFFFFFFC0 #define SQ_VTX_CONSTANT_WORD0_0 0x30000 #define SQ_VTX_CONSTANT_WORD1_0 0x30004 #define SQ_VTX_CONSTANT_WORD2_0 0x30008 # define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0) # define SQ_VTXC_STRIDE(x) ((x) << 8) # define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30) # define SQ_ENDIAN_NONE 0 # define SQ_ENDIAN_8IN16 1 # define SQ_ENDIAN_8IN32 2 #define SQ_VTX_CONSTANT_WORD3_0 0x3000C # define SQ_VTCX_SEL_X(x) ((x) << 3) # define SQ_VTCX_SEL_Y(x) ((x) << 6) # define SQ_VTCX_SEL_Z(x) ((x) << 9) # define SQ_VTCX_SEL_W(x) ((x) << 12) #define SQ_VTX_CONSTANT_WORD4_0 0x30010 #define SQ_VTX_CONSTANT_WORD5_0 0x30014 #define SQ_VTX_CONSTANT_WORD6_0 0x30018 #define SQ_VTX_CONSTANT_WORD7_0 0x3001c #define TD_PS_BORDER_COLOR_INDEX 0xA400 #define TD_PS_BORDER_COLOR_RED 0xA404 #define TD_PS_BORDER_COLOR_GREEN 0xA408 #define TD_PS_BORDER_COLOR_BLUE 0xA40C #define TD_PS_BORDER_COLOR_ALPHA 0xA410 #define TD_VS_BORDER_COLOR_INDEX 0xA414 #define TD_VS_BORDER_COLOR_RED 0xA418 #define TD_VS_BORDER_COLOR_GREEN 0xA41C #define TD_VS_BORDER_COLOR_BLUE 0xA420 #define TD_VS_BORDER_COLOR_ALPHA 0xA424 #define TD_GS_BORDER_COLOR_INDEX 0xA428 #define TD_GS_BORDER_COLOR_RED 0xA42C #define TD_GS_BORDER_COLOR_GREEN 0xA430 #define TD_GS_BORDER_COLOR_BLUE 0xA434 #define TD_GS_BORDER_COLOR_ALPHA 0xA438 #define TD_HS_BORDER_COLOR_INDEX 0xA43C #define TD_HS_BORDER_COLOR_RED 0xA440 #define TD_HS_BORDER_COLOR_GREEN 0xA444 #define TD_HS_BORDER_COLOR_BLUE 0xA448 #define TD_HS_BORDER_COLOR_ALPHA 0xA44C #define TD_LS_BORDER_COLOR_INDEX 0xA450 #define TD_LS_BORDER_COLOR_RED 0xA454 #define TD_LS_BORDER_COLOR_GREEN 0xA458 #define TD_LS_BORDER_COLOR_BLUE 0xA45C #define TD_LS_BORDER_COLOR_ALPHA 0xA460 #define TD_CS_BORDER_COLOR_INDEX 0xA464 #define TD_CS_BORDER_COLOR_RED 0xA468 #define TD_CS_BORDER_COLOR_GREEN 0xA46C #define TD_CS_BORDER_COLOR_BLUE 0xA470 #define TD_CS_BORDER_COLOR_ALPHA 0xA474 /* cayman 3D regs */ #define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4 #define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48 #define CAYMAN_DB_EQAA 0x28804 #define CAYMAN_DB_DEPTH_INFO 0x2803C #define CAYMAN_PA_SC_AA_CONFIG 0x28BE0 #define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0 #define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7 #define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358 /* cayman packet3 addition */ #define CAYMAN_PACKET3_DEALLOC_STATE 0x14 /* DMA regs common on r6xx/r7xx/evergreen/ni */ #define DMA_RB_CNTL 0xd000 # define DMA_RB_ENABLE (1 << 0) # define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ # define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) # define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ #define DMA_STATUS_REG 0xd034 # define DMA_IDLE (1 << 0) #endif Index: head/sys/dev/drm2/radeon/nid.h =================================================================== --- head/sys/dev/drm2/radeon/nid.h (revision 258779) +++ head/sys/dev/drm2/radeon/nid.h (revision 258780) @@ -1,680 +1,680 @@ /* * Copyright 2010 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Alex Deucher */ #ifndef NI_H #define NI_H #include __FBSDID("$FreeBSD$"); #define CAYMAN_MAX_SH_GPRS 256 #define CAYMAN_MAX_TEMP_GPRS 16 #define CAYMAN_MAX_SH_THREADS 256 #define CAYMAN_MAX_SH_STACK_ENTRIES 4096 #define CAYMAN_MAX_FRC_EOV_CNT 16384 #define CAYMAN_MAX_BACKENDS 8 #define CAYMAN_MAX_BACKENDS_MASK 0xFF #define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF #define CAYMAN_MAX_SIMDS 16 #define CAYMAN_MAX_SIMDS_MASK 0xFFFF #define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF #define CAYMAN_MAX_PIPES 8 #define CAYMAN_MAX_PIPES_MASK 0xFF #define CAYMAN_MAX_LDS_NUM 0xFFFF #define CAYMAN_MAX_TCC 16 #define CAYMAN_MAX_TCC_MASK 0xFF #define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 #define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001 #define DMIF_ADDR_CONFIG 0xBD4 #define SRBM_GFX_CNTL 0x0E44 #define RINGID(x) (((x) & 0x3) << 0) #define VMID(x) (((x) & 0x7) << 0) #define SRBM_STATUS 0x0E50 #define SRBM_SOFT_RESET 0x0E60 #define SOFT_RESET_BIF (1 << 1) #define SOFT_RESET_CG (1 << 2) #define SOFT_RESET_DC (1 << 5) #define SOFT_RESET_DMA1 (1 << 6) #define SOFT_RESET_GRBM (1 << 8) #define SOFT_RESET_HDP (1 << 9) #define SOFT_RESET_IH (1 << 10) #define SOFT_RESET_MC (1 << 11) #define SOFT_RESET_RLC (1 << 13) #define SOFT_RESET_ROM (1 << 14) #define SOFT_RESET_SEM (1 << 15) #define SOFT_RESET_VMC (1 << 17) #define SOFT_RESET_DMA (1 << 20) #define SOFT_RESET_TST (1 << 21) #define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) #define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 #define REQUEST_TYPE(x) (((x) & 0xf) << 0) #define RESPONSE_TYPE_MASK 0x000000F0 #define RESPONSE_TYPE_SHIFT 4 #define VM_L2_CNTL 0x1400 #define ENABLE_L2_CACHE (1 << 0) #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) #define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) #define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10) #define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14) #define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 18) /* CONTEXT1_IDENTITY_ACCESS_MODE * 0 physical = logical * 1 logical via context1 page table * 2 inside identity aperture use translation, outside physical = logical * 3 inside identity aperture physical = logical, outside use translation */ #define VM_L2_CNTL2 0x1404 #define INVALIDATE_ALL_L1_TLBS (1 << 0) #define INVALIDATE_L2_CACHE (1 << 1) #define VM_L2_CNTL3 0x1408 #define BANK_SELECT(x) ((x) << 0) #define CACHE_UPDATE_MODE(x) ((x) << 6) #define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20) #define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15) #define 
VM_L2_STATUS 0x140C #define L2_BUSY (1 << 0) #define VM_CONTEXT0_CNTL 0x1410 #define ENABLE_CONTEXT (1 << 0) #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) #define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3) #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) #define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6) #define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7) #define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9) #define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10) #define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12) #define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13) #define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15) #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) #define VM_CONTEXT1_CNTL 0x1414 #define VM_CONTEXT0_CNTL2 0x1430 #define VM_CONTEXT1_CNTL2 0x1434 #define VM_INVALIDATE_REQUEST 0x1478 #define VM_INVALIDATE_RESPONSE 0x147c #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 #define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 #define MC_SHARED_CHREMAP 0x2008 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_MX_L1_TLB_CNTL 0x2064 #define ENABLE_L1_TLB (1 << 0) #define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) #define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) #define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) #define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) #define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) #define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) #define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6) #define FUS_MC_VM_FB_OFFSET 0x2068 #define MC_SHARED_BLACKOUT_CNTL 0x20ac #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000003 #define NOOFRANK_SHIFT 2 #define NOOFRANK_MASK 0x00000004 #define NOOFROWS_SHIFT 3 #define NOOFROWS_MASK 0x00000038 #define NOOFCOLS_SHIFT 6 #define NOOFCOLS_MASK 0x000000C0 #define CHANSIZE_SHIFT 8 #define CHANSIZE_MASK 0x00000100 #define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_MASK 0x00000200 #define CHANSIZE_OVERRIDE (1 << 11) #define MC_SEQ_SUP_CNTL 0x28c8 #define RUN_MASK (1 << 0) #define MC_SEQ_SUP_PGM 0x28cc #define MC_IO_PAD_CNTL_D0 0x29d0 #define MEM_FALL_OUT_CMD (1 << 8) #define MC_SEQ_MISC0 0x2a00 #define MC_SEQ_MISC0_GDDR5_SHIFT 28 #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 #define MC_SEQ_MISC0_GDDR5_VALUE 5 #define MC_SEQ_IO_DEBUG_INDEX 0x2a44 #define MC_SEQ_IO_DEBUG_DATA 0x2a48 #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_ADDR_CONFIG 0x2F48 #define HDP_MISC_CNTL 0x2F4C #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 #define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C #define CGTS_SYS_TCC_DISABLE 0x3F90 #define CGTS_USER_SYS_TCC_DISABLE 0x3F94 #define RLC_GFX_INDEX 0x3FC4 #define CONFIG_MEMSIZE 0x5428 #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define GRBM_CNTL 0x8000 #define GRBM_READ_TIMEOUT(x) ((x) << 0) #define GRBM_STATUS 0x8010 #define CMDFIFO_AVAIL_MASK 0x0000000F #define RING2_RQ_PENDING (1 << 4) #define SRBM_RQ_PENDING (1 << 5) 
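/*
 * Note on the hunks below: bit 31 is the one flag that cannot be spelled
 * (1 << 31) in portable C.  The constant 1 is a signed 32-bit int, so
 * shifting it into the sign bit overflows, which is undefined behavior, and
 * the (in practice negative) result also sign-extends when widened to a
 * 64-bit type.  Spelling it (1U << 31) keeps the expression in unsigned
 * arithmetic.  A minimal illustration (not part of the register set):
 */
#if 0
unsigned long bad  = 1  << 31;	/* UB; on LP64 typically 0xffffffff80000000 */
unsigned long good = 1U << 31;	/* well defined:        0x0000000080000000 */
#endif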
#define RING1_RQ_PENDING (1 << 6) #define CF_RQ_PENDING (1 << 7) #define PF_RQ_PENDING (1 << 8) #define GDS_DMA_RQ_PENDING (1 << 9) #define GRBM_EE_BUSY (1 << 10) #define SX_CLEAN (1 << 11) #define DB_CLEAN (1 << 12) #define CB_CLEAN (1 << 13) #define TA_BUSY (1 << 14) #define GDS_BUSY (1 << 15) #define VGT_BUSY_NO_DMA (1 << 16) #define VGT_BUSY (1 << 17) #define IA_BUSY_NO_DMA (1 << 18) #define IA_BUSY (1 << 19) #define SX_BUSY (1 << 20) #define SH_BUSY (1 << 21) #define SPI_BUSY (1 << 22) #define SC_BUSY (1 << 24) #define PA_BUSY (1 << 25) #define DB_BUSY (1 << 26) #define CP_COHERENCY_BUSY (1 << 28) #define CP_BUSY (1 << 29) #define CB_BUSY (1 << 30) -#define GUI_ACTIVE (1 << 31) +#define GUI_ACTIVE (1U << 31) #define GRBM_STATUS_SE0 0x8014 #define GRBM_STATUS_SE1 0x8018 #define SE_SX_CLEAN (1 << 0) #define SE_DB_CLEAN (1 << 1) #define SE_CB_CLEAN (1 << 2) #define SE_VGT_BUSY (1 << 23) #define SE_PA_BUSY (1 << 24) #define SE_TA_BUSY (1 << 25) #define SE_SX_BUSY (1 << 26) #define SE_SPI_BUSY (1 << 27) #define SE_SH_BUSY (1 << 28) #define SE_SC_BUSY (1 << 29) #define SE_DB_BUSY (1 << 30) -#define SE_CB_BUSY (1 << 31) +#define SE_CB_BUSY (1U << 31) #define GRBM_SOFT_RESET 0x8020 #define SOFT_RESET_CP (1 << 0) #define SOFT_RESET_CB (1 << 1) #define SOFT_RESET_DB (1 << 3) #define SOFT_RESET_GDS (1 << 4) #define SOFT_RESET_PA (1 << 5) #define SOFT_RESET_SC (1 << 6) #define SOFT_RESET_SPI (1 << 8) #define SOFT_RESET_SH (1 << 9) #define SOFT_RESET_SX (1 << 10) #define SOFT_RESET_TC (1 << 11) #define SOFT_RESET_TA (1 << 12) #define SOFT_RESET_VGT (1 << 14) #define SOFT_RESET_IA (1 << 15) #define GRBM_GFX_INDEX 0x802C #define INSTANCE_INDEX(x) ((x) << 0) #define SE_INDEX(x) ((x) << 16) #define INSTANCE_BROADCAST_WRITES (1 << 30) -#define SE_BROADCAST_WRITES (1 << 31) +#define SE_BROADCAST_WRITES (1U << 31) #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define SCRATCH_REG2 0x8508 #define SCRATCH_REG3 0x850C #define SCRATCH_REG4 0x8510 #define SCRATCH_REG5 0x8514 #define SCRATCH_REG6 0x8518 #define SCRATCH_REG7 0x851C #define SCRATCH_UMSK 0x8540 #define SCRATCH_ADDR 0x8544 #define CP_SEM_WAIT_TIMER 0x85BC #define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8 #define CP_COHER_CNTL2 0x85E8 #define CP_STALLED_STAT1 0x8674 #define CP_STALLED_STAT2 0x8678 #define CP_BUSY_STAT 0x867C #define CP_STAT 0x8680 #define CP_ME_CNTL 0x86D8 #define CP_ME_HALT (1 << 28) #define CP_PFP_HALT (1 << 26) #define CP_RB2_RPTR 0x86f8 #define CP_RB1_RPTR 0x86fc #define CP_RB0_RPTR 0x8700 #define CP_RB_WPTR_DELAY 0x8704 #define CP_MEQ_THRESHOLDS 0x8764 #define MEQ1_START(x) ((x) << 0) #define MEQ2_START(x) ((x) << 8) #define CP_PERFMON_CNTL 0x87FC #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x) << 0) #define VC_ONLY 0 #define TC_ONLY 1 #define VC_AND_TC 2 #define AUTO_INVLD_EN(x) ((x) << 6) #define NO_AUTO 0 #define ES_AUTO 1 #define GS_AUTO 2 #define ES_AND_GS_AUTO 3 #define VGT_GS_VERTEX_REUSE 0x88D4 #define CC_GC_SHADER_PIPE_CONFIG 0x8950 #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES_MASK 0x0000FF00 #define INACTIVE_QD_PIPES_SHIFT 8 #define INACTIVE_SIMDS(x) ((x) << 16) #define INACTIVE_SIMDS_MASK 0xFFFF0000 #define INACTIVE_SIMDS_SHIFT 16 #define VGT_PRIMITIVE_TYPE 0x8958 #define VGT_NUM_INSTANCES 0x8974 #define VGT_TF_RING_SIZE 0x8988 #define VGT_OFFCHIP_LDS_BASE 0x89b4 #define PA_SC_LINE_STIPPLE_STATE 0x8B10 #define PA_CL_ENHANCE 0x8A14 #define CLIP_VTX_REORDER_ENA (1 << 0) #define NUM_CLIP_SEQ(x) ((x) << 1) #define PA_SC_FIFO_SIZE 
0x8BCC #define SC_PRIM_FIFO_SIZE(x) ((x) << 0) #define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) #define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) #define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) #define SQ_CONFIG 0x8C00 #define VC_ENABLE (1 << 0) #define EXPORT_SRC_C (1 << 1) #define GFX_PRIO(x) ((x) << 2) #define CS1_PRIO(x) ((x) << 4) #define CS2_PRIO(x) ((x) << 6) #define SQ_GPR_RESOURCE_MGMT_1 0x8C04 #define NUM_PS_GPRS(x) ((x) << 0) #define NUM_VS_GPRS(x) ((x) << 16) #define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) #define SQ_ESGS_RING_SIZE 0x8c44 #define SQ_GSVS_RING_SIZE 0x8c4c #define SQ_ESTMP_RING_BASE 0x8c50 #define SQ_ESTMP_RING_SIZE 0x8c54 #define SQ_GSTMP_RING_BASE 0x8c58 #define SQ_GSTMP_RING_SIZE 0x8c5c #define SQ_VSTMP_RING_BASE 0x8c60 #define SQ_VSTMP_RING_SIZE 0x8c64 #define SQ_PSTMP_RING_BASE 0x8c68 #define SQ_PSTMP_RING_SIZE 0x8c6c #define SQ_MS_FIFO_SIZES 0x8CF0 #define CACHE_FIFO_SIZE(x) ((x) << 0) #define FETCH_FIFO_HIWATER(x) ((x) << 8) #define DONE_FIFO_HIWATER(x) ((x) << 16) #define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) #define SQ_LSTMP_RING_BASE 0x8e10 #define SQ_LSTMP_RING_SIZE 0x8e14 #define SQ_HSTMP_RING_BASE 0x8e18 #define SQ_HSTMP_RING_SIZE 0x8e1c #define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C #define DYN_GPR_ENABLE (1 << 8) #define SQ_CONST_MEM_BASE 0x8df8 #define SX_EXPORT_BUFFER_SIZES 0x900C #define COLOR_BUFFER_SIZE(x) ((x) << 0) #define POSITION_BUFFER_SIZE(x) ((x) << 8) #define SMX_BUFFER_SIZE(x) ((x) << 16) #define SX_DEBUG_1 0x9058 #define ENABLE_NEW_SMX_ADDRESS (1 << 16) #define SPI_CONFIG_CNTL 0x9100 #define GPR_WRITE_PRIORITY(x) ((x) << 0) #define SPI_CONFIG_CNTL_1 0x913C #define VTX_DONE_DELAY(x) ((x) << 0) #define INTERP_ONE_PRIM_PER_ROW (1 << 4) #define CRC_SIMD_ID_WADDR_DISABLE (1 << 8) #define CGTS_TCC_DISABLE 0x9148 #define CGTS_USER_TCC_DISABLE 0x914C #define TCC_DISABLE_MASK 0xFFFF0000 #define TCC_DISABLE_SHIFT 16 #define CGTS_SM_CTRL_REG 0x9150 #define OVERRIDE (1 << 21) #define TA_CNTL_AUX 0x9508 #define DISABLE_CUBE_WRAP (1 << 0) #define DISABLE_CUBE_ANISO (1 << 1) #define TCP_CHAN_STEER_LO 0x960c #define TCP_CHAN_STEER_HI 0x9610 #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) #define GB_ADDR_CONFIG 0x98F8 #define NUM_PIPES(x) ((x) << 0) #define NUM_PIPES_MASK 0x00000007 #define NUM_PIPES_SHIFT 0 #define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) #define PIPE_INTERLEAVE_SIZE_MASK 0x00000070 #define PIPE_INTERLEAVE_SIZE_SHIFT 4 #define BANK_INTERLEAVE_SIZE(x) ((x) << 8) #define NUM_SHADER_ENGINES(x) ((x) << 12) #define NUM_SHADER_ENGINES_MASK 0x00003000 #define NUM_SHADER_ENGINES_SHIFT 12 #define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) #define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000 #define SHADER_ENGINE_TILE_SIZE_SHIFT 16 #define NUM_GPUS(x) ((x) << 20) #define NUM_GPUS_MASK 0x00700000 #define NUM_GPUS_SHIFT 20 #define MULTI_GPU_TILE_SIZE(x) ((x) << 24) #define MULTI_GPU_TILE_SIZE_MASK 0x03000000 #define MULTI_GPU_TILE_SIZE_SHIFT 24 #define ROW_SIZE(x) ((x) << 28) #define ROW_SIZE_MASK 0x30000000 #define ROW_SIZE_SHIFT 28 #define NUM_LOWER_PIPES(x) ((x) << 30) #define NUM_LOWER_PIPES_MASK 0x40000000 #define NUM_LOWER_PIPES_SHIFT 30 #define GB_BACKEND_MAP 0x98FC #define CB_PERF_CTR0_SEL_0 0x9A20 #define CB_PERF_CTR0_SEL_1 0x9A24 #define CB_PERF_CTR1_SEL_0 0x9A28 #define CB_PERF_CTR1_SEL_1 0x9A2C #define CB_PERF_CTR2_SEL_0 0x9A30 #define CB_PERF_CTR2_SEL_1 0x9A34 #define CB_PERF_CTR3_SEL_0 0x9A38 #define CB_PERF_CTR3_SEL_1 0x9A3C #define 
GC_USER_RB_BACKEND_DISABLE 0x9B7C #define BACKEND_DISABLE_MASK 0x00FF0000 #define BACKEND_DISABLE_SHIFT 16 #define SMX_DC_CTL0 0xA020 #define USE_HASH_FUNCTION (1 << 0) #define NUMBER_OF_SETS(x) ((x) << 1) #define FLUSH_ALL_ON_EVENT (1 << 10) #define STALL_ON_EVENT (1 << 11) #define SMX_EVENT_CTL 0xA02C #define ES_FLUSH_CTL(x) ((x) << 0) #define GS_FLUSH_CTL(x) ((x) << 3) #define ACK_FLUSH_CTL(x) ((x) << 6) #define SYNC_FLUSH_CTL (1 << 8) #define CP_RB0_BASE 0xC100 #define CP_RB0_CNTL 0xC104 #define RB_BUFSZ(x) ((x) << 0) #define RB_BLKSZ(x) ((x) << 8) #define RB_NO_UPDATE (1 << 27) -#define RB_RPTR_WR_ENA (1 << 31) +#define RB_RPTR_WR_ENA (1U << 31) #define BUF_SWAP_32BIT (2 << 16) #define CP_RB0_RPTR_ADDR 0xC10C #define CP_RB0_RPTR_ADDR_HI 0xC110 #define CP_RB0_WPTR 0xC114 #define CP_INT_CNTL 0xC124 # define CNTX_BUSY_INT_ENABLE (1 << 19) # define CNTX_EMPTY_INT_ENABLE (1 << 20) # define TIME_STAMP_INT_ENABLE (1 << 26) #define CP_RB1_BASE 0xC180 #define CP_RB1_CNTL 0xC184 #define CP_RB1_RPTR_ADDR 0xC188 #define CP_RB1_RPTR_ADDR_HI 0xC18C #define CP_RB1_WPTR 0xC190 #define CP_RB2_BASE 0xC194 #define CP_RB2_CNTL 0xC198 #define CP_RB2_RPTR_ADDR 0xC19C #define CP_RB2_RPTR_ADDR_HI 0xC1A0 #define CP_RB2_WPTR 0xC1A4 #define CP_PFP_UCODE_ADDR 0xC150 #define CP_PFP_UCODE_DATA 0xC154 #define CP_ME_RAM_RADDR 0xC158 #define CP_ME_RAM_WADDR 0xC15C #define CP_ME_RAM_DATA 0xC160 #define CP_DEBUG 0xC1FC #define VGT_EVENT_INITIATOR 0x28a90 # define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0) # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) /* * PM4 */ #define PACKET_TYPE0 0 #define PACKET_TYPE1 1 #define PACKET_TYPE2 2 #define PACKET_TYPE3 3 #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ (((reg) >> 2) & 0xFFFF) | \ ((n) & 0x3FFF) << 16) #define CP_PACKET2 0x80000000 #define PACKET2_PAD_SHIFT 0 #define PACKET2_PAD_MASK (0x3fffffff << 0) #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ (((op) & 0xFF) << 8) | \ ((n) & 0x3FFF) << 16) /* Packet 3 types */ #define PACKET3_NOP 0x10 #define PACKET3_SET_BASE 0x11 #define PACKET3_CLEAR_STATE 0x12 #define PACKET3_INDEX_BUFFER_SIZE 0x13 #define PACKET3_DEALLOC_STATE 0x14 #define PACKET3_DISPATCH_DIRECT 0x15 #define PACKET3_DISPATCH_INDIRECT 0x16 #define PACKET3_INDIRECT_BUFFER_END 0x17 #define PACKET3_MODE_CONTROL 0x18 #define PACKET3_SET_PREDICATION 0x20 #define PACKET3_REG_RMW 0x21 #define PACKET3_COND_EXEC 0x22 #define PACKET3_PRED_EXEC 0x23 #define PACKET3_DRAW_INDIRECT 0x24 #define PACKET3_DRAW_INDEX_INDIRECT 0x25 #define PACKET3_INDEX_BASE 0x26 #define PACKET3_DRAW_INDEX_2 0x27 #define PACKET3_CONTEXT_CONTROL 0x28 #define PACKET3_DRAW_INDEX_OFFSET 0x29 #define PACKET3_INDEX_TYPE 0x2A #define PACKET3_DRAW_INDEX 0x2B #define PACKET3_DRAW_INDEX_AUTO 0x2D #define PACKET3_DRAW_INDEX_IMMD 0x2E #define PACKET3_NUM_INSTANCES 0x2F #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 #define PACKET3_INDIRECT_BUFFER 0x32 #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 #define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 #define PACKET3_WRITE_DATA 0x37 #define PACKET3_MEM_SEMAPHORE 0x39 #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_WAIT_REG_MEM 0x3C #define PACKET3_MEM_WRITE 0x3D #define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # 
define PACKET3_CB1_DEST_BASE_ENA (1 << 7) # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) # define PACKET3_DB_DEST_BASE_ENA (1 << 14) # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) # define PACKET3_CB11_DEST_BASE_ENA (1 << 18) # define PACKET3_FULL_CACHE_ENA (1 << 20) # define PACKET3_TC_ACTION_ENA (1 << 23) # define PACKET3_CB_ACTION_ENA (1 << 25) # define PACKET3_DB_ACTION_ENA (1 << 26) # define PACKET3_SH_ACTION_ENA (1 << 27) # define PACKET3_SX_ACTION_ENA (1 << 28) #define PACKET3_ME_INITIALIZE 0x44 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 #define PACKET3_EVENT_WRITE 0x46 #define EVENT_TYPE(x) ((x) << 0) #define EVENT_INDEX(x) ((x) << 8) /* 0 - any non-TS event * 1 - ZPASS_DONE * 2 - SAMPLE_PIPELINESTAT * 3 - SAMPLE_STREAMOUTSTAT* * 4 - *S_PARTIAL_FLUSH * 5 - TS events */ #define PACKET3_EVENT_WRITE_EOP 0x47 #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data * 2 - send 64bit data * 3 - send 64bit counter value */ #define INT_SEL(x) ((x) << 24) /* 0 - none * 1 - interrupt only (DATA_SEL = 0) * 2 - interrupt when data write is confirmed */ #define PACKET3_EVENT_WRITE_EOS 0x48 #define PACKET3_PREAMBLE_CNTL 0x4A # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D #define PACKET3_ALU_PS_CONST_UPDATE 0x4E #define PACKET3_ALU_VS_CONST_UPDATE 0x4F #define PACKET3_ONE_REG_WRITE 0x57 #define PACKET3_SET_CONFIG_REG 0x68 #define PACKET3_SET_CONFIG_REG_START 0x00008000 #define PACKET3_SET_CONFIG_REG_END 0x0000ac00 #define PACKET3_SET_CONTEXT_REG 0x69 #define PACKET3_SET_CONTEXT_REG_START 0x00028000 #define PACKET3_SET_CONTEXT_REG_END 0x00029000 #define PACKET3_SET_ALU_CONST 0x6A /* alu const buffers only; no reg file */ #define PACKET3_SET_BOOL_CONST 0x6B #define PACKET3_SET_BOOL_CONST_START 0x0003a500 #define PACKET3_SET_BOOL_CONST_END 0x0003a518 #define PACKET3_SET_LOOP_CONST 0x6C #define PACKET3_SET_LOOP_CONST_START 0x0003a200 #define PACKET3_SET_LOOP_CONST_END 0x0003a500 #define PACKET3_SET_RESOURCE 0x6D #define PACKET3_SET_RESOURCE_START 0x00030000 #define PACKET3_SET_RESOURCE_END 0x00038000 #define PACKET3_SET_SAMPLER 0x6E #define PACKET3_SET_SAMPLER_START 0x0003c000 #define PACKET3_SET_SAMPLER_END 0x0003c600 #define PACKET3_SET_CTL_CONST 0x6F #define PACKET3_SET_CTL_CONST_START 0x0003cff0 #define PACKET3_SET_CTL_CONST_END 0x0003ff0c #define PACKET3_SET_RESOURCE_OFFSET 0x70 #define PACKET3_SET_ALU_CONST_VS 0x71 #define PACKET3_SET_ALU_CONST_DI 0x72 #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 #define PACKET3_SET_RESOURCE_INDIRECT 0x74 #define PACKET3_SET_APPEND_CNT 0x75 #define PACKET3_ME_WRITE 0x7A /* ASYNC DMA - first instance at 0xd000, second at 0xd800 */ #define DMA0_REGISTER_OFFSET 0x0 /* not a register */ #define DMA1_REGISTER_OFFSET 0x800 /* not a register */ #define DMA_RB_CNTL 0xd000 # define DMA_RB_ENABLE (1 << 0) # define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ # define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) # define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 
*/ #define DMA_RB_BASE 0xd004 #define DMA_RB_RPTR 0xd008 #define DMA_RB_WPTR 0xd00c #define DMA_RB_RPTR_ADDR_HI 0xd01c #define DMA_RB_RPTR_ADDR_LO 0xd020 #define DMA_IB_CNTL 0xd024 # define DMA_IB_ENABLE (1 << 0) # define DMA_IB_SWAP_ENABLE (1 << 4) -# define CMD_VMID_FORCE (1 << 31) +# define CMD_VMID_FORCE (1U << 31) #define DMA_IB_RPTR 0xd028 #define DMA_CNTL 0xd02c # define TRAP_ENABLE (1 << 0) # define SEM_INCOMPLETE_INT_ENABLE (1 << 1) # define SEM_WAIT_INT_ENABLE (1 << 2) # define DATA_SWAP_ENABLE (1 << 3) # define FENCE_SWAP_ENABLE (1 << 4) # define CTXEMPTY_INT_ENABLE (1 << 28) #define DMA_STATUS_REG 0xd034 # define DMA_IDLE (1 << 0) #define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044 #define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048 #define DMA_TILING_CONFIG 0xd0b8 #define DMA_MODE 0xd0bc #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ (((t) & 0x1) << 23) | \ (((s) & 0x1) << 22) | \ (((n) & 0xFFFFF) << 0)) #define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \ (((vmid) & 0xF) << 20) | \ (((n) & 0xFFFFF) << 0)) /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 #define DMA_PACKET_INDIRECT_BUFFER 0x4 #define DMA_PACKET_SEMAPHORE 0x5 #define DMA_PACKET_FENCE 0x6 #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_SRBM_WRITE 0x9 #define DMA_PACKET_CONSTANT_FILL 0xd #define DMA_PACKET_NOP 0xf #endif Index: head/sys/dev/drm2/radeon/r200.c =================================================================== --- head/sys/dev/drm2/radeon/r200.c (revision 258779) +++ head/sys/dev/drm2/radeon/r200.c (revision 258780) @@ -1,552 +1,552 @@ /* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include __FBSDID("$FreeBSD$"); #include #include #include "radeon_reg.h" #include "radeon.h" #include "radeon_asic.h" #include "r100d.h" #include "r200_reg_safe.h" #include "r100_track.h" static int r200_get_vtx_size_0(uint32_t vtx_fmt_0) { int vtx_size, i; vtx_size = 2; if (vtx_fmt_0 & R200_VTX_Z0) vtx_size++; if (vtx_fmt_0 & R200_VTX_W0) vtx_size++; /* blend weight */ if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT)) vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7; if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL) vtx_size++; if (vtx_fmt_0 & R200_VTX_N0) vtx_size += 3; if (vtx_fmt_0 & R200_VTX_POINT_SIZE) vtx_size++; if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG) vtx_size++; if (vtx_fmt_0 & R200_VTX_SHININESS_0) vtx_size++; if (vtx_fmt_0 & R200_VTX_SHININESS_1) vtx_size++; for (i = 0; i < 8; i++) { int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3; switch (color_size) { case 0: break; case 1: vtx_size++; break; case 2: vtx_size += 3; break; case 3: vtx_size += 4; break; } } if (vtx_fmt_0 & R200_VTX_XY1) vtx_size += 2; if (vtx_fmt_0 & R200_VTX_Z1) vtx_size++; if (vtx_fmt_0 & R200_VTX_W1) vtx_size++; if (vtx_fmt_0 & R200_VTX_N1) vtx_size += 3; return vtx_size; } int r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; uint32_t size; uint32_t cur_size; int i, num_loops; int r = 0; /* radeon pitch is /64 */ size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; num_loops = DIV_ROUND_UP(size, 0x1FFFFF); r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); return r; } /* Must wait for 2D idle & clean before DMA or hangs might happen */ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, (1 << 16)); for (i = 0; i < num_loops; i++) { cur_size = size; if (cur_size > 0x1FFFFF) { cur_size = 0x1FFFFF; } size -= cur_size; radeon_ring_write(ring, PACKET0(0x720, 2)); radeon_ring_write(ring, src_offset); radeon_ring_write(ring, dst_offset); - radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30)); + radeon_ring_write(ring, cur_size | (1U << 31) | (1 << 30)); src_offset += cur_size; dst_offset += cur_size; } radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); if (fence) { r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); } radeon_ring_unlock_commit(rdev, ring); return r; } static int r200_get_vtx_size_1(uint32_t vtx_fmt_1) { int vtx_size, i, tex_size; vtx_size = 0; for (i = 0; i < 6; i++) { tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7; if (tex_size > 4) continue; vtx_size += tex_size; } return vtx_size; } int r200_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) { struct radeon_cs_reloc *reloc; struct r100_cs_track *track; volatile uint32_t *ib; uint32_t tmp; int r; int i; int face; u32 tile_flags = 0; u32 idx_value; ib = p->ib.ptr; track = (struct r100_cs_track *)p->track; idx_value = radeon_get_ib_value(p, idx); switch (reg) { case RADEON_CRTC_GUI_TRIG_VLINE: r = r100_cs_packet_parse_vline(p); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } break; /* FIXME: only allow PACKET3 blit? 
easier to check for out of * range access */ case RADEON_DST_PITCH_OFFSET: case RADEON_SRC_PITCH_OFFSET: r = r100_reloc_pitch_offset(p, pkt, idx, reg); if (r) return r; break; case RADEON_RB3D_DEPTHOFFSET: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } track->zb.robj = reloc->robj; track->zb.offset = idx_value; track->zb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case RADEON_RB3D_COLOROFFSET: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } track->cb[0].robj = reloc->robj; track->cb[0].offset = idx_value; track->cb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case R200_PP_TXOFFSET_0: case R200_PP_TXOFFSET_1: case R200_PP_TXOFFSET_2: case R200_PP_TXOFFSET_3: case R200_PP_TXOFFSET_4: case R200_PP_TXOFFSET_5: i = (reg - R200_PP_TXOFFSET_0) / 24; r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) tile_flags |= R200_TXO_MACRO_TILE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) tile_flags |= R200_TXO_MICRO_TILE; tmp = idx_value & ~(0x7 << 2); tmp |= tile_flags; ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); } else ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[i].robj = reloc->robj; track->tex_dirty = true; break; case R200_PP_CUBIC_OFFSET_F1_0: case R200_PP_CUBIC_OFFSET_F2_0: case R200_PP_CUBIC_OFFSET_F3_0: case R200_PP_CUBIC_OFFSET_F4_0: case R200_PP_CUBIC_OFFSET_F5_0: case R200_PP_CUBIC_OFFSET_F1_1: case R200_PP_CUBIC_OFFSET_F2_1: case R200_PP_CUBIC_OFFSET_F3_1: case R200_PP_CUBIC_OFFSET_F4_1: case R200_PP_CUBIC_OFFSET_F5_1: case R200_PP_CUBIC_OFFSET_F1_2: case R200_PP_CUBIC_OFFSET_F2_2: case R200_PP_CUBIC_OFFSET_F3_2: case R200_PP_CUBIC_OFFSET_F4_2: case R200_PP_CUBIC_OFFSET_F5_2: case R200_PP_CUBIC_OFFSET_F1_3: case R200_PP_CUBIC_OFFSET_F2_3: case R200_PP_CUBIC_OFFSET_F3_3: case R200_PP_CUBIC_OFFSET_F4_3: case R200_PP_CUBIC_OFFSET_F5_3: case R200_PP_CUBIC_OFFSET_F1_4: case R200_PP_CUBIC_OFFSET_F2_4: case R200_PP_CUBIC_OFFSET_F3_4: case R200_PP_CUBIC_OFFSET_F4_4: case R200_PP_CUBIC_OFFSET_F5_4: case R200_PP_CUBIC_OFFSET_F1_5: case R200_PP_CUBIC_OFFSET_F2_5: case R200_PP_CUBIC_OFFSET_F3_5: case R200_PP_CUBIC_OFFSET_F4_5: case R200_PP_CUBIC_OFFSET_F5_5: i = (reg - R200_PP_TXOFFSET_0) / 24; face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4; r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } track->textures[i].cube_info[face - 1].offset = idx_value; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[i].cube_info[face - 1].robj = reloc->robj; track->tex_dirty = true; break; case RADEON_RE_WIDTH_HEIGHT: track->maxy = ((idx_value >> 16) & 0x7FF); track->cb_dirty = true; track->zb_dirty = true; break; case RADEON_RB3D_COLORPITCH: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) tile_flags |= RADEON_COLOR_TILE_ENABLE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; tmp = 
idx_value & ~(0x7 << 16); tmp |= tile_flags; ib[idx] = tmp; } else ib[idx] = idx_value; track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; track->cb_dirty = true; break; case RADEON_RB3D_DEPTHPITCH: track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; track->zb_dirty = true; break; case RADEON_RB3D_CNTL: switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { case 7: case 8: case 9: case 11: case 12: track->cb[0].cpp = 1; break; case 3: case 4: case 15: track->cb[0].cpp = 2; break; case 6: track->cb[0].cpp = 4; break; default: DRM_ERROR("Invalid color buffer format (%d) !\n", ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); return -EINVAL; } if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) { DRM_ERROR("No support for depth xy offset in kms\n"); return -EINVAL; } track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); track->cb_dirty = true; track->zb_dirty = true; break; case RADEON_RB3D_ZSTENCILCNTL: switch (idx_value & 0xf) { case 0: track->zb.cpp = 2; break; case 2: case 3: case 4: case 5: case 9: case 11: track->zb.cpp = 4; break; default: break; } track->zb_dirty = true; break; case RADEON_RB3D_ZPASS_ADDR: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case RADEON_PP_CNTL: { uint32_t temp = idx_value >> 4; for (i = 0; i < track->num_texture; i++) track->textures[i].enabled = !!(temp & (1 << i)); track->tex_dirty = true; } break; case RADEON_SE_VF_CNTL: track->vap_vf_cntl = idx_value; break; case 0x210c: /* VAP_VF_MAX_VTX_INDX */ track->max_indx = idx_value & 0x00FFFFFFUL; break; case R200_SE_VTX_FMT_0: track->vtx_size = r200_get_vtx_size_0(idx_value); break; case R200_SE_VTX_FMT_1: track->vtx_size += r200_get_vtx_size_1(idx_value); break; case R200_PP_TXSIZE_0: case R200_PP_TXSIZE_1: case R200_PP_TXSIZE_2: case R200_PP_TXSIZE_3: case R200_PP_TXSIZE_4: case R200_PP_TXSIZE_5: i = (reg - R200_PP_TXSIZE_0) / 32; track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; track->tex_dirty = true; break; case R200_PP_TXPITCH_0: case R200_PP_TXPITCH_1: case R200_PP_TXPITCH_2: case R200_PP_TXPITCH_3: case R200_PP_TXPITCH_4: case R200_PP_TXPITCH_5: i = (reg - R200_PP_TXPITCH_0) / 32; track->textures[i].pitch = idx_value + 32; track->tex_dirty = true; break; case R200_PP_TXFILTER_0: case R200_PP_TXFILTER_1: case R200_PP_TXFILTER_2: case R200_PP_TXFILTER_3: case R200_PP_TXFILTER_4: case R200_PP_TXFILTER_5: i = (reg - R200_PP_TXFILTER_0) / 32; track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK) >> R200_MAX_MIP_LEVEL_SHIFT); tmp = (idx_value >> 23) & 0x7; if (tmp == 2 || tmp == 6) track->textures[i].roundup_w = false; tmp = (idx_value >> 27) & 0x7; if (tmp == 2 || tmp == 6) track->textures[i].roundup_h = false; track->tex_dirty = true; break; case R200_PP_TXMULTI_CTL_0: case R200_PP_TXMULTI_CTL_1: case R200_PP_TXMULTI_CTL_2: case R200_PP_TXMULTI_CTL_3: case R200_PP_TXMULTI_CTL_4: case R200_PP_TXMULTI_CTL_5: i = (reg - R200_PP_TXMULTI_CTL_0) / 32; break; case R200_PP_TXFORMAT_X_0: case R200_PP_TXFORMAT_X_1: case R200_PP_TXFORMAT_X_2: case R200_PP_TXFORMAT_X_3: case R200_PP_TXFORMAT_X_4: case R200_PP_TXFORMAT_X_5: i = (reg - R200_PP_TXFORMAT_X_0) / 32; track->textures[i].txdepth = idx_value & 0x7; tmp = (idx_value >> 16) & 0x3; /* 2D, 3D, CUBE */ switch (tmp) { case 0: case 3: case 4: case 5: case 6: case 
7: /* 1D/2D */ track->textures[i].tex_coord_type = 0; break; case 1: /* CUBE */ track->textures[i].tex_coord_type = 2; break; case 2: /* 3D */ track->textures[i].tex_coord_type = 1; break; } track->tex_dirty = true; break; case R200_PP_TXFORMAT_0: case R200_PP_TXFORMAT_1: case R200_PP_TXFORMAT_2: case R200_PP_TXFORMAT_3: case R200_PP_TXFORMAT_4: case R200_PP_TXFORMAT_5: i = (reg - R200_PP_TXFORMAT_0) / 32; if (idx_value & R200_TXFORMAT_NON_POWER2) { track->textures[i].use_pitch = 1; } else { track->textures[i].use_pitch = 0; track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); } if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE) track->textures[i].lookup_disable = true; switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { case R200_TXFORMAT_I8: case R200_TXFORMAT_RGB332: case R200_TXFORMAT_Y8: track->textures[i].cpp = 1; track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_AI88: case R200_TXFORMAT_ARGB1555: case R200_TXFORMAT_RGB565: case R200_TXFORMAT_ARGB4444: case R200_TXFORMAT_VYUY422: case R200_TXFORMAT_YVYU422: case R200_TXFORMAT_LDVDU655: case R200_TXFORMAT_DVDU88: case R200_TXFORMAT_AVYU4444: track->textures[i].cpp = 2; track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_ARGB8888: case R200_TXFORMAT_RGBA8888: case R200_TXFORMAT_ABGR8888: case R200_TXFORMAT_BGR111110: case R200_TXFORMAT_LDVDU8888: track->textures[i].cpp = 4; track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_DXT1: track->textures[i].cpp = 1; track->textures[i].compress_format = R100_TRACK_COMP_DXT1; break; case R200_TXFORMAT_DXT23: case R200_TXFORMAT_DXT45: track->textures[i].cpp = 1; track->textures[i].compress_format = R100_TRACK_COMP_DXT1; break; } track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); track->tex_dirty = true; break; case R200_PP_CUBIC_FACES_0: case R200_PP_CUBIC_FACES_1: case R200_PP_CUBIC_FACES_2: case R200_PP_CUBIC_FACES_3: case R200_PP_CUBIC_FACES_4: case R200_PP_CUBIC_FACES_5: tmp = idx_value; i = (reg - R200_PP_CUBIC_FACES_0) / 32; for (face = 0; face < 4; face++) { track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); } track->tex_dirty = true; break; default: DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", reg, idx); return -EINVAL; } return 0; } void r200_set_safe_registers(struct radeon_device *rdev) { rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r200_reg_safe_bm); } Index: head/sys/dev/drm2/radeon/r300.c =================================================================== --- head/sys/dev/drm2/radeon/r300.c (revision 258779) +++ head/sys/dev/drm2/radeon/r300.c (revision 258780) @@ -1,1566 +1,1566 @@ /* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "radeon_reg.h" #include "radeon.h" #include "radeon_asic.h" #include #include "r100_track.h" #include "r300d.h" #include "rv350d.h" #include "r300_reg_safe.h" /* This file gathers functions specific to: r300,r350,rv350,rv370,rv380 * * GPU Errata: * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL * using MMIO to flush the host path read cache; this leads to a HARDLOCKUP. * However, scheduling such a write to the ring seems harmless; I suspect * the CP read collides with the flush somehow, or maybe the MC, hard to * tell. (Jerome Glisse) */ /* * rv370,rv380 PCIE GART */ static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) { uint32_t tmp; int i; /* Workaround for a HW bug: flush twice */ for (i = 0; i < 2; i++) { tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); } mb(); } #define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_READABLE (1 << 3) int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { volatile uint32_t *ptr = rdev->gart.ptr; /* valid page indices are 0 .. num_gpu_pages - 1 */ if (i < 0 || i >= rdev->gart.num_gpu_pages) { return -EINVAL; } addr = (lower_32_bits(addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24) | R300_PTE_WRITEABLE | R300_PTE_READABLE; /* on x86 we want this to be CPU endian; on powerpc without HW swappers * it'll get swapped on the way into VRAM - so no need for cpu_to_le32 * on VRAM tables */ ptr += i; *ptr = (uint32_t)addr; return 0; } int rv370_pcie_gart_init(struct radeon_device *rdev) { int r; if (rdev->gart.robj) { DRM_ERROR("RV370 PCIE GART already initialized\n"); return 0; } /* Initialize common gart structure */ r = radeon_gart_init(rdev); if (r) return r; r = rv370_debugfs_pcie_gart_info_init(rdev); if (r) DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; return radeon_gart_table_vram_alloc(rdev); } int rv370_pcie_gart_enable(struct radeon_device *rdev) { uint32_t table_addr; uint32_t tmp; int r; if (rdev->gart.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE
GART.\n"); return -EINVAL; } r = radeon_gart_table_vram_pin(rdev); if (r) return r; radeon_gart_restore(rdev); /* discard memory request outside of configured range */ tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start); tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK; WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); table_addr = rdev->gart.table_addr; WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr); /* FIXME: setup default page */ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); /* Clear error */ WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); tmp |= RADEON_PCIE_TX_GART_EN; tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); rv370_pcie_gart_tlb_flush(rdev); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(rdev->mc.gtt_size >> 20), (unsigned long long)table_addr); rdev->gart.ready = true; return 0; } void rv370_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); radeon_gart_table_vram_unpin(rdev); } void rv370_pcie_gart_fini(struct radeon_device *rdev) { radeon_gart_fini(rdev); rv370_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); } void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { struct radeon_ring *ring = &rdev->ring[fence->ring]; /* Who ever call radeon_fence_emit should call ring_lock and ask * for enough space (today caller are ib schedule and buffer move) */ /* Write SC register so SC & US assert idle */ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0)); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0)); radeon_ring_write(ring, 0); /* Flush 3D cache */ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_RB3D_DC_FLUSH); radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_ZC_FLUSH); /* Wait until IDLE & CLEAN */ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN | RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_DMA_GUI_IDLE)); radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); radeon_ring_write(ring, rdev->config.r300.hdp_cntl | RADEON_HDP_READ_BUFFER_INVALIDATE); radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); radeon_ring_write(ring, rdev->config.r300.hdp_cntl); /* Emit fence sequence & fire IRQ */ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); radeon_ring_write(ring, RADEON_SW_INT_FIRE); } void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) { unsigned gb_tile_config; int r; /* Sub pixel 1/12 so we can have 4K rendering according to doc */ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); switch(rdev->num_gb_pipes) { case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; case 3: 
gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; case 1: default: gb_tile_config |= R300_PIPE_COUNT_RV350; break; } r = radeon_ring_lock(rdev, ring, 64); if (r) { return; } radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); radeon_ring_write(ring, RADEON_ISYNC_ANY2D_IDLE3D | RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI); radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0)); radeon_ring_write(ring, gb_tile_config); radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0)); radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG); radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0)); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0)); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0)); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0)); radeon_ring_write(ring, ((6 << R300_MS_X0_SHIFT) | (6 << R300_MS_Y0_SHIFT) | (6 << R300_MS_X1_SHIFT) | (6 << R300_MS_Y1_SHIFT) | (6 << R300_MS_X2_SHIFT) | (6 << R300_MS_Y2_SHIFT) | (6 << R300_MSBD0_Y_SHIFT) | (6 << R300_MSBD0_X_SHIFT))); radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0)); radeon_ring_write(ring, ((6 << R300_MS_X3_SHIFT) | (6 << R300_MS_Y3_SHIFT) | (6 << R300_MS_X4_SHIFT) | (6 << R300_MS_Y4_SHIFT) | (6 << R300_MS_X5_SHIFT) | (6 << R300_MS_Y5_SHIFT) | (6 << R300_MSBD1_SHIFT))); radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0)); radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0)); radeon_ring_write(ring, R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0)); radeon_ring_write(ring, R300_GEOMETRY_ROUND_NEAREST | R300_COLOR_ROUND_NEAREST); radeon_ring_unlock_commit(rdev, ring); } static void r300_errata(struct radeon_device *rdev) { rdev->pll_errata = 0; if (rdev->family == CHIP_R300 && (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) { rdev->pll_errata |= CHIP_ERRATA_R300_CG; } } int r300_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; uint32_t tmp; for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ tmp = RREG32(RADEON_MC_STATUS); if (tmp & R300_MC_IDLE) { return 0; } DRM_UDELAY(1); } return -1; } static void r300_gpu_init(struct radeon_device *rdev) { uint32_t gb_tile_config, tmp; if ((rdev->family == CHIP_R300 && rdev->ddev->pci_device != 0x4144) || (rdev->family == CHIP_R350 && rdev->ddev->pci_device != 0x4148)) { /* r300,r350 */ rdev->num_gb_pipes = 2; } else { /* rv350,rv370,rv380,r300 AD, r350 AH */ rdev->num_gb_pipes = 1; } rdev->num_z_pipes = 1; gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); switch 
(rdev->num_gb_pipes) { case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; default: case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; } WREG32(R300_GB_TILE_CONFIG, gb_tile_config); if (r100_gui_wait_for_idle(rdev)) { DRM_ERROR("Failed to wait GUI idle while " "programming pipes. Bad things might happen.\n"); } tmp = RREG32(R300_DST_PIPE_CONFIG); WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); WREG32(R300_RB2D_DSTCACHE_MODE, R300_DC_AUTOFLUSH_ENABLE | R300_DC_DC_DISABLE_IGNORE_PE); if (r100_gui_wait_for_idle(rdev)) { DRM_ERROR("Failed to wait GUI idle while " "programming pipes. Bad things might happen.\n"); } if (r300_mc_wait_for_idle(rdev)) { DRM_ERROR("Failed to wait MC idle while " "programming pipes. Bad things might happen.\n"); } DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n", rdev->num_gb_pipes, rdev->num_z_pipes); } int r300_asic_reset(struct radeon_device *rdev) { struct r100_mc_save save; u32 status, tmp; int ret = 0; status = RREG32(R_000E40_RBBM_STATUS); if (!G_000E40_GUI_ACTIVE(status)) { return 0; } r100_mc_stop(rdev, &save); status = RREG32(R_000E40_RBBM_STATUS); dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); /* stop CP */ WREG32(RADEON_CP_CSQ_CNTL, 0); tmp = RREG32(RADEON_CP_RB_CNTL); WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); WREG32(RADEON_CP_RB_RPTR_WR, 0); WREG32(RADEON_CP_RB_WPTR, 0); WREG32(RADEON_CP_RB_CNTL, tmp); /* save PCI state */ pci_save_state(device_get_parent(rdev->dev)); /* disable bus mastering */ r100_bm_disable(rdev); WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | S_0000F0_SOFT_RESET_GA(1)); RREG32(R_0000F0_RBBM_SOFT_RESET); DRM_MDELAY(500); WREG32(R_0000F0_RBBM_SOFT_RESET, 0); DRM_MDELAY(1); status = RREG32(R_000E40_RBBM_STATUS); dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); /* Resetting the CP seems to be problematic: sometimes it ends up * hard-locking the computer, but it's necessary for a successful * reset. More testing & playing is needed on R3XX/R4XX to find a * reliable solution (if any). */ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); RREG32(R_0000F0_RBBM_SOFT_RESET); DRM_MDELAY(500); WREG32(R_0000F0_RBBM_SOFT_RESET, 0); DRM_MDELAY(1); status = RREG32(R_000E40_RBBM_STATUS); dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); /* restore PCI & busmastering */ pci_restore_state(device_get_parent(rdev->dev)); r100_enable_bm(rdev); /* Check if GPU is idle */ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { dev_err(rdev->dev, "failed to reset GPU\n"); ret = -1; } else dev_info(rdev->dev, "GPU reset succeeded\n"); r100_mc_resume(rdev, &save); return ret; } /* * r300,r350,rv350,rv380 VRAM info */ void r300_mc_init(struct radeon_device *rdev) { u64 base; u32 tmp; /* DDR for all cards after R300 & IGP */ rdev->mc.vram_is_ddr = true; tmp = RREG32(RADEON_MEM_CNTL); tmp &= R300_MEM_NUM_CHANNELS_MASK; switch (tmp) { case 0: rdev->mc.vram_width = 64; break; case 1: rdev->mc.vram_width = 128; break; case 2: rdev->mc.vram_width = 256; break; default: rdev->mc.vram_width = 128; break; } r100_vram_init_sizes(rdev); base = rdev->mc.aper_base; if (rdev->flags & RADEON_IS_IGP) base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = 0; if (!(rdev->flags & RADEON_IS_AGP)) radeon_gtt_location(rdev, &rdev->mc); 
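/*
 * By this point the MC address map is settled: VRAM was placed at 'base'
 * (the PCI aperture base or, on IGPs, the bottom-of-memory value read from
 * NB_TOM, whose low 16 bits appear to count 64K pages, hence the << 16
 * above), and on non-AGP parts a GTT location was chosen around it.
 */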
radeon_update_bandwidth_info(rdev); } void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) { uint32_t link_width_cntl, mask; if (rdev->flags & RADEON_IS_IGP) return; if (!(rdev->flags & RADEON_IS_PCIE)) return; /* FIXME wait for idle */ switch (lanes) { case 0: mask = RADEON_PCIE_LC_LINK_WIDTH_X0; break; case 1: mask = RADEON_PCIE_LC_LINK_WIDTH_X1; break; case 2: mask = RADEON_PCIE_LC_LINK_WIDTH_X2; break; case 4: mask = RADEON_PCIE_LC_LINK_WIDTH_X4; break; case 8: mask = RADEON_PCIE_LC_LINK_WIDTH_X8; break; case 12: mask = RADEON_PCIE_LC_LINK_WIDTH_X12; break; case 16: default: mask = RADEON_PCIE_LC_LINK_WIDTH_X16; break; } link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) return; link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | RADEON_PCIE_LC_RECONFIG_NOW | RADEON_PCIE_LC_RECONFIG_LATER | RADEON_PCIE_LC_SHORT_RECONFIG_EN); link_width_cntl |= mask; WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | RADEON_PCIE_LC_RECONFIG_NOW)); /* wait for lane set to complete */ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); while (link_width_cntl == 0xffffffff) link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); } int rv370_get_pcie_lanes(struct radeon_device *rdev) { u32 link_width_cntl; if (rdev->flags & RADEON_IS_IGP) return 0; if (!(rdev->flags & RADEON_IS_PCIE)) return 0; /* FIXME wait for idle */ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { case RADEON_PCIE_LC_LINK_WIDTH_X0: return 0; case RADEON_PCIE_LC_LINK_WIDTH_X1: return 1; case RADEON_PCIE_LC_LINK_WIDTH_X2: return 2; case RADEON_PCIE_LC_LINK_WIDTH_X4: return 4; case RADEON_PCIE_LC_LINK_WIDTH_X8: return 8; case RADEON_PCIE_LC_LINK_WIDTH_X16: default: return 16; } } #if defined(CONFIG_DEBUG_FS) static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; uint32_t tmp; tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE); seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO); seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI); seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO); seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI); seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR); seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp); return 0; } static struct drm_info_list rv370_pcie_gart_info_list[] = { {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL}, }; #endif static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); #else return 0; #endif } static int r300_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) { struct radeon_cs_reloc *reloc; struct r100_cs_track *track; volatile uint32_t *ib; uint32_t tmp, tile_flags = 0; unsigned i; 
int r; u32 idx_value; ib = p->ib.ptr; track = (struct r100_cs_track *)p->track; idx_value = radeon_get_ib_value(p, idx); switch(reg) { case AVIVO_D1MODE_VLINE_START_END: case RADEON_CRTC_GUI_TRIG_VLINE: r = r100_cs_packet_parse_vline(p); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } break; case RADEON_DST_PITCH_OFFSET: case RADEON_SRC_PITCH_OFFSET: r = r100_reloc_pitch_offset(p, pkt, idx, reg); if (r) return r; break; case R300_RB3D_COLOROFFSET0: case R300_RB3D_COLOROFFSET1: case R300_RB3D_COLOROFFSET2: case R300_RB3D_COLOROFFSET3: i = (reg - R300_RB3D_COLOROFFSET0) >> 2; r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } track->cb[i].robj = reloc->robj; track->cb[i].offset = idx_value; track->cb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case R300_ZB_DEPTHOFFSET: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } track->zb.robj = reloc->robj; track->zb.offset = idx_value; track->zb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case R300_TX_OFFSET_0: case R300_TX_OFFSET_0+4: case R300_TX_OFFSET_0+8: case R300_TX_OFFSET_0+12: case R300_TX_OFFSET_0+16: case R300_TX_OFFSET_0+20: case R300_TX_OFFSET_0+24: case R300_TX_OFFSET_0+28: case R300_TX_OFFSET_0+32: case R300_TX_OFFSET_0+36: case R300_TX_OFFSET_0+40: case R300_TX_OFFSET_0+44: case R300_TX_OFFSET_0+48: case R300_TX_OFFSET_0+52: case R300_TX_OFFSET_0+56: case R300_TX_OFFSET_0+60: i = (reg - R300_TX_OFFSET_0) >> 2; r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) { ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); } else { if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) tile_flags |= R300_TXO_MACRO_TILE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_TXO_MICRO_TILE; else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) tile_flags |= R300_TXO_MICRO_TILE_SQUARE; tmp = idx_value + ((u32)reloc->lobj.gpu_offset); tmp |= tile_flags; ib[idx] = tmp; } track->textures[i].robj = reloc->robj; track->tex_dirty = true; break; /* Tracked registers */ case 0x2084: /* VAP_VF_CNTL */ track->vap_vf_cntl = idx_value; break; case 0x20B4: /* VAP_VTX_SIZE */ track->vtx_size = idx_value & 0x7F; break; case 0x2134: /* VAP_VF_MAX_VTX_INDX */ track->max_indx = idx_value & 0x00FFFFFFUL; break; case 0x2088: /* VAP_ALT_NUM_VERTICES - only valid on r500 */ if (p->rdev->family < CHIP_RV515) goto fail; track->vap_alt_nverts = idx_value & 0xFFFFFF; break; case 0x43E4: /* SC_SCISSOR1 */ track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; if (p->rdev->family < CHIP_RV515) { track->maxy -= 1440; } track->cb_dirty = true; track->zb_dirty = true; break; case 0x4E00: /* RB3D_CCTL */ if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ p->rdev->cmask_filp != p->filp) { DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n"); return -EINVAL; } track->num_cb = ((idx_value >> 5) & 0x3) + 1; track->cb_dirty = true; break; case 0x4E38: case 0x4E3C: case 0x4E40: case 0x4E44: /* RB3D_COLORPITCH0 */ /* RB3D_COLORPITCH1 */ /* RB3D_COLORPITCH2 */ /* RB3D_COLORPITCH3 */ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = r100_cs_packet_next_reloc(p, &reloc); if 
(r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) tile_flags |= R300_COLOR_TILE_ENABLE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_COLOR_MICROTILE_ENABLE; else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; tmp = idx_value & ~(0x7 << 16); tmp |= tile_flags; ib[idx] = tmp; } i = (reg - 0x4E38) >> 2; track->cb[i].pitch = idx_value & 0x3FFE; switch (((idx_value >> 21) & 0xF)) { case 9: case 11: case 12: track->cb[i].cpp = 1; break; case 3: case 4: case 13: case 15: track->cb[i].cpp = 2; break; case 5: if (p->rdev->family < CHIP_RV515) { DRM_ERROR("Invalid color buffer format (%d)!\n", ((idx_value >> 21) & 0xF)); return -EINVAL; } /* Pass through. */ case 6: track->cb[i].cpp = 4; break; case 10: track->cb[i].cpp = 8; break; case 7: track->cb[i].cpp = 16; break; default: DRM_ERROR("Invalid color buffer format (%d) !\n", ((idx_value >> 21) & 0xF)); return -EINVAL; } track->cb_dirty = true; break; case 0x4F00: /* ZB_CNTL */ if (idx_value & 2) { track->z_enabled = true; } else { track->z_enabled = false; } track->zb_dirty = true; break; case 0x4F10: /* ZB_FORMAT */ switch ((idx_value & 0xF)) { case 0: case 1: track->zb.cpp = 2; break; case 2: track->zb.cpp = 4; break; default: DRM_ERROR("Invalid z buffer format (%d) !\n", (idx_value & 0xF)); return -EINVAL; } track->zb_dirty = true; break; case 0x4F24: /* ZB_DEPTHPITCH */ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) tile_flags |= R300_DEPTHMACROTILE_ENABLE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_DEPTHMICROTILE_TILED; else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; tmp = idx_value & ~(0x7 << 16); tmp |= tile_flags; ib[idx] = tmp; } track->zb.pitch = idx_value & 0x3FFC; track->zb_dirty = true; break; case 0x4104: /* TX_ENABLE */ for (i = 0; i < 16; i++) { bool enabled; enabled = !!(idx_value & (1 << i)); track->textures[i].enabled = enabled; } track->tex_dirty = true; break; case 0x44C0: case 0x44C4: case 0x44C8: case 0x44CC: case 0x44D0: case 0x44D4: case 0x44D8: case 0x44DC: case 0x44E0: case 0x44E4: case 0x44E8: case 0x44EC: case 0x44F0: case 0x44F4: case 0x44F8: case 0x44FC: /* TX_FORMAT1_[0-15] */ i = (reg - 0x44C0) >> 2; tmp = (idx_value >> 25) & 0x3; track->textures[i].tex_coord_type = tmp; switch ((idx_value & 0x1F)) { case R300_TX_FORMAT_X8: case R300_TX_FORMAT_Y4X4: case R300_TX_FORMAT_Z3Y3X2: track->textures[i].cpp = 1; track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_X16: case R300_TX_FORMAT_FL_I16: case R300_TX_FORMAT_Y8X8: case R300_TX_FORMAT_Z5Y6X5: case R300_TX_FORMAT_Z6Y5X5: case R300_TX_FORMAT_W4Z4Y4X4: case R300_TX_FORMAT_W1Z5Y5X5: case R300_TX_FORMAT_D3DMFT_CxV8U8: case R300_TX_FORMAT_B8G8_B8G8: case R300_TX_FORMAT_G8R8_G8B8: track->textures[i].cpp = 2; track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_Y16X16: case R300_TX_FORMAT_FL_I16A16: case R300_TX_FORMAT_Z11Y11X10: case R300_TX_FORMAT_Z10Y11X11: case R300_TX_FORMAT_W8Z8Y8X8: case R300_TX_FORMAT_W2Z10Y10X10: case 0x17: case R300_TX_FORMAT_FL_I32: case 0x1e: track->textures[i].cpp = 4; track->textures[i].compress_format = 
R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_W16Z16Y16X16: case R300_TX_FORMAT_FL_R16G16B16A16: case R300_TX_FORMAT_FL_I32A32: track->textures[i].cpp = 8; track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_FL_R32G32B32A32: track->textures[i].cpp = 16; track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_DXT1: track->textures[i].cpp = 1; track->textures[i].compress_format = R100_TRACK_COMP_DXT1; break; case R300_TX_FORMAT_ATI2N: if (p->rdev->family < CHIP_R420) { DRM_ERROR("Invalid texture format %u\n", (idx_value & 0x1F)); return -EINVAL; } /* The same rules apply as for DXT3/5. */ /* Pass through. */ case R300_TX_FORMAT_DXT3: case R300_TX_FORMAT_DXT5: track->textures[i].cpp = 1; track->textures[i].compress_format = R100_TRACK_COMP_DXT35; break; default: DRM_ERROR("Invalid texture format %u\n", (idx_value & 0x1F)); return -EINVAL; } track->tex_dirty = true; break; case 0x4400: case 0x4404: case 0x4408: case 0x440C: case 0x4410: case 0x4414: case 0x4418: case 0x441C: case 0x4420: case 0x4424: case 0x4428: case 0x442C: case 0x4430: case 0x4434: case 0x4438: case 0x443C: /* TX_FILTER0_[0-15] */ i = (reg - 0x4400) >> 2; tmp = idx_value & 0x7; if (tmp == 2 || tmp == 4 || tmp == 6) { track->textures[i].roundup_w = false; } tmp = (idx_value >> 3) & 0x7; if (tmp == 2 || tmp == 4 || tmp == 6) { track->textures[i].roundup_h = false; } track->tex_dirty = true; break; case 0x4500: case 0x4504: case 0x4508: case 0x450C: case 0x4510: case 0x4514: case 0x4518: case 0x451C: case 0x4520: case 0x4524: case 0x4528: case 0x452C: case 0x4530: case 0x4534: case 0x4538: case 0x453C: /* TX_FORMAT2_[0-15] */ i = (reg - 0x4500) >> 2; tmp = idx_value & 0x3FFF; track->textures[i].pitch = tmp + 1; if (p->rdev->family >= CHIP_RV515) { tmp = ((idx_value >> 15) & 1) << 11; track->textures[i].width_11 = tmp; tmp = ((idx_value >> 16) & 1) << 11; track->textures[i].height_11 = tmp; /* ATI1N */ if (idx_value & (1 << 14)) { /* The same rules apply as for DXT1. 
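* (DXT1/ATI1N pack each 4x4 texel block into 8 bytes, so the tracker keeps
* cpp at 1 and relies on the recorded compressed format to correct the
* size checks for the 4x4 blocking.)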
*/ track->textures[i].compress_format = R100_TRACK_COMP_DXT1; } } else if (idx_value & (1 << 14)) { DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); return -EINVAL; } track->tex_dirty = true; break; case 0x4480: case 0x4484: case 0x4488: case 0x448C: case 0x4490: case 0x4494: case 0x4498: case 0x449C: case 0x44A0: case 0x44A4: case 0x44A8: case 0x44AC: case 0x44B0: case 0x44B4: case 0x44B8: case 0x44BC: /* TX_FORMAT0_[0-15] */ i = (reg - 0x4480) >> 2; tmp = idx_value & 0x7FF; track->textures[i].width = tmp + 1; tmp = (idx_value >> 11) & 0x7FF; track->textures[i].height = tmp + 1; tmp = (idx_value >> 26) & 0xF; track->textures[i].num_levels = tmp; - tmp = idx_value & (1 << 31); + tmp = idx_value & (1U << 31); track->textures[i].use_pitch = !!tmp; tmp = (idx_value >> 22) & 0xF; track->textures[i].txdepth = tmp; track->tex_dirty = true; break; case R300_ZB_ZPASS_ADDR: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case 0x4e0c: /* RB3D_COLOR_CHANNEL_MASK */ track->color_channel_mask = idx_value; track->cb_dirty = true; break; case 0x43a4: /* SC_HYPERZ_EN */ /* r300c emits this register - we need to disable hyperz for it * without complaining */ if (p->rdev->hyperz_filp != p->filp) { if (idx_value & 0x1) ib[idx] = idx_value & ~1; } break; case 0x4f1c: /* ZB_BW_CNTL */ track->zb_cb_clear = !!(idx_value & (1 << 5)); track->cb_dirty = true; track->zb_dirty = true; if (p->rdev->hyperz_filp != p->filp) { if (idx_value & (R300_HIZ_ENABLE | R300_RD_COMP_ENABLE | R300_WR_COMP_ENABLE | R300_FAST_FILL_ENABLE)) goto fail; } break; case 0x4e04: /* RB3D_BLENDCNTL */ track->blend_read_enable = !!(idx_value & (1 << 2)); track->cb_dirty = true; break; case R300_RB3D_AARESOLVE_OFFSET: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); r100_cs_dump_packet(p, pkt); return r; } track->aa.robj = reloc->robj; track->aa.offset = idx_value; track->aa_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case R300_RB3D_AARESOLVE_PITCH: track->aa.pitch = idx_value & 0x3FFE; track->aa_dirty = true; break; case R300_RB3D_AARESOLVE_CTL: track->aaresolve = idx_value & 0x1; track->aa_dirty = true; break; case 0x4f30: /* ZB_MASK_OFFSET */ case 0x4f34: /* ZB_ZMASK_PITCH */ case 0x4f44: /* ZB_HIZ_OFFSET */ case 0x4f54: /* ZB_HIZ_PITCH */ if (idx_value && (p->rdev->hyperz_filp != p->filp)) goto fail; break; case 0x4028: if (idx_value && (p->rdev->hyperz_filp != p->filp)) goto fail; /* GB_Z_PEQ_CONFIG */ if (p->rdev->family >= CHIP_RV350) break; goto fail; break; case 0x4be8: /* valid register only on RV530 */ if (p->rdev->family == CHIP_RV530) break; /* fallthrough do not move */ default: goto fail; } return 0; fail: DRM_ERROR("Forbidden register 0x%04X in cs at %d (val=%08x)\n", reg, idx, idx_value); return -EINVAL; } static int r300_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { struct radeon_cs_reloc *reloc; struct r100_cs_track *track; volatile uint32_t *ib; unsigned idx; int r; ib = p->ib.ptr; idx = pkt->idx + 1; track = (struct r100_cs_track *)p->track; switch(pkt->opcode) { case PACKET3_3D_LOAD_VBPNTR: r = r100_packet3_load_vbpntr(p, pkt, idx); if (r) return r; break; case PACKET3_INDX_BUFFER: r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); r100_cs_dump_packet(p, pkt); return r; } ib[idx+1] = radeon_get_ib_value(p, idx 
+ 1) + ((u32)reloc->lobj.gpu_offset); r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); if (r) { return r; } break; /* Draw packet */ case PACKET3_3D_DRAW_IMMD: /* Number of dwords is vtx_size * (num_vertices - 1); PRIM_WALK must be * equal to 3, vertex data is embedded in the cmd stream */ if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL; } track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); track->immd_dwords = pkt->count - 1; r = r100_cs_track_check(p->rdev, track); if (r) { return r; } break; case PACKET3_3D_DRAW_IMMD_2: /* Number of dwords is vtx_size * (num_vertices - 1); PRIM_WALK must be * equal to 3, vertex data is embedded in the cmd stream */ if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL; } track->vap_vf_cntl = radeon_get_ib_value(p, idx); track->immd_dwords = pkt->count; r = r100_cs_track_check(p->rdev, track); if (r) { return r; } break; case PACKET3_3D_DRAW_VBUF: track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); r = r100_cs_track_check(p->rdev, track); if (r) { return r; } break; case PACKET3_3D_DRAW_VBUF_2: track->vap_vf_cntl = radeon_get_ib_value(p, idx); r = r100_cs_track_check(p->rdev, track); if (r) { return r; } break; case PACKET3_3D_DRAW_INDX: track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); r = r100_cs_track_check(p->rdev, track); if (r) { return r; } break; case PACKET3_3D_DRAW_INDX_2: track->vap_vf_cntl = radeon_get_ib_value(p, idx); r = r100_cs_track_check(p->rdev, track); if (r) { return r; } break; case PACKET3_3D_CLEAR_HIZ: case PACKET3_3D_CLEAR_ZMASK: if (p->rdev->hyperz_filp != p->filp) return -EINVAL; break; case PACKET3_3D_CLEAR_CMASK: if (p->rdev->cmask_filp != p->filp) return -EINVAL; break; case PACKET3_NOP: break; default: DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; }
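/*
 * The draw checks above decode VAP_VF_CNTL: bits 0-3 carry the primitive
 * type, bits 4-5 the PRIM_WALK mode and bits 16 and up the vertex count.
 * As an illustration (a constructed value, not one captured from a command
 * stream), a 3-vertex immediate-mode triangle would be
 * (4 << 0) | (3 << 4) | (3 << 16) = 0x30034, and ((value >> 4) & 0x3) == 3
 * selects the "vertex data embedded in the cmd stream" walk mode that the
 * IMMD paths insist on.
 */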
int r300_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; struct r100_cs_track *track; int r; track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (track == NULL) return -ENOMEM; r100_cs_track_clear(p->rdev, track); p->track = track; do { r = r100_cs_packet_parse(p, &pkt, p->idx); if (r) { free(p->track, DRM_MEM_DRIVER); p->track = NULL; return r; } p->idx += pkt.count + 2; switch (pkt.type) { case PACKET_TYPE0: r = r100_cs_parse_packet0(p, &pkt, p->rdev->config.r300.reg_safe_bm, p->rdev->config.r300.reg_safe_bm_size, &r300_packet0_check); break; case PACKET_TYPE2: break; case PACKET_TYPE3: r = r300_packet3_check(p, &pkt); break; default: DRM_ERROR("Unknown packet type %d !\n", pkt.type); free(p->track, DRM_MEM_DRIVER); p->track = NULL; return -EINVAL; } if (r) { free(p->track, DRM_MEM_DRIVER); p->track = NULL; return r; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); free(p->track, DRM_MEM_DRIVER); p->track = NULL; return 0; } void r300_set_reg_safe(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = r300_reg_safe_bm; rdev->config.r300.reg_safe_bm_size = DRM_ARRAY_SIZE(r300_reg_safe_bm); } void r300_mc_program(struct radeon_device *rdev) { struct r100_mc_save save; int r; r = r100_debugfs_mc_info_init(rdev); if (r) { dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n"); } /* Stops all mc clients */ r100_mc_stop(rdev, &save); if (rdev->flags & RADEON_IS_AGP) { WREG32(R_00014C_MC_AGP_LOCATION, S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); WREG32(R_00015C_AGP_BASE_2, upper_32_bits(rdev->mc.agp_base) & 0xff); } else { WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); WREG32(R_000170_AGP_BASE, 0); WREG32(R_00015C_AGP_BASE_2, 0); } /* Wait for mc idle */ if (r300_mc_wait_for_idle(rdev)) DRM_INFO("Failed to wait MC idle before programming MC.\n"); /* Program the MC; it should be a 32-bit limited address space */ WREG32(R_000148_MC_FB_LOCATION, S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); r100_mc_resume(rdev, &save); } void r300_clock_startup(struct radeon_device *rdev) { u32 tmp; if (radeon_dynclks != -1 && radeon_dynclks) radeon_legacy_set_clock_gating(rdev, 1); /* We need to force on some of the blocks */ tmp = RREG32_PLL(R_00000D_SCLK_CNTL); tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) tmp |= S_00000D_FORCE_VAP(1); WREG32_PLL(R_00000D_SCLK_CNTL, tmp); } static int r300_startup(struct radeon_device *rdev) { int r; /* set common regs */ r100_set_common_regs(rdev); /* program mc */ r300_mc_program(rdev); /* Resume clock */ r300_clock_startup(rdev); /* Initialize GPU configuration (# pipes, ...) */ r300_gpu_init(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ if (rdev->flags & RADEON_IS_PCIE) { r = rv370_pcie_gart_enable(rdev); if (r) return r; } if (rdev->family == CHIP_R300 || rdev->family == CHIP_R350 || rdev->family == CHIP_RV350) r100_enable_bm(rdev); if (rdev->flags & RADEON_IS_PCI) { r = r100_pci_gart_enable(rdev); if (r) return r; } /* allocate wb buffer */ r = radeon_wb_init(rdev); if (r) return r; r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); if (r) { dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); return r; } /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; } return 0; } int r300_resume(struct radeon_device *rdev) { int r; /* Make sure the GARTs are not working */ if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_disable(rdev); /* Resume clock before doing reset */ r300_clock_startup(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); } /* post */ radeon_combios_asic_init(rdev->ddev); /* Resume clock after posting */ r300_clock_startup(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); rdev->accel_working = true; r = r300_startup(rdev); if (r) { rdev->accel_working = false; } return r; } int r300_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_disable(rdev); return 0; } void r300_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); free(rdev->bios, DRM_MEM_DRIVER); rdev->bios = NULL; } int r300_init(struct radeon_device *rdev) { int r; /* Disable VGA */ r100_vga_render_disable(rdev); /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ /* restore some register to sane defaults */ r100_restore_sanity(rdev); /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) return -EINVAL; } if (rdev->is_atom_bios) { dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); return -EINVAL; } else { r = radeon_combios_init(rdev); if (r) return r; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ if (radeon_boot_test_post_card(rdev) == false) return -EINVAL; /* Set asic errata */ r300_errata(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); if (r) { radeon_agp_disable(rdev); } } /* initialize memory controller */ r300_mc_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) return r; r = radeon_irq_kms_init(rdev); if (r) return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) return r; if (rdev->flags & RADEON_IS_PCIE) { r = rv370_pcie_gart_init(rdev); if (r) return r; } if (rdev->flags & RADEON_IS_PCI) { r = r100_pci_gart_init(rdev); if (r) return r; } r300_set_reg_safe(rdev); rdev->accel_working = true; r = r300_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); rdev->accel_working = false; } return 0; } Index: head/sys/dev/drm2/radeon/r300_reg.h =================================================================== --- head/sys/dev/drm2/radeon/r300_reg.h (revision 258779) +++ head/sys/dev/drm2/radeon/r300_reg.h (revision 258780) @@ -1,1792 +1,1792 @@ /* * Copyright 2005 Nicolai Haehnle et al. * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2009 Jerome Glisse. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Nicolai Haehnle * Jerome Glisse */ #ifndef _R300_REG_H_ #define _R300_REG_H_ #include __FBSDID("$FreeBSD$"); #define R300_SURF_TILE_MACRO (1<<16) #define R300_SURF_TILE_MICRO (2<<16) #define R300_SURF_TILE_BOTH (3<<16) #define R300_MC_INIT_MISC_LAT_TIMER 0x180 # define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0 # define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4 # define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8 # define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12 # define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16 # define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20 # define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24 # define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28 #define R300_MC_INIT_GFX_LAT_TIMER 0x154 # define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0 # define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4 # define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8 # define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12 # define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16 # define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20 # define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24 # define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28 /* * This file contains registers and constants for the R300. They have been * found mostly by examining command buffers captured using glxtest, as well * as by extrapolating some known registers and constants from the R200. * I am fairly certain that they are correct unless stated otherwise * in comments. */ #define R300_SE_VPORT_XSCALE 0x1D98 #define R300_SE_VPORT_XOFFSET 0x1D9C #define R300_SE_VPORT_YSCALE 0x1DA0 #define R300_SE_VPORT_YOFFSET 0x1DA4 #define R300_SE_VPORT_ZSCALE 0x1DA8 #define R300_SE_VPORT_ZOFFSET 0x1DAC /* * Vertex Array Processing (VAP) Control * Stolen from r200 code from Christoph Brill (It's a guess!) 
*/ #define R300_VAP_CNTL 0x2080 /* This register is written directly and also starts data section * in many 3d CP_PACKET3's */ #define R300_VAP_VF_CNTL 0x2084 # define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0 # define R300_VAP_VF_CNTL__PRIM_NONE (0<<0) # define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0) # define R300_VAP_VF_CNTL__PRIM_LINES (2<<0) # define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0) # define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0) # define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0) # define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0) # define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0) # define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0) # define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0) # define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0) # define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4 /* State based - direct writes to registers trigger vertex generation */ # define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4) # define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4) # define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4) # define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4) /* I don't think I saw these three used.. */ # define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6 # define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9 # define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10 /* index size - when not set the indices are assumed to be 16 bit */ # define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11) /* number of vertices */ # define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16 /* BEGIN: Wild guesses */ #define R300_VAP_OUTPUT_VTX_FMT_0 0x2090 # define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0) # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1) # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */ # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */ # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */ # define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */ #define R300_VAP_OUTPUT_VTX_FMT_1 0x2094 /* each of the following is 3 bits wide, specifies number of components */ # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21 /* END: Wild guesses */ #define R300_SE_VTE_CNTL 0x20b0 # define R300_VPORT_X_SCALE_ENA 0x00000001 # define R300_VPORT_X_OFFSET_ENA 0x00000002 # define R300_VPORT_Y_SCALE_ENA 0x00000004 # define R300_VPORT_Y_OFFSET_ENA 0x00000008 # define R300_VPORT_Z_SCALE_ENA 0x00000010 # define R300_VPORT_Z_OFFSET_ENA 0x00000020 # define R300_VTX_XY_FMT 0x00000100 # define R300_VTX_Z_FMT 0x00000200 # define R300_VTX_W0_FMT 0x00000400 # define R300_VTX_W0_NORMALIZE 0x00000800 # define R300_VTX_ST_DENORMALIZED 0x00001000 /* BEGIN: Vertex data assembly - lots of uncertainties */ /* gap */ #define R300_VAP_CNTL_STATUS 0x2140 # define R300_VC_NO_SWAP (0 << 0) # define R300_VC_16BIT_SWAP (1 << 0) # define R300_VC_32BIT_SWAP (2 << 0) # define R300_VAP_TCL_BYPASS (1 << 8) /* gap */ /* Where do we get our vertex data? * * Vertex data either comes either from immediate mode registers or from * vertex arrays. 
* There appears to be no mixed mode (though we can force the pitch of * vertex arrays to 0, effectively reusing the same element over and over * again). * * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure * if these registers influence vertex array processing. * * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3. * * In both cases, vertex attributes are then passed through INPUT_ROUTE. * * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data * into the vertex processor's input registers. * The first word routes the first input, the second word the second, etc. * The corresponding input is routed into the register with the given index. * The list is ended by a word with INPUT_ROUTE_END set. * * Always set COMPONENTS_4 in immediate mode. */ #define R300_VAP_INPUT_ROUTE_0_0 0x2150 # define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0) # define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0) # define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0) # define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0) # define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */ # define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8 # define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */ # define R300_VAP_INPUT_ROUTE_END (1 << 13) # define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */ # define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */ # define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */ # define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */ #define R300_VAP_INPUT_ROUTE_0_1 0x2154 #define R300_VAP_INPUT_ROUTE_0_2 0x2158 #define R300_VAP_INPUT_ROUTE_0_3 0x215C #define R300_VAP_INPUT_ROUTE_0_4 0x2160 #define R300_VAP_INPUT_ROUTE_0_5 0x2164 #define R300_VAP_INPUT_ROUTE_0_6 0x2168 #define R300_VAP_INPUT_ROUTE_0_7 0x216C /* gap */ /* Notes: * - always set up to produce at least two attributes: * if vertex program uses only position, fglrx will set normal, too * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal. */ #define R300_VAP_INPUT_CNTL_0 0x2180 # define R300_INPUT_CNTL_0_COLOR 0x00000001 #define R300_VAP_INPUT_CNTL_1 0x2184 # define R300_INPUT_CNTL_POS 0x00000001 # define R300_INPUT_CNTL_NORMAL 0x00000002 # define R300_INPUT_CNTL_COLOR 0x00000004 # define R300_INPUT_CNTL_TC0 0x00000400 # define R300_INPUT_CNTL_TC1 0x00000800 # define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */ # define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */ # define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */ # define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */ # define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */ # define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */ /* gap */ /* Words parallel to INPUT_ROUTE_0; all words that are active in INPUT_ROUTE_0 * are set to a swizzling bit pattern, other words are 0. * * In immediate mode, the pattern is always set to xyzw. In vertex array * mode, the swizzling pattern is e.g. used to set zw components in texture * coordinates with only two components. 
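* For example, to expand such a two-component coordinate to (s, t, 0, 1)
* the route word would be R300_INPUT_ROUTE_SELECT_X |
* (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT) |
* (R300_INPUT_ROUTE_SELECT_ZERO << R300_INPUT_ROUTE_Z_SHIFT) |
* (R300_INPUT_ROUTE_SELECT_ONE << R300_INPUT_ROUTE_W_SHIFT) |
* R300_INPUT_ROUTE_ENABLE, i.e. 0xFB08 with the definitions below (a
* constructed illustration, not a value captured from fglrx).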
*/ #define R300_VAP_INPUT_ROUTE_1_0 0x21E0 # define R300_INPUT_ROUTE_SELECT_X 0 # define R300_INPUT_ROUTE_SELECT_Y 1 # define R300_INPUT_ROUTE_SELECT_Z 2 # define R300_INPUT_ROUTE_SELECT_W 3 # define R300_INPUT_ROUTE_SELECT_ZERO 4 # define R300_INPUT_ROUTE_SELECT_ONE 5 # define R300_INPUT_ROUTE_SELECT_MASK 7 # define R300_INPUT_ROUTE_X_SHIFT 0 # define R300_INPUT_ROUTE_Y_SHIFT 3 # define R300_INPUT_ROUTE_Z_SHIFT 6 # define R300_INPUT_ROUTE_W_SHIFT 9 # define R300_INPUT_ROUTE_ENABLE (15 << 12) #define R300_VAP_INPUT_ROUTE_1_1 0x21E4 #define R300_VAP_INPUT_ROUTE_1_2 0x21E8 #define R300_VAP_INPUT_ROUTE_1_3 0x21EC #define R300_VAP_INPUT_ROUTE_1_4 0x21F0 #define R300_VAP_INPUT_ROUTE_1_5 0x21F4 #define R300_VAP_INPUT_ROUTE_1_6 0x21F8 #define R300_VAP_INPUT_ROUTE_1_7 0x21FC /* END: Vertex data assembly */ /* gap */ /* BEGIN: Upload vertex program and data */ /* * The programmable vertex shader unit has a memory bank of unknown size * that can be written to in 16 byte units by writing the address into * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs). * * Pointers into the memory bank are always in multiples of 16 bytes. * * The memory bank is divided into areas with fixed meaning. * * Starting at address UPLOAD_PROGRAM: Vertex program instructions. * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB), * whereas the difference between known addresses suggests size 512. * * Starting at address UPLOAD_PARAMETERS: Vertex program parameters. * Native reported limits and the VPI layout suggest size 256, whereas * difference between known addresses suggests size 512. * * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the * floating point pointsize. The exact purpose of this state is uncertain, * as there is also the R300_RE_POINTSIZE register. * * Multiple vertex programs and parameter sets can be loaded at once, * which could explain the size discrepancy. */ #define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200 # define R300_PVS_UPLOAD_PROGRAM 0x00000000 # define R300_PVS_UPLOAD_PARAMETERS 0x00000200 # define R300_PVS_UPLOAD_POINTSIZE 0x00000406 /* gap */ #define R300_VAP_PVS_UPLOAD_DATA 0x2208 /* END: Upload vertex program and data */ /* gap */ /* I do not know the purpose of this register. However, I do know that * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL * for normal rendering. */ #define R300_VAP_UNKNOWN_221C 0x221C # define R300_221C_NORMAL 0x00000000 # define R300_221C_CLEAR 0x0001C000 /* These seem to be per-pixel and per-vertex X and Y clipping planes. The first * plane is per-pixel and the second plane is per-vertex. * * This was determined by experimentation alone but I believe it is correct. * * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest. */ #define R300_VAP_CLIP_X_0 0x2220 #define R300_VAP_CLIP_X_1 0x2224 #define R300_VAP_CLIP_Y_0 0x2228 #define R300_VAP_CLIP_Y_1 0x2230 /* gap */ /* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between * rendering commands and overwriting vertex program parameters. * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and * avoids bugs caused by still running shaders reading bad data from memory. */ #define R300_VAP_PVS_STATE_FLUSH_REG 0x2284 /* Absolutely no clue what this register is about. */ #define R300_VAP_UNKNOWN_2288 0x2288 # define R300_2288_R300 0x00750000 /* -- nh */ # define R300_2288_RV350 0x0000FFFF /* -- Vladimir */ /* gap */ /* Addresses are relative to the vertex program instruction area of the * memory bank. 
PROGRAM_END points to the last instruction of the active * program. * * The meaning of the two UNKNOWN fields is obviously not known. However, * experiments so far have shown that both *must* point to an instruction * inside the vertex program, otherwise the GPU locks up. * * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last write to * position takes place. * * Most likely this is used to ignore the rest of the program in cases * where a group of verts isn't visible. For some reason this "section" * sometimes accepts other instructions that have no relationship with * position calculations. */ #define R300_VAP_PVS_CNTL_1 0x22D0 # define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 # define R300_PVS_CNTL_1_POS_END_SHIFT 10 # define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 /* Addresses are relative to the vertex program parameters area. */ #define R300_VAP_PVS_CNTL_2 0x22D4 # define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 # define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 #define R300_VAP_PVS_CNTL_3 0x22D8 # define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10 # define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0 /* The entire range from 0x2300 to 0x24AC inclusive seems to be used for * immediate vertices */ #define R300_VAP_VTX_COLOR_R 0x2464 #define R300_VAP_VTX_COLOR_G 0x2468 #define R300_VAP_VTX_COLOR_B 0x246C #define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */ #define R300_VAP_VTX_POS_0_Y_1 0x2494 #define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */ #define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */ #define R300_VAP_VTX_POS_0_Y_2 0x24A4 #define R300_VAP_VTX_POS_0_Z_2 0x24A8 /* write 0 to indicate end of packet? */ #define R300_VAP_VTX_END_OF_PKT 0x24AC /* gap */ /* These are values from r300_reg/r300_reg.h - they are known to be correct * and are here so we can use one register file instead of several * - Vladimir */ #define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000 # define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4) # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5) # define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16) #define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004 /* each of the following is 3 bits wide, specifies number of components */ # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
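/*
 * An illustration built from the bits above (constructed, not dumped from
 * hardware): a vertex carrying a position, one color and a two-component
 * texture coordinate 0 would use
 *   R300_GB_VAP_RASTER_VTX_FMT_0 = POS_PRESENT | COLOR_0_PRESENT = 0x3
 *   R300_GB_VAP_RASTER_VTX_FMT_1 = 2 << TEX_0_COMP_CNT_SHIFT = 0x2
 */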
/* UNK30 seems to enable point to quad transformation on textures * (or something closely related to that). * This bit is rather fatal for the time being due to missing support on the pixel * shader side */ #define R300_GB_ENABLE 0x4008 # define R300_GB_POINT_STUFF_ENABLE (1<<0) # define R300_GB_LINE_STUFF_ENABLE (1<<1) # define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2) # define R300_GB_STENCIL_AUTO_ENABLE (1<<4) # define R300_GB_UNK31 (1<<31) /* each of the following is 2 bits wide */ #define R300_GB_TEX_REPLICATE 0 #define R300_GB_TEX_ST 1 #define R300_GB_TEX_STR 2 # define R300_GB_TEX0_SOURCE_SHIFT 16 # define R300_GB_TEX1_SOURCE_SHIFT 18 # define R300_GB_TEX2_SOURCE_SHIFT 20 # define R300_GB_TEX3_SOURCE_SHIFT 22 # define R300_GB_TEX4_SOURCE_SHIFT 24 # define R300_GB_TEX5_SOURCE_SHIFT 26 # define R300_GB_TEX6_SOURCE_SHIFT 28 # define R300_GB_TEX7_SOURCE_SHIFT 30 /* MSPOS - positions for multisample antialiasing (?) */ #define R300_GB_MSPOS0 0x4010 /* shifts - each of the fields is 4 bits */ # define R300_GB_MSPOS0__MS_X0_SHIFT 0 # define R300_GB_MSPOS0__MS_Y0_SHIFT 4 # define R300_GB_MSPOS0__MS_X1_SHIFT 8 # define R300_GB_MSPOS0__MS_Y1_SHIFT 12 # define R300_GB_MSPOS0__MS_X2_SHIFT 16 # define R300_GB_MSPOS0__MS_Y2_SHIFT 20 # define R300_GB_MSPOS0__MSBD0_Y 24 # define R300_GB_MSPOS0__MSBD0_X 28 #define R300_GB_MSPOS1 0x4014 # define R300_GB_MSPOS1__MS_X3_SHIFT 0 # define R300_GB_MSPOS1__MS_Y3_SHIFT 4 # define R300_GB_MSPOS1__MS_X4_SHIFT 8 # define R300_GB_MSPOS1__MS_Y4_SHIFT 12 # define R300_GB_MSPOS1__MS_X5_SHIFT 16 # define R300_GB_MSPOS1__MS_Y5_SHIFT 20 # define R300_GB_MSPOS1__MSBD1 24 #define R300_GB_TILE_CONFIG 0x4018 # define R300_GB_TILE_ENABLE (1<<0) # define R300_GB_TILE_PIPE_COUNT_RV300 0 # define R300_GB_TILE_PIPE_COUNT_R300 (3<<1) # define R300_GB_TILE_PIPE_COUNT_R420 (7<<1) # define R300_GB_TILE_PIPE_COUNT_RV410 (3<<1) # define R300_GB_TILE_SIZE_8 0 # define R300_GB_TILE_SIZE_16 (1<<4) # define R300_GB_TILE_SIZE_32 (2<<4) # define R300_GB_SUPER_SIZE_1 (0<<6) # define R300_GB_SUPER_SIZE_2 (1<<6) # define R300_GB_SUPER_SIZE_4 (2<<6) # define R300_GB_SUPER_SIZE_8 (3<<6) # define R300_GB_SUPER_SIZE_16 (4<<6) # define R300_GB_SUPER_SIZE_32 (5<<6) # define R300_GB_SUPER_SIZE_64 (6<<6) # define R300_GB_SUPER_SIZE_128 (7<<6) # define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */ # define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */ # define R300_GB_SUPER_TILE_A 0 # define R300_GB_SUPER_TILE_B (1<<15) # define R300_GB_SUBPIXEL_1_12 0 # define R300_GB_SUBPIXEL_1_16 (1<<16) #define R300_GB_FIFO_SIZE 0x4024 /* each of the following is 2 bits wide */ #define R300_GB_FIFO_SIZE_32 0 #define R300_GB_FIFO_SIZE_64 1 #define R300_GB_FIFO_SIZE_128 2 #define R300_GB_FIFO_SIZE_256 3 # define R300_SC_IFIFO_SIZE_SHIFT 0 # define R300_SC_TZFIFO_SIZE_SHIFT 2 # define R300_SC_BFIFO_SIZE_SHIFT 4 # define R300_US_OFIFO_SIZE_SHIFT 12 # define R300_US_WFIFO_SIZE_SHIFT 14 /* the following use the same constants as above, but the meaning is times 2 (i.e. 
instead of 32 words it means 64) */ # define R300_RS_TFIFO_SIZE_SHIFT 6 # define R300_RS_CFIFO_SIZE_SHIFT 8 # define R300_US_RAM_SIZE_SHIFT 10 /* watermarks, 3 bits wide */ # define R300_RS_HIGHWATER_COL_SHIFT 16 # define R300_RS_HIGHWATER_TEX_SHIFT 19 # define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */ # define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24 #define R300_GB_SELECT 0x401C # define R300_GB_FOG_SELECT_C0A 0 # define R300_GB_FOG_SELECT_C1A 1 # define R300_GB_FOG_SELECT_C2A 2 # define R300_GB_FOG_SELECT_C3A 3 # define R300_GB_FOG_SELECT_1_1_W 4 # define R300_GB_FOG_SELECT_Z 5 # define R300_GB_DEPTH_SELECT_Z 0 # define R300_GB_DEPTH_SELECT_1_1_W (1<<3) # define R300_GB_W_SELECT_1_W 0 # define R300_GB_W_SELECT_1 (1<<4) #define R300_GB_AA_CONFIG 0x4020 # define R300_AA_DISABLE 0x00 # define R300_AA_ENABLE 0x01 # define R300_AA_SUBSAMPLES_2 0 # define R300_AA_SUBSAMPLES_3 (1<<1) # define R300_AA_SUBSAMPLES_4 (2<<1) # define R300_AA_SUBSAMPLES_6 (3<<1) /* gap */ /* Zero to flush caches. */ #define R300_TX_INVALTAGS 0x4100 #define R300_TX_FLUSH 0x0 /* The upper enable bits are guessed, based on fglrx reported limits. */ #define R300_TX_ENABLE 0x4104 # define R300_TX_ENABLE_0 (1 << 0) # define R300_TX_ENABLE_1 (1 << 1) # define R300_TX_ENABLE_2 (1 << 2) # define R300_TX_ENABLE_3 (1 << 3) # define R300_TX_ENABLE_4 (1 << 4) # define R300_TX_ENABLE_5 (1 << 5) # define R300_TX_ENABLE_6 (1 << 6) # define R300_TX_ENABLE_7 (1 << 7) # define R300_TX_ENABLE_8 (1 << 8) # define R300_TX_ENABLE_9 (1 << 9) # define R300_TX_ENABLE_10 (1 << 10) # define R300_TX_ENABLE_11 (1 << 11) # define R300_TX_ENABLE_12 (1 << 12) # define R300_TX_ENABLE_13 (1 << 13) # define R300_TX_ENABLE_14 (1 << 14) # define R300_TX_ENABLE_15 (1 << 15) /* The pointsize is given in multiples of 6. The pointsize can be * enormous: Clear() renders a single point that fills the entire * framebuffer. */ #define R300_RE_POINTSIZE 0x421C # define R300_POINTSIZE_Y_SHIFT 0 # define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */ # define R300_POINTSIZE_X_SHIFT 16 # define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */ # define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6) /* The line width is given in multiples of 6. * In default mode lines are classified as vertical lines. * HO: horizontal * VE: vertical or horizontal * HO & VE: no classification */ #define R300_RE_LINE_CNT 0x4234 # define R300_LINESIZE_SHIFT 0 # define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */ # define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6) # define R300_LINE_CNT_HO (1 << 16) # define R300_LINE_CNT_VE (1 << 17) /* Some sort of scale or clamp value for texcoordless textures. */ #define R300_RE_UNK4238 0x4238 /* Something shade related */ #define R300_RE_SHADE 0x4274 #define R300_RE_SHADE_MODEL 0x4278 # define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa # define R300_RE_SHADE_MODEL_FLAT 0x39595 /* Dangerous */ #define R300_RE_POLYGON_MODE 0x4288 # define R300_PM_ENABLED (1 << 0) # define R300_PM_FRONT_POINT (0 << 0) # define R300_PM_BACK_POINT (0 << 0) # define R300_PM_FRONT_LINE (1 << 4) # define R300_PM_FRONT_FILL (1 << 5) # define R300_PM_BACK_LINE (1 << 7) # define R300_PM_BACK_FILL (1 << 8) /* Fog parameters */ #define R300_RE_FOG_SCALE 0x4294 #define R300_RE_FOG_START 0x4298 /* Not sure why there are duplicates of factor and constant values. * My best guess so far is that there are separate zbiases for test and write. * Ordering might be wrong. * Some of the tests indicate that fgl has a fallback implementation of zbias * via pixel shaders. 
*/ #define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */ #define R300_RE_ZBIAS_T_FACTOR 0x42A4 #define R300_RE_ZBIAS_T_CONSTANT 0x42A8 #define R300_RE_ZBIAS_W_FACTOR 0x42AC #define R300_RE_ZBIAS_W_CONSTANT 0x42B0 /* This register needs to be set to (1<<1) for RV350 to correctly * perform depth test (see --vb-triangles in r300_demo) * Don't know about other chips. - Vladimir * This is set to 3 when GL_POLYGON_OFFSET_FILL is on. * My guess is that there are two bits for each zbias primitive * (FILL, LINE, POINT). * One to enable depth test and one for depth write. * Yet this doesn't explain why depth writes work ... */ #define R300_RE_OCCLUSION_CNTL 0x42B4 # define R300_OCCLUSION_ON (1<<1) #define R300_RE_CULL_CNTL 0x42B8 # define R300_CULL_FRONT (1 << 0) # define R300_CULL_BACK (1 << 1) # define R300_FRONT_FACE_CCW (0 << 2) # define R300_FRONT_FACE_CW (1 << 2) /* BEGIN: Rasterization / Interpolators - many guesses */ /* 0_UNKNOWN_18 has always been set except for clear operations. * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends * on the vertex program, *not* the fragment program) */ #define R300_RS_CNTL_0 0x4300 # define R300_RS_CNTL_TC_CNT_SHIFT 2 # define R300_RS_CNTL_TC_CNT_MASK (7 << 2) /* number of color interpolators used */ # define R300_RS_CNTL_CI_CNT_SHIFT 7 # define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18) /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */ #define R300_RS_CNTL_1 0x4304 /* gap */ /* Only used for texture coordinates. * Use the source field to route texture coordinate input from the * vertex program to the desired interpolator. Note that the source * field is relative to the outputs the vertex program *actually* * writes. If a vertex program only writes texcoord[1], this will * be source index 0. * Set INTERP_USED on all interpolators that produce data used by * the fragment program. INTERP_USED looks like a swizzling mask, * but I haven't seen it used that way. * * Note: The _UNKNOWN constants are always set in their respective * register. I don't know if this is necessary. */ #define R300_RS_INTERP_0 0x4310 #define R300_RS_INTERP_1 0x4314 # define R300_RS_INTERP_1_UNKNOWN 0x40 #define R300_RS_INTERP_2 0x4318 # define R300_RS_INTERP_2_UNKNOWN 0x80 #define R300_RS_INTERP_3 0x431C # define R300_RS_INTERP_3_UNKNOWN 0xC0 #define R300_RS_INTERP_4 0x4320 #define R300_RS_INTERP_5 0x4324 #define R300_RS_INTERP_6 0x4328 #define R300_RS_INTERP_7 0x432C # define R300_RS_INTERP_SRC_SHIFT 2 # define R300_RS_INTERP_SRC_MASK (7 << 2) # define R300_RS_INTERP_USED 0x00D10000 /* These DWORDs control how vertex data is routed into fragment program * registers, after interpolators. 
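* For example (a constructed value, not one captured from a command
* stream), routing interpolator 1 into fragment program register 2 would
* be R300_RS_ROUTE_SOURCE_INTERP_1 | R300_RS_ROUTE_ENABLE |
* (2 << R300_RS_ROUTE_DEST_SHIFT), i.e. 0x89 with the definitions below.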
*/ #define R300_RS_ROUTE_0 0x4330 #define R300_RS_ROUTE_1 0x4334 #define R300_RS_ROUTE_2 0x4338 #define R300_RS_ROUTE_3 0x433C /* GUESS */ #define R300_RS_ROUTE_4 0x4340 /* GUESS */ #define R300_RS_ROUTE_5 0x4344 /* GUESS */ #define R300_RS_ROUTE_6 0x4348 /* GUESS */ #define R300_RS_ROUTE_7 0x434C /* GUESS */ # define R300_RS_ROUTE_SOURCE_INTERP_0 0 # define R300_RS_ROUTE_SOURCE_INTERP_1 1 # define R300_RS_ROUTE_SOURCE_INTERP_2 2 # define R300_RS_ROUTE_SOURCE_INTERP_3 3 # define R300_RS_ROUTE_SOURCE_INTERP_4 4 # define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */ # define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */ # define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */ # define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */ # define R300_RS_ROUTE_DEST_SHIFT 6 # define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */ /* Special handling for color: When the fragment program uses color, * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the * color register index. * * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state. * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly * correct or not. - Oliver. */ # define R300_RS_ROUTE_0_COLOR (1 << 14) # define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17 # define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */ /* As above, but for secondary color */ # define R300_RS_ROUTE_1_COLOR1 (1 << 14) # define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17 # define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17) # define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11) /* END: Rasterization / Interpolators - many guesses */ /* Hierarchical Z Enable */ #define R300_SC_HYPERZ 0x43a4 # define R300_SC_HYPERZ_DISABLE (0 << 0) # define R300_SC_HYPERZ_ENABLE (1 << 0) # define R300_SC_HYPERZ_MIN (0 << 1) # define R300_SC_HYPERZ_MAX (1 << 1) # define R300_SC_HYPERZ_ADJ_256 (0 << 2) # define R300_SC_HYPERZ_ADJ_128 (1 << 2) # define R300_SC_HYPERZ_ADJ_64 (2 << 2) # define R300_SC_HYPERZ_ADJ_32 (3 << 2) # define R300_SC_HYPERZ_ADJ_16 (4 << 2) # define R300_SC_HYPERZ_ADJ_8 (5 << 2) # define R300_SC_HYPERZ_ADJ_4 (6 << 2) # define R300_SC_HYPERZ_ADJ_2 (7 << 2) # define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5) # define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5) # define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6) # define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6) #define R300_SC_EDGERULE 0x43a8 /* BEGIN: Scissors and cliprects */ /* There are four clipping rectangles. Their corner coordinates are inclusive. * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending * on whether the pixel is inside cliprects 0-3, respectively. For example, * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned * the number 3 (binary 0011). * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set, * the pixel is rasterized. * * In addition to this, there is a scissors rectangle. Only pixels inside the * scissors rectangle are drawn. (coordinates are inclusive) * * For some reason, the top-left corner of the framebuffer is at (1440, 1440) * for the purpose of clipping and scissors.
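 * For illustration (a sketch derived from the description above): a corner
 * at screen coordinate (x, y) would be packed as
 *   ((x + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_X_SHIFT) |
 *   ((y + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_Y_SHIFT)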
*/ #define R300_RE_CLIPRECT_TL_0 0x43B0 #define R300_RE_CLIPRECT_BR_0 0x43B4 #define R300_RE_CLIPRECT_TL_1 0x43B8 #define R300_RE_CLIPRECT_BR_1 0x43BC #define R300_RE_CLIPRECT_TL_2 0x43C0 #define R300_RE_CLIPRECT_BR_2 0x43C4 #define R300_RE_CLIPRECT_TL_3 0x43C8 #define R300_RE_CLIPRECT_BR_3 0x43CC # define R300_CLIPRECT_OFFSET 1440 # define R300_CLIPRECT_MASK 0x1FFF # define R300_CLIPRECT_X_SHIFT 0 # define R300_CLIPRECT_X_MASK (0x1FFF << 0) # define R300_CLIPRECT_Y_SHIFT 13 # define R300_CLIPRECT_Y_MASK (0x1FFF << 13) #define R300_RE_CLIPRECT_CNTL 0x43D0 # define R300_CLIP_OUT (1 << 0) # define R300_CLIP_0 (1 << 1) # define R300_CLIP_1 (1 << 2) # define R300_CLIP_10 (1 << 3) # define R300_CLIP_2 (1 << 4) # define R300_CLIP_20 (1 << 5) # define R300_CLIP_21 (1 << 6) # define R300_CLIP_210 (1 << 7) # define R300_CLIP_3 (1 << 8) # define R300_CLIP_30 (1 << 9) # define R300_CLIP_31 (1 << 10) # define R300_CLIP_310 (1 << 11) # define R300_CLIP_32 (1 << 12) # define R300_CLIP_320 (1 << 13) # define R300_CLIP_321 (1 << 14) # define R300_CLIP_3210 (1 << 15) /* gap */ #define R300_RE_SCISSORS_TL 0x43E0 #define R300_RE_SCISSORS_BR 0x43E4 # define R300_SCISSORS_OFFSET 1440 # define R300_SCISSORS_X_SHIFT 0 # define R300_SCISSORS_X_MASK (0x1FFF << 0) # define R300_SCISSORS_Y_SHIFT 13 # define R300_SCISSORS_Y_MASK (0x1FFF << 13) /* END: Scissors and cliprects */ /* BEGIN: Texture specification */ /* * The texture specification dwords are grouped by meaning and not by texture * unit. This means that e.g. the offset for texture image unit N is found in * register TX_OFFSET_0 + (4*N) */ #define R300_TX_FILTER_0 0x4400 # define R300_TX_REPEAT 0 # define R300_TX_MIRRORED 1 # define R300_TX_CLAMP 4 # define R300_TX_CLAMP_TO_EDGE 2 # define R300_TX_CLAMP_TO_BORDER 6 # define R300_TX_WRAP_S_SHIFT 0 # define R300_TX_WRAP_S_MASK (7 << 0) # define R300_TX_WRAP_T_SHIFT 3 # define R300_TX_WRAP_T_MASK (7 << 3) # define R300_TX_WRAP_Q_SHIFT 6 # define R300_TX_WRAP_Q_MASK (7 << 6) # define R300_TX_MAG_FILTER_NEAREST (1 << 9) # define R300_TX_MAG_FILTER_LINEAR (2 << 9) # define R300_TX_MAG_FILTER_MASK (3 << 9) # define R300_TX_MIN_FILTER_NEAREST (1 << 11) # define R300_TX_MIN_FILTER_LINEAR (2 << 11) # define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11) # define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11) # define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11) # define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11) /* NOTE: NEAREST doesn't seem to exist. 
* I'm not setting MAG_FILTER_MASK and (3 << 11) on for all * anisotropy modes because that would void the selected mag filter */ # define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13) # define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13) # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13) # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13) # define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) ) # define R300_TX_MAX_ANISO_1_TO_1 (0 << 21) # define R300_TX_MAX_ANISO_2_TO_1 (2 << 21) # define R300_TX_MAX_ANISO_4_TO_1 (4 << 21) # define R300_TX_MAX_ANISO_8_TO_1 (6 << 21) # define R300_TX_MAX_ANISO_16_TO_1 (8 << 21) # define R300_TX_MAX_ANISO_MASK (14 << 21) #define R300_TX_FILTER1_0 0x4440 # define R300_CHROMA_KEY_MODE_DISABLE 0 # define R300_CHROMA_KEY_FORCE 1 # define R300_CHROMA_KEY_BLEND 2 # define R300_MC_ROUND_NORMAL (0<<2) # define R300_MC_ROUND_MPEG4 (1<<2) # define R300_LOD_BIAS_MASK 0x1fff # define R300_EDGE_ANISO_EDGE_DIAG (0<<13) # define R300_EDGE_ANISO_EDGE_ONLY (1<<13) # define R300_MC_COORD_TRUNCATE_DISABLE (0<<14) # define R300_MC_COORD_TRUNCATE_MPEG (1<<14) # define R300_TX_TRI_PERF_0_8 (0<<15) # define R300_TX_TRI_PERF_1_8 (1<<15) # define R300_TX_TRI_PERF_1_4 (2<<15) # define R300_TX_TRI_PERF_3_8 (3<<15) # define R300_ANISO_THRESHOLD_MASK (7<<17) #define R300_TX_SIZE_0 0x4480 # define R300_TX_WIDTHMASK_SHIFT 0 # define R300_TX_WIDTHMASK_MASK (2047 << 0) # define R300_TX_HEIGHTMASK_SHIFT 11 # define R300_TX_HEIGHTMASK_MASK (2047 << 11) # define R300_TX_UNK23 (1 << 23) # define R300_TX_MAX_MIP_LEVEL_SHIFT 26 # define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26) # define R300_TX_SIZE_PROJECTED (1<<30) # define R300_TX_SIZE_TXPITCH_EN (1<<31) #define R300_TX_FORMAT_0 0x44C0 /* The interpretation of the format word by Wladimir van der Laan */ /* The X, Y, Z and W refer to the layout of the components. They are given meanings as R, G, B and Alpha by the swizzle specification */ # define R300_TX_FORMAT_X8 0x0 # define R300_TX_FORMAT_X16 0x1 # define R300_TX_FORMAT_Y4X4 0x2 # define R300_TX_FORMAT_Y8X8 0x3 # define R300_TX_FORMAT_Y16X16 0x4 # define R300_TX_FORMAT_Z3Y3X2 0x5 # define R300_TX_FORMAT_Z5Y6X5 0x6 # define R300_TX_FORMAT_Z6Y5X5 0x7 # define R300_TX_FORMAT_Z11Y11X10 0x8 # define R300_TX_FORMAT_Z10Y11X11 0x9 # define R300_TX_FORMAT_W4Z4Y4X4 0xA # define R300_TX_FORMAT_W1Z5Y5X5 0xB # define R300_TX_FORMAT_W8Z8Y8X8 0xC # define R300_TX_FORMAT_W2Z10Y10X10 0xD # define R300_TX_FORMAT_W16Z16Y16X16 0xE # define R300_TX_FORMAT_DXT1 0xF # define R300_TX_FORMAT_DXT3 0x10 # define R300_TX_FORMAT_DXT5 0x11 # define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */ # define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */ # define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ # define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ /* 0x16 - some 16 bit green format.. ??
*/ # define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */ # define R300_TX_FORMAT_CUBIC_MAP (1 << 26) /* gap */ /* Floating point formats */ /* Note - hardware supports both 16 and 32 bit floating point */ # define R300_TX_FORMAT_FL_I16 0x18 # define R300_TX_FORMAT_FL_I16A16 0x19 # define R300_TX_FORMAT_FL_R16G16B16A16 0x1A # define R300_TX_FORMAT_FL_I32 0x1B # define R300_TX_FORMAT_FL_I32A32 0x1C # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D # define R300_TX_FORMAT_ATI2N 0x1F /* alpha modes, convenience mostly */ /* if you have alpha, pick constant appropriate to the number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */ # define R300_TX_FORMAT_ALPHA_1CH 0x000 # define R300_TX_FORMAT_ALPHA_2CH 0x200 # define R300_TX_FORMAT_ALPHA_4CH 0x600 # define R300_TX_FORMAT_ALPHA_NONE 0xA00 /* Swizzling */ /* constants */ # define R300_TX_FORMAT_X 0 # define R300_TX_FORMAT_Y 1 # define R300_TX_FORMAT_Z 2 # define R300_TX_FORMAT_W 3 # define R300_TX_FORMAT_ZERO 4 # define R300_TX_FORMAT_ONE 5 /* 2.0*Z, everything above 1.0 is set to 0.0 */ # define R300_TX_FORMAT_CUT_Z 6 /* 2.0*W, everything above 1.0 is set to 0.0 */ # define R300_TX_FORMAT_CUT_W 7 # define R300_TX_FORMAT_B_SHIFT 18 # define R300_TX_FORMAT_G_SHIFT 15 # define R300_TX_FORMAT_R_SHIFT 12 # define R300_TX_FORMAT_A_SHIFT 9 /* Convenience macro to take care of layout and swizzling */ # define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \ ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \ | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \ | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \ | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \ | (R300_TX_FORMAT_##FMT) ) /* Notes: * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1 * - CMP: If ARG2 < 0, return ARG1, else return ARG0 * - FLR: use FRC+MAD * - XPD: use MAD+MAD * - SGE, SLT: use MAD+CMP * - RSQ: use ABS modifier for argument * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation * (e.g. RCP) into color register * - apparently, there's no quick DST operation * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2" * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0" * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1" * * Operand selection * First stage selects three sources from the available registers and * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha). * fglrx sorts the three source fields: Registers before constants, * lower indices before higher indices; I do not know whether this is * necessary. * * fglrx fills unused sources with "read constant 0" * According to specs, you cannot select more than two different constants. * * Second stage selects the operands from the sources. This is defined in * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants * zero and one. * Swizzling and negation happen in this stage, as well. * * Important: Color and alpha seem to be mostly separate, i.e. their source * selection appears to be fully independent (the register storage is probably * physically split into a color and an alpha section). * However (because of the apparent physical split), there is some interaction * WRT swizzling. If, for example, you want to load an R component into an * Alpha operand, this R component is taken from a *color* source, not from * an alpha source. The corresponding register doesn't even have to appear in * the alpha sources list. (I hope this all makes sense to you) * * Destination selection * The destination register index is in FPI1 (color) and FPI3 (alpha) * together with enable bits. * There are separate enable bits for writing into temporary registers * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_* * /DSTA_OUTPUT).
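 * For illustration (a sketch, not from the original source): writing the
 * X and Y components of color temporary 2 would set
 *   (2 << R300_FPI1_DSTC_SHIFT) | R300_FPI1_DSTC_REG_X | R300_FPI1_DSTC_REG_Y
 * in INSTR1.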
You can write to both at once, or not write at all (the * same index must be used for both). * * Note: There is a special form for LRP * - Argument order is the same as in ARB_fragment_program. * - Operation is MAD * - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP * - Set FPI0/FPI2_SPECIAL_LRP * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */ #define R300_PFS_INSTR1_0 0x46C0 # define R300_FPI1_SRC0C_SHIFT 0 # define R300_FPI1_SRC0C_MASK (31 << 0) # define R300_FPI1_SRC0C_CONST (1 << 5) # define R300_FPI1_SRC1C_SHIFT 6 # define R300_FPI1_SRC1C_MASK (31 << 6) # define R300_FPI1_SRC1C_CONST (1 << 11) # define R300_FPI1_SRC2C_SHIFT 12 # define R300_FPI1_SRC2C_MASK (31 << 12) # define R300_FPI1_SRC2C_CONST (1 << 17) # define R300_FPI1_SRC_MASK 0x0003ffff # define R300_FPI1_DSTC_SHIFT 18 # define R300_FPI1_DSTC_MASK (31 << 18) # define R300_FPI1_DSTC_REG_MASK_SHIFT 23 # define R300_FPI1_DSTC_REG_X (1 << 23) # define R300_FPI1_DSTC_REG_Y (1 << 24) # define R300_FPI1_DSTC_REG_Z (1 << 25) # define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26 # define R300_FPI1_DSTC_OUTPUT_X (1 << 26) # define R300_FPI1_DSTC_OUTPUT_Y (1 << 27) # define R300_FPI1_DSTC_OUTPUT_Z (1 << 28) #define R300_PFS_INSTR3_0 0x47C0 # define R300_FPI3_SRC0A_SHIFT 0 # define R300_FPI3_SRC0A_MASK (31 << 0) # define R300_FPI3_SRC0A_CONST (1 << 5) # define R300_FPI3_SRC1A_SHIFT 6 # define R300_FPI3_SRC1A_MASK (31 << 6) # define R300_FPI3_SRC1A_CONST (1 << 11) # define R300_FPI3_SRC2A_SHIFT 12 # define R300_FPI3_SRC2A_MASK (31 << 12) # define R300_FPI3_SRC2A_CONST (1 << 17) # define R300_FPI3_SRC_MASK 0x0003ffff # define R300_FPI3_DSTA_SHIFT 18 # define R300_FPI3_DSTA_MASK (31 << 18) # define R300_FPI3_DSTA_REG (1 << 23) # define R300_FPI3_DSTA_OUTPUT (1 << 24) # define R300_FPI3_DSTA_DEPTH (1 << 27) #define R300_PFS_INSTR0_0 0x48C0 # define R300_FPI0_ARGC_SRC0C_XYZ 0 # define R300_FPI0_ARGC_SRC0C_XXX 1 # define R300_FPI0_ARGC_SRC0C_YYY 2 # define R300_FPI0_ARGC_SRC0C_ZZZ 3 # define R300_FPI0_ARGC_SRC1C_XYZ 4 # define R300_FPI0_ARGC_SRC1C_XXX 5 # define R300_FPI0_ARGC_SRC1C_YYY 6 # define R300_FPI0_ARGC_SRC1C_ZZZ 7 # define R300_FPI0_ARGC_SRC2C_XYZ 8 # define R300_FPI0_ARGC_SRC2C_XXX 9 # define R300_FPI0_ARGC_SRC2C_YYY 10 # define R300_FPI0_ARGC_SRC2C_ZZZ 11 # define R300_FPI0_ARGC_SRC0A 12 # define R300_FPI0_ARGC_SRC1A 13 # define R300_FPI0_ARGC_SRC2A 14 # define R300_FPI0_ARGC_SRC1C_LRP 15 # define R300_FPI0_ARGC_ZERO 20 # define R300_FPI0_ARGC_ONE 21 /* GUESS */ # define R300_FPI0_ARGC_HALF 22 # define R300_FPI0_ARGC_SRC0C_YZX 23 # define R300_FPI0_ARGC_SRC1C_YZX 24 # define R300_FPI0_ARGC_SRC2C_YZX 25 # define R300_FPI0_ARGC_SRC0C_ZXY 26 # define R300_FPI0_ARGC_SRC1C_ZXY 27 # define R300_FPI0_ARGC_SRC2C_ZXY 28 # define R300_FPI0_ARGC_SRC0CA_WZY 29 # define R300_FPI0_ARGC_SRC1CA_WZY 30 # define R300_FPI0_ARGC_SRC2CA_WZY 31 # define R300_FPI0_ARG0C_SHIFT 0 # define R300_FPI0_ARG0C_MASK (31 << 0) # define R300_FPI0_ARG0C_NEG (1 << 5) # define R300_FPI0_ARG0C_ABS (1 << 6) # define R300_FPI0_ARG1C_SHIFT 7 # define R300_FPI0_ARG1C_MASK (31 << 7) # define R300_FPI0_ARG1C_NEG (1 << 12) # define R300_FPI0_ARG1C_ABS (1 << 13) # define R300_FPI0_ARG2C_SHIFT 14 # define R300_FPI0_ARG2C_MASK (31 << 14) # define R300_FPI0_ARG2C_NEG (1 << 19) # define R300_FPI0_ARG2C_ABS (1 << 20) # define R300_FPI0_SPECIAL_LRP (1 << 21) # define R300_FPI0_OUTC_MAD (0 << 23) # define R300_FPI0_OUTC_DP3 (1 << 23) # define R300_FPI0_OUTC_DP4 (2 << 23) # define R300_FPI0_OUTC_MIN (4 << 23) # define R300_FPI0_OUTC_MAX (5 << 23) # define 
R300_FPI0_OUTC_CMPH (7 << 23) # define R300_FPI0_OUTC_CMP (8 << 23) # define R300_FPI0_OUTC_FRC (9 << 23) # define R300_FPI0_OUTC_REPL_ALPHA (10 << 23) # define R300_FPI0_OUTC_SAT (1 << 30) -# define R300_FPI0_INSERT_NOP (1 << 31) +# define R300_FPI0_INSERT_NOP (1U << 31) #define R300_PFS_INSTR2_0 0x49C0 # define R300_FPI2_ARGA_SRC0C_X 0 # define R300_FPI2_ARGA_SRC0C_Y 1 # define R300_FPI2_ARGA_SRC0C_Z 2 # define R300_FPI2_ARGA_SRC1C_X 3 # define R300_FPI2_ARGA_SRC1C_Y 4 # define R300_FPI2_ARGA_SRC1C_Z 5 # define R300_FPI2_ARGA_SRC2C_X 6 # define R300_FPI2_ARGA_SRC2C_Y 7 # define R300_FPI2_ARGA_SRC2C_Z 8 # define R300_FPI2_ARGA_SRC0A 9 # define R300_FPI2_ARGA_SRC1A 10 # define R300_FPI2_ARGA_SRC2A 11 # define R300_FPI2_ARGA_SRC1A_LRP 15 # define R300_FPI2_ARGA_ZERO 16 # define R300_FPI2_ARGA_ONE 17 /* GUESS */ # define R300_FPI2_ARGA_HALF 18 # define R300_FPI2_ARG0A_SHIFT 0 # define R300_FPI2_ARG0A_MASK (31 << 0) # define R300_FPI2_ARG0A_NEG (1 << 5) /* GUESS */ # define R300_FPI2_ARG0A_ABS (1 << 6) # define R300_FPI2_ARG1A_SHIFT 7 # define R300_FPI2_ARG1A_MASK (31 << 7) # define R300_FPI2_ARG1A_NEG (1 << 12) /* GUESS */ # define R300_FPI2_ARG1A_ABS (1 << 13) # define R300_FPI2_ARG2A_SHIFT 14 # define R300_FPI2_ARG2A_MASK (31 << 14) # define R300_FPI2_ARG2A_NEG (1 << 19) /* GUESS */ # define R300_FPI2_ARG2A_ABS (1 << 20) # define R300_FPI2_SPECIAL_LRP (1 << 21) # define R300_FPI2_OUTA_MAD (0 << 23) # define R300_FPI2_OUTA_DP4 (1 << 23) # define R300_FPI2_OUTA_MIN (2 << 23) # define R300_FPI2_OUTA_MAX (3 << 23) # define R300_FPI2_OUTA_CMP (6 << 23) # define R300_FPI2_OUTA_FRC (7 << 23) # define R300_FPI2_OUTA_EX2 (8 << 23) # define R300_FPI2_OUTA_LG2 (9 << 23) # define R300_FPI2_OUTA_RCP (10 << 23) # define R300_FPI2_OUTA_RSQ (11 << 23) # define R300_FPI2_OUTA_SAT (1 << 30) -# define R300_FPI2_UNKNOWN_31 (1 << 31) +# define R300_FPI2_UNKNOWN_31 (1U << 31) /* END: Fragment program instruction set */ /* Fog state and color */ #define R300_RE_FOG_STATE 0x4BC0 # define R300_FOG_ENABLE (1 << 0) # define R300_FOG_MODE_LINEAR (0 << 1) # define R300_FOG_MODE_EXP (1 << 1) # define R300_FOG_MODE_EXP2 (2 << 1) # define R300_FOG_MODE_MASK (3 << 1) #define R300_FOG_COLOR_R 0x4BC8 #define R300_FOG_COLOR_G 0x4BCC #define R300_FOG_COLOR_B 0x4BD0 #define R300_PP_ALPHA_TEST 0x4BD4 # define R300_REF_ALPHA_MASK 0x000000ff # define R300_ALPHA_TEST_FAIL (0 << 8) # define R300_ALPHA_TEST_LESS (1 << 8) # define R300_ALPHA_TEST_LEQUAL (3 << 8) # define R300_ALPHA_TEST_EQUAL (2 << 8) # define R300_ALPHA_TEST_GEQUAL (6 << 8) # define R300_ALPHA_TEST_GREATER (4 << 8) # define R300_ALPHA_TEST_NEQUAL (5 << 8) # define R300_ALPHA_TEST_PASS (7 << 8) # define R300_ALPHA_TEST_OP_MASK (7 << 8) # define R300_ALPHA_TEST_ENABLE (1 << 11) /* gap */ /* Fragment program parameters in 7.16 floating point */ #define R300_PFS_PARAM_0_X 0x4C00 #define R300_PFS_PARAM_0_Y 0x4C04 #define R300_PFS_PARAM_0_Z 0x4C08 #define R300_PFS_PARAM_0_W 0x4C0C /* GUESS: PARAM_31 is last, based on native limits reported by fglrx */ #define R300_PFS_PARAM_31_X 0x4DF0 #define R300_PFS_PARAM_31_Y 0x4DF4 #define R300_PFS_PARAM_31_Z 0x4DF8 #define R300_PFS_PARAM_31_W 0x4DFC /* Notes: * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in * the application * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND * are set to the same * function (both registers are always set up completely in any case) * - Most blend flags are simply copied from R200 and not tested yet */ #define R300_RB3D_CBLEND 0x4E04 #define R300_RB3D_ABLEND 0x4E08 
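/* For illustration (a sketch, not from the original source): a standard
 * GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blend would combine the bits
 * defined below as
 *   R300_BLEND_ENABLE | R300_COMB_FCN_ADD_CLAMP |
 *   (R300_BLEND_GL_SRC_ALPHA << R300_SRC_BLEND_SHIFT) |
 *   (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT)
 */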
/* the following only appear in CBLEND */ # define R300_BLEND_ENABLE (1 << 0) # define R300_BLEND_UNKNOWN (3 << 1) # define R300_BLEND_NO_SEPARATE (1 << 3) /* the following are shared between CBLEND and ABLEND */ # define R300_FCN_MASK (3 << 12) # define R300_COMB_FCN_ADD_CLAMP (0 << 12) # define R300_COMB_FCN_ADD_NOCLAMP (1 << 12) # define R300_COMB_FCN_SUB_CLAMP (2 << 12) # define R300_COMB_FCN_SUB_NOCLAMP (3 << 12) # define R300_COMB_FCN_MIN (4 << 12) # define R300_COMB_FCN_MAX (5 << 12) # define R300_COMB_FCN_RSUB_CLAMP (6 << 12) # define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12) # define R300_BLEND_GL_ZERO (32) # define R300_BLEND_GL_ONE (33) # define R300_BLEND_GL_SRC_COLOR (34) # define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35) # define R300_BLEND_GL_DST_COLOR (36) # define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37) # define R300_BLEND_GL_SRC_ALPHA (38) # define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39) # define R300_BLEND_GL_DST_ALPHA (40) # define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41) # define R300_BLEND_GL_SRC_ALPHA_SATURATE (42) # define R300_BLEND_GL_CONST_COLOR (43) # define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44) # define R300_BLEND_GL_CONST_ALPHA (45) # define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46) # define R300_BLEND_MASK (63) # define R300_SRC_BLEND_SHIFT (16) # define R300_DST_BLEND_SHIFT (24) #define R300_RB3D_BLEND_COLOR 0x4E10 #define R300_RB3D_COLORMASK 0x4E0C # define R300_COLORMASK0_B (1<<0) # define R300_COLORMASK0_G (1<<1) # define R300_COLORMASK0_R (1<<2) # define R300_COLORMASK0_A (1<<3) /* gap */ #define R300_RB3D_COLOROFFSET0 0x4E28 # define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */ #define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */ #define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */ #define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */ /* gap */ /* Bit 16: Larger tiles * Bit 17: 4x2 tiles * Bit 18: Extremely weird tile like, but some pixels duplicated? */ #define R300_RB3D_COLORPITCH0 0x4E38 # define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ # define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ # define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ # define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17) # define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ # define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ # define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ # define R300_COLOR_FORMAT_RGB565 (2 << 22) # define R300_COLOR_FORMAT_ARGB8888 (3 << 22) #define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */ #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ #define R300_RB3D_AARESOLVE_OFFSET 0x4E80 #define R300_RB3D_AARESOLVE_PITCH 0x4E84 #define R300_RB3D_AARESOLVE_CTL 0x4E88 /* gap */ /* Guess by Vladimir. * Set to 0A before 3D operations, set to 02 afterwards. */ /*#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C*/ # define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002 # define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A /* gap */ /* There seems to be no "write only" setting, so use Z-test = ALWAYS * for this. * Bit (1<<8) is the "test" bit. 
so plain write is 6 - vd */ #define R300_ZB_CNTL 0x4F00 # define R300_STENCIL_ENABLE (1 << 0) # define R300_Z_ENABLE (1 << 1) # define R300_Z_WRITE_ENABLE (1 << 2) # define R300_Z_SIGNED_COMPARE (1 << 3) # define R300_STENCIL_FRONT_BACK (1 << 4) #define R300_ZB_ZSTENCILCNTL 0x4f04 /* functions */ # define R300_ZS_NEVER 0 # define R300_ZS_LESS 1 # define R300_ZS_LEQUAL 2 # define R300_ZS_EQUAL 3 # define R300_ZS_GEQUAL 4 # define R300_ZS_GREATER 5 # define R300_ZS_NOTEQUAL 6 # define R300_ZS_ALWAYS 7 # define R300_ZS_MASK 7 /* operations */ # define R300_ZS_KEEP 0 # define R300_ZS_ZERO 1 # define R300_ZS_REPLACE 2 # define R300_ZS_INCR 3 # define R300_ZS_DECR 4 # define R300_ZS_INVERT 5 # define R300_ZS_INCR_WRAP 6 # define R300_ZS_DECR_WRAP 7 # define R300_Z_FUNC_SHIFT 0 /* front and back refer to operations done for front and back faces, i.e. separate stencil function support */ # define R300_S_FRONT_FUNC_SHIFT 3 # define R300_S_FRONT_SFAIL_OP_SHIFT 6 # define R300_S_FRONT_ZPASS_OP_SHIFT 9 # define R300_S_FRONT_ZFAIL_OP_SHIFT 12 # define R300_S_BACK_FUNC_SHIFT 15 # define R300_S_BACK_SFAIL_OP_SHIFT 18 # define R300_S_BACK_ZPASS_OP_SHIFT 21 # define R300_S_BACK_ZFAIL_OP_SHIFT 24 #define R300_ZB_STENCILREFMASK 0x4f08 # define R300_STENCILREF_SHIFT 0 # define R300_STENCILREF_MASK 0x000000ff # define R300_STENCILMASK_SHIFT 8 # define R300_STENCILMASK_MASK 0x0000ff00 # define R300_STENCILWRITEMASK_SHIFT 16 # define R300_STENCILWRITEMASK_MASK 0x00ff0000 /* gap */ #define R300_ZB_FORMAT 0x4f10 # define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0) # define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0) # define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0) /* reserved up to (15 << 0) */ # define R300_INVERT_13E3_LEADING_ONES (0 << 4) # define R300_INVERT_13E3_LEADING_ZEROS (1 << 4) #define R300_ZB_ZTOP 0x4F14 # define R300_ZTOP_DISABLE (0 << 0) # define R300_ZTOP_ENABLE (1 << 0) /* gap */ #define R300_ZB_ZCACHE_CTLSTAT 0x4f18 # define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0) # define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0) # define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1) # define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1) # define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31) -# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31) +# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1U << 31) #define R300_ZB_BW_CNTL 0x4f1c # define R300_HIZ_DISABLE (0 << 0) # define R300_HIZ_ENABLE (1 << 0) # define R300_HIZ_MIN (0 << 1) # define R300_HIZ_MAX (1 << 1) # define R300_FAST_FILL_DISABLE (0 << 2) # define R300_FAST_FILL_ENABLE (1 << 2) # define R300_RD_COMP_DISABLE (0 << 3) # define R300_RD_COMP_ENABLE (1 << 3) # define R300_WR_COMP_DISABLE (0 << 4) # define R300_WR_COMP_ENABLE (1 << 4) # define R300_ZB_CB_CLEAR_RMW (0 << 5) # define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5) # define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6) # define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6) # define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7) # define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7) # define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8) # define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8) # define R500_BMASK_ENABLE (0 << 10) # define R500_BMASK_DISABLE (1 << 10) # define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11) # define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11) # define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12) # define R500_HIZ_FP_EXP_BITS_1 (1 << 12) # define R500_HIZ_FP_EXP_BITS_2 (2 << 12) # define R500_HIZ_FP_EXP_BITS_3 (3 << 12) # define R500_HIZ_FP_EXP_BITS_4 (4 << 12) # define 
R500_HIZ_FP_EXP_BITS_5 (5 << 12) # define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15) # define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15) # define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16) # define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16) # define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17) # define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17) # define R500_PEQ_PACKING_DISABLE (0 << 18) # define R500_PEQ_PACKING_ENABLE (1 << 18) # define R500_COVERED_PTR_MASKING_DISABLE (0 << 18) # define R500_COVERED_PTR_MASKING_ENABLE (1 << 18) /* gap */ /* Z Buffer Address Offset. * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles. */ #define R300_ZB_DEPTHOFFSET 0x4f20 /* Z Buffer Pitch and Endian Control */ #define R300_ZB_DEPTHPITCH 0x4f24 # define R300_DEPTHPITCH_MASK 0x00003FFC # define R300_DEPTHMACROTILE_DISABLE (0 << 16) # define R300_DEPTHMACROTILE_ENABLE (1 << 16) # define R300_DEPTHMICROTILE_LINEAR (0 << 17) # define R300_DEPTHMICROTILE_TILED (1 << 17) # define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17) # define R300_DEPTHENDIAN_NO_SWAP (0 << 18) # define R300_DEPTHENDIAN_WORD_SWAP (1 << 18) # define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18) # define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18) /* Z Buffer Clear Value */ #define R300_ZB_DEPTHCLEARVALUE 0x4f28 #define R300_ZB_ZMASK_OFFSET 0x4f30 #define R300_ZB_ZMASK_PITCH 0x4f34 #define R300_ZB_ZMASK_WRINDEX 0x4f38 #define R300_ZB_ZMASK_DWORD 0x4f3c #define R300_ZB_ZMASK_RDINDEX 0x4f40 /* Hierarchical Z Memory Offset */ #define R300_ZB_HIZ_OFFSET 0x4f44 /* Hierarchical Z Write Index */ #define R300_ZB_HIZ_WRINDEX 0x4f48 /* Hierarchical Z Data */ #define R300_ZB_HIZ_DWORD 0x4f4c /* Hierarchical Z Read Index */ #define R300_ZB_HIZ_RDINDEX 0x4f50 /* Hierarchical Z Pitch */ #define R300_ZB_HIZ_PITCH 0x4f54 /* Z Buffer Z Pass Counter Data */ #define R300_ZB_ZPASS_DATA 0x4f58 /* Z Buffer Z Pass Counter Address */ #define R300_ZB_ZPASS_ADDR 0x4f5c /* Depth buffer X and Y coordinate offset */ #define R300_ZB_DEPTHXY_OFFSET 0x4f60 # define R300_DEPTHX_OFFSET_SHIFT 1 # define R300_DEPTHX_OFFSET_MASK 0x000007FE # define R300_DEPTHY_OFFSET_SHIFT 17 # define R300_DEPTHY_OFFSET_MASK 0x07FE0000 /* Sets the fifo sizes */ #define R500_ZB_FIFO_SIZE 0x4fd0 # define R500_OP_FIFO_SIZE_FULL (0 << 0) # define R500_OP_FIFO_SIZE_HALF (1 << 0) # define R500_OP_FIFO_SIZE_QUATER (2 << 0) # define R500_OP_FIFO_SIZE_EIGTHS (4 << 0) /* Stencil Reference Value and Mask for backfacing quads */ /* R300_ZB_STENCILREFMASK handles front face */ #define R500_ZB_STENCILREFMASK_BF 0x4fd4 # define R500_STENCILREF_SHIFT 0 # define R500_STENCILREF_MASK 0x000000ff # define R500_STENCILMASK_SHIFT 8 # define R500_STENCILMASK_MASK 0x0000ff00 # define R500_STENCILWRITEMASK_SHIFT 16 # define R500_STENCILWRITEMASK_MASK 0x00ff0000 /* BEGIN: Vertex program instruction set */ /* Every instruction is four dwords long: * DWORD 0: output and opcode * DWORD 1: first argument * DWORD 2: second argument * DWORD 3: third argument * * Notes: * - ABS r, a is implemented as MAX r, a, -a * - MOV is implemented as ADD to zero * - XPD is implemented as MUL + MAD * - FLR is implemented as FRC + ADD * - apparently, fglrx tries to schedule instructions so that there is at * least one instruction between the write to a temporary and the first * read from said temporary; however, violations of this scheduling are * allowed * - register indices seem to be unrelated to OpenGL aliasing to * conventional state * - only one attribute and one parameter can be loaded at a time;
however, * the same attribute/parameter can be used for more than one argument * - the second software argument for POW is the third hardware argument * (no idea why) * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2 * * There is some magic surrounding LIT: * The single argument is replicated across all three inputs, but swizzled: * First argument: xyzy * Second argument: xyzx * Third argument: xyzw * Whenever the result is used later in the fragment program, fglrx forces * x and w to be 1.0 in the input selection; I don't know whether this is * strictly necessary */ #define R300_VPI_OUT_OP_DOT (1 << 0) #define R300_VPI_OUT_OP_MUL (2 << 0) #define R300_VPI_OUT_OP_ADD (3 << 0) #define R300_VPI_OUT_OP_MAD (4 << 0) #define R300_VPI_OUT_OP_DST (5 << 0) #define R300_VPI_OUT_OP_FRC (6 << 0) #define R300_VPI_OUT_OP_MAX (7 << 0) #define R300_VPI_OUT_OP_MIN (8 << 0) #define R300_VPI_OUT_OP_SGE (9 << 0) #define R300_VPI_OUT_OP_SLT (10 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */ #define R300_VPI_OUT_OP_UNK12 (12 << 0) #define R300_VPI_OUT_OP_ARL (13 << 0) #define R300_VPI_OUT_OP_EXP (65 << 0) #define R300_VPI_OUT_OP_LOG (66 << 0) /* Used in fog computations, scalar(scalar) */ #define R300_VPI_OUT_OP_UNK67 (67 << 0) #define R300_VPI_OUT_OP_LIT (68 << 0) #define R300_VPI_OUT_OP_POW (69 << 0) #define R300_VPI_OUT_OP_RCP (70 << 0) #define R300_VPI_OUT_OP_RSQ (72 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */ #define R300_VPI_OUT_OP_UNK73 (73 << 0) #define R300_VPI_OUT_OP_EX2 (75 << 0) #define R300_VPI_OUT_OP_LG2 (76 << 0) #define R300_VPI_OUT_OP_MAD_2 (128 << 0) /* all temps, vector(scalar, vector, vector) */ #define R300_VPI_OUT_OP_UNK129 (129 << 0) #define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8) #define R300_VPI_OUT_REG_CLASS_ADDR (1 << 8) #define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8) #define R300_VPI_OUT_REG_CLASS_MASK (31 << 8) #define R300_VPI_OUT_REG_INDEX_SHIFT 13 /* GUESS based on fglrx native limits */ #define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) #define R300_VPI_OUT_WRITE_X (1 << 20) #define R300_VPI_OUT_WRITE_Y (1 << 21) #define R300_VPI_OUT_WRITE_Z (1 << 22) #define R300_VPI_OUT_WRITE_W (1 << 23) #define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0) #define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0) #define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0) #define R300_VPI_IN_REG_CLASS_NONE (9 << 0) #define R300_VPI_IN_REG_CLASS_MASK (31 << 0) #define R300_VPI_IN_REG_INDEX_SHIFT 5 /* GUESS based on fglrx native limits */ #define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* The R300 can select components from the input register arbitrarily. * Use the following constants, shifted by the component shift you * want to select */ #define R300_VPI_IN_SELECT_X 0 #define R300_VPI_IN_SELECT_Y 1 #define R300_VPI_IN_SELECT_Z 2 #define R300_VPI_IN_SELECT_W 3 #define R300_VPI_IN_SELECT_ZERO 4 #define R300_VPI_IN_SELECT_ONE 5 #define R300_VPI_IN_SELECT_MASK 7 #define R300_VPI_IN_X_SHIFT 13 #define R300_VPI_IN_Y_SHIFT 16 #define R300_VPI_IN_Z_SHIFT 19 #define R300_VPI_IN_W_SHIFT 22 #define R300_VPI_IN_NEG_X (1 << 25) #define R300_VPI_IN_NEG_Y (1 << 26) #define R300_VPI_IN_NEG_Z (1 << 27) #define R300_VPI_IN_NEG_W (1 << 28) /* END: Vertex program instruction set */ /* BEGIN: Packet 3 commands */ /* A primitive emission dword. 
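 * For illustration (a sketch, not from the original source): an indexed
 * triangle list of nverts vertices would be emitted as
 *   R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND |
 *   (nverts << R300_PRIM_NUM_VERTICES_SHIFT)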
*/ #define R300_PRIM_TYPE_NONE (0 << 0) #define R300_PRIM_TYPE_POINT (1 << 0) #define R300_PRIM_TYPE_LINE (2 << 0) #define R300_PRIM_TYPE_LINE_STRIP (3 << 0) #define R300_PRIM_TYPE_TRI_LIST (4 << 0) #define R300_PRIM_TYPE_TRI_FAN (5 << 0) #define R300_PRIM_TYPE_TRI_STRIP (6 << 0) #define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0) #define R300_PRIM_TYPE_RECT_LIST (8 << 0) #define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) /* GUESS (based on r200) */ #define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) #define R300_PRIM_TYPE_LINE_LOOP (12 << 0) #define R300_PRIM_TYPE_QUADS (13 << 0) #define R300_PRIM_TYPE_QUAD_STRIP (14 << 0) #define R300_PRIM_TYPE_POLYGON (15 << 0) #define R300_PRIM_TYPE_MASK 0xF #define R300_PRIM_WALK_IND (1 << 4) #define R300_PRIM_WALK_LIST (2 << 4) #define R300_PRIM_WALK_RING (3 << 4) #define R300_PRIM_WALK_MASK (3 << 4) /* GUESS (based on r200) */ #define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) #define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) #define R300_PRIM_NUM_VERTICES_SHIFT 16 #define R300_PRIM_NUM_VERTICES_MASK 0xffff /* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR. * Two parameter dwords: * 0. The first parameter always appears to be 0 * 1. The second parameter is a standard primitive emission dword. */ #define R300_PACKET3_3D_DRAW_VBUF 0x00002800 /* Specify the full set of vertex arrays as (address, stride). * The first parameter is the number of vertex arrays specified. * The rest of the command is a variable length list of blocks, where * each block is three dwords long and specifies two arrays. * The first dword of a block is split into two words, the less significant * word refers to the first array, the more significant word to the second * array in the block. * The low byte of each word contains the size of an array entry in dwords, * the high byte contains the stride of the array. * The second dword of a block contains the pointer to the first array, * the third dword of a block contains the pointer to the second array. * Note that if the total number of arrays is odd, the third dword of * the last block is omitted.
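 * For illustration (a sketch derived purely from the description above):
 * the first dword of a block covering arrays i and i+1 would be packed as
 *   (size_i | (stride_i << 8)) | ((size_i1 | (stride_i1 << 8)) << 16)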
*/ #define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00 #define R300_PACKET3_INDX_BUFFER 0x00003300 # define R300_EB_UNK1_SHIFT 24 # define R300_EB_UNK1 (0x80<<24) # define R300_EB_UNK2 0x0810 #define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400 #define R300_PACKET3_3D_DRAW_INDX_2 0x00003600 /* END: Packet 3 commands */ /* Color formats for 2d packets */ #define R300_CP_COLOR_FORMAT_CI8 2 #define R300_CP_COLOR_FORMAT_ARGB1555 3 #define R300_CP_COLOR_FORMAT_RGB565 4 #define R300_CP_COLOR_FORMAT_ARGB8888 6 #define R300_CP_COLOR_FORMAT_RGB332 7 #define R300_CP_COLOR_FORMAT_RGB8 9 #define R300_CP_COLOR_FORMAT_ARGB4444 15 /* * CP type-3 packets */ #define R300_CP_CMD_BITBLT_MULTI 0xC0009B00 #define R500_VAP_INDEX_OFFSET 0x208c #define R500_GA_US_VECTOR_INDEX 0x4250 #define R500_GA_US_VECTOR_DATA 0x4254 #define R500_RS_IP_0 0x4074 #define R500_RS_INST_0 0x4320 #define R500_US_CONFIG 0x4600 #define R500_US_FC_CTRL 0x4624 #define R500_US_CODE_ADDR 0x4630 #define R500_RB3D_COLOR_CLEAR_VALUE_AR 0x46c0 #define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8 #define R300_SU_REG_DEST 0x42c8 #define RV530_FG_ZBREG_DEST 0x4be8 #define R300_ZB_ZPASS_DATA 0x4f58 #define R300_ZB_ZPASS_ADDR 0x4f5c #endif /* _R300_REG_H */ Index: head/sys/dev/drm2/radeon/r500_reg.h =================================================================== --- head/sys/dev/drm2/radeon/r500_reg.h (revision 258779) +++ head/sys/dev/drm2/radeon/r500_reg.h (revision 258780) @@ -1,804 +1,804 @@ /* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #ifndef __R500_REG_H__ #define __R500_REG_H__ #include __FBSDID("$FreeBSD$"); /* pipe config regs */ #define R300_GA_POLY_MODE 0x4288 # define R300_FRONT_PTYPE_POINT (0 << 4) # define R300_FRONT_PTYPE_LINE (1 << 4) # define R300_FRONT_PTYPE_TRIANGE (2 << 4) # define R300_BACK_PTYPE_POINT (0 << 7) # define R300_BACK_PTYPE_LINE (1 << 7) # define R300_BACK_PTYPE_TRIANGE (2 << 7) #define R300_GA_ROUND_MODE 0x428c # define R300_GEOMETRY_ROUND_TRUNC (0 << 0) # define R300_GEOMETRY_ROUND_NEAREST (1 << 0) # define R300_COLOR_ROUND_TRUNC (0 << 2) # define R300_COLOR_ROUND_NEAREST (1 << 2) #define R300_GB_MSPOS0 0x4010 # define R300_MS_X0_SHIFT 0 # define R300_MS_Y0_SHIFT 4 # define R300_MS_X1_SHIFT 8 # define R300_MS_Y1_SHIFT 12 # define R300_MS_X2_SHIFT 16 # define R300_MS_Y2_SHIFT 20 # define R300_MSBD0_Y_SHIFT 24 # define R300_MSBD0_X_SHIFT 28 #define R300_GB_MSPOS1 0x4014 # define R300_MS_X3_SHIFT 0 # define R300_MS_Y3_SHIFT 4 # define R300_MS_X4_SHIFT 8 # define R300_MS_Y4_SHIFT 12 # define R300_MS_X5_SHIFT 16 # define R300_MS_Y5_SHIFT 20 # define R300_MSBD1_SHIFT 24 #define R300_GA_ENHANCE 0x4274 # define R300_GA_DEADLOCK_CNTL (1 << 0) # define R300_GA_FASTSYNC_CNTL (1 << 1) #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c # define R300_RB3D_DC_FLUSH (2 << 0) # define R300_RB3D_DC_FREE (2 << 2) # define R300_RB3D_DC_FINISH (1 << 4) #define R300_RB3D_ZCACHE_CTLSTAT 0x4f18 # define R300_ZC_FLUSH (1 << 0) # define R300_ZC_FREE (1 << 1) # define R300_ZC_FLUSH_ALL 0x3 #define R400_GB_PIPE_SELECT 0x402c #define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ #define R500_SU_REG_DEST 0x42c8 #define R300_GB_TILE_CONFIG 0x4018 # define R300_ENABLE_TILING (1 << 0) # define R300_PIPE_COUNT_RV350 (0 << 1) # define R300_PIPE_COUNT_R300 (3 << 1) # define R300_PIPE_COUNT_R420_3P (6 << 1) # define R300_PIPE_COUNT_R420 (7 << 1) # define R300_TILE_SIZE_8 (0 << 4) # define R300_TILE_SIZE_16 (1 << 4) # define R300_TILE_SIZE_32 (2 << 4) # define R300_SUBPIXEL_1_12 (0 << 16) # define R300_SUBPIXEL_1_16 (1 << 16) #define R300_DST_PIPE_CONFIG 0x170c -# define R300_PIPE_AUTO_CONFIG (1 << 31) +# define R300_PIPE_AUTO_CONFIG (1U << 31) #define R300_RB2D_DSTCACHE_MODE 0x3428 # define R300_DC_AUTOFLUSH_ENABLE (1 << 8) # define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17) #define RADEON_CP_STAT 0x7C0 #define RADEON_RBBM_CMDFIFO_ADDR 0xE70 #define RADEON_RBBM_CMDFIFO_DATA 0xE74 #define RADEON_ISYNC_CNTL 0x1724 # define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0) # define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1) # define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2) # define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3) # define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) # define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) #define RS480_NB_MC_INDEX 0x168 # define RS480_NB_MC_IND_WR_EN (1 << 8) #define RS480_NB_MC_DATA 0x16c /* * RS690 */ #define RS690_MCCFG_FB_LOCATION 0x100 #define RS690_MC_FB_START_MASK 0x0000FFFF #define RS690_MC_FB_START_SHIFT 0 #define RS690_MC_FB_TOP_MASK 0xFFFF0000 #define RS690_MC_FB_TOP_SHIFT 16 #define RS690_MCCFG_AGP_LOCATION 0x101 #define RS690_MC_AGP_START_MASK 0x0000FFFF #define RS690_MC_AGP_START_SHIFT 0 #define RS690_MC_AGP_TOP_MASK 0xFFFF0000 #define RS690_MC_AGP_TOP_SHIFT 16 #define RS690_MCCFG_AGP_BASE 0x102 #define RS690_MCCFG_AGP_BASE_2 0x103 #define RS690_MC_INIT_MISC_LAT_TIMER 0x104 #define RS690_HDP_FB_LOCATION 0x0134 #define RS690_MC_INDEX 0x78 # define RS690_MC_INDEX_MASK 0x1ff # define RS690_MC_INDEX_WR_EN (1 << 9) # define RS690_MC_INDEX_WR_ACK 0x7f #define RS690_MC_NB_CNTL 0x0 # define 
RS690_HIDE_MMCFG_BAR (1 << 3) # define RS690_AGPMODE30 (1 << 4) # define RS690_AGP30ENHANCED (1 << 5) #define RS690_MC_DATA 0x7c #define RS690_MC_STATUS 0x90 #define RS690_MC_STATUS_IDLE (1 << 0) #define RS480_AGP_BASE_2 0x0164 #define RS480_MC_MISC_CNTL 0x18 # define RS480_DISABLE_GTW (1 << 1) # define RS480_GART_INDEX_REG_EN (1 << 12) # define RS690_BLOCK_GFX_D3_EN (1 << 14) #define RS480_GART_FEATURE_ID 0x2b # define RS480_HANG_EN (1 << 11) # define RS480_TLB_ENABLE (1 << 18) # define RS480_P2P_ENABLE (1 << 19) # define RS480_GTW_LAC_EN (1 << 25) # define RS480_2LEVEL_GART (0 << 30) # define RS480_1LEVEL_GART (1 << 30) -# define RS480_PDC_EN (1 << 31) +# define RS480_PDC_EN (1U << 31) #define RS480_GART_BASE 0x2c #define RS480_GART_CACHE_CNTRL 0x2e # define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */ #define RS480_AGP_ADDRESS_SPACE_SIZE 0x38 # define RS480_GART_EN (1 << 0) # define RS480_VA_SIZE_32MB (0 << 1) # define RS480_VA_SIZE_64MB (1 << 1) # define RS480_VA_SIZE_128MB (2 << 1) # define RS480_VA_SIZE_256MB (3 << 1) # define RS480_VA_SIZE_512MB (4 << 1) # define RS480_VA_SIZE_1GB (5 << 1) # define RS480_VA_SIZE_2GB (6 << 1) #define RS480_AGP_MODE_CNTL 0x39 # define RS480_POST_GART_Q_SIZE (1 << 18) # define RS480_NONGART_SNOOP (1 << 19) # define RS480_AGP_RD_BUF_SIZE (1 << 20) # define RS480_REQ_TYPE_SNOOP_SHIFT 22 # define RS480_REQ_TYPE_SNOOP_MASK 0x3 # define RS480_REQ_TYPE_SNOOP_DIS (1 << 24) #define RS690_AIC_CTRL_SCRATCH 0x3A # define RS690_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1) /* * RS600 */ #define RS600_MC_STATUS 0x0 #define RS600_MC_STATUS_IDLE (1 << 0) #define RS600_MC_INDEX 0x70 # define RS600_MC_ADDR_MASK 0xffff # define RS600_MC_IND_SEQ_RBS_0 (1 << 16) # define RS600_MC_IND_SEQ_RBS_1 (1 << 17) # define RS600_MC_IND_SEQ_RBS_2 (1 << 18) # define RS600_MC_IND_SEQ_RBS_3 (1 << 19) # define RS600_MC_IND_AIC_RBS (1 << 20) # define RS600_MC_IND_CITF_ARB0 (1 << 21) # define RS600_MC_IND_CITF_ARB1 (1 << 22) # define RS600_MC_IND_WR_EN (1 << 23) #define RS600_MC_DATA 0x74 #define RS600_MC_STATUS 0x0 # define RS600_MC_IDLE (1 << 1) #define RS600_MC_FB_LOCATION 0x4 #define RS600_MC_FB_START_MASK 0x0000FFFF #define RS600_MC_FB_START_SHIFT 0 #define RS600_MC_FB_TOP_MASK 0xFFFF0000 #define RS600_MC_FB_TOP_SHIFT 16 #define RS600_MC_AGP_LOCATION 0x5 #define RS600_MC_AGP_START_MASK 0x0000FFFF #define RS600_MC_AGP_START_SHIFT 0 #define RS600_MC_AGP_TOP_MASK 0xFFFF0000 #define RS600_MC_AGP_TOP_SHIFT 16 #define RS600_MC_AGP_BASE 0x6 #define RS600_MC_AGP_BASE_2 0x7 #define RS600_MC_CNTL1 0x9 # define RS600_ENABLE_PAGE_TABLES (1 << 26) #define RS600_MC_PT0_CNTL 0x100 # define RS600_ENABLE_PT (1 << 0) # define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15) # define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21) # define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28) # define RS600_INVALIDATE_L2_CACHE (1 << 29) #define RS600_MC_PT0_CONTEXT0_CNTL 0x102 # define RS600_ENABLE_PAGE_TABLE (1 << 0) # define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1) #define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112 #define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114 #define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c #define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c #define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c #define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c #define RS600_MC_PT0_CLIENT0_CNTL 0x16c # define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0) # define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1) # define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8) # define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8) # 
define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8) # define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8) # define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8) # define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10) # define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10) # define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11) # define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14) # define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15) # define RS600_INVALIDATE_L1_TLB (1 << 20) /* rs600/rs690/rs740 */ # define RS600_BUS_MASTER_DIS (1 << 14) # define RS600_MSI_REARM (1 << 20) /* see RS400_MSI_REARM in AIC_CNTL for rs480 */ #define RV515_MC_FB_LOCATION 0x01 #define RV515_MC_FB_START_MASK 0x0000FFFF #define RV515_MC_FB_START_SHIFT 0 #define RV515_MC_FB_TOP_MASK 0xFFFF0000 #define RV515_MC_FB_TOP_SHIFT 16 #define RV515_MC_AGP_LOCATION 0x02 #define RV515_MC_AGP_START_MASK 0x0000FFFF #define RV515_MC_AGP_START_SHIFT 0 #define RV515_MC_AGP_TOP_MASK 0xFFFF0000 #define RV515_MC_AGP_TOP_SHIFT 16 #define RV515_MC_AGP_BASE 0x03 #define RV515_MC_AGP_BASE_2 0x04 #define R520_MC_FB_LOCATION 0x04 #define R520_MC_FB_START_MASK 0x0000FFFF #define R520_MC_FB_START_SHIFT 0 #define R520_MC_FB_TOP_MASK 0xFFFF0000 #define R520_MC_FB_TOP_SHIFT 16 #define R520_MC_AGP_LOCATION 0x05 #define R520_MC_AGP_START_MASK 0x0000FFFF #define R520_MC_AGP_START_SHIFT 0 #define R520_MC_AGP_TOP_MASK 0xFFFF0000 #define R520_MC_AGP_TOP_SHIFT 16 #define R520_MC_AGP_BASE 0x06 #define R520_MC_AGP_BASE_2 0x07 #define AVIVO_MC_INDEX 0x0070 #define R520_MC_STATUS 0x00 #define R520_MC_STATUS_IDLE (1<<1) #define RV515_MC_STATUS 0x08 #define RV515_MC_STATUS_IDLE (1<<4) #define RV515_MC_INIT_MISC_LAT_TIMER 0x09 #define AVIVO_MC_DATA 0x0074 #define R520_MC_IND_INDEX 0x70 #define R520_MC_IND_WR_EN (1 << 24) #define R520_MC_IND_DATA 0x74 #define RV515_MC_CNTL 0x5 # define RV515_MEM_NUM_CHANNELS_MASK 0x3 #define R520_MC_CNTL0 0x8 # define R520_MEM_NUM_CHANNELS_MASK (0x3 << 24) # define R520_MEM_NUM_CHANNELS_SHIFT 24 # define R520_MC_CHANNEL_SIZE (1 << 23) #define AVIVO_CP_DYN_CNTL 0x000f /* PLL */ # define AVIVO_CP_FORCEON (1 << 0) #define AVIVO_E2_DYN_CNTL 0x0011 /* PLL */ # define AVIVO_E2_FORCEON (1 << 0) #define AVIVO_IDCT_DYN_CNTL 0x0013 /* PLL */ # define AVIVO_IDCT_FORCEON (1 << 0) #define AVIVO_HDP_FB_LOCATION 0x134 #define AVIVO_VGA_RENDER_CONTROL 0x0300 # define AVIVO_VGA_VSTATUS_CNTL_MASK (3 << 16) #define AVIVO_D1VGA_CONTROL 0x0330 # define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0) # define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8) # define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9) # define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10) # define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16) # define AVIVO_DVGA_CONTROL_ROTATE (1<<24) #define AVIVO_D2VGA_CONTROL 0x0338 #define AVIVO_EXT1_PPLL_REF_DIV_SRC 0x400 #define AVIVO_EXT1_PPLL_REF_DIV 0x404 #define AVIVO_EXT1_PPLL_UPDATE_LOCK 0x408 #define AVIVO_EXT1_PPLL_UPDATE_CNTL 0x40c #define AVIVO_EXT2_PPLL_REF_DIV_SRC 0x410 #define AVIVO_EXT2_PPLL_REF_DIV 0x414 #define AVIVO_EXT2_PPLL_UPDATE_LOCK 0x418 #define AVIVO_EXT2_PPLL_UPDATE_CNTL 0x41c #define AVIVO_EXT1_PPLL_FB_DIV 0x430 #define AVIVO_EXT2_PPLL_FB_DIV 0x434 #define AVIVO_EXT1_PPLL_POST_DIV_SRC 0x438 #define AVIVO_EXT1_PPLL_POST_DIV 0x43c #define AVIVO_EXT2_PPLL_POST_DIV_SRC 0x440 #define AVIVO_EXT2_PPLL_POST_DIV 0x444 #define AVIVO_EXT1_PPLL_CNTL 0x448 #define AVIVO_EXT2_PPLL_CNTL 0x44c #define AVIVO_P1PLL_CNTL 0x450 #define AVIVO_P2PLL_CNTL 0x454 #define AVIVO_P1PLL_INT_SS_CNTL 0x458 #define 
AVIVO_P2PLL_INT_SS_CNTL 0x45c #define AVIVO_P1PLL_TMDSA_CNTL 0x460 #define AVIVO_P2PLL_LVTMA_CNTL 0x464 #define AVIVO_PCLK_CRTC1_CNTL 0x480 #define AVIVO_PCLK_CRTC2_CNTL 0x484 #define AVIVO_D1CRTC_H_TOTAL 0x6000 #define AVIVO_D1CRTC_H_BLANK_START_END 0x6004 #define AVIVO_D1CRTC_H_SYNC_A 0x6008 #define AVIVO_D1CRTC_H_SYNC_A_CNTL 0x600c #define AVIVO_D1CRTC_H_SYNC_B 0x6010 #define AVIVO_D1CRTC_H_SYNC_B_CNTL 0x6014 #define AVIVO_D1CRTC_V_TOTAL 0x6020 #define AVIVO_D1CRTC_V_BLANK_START_END 0x6024 #define AVIVO_D1CRTC_V_SYNC_A 0x6028 #define AVIVO_D1CRTC_V_SYNC_A_CNTL 0x602c #define AVIVO_D1CRTC_V_SYNC_B 0x6030 #define AVIVO_D1CRTC_V_SYNC_B_CNTL 0x6034 #define AVIVO_D1CRTC_CONTROL 0x6080 # define AVIVO_CRTC_EN (1 << 0) # define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c #define AVIVO_D1CRTC_STATUS 0x609c # define AVIVO_D1CRTC_V_BLANK (1 << 0) #define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 #define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 /* master controls */ #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 #define AVIVO_DC_CRTC_TV_CONTROL 0x60fc #define AVIVO_D1GRPH_ENABLE 0x6100 #define AVIVO_D1GRPH_CONTROL 0x6104 # define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP (0 << 0) # define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP (1 << 0) # define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP (2 << 0) # define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP (3 << 0) # define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED (0 << 8) # define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555 (0 << 8) # define AVIVO_D1GRPH_CONTROL_16BPP_RGB565 (1 << 8) # define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444 (2 << 8) # define AVIVO_D1GRPH_CONTROL_16BPP_AI88 (3 << 8) # define AVIVO_D1GRPH_CONTROL_16BPP_MONO16 (4 << 8) # define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888 (0 << 8) # define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010 (1 << 8) # define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL (2 << 8) # define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010 (3 << 8) # define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616 (0 << 8) # define AVIVO_D1GRPH_SWAP_RB (1 << 16) # define AVIVO_D1GRPH_TILED (1 << 20) # define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21) # define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20) # define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20) # define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20) # define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20) /* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2 * block and vice versa. This applies to GRPH, CUR, etc. 
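 * For example, in the defines below the D1 high address
 * R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH is 0x6914, i.e. in the D2
 * (0x69xx) register range, while the D2 variant sits at 0x6114.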
*/ #define AVIVO_D1GRPH_LUT_SEL 0x6108 #define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 #define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 #define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 #define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 #define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c #define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c #define AVIVO_D1GRPH_PITCH 0x6120 #define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124 #define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128 #define AVIVO_D1GRPH_X_START 0x612c #define AVIVO_D1GRPH_Y_START 0x6130 #define AVIVO_D1GRPH_X_END 0x6134 #define AVIVO_D1GRPH_Y_END 0x6138 #define AVIVO_D1GRPH_UPDATE 0x6144 # define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2) # define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 # define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) #define AVIVO_D1CUR_CONTROL 0x6400 # define AVIVO_D1CURSOR_EN (1 << 0) # define AVIVO_D1CURSOR_MODE_SHIFT 8 # define AVIVO_D1CURSOR_MODE_MASK (3 << 8) # define AVIVO_D1CURSOR_MODE_24BPP 2 #define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408 #define R700_D1CUR_SURFACE_ADDRESS_HIGH 0x6c0c #define R700_D2CUR_SURFACE_ADDRESS_HIGH 0x640c #define AVIVO_D1CUR_SIZE 0x6410 #define AVIVO_D1CUR_POSITION 0x6414 #define AVIVO_D1CUR_HOT_SPOT 0x6418 #define AVIVO_D1CUR_UPDATE 0x6424 # define AVIVO_D1CURSOR_UPDATE_LOCK (1 << 16) #define AVIVO_DC_LUT_RW_SELECT 0x6480 #define AVIVO_DC_LUT_RW_MODE 0x6484 #define AVIVO_DC_LUT_RW_INDEX 0x6488 #define AVIVO_DC_LUT_SEQ_COLOR 0x648c #define AVIVO_DC_LUT_PWL_DATA 0x6490 #define AVIVO_DC_LUT_30_COLOR 0x6494 #define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498 #define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c #define AVIVO_DC_LUT_AUTOFILL 0x64a0 #define AVIVO_DC_LUTA_CONTROL 0x64c0 #define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4 #define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8 #define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc #define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0 #define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4 #define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8 #define AVIVO_DC_LB_MEMORY_SPLIT 0x6520 # define AVIVO_DC_LB_MEMORY_SPLIT_MASK 0x3 # define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT 0 # define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 # define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 # define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY 2 # define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 # define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) # define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4 # define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff #define AVIVO_D1MODE_DATA_FORMAT 0x6528 # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C #define AVIVO_D1MODE_VBLANK_STATUS 0x6534 # define AVIVO_VBLANK_ACK (1 << 4) #define AVIVO_D1MODE_VLINE_START_END 0x6538 #define AVIVO_D1MODE_VLINE_STATUS 0x653c # define AVIVO_D1MODE_VLINE_STAT (1 << 12) #define AVIVO_DxMODE_INT_MASK 0x6540 # define AVIVO_D1MODE_INT_MASK (1 << 0) # define AVIVO_D2MODE_INT_MASK (1 << 8) #define AVIVO_D1MODE_VIEWPORT_START 0x6580 #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 #define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM 0x658c #define AVIVO_D1SCL_SCALER_ENABLE 0x6590 #define AVIVO_D1SCL_SCALER_TAP_CONTROL 0x6594 #define AVIVO_D1SCL_UPDATE 0x65cc # define AVIVO_D1SCL_UPDATE_LOCK (1 << 16) /* second crtc */ #define AVIVO_D2CRTC_H_TOTAL 0x6800 #define AVIVO_D2CRTC_H_BLANK_START_END 0x6804 #define AVIVO_D2CRTC_H_SYNC_A 0x6808 #define AVIVO_D2CRTC_H_SYNC_A_CNTL 0x680c #define AVIVO_D2CRTC_H_SYNC_B 0x6810 #define 
AVIVO_D2CRTC_H_SYNC_B_CNTL 0x6814 #define AVIVO_D2CRTC_V_TOTAL 0x6820 #define AVIVO_D2CRTC_V_BLANK_START_END 0x6824 #define AVIVO_D2CRTC_V_SYNC_A 0x6828 #define AVIVO_D2CRTC_V_SYNC_A_CNTL 0x682c #define AVIVO_D2CRTC_V_SYNC_B 0x6830 #define AVIVO_D2CRTC_V_SYNC_B_CNTL 0x6834 #define AVIVO_D2CRTC_CONTROL 0x6880 #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c #define AVIVO_D2CRTC_STATUS_POSITION 0x68a0 #define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 #define AVIVO_D2GRPH_ENABLE 0x6900 #define AVIVO_D2GRPH_CONTROL 0x6904 #define AVIVO_D2GRPH_LUT_SEL 0x6908 #define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910 #define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918 #define AVIVO_D2GRPH_PITCH 0x6920 #define AVIVO_D2GRPH_SURFACE_OFFSET_X 0x6924 #define AVIVO_D2GRPH_SURFACE_OFFSET_Y 0x6928 #define AVIVO_D2GRPH_X_START 0x692c #define AVIVO_D2GRPH_Y_START 0x6930 #define AVIVO_D2GRPH_X_END 0x6934 #define AVIVO_D2GRPH_Y_END 0x6938 #define AVIVO_D2GRPH_UPDATE 0x6944 #define AVIVO_D2GRPH_FLIP_CONTROL 0x6948 #define AVIVO_D2CUR_CONTROL 0x6c00 #define AVIVO_D2CUR_SURFACE_ADDRESS 0x6c08 #define AVIVO_D2CUR_SIZE 0x6c10 #define AVIVO_D2CUR_POSITION 0x6c14 #define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 #define AVIVO_D2MODE_VLINE_START_END 0x6d38 #define AVIVO_D2MODE_VLINE_STATUS 0x6d3c #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 #define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM 0x6d8c #define AVIVO_D2SCL_SCALER_ENABLE 0x6d90 #define AVIVO_D2SCL_SCALER_TAP_CONTROL 0x6d94 #define AVIVO_DDIA_BIT_DEPTH_CONTROL 0x7214 #define AVIVO_DACA_ENABLE 0x7800 # define AVIVO_DAC_ENABLE (1 << 0) #define AVIVO_DACA_SOURCE_SELECT 0x7804 # define AVIVO_DAC_SOURCE_CRTC1 (0 << 0) # define AVIVO_DAC_SOURCE_CRTC2 (1 << 0) # define AVIVO_DAC_SOURCE_TV (2 << 0) #define AVIVO_DACA_FORCE_OUTPUT_CNTL 0x783c # define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0) # define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8) # define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0) # define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1) # define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2) # define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24) #define AVIVO_DACA_POWERDOWN 0x7850 # define AVIVO_DACA_POWERDOWN_POWERDOWN (1 << 0) # define AVIVO_DACA_POWERDOWN_BLUE (1 << 8) # define AVIVO_DACA_POWERDOWN_GREEN (1 << 16) # define AVIVO_DACA_POWERDOWN_RED (1 << 24) #define AVIVO_DACB_ENABLE 0x7a00 #define AVIVO_DACB_SOURCE_SELECT 0x7a04 #define AVIVO_DACB_FORCE_OUTPUT_CNTL 0x7a3c # define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0) # define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8) # define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0) # define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1) # define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2) # define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24) #define AVIVO_DACB_POWERDOWN 0x7a50 # define AVIVO_DACB_POWERDOWN_POWERDOWN (1 << 0) # define AVIVO_DACB_POWERDOWN_BLUE (1 << 8) # define AVIVO_DACB_POWERDOWN_GREEN (1 << 16) # define AVIVO_DACB_POWERDOWN_RED #define AVIVO_TMDSA_CNTL 0x7880 # define AVIVO_TMDSA_CNTL_ENABLE (1 << 0) # define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2) # define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4) # define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8) # define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12) # define 
AVIVO_TMDSA_CNTL_PIXEL_ENCODING (1 << 16) # define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE (1 << 24) # define AVIVO_TMDSA_CNTL_SWAP (1 << 28) #define AVIVO_TMDSA_SOURCE_SELECT 0x7884 /* 78a8 appears to be some kind of (reasonably tolerant) clock? * 78d0 definitely hits the transmitter, definitely clock. */ /* MYSTERY1 This appears to control dithering? */ #define AVIVO_TMDSA_BIT_DEPTH_CONTROL 0x7894 # define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0) # define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4) # define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8) # define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12) # define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16) # define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20) # define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24) # define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26) #define AVIVO_TMDSA_DCBALANCER_CONTROL 0x78d0 # define AVIVO_TMDSA_DCBALANCER_CONTROL_EN (1 << 0) # define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN (1 << 8) # define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16) # define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE (1 << 24) #define AVIVO_TMDSA_DATA_SYNCHRONIZATION 0x78d8 # define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0) # define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8) #define AVIVO_TMDSA_CLOCK_ENABLE 0x7900 #define AVIVO_TMDSA_TRANSMITTER_ENABLE 0x7904 # define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE (1 << 0) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE (1 << 8) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK (1 << 16) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17) # define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18) #define AVIVO_TMDSA_TRANSMITTER_CONTROL 0x7910 # define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK (1 << 8) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK (1 << 14) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28) # define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29) -# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31) +# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1U << 31) #define AVIVO_LVTMA_CNTL 0x7a80 # define AVIVO_LVTMA_CNTL_ENABLE (1 << 0) # define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2) # define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4) # define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8) # define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12) # define AVIVO_LVTMA_CNTL_PIXEL_ENCODING (1 << 16) # define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE (1 << 24) # define 
AVIVO_LVTMA_CNTL_SWAP (1 << 28) #define AVIVO_LVTMA_SOURCE_SELECT 0x7a84 #define AVIVO_LVTMA_COLOR_FORMAT 0x7a88 #define AVIVO_LVTMA_BIT_DEPTH_CONTROL 0x7a94 # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0) # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4) # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8) # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12) # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16) # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20) # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24) # define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26) #define AVIVO_LVTMA_DCBALANCER_CONTROL 0x7ad0 # define AVIVO_LVTMA_DCBALANCER_CONTROL_EN (1 << 0) # define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN (1 << 8) # define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16) # define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE (1 << 24) #define AVIVO_LVTMA_DATA_SYNCHRONIZATION 0x78d8 # define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0) # define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8) #define R500_LVTMA_CLOCK_ENABLE 0x7b00 #define R600_LVTMA_CLOCK_ENABLE 0x7b04 #define R500_LVTMA_TRANSMITTER_ENABLE 0x7b04 #define R600_LVTMA_TRANSMITTER_ENABLE 0x7b08 # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN (1 << 5) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN (1 << 9) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17) # define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18) #define R500_LVTMA_TRANSMITTER_CONTROL 0x7b10 #define R600_LVTMA_TRANSMITTER_CONTROL 0x7b14 # define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK (1 << 8) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK (1 << 14) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28) # define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29) -# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31) +# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1U << 31) #define R500_LVTMA_PWRSEQ_CNTL 0x7af0 #define R600_LVTMA_PWRSEQ_CNTL 0x7af4 # define AVIVO_LVTMA_PWRSEQ_EN (1 << 0) # define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK (1 << 2) # define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK (1 << 3) # define AVIVO_LVTMA_PWRSEQ_TARGET_STATE (1 << 4) # define AVIVO_LVTMA_SYNCEN (1 << 8) # define AVIVO_LVTMA_SYNCEN_OVRD (1 << 9) # define AVIVO_LVTMA_SYNCEN_POL (1 << 10) # define AVIVO_LVTMA_DIGON (1 << 16) # define AVIVO_LVTMA_DIGON_OVRD (1 << 17) # define AVIVO_LVTMA_DIGON_POL (1 << 18) # define AVIVO_LVTMA_BLON (1 << 24) # define 
AVIVO_LVTMA_BLON_OVRD (1 << 25) # define AVIVO_LVTMA_BLON_POL (1 << 26) #define R500_LVTMA_PWRSEQ_STATE 0x7af4 #define R600_LVTMA_PWRSEQ_STATE 0x7af8 # define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R (1 << 0) # define AVIVO_LVTMA_PWRSEQ_STATE_DIGON (1 << 1) # define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN (1 << 2) # define AVIVO_LVTMA_PWRSEQ_STATE_BLON (1 << 3) # define AVIVO_LVTMA_PWRSEQ_STATE_DONE (1 << 4) # define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT (8) #define AVIVO_LVDS_BACKLIGHT_CNTL 0x7af8 # define AVIVO_LVDS_BACKLIGHT_CNTL_EN (1 << 0) # define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK 0x0000ff00 # define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT 8 #define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 #define AVIVO_DC_GPIO_HPD_A 0x7e94 #define AVIVO_DC_GPIO_HPD_Y 0x7e9c #define AVIVO_DC_I2C_STATUS1 0x7d30 # define AVIVO_DC_I2C_DONE (1 << 0) # define AVIVO_DC_I2C_NACK (1 << 1) # define AVIVO_DC_I2C_HALT (1 << 2) # define AVIVO_DC_I2C_GO (1 << 3) #define AVIVO_DC_I2C_RESET 0x7d34 # define AVIVO_DC_I2C_SOFT_RESET (1 << 0) # define AVIVO_DC_I2C_ABORT (1 << 8) #define AVIVO_DC_I2C_CONTROL1 0x7d38 # define AVIVO_DC_I2C_START (1 << 0) # define AVIVO_DC_I2C_STOP (1 << 1) # define AVIVO_DC_I2C_RECEIVE (1 << 2) # define AVIVO_DC_I2C_EN (1 << 8) # define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16) # define AVIVO_SEL_DDC1 0 # define AVIVO_SEL_DDC2 1 # define AVIVO_SEL_DDC3 2 #define AVIVO_DC_I2C_CONTROL2 0x7d3c # define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0) # define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8) #define AVIVO_DC_I2C_CONTROL3 0x7d40 # define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0) # define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1) # define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7) # define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8) # define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16) # define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24) #define AVIVO_DC_I2C_DATA 0x7d44 #define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48 # define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0) # define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8) # define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16) #define AVIVO_DC_I2C_ARBITRATION 0x7d50 # define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0) # define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1) # define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8) # define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9) # define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16) # define AVIVO_DC_I2C_HW_USING_I2C (1 << 17) #define AVIVO_DC_GPIO_DDC1_MASK 0x7e40 #define AVIVO_DC_GPIO_DDC1_A 0x7e44 #define AVIVO_DC_GPIO_DDC1_EN 0x7e48 #define AVIVO_DC_GPIO_DDC1_Y 0x7e4c #define AVIVO_DC_GPIO_DDC2_MASK 0x7e50 #define AVIVO_DC_GPIO_DDC2_A 0x7e54 #define AVIVO_DC_GPIO_DDC2_EN 0x7e58 #define AVIVO_DC_GPIO_DDC2_Y 0x7e5c #define AVIVO_DC_GPIO_DDC3_MASK 0x7e60 #define AVIVO_DC_GPIO_DDC3_A 0x7e64 #define AVIVO_DC_GPIO_DDC3_EN 0x7e68 #define AVIVO_DC_GPIO_DDC3_Y 0x7e6c #define AVIVO_DISP_INTERRUPT_STATUS 0x7edc # define AVIVO_D1_VBLANK_INTERRUPT (1 << 4) # define AVIVO_D2_VBLANK_INTERRUPT (1 << 5) #endif Index: head/sys/dev/drm2/radeon/r600_blit.c =================================================================== --- head/sys/dev/drm2/radeon/r600_blit.c (revision 258779) +++ head/sys/dev/drm2/radeon/r600_blit.c (revision 258780) @@ -1,876 +1,876 @@ /* * Copyright 2009 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Alex Deucher */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <dev/drm2/drmP.h> #include <dev/drm2/radeon/radeon_drm.h> #include "radeon_drv.h" #include "r600_blit_shaders.h" #define DI_PT_RECTLIST 0x11 #define DI_INDEX_SIZE_16_BIT 0x0 #define DI_SRC_SEL_AUTO_INDEX 0x2 #define FMT_8 0x1 #define FMT_5_6_5 0x8 #define FMT_8_8_8_8 0x1a #define COLOR_8 0x1 #define COLOR_5_6_5 0x8 #define COLOR_8_8_8_8 0x1a static void set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr) { u32 cb_color_info; int pitch, slice; RING_LOCALS; DRM_DEBUG("\n"); h = roundup2(h, 8); if (h < 8) h = 8; cb_color_info = ((format << 2) | (1 << 27)); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) && ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) { BEGIN_RING(21 + 2); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0)); OUT_RING(2 << 0); } else { BEGIN_RING(21); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); } OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((pitch << 0) | (slice << 10)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(cb_color_info); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); ADVANCE_RING(); } static void cp_set_surface_sync(drm_radeon_private_t *dev_priv, u32 sync_type, u32 size, u64 mc_addr) { u32 cp_coher_size; RING_LOCALS; DRM_DEBUG("\n"); if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); BEGIN_RING(5); OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3)); OUT_RING(sync_type); OUT_RING(cp_coher_size); OUT_RING((mc_addr >> 8)); OUT_RING(10); /* poll interval */
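/* Editorial note folded in as a comment: CP_COHER_SIZE is expressed in 256-byte units, which is why the size above is rounded up with ((size + 255) >> 8); a size of 0xffffffff is passed through unchanged to request the maximum flush range. */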
ADVANCE_RING(); } static void set_shaders(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u64 gpu_addr; int i; u32 *vs, *ps; uint32_t sq_pgm_resources; RING_LOCALS; DRM_DEBUG("\n"); /* load shaders */ vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset); ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); for (i = 0; i < r6xx_vs_size; i++) vs[i] = cpu_to_le32(r6xx_vs[i]); for (i = 0; i < r6xx_ps_size; i++) ps[i] = cpu_to_le32(r6xx_ps[i]); dev_priv->blit_vb->used = 512; gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset; /* setup shader regs */ sq_pgm_resources = (1 << 0); BEGIN_RING(9 + 12); /* VS */ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(sq_pgm_resources); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); /* PS */ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((gpu_addr + 256) >> 8); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(sq_pgm_resources | (1 << 28)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(2); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); ADVANCE_RING(); cp_set_surface_sync(dev_priv, R600_SH_ACTION_ENA, 512, gpu_addr); } static void set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) { uint32_t sq_vtx_constant_word2; RING_LOCALS; DRM_DEBUG("\n"); sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); #ifdef __BIG_ENDIAN sq_vtx_constant_word2 |= (2U << 30); #endif BEGIN_RING(9); OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); OUT_RING(0x460); OUT_RING(gpu_addr & 0xffffffff); OUT_RING(48 - 1); OUT_RING(sq_vtx_constant_word2); OUT_RING(1 << 0); OUT_RING(0); OUT_RING(0); OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30); ADVANCE_RING(); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)) cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(dev_priv, R600_VC_ACTION_ENA, 48, gpu_addr); } static void set_tex_resource(drm_radeon_private_t *dev_priv, int format, int w, int h, int pitch, u64 gpu_addr) { uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; RING_LOCALS; DRM_DEBUG("\n"); if (h < 1) h = 1; sq_tex_resource_word0 = (1 << 0); sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | ((w - 1) << 19)); sq_tex_resource_word1 = (format << 26); sq_tex_resource_word1 |= ((h - 1) << 0); sq_tex_resource_word4 = ((1 << 14) | (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25)); BEGIN_RING(9); OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); OUT_RING(0); OUT_RING(sq_tex_resource_word0); OUT_RING(sq_tex_resource_word1); OUT_RING(gpu_addr >> 8); OUT_RING(gpu_addr >> 8); OUT_RING(sq_tex_resource_word4); OUT_RING(0); 
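/* Editorial note folded in as a comment: the final dword below sets the SQ resource type field (bits 31:30), marking this slot as a valid texture resource rather than an invalid or buffer resource. */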
OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30); ADVANCE_RING(); } static void set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(12); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((x1 << 0) | (y1 << 16)); OUT_RING((x2 << 0) | (y2 << 16)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); - OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31)); + OUT_RING((x1 << 0) | (y1 << 16) | (1U << 31)); OUT_RING((x2 << 0) | (y2 << 16)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); - OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31)); + OUT_RING((x1 << 0) | (y1 << 16) | (1U << 31)); OUT_RING((x2 << 0) | (y2 << 16)); ADVANCE_RING(); } static void draw_auto(drm_radeon_private_t *dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(10); OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(DI_PT_RECTLIST); OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); #ifdef __BIG_ENDIAN OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT); #else OUT_RING(DI_INDEX_SIZE_16_BIT); #endif OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); OUT_RING(1); OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1)); OUT_RING(3); OUT_RING(DI_SRC_SEL_AUTO_INDEX); ADVANCE_RING(); COMMIT_RING(); } static void set_default_state(drm_radeon_private_t *dev_priv) { int i; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; RING_LOCALS; switch ((dev_priv->flags & RADEON_FAMILY_MASK)) { case CHIP_R600: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV630: case CHIP_RV635: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 40; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV610: case CHIP_RV620: case CHIP_RS780: case CHIP_RS880: default: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV670: num_ps_gprs = 144; num_vs_gprs = 40; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV770: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 256; 
num_vs_stack_entries = 256; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV730: case CHIP_RV740: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV710: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 48; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; } if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)) sq_config = 0; else sq_config = R600_VC_ENABLE; sq_config |= (R600_DX9_CONSTS | R600_ALU_INST_PREFER_VECTOR | R600_PS_PRIO(0) | R600_VS_PRIO(1) | R600_GS_PRIO(2) | R600_ES_PRIO(3)); sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) | R600_NUM_VS_GPRS(num_vs_gprs) | R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) | R600_NUM_ES_GPRS(num_es_gprs)); sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) | R600_NUM_VS_THREADS(num_vs_threads) | R600_NUM_GS_THREADS(num_gs_threads) | R600_NUM_ES_THREADS(num_es_threads)); sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries)); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { BEGIN_RING(r7xx_default_size + 10); for (i = 0; i < r7xx_default_size; i++) OUT_RING(r7xx_default_state[i]); } else { BEGIN_RING(r6xx_default_size + 10); for (i = 0; i < r6xx_default_size; i++) OUT_RING(r6xx_default_state[i]); } OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT); /* SQ config */ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6)); OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(sq_config); OUT_RING(sq_gpr_resource_mgmt_1); OUT_RING(sq_gpr_resource_mgmt_2); OUT_RING(sq_thread_resource_mgmt); OUT_RING(sq_stack_resource_mgmt_1); OUT_RING(sq_stack_resource_mgmt_2); ADVANCE_RING(); } /* 23 bits of float fractional data */ #define I2F_FRAC_BITS 23 #define I2F_MASK ((1 << I2F_FRAC_BITS) - 1) /* * Converts unsigned integer into 32-bit IEEE floating point representation. * Will be exact from 0 to 2^24. Above that, we round towards zero * as the fractional bits will not fit in a float. (It would be better to * round towards even as the fpu does, but that is slower.) */ __pure uint32_t int2float(uint32_t x) { uint32_t msb, exponent, fraction; /* Zero is special */ if (!x) return 0; /* Get location of the most significant bit */ msb = fls(x); /* * Use a rotate instead of a shift because that works both leftwards * and rightwards due to the mod(32) behaviour. This means we don't * need to check to see if we are above 2^24 or not. 
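* * In other words, the rotate is designed to park the most significant bit of x on bit 23, just above the 23-bit fraction field; I2F_MASK below then strips that bit off, so it becomes the implicit leading 1 of the IEEE encoding, and the biased exponent is simply added on top. (This paragraph is an editorial clarification, not part of the original comment.)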
*/ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK; exponent = (127 + msb) << I2F_FRAC_BITS; return fraction + exponent; } static int r600_nomm_get_vb(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->blit_vb = radeon_freelist_get(dev); if (!dev_priv->blit_vb) { DRM_ERROR("Unable to allocate vertex buffer for blit\n"); return -EAGAIN; } return 0; } static void r600_nomm_put_vb(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->blit_vb->used = 0; radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->masterp, dev_priv->blit_vb); } static void *r600_nomm_get_vb_ptr(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; return (((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + dev_priv->blit_vb->used)); } int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; int ret; DRM_DEBUG("\n"); ret = r600_nomm_get_vb(dev); if (ret) return ret; dev_priv->blit_vb->file_priv = file_priv; set_default_state(dev_priv); set_shaders(dev); return 0; } void r600_done_blit_copy(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(5); OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT); /* wait for 3D idle clean */ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN); ADVANCE_RING(); COMMIT_RING(); r600_nomm_put_vb(dev); } void r600_blit_copy(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int size_bytes) { drm_radeon_private_t *dev_priv = dev->dev_private; int max_bytes; u64 vb_addr; u32 *vb; vb = r600_nomm_get_vb_ptr(dev); if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { max_bytes = 8192; while (size_bytes) { int cur_size = size_bytes; int src_x = src_gpu_addr & 255; int dst_x = dst_gpu_addr & 255; int h = 1; src_gpu_addr = src_gpu_addr & ~255; dst_gpu_addr = dst_gpu_addr & ~255; if (!src_x && !dst_x) { h = (cur_size / max_bytes); if (h > 8192) h = 8192; if (h == 0) h = 1; else cur_size = max_bytes; } else { if (cur_size > max_bytes) cur_size = max_bytes; if (cur_size > (max_bytes - dst_x)) cur_size = (max_bytes - dst_x); if (cur_size > (max_bytes - src_x)) cur_size = (max_bytes - src_x); } if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); r600_nomm_get_vb(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); vb = r600_nomm_get_vb_ptr(dev); } vb[0] = int2float(dst_x); vb[1] = 0; vb[2] = int2float(src_x); vb[3] = 0; vb[4] = int2float(dst_x); vb[5] = int2float(h); vb[6] = int2float(src_x); vb[7] = int2float(h); vb[8] = int2float(dst_x + cur_size); vb[9] = int2float(h); vb[10] = int2float(src_x + cur_size); vb[11] = int2float(h); /* src */ set_tex_resource(dev_priv, FMT_8, src_x + cur_size, h, src_x + cur_size, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst */ set_render_target(dev_priv, COLOR_8, dst_x + cur_size, h, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | 
R600_CB0_DEST_BASE_ENA, cur_size * h, dst_gpu_addr); vb += 12; dev_priv->blit_vb->used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } } else { max_bytes = 8192 * 4; while (size_bytes) { int cur_size = size_bytes; int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; src_gpu_addr = src_gpu_addr & ~255; dst_gpu_addr = dst_gpu_addr & ~255; if (!src_x && !dst_x) { h = (cur_size / max_bytes); if (h > 8192) h = 8192; if (h == 0) h = 1; else cur_size = max_bytes; } else { if (cur_size > max_bytes) cur_size = max_bytes; if (cur_size > (max_bytes - dst_x)) cur_size = (max_bytes - dst_x); if (cur_size > (max_bytes - src_x)) cur_size = (max_bytes - src_x); } if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); r600_nomm_get_vb(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); vb = r600_nomm_get_vb_ptr(dev); } vb[0] = int2float(dst_x / 4); vb[1] = 0; vb[2] = int2float(src_x / 4); vb[3] = 0; vb[4] = int2float(dst_x / 4); vb[5] = int2float(h); vb[6] = int2float(src_x / 4); vb[7] = int2float(h); vb[8] = int2float((dst_x + cur_size) / 4); vb[9] = int2float(h); vb[10] = int2float((src_x + cur_size) / 4); vb[11] = int2float(h); /* src */ set_tex_resource(dev_priv, FMT_8_8_8_8, (src_x + cur_size) / 4, h, (src_x + cur_size) / 4, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst */ set_render_target(dev_priv, COLOR_8_8_8_8, (dst_x + cur_size) / 4, h, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, cur_size * h, dst_gpu_addr); vb += 12; dev_priv->blit_vb->used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } } } void r600_blit_swap(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int sx, int sy, int dx, int dy, int w, int h, int src_pitch, int dst_pitch, int cpp) { drm_radeon_private_t *dev_priv = dev->dev_private; int cb_format, tex_format; int sx2, sy2, dx2, dy2; u64 vb_addr; u32 *vb; if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); r600_nomm_get_vb(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); } vb = r600_nomm_get_vb_ptr(dev); sx2 = sx + w; sy2 = sy + h; dx2 = dx + w; dy2 = dy + h; vb[0] = int2float(dx); vb[1] = int2float(dy); vb[2] = int2float(sx); vb[3] = int2float(sy); vb[4] = int2float(dx); vb[5] = int2float(dy2); vb[6] = int2float(sx); vb[7] = int2float(sy2); vb[8] = int2float(dx2); vb[9] = int2float(dy2); vb[10] = int2float(sx2); vb[11] = int2float(sy2); switch(cpp) { case 4: cb_format = COLOR_8_8_8_8; tex_format = FMT_8_8_8_8; break; case 2: cb_format = COLOR_5_6_5; tex_format = FMT_5_6_5; break; default: cb_format = COLOR_8; tex_format = FMT_8; break; } /* src */ set_tex_resource(dev_priv, tex_format, src_pitch / cpp, sy2, src_pitch / cpp, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr); /* dst */ set_render_target(dev_priv, cb_format, dst_pitch / cpp, dy2, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, dx, dy, dx2, dy2); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; 
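/* Editorial note folded in as a comment: vb_addr now points at the twelve vertex floats written above; they are bound as the vertex fetch resource before drawing. */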
set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, dst_pitch * dy2, dst_gpu_addr); dev_priv->blit_vb->used += 12 * 4; } Index: head/sys/dev/drm2/radeon/r600_blit_kms.c =================================================================== --- head/sys/dev/drm2/radeon/r600_blit_kms.c (revision 258779) +++ head/sys/dev/drm2/radeon/r600_blit_kms.c (revision 258780) @@ -1,758 +1,758 @@ /* * Copyright 2009 Advanced Micro Devices, Inc. * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <dev/drm2/drmP.h> #include <dev/drm2/radeon/radeon_drm.h> #include "radeon.h" #include "radeon_asic.h" #include "r600d.h" #include "r600_blit_shaders.h" #include "radeon_blit_common.h" /* emits 21 on rv770+, 23 on r600 */ static void set_render_target(struct radeon_device *rdev, int format, int w, int h, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cb_color_info; int pitch, slice; h = roundup2(h, 8); if (h < 8) h = 8; cb_color_info = CB_FORMAT(format) | CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) { radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0)); radeon_ring_write(ring, 2 << 0); } radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (pitch << 0) | (slice << 10)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, cb_color_info); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring,
PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); } /* emits 5dw */ static void cp_set_surface_sync(struct radeon_device *rdev, u32 sync_type, u32 size, u64 mc_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cp_coher_size; if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, sync_type); radeon_ring_write(ring, cp_coher_size); radeon_ring_write(ring, mc_addr >> 8); radeon_ring_write(ring, 10); /* poll interval */ } /* emits 21dw + 1 surface sync = 26dw */ static void set_shaders(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u64 gpu_addr; u32 sq_pgm_resources; /* setup shader regs */ sq_pgm_resources = (1 << 0); /* VS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_pgm_resources); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); /* PS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_pgm_resources | (1 << 28)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 2); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); } /* emits 9 + 1 sync (5) = 14*/ static void set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_vtx_constant_word2; sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | SQ_VTXC_STRIDE(16); #ifdef __BIG_ENDIAN sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); #endif radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(ring, 0x460); radeon_ring_write(ring, gpu_addr & 0xffffffff); radeon_ring_write(ring, 48 - 1); radeon_ring_write(ring, sq_vtx_constant_word2); radeon_ring_write(ring, 1 << 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30); if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(rdev, PACKET3_VC_ACTION_ENA, 48, 
gpu_addr); } /* emits 9 */ static void set_tex_resource(struct radeon_device *rdev, int format, int w, int h, int pitch, u64 gpu_addr, u32 size) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; if (h < 1) h = 1; sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) | S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) | S_038000_TEX_WIDTH(w - 1); sq_tex_resource_word1 = S_038004_DATA_FORMAT(format); sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1); sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) | S_038010_DST_SEL_X(SQ_SEL_X) | S_038010_DST_SEL_Y(SQ_SEL_Y) | S_038010_DST_SEL_Z(SQ_SEL_Z) | S_038010_DST_SEL_W(SQ_SEL_W); cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, size, gpu_addr); radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(ring, 0); radeon_ring_write(ring, sq_tex_resource_word0); radeon_ring_write(ring, sq_tex_resource_word1); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, sq_tex_resource_word4); radeon_ring_write(ring, 0); radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30); } /* emits 12 */ static void set_scissors(struct radeon_device *rdev, int x1, int y1, int x2, int y2) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); - radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1U << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); } /* emits 10 */ static void draw_auto(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, DI_PT_RECTLIST); radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 2) | #endif DI_INDEX_SIZE_16_BIT); radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0)); radeon_ring_write(ring, 1); radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); radeon_ring_write(ring, 3); radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX); } /* emits 14 */ static void set_default_state(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; u64 
gpu_addr; int dwords; switch (rdev->family) { case CHIP_R600: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV630: case CHIP_RV635: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 40; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV610: case CHIP_RV620: case CHIP_RS780: case CHIP_RS880: default: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV670: num_ps_gprs = 144; num_vs_gprs = 40; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV770: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 256; num_vs_stack_entries = 256; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV730: case CHIP_RV740: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV710: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 48; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; } if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) sq_config = 0; else sq_config = VC_ENABLE; sq_config |= (DX9_CONSTS | ALU_INST_PREFER_VECTOR | PS_PRIO(0) | VS_PRIO(1) | GS_PRIO(2) | ES_PRIO(3)); sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | NUM_VS_GPRS(num_vs_gprs) | NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | NUM_ES_GPRS(num_es_gprs)); sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | NUM_VS_THREADS(num_vs_threads) | NUM_GS_THREADS(num_gs_threads) | NUM_ES_THREADS(num_es_threads)); sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | NUM_ES_STACK_ENTRIES(num_es_stack_entries)); /* emit an IB pointing at default state */ dwords = roundup2(rdev->r600_blit.state_len, 0x10); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | #endif (gpu_addr & 0xFFFFFFFC)); radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF); 
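/* Editorial note folded in as a comment: the next write is the IB length in dwords; r600_blit_init() pads the saved default state out to a 16-dword multiple with type-2 NOP packets, so the rounded-up length is always backed by valid packets. */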
radeon_ring_write(ring, dwords); /* SQ config */ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6)); radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_config); radeon_ring_write(ring, sq_gpr_resource_mgmt_1); radeon_ring_write(ring, sq_gpr_resource_mgmt_2); radeon_ring_write(ring, sq_thread_resource_mgmt); radeon_ring_write(ring, sq_stack_resource_mgmt_1); radeon_ring_write(ring, sq_stack_resource_mgmt_2); } int r600_blit_init(struct radeon_device *rdev) { u32 obj_size; int i, r, dwords; void *ptr; u32 packet2s[16]; int num_packet2s = 0; rdev->r600_blit.primitives.set_render_target = set_render_target; rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync; rdev->r600_blit.primitives.set_shaders = set_shaders; rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource; rdev->r600_blit.primitives.set_tex_resource = set_tex_resource; rdev->r600_blit.primitives.set_scissors = set_scissors; rdev->r600_blit.primitives.draw_auto = draw_auto; rdev->r600_blit.primitives.set_default_state = set_default_state; rdev->r600_blit.ring_size_common = 8; /* sync semaphore */ rdev->r600_blit.ring_size_common += 40; /* shaders + def state */ rdev->r600_blit.ring_size_common += 5; /* done copy */ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ rdev->r600_blit.ring_size_per_loop = 76; /* set_render_target emits 2 extra dwords on rv6xx */ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) rdev->r600_blit.ring_size_per_loop += 2; rdev->r600_blit.max_dim = 8192; rdev->r600_blit.state_offset = 0; if (rdev->family >= CHIP_RV770) rdev->r600_blit.state_len = r7xx_default_size; else rdev->r600_blit.state_len = r6xx_default_size; dwords = rdev->r600_blit.state_len; while (dwords & 0xf) { packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); dwords++; } obj_size = dwords * 4; obj_size = roundup2(obj_size, 256); rdev->r600_blit.vs_offset = obj_size; obj_size += r6xx_vs_size * 4; obj_size = roundup2(obj_size, 256); rdev->r600_blit.ps_offset = obj_size; obj_size += r6xx_ps_size * 4; obj_size = roundup2(obj_size, 256); /* pin copy shader into vram if not already initialized */ if (rdev->r600_blit.shader_obj == NULL) { r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("r600 failed to allocate shader\n"); return r; } r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (unlikely(r != 0)) return r; r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_gpu_addr); radeon_bo_unreserve(rdev->r600_blit.shader_obj); if (r) { dev_err(rdev->dev, "(%d) pin blit object failed\n", r); return r; } } DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n", obj_size, rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (unlikely(r != 0)) return r; r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); if (r) { DRM_ERROR("failed to map blit object %d\n", r); return r; } if (rdev->family >= CHIP_RV770) memcpy_toio((char *)ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len * 4); else memcpy_toio((char *)ptr + rdev->r600_blit.state_offset, r6xx_default_state, rdev->r600_blit.state_len * 4); if (num_packet2s) memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), packet2s, num_packet2s * 4); for (i = 0; i < r6xx_vs_size; i++) *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + 
i * 4) = cpu_to_le32(r6xx_vs[i]); for (i = 0; i < r6xx_ps_size; i++) *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } void r600_blit_fini(struct radeon_device *rdev) { int r; radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); if (rdev->r600_blit.shader_obj == NULL) return; /* If we can't reserve the bo, unref should be enough to destroy * it when it becomes idle. */ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (!r) { radeon_bo_unpin(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); } radeon_bo_unref(&rdev->r600_blit.shader_obj); } static unsigned r600_blit_create_rect(unsigned num_gpu_pages, int *width, int *height, int max_dim) { unsigned max_pages; unsigned pages = num_gpu_pages; int w, h; if (num_gpu_pages == 0) { /* not supposed to be called with no pages, but just in case */ h = 0; w = 0; pages = 0; DRM_ERROR("%s: called with no pages", __func__); } else { int rect_order = 2; h = RECT_UNIT_H; while (num_gpu_pages / rect_order) { h *= 2; rect_order *= 4; if (h >= max_dim) { h = max_dim; break; } } max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H); if (pages > max_pages) pages = max_pages; w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h; w = (w / RECT_UNIT_W) * RECT_UNIT_W; pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H); KASSERT(pages != 0, ("r600_blit_create_rect: pages == 0")); } DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages); /* return width and height only if the caller wants it */ if (height) *height = h; if (width) *width = w; return pages; } int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_fence **fence, struct radeon_sa_bo **vb, struct radeon_semaphore **sem) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; int ring_size; int num_loops = 0; int dwords_per_loop = rdev->r600_blit.ring_size_per_loop; /* num loops */ while (num_gpu_pages) { num_gpu_pages -= r600_blit_create_rect(num_gpu_pages, NULL, NULL, rdev->r600_blit.max_dim); num_loops++; } /* 48 bytes for vertex per loop */ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb, (num_loops*48)+256, 256, true); if (r) { return r; } r = radeon_semaphore_create(rdev, sem); if (r) { radeon_sa_bo_free(rdev, vb, NULL); return r; } /* calculate number of loops correctly */ ring_size = num_loops * dwords_per_loop; ring_size += rdev->r600_blit.ring_size_common; r = radeon_ring_lock(rdev, ring, ring_size); if (r) { radeon_sa_bo_free(rdev, vb, NULL); radeon_semaphore_free(rdev, sem, NULL); return r; } if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) { radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring, RADEON_RING_TYPE_GFX_INDEX); radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX); } else { radeon_semaphore_free(rdev, sem, NULL); } rdev->r600_blit.primitives.set_default_state(rdev); rdev->r600_blit.primitives.set_shaders(rdev); return 0; } void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, struct radeon_sa_bo *vb, struct radeon_semaphore *sem) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); if (r) { radeon_ring_unlock_undo(rdev, ring); return; } radeon_ring_unlock_commit(rdev, ring); radeon_sa_bo_free(rdev, &vb, *fence);
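/* Editorial note folded in as a comment: passing the fence here defers reuse of the vertex buffer (and of the semaphore below) until the GPU has actually consumed them. */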
radeon_semaphore_free(rdev, &sem, *fence); } void r600_kms_blit_copy(struct radeon_device *rdev, u64 src_gpu_addr, u64 dst_gpu_addr, unsigned num_gpu_pages, struct radeon_sa_bo *vb) { u64 vb_gpu_addr; u32 *vb_cpu_addr; DRM_DEBUG("emitting copy %16jx %16jx %d\n", (uintmax_t)src_gpu_addr, (uintmax_t)dst_gpu_addr, num_gpu_pages); vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb); vb_gpu_addr = radeon_sa_bo_gpu_addr(vb); while (num_gpu_pages) { int w, h; unsigned size_in_bytes; unsigned pages_per_loop = r600_blit_create_rect(num_gpu_pages, &w, &h, rdev->r600_blit.max_dim); size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE; DRM_DEBUG("rectangle w=%d h=%d\n", w, h); vb_cpu_addr[0] = 0; vb_cpu_addr[1] = 0; vb_cpu_addr[2] = 0; vb_cpu_addr[3] = 0; vb_cpu_addr[4] = 0; vb_cpu_addr[5] = int2float(h); vb_cpu_addr[6] = 0; vb_cpu_addr[7] = int2float(h); vb_cpu_addr[8] = int2float(w); vb_cpu_addr[9] = int2float(h); vb_cpu_addr[10] = int2float(w); vb_cpu_addr[11] = int2float(h); rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, w, h, w, src_gpu_addr, size_in_bytes); rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, w, h, dst_gpu_addr); rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr); rdev->r600_blit.primitives.draw_auto(rdev); rdev->r600_blit.primitives.cp_set_surface_sync(rdev, PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, size_in_bytes, dst_gpu_addr); vb_cpu_addr += 12; vb_gpu_addr += 4*12; src_gpu_addr += size_in_bytes; dst_gpu_addr += size_in_bytes; num_gpu_pages -= pages_per_loop; } } Index: head/sys/dev/drm2/radeon/r600_cs.c =================================================================== --- head/sys/dev/drm2/radeon/r600_cs.c (revision 258779) +++ head/sys/dev/drm2/radeon/r600_cs.c (revision 258780) @@ -1,2761 +1,2761 @@ /* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <dev/drm2/drmP.h> #include "radeon.h" #include "radeon_asic.h" #include "r600d.h" #include "r600_reg_safe.h" #include "r600_cp.h" #include "r600_cs.h" static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; struct r600_cs_track { /* configuration we mirror so that we use the same code between kms/ums */ u32 group_size; u32 nbanks; u32 npipes; /* value we track */ u32 sq_config; u32 log_nsamples; u32 nsamples; u32 cb_color_base_last[8]; struct radeon_bo *cb_color_bo[8]; u64 cb_color_bo_mc[8]; u64 cb_color_bo_offset[8]; struct radeon_bo *cb_color_frag_bo[8]; u64 cb_color_frag_offset[8]; struct radeon_bo *cb_color_tile_bo[8]; u64 cb_color_tile_offset[8]; u32 cb_color_mask[8]; u32 cb_color_info[8]; u32 cb_color_view[8]; u32 cb_color_size_idx[8]; /* unused */ u32 cb_target_mask; u32 cb_shader_mask; /* unused */ bool is_resolve; u32 cb_color_size[8]; u32 vgt_strmout_en; u32 vgt_strmout_buffer_en; struct radeon_bo *vgt_strmout_bo[4]; u64 vgt_strmout_bo_mc[4]; /* unused */ u32 vgt_strmout_bo_offset[4]; u32 vgt_strmout_size[4]; u32 db_depth_control; u32 db_depth_info; u32 db_depth_size_idx; u32 db_depth_view; u32 db_depth_size; u32 db_offset; struct radeon_bo *db_bo; u64 db_bo_mc; bool sx_misc_kill_all_prims; bool cb_dirty; bool db_dirty; bool streamout_dirty; struct radeon_bo *htile_bo; u64 htile_offset; u32 htile_surface; }; #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 } #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 } #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 } #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 } struct gpu_formats { unsigned blockwidth; unsigned blockheight; unsigned blocksize; unsigned valid_color; enum radeon_family min_family; }; static const struct gpu_formats color_formats_table[] = { /* 8 bit */ FMT_8_BIT(V_038004_COLOR_8, 1), FMT_8_BIT(V_038004_COLOR_4_4, 1), FMT_8_BIT(V_038004_COLOR_3_3_2, 1), FMT_8_BIT(V_038004_FMT_1, 0), /* 16-bit */ FMT_16_BIT(V_038004_COLOR_16, 1), FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1), FMT_16_BIT(V_038004_COLOR_8_8, 1), FMT_16_BIT(V_038004_COLOR_5_6_5, 1), FMT_16_BIT(V_038004_COLOR_6_5_5, 1), FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1), FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1), FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1), /* 24-bit */ FMT_24_BIT(V_038004_FMT_8_8_8), /* 32-bit */ FMT_32_BIT(V_038004_COLOR_32, 1), FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), FMT_32_BIT(V_038004_COLOR_16_16, 1), FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1), FMT_32_BIT(V_038004_COLOR_8_24, 1), FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1), FMT_32_BIT(V_038004_COLOR_24_8, 1), FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1), FMT_32_BIT(V_038004_COLOR_10_11_11, 1), FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1), FMT_32_BIT(V_038004_COLOR_11_11_10, 1), FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1), FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1), FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1), FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0), FMT_32_BIT(V_038004_FMT_32_AS_8, 0), FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0), /* 48-bit */ FMT_48_BIT(V_038004_FMT_16_16_16), FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT), /* 64-bit */ FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1), FMT_64_BIT(V_038004_COLOR_32_32, 1), FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1), FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1), FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1), FMT_96_BIT(V_038004_FMT_32_32_32), FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT), /* 128-bit */ FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1), FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1), [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 }, [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 }, /* block compressed formats */ [V_038004_FMT_BC1] = { 4, 4, 8, 0 }, [V_038004_FMT_BC2] = { 4, 4, 16, 0 }, [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, [V_038004_FMT_BC5] = { 4, 4, 16, 0}, [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ /* The other Evergreen formats */ [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, }; bool r600_fmt_is_valid_color(u32 format) { if (format >= DRM_ARRAY_SIZE(color_formats_table)) return false; if (color_formats_table[format].valid_color) return true; return false; } bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family) { if (format >= DRM_ARRAY_SIZE(color_formats_table)) return false; if (family < color_formats_table[format].min_family) return false; if (color_formats_table[format].blockwidth > 0) return true; return false; } int r600_fmt_get_blocksize(u32 format) { if (format >= DRM_ARRAY_SIZE(color_formats_table)) return 0; return color_formats_table[format].blocksize; } int r600_fmt_get_nblocksx(u32 format, u32 w) { unsigned bw; if (format >= DRM_ARRAY_SIZE(color_formats_table)) return 0; bw = color_formats_table[format].blockwidth; if (bw == 0) return 0; return (w + bw - 1) / bw; } int r600_fmt_get_nblocksy(u32 format, u32 h) { unsigned bh; if (format >= DRM_ARRAY_SIZE(color_formats_table)) return 0; bh = color_formats_table[format].blockheight; if (bh == 0) return 0; return (h + bh - 1) / bh; } struct array_mode_checker { int array_mode; u32 group_size; u32 nbanks; u32 npipes; u32 nsamples; u32 blocksize; }; /* returns alignment in pixels for pitch/height/depth and bytes for base */ static int r600_get_array_mode_alignment(struct array_mode_checker *values, u32 *pitch_align, u32 *height_align, u32 *depth_align, u64 *base_align) { u32 tile_width = 8; u32 tile_height = 8; u32 macro_tile_width = values->nbanks; u32 macro_tile_height = values->npipes; u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples; u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; switch (values->array_mode) { case ARRAY_LINEAR_GENERAL: /* technically tile_width/_height for pitch/height */ *pitch_align = 1; /* tile_width */ *height_align = 1; /* tile_height */ *depth_align = 1; *base_align = 1; break; case ARRAY_LINEAR_ALIGNED: *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); *height_align = 1; *depth_align = 1; *base_align = values->group_size; break; case ARRAY_1D_TILED_THIN1: *pitch_align = max((u32)tile_width, (u32)(values->group_size / (tile_height * values->blocksize * values->nsamples))); *height_align = tile_height; *depth_align = 1; *base_align = values->group_size; break; case ARRAY_2D_TILED_THIN1: *pitch_align = max((u32)macro_tile_width * tile_width, 
(u32)((values->group_size * values->nbanks) / (values->blocksize * values->nsamples * tile_width))); *height_align = macro_tile_height * tile_height; *depth_align = 1; *base_align = max(macro_tile_bytes, (*pitch_align) * values->blocksize * (*height_align) * values->nsamples); break; default: return -EINVAL; } return 0; } static void r600_cs_track_init(struct r600_cs_track *track) { int i; /* assume DX9 mode */ track->sq_config = DX9_CONSTS; for (i = 0; i < 8; i++) { track->cb_color_base_last[i] = 0; track->cb_color_size[i] = 0; track->cb_color_size_idx[i] = 0; track->cb_color_info[i] = 0; track->cb_color_view[i] = 0xFFFFFFFF; track->cb_color_bo[i] = NULL; track->cb_color_bo_offset[i] = 0xFFFFFFFF; track->cb_color_bo_mc[i] = 0xFFFFFFFF; track->cb_color_frag_bo[i] = NULL; track->cb_color_frag_offset[i] = 0xFFFFFFFF; track->cb_color_tile_bo[i] = NULL; track->cb_color_tile_offset[i] = 0xFFFFFFFF; track->cb_color_mask[i] = 0xFFFFFFFF; } track->is_resolve = false; track->nsamples = 16; track->log_nsamples = 4; track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->cb_dirty = true; track->db_bo = NULL; track->db_bo_mc = 0xFFFFFFFF; /* assume the biggest format and that htile is enabled */ track->db_depth_info = 7 | (1 << 25); track->db_depth_view = 0xFFFFC000; track->db_depth_size = 0xFFFFFFFF; track->db_depth_size_idx = 0; track->db_depth_control = 0xFFFFFFFF; track->db_dirty = true; track->htile_bo = NULL; track->htile_offset = 0xFFFFFFFF; track->htile_surface = 0; for (i = 0; i < 4; i++) { track->vgt_strmout_size[i] = 0; track->vgt_strmout_bo[i] = NULL; track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF; track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF; } track->streamout_dirty = true; track->sx_misc_kill_all_prims = false; } static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) { struct r600_cs_track *track = p->track; u32 slice_tile_max, size, tmp; u32 height, height_align, pitch, pitch_align, depth_align; u64 base_offset, base_align; struct array_mode_checker array_check; volatile u32 *ib = p->ib.ptr; unsigned array_mode; u32 format; /* When resolve is used, the second colorbuffer has always 1 sample. */ unsigned nsamples = track->is_resolve && i == 1 ? 
1 : track->nsamples; size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; format = G_0280A0_FORMAT(track->cb_color_info[i]); if (!r600_fmt_is_valid_color(format)) { dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", __func__, __LINE__, format, i, track->cb_color_info[i]); return -EINVAL; } /* pitch in pixels */ pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; slice_tile_max *= 64; height = slice_tile_max / pitch; if (height > 8192) height = 8192; array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; array_check.array_mode = array_mode; array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; array_check.nsamples = nsamples; array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, track->cb_color_info[i]); return -EINVAL; } switch (array_mode) { case V_0280A0_ARRAY_LINEAR_GENERAL: break; case V_0280A0_ARRAY_LINEAR_ALIGNED: break; case V_0280A0_ARRAY_1D_TILED_THIN1: /* avoid breaking userspace */ if (height > 7) height &= ~0x7; break; case V_0280A0_ARRAY_2D_TILED_THIN1: break; default: dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, track->cb_color_info[i]); return -EINVAL; } if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", __func__, __LINE__, pitch, pitch_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(height, height_align)) { dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", __func__, __LINE__, height, height_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i, (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); return -EINVAL; } /* check offset */ tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format) * nsamples; switch (array_mode) { default: case V_0280A0_ARRAY_LINEAR_GENERAL: case V_0280A0_ARRAY_LINEAR_ALIGNED: tmp += track->cb_color_view[i] & 0xFF; break; case V_0280A0_ARRAY_1D_TILED_THIN1: case V_0280A0_ARRAY_2D_TILED_THIN1: tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp; break; } if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { /* the initial DDX does bad things with the CB size occasionally */ /* it rounds up height too far for slice tile max but the BO is smaller */ /* r600c,g also seem to flush at bad times in some apps resulting in * bogus values here. So for linear just allow anything to avoid breaking * broken userspace. 
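 * (Illustration with made-up numbers: if the DDX rounds a 300-line
 * linear surface up to 304 lines, the size computed above becomes
 * 1024 * 304 * 4 = 1245184 bytes while the BO only holds
 * 1024 * 300 * 4 = 1228800, so a strict test would reject a working
 * command stream.)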
*/ } else { dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n", __func__, i, array_mode, (uintmax_t)track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]), pitch, height, r600_fmt_get_nblocksx(format, pitch), r600_fmt_get_nblocksy(format, height), r600_fmt_get_blocksize(format)); return -EINVAL; } } /* limit max tile */ tmp = (height * pitch) >> 6; if (tmp < slice_tile_max) slice_tile_max = tmp; tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); ib[track->cb_color_size_idx[i]] = tmp; /* FMASK/CMASK */ switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { case V_0280A0_TILE_DISABLE: break; case V_0280A0_FRAG_ENABLE: if (track->nsamples > 1) { uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); /* the tile size is 8x8, but the size is in units of bits. * for bytes, do just * 8. */ uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); if (bytes + track->cb_color_frag_offset[i] > radeon_bo_size(track->cb_color_frag_bo[i])) { dev_warn(p->dev, "%s FMASK_TILE_MAX too large " "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", __func__, tile_max, bytes, (uintmax_t)track->cb_color_frag_offset[i], radeon_bo_size(track->cb_color_frag_bo[i])); return -EINVAL; } } /* fall through */ case V_0280A0_CLEAR_ENABLE: { uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); /* One block = 128x128 pixels, one 8x8 tile has 4 bits.. * (128*128) / (8*8) / 2 = 128 bytes per block. */ uint32_t bytes = (block_max + 1) * 128; if (bytes + track->cb_color_tile_offset[i] > radeon_bo_size(track->cb_color_tile_bo[i])) { dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", __func__, block_max, bytes, (uintmax_t)track->cb_color_tile_offset[i], radeon_bo_size(track->cb_color_tile_bo[i])); return -EINVAL; } break; } default: dev_warn(p->dev, "%s invalid tile mode\n", __func__); return -EINVAL; } return 0; } static int r600_cs_track_validate_db(struct radeon_cs_parser *p) { struct r600_cs_track *track = p->track; u32 nviews, bpe, ntiles, size, slice_tile_max, tmp; u32 height_align, pitch_align, depth_align; u32 pitch = 8192; u32 height = 8192; u64 base_offset, base_align; struct array_mode_checker array_check; int array_mode; volatile u32 *ib = p->ib.ptr; if (track->db_bo == NULL) { dev_warn(p->dev, "z/stencil with no depth buffer\n"); return -EINVAL; } switch (G_028010_FORMAT(track->db_depth_info)) { case V_028010_DEPTH_16: bpe = 2; break; case V_028010_DEPTH_X8_24: case V_028010_DEPTH_8_24: case V_028010_DEPTH_X8_24_FLOAT: case V_028010_DEPTH_8_24_FLOAT: case V_028010_DEPTH_32_FLOAT: bpe = 4; break; case V_028010_DEPTH_X24_8_32_FLOAT: bpe = 8; break; default: dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); return -EINVAL; } if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { if (!track->db_depth_size_idx) { dev_warn(p->dev, "z/stencil buffer size not set\n"); return -EINVAL; } tmp = radeon_bo_size(track->db_bo) - track->db_offset; tmp = (tmp / bpe) >> 6; if (!tmp) { dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", track->db_depth_size, bpe, track->db_offset, radeon_bo_size(track->db_bo)); return -EINVAL; } ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); } else { size = radeon_bo_size(track->db_bo); /* pitch in pixels */ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; slice_tile_max = 
G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; slice_tile_max *= 64; height = slice_tile_max / pitch; if (height > 8192) height = 8192; base_offset = track->db_bo_mc + track->db_offset; array_mode = G_028010_ARRAY_MODE(track->db_depth_info); array_check.array_mode = array_mode; array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; array_check.nsamples = track->nsamples; array_check.blocksize = bpe; if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, G_028010_ARRAY_MODE(track->db_depth_info), track->db_depth_info); return -EINVAL; } switch (array_mode) { case V_028010_ARRAY_1D_TILED_THIN1: /* don't break userspace */ height &= ~0x7; break; case V_028010_ARRAY_2D_TILED_THIN1: break; default: dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, G_028010_ARRAY_MODE(track->db_depth_info), track->db_depth_info); return -EINVAL; } if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", __func__, __LINE__, pitch, pitch_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(height, height_align)) { dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", __func__, __LINE__, height, height_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__, (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); return -EINVAL; } ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; tmp = ntiles * bpe * 64 * nviews * track->nsamples; if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", array_mode, track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, radeon_bo_size(track->db_bo)); return -EINVAL; } } /* hyperz */ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { unsigned long size; unsigned nbx, nby; if (track->htile_bo == NULL) { dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", __func__, __LINE__, track->db_depth_info); return -EINVAL; } if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", __func__, __LINE__, track->db_depth_size); return -EINVAL; } nbx = pitch; nby = height; if (G_028D24_LINEAR(track->htile_surface)) { /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */ nbx = roundup2(nbx, 16 * 8); /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ nby = roundup(nby, track->npipes * 8); } else { /* always assume 8x8 htile */ /* align is htile align * 8, htile align vary according to * number of pipe and tile width and nby */ switch (track->npipes) { case 8: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup2(nbx, 64 * 8); nby = roundup2(nby, 64 * 8); break; case 4: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup2(nbx, 64 * 8); nby = roundup2(nby, 32 * 8); break; case 2: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup2(nbx, 32 * 8); nby = roundup2(nby, 32 * 8); break; case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ nbx = roundup2(nbx, 32 * 8); nby = roundup2(nby, 16 * 8); break; default: dev_warn(p->dev, "%s:%d invalid num pipes %d\n", __func__, __LINE__, track->npipes); return -EINVAL; } } /* compute number of htile */ nbx = nbx >> 3; nby = nby >> 3; /* size 
must be aligned on npipes * 2K boundary */ size = roundup(nbx * nby * 4, track->npipes * (2 << 10)); size += track->htile_offset; if (size > radeon_bo_size(track->htile_bo)) { dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", __func__, __LINE__, radeon_bo_size(track->htile_bo), size, nbx, nby); return -EINVAL; } } track->db_dirty = false; return 0; } static int r600_cs_track_check(struct radeon_cs_parser *p) { struct r600_cs_track *track = p->track; u32 tmp; int r, i; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) return 0; /* check streamout */ if (track->streamout_dirty && track->vgt_strmout_en) { for (i = 0; i < 4; i++) { if (track->vgt_strmout_buffer_en & (1 << i)) { if (track->vgt_strmout_bo[i]) { u64 offset = (u64)track->vgt_strmout_bo_offset[i] + (u64)track->vgt_strmout_size[i]; if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n", i, (uintmax_t)offset, radeon_bo_size(track->vgt_strmout_bo[i])); return -EINVAL; } } else { dev_warn(p->dev, "No buffer for streamout %d\n", i); return -EINVAL; } } } track->streamout_dirty = false; } if (track->sx_misc_kill_all_prims) return 0; /* check that we have a cb for each enabled target, we don't check * shader_mask because it seems mesa isn't always setting it :( */ if (track->cb_dirty) { tmp = track->cb_target_mask; /* We must check both colorbuffers for RESOLVE. */ if (track->is_resolve) { tmp |= 0xff; } for (i = 0; i < 8; i++) { if ((tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ if (track->cb_color_bo[i] == NULL) { dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); return -EINVAL; } /* perform rewrite of CB_COLOR[0-7]_SIZE */ r = r600_cs_track_validate_cb(p, i); if (r) return r; } } track->cb_dirty = false; } /* Check depth buffer */ if (track->db_dirty && G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && (G_028800_STENCIL_ENABLE(track->db_depth_control) || G_028800_Z_ENABLE(track->db_depth_control))) { r = r600_cs_track_validate_db(p); if (r) return r; } return 0; } /** * r600_cs_packet_parse() - parse cp packet and point ib index to next packet * @parser: parser structure holding parsing context. * @pkt: where to store packet information * * Assume that chunk_ib_index is properly set. Will return -EINVAL * if the packet is bigger than the remaining ib size or if the packet is unknown.
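 *
 * For reference, the decoding below assumes the usual CP packet header
 * layout (a sketch inferred from the CP_PACKET_GET_* macros used here,
 * not restated from hardware documentation):
 *   bits 31:30  packet type (0, 2 or 3)
 *   bits 29:16  count (number of body dwords minus one)
 *   bits 15:0   base register index, for type-0 packets
 *   bits 15:8   opcode, for type-3 packets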
**/ static int r600_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx) { struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; uint32_t header; if (idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", idx, ib_chunk->length_dw); return -EINVAL; } header = radeon_get_ib_value(p, idx); pkt->idx = idx; pkt->type = CP_PACKET_GET_TYPE(header); pkt->count = CP_PACKET_GET_COUNT(header); pkt->one_reg_wr = 0; switch (pkt->type) { case PACKET_TYPE0: pkt->reg = CP_PACKET0_GET_REG(header); break; case PACKET_TYPE3: pkt->opcode = CP_PACKET3_GET_OPCODE(header); break; case PACKET_TYPE2: pkt->count = -1; break; default: DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); return -EINVAL; } if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); return -EINVAL; } return 0; } /** * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3 * @parser: parser structure holding parsing context. * @data: pointer to relocation data * @offset_start: starting offset * @offset_mask: offset mask (to align start offset on) * @reloc: reloc information * * Check that the next packet is a relocation packet3, do bo validation and compute * GPU offset using the provided start. **/ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc) { struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_packet p3reloc; unsigned idx; int r; if (p->chunk_relocs_idx == -1) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } *cs_reloc = NULL; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; r = r600_cs_packet_parse(p, &p3reloc, p->idx); if (r) { return r; } p->idx += p3reloc.count + 2; if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { DRM_ERROR("No packet3 for relocation for packet at %d.\n", p3reloc.idx); return -EINVAL; } idx = radeon_get_ib_value(p, p3reloc.idx + 1); if (idx >= relocs_chunk->length_dw) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, relocs_chunk->length_dw); return -EINVAL; } /* FIXME: we assume reloc size is 4 dwords */ *cs_reloc = p->relocs_ptr[(idx / 4)]; return 0; } /** * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3 * @parser: parser structure holding parsing context. * @data: pointer to relocation data * @offset_start: starting offset * @offset_mask: offset mask (to align start offset on) * @reloc: reloc information * * Check that the next packet is a relocation packet3, do bo validation and compute * GPU offset using the provided start.
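 *
 * In this legacy (nomm) variant the relocation entry is read straight
 * from the relocation chunk's kdata; as a sketch of the code that
 * follows, the 64-bit GPU offset is assembled as
 * ((u64)kdata[idx + 3] << 32) | kdata[idx + 0].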
**/ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc) { struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_packet p3reloc; unsigned idx; int r; if (p->chunk_relocs_idx == -1) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } *cs_reloc = NULL; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; r = r600_cs_packet_parse(p, &p3reloc, p->idx); if (r) { return r; } p->idx += p3reloc.count + 2; if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { DRM_ERROR("No packet3 for relocation for packet at %d.\n", p3reloc.idx); return -EINVAL; } idx = radeon_get_ib_value(p, p3reloc.idx + 1); if (idx >= relocs_chunk->length_dw) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, relocs_chunk->length_dw); return -EINVAL; } *cs_reloc = p->relocs; (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; return 0; } /** * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc * @parser: parser structure holding parsing context. * * Check next packet is relocation packet3, do bo validation and compute * GPU offset using the provided start. **/ static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) { struct radeon_cs_packet p3reloc; int r; r = r600_cs_packet_parse(p, &p3reloc, p->idx); if (r) { return 0; } if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { return 0; } return 1; } /** * r600_cs_packet_next_vline() - parse userspace VLINE packet * @parser: parser structure holding parsing context. * * Userspace sends a special sequence for VLINE waits. * PACKET0 - VLINE_START_END + value * PACKET3 - WAIT_REG_MEM poll vline status reg * RELOC (P3) - crtc_id in reloc. * * This function parses this and relocates the VLINE START END * and WAIT_REG_MEM packets to the correct crtc. * It also detects a switched off crtc and nulls out the * wait in that case. 
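 *
 * Illustrative shape of the expected dword stream (inferred from the
 * index arithmetic below, not from a userspace specification):
 *   PACKET0(AVIVO_D1MODE_VLINE_START_END, 0), value     (2 dwords)
 *   PACKET3(WAIT_REG_MEM, 5), 6 body dwords             (7 dwords)
 *   PACKET3(NOP, 0), reloc index carrying the crtc_id   (2 dwords)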
*/ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) { struct drm_mode_object *obj; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_cs_packet p3reloc, wait_reg_mem; int crtc_id; int r; uint32_t header, h_idx, reg, wait_reg_mem_info; volatile uint32_t *ib; ib = p->ib.ptr; /* parse the WAIT_REG_MEM */ r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); if (r) return r; /* check it's a WAIT_REG_MEM */ if (wait_reg_mem.type != PACKET_TYPE3 || wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); return -EINVAL; } wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); /* bit 4 is reg (0) or mem (1) */ if (wait_reg_mem_info & 0x10) { DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); return -EINVAL; } /* waiting for value to be equal */ if ((wait_reg_mem_info & 0x7) != 0x3) { DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); return -EINVAL; } if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); return -EINVAL; } if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); return -EINVAL; } /* jump over the NOP */ r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); if (r) return r; h_idx = p->idx - 2; p->idx += wait_reg_mem.count + 2; p->idx += p3reloc.count + 2; header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); reg = CP_PACKET0_GET_REG(header); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_ERROR("cannot find crtc %d\n", crtc_id); return -EINVAL; } crtc = obj_to_crtc(obj); radeon_crtc = to_radeon_crtc(crtc); crtc_id = radeon_crtc->crtc_id; if (!crtc->enabled) { /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ ib[h_idx + 2] = PACKET2(0); ib[h_idx + 3] = PACKET2(0); ib[h_idx + 4] = PACKET2(0); ib[h_idx + 5] = PACKET2(0); ib[h_idx + 6] = PACKET2(0); ib[h_idx + 7] = PACKET2(0); ib[h_idx + 8] = PACKET2(0); } else if (crtc_id == 1) { switch (reg) { case AVIVO_D1MODE_VLINE_START_END: header &= ~R600_CP_PACKET0_REG_MASK; header |= AVIVO_D2MODE_VLINE_START_END >> 2; break; default: DRM_ERROR("unknown crtc reloc\n"); return -EINVAL; } ib[h_idx] = header; ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; } return 0; } static int r600_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) { int r; switch (reg) { case AVIVO_D1MODE_VLINE_START_END: r = r600_cs_packet_parse_vline(p); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg); return r; } break; default: DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", reg, idx); return -EINVAL; } return 0; } static int r600_cs_parse_packet0(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { unsigned reg, i; unsigned idx; int r; idx = pkt->idx + 1; reg = pkt->reg; for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { r = r600_packet0_check(p, pkt, idx, reg); if (r) { return r; } } return 0; } /** * r600_cs_check_reg() - check if register is authorized or not * @parser: parser structure holding parsing context * @reg: register we are testing * @idx: index into the cs buffer * * This function will test against r600_reg_safe_bm and return 0 * if the register is safe. If the register is not flagged as safe, this function * will test it against a list of registers needing special handling.
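 *
 * Worked example of the bitmap test below, with an illustrative register:
 * for reg = 0x28010, i = reg >> 7 = 0x500 and m = 1 << ((reg >> 2) & 31)
 * = 1 << 4, so the register is accepted outright when bit 4 of
 * r600_reg_safe_bm[0x500] is clear and falls through to the special-case
 * switch otherwise.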
*/ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { struct r600_cs_track *track = (struct r600_cs_track *)p->track; struct radeon_cs_reloc *reloc; u32 m, i, tmp, *ib; int r; i = (reg >> 7); if (i >= DRM_ARRAY_SIZE(r600_reg_safe_bm)) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } m = 1 << ((reg >> 2) & 31); if (!(r600_reg_safe_bm[i] & m)) return 0; ib = p->ib.ptr; switch (reg) { /* force following reg to 0 in an attempt to disable out buffer * which will need us to better understand how it works to perform * security check on it (Jerome) */ case R_0288A8_SQ_ESGS_RING_ITEMSIZE: case R_008C44_SQ_ESGS_RING_SIZE: case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: case R_008C54_SQ_ESTMP_RING_SIZE: case R_0288C0_SQ_FBUF_RING_ITEMSIZE: case R_008C74_SQ_FBUF_RING_SIZE: case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: case R_008C5C_SQ_GSTMP_RING_SIZE: case R_0288AC_SQ_GSVS_RING_ITEMSIZE: case R_008C4C_SQ_GSVS_RING_SIZE: case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: case R_008C6C_SQ_PSTMP_RING_SIZE: case R_0288C4_SQ_REDUC_RING_ITEMSIZE: case R_008C7C_SQ_REDUC_RING_SIZE: case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: case R_008C64_SQ_VSTMP_RING_SIZE: case R_0288C8_SQ_GS_VERT_ITEMSIZE: /* get value to populate the IB don't remove */ tmp =radeon_get_ib_value(p, idx); ib[idx] = 0; break; case SQ_CONFIG: track->sq_config = radeon_get_ib_value(p, idx); break; case R_028800_DB_DEPTH_CONTROL: track->db_depth_control = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case R_028010_DB_DEPTH_INFO: if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && r600_cs_packet_next_is_pkt3_nop(p)) { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_depth_info = radeon_get_ib_value(p, idx); ib[idx] &= C_028010_ARRAY_MODE; track->db_depth_info &= C_028010_ARRAY_MODE; if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); } else { ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); } } else { track->db_depth_info = radeon_get_ib_value(p, idx); } track->db_dirty = true; break; case R_028004_DB_DEPTH_VIEW: track->db_depth_view = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case R_028000_DB_DEPTH_SIZE: track->db_depth_size = radeon_get_ib_value(p, idx); track->db_depth_size_idx = idx; track->db_dirty = true; break; case R_028AB0_VGT_STRMOUT_EN: track->vgt_strmout_en = radeon_get_ib_value(p, idx); track->streamout_dirty = true; break; case R_028B20_VGT_STRMOUT_BUFFER_EN: track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_BASE_0: case VGT_STRMOUT_BUFFER_BASE_1: case VGT_STRMOUT_BUFFER_BASE_2: case VGT_STRMOUT_BUFFER_BASE_3: r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->vgt_strmout_bo[tmp] = reloc->robj; track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_SIZE_0: case VGT_STRMOUT_BUFFER_SIZE_1: case VGT_STRMOUT_BUFFER_SIZE_2: case VGT_STRMOUT_BUFFER_SIZE_3: tmp = (reg - 
VGT_STRMOUT_BUFFER_SIZE_0) / 16; /* size in register is DWs, convert to bytes */ track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; track->streamout_dirty = true; break; case CP_COHER_BASE: r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "missing reloc for CP_COHER_BASE " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case R_028238_CB_TARGET_MASK: track->cb_target_mask = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case R_02823C_CB_SHADER_MASK: track->cb_shader_mask = radeon_get_ib_value(p, idx); break; case R_028C04_PA_SC_AA_CONFIG: tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); track->log_nsamples = tmp; track->nsamples = 1 << tmp; track->cb_dirty = true; break; case R_028808_CB_COLOR_CONTROL: tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; track->cb_dirty = true; break; case R_0280A0_CB_COLOR0_INFO: case R_0280A4_CB_COLOR1_INFO: case R_0280A8_CB_COLOR2_INFO: case R_0280AC_CB_COLOR3_INFO: case R_0280B0_CB_COLOR4_INFO: case R_0280B4_CB_COLOR5_INFO: case R_0280B8_CB_COLOR6_INFO: case R_0280BC_CB_COLOR7_INFO: if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && r600_cs_packet_next_is_pkt3_nop(p)) { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); } } else { tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); } track->cb_dirty = true; break; case R_028080_CB_COLOR0_VIEW: case R_028084_CB_COLOR1_VIEW: case R_028088_CB_COLOR2_VIEW: case R_02808C_CB_COLOR3_VIEW: case R_028090_CB_COLOR4_VIEW: case R_028094_CB_COLOR5_VIEW: case R_028098_CB_COLOR6_VIEW: case R_02809C_CB_COLOR7_VIEW: tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4; track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case R_028060_CB_COLOR0_SIZE: case R_028064_CB_COLOR1_SIZE: case R_028068_CB_COLOR2_SIZE: case R_02806C_CB_COLOR3_SIZE: case R_028070_CB_COLOR4_SIZE: case R_028074_CB_COLOR5_SIZE: case R_028078_CB_COLOR6_SIZE: case R_02807C_CB_COLOR7_SIZE: tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); track->cb_color_size_idx[tmp] = idx; track->cb_dirty = true; break; /* This register were added late, there is userspace * which does provide relocation for those but set * 0 offset. In order to avoid breaking old userspace * we detect this and set address to point to last * CB_COLOR0_BASE, note that if userspace doesn't set * CB_COLOR0_BASE before this register we will report * error. Old userspace always set CB_COLOR0_BASE * before any of this. 
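 * (As a sketch of the fallback below: when no relocation NOP follows,
 * ib[idx] is rewritten to cb_color_base_last[n], so the FRAG/TILE
 * surface simply aliases the buffer object of the last CB_COLOR[n]_BASE.)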
*/ case R_0280E0_CB_COLOR0_FRAG: case R_0280E4_CB_COLOR1_FRAG: case R_0280E8_CB_COLOR2_FRAG: case R_0280EC_CB_COLOR3_FRAG: case R_0280F0_CB_COLOR4_FRAG: case R_0280F4_CB_COLOR5_FRAG: case R_0280F8_CB_COLOR6_FRAG: case R_0280FC_CB_COLOR7_FRAG: tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; if (!r600_cs_packet_next_is_pkt3_nop(p)) { if (!track->cb_color_base_last[tmp]) { dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; ib[idx] = track->cb_color_base_last[tmp]; } else { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } track->cb_color_frag_bo[tmp] = reloc->robj; track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); } if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } break; case R_0280C0_CB_COLOR0_TILE: case R_0280C4_CB_COLOR1_TILE: case R_0280C8_CB_COLOR2_TILE: case R_0280CC_CB_COLOR3_TILE: case R_0280D0_CB_COLOR4_TILE: case R_0280D4_CB_COLOR5_TILE: case R_0280D8_CB_COLOR6_TILE: case R_0280DC_CB_COLOR7_TILE: tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; if (!r600_cs_packet_next_is_pkt3_nop(p)) { if (!track->cb_color_base_last[tmp]) { dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; ib[idx] = track->cb_color_base_last[tmp]; } else { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } track->cb_color_tile_bo[tmp] = reloc->robj; track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); } if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } break; case R_028100_CB_COLOR0_MASK: case R_028104_CB_COLOR1_MASK: case R_028108_CB_COLOR2_MASK: case R_02810C_CB_COLOR3_MASK: case R_028110_CB_COLOR4_MASK: case R_028114_CB_COLOR5_MASK: case R_028118_CB_COLOR6_MASK: case R_02811C_CB_COLOR7_MASK: tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } break; case CB_COLOR0_BASE: case CB_COLOR1_BASE: case CB_COLOR2_BASE: case CB_COLOR3_BASE: case CB_COLOR4_BASE: case CB_COLOR5_BASE: case CB_COLOR6_BASE: case CB_COLOR7_BASE: r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = (reg - CB_COLOR0_BASE) / 4; track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_base_last[tmp] = ib[idx]; track->cb_color_bo[tmp] = reloc->robj; track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; track->cb_dirty = true; break; case DB_DEPTH_BASE: r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_offset = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->db_bo = reloc->robj; track->db_bo_mc = reloc->lobj.gpu_offset; track->db_dirty = true; break; case DB_HTILE_DATA_BASE: r = r600_cs_packet_next_reloc(p, &reloc); if 
(r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->htile_offset = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->htile_bo = reloc->robj; track->db_dirty = true; break; case DB_HTILE_SURFACE: track->htile_surface = radeon_get_ib_value(p, idx); /* force 8x8 htile width and height */ ib[idx] |= 3; track->db_dirty = true; break; case SQ_PGM_START_FS: case SQ_PGM_START_ES: case SQ_PGM_START_VS: case SQ_PGM_START_GS: case SQ_PGM_START_PS: case SQ_ALU_CONST_CACHE_GS_0: case SQ_ALU_CONST_CACHE_GS_1: case SQ_ALU_CONST_CACHE_GS_2: case SQ_ALU_CONST_CACHE_GS_3: case SQ_ALU_CONST_CACHE_GS_4: case SQ_ALU_CONST_CACHE_GS_5: case SQ_ALU_CONST_CACHE_GS_6: case SQ_ALU_CONST_CACHE_GS_7: case SQ_ALU_CONST_CACHE_GS_8: case SQ_ALU_CONST_CACHE_GS_9: case SQ_ALU_CONST_CACHE_GS_10: case SQ_ALU_CONST_CACHE_GS_11: case SQ_ALU_CONST_CACHE_GS_12: case SQ_ALU_CONST_CACHE_GS_13: case SQ_ALU_CONST_CACHE_GS_14: case SQ_ALU_CONST_CACHE_GS_15: case SQ_ALU_CONST_CACHE_PS_0: case SQ_ALU_CONST_CACHE_PS_1: case SQ_ALU_CONST_CACHE_PS_2: case SQ_ALU_CONST_CACHE_PS_3: case SQ_ALU_CONST_CACHE_PS_4: case SQ_ALU_CONST_CACHE_PS_5: case SQ_ALU_CONST_CACHE_PS_6: case SQ_ALU_CONST_CACHE_PS_7: case SQ_ALU_CONST_CACHE_PS_8: case SQ_ALU_CONST_CACHE_PS_9: case SQ_ALU_CONST_CACHE_PS_10: case SQ_ALU_CONST_CACHE_PS_11: case SQ_ALU_CONST_CACHE_PS_12: case SQ_ALU_CONST_CACHE_PS_13: case SQ_ALU_CONST_CACHE_PS_14: case SQ_ALU_CONST_CACHE_PS_15: case SQ_ALU_CONST_CACHE_VS_0: case SQ_ALU_CONST_CACHE_VS_1: case SQ_ALU_CONST_CACHE_VS_2: case SQ_ALU_CONST_CACHE_VS_3: case SQ_ALU_CONST_CACHE_VS_4: case SQ_ALU_CONST_CACHE_VS_5: case SQ_ALU_CONST_CACHE_VS_6: case SQ_ALU_CONST_CACHE_VS_7: case SQ_ALU_CONST_CACHE_VS_8: case SQ_ALU_CONST_CACHE_VS_9: case SQ_ALU_CONST_CACHE_VS_10: case SQ_ALU_CONST_CACHE_VS_11: case SQ_ALU_CONST_CACHE_VS_12: case SQ_ALU_CONST_CACHE_VS_13: case SQ_ALU_CONST_CACHE_VS_14: case SQ_ALU_CONST_CACHE_VS_15: r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case SX_MEMORY_EXPORT_BASE: r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONFIG_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case SX_MISC: track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; break; default: dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } return 0; } unsigned r600_mip_minify(unsigned size, unsigned level) { unsigned val; val = max(1U, size >> level); if (level > 0) val = roundup_pow_of_two(val); return val; } static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format, unsigned block_align, unsigned height_align, unsigned base_align, unsigned *l0_size, unsigned *mipmap_size) { unsigned offset, i, level; unsigned width, height, depth, size; unsigned blocksize; unsigned nbx, nby; unsigned nlevels = llevel - blevel + 1; *l0_size = -1; blocksize = r600_fmt_get_blocksize(format); w0 = r600_mip_minify(w0, 0); h0 = r600_mip_minify(h0, 0); d0 = r600_mip_minify(d0, 0); for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { width = r600_mip_minify(w0, i); nbx = r600_fmt_get_nblocksx(format, width); nbx = roundup(nbx, block_align); height = r600_mip_minify(h0, i); nby = 
r600_fmt_get_nblocksy(format, height); nby = roundup(nby, height_align); depth = r600_mip_minify(d0, i); size = nbx * nby * blocksize * nsamples; if (nfaces) size *= nfaces; else size *= depth; if (i == 0) *l0_size = size; if (i == 0 || i == 1) offset = roundup(offset, base_align); offset += size; } *mipmap_size = offset; if (llevel == 0) *mipmap_size = *l0_size; if (!blevel) *mipmap_size -= *l0_size; } /** * r600_check_texture_resource() - check if register is authorized or not * @p: parser structure holding parsing context * @idx: index into the cs buffer * @texture: texture's bo structure * @mipmap: mipmap's bo structure * * This function will check that the resource has valid field and that * the texture and mipmap bo object are big enough to cover this resource. */ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, struct radeon_bo *texture, struct radeon_bo *mipmap, u64 base_offset, u64 mip_offset, u32 tiling_flags) { struct r600_cs_track *track = p->track; u32 dim, nfaces, llevel, blevel, w0, h0, d0; u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; u32 height_align, pitch, pitch_align, depth_align; u32 barray, larray; u64 base_align; struct array_mode_checker array_check; u32 format; bool is_array; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) return 0; /* convert to bytes */ base_offset <<= 8; mip_offset <<= 8; word0 = radeon_get_ib_value(p, idx + 0); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (tiling_flags & RADEON_TILING_MACRO) word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); else if (tiling_flags & RADEON_TILING_MICRO) word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } word1 = radeon_get_ib_value(p, idx + 1); word2 = radeon_get_ib_value(p, idx + 2) << 8; word3 = radeon_get_ib_value(p, idx + 3) << 8; word4 = radeon_get_ib_value(p, idx + 4); word5 = radeon_get_ib_value(p, idx + 5); dim = G_038000_DIM(word0); w0 = G_038000_TEX_WIDTH(word0) + 1; pitch = (G_038000_PITCH(word0) + 1) * 8; h0 = G_038004_TEX_HEIGHT(word1) + 1; d0 = G_038004_TEX_DEPTH(word1); format = G_038004_DATA_FORMAT(word1); blevel = G_038010_BASE_LEVEL(word4); llevel = G_038014_LAST_LEVEL(word5); /* pitch in texels */ array_check.array_mode = G_038000_TILE_MODE(word0); array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; array_check.nsamples = 1; array_check.blocksize = r600_fmt_get_blocksize(format); nfaces = 1; is_array = false; switch (dim) { case V_038000_SQ_TEX_DIM_1D: case V_038000_SQ_TEX_DIM_2D: case V_038000_SQ_TEX_DIM_3D: break; case V_038000_SQ_TEX_DIM_CUBEMAP: if (p->family >= CHIP_RV770) nfaces = 8; else nfaces = 6; break; case V_038000_SQ_TEX_DIM_1D_ARRAY: case V_038000_SQ_TEX_DIM_2D_ARRAY: is_array = true; break; case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: is_array = true; /* fall through */ case V_038000_SQ_TEX_DIM_2D_MSAA: array_check.nsamples = 1 << llevel; llevel = 0; break; default: dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); return -EINVAL; } if (!r600_fmt_is_valid_texture(format, p->family)) { dev_warn(p->dev, "%s:%d texture invalid format %d\n", __func__, __LINE__, format); return -EINVAL; } if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", __func__, __LINE__, G_038000_TILE_MODE(word0)); return -EINVAL; } /* XXX check height as well... 
*/ if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n", __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(mip_offset, base_align)) { dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n", __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (blevel > llevel) { dev_warn(p->dev, "texture blevel %d > llevel %d\n", blevel, llevel); } if (is_array) { barray = G_038014_BASE_ARRAY(word5); larray = G_038014_LAST_ARRAY(word5); nfaces = larray - barray + 1; } r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format, pitch_align, height_align, base_align, &l0_size, &mipmap_size); /* using get ib will give us the offset into the texture bo */ if ((l0_size + word2) > radeon_bo_size(texture)) { dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", w0, h0, pitch_align, height_align, array_check.array_mode, format, word2, l0_size, radeon_bo_size(texture)); dev_warn(p->dev, "alignments %d %d %d %jd\n", pitch, pitch_align, height_align, (uintmax_t)base_align); return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ } return 0; } static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { u32 m, i; i = (reg >> 7); if (i >= DRM_ARRAY_SIZE(r600_reg_safe_bm)) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return false; } m = 1 << ((reg >> 2) & 31); if (!(r600_reg_safe_bm[i] & m)) return true; dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return false; } static int r600_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { struct radeon_cs_reloc *reloc; struct r600_cs_track *track; volatile u32 *ib; unsigned idx; unsigned i; unsigned start_reg, end_reg, reg; int r; u32 idx_value; track = (struct r600_cs_track *)p->track; ib = p->ib.ptr; idx = pkt->idx + 1; idx_value = radeon_get_ib_value(p, idx); switch (pkt->opcode) { case PACKET3_SET_PREDICATION: { int pred_op; int tmp; uint64_t offset; if (pkt->count != 1) { DRM_ERROR("bad SET PREDICATION\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx + 1); pred_op = (tmp >> 16) & 0x7; /* for the clear predicate operation */ if (pred_op == 0) return 0; if (pred_op > 2) { DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); return -EINVAL; } r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET PREDICATION\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (idx_value & 0xfffffff0) + ((u64)(tmp & 0xff) << 32); ib[idx + 0] = offset; ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); } break; case PACKET3_START_3D_CMDBUF: if (p->family >= CHIP_RV770 || pkt->count) { DRM_ERROR("bad START_3D\n"); return -EINVAL; } break; case PACKET3_CONTEXT_CONTROL: if (pkt->count != 1) { DRM_ERROR("bad CONTEXT_CONTROL\n"); return -EINVAL; } break; case PACKET3_INDEX_TYPE: case PACKET3_NUM_INSTANCES: if (pkt->count) { DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n"); return -EINVAL; } break; 
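	/*
	 * Illustrative body layout of the DRAW_INDEX packet handled next,
	 * inferred from the offsets patched below rather than from hardware
	 * documentation:
	 *   dw0: index buffer address, low 32 bits
	 *   dw1: index buffer address, bits 39:32
	 *   dw2: index count
	 *   dw3: draw initiator
	 */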
case PACKET3_DRAW_INDEX: { uint64_t offset; if (pkt->count != 3) { DRM_ERROR("bad DRAW_INDEX\n"); return -EINVAL; } r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad DRAW_INDEX\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + idx_value + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; r = r600_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DRAW_INDEX_AUTO: if (pkt->count != 1) { DRM_ERROR("bad DRAW_INDEX_AUTO\n"); return -EINVAL; } r = r600_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_IMMD_BE: case PACKET3_DRAW_INDEX_IMMD: if (pkt->count < 2) { DRM_ERROR("bad DRAW_INDEX_IMMD\n"); return -EINVAL; } r = r600_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { DRM_ERROR("bad WAIT_REG_MEM\n"); return -EINVAL; } /* bit 4 is reg (0) or mem (1) */ if (idx_value & 0x10) { uint64_t offset; r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad WAIT_REG_MEM\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); ib[idx+2] = upper_32_bits(offset) & 0xff; } break; case PACKET3_CP_DMA: { u32 command, size; u64 offset, tmp; if (pkt->count != 4) { DRM_ERROR("bad CP DMA\n"); return -EINVAL; } command = radeon_get_ib_value(p, idx+4); size = command & 0x1fffff; if (command & PACKET3_CP_DMA_CMD_SAS) { /* src address space is register */ DRM_ERROR("CP DMA SAS not supported\n"); return -EINVAL; } else { if (command & PACKET3_CP_DMA_CMD_SAIC) { DRM_ERROR("CP DMA SAIC only supported for registers\n"); return -EINVAL; } /* src address space is memory */ r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad CP DMA SRC\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n", (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx] = offset; ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); } if (command & PACKET3_CP_DMA_CMD_DAS) { /* dst address space is register */ DRM_ERROR("CP DMA DAS not supported\n"); return -EINVAL; } else { /* dst address space is memory */ if (command & PACKET3_CP_DMA_CMD_DAIC) { DRM_ERROR("CP DMA DAIC only supported for registers\n"); return -EINVAL; } r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad CP DMA DST\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx+2) + ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); offset = reloc->lobj.gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n", (uintmax_t)tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+2] = offset; ib[idx+3] = upper_32_bits(offset) & 0xff; } break; } case PACKET3_SURFACE_SYNC: if (pkt->count != 3) { DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } /* 0xffffffff/0x0 is flush all cache flag */ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || radeon_get_ib_value(p, idx + 2) != 0) { r = 
r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_EVENT_WRITE: if (pkt->count != 2 && pkt->count != 0) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } if (pkt->count) { uint64_t offset; r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset & 0xfffffff8; ib[idx+2] = upper_32_bits(offset) & 0xff; } break; case PACKET3_EVENT_WRITE_EOP: { uint64_t offset; if (pkt->count != 4) { DRM_ERROR("bad EVENT_WRITE_EOP\n"); return -EINVAL; } r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } offset = reloc->lobj.gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset & 0xfffffffc; ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); break; } case PACKET3_SET_CONFIG_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); r = r600_cs_check_reg(p, reg, idx+1+i); if (r) return r; } break; case PACKET3_SET_CONTEXT_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || (start_reg >= PACKET3_SET_CONTEXT_REG_END) || (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); r = r600_cs_check_reg(p, reg, idx+1+i); if (r) return r; } break; case PACKET3_SET_RESOURCE: if (pkt->count % 7) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || (start_reg >= PACKET3_SET_RESOURCE_END) || (end_reg >= PACKET3_SET_RESOURCE_END)) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } for (i = 0; i < (pkt->count / 7); i++) { struct radeon_bo *texture, *mipmap; u32 size, offset, base_offset, mip_offset; switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { case SQ_TEX_VTX_VALID_TEXTURE: /* tex base */ r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } texture = reloc->robj; /* tex mip base */ r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); mipmap = reloc->robj; r = r600_check_texture_resource(p, idx+(i*7)+1, texture, mipmap, base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), 
reloc->lobj.tiling_flags); if (r) return r; ib[idx+1+(i*7)+2] += base_offset; ib[idx+1+(i*7)+3] += mip_offset; break; case SQ_TEX_VTX_VALID_BUFFER: { uint64_t offset64; /* vtx base */ r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1+(i*7)+0); size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1; if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { /* force size to size of the buffer */ dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", size + offset, radeon_bo_size(reloc->robj)); ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; } offset64 = reloc->lobj.gpu_offset + offset; ib[idx+1+(i*8)+0] = offset64; ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | (upper_32_bits(offset64) & 0xff); break; } case SQ_TEX_VTX_INVALID_TEXTURE: case SQ_TEX_VTX_INVALID_BUFFER: default: DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } } break; case PACKET3_SET_ALU_CONST: if (track->sq_config & DX9_CONSTS) { start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || (start_reg >= PACKET3_SET_ALU_CONST_END) || (end_reg >= PACKET3_SET_ALU_CONST_END)) { DRM_ERROR("bad SET_ALU_CONST\n"); return -EINVAL; } } break; case PACKET3_SET_BOOL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || (start_reg >= PACKET3_SET_BOOL_CONST_END) || (end_reg >= PACKET3_SET_BOOL_CONST_END)) { DRM_ERROR("bad SET_BOOL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_LOOP_CONST: start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || (start_reg >= PACKET3_SET_LOOP_CONST_END) || (end_reg >= PACKET3_SET_LOOP_CONST_END)) { DRM_ERROR("bad SET_LOOP_CONST\n"); return -EINVAL; } break; case PACKET3_SET_CTL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || (start_reg >= PACKET3_SET_CTL_CONST_END) || (end_reg >= PACKET3_SET_CTL_CONST_END)) { DRM_ERROR("bad SET_CTL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_SAMPLER: if (pkt->count % 3) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || (start_reg >= PACKET3_SET_SAMPLER_END) || (end_reg >= PACKET3_SET_SAMPLER_END)) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BASE_UPDATE: /* RS780 and RS880 also need this */ if (p->family < CHIP_RS780) { DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); return -EINVAL; } if (pkt->count != 1) { DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); return -EINVAL; } if (idx_value > 3) { DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); return -EINVAL; } { u64 offset; r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); return -EINVAL; } if (reloc->robj != track->vgt_strmout_bo[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n", (uintmax_t)offset, 
case PACKET3_SET_ALU_CONST: if (track->sq_config & DX9_CONSTS) { start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || (start_reg >= PACKET3_SET_ALU_CONST_END) || (end_reg >= PACKET3_SET_ALU_CONST_END)) { DRM_ERROR("bad SET_ALU_CONST\n"); return -EINVAL; } } break; case PACKET3_SET_BOOL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || (start_reg >= PACKET3_SET_BOOL_CONST_END) || (end_reg >= PACKET3_SET_BOOL_CONST_END)) { DRM_ERROR("bad SET_BOOL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_LOOP_CONST: start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || (start_reg >= PACKET3_SET_LOOP_CONST_END) || (end_reg >= PACKET3_SET_LOOP_CONST_END)) { DRM_ERROR("bad SET_LOOP_CONST\n"); return -EINVAL; } break; case PACKET3_SET_CTL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || (start_reg >= PACKET3_SET_CTL_CONST_END) || (end_reg >= PACKET3_SET_CTL_CONST_END)) { DRM_ERROR("bad SET_CTL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_SAMPLER: if (pkt->count % 3) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || (start_reg >= PACKET3_SET_SAMPLER_END) || (end_reg >= PACKET3_SET_SAMPLER_END)) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BASE_UPDATE: /* RS780 and RS880 also need this */ if (p->family < CHIP_RS780) { DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); return -EINVAL; } if (pkt->count != 1) { DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); return -EINVAL; } if (idx_value > 3) { DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); return -EINVAL; } { u64 offset; r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); return -EINVAL; } if (reloc->robj != track->vgt_strmout_bo[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n", (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]); return -EINVAL; } if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_SURFACE_BASE_UPDATE: if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); return -EINVAL; } if (pkt->count) { DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BUFFER_UPDATE: if (pkt->count != 4) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); return -EINVAL; } /* Updating memory at DST_ADDRESS. */ if (idx_value & 0x1) { u64 offset; r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } /* Reading data from SRC_ADDRESS. */ if (((idx_value >> 1) & 0x3) == 2) { u64 offset; r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } break; case PACKET3_MEM_WRITE: { u64 offset; if (pkt->count != 3) { DRM_ERROR("bad MEM_WRITE (invalid count)\n"); return -EINVAL; } r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+0); offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; if (offset & 0x7) { DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; break; } case PACKET3_COPY_DW: if (pkt->count != 4) { DRM_ERROR("bad COPY_DW (invalid count)\n"); return -EINVAL; } if (idx_value & 0x1) { u64 offset; /* SRC is memory. */ r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad COPY_DW (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } else { /* SRC is a reg. */ reg = radeon_get_ib_value(p, idx+1) << 2; if (!r600_is_safe_reg(p, reg, idx+1)) return -EINVAL; } if (idx_value & 0x2) { u64 offset; /* DST is memory.
*/ r = r600_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n", (uintmax_t)offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->lobj.gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } else { /* DST is a reg. */ reg = radeon_get_ib_value(p, idx+3) << 2; if (!r600_is_safe_reg(p, reg, idx+3)) return -EINVAL; } break; case PACKET3_NOP: break; default: DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; } int r600_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; struct r600_cs_track *track; int r; if (p->track == NULL) { /* initialize tracker, we are in kms */ track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (track == NULL) return -ENOMEM; r600_cs_track_init(track); if (p->rdev->family < CHIP_RV770) { track->npipes = p->rdev->config.r600.tiling_npipes; track->nbanks = p->rdev->config.r600.tiling_nbanks; track->group_size = p->rdev->config.r600.tiling_group_size; } else if (p->rdev->family <= CHIP_RV740) { track->npipes = p->rdev->config.rv770.tiling_npipes; track->nbanks = p->rdev->config.rv770.tiling_nbanks; track->group_size = p->rdev->config.rv770.tiling_group_size; } p->track = track; } do { r = r600_cs_packet_parse(p, &pkt, p->idx); if (r) { free(p->track, DRM_MEM_DRIVER); p->track = NULL; return r; } p->idx += pkt.count + 2; switch (pkt.type) { case PACKET_TYPE0: r = r600_cs_parse_packet0(p, &pkt); break; case PACKET_TYPE2: break; case PACKET_TYPE3: r = r600_packet3_check(p, &pkt); break; default: DRM_ERROR("Unknown packet type %d !\n", pkt.type); free(p->track, DRM_MEM_DRIVER); p->track = NULL; return -EINVAL; } if (r) { free(p->track, DRM_MEM_DRIVER); p->track = NULL; return r; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); #if 0 for (r = 0; r < p->ib.length_dw; r++) { DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); DRM_MDELAY(1); } #endif free(p->track, DRM_MEM_DRIVER); p->track = NULL; return 0; } static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) { if (p->chunk_relocs_idx == -1) { return 0; } p->relocs = malloc(sizeof(struct radeon_cs_reloc), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (p->relocs == NULL) { return -ENOMEM; } return 0; }
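r600_cs_parse() above advances p->idx by pkt.count + 2 for every packet: a PM4 header encodes the body length minus one, so one header dword plus count + 1 body dwords occupy count + 2 slots. A toy sketch of that stepping, assuming only the 14-bit count field layout from the CP_PACKET_GET_COUNT macro defined in r600d.h later in this diff:

#include <stdint.h>

/* Step from one PM4 packet header to the next: bits 29:16 of the header
 * hold (body dwords - 1), so a whole packet spans count + 2 dwords. */
static uint32_t next_pm4_packet(const uint32_t *ib, uint32_t idx)
{
	uint32_t count = (ib[idx] >> 16) & 0x3fff;
	return idx + count + 2;
}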
/** * r600_cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. * @error: error number * * If error is set then unvalidate buffer, otherwise just free memory used by parsing context. **/ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; free(parser->relocs, DRM_MEM_DRIVER); for (i = 0; i < parser->nchunks; i++) { free(parser->chunks[i].kdata, DRM_MEM_DRIVER); if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) { free(parser->chunks[i].kpage[0], DRM_MEM_DRIVER); free(parser->chunks[i].kpage[1], DRM_MEM_DRIVER); } } free(parser->chunks, DRM_MEM_DRIVER); free(parser->chunks_array, DRM_MEM_DRIVER); free(parser->track, DRM_MEM_DRIVER); } int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, unsigned family, u32 *ib, int *l) { struct radeon_cs_parser parser; struct radeon_cs_chunk *ib_chunk; struct r600_cs_track *track; int r; /* initialize tracker */ track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (track == NULL) return -ENOMEM; r600_cs_track_init(track); r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size); /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; parser.dev = dev->device; parser.rdev = NULL; parser.family = family; parser.track = track; parser.ib.ptr = ib; r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); r600_cs_parser_fini(&parser, r); return r; } r = r600_cs_parser_relocs_legacy(&parser); if (r) { DRM_ERROR("Failed to parse relocation !\n"); r600_cs_parser_fini(&parser, r); return r; } /* Copy the packet into the IB, the parser will read from the * input memory (cached) and write to the IB (which can be * uncached). */ ib_chunk = &parser.chunks[parser.chunk_ib_idx]; parser.ib.length_dw = ib_chunk->length_dw; *l = parser.ib.length_dw; r = r600_cs_parse(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); r600_cs_parser_fini(&parser, r); return r; } r = radeon_cs_finish_pages(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); r600_cs_parser_fini(&parser, r); return r; } r600_cs_parser_fini(&parser, r); return r; } void r600_cs_legacy_init(void) { r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm; } /* * DMA */ /** * r600_dma_cs_next_reloc() - parse next reloc * @p: parser structure holding parsing context. * @cs_reloc: reloc information * * Return the next reloc, do bo validation and compute * GPU offset using the provided start. **/ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc) { struct radeon_cs_chunk *relocs_chunk; unsigned idx; *cs_reloc = NULL; if (p->chunk_relocs_idx == -1) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } relocs_chunk = &p->chunks[p->chunk_relocs_idx]; idx = p->dma_reloc_idx; if (idx >= p->nrelocs) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, p->nrelocs); return -EINVAL; } *cs_reloc = p->relocs_ptr[idx]; p->dma_reloc_idx++; return 0; } #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28) #define GET_DMA_COUNT(h) ((h) & 0x0000ffff) #define GET_DMA_T(h) (((h) & 0x00800000) >> 23) /** * r600_dma_cs_parse() - parse the DMA IB * @p: parser structure holding parsing context. * * Parses the DMA IB from the CS ioctl and updates * the GPU addresses based on the reloc information and * checks for errors. (R6xx-R7xx) * Returns 0 for success and an error on failure.
**/ int r600_dma_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; struct radeon_cs_reloc *src_reloc, *dst_reloc; u32 header, cmd, count, tiled; volatile u32 *ib = p->ib.ptr; u32 idx, idx_value; u64 src_offset, dst_offset; int r; do { if (p->idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", p->idx, ib_chunk->length_dw); return -EINVAL; } idx = p->idx; header = radeon_get_ib_value(p, idx); cmd = GET_DMA_CMD(header); count = GET_DMA_COUNT(header); tiled = GET_DMA_T(header); switch (cmd) { case DMA_PACKET_WRITE: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_WRITE\n"); return -EINVAL; } if (tiled) { dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); p->idx += count + 5; } else { dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; p->idx += count + 3; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_COPY: r = r600_dma_cs_next_reloc(p, &src_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_COPY\n"); return -EINVAL; } if (tiled) { idx_value = radeon_get_ib_value(p, idx + 2); /* detile bit */ - if (idx_value & (1 << 31)) { + if (idx_value & (1U << 31)) { /* tiled src, linear dst */ src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); dst_offset = radeon_get_ib_value(p, idx+5); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; } else { /* linear src, tiled dst */ src_offset = radeon_get_ib_value(p, idx+5); src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } p->idx += 7; } else { if (p->family >= CHIP_RV770) { src_offset = radeon_get_ib_value(p, idx+2); src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; p->idx += 5; } else { src_offset = radeon_get_ib_value(p, idx+2); src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16; p->idx += 4; } } if ((src_offset + 
(count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n", (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA write dst buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_CONSTANT_FILL: if (p->family < CHIP_RV770) { DRM_ERROR("Constant Fill is 7xx only !\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n", (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; p->idx += 4; break; case DMA_PACKET_NOP: p->idx += 1; break; default: DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); #if 0 for (r = 0; r < p->ib.length_dw; r++) { DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]); DRM_MDELAY(1); } #endif return 0; }
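r600_dma_cs_parse() pulls the command, dword count, and tiled flag out of each DMA packet header with the GET_DMA_* masks defined just above it. A small self-contained decode of a made-up header value under those same masks (the header constant is illustrative, not from a real stream):

#include <stdint.h>
#include <stdio.h>

#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)

int main(void)
{
	/* illustrative header: cmd 0x2 (DMA_PACKET_WRITE), tiled bit set, 16 dwords */
	uint32_t header = 0x20800010;
	printf("cmd=0x%x tiled=%u count=%u\n",
	    GET_DMA_CMD(header), GET_DMA_T(header), GET_DMA_COUNT(header));
	return 0;
}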
Index: head/sys/dev/drm2/radeon/r600d.h =================================================================== --- head/sys/dev/drm2/radeon/r600d.h (revision 258779) +++ head/sys/dev/drm2/radeon/r600d.h (revision 258780) @@ -1,1932 +1,1932 @@ /* * Copyright 2009 Advanced Micro Devices, Inc. * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #ifndef R600D_H #define R600D_H #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #define CP_PACKET2 0x80000000 #define PACKET2_PAD_SHIFT 0 #define PACKET2_PAD_MASK (0x3fffffff << 0) #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) #define R6XX_MAX_SH_GPRS 256 #define R6XX_MAX_TEMP_GPRS 16 #define R6XX_MAX_SH_THREADS 256 #define R6XX_MAX_SH_STACK_ENTRIES 4096 #define R6XX_MAX_BACKENDS 8 #define R6XX_MAX_BACKENDS_MASK 0xff #define R6XX_MAX_SIMDS 8 #define R6XX_MAX_SIMDS_MASK 0xff #define R6XX_MAX_PIPES 8 #define R6XX_MAX_PIPES_MASK 0xff /* PTE flags */ #define PTE_VALID (1 << 0) #define PTE_SYSTEM (1 << 1) #define PTE_SNOOPED (1 << 2) #define PTE_READABLE (1 << 5) #define PTE_WRITEABLE (1 << 6) /* tiling bits */ #define ARRAY_LINEAR_GENERAL 0x00000000 #define ARRAY_LINEAR_ALIGNED 0x00000001 #define ARRAY_1D_TILED_THIN1 0x00000002 #define ARRAY_2D_TILED_THIN1 0x00000004 /* Registers */ #define ARB_POP 0x2418 #define ENABLE_TC128 (1 << 30) #define ARB_GDEC_RD_CNTL 0x246C #define CC_GC_SHADER_PIPE_CONFIG 0x8950 #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) #define R_028808_CB_COLOR_CONTROL 0x28808 #define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4) #define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7) #define C_028808_SPECIAL_OP 0xFFFFFF8F #define V_028808_SPECIAL_NORMAL 0x00 #define V_028808_SPECIAL_DISABLE 0x01 #define V_028808_SPECIAL_RESOLVE_BOX 0x07 #define CB_COLOR0_BASE 0x28040 #define CB_COLOR1_BASE 0x28044 #define CB_COLOR2_BASE 0x28048 #define CB_COLOR3_BASE 0x2804C #define CB_COLOR4_BASE 0x28050 #define CB_COLOR5_BASE 0x28054 #define CB_COLOR6_BASE 0x28058 #define CB_COLOR7_BASE 0x2805C #define CB_COLOR7_FRAG 0x280FC #define CB_COLOR0_SIZE 0x28060 #define CB_COLOR0_VIEW 0x28080 #define R_028080_CB_COLOR0_VIEW 0x028080 #define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0) #define G_028080_SLICE_START(x) (((x) >> 0) & 0x7FF) #define C_028080_SLICE_START 0xFFFFF800 #define S_028080_SLICE_MAX(x) (((x) & 0x7FF) << 13) #define G_028080_SLICE_MAX(x) (((x) >> 13) & 0x7FF) #define C_028080_SLICE_MAX 0xFF001FFF #define R_028084_CB_COLOR1_VIEW 0x028084 #define R_028088_CB_COLOR2_VIEW 0x028088 #define R_02808C_CB_COLOR3_VIEW 0x02808C #define R_028090_CB_COLOR4_VIEW 0x028090 #define R_028094_CB_COLOR5_VIEW 0x028094 #define R_028098_CB_COLOR6_VIEW 0x028098 #define R_02809C_CB_COLOR7_VIEW 0x02809C #define R_028100_CB_COLOR0_MASK 0x028100 #define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0) #define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF) #define C_028100_CMASK_BLOCK_MAX 0xFFFFF000 #define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12) #define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF) #define C_028100_FMASK_TILE_MAX 0x00000FFF #define R_028104_CB_COLOR1_MASK 0x028104 #define R_028108_CB_COLOR2_MASK 0x028108 #define R_02810C_CB_COLOR3_MASK 0x02810C #define R_028110_CB_COLOR4_MASK 0x028110 #define R_028114_CB_COLOR5_MASK 0x028114 #define R_028118_CB_COLOR6_MASK 0x028118 #define R_02811C_CB_COLOR7_MASK 0x02811C #define CB_COLOR0_INFO 0x280a0 # define CB_FORMAT(x) ((x) << 2) # define CB_ARRAY_MODE(x) ((x) << 8) # define CB_SOURCE_FORMAT(x) ((x) << 27) # define CB_SF_EXPORT_FULL 0 # define CB_SF_EXPORT_NORM 1 #define CB_COLOR0_TILE 0x280c0 #define CB_COLOR0_FRAG 0x280e0 #define CB_COLOR0_MASK 0x28100 #define SQ_ALU_CONST_CACHE_PS_0 0x28940 #define SQ_ALU_CONST_CACHE_PS_1 0x28944 #define SQ_ALU_CONST_CACHE_PS_2 0x28948 #define SQ_ALU_CONST_CACHE_PS_3 0x2894c #define SQ_ALU_CONST_CACHE_PS_4 0x28950 #define
SQ_ALU_CONST_CACHE_PS_5 0x28954 #define SQ_ALU_CONST_CACHE_PS_6 0x28958 #define SQ_ALU_CONST_CACHE_PS_7 0x2895c #define SQ_ALU_CONST_CACHE_PS_8 0x28960 #define SQ_ALU_CONST_CACHE_PS_9 0x28964 #define SQ_ALU_CONST_CACHE_PS_10 0x28968 #define SQ_ALU_CONST_CACHE_PS_11 0x2896c #define SQ_ALU_CONST_CACHE_PS_12 0x28970 #define SQ_ALU_CONST_CACHE_PS_13 0x28974 #define SQ_ALU_CONST_CACHE_PS_14 0x28978 #define SQ_ALU_CONST_CACHE_PS_15 0x2897c #define SQ_ALU_CONST_CACHE_VS_0 0x28980 #define SQ_ALU_CONST_CACHE_VS_1 0x28984 #define SQ_ALU_CONST_CACHE_VS_2 0x28988 #define SQ_ALU_CONST_CACHE_VS_3 0x2898c #define SQ_ALU_CONST_CACHE_VS_4 0x28990 #define SQ_ALU_CONST_CACHE_VS_5 0x28994 #define SQ_ALU_CONST_CACHE_VS_6 0x28998 #define SQ_ALU_CONST_CACHE_VS_7 0x2899c #define SQ_ALU_CONST_CACHE_VS_8 0x289a0 #define SQ_ALU_CONST_CACHE_VS_9 0x289a4 #define SQ_ALU_CONST_CACHE_VS_10 0x289a8 #define SQ_ALU_CONST_CACHE_VS_11 0x289ac #define SQ_ALU_CONST_CACHE_VS_12 0x289b0 #define SQ_ALU_CONST_CACHE_VS_13 0x289b4 #define SQ_ALU_CONST_CACHE_VS_14 0x289b8 #define SQ_ALU_CONST_CACHE_VS_15 0x289bc #define SQ_ALU_CONST_CACHE_GS_0 0x289c0 #define SQ_ALU_CONST_CACHE_GS_1 0x289c4 #define SQ_ALU_CONST_CACHE_GS_2 0x289c8 #define SQ_ALU_CONST_CACHE_GS_3 0x289cc #define SQ_ALU_CONST_CACHE_GS_4 0x289d0 #define SQ_ALU_CONST_CACHE_GS_5 0x289d4 #define SQ_ALU_CONST_CACHE_GS_6 0x289d8 #define SQ_ALU_CONST_CACHE_GS_7 0x289dc #define SQ_ALU_CONST_CACHE_GS_8 0x289e0 #define SQ_ALU_CONST_CACHE_GS_9 0x289e4 #define SQ_ALU_CONST_CACHE_GS_10 0x289e8 #define SQ_ALU_CONST_CACHE_GS_11 0x289ec #define SQ_ALU_CONST_CACHE_GS_12 0x289f0 #define SQ_ALU_CONST_CACHE_GS_13 0x289f4 #define SQ_ALU_CONST_CACHE_GS_14 0x289f8 #define SQ_ALU_CONST_CACHE_GS_15 0x289fc #define CONFIG_MEMSIZE 0x5428 #define CONFIG_CNTL 0x5424 #define CP_STALLED_STAT1 0x8674 #define CP_STALLED_STAT2 0x8678 #define CP_BUSY_STAT 0x867C #define CP_STAT 0x8680 #define CP_COHER_BASE 0x85F8 #define CP_DEBUG 0xC1FC #define R_0086D8_CP_ME_CNTL 0x86D8 #define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28) #define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF) #define CP_ME_RAM_DATA 0xC160 #define CP_ME_RAM_RADDR 0xC158 #define CP_ME_RAM_WADDR 0xC15C #define CP_MEQ_THRESHOLDS 0x8764 #define MEQ_END(x) ((x) << 16) #define ROQ_END(x) ((x) << 24) #define CP_PERFMON_CNTL 0x87FC #define CP_PFP_UCODE_ADDR 0xC150 #define CP_PFP_UCODE_DATA 0xC154 #define CP_QUEUE_THRESHOLDS 0x8760 #define ROQ_IB1_START(x) ((x) << 0) #define ROQ_IB2_START(x) ((x) << 8) #define CP_RB_BASE 0xC100 #define CP_RB_CNTL 0xC104 #define RB_BUFSZ(x) ((x) << 0) #define RB_BLKSZ(x) ((x) << 8) #define RB_NO_UPDATE (1 << 27) -#define RB_RPTR_WR_ENA (1 << 31) +#define RB_RPTR_WR_ENA (1U << 31) #define BUF_SWAP_32BIT (2 << 16) #define CP_RB_RPTR 0x8700 #define CP_RB_RPTR_ADDR 0xC10C #define RB_RPTR_SWAP(x) ((x) << 0) #define CP_RB_RPTR_ADDR_HI 0xC110 #define CP_RB_RPTR_WR 0xC108 #define CP_RB_WPTR 0xC114 #define CP_RB_WPTR_ADDR 0xC118 #define CP_RB_WPTR_ADDR_HI 0xC11C #define CP_RB_WPTR_DELAY 0x8704 #define CP_ROQ_IB1_STAT 0x8784 #define CP_ROQ_IB2_STAT 0x8788 #define CP_SEM_WAIT_TIMER 0x85BC #define DB_DEBUG 0x9830 -#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) +#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1U << 31) #define DB_DEPTH_BASE 0x2800C #define DB_HTILE_DATA_BASE 0x28014 #define DB_HTILE_SURFACE 0x28D24 #define S_028D24_HTILE_WIDTH(x) (((x) & 0x1) << 0) #define G_028D24_HTILE_WIDTH(x) (((x) >> 0) & 0x1) #define C_028D24_HTILE_WIDTH 0xFFFFFFFE #define S_028D24_HTILE_HEIGHT(x) (((x) & 0x1) << 1) #define G_028D24_HTILE_HEIGHT(x) (((x) >> 
1) & 0x1) #define C_028D24_HTILE_HEIGHT 0xFFFFFFFD #define G_028D24_LINEAR(x) (((x) >> 2) & 0x1) #define DB_WATERMARKS 0x9838 #define DEPTH_FREE(x) ((x) << 0) #define DEPTH_FLUSH(x) ((x) << 5) #define DEPTH_PENDING_FREE(x) ((x) << 15) #define DEPTH_CACHELINE_FREE(x) ((x) << 20) #define DCP_TILING_CONFIG 0x6CA0 #define PIPE_TILING(x) ((x) << 1) #define BANK_TILING(x) ((x) << 4) #define GROUP_SIZE(x) ((x) << 6) #define ROW_TILING(x) ((x) << 8) #define BANK_SWAPS(x) ((x) << 11) #define SAMPLE_SPLIT(x) ((x) << 14) #define BACKEND_MAP(x) ((x) << 16) #define GB_TILING_CONFIG 0x98F0 #define PIPE_TILING__SHIFT 1 #define PIPE_TILING__MASK 0x0000000e #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES_MASK 0x0000FF00 #define INACTIVE_SIMDS(x) ((x) << 16) #define INACTIVE_SIMDS_MASK 0x00FF0000 #define SQ_CONFIG 0x8c00 # define VC_ENABLE (1 << 0) # define EXPORT_SRC_C (1 << 1) # define DX9_CONSTS (1 << 2) # define ALU_INST_PREFER_VECTOR (1 << 3) # define DX10_CLAMP (1 << 4) # define CLAUSE_SEQ_PRIO(x) ((x) << 8) # define PS_PRIO(x) ((x) << 24) # define VS_PRIO(x) ((x) << 26) # define GS_PRIO(x) ((x) << 28) # define ES_PRIO(x) ((x) << 30) #define SQ_GPR_RESOURCE_MGMT_1 0x8c04 # define NUM_PS_GPRS(x) ((x) << 0) # define NUM_VS_GPRS(x) ((x) << 16) # define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) #define SQ_GPR_RESOURCE_MGMT_2 0x8c08 # define NUM_GS_GPRS(x) ((x) << 0) # define NUM_ES_GPRS(x) ((x) << 16) #define SQ_THREAD_RESOURCE_MGMT 0x8c0c # define NUM_PS_THREADS(x) ((x) << 0) # define NUM_VS_THREADS(x) ((x) << 8) # define NUM_GS_THREADS(x) ((x) << 16) # define NUM_ES_THREADS(x) ((x) << 24) #define SQ_STACK_RESOURCE_MGMT_1 0x8c10 # define NUM_PS_STACK_ENTRIES(x) ((x) << 0) # define NUM_VS_STACK_ENTRIES(x) ((x) << 16) #define SQ_STACK_RESOURCE_MGMT_2 0x8c14 # define NUM_GS_STACK_ENTRIES(x) ((x) << 0) # define NUM_ES_STACK_ENTRIES(x) ((x) << 16) #define SQ_ESGS_RING_BASE 0x8c40 #define SQ_GSVS_RING_BASE 0x8c48 #define SQ_ESTMP_RING_BASE 0x8c50 #define SQ_GSTMP_RING_BASE 0x8c58 #define SQ_VSTMP_RING_BASE 0x8c60 #define SQ_PSTMP_RING_BASE 0x8c68 #define SQ_FBUF_RING_BASE 0x8c70 #define SQ_REDUC_RING_BASE 0x8c78 #define GRBM_CNTL 0x8000 # define GRBM_READ_TIMEOUT(x) ((x) << 0) #define GRBM_STATUS 0x8010 #define CMDFIFO_AVAIL_MASK 0x0000001F #define GUI_ACTIVE (1<<31) #define GRBM_STATUS2 0x8014 #define GRBM_SOFT_RESET 0x8020 #define SOFT_RESET_CP (1<<0) #define CG_THERMAL_STATUS 0x7F4 #define ASIC_T(x) ((x) << 0) #define ASIC_T_MASK 0x1FF #define ASIC_T_SHIFT 0 #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_TILING_CONFIG 0x2F3C #define HDP_DEBUG1 0x2F34 #define MC_VM_AGP_TOP 0x2184 #define MC_VM_AGP_BOT 0x2188 #define MC_VM_AGP_BASE 0x218C #define MC_VM_FB_LOCATION 0x2180 #define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C #define ENABLE_L1_TLB (1 << 0) #define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) #define ENABLE_L1_STRICT_ORDERING (1 << 2) #define SYSTEM_ACCESS_MODE_MASK 0x000000C0 #define SYSTEM_ACCESS_MODE_SHIFT 6 #define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6) #define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6) #define SYSTEM_ACCESS_MODE_IN_SYS (2 << 6) #define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6) #define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8) #define SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8) #define ENABLE_SEMAPHORE_MODE (1 << 10) #define ENABLE_WAIT_L2_QUERY (1 << 11) #define 
EFFECTIVE_L1_TLB_SIZE(x) (((x) & 7) << 12) #define EFFECTIVE_L1_TLB_SIZE_MASK 0x00007000 #define EFFECTIVE_L1_TLB_SIZE_SHIFT 12 #define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15) #define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000 #define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15 #define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0 #define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC #define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204 #define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208 #define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C #define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200 #define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4 #define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8 #define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210 #define MC_VM_L1_TLB_MCB_WR_HDP_CNTL 0x2218 #define MC_VM_L1_TLB_MCB_WR_PDMA_CNTL 0x221C #define MC_VM_L1_TLB_MCB_WR_SEM_CNTL 0x2220 #define MC_VM_L1_TLB_MCB_WR_SYS_CNTL 0x2214 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190 #define LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF #define LOGICAL_PAGE_NUMBER_SHIFT 0 #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198 #define PA_CL_ENHANCE 0x8A14 #define CLIP_VTX_REORDER_ENA (1 << 0) #define NUM_CLIP_SEQ(x) ((x) << 1) #define PA_SC_AA_CONFIG 0x28C04 #define PA_SC_AA_SAMPLE_LOCS_2S 0x8B40 #define PA_SC_AA_SAMPLE_LOCS_4S 0x8B44 #define PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8B48 #define PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8B4C #define S0_X(x) ((x) << 0) #define S0_Y(x) ((x) << 4) #define S1_X(x) ((x) << 8) #define S1_Y(x) ((x) << 12) #define S2_X(x) ((x) << 16) #define S2_Y(x) ((x) << 20) #define S3_X(x) ((x) << 24) #define S3_Y(x) ((x) << 28) #define S4_X(x) ((x) << 0) #define S4_Y(x) ((x) << 4) #define S5_X(x) ((x) << 8) #define S5_Y(x) ((x) << 12) #define S6_X(x) ((x) << 16) #define S6_Y(x) ((x) << 20) #define S7_X(x) ((x) << 24) #define S7_Y(x) ((x) << 28) #define PA_SC_CLIPRECT_RULE 0x2820c #define PA_SC_ENHANCE 0x8BF0 #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) #define FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12) #define PA_SC_LINE_STIPPLE 0x28A0C #define PA_SC_LINE_STIPPLE_STATE 0x8B10 #define PA_SC_MODE_CNTL 0x28A4C #define PA_SC_MULTI_CHIP_CNTL 0x8B20 #define PA_SC_SCREEN_SCISSOR_TL 0x28030 #define PA_SC_GENERIC_SCISSOR_TL 0x28240 #define PA_SC_WINDOW_SCISSOR_TL 0x28204 #define PCIE_PORT_INDEX 0x0038 #define PCIE_PORT_DATA 0x003C #define CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 #define RAMCFG 0x2408 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000001 #define NOOFRANK_SHIFT 1 #define NOOFRANK_MASK 0x00000002 #define NOOFROWS_SHIFT 2 #define NOOFROWS_MASK 0x0000001C #define NOOFCOLS_SHIFT 5 #define NOOFCOLS_MASK 0x00000060 #define CHANSIZE_SHIFT 7 #define CHANSIZE_MASK 0x00000080 #define BURSTLENGTH_SHIFT 8 #define BURSTLENGTH_MASK 0x00000100 #define CHANSIZE_OVERRIDE (1 << 10) #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define SCRATCH_REG2 0x8508 #define SCRATCH_REG3 0x850C #define SCRATCH_REG4 0x8510 #define SCRATCH_REG5 0x8514 #define SCRATCH_REG6 0x8518 #define SCRATCH_REG7 0x851C #define SCRATCH_UMSK 0x8540 #define SCRATCH_ADDR 0x8544 #define SPI_CONFIG_CNTL 0x9100 #define GPR_WRITE_PRIORITY(x) ((x) << 0) #define DISABLE_INTERP_1 (1 << 5) #define SPI_CONFIG_CNTL_1 0x913C #define VTX_DONE_DELAY(x) ((x) << 0) #define INTERP_ONE_PRIM_PER_ROW (1 << 4) #define SPI_INPUT_Z 0x286D8 #define SPI_PS_IN_CONTROL_0 0x286CC #define NUM_INTERP(x) ((x)<<0) #define POSITION_ENA (1<<8) #define POSITION_CENTROID (1<<9) #define POSITION_ADDR(x) ((x)<<10) #define PARAM_GEN(x) ((x)<<15) #define PARAM_GEN_ADDR(x) ((x)<<19) #define 
BARYC_SAMPLE_CNTL(x) ((x)<<26) #define PERSP_GRADIENT_ENA (1<<28) #define LINEAR_GRADIENT_ENA (1<<29) #define POSITION_SAMPLE (1<<30) #define BARYC_AT_SAMPLE_ENA (1<<31) #define SPI_PS_IN_CONTROL_1 0x286D0 #define GEN_INDEX_PIX (1<<0) #define GEN_INDEX_PIX_ADDR(x) ((x)<<1) #define FRONT_FACE_ENA (1<<8) #define FRONT_FACE_CHAN(x) ((x)<<9) #define FRONT_FACE_ALL_BITS (1<<11) #define FRONT_FACE_ADDR(x) ((x)<<12) #define FOG_ADDR(x) ((x)<<17) #define FIXED_PT_POSITION_ENA (1<<24) #define FIXED_PT_POSITION_ADDR(x) ((x)<<25) #define SQ_MS_FIFO_SIZES 0x8CF0 #define CACHE_FIFO_SIZE(x) ((x) << 0) #define FETCH_FIFO_HIWATER(x) ((x) << 8) #define DONE_FIFO_HIWATER(x) ((x) << 16) #define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) #define SQ_PGM_START_ES 0x28880 #define SQ_PGM_START_FS 0x28894 #define SQ_PGM_START_GS 0x2886C #define SQ_PGM_START_PS 0x28840 #define SQ_PGM_RESOURCES_PS 0x28850 #define SQ_PGM_EXPORTS_PS 0x28854 #define SQ_PGM_CF_OFFSET_PS 0x288cc #define SQ_PGM_START_VS 0x28858 #define SQ_PGM_RESOURCES_VS 0x28868 #define SQ_PGM_CF_OFFSET_VS 0x288d0 #define SQ_VTX_CONSTANT_WORD0_0 0x30000 #define SQ_VTX_CONSTANT_WORD1_0 0x30004 #define SQ_VTX_CONSTANT_WORD2_0 0x30008 # define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0) # define SQ_VTXC_STRIDE(x) ((x) << 8) # define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30) # define SQ_ENDIAN_NONE 0 # define SQ_ENDIAN_8IN16 1 # define SQ_ENDIAN_8IN32 2 #define SQ_VTX_CONSTANT_WORD3_0 0x3000c #define SQ_VTX_CONSTANT_WORD6_0 0x38018 #define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30) #define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3) #define SQ_TEX_VTX_INVALID_TEXTURE 0x0 #define SQ_TEX_VTX_INVALID_BUFFER 0x1 #define SQ_TEX_VTX_VALID_TEXTURE 0x2 #define SQ_TEX_VTX_VALID_BUFFER 0x3 #define SX_MISC 0x28350 #define SX_MEMORY_EXPORT_BASE 0x9010 #define SX_DEBUG_1 0x9054 #define SMX_EVENT_RELEASE (1 << 0) #define ENABLE_NEW_SMX_ADDRESS (1 << 16) #define TA_CNTL_AUX 0x9508 #define DISABLE_CUBE_WRAP (1 << 0) #define DISABLE_CUBE_ANISO (1 << 1) #define SYNC_GRADIENT (1 << 24) #define SYNC_WALKER (1 << 25) #define SYNC_ALIGNER (1 << 26) #define BILINEAR_PRECISION_6_BIT (0 << 31) -#define BILINEAR_PRECISION_8_BIT (1 << 31) +#define BILINEAR_PRECISION_8_BIT (1U << 31) #define TC_CNTL 0x9608 #define TC_L2_SIZE(x) ((x)<<5) #define L2_DISABLE_LATE_HIT (1<<9) #define VC_ENHANCE 0x9714 #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x)<<0) #define VC_ONLY 0 #define TC_ONLY 1 #define VC_AND_TC 2 #define VGT_DMA_BASE 0x287E8 #define VGT_DMA_BASE_HI 0x287E4 #define VGT_ES_PER_GS 0x88CC #define VGT_GS_PER_ES 0x88C8 #define VGT_GS_PER_VS 0x88E8 #define VGT_GS_VERTEX_REUSE 0x88D4 #define VGT_PRIMITIVE_TYPE 0x8958 #define VGT_NUM_INSTANCES 0x8974 #define VGT_OUT_DEALLOC_CNTL 0x28C5C #define DEALLOC_DIST_MASK 0x0000007F #define VGT_STRMOUT_BASE_OFFSET_0 0x28B10 #define VGT_STRMOUT_BASE_OFFSET_1 0x28B14 #define VGT_STRMOUT_BASE_OFFSET_2 0x28B18 #define VGT_STRMOUT_BASE_OFFSET_3 0x28B1c #define VGT_STRMOUT_BASE_OFFSET_HI_0 0x28B44 #define VGT_STRMOUT_BASE_OFFSET_HI_1 0x28B48 #define VGT_STRMOUT_BASE_OFFSET_HI_2 0x28B4c #define VGT_STRMOUT_BASE_OFFSET_HI_3 0x28B50 #define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8 #define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8 #define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8 #define VGT_STRMOUT_BUFFER_BASE_3 0x28B08 #define VGT_STRMOUT_BUFFER_OFFSET_0 0x28ADC #define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC #define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC #define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C #define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0 #define VGT_STRMOUT_BUFFER_SIZE_1 
0x28AE0 #define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0 #define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00 #define VGT_STRMOUT_EN 0x28AB0 #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 #define VTX_REUSE_DEPTH_MASK 0x000000FF #define VGT_EVENT_INITIATOR 0x28a90 # define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0) # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) #define VM_CONTEXT0_CNTL 0x1410 #define ENABLE_CONTEXT (1 << 0) #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) #define VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490 #define VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14B0 #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574 #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594 #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x15B4 #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1554 #define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 #define REQUEST_TYPE(x) (((x) & 0xf) << 0) #define RESPONSE_TYPE_MASK 0x000000F0 #define RESPONSE_TYPE_SHIFT 4 #define VM_L2_CNTL 0x1400 #define ENABLE_L2_CACHE (1 << 0) #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) #define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) #define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 13) #define VM_L2_CNTL2 0x1404 #define INVALIDATE_ALL_L1_TLBS (1 << 0) #define INVALIDATE_L2_CACHE (1 << 1) #define VM_L2_CNTL3 0x1408 #define BANK_SELECT_0(x) (((x) & 0x1f) << 0) #define BANK_SELECT_1(x) (((x) & 0x1f) << 5) #define L2_CACHE_UPDATE_MODE(x) (((x) & 3) << 10) #define VM_L2_STATUS 0x140C #define L2_BUSY (1 << 0) #define WAIT_UNTIL 0x8040 #define WAIT_2D_IDLE_bit (1 << 14) #define WAIT_3D_IDLE_bit (1 << 15) #define WAIT_2D_IDLECLEAN_bit (1 << 16) #define WAIT_3D_IDLECLEAN_bit (1 << 17) /* async DMA */ #define DMA_TILING_CONFIG 0x3ec4 #define DMA_CONFIG 0x3e4c #define DMA_RB_CNTL 0xd000 # define DMA_RB_ENABLE (1 << 0) # define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ # define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) # define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ #define DMA_RB_BASE 0xd004 #define DMA_RB_RPTR 0xd008 #define DMA_RB_WPTR 0xd00c #define DMA_RB_RPTR_ADDR_HI 0xd01c #define DMA_RB_RPTR_ADDR_LO 0xd020 #define DMA_IB_CNTL 0xd024 # define DMA_IB_ENABLE (1 << 0) # define DMA_IB_SWAP_ENABLE (1 << 4) #define DMA_IB_RPTR 0xd028 #define DMA_CNTL 0xd02c # define TRAP_ENABLE (1 << 0) # define SEM_INCOMPLETE_INT_ENABLE (1 << 1) # define SEM_WAIT_INT_ENABLE (1 << 2) # define DATA_SWAP_ENABLE (1 << 3) # define FENCE_SWAP_ENABLE (1 << 4) # define CTXEMPTY_INT_ENABLE (1 << 28) #define DMA_STATUS_REG 0xd034 # define DMA_IDLE (1 << 0) #define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044 #define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048 #define DMA_MODE 0xd0bc /* async DMA packets */ #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ (((t) & 0x1) << 23) | \ (((s) & 0x1) << 22) | \ (((n) & 0xFFFF) << 0)) /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 #define DMA_PACKET_INDIRECT_BUFFER 0x4 #define DMA_PACKET_SEMAPHORE 0x5 #define DMA_PACKET_FENCE 0x6 #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */ #define DMA_PACKET_NOP 0xf #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_RB_SIZE(x) ((x) << 1) /* log2 */ # define IH_RB_FULL_DRAIN_ENABLE (1 << 6) # define IH_WPTR_WRITEBACK_ENABLE (1 << 8) # define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ # define IH_WPTR_OVERFLOW_ENABLE (1 << 16) -# define IH_WPTR_OVERFLOW_CLEAR (1 << 31) +# define 
IH_WPTR_OVERFLOW_CLEAR (1U << 31) #define IH_RB_BASE 0x3e04 #define IH_RB_RPTR 0x3e08 #define IH_RB_WPTR 0x3e0c # define RB_OVERFLOW (1 << 0) # define WPTR_OFFSET_MASK 0x3fffc #define IH_RB_WPTR_ADDR_HI 0x3e10 #define IH_RB_WPTR_ADDR_LO 0x3e14 #define IH_CNTL 0x3e18 # define ENABLE_INTR (1 << 0) # define IH_MC_SWAP(x) ((x) << 1) # define IH_MC_SWAP_NONE 0 # define IH_MC_SWAP_16BIT 1 # define IH_MC_SWAP_32BIT 2 # define IH_MC_SWAP_64BIT 3 # define RPTR_REARM (1 << 4) # define MC_WRREQ_CREDIT(x) ((x) << 15) # define MC_WR_CLEAN_CNT(x) ((x) << 20) #define RLC_CNTL 0x3f00 # define RLC_ENABLE (1 << 0) #define RLC_HB_BASE 0x3f10 #define RLC_HB_CNTL 0x3f0c #define RLC_HB_RPTR 0x3f20 #define RLC_HB_WPTR 0x3f1c #define RLC_HB_WPTR_LSB_ADDR 0x3f14 #define RLC_HB_WPTR_MSB_ADDR 0x3f18 #define RLC_GPU_CLOCK_COUNT_LSB 0x3f38 #define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c #define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40 #define RLC_MC_CNTL 0x3f44 #define RLC_UCODE_CNTL 0x3f48 #define RLC_UCODE_ADDR 0x3f2c #define RLC_UCODE_DATA 0x3f30 /* new for TN */ #define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10 #define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20 #define SRBM_SOFT_RESET 0xe60 # define SOFT_RESET_DMA (1 << 12) # define SOFT_RESET_RLC (1 << 13) # define RV770_SOFT_RESET_DMA (1 << 20) #define CP_INT_CNTL 0xc124 # define CNTX_BUSY_INT_ENABLE (1 << 19) # define CNTX_EMPTY_INT_ENABLE (1 << 20) # define SCRATCH_INT_ENABLE (1 << 25) # define TIME_STAMP_INT_ENABLE (1 << 26) # define IB2_INT_ENABLE (1 << 29) # define IB1_INT_ENABLE (1 << 30) -# define RB_INT_ENABLE (1 << 31) +# define RB_INT_ENABLE (1U << 31) #define CP_INT_STATUS 0xc128 # define SCRATCH_INT_STAT (1 << 25) # define TIME_STAMP_INT_STAT (1 << 26) # define IB2_INT_STAT (1 << 29) # define IB1_INT_STAT (1 << 30) -# define RB_INT_STAT (1 << 31) +# define RB_INT_STAT (1U << 31) #define GRBM_INT_CNTL 0x8060 # define RDERR_INT_ENABLE (1 << 0) # define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1) # define GUI_IDLE_INT_ENABLE (1 << 19) #define INTERRUPT_CNTL 0x5468 # define IH_DUMMY_RD_OVERRIDE (1 << 0) # define IH_DUMMY_RD_EN (1 << 1) # define IH_REQ_NONSNOOP_EN (1 << 3) # define GEN_IH_INT_EN (1 << 8) #define INTERRUPT_CNTL2 0x546c #define D1MODE_VBLANK_STATUS 0x6534 #define D2MODE_VBLANK_STATUS 0x6d34 # define DxMODE_VBLANK_OCCURRED (1 << 0) # define DxMODE_VBLANK_ACK (1 << 4) # define DxMODE_VBLANK_STAT (1 << 12) # define DxMODE_VBLANK_INTERRUPT (1 << 16) # define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17) #define D1MODE_VLINE_STATUS 0x653c #define D2MODE_VLINE_STATUS 0x6d3c # define DxMODE_VLINE_OCCURRED (1 << 0) # define DxMODE_VLINE_ACK (1 << 4) # define DxMODE_VLINE_STAT (1 << 12) # define DxMODE_VLINE_INTERRUPT (1 << 16) # define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17) #define DxMODE_INT_MASK 0x6540 # define D1MODE_VBLANK_INT_MASK (1 << 0) # define D1MODE_VLINE_INT_MASK (1 << 4) # define D2MODE_VBLANK_INT_MASK (1 << 8) # define D2MODE_VLINE_INT_MASK (1 << 12) #define DCE3_DISP_INTERRUPT_STATUS 0x7ddc # define DC_HPD1_INTERRUPT (1 << 18) # define DC_HPD2_INTERRUPT (1 << 19) #define DISP_INTERRUPT_STATUS 0x7edc # define LB_D1_VLINE_INTERRUPT (1 << 2) # define LB_D2_VLINE_INTERRUPT (1 << 3) # define LB_D1_VBLANK_INTERRUPT (1 << 4) # define LB_D2_VBLANK_INTERRUPT (1 << 5) # define DACA_AUTODETECT_INTERRUPT (1 << 16) # define DACB_AUTODETECT_INTERRUPT (1 << 17) # define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18) # define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19) # define DC_I2C_SW_DONE_INTERRUPT (1 << 20) # define DC_I2C_HW_DONE_INTERRUPT (1 << 21) #define DISP_INTERRUPT_STATUS_CONTINUE 
0x7ee8 #define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8 # define DC_HPD4_INTERRUPT (1 << 14) # define DC_HPD4_RX_INTERRUPT (1 << 15) # define DC_HPD3_INTERRUPT (1 << 28) # define DC_HPD1_RX_INTERRUPT (1 << 29) # define DC_HPD2_RX_INTERRUPT (1 << 30) #define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec # define DC_HPD3_RX_INTERRUPT (1 << 0) # define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1) # define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2) # define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3) # define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4) # define AUX1_SW_DONE_INTERRUPT (1 << 5) # define AUX1_LS_DONE_INTERRUPT (1 << 6) # define AUX2_SW_DONE_INTERRUPT (1 << 7) # define AUX2_LS_DONE_INTERRUPT (1 << 8) # define AUX3_SW_DONE_INTERRUPT (1 << 9) # define AUX3_LS_DONE_INTERRUPT (1 << 10) # define AUX4_SW_DONE_INTERRUPT (1 << 11) # define AUX4_LS_DONE_INTERRUPT (1 << 12) # define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13) # define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14) /* DCE 3.2 */ # define AUX5_SW_DONE_INTERRUPT (1 << 15) # define AUX5_LS_DONE_INTERRUPT (1 << 16) # define AUX6_SW_DONE_INTERRUPT (1 << 17) # define AUX6_LS_DONE_INTERRUPT (1 << 18) # define DC_HPD5_INTERRUPT (1 << 19) # define DC_HPD5_RX_INTERRUPT (1 << 20) # define DC_HPD6_INTERRUPT (1 << 21) # define DC_HPD6_RX_INTERRUPT (1 << 22) #define DACA_AUTO_DETECT_CONTROL 0x7828 #define DACB_AUTO_DETECT_CONTROL 0x7a28 #define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028 #define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128 # define DACx_AUTODETECT_MODE(x) ((x) << 0) # define DACx_AUTODETECT_MODE_NONE 0 # define DACx_AUTODETECT_MODE_CONNECT 1 # define DACx_AUTODETECT_MODE_DISCONNECT 2 # define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8) /* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */ # define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16) #define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038 #define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138 #define DACA_AUTODETECT_INT_CONTROL 0x7838 #define DACB_AUTODETECT_INT_CONTROL 0x7a38 # define DACx_AUTODETECT_ACK (1 << 0) # define DACx_AUTODETECT_INT_ENABLE (1 << 16) #define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00 #define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10 #define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24 # define DC_HOT_PLUG_DETECTx_EN (1 << 0) #define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04 #define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14 #define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28 # define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0) # define DC_HOT_PLUG_DETECTx_SENSE (1 << 1) /* DCE 3.0 */ #define DC_HPD1_INT_STATUS 0x7d00 #define DC_HPD2_INT_STATUS 0x7d0c #define DC_HPD3_INT_STATUS 0x7d18 #define DC_HPD4_INT_STATUS 0x7d24 /* DCE 3.2 */ #define DC_HPD5_INT_STATUS 0x7dc0 #define DC_HPD6_INT_STATUS 0x7df4 # define DC_HPDx_INT_STATUS (1 << 0) # define DC_HPDx_SENSE (1 << 1) # define DC_HPDx_RX_INT_STATUS (1 << 8) #define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08 #define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18 #define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c # define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0) # define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8) # define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16) /* DCE 3.0 */ #define DC_HPD1_INT_CONTROL 0x7d04 #define DC_HPD2_INT_CONTROL 0x7d10 #define DC_HPD3_INT_CONTROL 0x7d1c #define DC_HPD4_INT_CONTROL 0x7d28 /* DCE 3.2 */ #define DC_HPD5_INT_CONTROL 0x7dc4 #define DC_HPD6_INT_CONTROL 0x7df8 # define DC_HPDx_INT_ACK (1 << 0) # define DC_HPDx_INT_POLARITY (1 << 8) # define DC_HPDx_INT_EN (1 << 16) # define DC_HPDx_RX_INT_ACK (1 << 20) # define DC_HPDx_RX_INT_EN (1 << 24) /* 
DCE 3.0 */ #define DC_HPD1_CONTROL 0x7d08 #define DC_HPD2_CONTROL 0x7d14 #define DC_HPD3_CONTROL 0x7d20 #define DC_HPD4_CONTROL 0x7d2c /* DCE 3.2 */ #define DC_HPD5_CONTROL 0x7dc8 #define DC_HPD6_CONTROL 0x7dfc # define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) /* DCE 3.2 */ # define DC_HPDx_EN (1 << 28) #define D1GRPH_INTERRUPT_STATUS 0x6158 #define D2GRPH_INTERRUPT_STATUS 0x6958 # define DxGRPH_PFLIP_INT_OCCURRED (1 << 0) # define DxGRPH_PFLIP_INT_CLEAR (1 << 8) #define D1GRPH_INTERRUPT_CONTROL 0x615c #define D2GRPH_INTERRUPT_CONTROL 0x695c # define DxGRPH_PFLIP_INT_MASK (1 << 0) # define DxGRPH_PFLIP_INT_TYPE (1 << 8) /* PCIE link stuff */ #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ # define LC_POINT_7_PLUS_EN (1 << 6) #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ # define LC_LINK_WIDTH_SHIFT 0 # define LC_LINK_WIDTH_MASK 0x7 # define LC_LINK_WIDTH_X0 0 # define LC_LINK_WIDTH_X1 1 # define LC_LINK_WIDTH_X2 2 # define LC_LINK_WIDTH_X4 3 # define LC_LINK_WIDTH_X8 4 # define LC_LINK_WIDTH_X16 6 # define LC_LINK_WIDTH_RD_SHIFT 4 # define LC_LINK_WIDTH_RD_MASK 0x70 # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) # define LC_RECONFIG_NOW (1 << 8) # define LC_RENEGOTIATION_SUPPORT (1 << 9) # define LC_RENEGOTIATE_EN (1 << 10) # define LC_SHORT_RECONFIG_EN (1 << 11) # define LC_UPCONFIGURE_SUPPORT (1 << 12) # define LC_UPCONFIGURE_DIS (1 << 13) #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ # define LC_GEN2_EN_STRAP (1 << 0) # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 # define LC_CURRENT_DATA_RATE (1 << 11) # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) #define MM_CFGREGS_CNTL 0x544c # define MM_WR_TO_CFG_EN (1 << 3) #define LINK_CNTL2 0x88 /* F0 */ # define TARGET_LINK_SPEED_MASK (0xf << 0) # define SELECTABLE_DEEMPHASIS (1 << 6) /* Audio clocks */ #define DCCG_AUDIO_DTO0_PHASE 0x0514 #define DCCG_AUDIO_DTO0_MODULE 0x0518 #define DCCG_AUDIO_DTO0_LOAD 0x051c -# define DTO_LOAD (1 << 31) +# define DTO_LOAD (1U << 31) #define DCCG_AUDIO_DTO0_CNTL 0x0520 #define DCCG_AUDIO_DTO1_PHASE 0x0524 #define DCCG_AUDIO_DTO1_MODULE 0x0528 #define DCCG_AUDIO_DTO1_LOAD 0x052c #define DCCG_AUDIO_DTO1_CNTL 0x0530 #define DCCG_AUDIO_DTO_SELECT 0x0534 /* digital blocks */ #define TMDSA_CNTL 0x7880 # define TMDSA_HDMI_EN (1 << 2) #define LVTMA_CNTL 0x7a80 # define LVTMA_HDMI_EN (1 << 2) #define DDIA_CNTL 0x7200 # define DDIA_HDMI_EN (1 << 2) #define DIG0_CNTL 0x75a0 # define DIG_MODE(x) (((x) & 7) << 8) # define DIG_MODE_DP 0 # define DIG_MODE_LVDS 1 # define DIG_MODE_TMDS_DVI 2 # define DIG_MODE_TMDS_HDMI 3 # define DIG_MODE_SDVO 4 #define DIG1_CNTL 0x79a0 /* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly * different due to the new DIG blocks, but also have 2 instances. * DCE 3.0 HDMI blocks are part of each DIG encoder. 
*/ /* rs6xx/rs740/r6xx/dce3 */ #define HDMI0_CONTROL 0x7400 /* rs6xx/rs740/r6xx */ # define HDMI0_ENABLE (1 << 0) # define HDMI0_STREAM(x) (((x) & 3) << 2) # define HDMI0_STREAM_TMDSA 0 # define HDMI0_STREAM_LVTMA 1 # define HDMI0_STREAM_DVOA 2 # define HDMI0_STREAM_DDIA 3 /* rs6xx/r6xx/dce3 */ # define HDMI0_ERROR_ACK (1 << 8) # define HDMI0_ERROR_MASK (1 << 9) #define HDMI0_STATUS 0x7404 # define HDMI0_ACTIVE_AVMUTE (1 << 0) # define HDMI0_AUDIO_ENABLE (1 << 4) # define HDMI0_AZ_FORMAT_WTRIG (1 << 28) # define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29) #define HDMI0_AUDIO_PACKET_CONTROL 0x7408 # define HDMI0_AUDIO_SAMPLE_SEND (1 << 0) # define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4) # define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8) # define HDMI0_AUDIO_TEST_EN (1 << 12) # define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) # define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24) # define HDMI0_60958_CS_UPDATE (1 << 26) # define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28) # define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29) #define HDMI0_AUDIO_CRC_CONTROL 0x740c # define HDMI0_AUDIO_CRC_EN (1 << 0) #define HDMI0_VBI_PACKET_CONTROL 0x7410 # define HDMI0_NULL_SEND (1 << 0) # define HDMI0_GC_SEND (1 << 4) # define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */ #define HDMI0_INFOFRAME_CONTROL0 0x7414 # define HDMI0_AVI_INFO_SEND (1 << 0) # define HDMI0_AVI_INFO_CONT (1 << 1) # define HDMI0_AUDIO_INFO_SEND (1 << 4) # define HDMI0_AUDIO_INFO_CONT (1 << 5) # define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */ # define HDMI0_AUDIO_INFO_UPDATE (1 << 7) # define HDMI0_MPEG_INFO_SEND (1 << 8) # define HDMI0_MPEG_INFO_CONT (1 << 9) # define HDMI0_MPEG_INFO_UPDATE (1 << 10) #define HDMI0_INFOFRAME_CONTROL1 0x7418 # define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) # define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) # define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) #define HDMI0_GENERIC_PACKET_CONTROL 0x741c # define HDMI0_GENERIC0_SEND (1 << 0) # define HDMI0_GENERIC0_CONT (1 << 1) # define HDMI0_GENERIC0_UPDATE (1 << 2) # define HDMI0_GENERIC1_SEND (1 << 4) # define HDMI0_GENERIC1_CONT (1 << 5) # define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16) # define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24) #define HDMI0_GC 0x7428 # define HDMI0_GC_AVMUTE (1 << 0) #define HDMI0_AVI_INFO0 0x7454 # define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8) # define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10) # define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12) # define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13) # define HDMI0_AVI_INFO_Y_RGB 0 # define HDMI0_AVI_INFO_Y_YCBCR422 1 # define HDMI0_AVI_INFO_Y_YCBCR444 2 # define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8) # define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16) # define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20) # define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22) # define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16) # define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24) # define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24) #define HDMI0_AVI_INFO1 0x7458 # define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */ # define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */ # define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16) #define HDMI0_AVI_INFO2 0x745c # define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0) # define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16) #define HDMI0_AVI_INFO3 0x7460 # define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0) # define HDMI0_AVI_INFO_VERSION(x)
(((x) & 3) << 24) #define HDMI0_MPEG_INFO0 0x7464 # define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8) # define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16) # define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24) #define HDMI0_MPEG_INFO1 0x7468 # define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0) # define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8) # define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12) #define HDMI0_GENERIC0_HDR 0x746c #define HDMI0_GENERIC0_0 0x7470 #define HDMI0_GENERIC0_1 0x7474 #define HDMI0_GENERIC0_2 0x7478 #define HDMI0_GENERIC0_3 0x747c #define HDMI0_GENERIC0_4 0x7480 #define HDMI0_GENERIC0_5 0x7484 #define HDMI0_GENERIC0_6 0x7488 #define HDMI0_GENERIC1_HDR 0x748c #define HDMI0_GENERIC1_0 0x7490 #define HDMI0_GENERIC1_1 0x7494 #define HDMI0_GENERIC1_2 0x7498 #define HDMI0_GENERIC1_3 0x749c #define HDMI0_GENERIC1_4 0x74a0 #define HDMI0_GENERIC1_5 0x74a4 #define HDMI0_GENERIC1_6 0x74a8 #define HDMI0_ACR_32_0 0x74ac # define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12) #define HDMI0_ACR_32_1 0x74b0 # define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0) #define HDMI0_ACR_44_0 0x74b4 # define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12) #define HDMI0_ACR_44_1 0x74b8 # define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0) #define HDMI0_ACR_48_0 0x74bc # define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12) #define HDMI0_ACR_48_1 0x74c0 # define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0) #define HDMI0_ACR_STATUS_0 0x74c4 #define HDMI0_ACR_STATUS_1 0x74c8 #define HDMI0_AUDIO_INFO0 0x74cc # define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8) #define HDMI0_AUDIO_INFO1 0x74d0 # define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0) # define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11) # define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15) # define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8) #define HDMI0_60958_0 0x74d4 # define HDMI0_60958_CS_A(x) (((x) & 1) << 0) # define HDMI0_60958_CS_B(x) (((x) & 1) << 1) # define HDMI0_60958_CS_C(x) (((x) & 1) << 2) # define HDMI0_60958_CS_D(x) (((x) & 3) << 3) # define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6) # define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) # define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) # define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) # define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) # define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) #define HDMI0_60958_1 0x74d8 # define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) # define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) # define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16) # define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18) # define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) #define HDMI0_ACR_PACKET_CONTROL 0x74dc # define HDMI0_ACR_SEND (1 << 0) # define HDMI0_ACR_CONT (1 << 1) # define HDMI0_ACR_SELECT(x) (((x) & 3) << 4) # define HDMI0_ACR_HW 0 # define HDMI0_ACR_32 1 # define HDMI0_ACR_44 2 # define HDMI0_ACR_48 3 # define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ # define HDMI0_ACR_AUTO_SEND (1 << 12) #define HDMI0_RAMP_CONTROL0 0x74e0 # define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) #define HDMI0_RAMP_CONTROL1 0x74e4 # define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0) #define HDMI0_RAMP_CONTROL2 0x74e8 # define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0) #define HDMI0_RAMP_CONTROL3 0x74ec # define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0) /* HDMI0_60958_2 is 
r7xx only */ #define HDMI0_60958_2 0x74f0 # define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0) # define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4) # define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8) # define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12) # define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16) # define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20) /* r6xx only; second instance starts at 0x7700 */ #define HDMI1_CONTROL 0x7700 #define HDMI1_STATUS 0x7704 #define HDMI1_AUDIO_PACKET_CONTROL 0x7708 /* DCE3; second instance starts at 0x7800 NOT 0x7700 */ #define DCE3_HDMI1_CONTROL 0x7800 #define DCE3_HDMI1_STATUS 0x7804 #define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808 /* DCE3.2 (for interrupts) */ #define AFMT_STATUS 0x7600 # define AFMT_AUDIO_ENABLE (1 << 4) # define AFMT_AZ_FORMAT_WTRIG (1 << 28) # define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29) # define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30) #define AFMT_AUDIO_PACKET_CONTROL 0x7604 # define AFMT_AUDIO_SAMPLE_SEND (1 << 0) # define AFMT_AUDIO_TEST_EN (1 << 12) # define AFMT_AUDIO_CHANNEL_SWAP (1 << 24) # define AFMT_60958_CS_UPDATE (1 << 26) # define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27) # define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28) # define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) # define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) /* * PM4 */ #define PACKET_TYPE0 0 #define PACKET_TYPE1 1 #define PACKET_TYPE2 2 #define PACKET_TYPE3 3 #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ (((reg) >> 2) & 0xFFFF) | \ ((n) & 0x3FFF) << 16) #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ (((op) & 0xFF) << 8) | \ ((n) & 0x3FFF) << 16) /* Packet 3 types */ #define PACKET3_NOP 0x10 #define PACKET3_INDIRECT_BUFFER_END 0x17 #define PACKET3_SET_PREDICATION 0x20 #define PACKET3_REG_RMW 0x21 #define PACKET3_COND_EXEC 0x22 #define PACKET3_PRED_EXEC 0x23 #define PACKET3_START_3D_CMDBUF 0x24 #define PACKET3_DRAW_INDEX_2 0x27 #define PACKET3_CONTEXT_CONTROL 0x28 #define PACKET3_DRAW_INDEX_IMMD_BE 0x29 #define PACKET3_INDEX_TYPE 0x2A #define PACKET3_DRAW_INDEX 0x2B #define PACKET3_DRAW_INDEX_AUTO 0x2D #define PACKET3_DRAW_INDEX_IMMD 0x2E #define PACKET3_NUM_INSTANCES 0x2F #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 #define PACKET3_INDIRECT_BUFFER_MP 0x38 #define PACKET3_MEM_SEMAPHORE 0x39 # define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12) # define PACKET3_SEM_SEL_SIGNAL (0x6 << 29) # define PACKET3_SEM_SEL_WAIT (0x7 << 29) #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_COPY_DW 0x3B #define PACKET3_WAIT_REG_MEM 0x3C #define PACKET3_MEM_WRITE 0x3D #define PACKET3_INDIRECT_BUFFER 0x32 #define PACKET3_CP_DMA 0x41 /* 1. header * 2. SRC_ADDR_LO [31:0] * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0] * 4. DST_ADDR_LO [31:0] * 5. DST_ADDR_HI [7:0] * 6. 
COMMAND [29:22] | BYTE_COUNT [20:0] */ -# define PACKET3_CP_DMA_CP_SYNC (1 << 31) +# define PACKET3_CP_DMA_CP_SYNC (1U << 31) /* COMMAND */ # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) /* 0 - none * 1 - 8 in 16 * 2 - 8 in 32 * 3 - 8 in 64 */ # define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24) /* 0 - none * 1 - 8 in 16 * 2 - 8 in 32 * 3 - 8 in 64 */ # define PACKET3_CP_DMA_CMD_SAS (1 << 26) /* 0 - memory * 1 - register */ # define PACKET3_CP_DMA_CMD_DAS (1 << 27) /* 0 - memory * 1 - register */ # define PACKET3_CP_DMA_CMD_SAIC (1 << 28) # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_TC_ACTION_ENA (1 << 23) # define PACKET3_VC_ACTION_ENA (1 << 24) # define PACKET3_CB_ACTION_ENA (1 << 25) # define PACKET3_DB_ACTION_ENA (1 << 26) # define PACKET3_SH_ACTION_ENA (1 << 27) # define PACKET3_SMX_ACTION_ENA (1 << 28) #define PACKET3_ME_INITIALIZE 0x44 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 #define PACKET3_EVENT_WRITE 0x46 #define EVENT_TYPE(x) ((x) << 0) #define EVENT_INDEX(x) ((x) << 8) /* 0 - any non-TS event * 1 - ZPASS_DONE * 2 - SAMPLE_PIPELINESTAT * 3 - SAMPLE_STREAMOUTSTAT* * 4 - *S_PARTIAL_FLUSH * 5 - TS events */ #define PACKET3_EVENT_WRITE_EOP 0x47 #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data * 2 - send 64bit data * 3 - send 64bit counter value */ #define INT_SEL(x) ((x) << 24) /* 0 - none * 1 - interrupt only (DATA_SEL = 0) * 2 - interrupt when data write is confirmed */ #define PACKET3_ONE_REG_WRITE 0x57 #define PACKET3_SET_CONFIG_REG 0x68 #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 #define PACKET3_SET_CONFIG_REG_END 0x0000ac00 #define PACKET3_SET_CONTEXT_REG 0x69 #define PACKET3_SET_CONTEXT_REG_OFFSET 0x00028000 #define PACKET3_SET_CONTEXT_REG_END 0x00029000 #define PACKET3_SET_ALU_CONST 0x6A #define PACKET3_SET_ALU_CONST_OFFSET 0x00030000 #define PACKET3_SET_ALU_CONST_END 0x00032000 #define PACKET3_SET_BOOL_CONST 0x6B #define PACKET3_SET_BOOL_CONST_OFFSET 0x0003e380 #define PACKET3_SET_BOOL_CONST_END 0x00040000 #define PACKET3_SET_LOOP_CONST 0x6C #define PACKET3_SET_LOOP_CONST_OFFSET 0x0003e200 #define PACKET3_SET_LOOP_CONST_END 0x0003e380 #define PACKET3_SET_RESOURCE 0x6D #define PACKET3_SET_RESOURCE_OFFSET 0x00038000 #define PACKET3_SET_RESOURCE_END 0x0003c000 #define PACKET3_SET_SAMPLER 0x6E #define PACKET3_SET_SAMPLER_OFFSET 0x0003c000 #define PACKET3_SET_SAMPLER_END 0x0003cff0 #define PACKET3_SET_CTL_CONST 0x6F #define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0 #define PACKET3_SET_CTL_CONST_END 0x0003e200 #define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */ #define PACKET3_SURFACE_BASE_UPDATE 0x73 #define R_008020_GRBM_SOFT_RESET 0x8020 #define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0) #define S_008020_SOFT_RESET_CB(x) (((x) & 1) << 1) #define S_008020_SOFT_RESET_CR(x) (((x) & 1) << 2) #define S_008020_SOFT_RESET_DB(x) (((x) & 1) << 3) #define S_008020_SOFT_RESET_PA(x) (((x) & 1) << 5) #define S_008020_SOFT_RESET_SC(x) (((x) & 1) << 6) #define S_008020_SOFT_RESET_SMX(x) (((x) & 1) << 7) #define S_008020_SOFT_RESET_SPI(x) (((x) & 1) << 8) #define S_008020_SOFT_RESET_SH(x) (((x) & 1) << 9) #define S_008020_SOFT_RESET_SX(x) (((x) & 1) << 10) #define S_008020_SOFT_RESET_TC(x) (((x) & 1) << 11) #define S_008020_SOFT_RESET_TA(x) (((x) & 1) << 12) #define S_008020_SOFT_RESET_VC(x) (((x) & 1) << 13) #define S_008020_SOFT_RESET_VGT(x) (((x) & 1) << 14) #define R_008010_GRBM_STATUS 0x8010 #define 
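The (1 << 31) to (1U << 31) change above is the substance of this revision: shifting a set bit into the sign bit of a 32-bit signed int overflows, which is undefined behavior in C, and the negative value compilers typically produce sign-extends once it meets unsigned or 64-bit arithmetic. A minimal standalone sketch of the difference (illustration only, not part of the driver):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Signed overflow: undefined behavior.  Compilers commonly yield
	 * INT_MIN here, which sign-extends when widened to 64 bits. */
	uint64_t bad = (uint64_t)(1 << 31);
	/* Well-defined unsigned constant; stays 0x0000000080000000. */
	uint64_t good = (uint64_t)(1U << 31);

	printf("bad  = 0x%016jx\n", (uintmax_t)bad);
	printf("good = 0x%016jx\n", (uintmax_t)good);
	return (0);
}

The same reasoning applies to the matching (1U << 31) fix in radeon_do_cp_flush() further down; bits 30 and below are unaffected, which is why the neighboring flag definitions are left alone.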
S_008010_CMDFIFO_AVAIL(x) (((x) & 0x1F) << 0) #define S_008010_CP_RQ_PENDING(x) (((x) & 1) << 6) #define S_008010_CF_RQ_PENDING(x) (((x) & 1) << 7) #define S_008010_PF_RQ_PENDING(x) (((x) & 1) << 8) #define S_008010_GRBM_EE_BUSY(x) (((x) & 1) << 10) #define S_008010_VC_BUSY(x) (((x) & 1) << 11) #define S_008010_DB03_CLEAN(x) (((x) & 1) << 12) #define S_008010_CB03_CLEAN(x) (((x) & 1) << 13) #define S_008010_VGT_BUSY_NO_DMA(x) (((x) & 1) << 16) #define S_008010_VGT_BUSY(x) (((x) & 1) << 17) #define S_008010_TA03_BUSY(x) (((x) & 1) << 18) #define S_008010_TC_BUSY(x) (((x) & 1) << 19) #define S_008010_SX_BUSY(x) (((x) & 1) << 20) #define S_008010_SH_BUSY(x) (((x) & 1) << 21) #define S_008010_SPI03_BUSY(x) (((x) & 1) << 22) #define S_008010_SMX_BUSY(x) (((x) & 1) << 23) #define S_008010_SC_BUSY(x) (((x) & 1) << 24) #define S_008010_PA_BUSY(x) (((x) & 1) << 25) #define S_008010_DB03_BUSY(x) (((x) & 1) << 26) #define S_008010_CR_BUSY(x) (((x) & 1) << 27) #define S_008010_CP_COHERENCY_BUSY(x) (((x) & 1) << 28) #define S_008010_CP_BUSY(x) (((x) & 1) << 29) #define S_008010_CB03_BUSY(x) (((x) & 1) << 30) #define S_008010_GUI_ACTIVE(x) (((x) & 1) << 31) #define G_008010_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x1F) #define G_008010_CP_RQ_PENDING(x) (((x) >> 6) & 1) #define G_008010_CF_RQ_PENDING(x) (((x) >> 7) & 1) #define G_008010_PF_RQ_PENDING(x) (((x) >> 8) & 1) #define G_008010_GRBM_EE_BUSY(x) (((x) >> 10) & 1) #define G_008010_VC_BUSY(x) (((x) >> 11) & 1) #define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1) #define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1) #define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1) #define G_008010_VGT_BUSY(x) (((x) >> 17) & 1) #define G_008010_TA03_BUSY(x) (((x) >> 18) & 1) #define G_008010_TC_BUSY(x) (((x) >> 19) & 1) #define G_008010_SX_BUSY(x) (((x) >> 20) & 1) #define G_008010_SH_BUSY(x) (((x) >> 21) & 1) #define G_008010_SPI03_BUSY(x) (((x) >> 22) & 1) #define G_008010_SMX_BUSY(x) (((x) >> 23) & 1) #define G_008010_SC_BUSY(x) (((x) >> 24) & 1) #define G_008010_PA_BUSY(x) (((x) >> 25) & 1) #define G_008010_DB03_BUSY(x) (((x) >> 26) & 1) #define G_008010_CR_BUSY(x) (((x) >> 27) & 1) #define G_008010_CP_COHERENCY_BUSY(x) (((x) >> 28) & 1) #define G_008010_CP_BUSY(x) (((x) >> 29) & 1) #define G_008010_CB03_BUSY(x) (((x) >> 30) & 1) #define G_008010_GUI_ACTIVE(x) (((x) >> 31) & 1) #define R_008014_GRBM_STATUS2 0x8014 #define S_008014_CR_CLEAN(x) (((x) & 1) << 0) #define S_008014_SMX_CLEAN(x) (((x) & 1) << 1) #define S_008014_SPI0_BUSY(x) (((x) & 1) << 8) #define S_008014_SPI1_BUSY(x) (((x) & 1) << 9) #define S_008014_SPI2_BUSY(x) (((x) & 1) << 10) #define S_008014_SPI3_BUSY(x) (((x) & 1) << 11) #define S_008014_TA0_BUSY(x) (((x) & 1) << 12) #define S_008014_TA1_BUSY(x) (((x) & 1) << 13) #define S_008014_TA2_BUSY(x) (((x) & 1) << 14) #define S_008014_TA3_BUSY(x) (((x) & 1) << 15) #define S_008014_DB0_BUSY(x) (((x) & 1) << 16) #define S_008014_DB1_BUSY(x) (((x) & 1) << 17) #define S_008014_DB2_BUSY(x) (((x) & 1) << 18) #define S_008014_DB3_BUSY(x) (((x) & 1) << 19) #define S_008014_CB0_BUSY(x) (((x) & 1) << 20) #define S_008014_CB1_BUSY(x) (((x) & 1) << 21) #define S_008014_CB2_BUSY(x) (((x) & 1) << 22) #define S_008014_CB3_BUSY(x) (((x) & 1) << 23) #define G_008014_CR_CLEAN(x) (((x) >> 0) & 1) #define G_008014_SMX_CLEAN(x) (((x) >> 1) & 1) #define G_008014_SPI0_BUSY(x) (((x) >> 8) & 1) #define G_008014_SPI1_BUSY(x) (((x) >> 9) & 1) #define G_008014_SPI2_BUSY(x) (((x) >> 10) & 1) #define G_008014_SPI3_BUSY(x) (((x) >> 11) & 1) #define G_008014_TA0_BUSY(x) (((x) >> 12) & 1) #define 
G_008014_TA1_BUSY(x) (((x) >> 13) & 1) #define G_008014_TA2_BUSY(x) (((x) >> 14) & 1) #define G_008014_TA3_BUSY(x) (((x) >> 15) & 1) #define G_008014_DB0_BUSY(x) (((x) >> 16) & 1) #define G_008014_DB1_BUSY(x) (((x) >> 17) & 1) #define G_008014_DB2_BUSY(x) (((x) >> 18) & 1) #define G_008014_DB3_BUSY(x) (((x) >> 19) & 1) #define G_008014_CB0_BUSY(x) (((x) >> 20) & 1) #define G_008014_CB1_BUSY(x) (((x) >> 21) & 1) #define G_008014_CB2_BUSY(x) (((x) >> 22) & 1) #define G_008014_CB3_BUSY(x) (((x) >> 23) & 1) #define R_000E50_SRBM_STATUS 0x0E50 #define G_000E50_RLC_RQ_PENDING(x) (((x) >> 3) & 1) #define G_000E50_RCU_RQ_PENDING(x) (((x) >> 4) & 1) #define G_000E50_GRBM_RQ_PENDING(x) (((x) >> 5) & 1) #define G_000E50_HI_RQ_PENDING(x) (((x) >> 6) & 1) #define G_000E50_IO_EXTERN_SIGNAL(x) (((x) >> 7) & 1) #define G_000E50_VMC_BUSY(x) (((x) >> 8) & 1) #define G_000E50_MCB_BUSY(x) (((x) >> 9) & 1) #define G_000E50_MCDZ_BUSY(x) (((x) >> 10) & 1) #define G_000E50_MCDY_BUSY(x) (((x) >> 11) & 1) #define G_000E50_MCDX_BUSY(x) (((x) >> 12) & 1) #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) #define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1) #define R_000E60_SRBM_SOFT_RESET 0x0E60 #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) #define S_000E60_SOFT_RESET_CMC(x) (((x) & 1) << 3) #define S_000E60_SOFT_RESET_CSC(x) (((x) & 1) << 4) #define S_000E60_SOFT_RESET_DC(x) (((x) & 1) << 5) #define S_000E60_SOFT_RESET_GRBM(x) (((x) & 1) << 8) #define S_000E60_SOFT_RESET_HDP(x) (((x) & 1) << 9) #define S_000E60_SOFT_RESET_IH(x) (((x) & 1) << 10) #define S_000E60_SOFT_RESET_MC(x) (((x) & 1) << 11) #define S_000E60_SOFT_RESET_RLC(x) (((x) & 1) << 13) #define S_000E60_SOFT_RESET_ROM(x) (((x) & 1) << 14) #define S_000E60_SOFT_RESET_SEM(x) (((x) & 1) << 15) #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 #define R_028C04_PA_SC_AA_CONFIG 0x028C04 #define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0) #define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3) #define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC #define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4) #define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1) #define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF #define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13) #define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF) #define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF #define R_0280E0_CB_COLOR0_FRAG 0x0280E0 #define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) #define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) #define C_0280E0_BASE_256B 0x00000000 #define R_0280E4_CB_COLOR1_FRAG 0x0280E4 #define R_0280E8_CB_COLOR2_FRAG 0x0280E8 #define R_0280EC_CB_COLOR3_FRAG 0x0280EC #define R_0280F0_CB_COLOR4_FRAG 0x0280F0 #define R_0280F4_CB_COLOR5_FRAG 0x0280F4 #define R_0280F8_CB_COLOR6_FRAG 0x0280F8 #define R_0280FC_CB_COLOR7_FRAG 0x0280FC #define R_0280C0_CB_COLOR0_TILE 0x0280C0 #define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) #define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) #define C_0280C0_BASE_256B 0x00000000 #define R_0280C4_CB_COLOR1_TILE 0x0280C4 #define R_0280C8_CB_COLOR2_TILE 0x0280C8 #define R_0280CC_CB_COLOR3_TILE 0x0280CC #define R_0280D0_CB_COLOR4_TILE 0x0280D0 #define R_0280D4_CB_COLOR5_TILE 0x0280D4 #define R_0280D8_CB_COLOR6_TILE 0x0280D8 #define R_0280DC_CB_COLOR7_TILE 0x0280DC #define 
R_0280A0_CB_COLOR0_INFO 0x0280A0 #define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0) #define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3) #define C_0280A0_ENDIAN 0xFFFFFFFC #define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2) #define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F) #define C_0280A0_FORMAT 0xFFFFFF03 #define V_0280A0_COLOR_INVALID 0x00000000 #define V_0280A0_COLOR_8 0x00000001 #define V_0280A0_COLOR_4_4 0x00000002 #define V_0280A0_COLOR_3_3_2 0x00000003 #define V_0280A0_COLOR_16 0x00000005 #define V_0280A0_COLOR_16_FLOAT 0x00000006 #define V_0280A0_COLOR_8_8 0x00000007 #define V_0280A0_COLOR_5_6_5 0x00000008 #define V_0280A0_COLOR_6_5_5 0x00000009 #define V_0280A0_COLOR_1_5_5_5 0x0000000A #define V_0280A0_COLOR_4_4_4_4 0x0000000B #define V_0280A0_COLOR_5_5_5_1 0x0000000C #define V_0280A0_COLOR_32 0x0000000D #define V_0280A0_COLOR_32_FLOAT 0x0000000E #define V_0280A0_COLOR_16_16 0x0000000F #define V_0280A0_COLOR_16_16_FLOAT 0x00000010 #define V_0280A0_COLOR_8_24 0x00000011 #define V_0280A0_COLOR_8_24_FLOAT 0x00000012 #define V_0280A0_COLOR_24_8 0x00000013 #define V_0280A0_COLOR_24_8_FLOAT 0x00000014 #define V_0280A0_COLOR_10_11_11 0x00000015 #define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016 #define V_0280A0_COLOR_11_11_10 0x00000017 #define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018 #define V_0280A0_COLOR_2_10_10_10 0x00000019 #define V_0280A0_COLOR_8_8_8_8 0x0000001A #define V_0280A0_COLOR_10_10_10_2 0x0000001B #define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C #define V_0280A0_COLOR_32_32 0x0000001D #define V_0280A0_COLOR_32_32_FLOAT 0x0000001E #define V_0280A0_COLOR_16_16_16_16 0x0000001F #define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020 #define V_0280A0_COLOR_32_32_32_32 0x00000022 #define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023 #define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8) #define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF) #define C_0280A0_ARRAY_MODE 0xFFFFF0FF #define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000 #define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001 #define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002 #define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004 #define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12) #define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7) #define C_0280A0_NUMBER_TYPE 0xFFFF8FFF #define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15) #define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1) #define C_0280A0_READ_SIZE 0xFFFF7FFF #define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16) #define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3) #define C_0280A0_COMP_SWAP 0xFFFCFFFF #define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18) #define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3) #define C_0280A0_TILE_MODE 0xFFF3FFFF #define V_0280A0_TILE_DISABLE 0 #define V_0280A0_CLEAR_ENABLE 1 #define V_0280A0_FRAG_ENABLE 2 #define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20) #define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1) #define C_0280A0_BLEND_CLAMP 0xFFEFFFFF #define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21) #define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1) #define C_0280A0_CLEAR_COLOR 0xFFDFFFFF #define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22) #define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1) #define C_0280A0_BLEND_BYPASS 0xFFBFFFFF #define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23) #define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1) #define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF #define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24) #define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1) #define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF #define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25) #define G_0280A0_ROUND_MODE(x) 
(((x) >> 25) & 0x1) #define C_0280A0_ROUND_MODE 0xFDFFFFFF #define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26) #define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1) #define C_0280A0_TILE_COMPACT 0xFBFFFFFF #define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27) #define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1) #define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF #define R_0280A4_CB_COLOR1_INFO 0x0280A4 #define R_0280A8_CB_COLOR2_INFO 0x0280A8 #define R_0280AC_CB_COLOR3_INFO 0x0280AC #define R_0280B0_CB_COLOR4_INFO 0x0280B0 #define R_0280B4_CB_COLOR5_INFO 0x0280B4 #define R_0280B8_CB_COLOR6_INFO 0x0280B8 #define R_0280BC_CB_COLOR7_INFO 0x0280BC #define R_028060_CB_COLOR0_SIZE 0x028060 #define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0) #define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF) #define C_028060_PITCH_TILE_MAX 0xFFFFFC00 #define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10) #define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF) #define C_028060_SLICE_TILE_MAX 0xC00003FF #define R_028064_CB_COLOR1_SIZE 0x028064 #define R_028068_CB_COLOR2_SIZE 0x028068 #define R_02806C_CB_COLOR3_SIZE 0x02806C #define R_028070_CB_COLOR4_SIZE 0x028070 #define R_028074_CB_COLOR5_SIZE 0x028074 #define R_028078_CB_COLOR6_SIZE 0x028078 #define R_02807C_CB_COLOR7_SIZE 0x02807C #define R_028238_CB_TARGET_MASK 0x028238 #define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0) #define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF) #define C_028238_TARGET0_ENABLE 0xFFFFFFF0 #define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4) #define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF) #define C_028238_TARGET1_ENABLE 0xFFFFFF0F #define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8) #define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF) #define C_028238_TARGET2_ENABLE 0xFFFFF0FF #define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12) #define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF) #define C_028238_TARGET3_ENABLE 0xFFFF0FFF #define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16) #define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF) #define C_028238_TARGET4_ENABLE 0xFFF0FFFF #define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20) #define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF) #define C_028238_TARGET5_ENABLE 0xFF0FFFFF #define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24) #define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF) #define C_028238_TARGET6_ENABLE 0xF0FFFFFF #define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28) #define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF) #define C_028238_TARGET7_ENABLE 0x0FFFFFFF #define R_02823C_CB_SHADER_MASK 0x02823C #define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0) #define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF) #define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0 #define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4) #define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF) #define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F #define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8) #define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF) #define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF #define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12) #define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF) #define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF #define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16) #define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF) #define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF #define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20) #define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF) #define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF #define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24) 
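Throughout these register headers the macros follow one convention: for each field, S_* shifts a value into position, G_* extracts it from a register word, and C_* is the mask with the field's bits cleared. A short sketch of the read-modify-write update they enable, reusing the TARGET0_ENABLE macros defined just above (the helper name is illustrative, not from the driver):

#include <stdint.h>

/* Replace CB_TARGET_MASK's 4-bit TARGET0_ENABLE field, leaving the
 * other render targets' write masks untouched. */
static uint32_t
set_target0_enable(uint32_t reg, uint32_t channels)
{
	reg &= C_028238_TARGET0_ENABLE;			/* clear the old field */
	reg |= S_028238_TARGET0_ENABLE(channels);	/* insert the new value */
	return (reg);
}

Reading the field back is the mirror image: G_028238_TARGET0_ENABLE(reg) recovers the value that was inserted.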
#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF) #define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF #define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28) #define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF) #define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF #define R_028AB0_VGT_STRMOUT_EN 0x028AB0 #define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0) #define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1) #define C_028AB0_STREAMOUT 0xFFFFFFFE #define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20 #define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0) #define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1) #define C_028B20_BUFFER_0_EN 0xFFFFFFFE #define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1) #define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1) #define C_028B20_BUFFER_1_EN 0xFFFFFFFD #define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2) #define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1) #define C_028B20_BUFFER_2_EN 0xFFFFFFFB #define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3) #define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1) #define C_028B20_BUFFER_3_EN 0xFFFFFFF7 #define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_028B20_SIZE 0x00000000 #define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000 #define S_038000_DIM(x) (((x) & 0x7) << 0) #define G_038000_DIM(x) (((x) >> 0) & 0x7) #define C_038000_DIM 0xFFFFFFF8 #define V_038000_SQ_TEX_DIM_1D 0x00000000 #define V_038000_SQ_TEX_DIM_2D 0x00000001 #define V_038000_SQ_TEX_DIM_3D 0x00000002 #define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003 #define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004 #define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005 #define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006 #define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007 #define S_038000_TILE_MODE(x) (((x) & 0xF) << 3) #define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF) #define C_038000_TILE_MODE 0xFFFFFF87 #define V_038000_ARRAY_LINEAR_GENERAL 0x00000000 #define V_038000_ARRAY_LINEAR_ALIGNED 0x00000001 #define V_038000_ARRAY_1D_TILED_THIN1 0x00000002 #define V_038000_ARRAY_2D_TILED_THIN1 0x00000004 #define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7) #define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1) #define C_038000_TILE_TYPE 0xFFFFFF7F #define S_038000_PITCH(x) (((x) & 0x7FF) << 8) #define G_038000_PITCH(x) (((x) >> 8) & 0x7FF) #define C_038000_PITCH 0xFFF800FF #define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19) #define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF) #define C_038000_TEX_WIDTH 0x0007FFFF #define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004 #define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0) #define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF) #define C_038004_TEX_HEIGHT 0xFFFFE000 #define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13) #define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF) #define C_038004_TEX_DEPTH 0xFC001FFF #define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26) #define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F) #define C_038004_DATA_FORMAT 0x03FFFFFF #define V_038004_COLOR_INVALID 0x00000000 #define V_038004_COLOR_8 0x00000001 #define V_038004_COLOR_4_4 0x00000002 #define V_038004_COLOR_3_3_2 0x00000003 #define V_038004_COLOR_16 0x00000005 #define V_038004_COLOR_16_FLOAT 0x00000006 #define V_038004_COLOR_8_8 0x00000007 #define V_038004_COLOR_5_6_5 0x00000008 #define V_038004_COLOR_6_5_5 0x00000009 #define V_038004_COLOR_1_5_5_5 0x0000000A #define V_038004_COLOR_4_4_4_4 0x0000000B #define V_038004_COLOR_5_5_5_1 0x0000000C #define V_038004_COLOR_32 0x0000000D #define V_038004_COLOR_32_FLOAT 0x0000000E #define V_038004_COLOR_16_16 0x0000000F #define 
V_038004_COLOR_16_16_FLOAT 0x00000010 #define V_038004_COLOR_8_24 0x00000011 #define V_038004_COLOR_8_24_FLOAT 0x00000012 #define V_038004_COLOR_24_8 0x00000013 #define V_038004_COLOR_24_8_FLOAT 0x00000014 #define V_038004_COLOR_10_11_11 0x00000015 #define V_038004_COLOR_10_11_11_FLOAT 0x00000016 #define V_038004_COLOR_11_11_10 0x00000017 #define V_038004_COLOR_11_11_10_FLOAT 0x00000018 #define V_038004_COLOR_2_10_10_10 0x00000019 #define V_038004_COLOR_8_8_8_8 0x0000001A #define V_038004_COLOR_10_10_10_2 0x0000001B #define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C #define V_038004_COLOR_32_32 0x0000001D #define V_038004_COLOR_32_32_FLOAT 0x0000001E #define V_038004_COLOR_16_16_16_16 0x0000001F #define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020 #define V_038004_COLOR_32_32_32_32 0x00000022 #define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023 #define V_038004_FMT_1 0x00000025 #define V_038004_FMT_GB_GR 0x00000027 #define V_038004_FMT_BG_RG 0x00000028 #define V_038004_FMT_32_AS_8 0x00000029 #define V_038004_FMT_32_AS_8_8 0x0000002A #define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B #define V_038004_FMT_8_8_8 0x0000002C #define V_038004_FMT_16_16_16 0x0000002D #define V_038004_FMT_16_16_16_FLOAT 0x0000002E #define V_038004_FMT_32_32_32 0x0000002F #define V_038004_FMT_32_32_32_FLOAT 0x00000030 #define V_038004_FMT_BC1 0x00000031 #define V_038004_FMT_BC2 0x00000032 #define V_038004_FMT_BC3 0x00000033 #define V_038004_FMT_BC4 0x00000034 #define V_038004_FMT_BC5 0x00000035 #define V_038004_FMT_BC6 0x00000036 #define V_038004_FMT_BC7 0x00000037 #define V_038004_FMT_32_AS_32_32_32_32 0x00000038 #define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 #define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) #define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) #define C_038010_FORMAT_COMP_X 0xFFFFFFFC #define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2) #define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3) #define C_038010_FORMAT_COMP_Y 0xFFFFFFF3 #define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4) #define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3) #define C_038010_FORMAT_COMP_Z 0xFFFFFFCF #define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6) #define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3) #define C_038010_FORMAT_COMP_W 0xFFFFFF3F #define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8) #define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3) #define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF #define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10) #define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1) #define C_038010_SRF_MODE_ALL 0xFFFFFBFF #define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11) #define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1) #define C_038010_FORCE_DEGAMMA 0xFFFFF7FF #define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12) #define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3) #define C_038010_ENDIAN_SWAP 0xFFFFCFFF #define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14) #define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3) #define C_038010_REQUEST_SIZE 0xFFFF3FFF #define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16) #define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7) #define C_038010_DST_SEL_X 0xFFF8FFFF #define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19) #define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7) #define C_038010_DST_SEL_Y 0xFFC7FFFF #define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22) #define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7) #define C_038010_DST_SEL_Z 0xFE3FFFFF #define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25) #define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7) #define C_038010_DST_SEL_W 0xF1FFFFFF # define 
SQ_SEL_X 0 # define SQ_SEL_Y 1 # define SQ_SEL_Z 2 # define SQ_SEL_W 3 # define SQ_SEL_0 4 # define SQ_SEL_1 5 #define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28) #define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF) #define C_038010_BASE_LEVEL 0x0FFFFFFF #define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014 #define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0) #define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF) #define C_038014_LAST_LEVEL 0xFFFFFFF0 #define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4) #define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF) #define C_038014_BASE_ARRAY 0xFFFE000F #define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17) #define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF) #define C_038014_LAST_ARRAY 0xC001FFFF #define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8 #define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288A8_ITEMSIZE 0xFFFF8000 #define R_008C44_SQ_ESGS_RING_SIZE 0x008C44 #define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C44_MEM_SIZE 0x00000000 #define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0 #define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288B0_ITEMSIZE 0xFFFF8000 #define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54 #define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C54_MEM_SIZE 0x00000000 #define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0 #define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288C0_ITEMSIZE 0xFFFF8000 #define R_008C74_SQ_FBUF_RING_SIZE 0x008C74 #define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C74_MEM_SIZE 0x00000000 #define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4 #define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288B4_ITEMSIZE 0xFFFF8000 #define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C #define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C5C_MEM_SIZE 0x00000000 #define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC #define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288AC_ITEMSIZE 0xFFFF8000 #define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C #define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C4C_MEM_SIZE 0x00000000 #define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC #define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288BC_ITEMSIZE 0xFFFF8000 #define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C #define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C6C_MEM_SIZE 0x00000000 #define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4 #define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288C4_ITEMSIZE 0xFFFF8000 #define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C #define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C7C_MEM_SIZE 0x00000000 #define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8 #define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define 
C_0288B8_ITEMSIZE 0xFFFF8000 #define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64 #define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) #define C_008C64_MEM_SIZE 0x00000000 #define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8 #define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0) #define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) #define C_0288C8_ITEMSIZE 0xFFFF8000 #define R_028010_DB_DEPTH_INFO 0x028010 #define S_028010_FORMAT(x) (((x) & 0x7) << 0) #define G_028010_FORMAT(x) (((x) >> 0) & 0x7) #define C_028010_FORMAT 0xFFFFFFF8 #define V_028010_DEPTH_INVALID 0x00000000 #define V_028010_DEPTH_16 0x00000001 #define V_028010_DEPTH_X8_24 0x00000002 #define V_028010_DEPTH_8_24 0x00000003 #define V_028010_DEPTH_X8_24_FLOAT 0x00000004 #define V_028010_DEPTH_8_24_FLOAT 0x00000005 #define V_028010_DEPTH_32_FLOAT 0x00000006 #define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007 #define S_028010_READ_SIZE(x) (((x) & 0x1) << 3) #define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1) #define C_028010_READ_SIZE 0xFFFFFFF7 #define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15) #define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF) #define C_028010_ARRAY_MODE 0xFFF87FFF #define V_028010_ARRAY_1D_TILED_THIN1 0x00000002 #define V_028010_ARRAY_2D_TILED_THIN1 0x00000004 #define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25) #define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1) #define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF #define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26) #define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1) #define C_028010_TILE_COMPACT 0xFBFFFFFF #define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31) #define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1) #define C_028010_ZRANGE_PRECISION 0x7FFFFFFF #define R_028000_DB_DEPTH_SIZE 0x028000 #define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0) #define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF) #define C_028000_PITCH_TILE_MAX 0xFFFFFC00 #define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10) #define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF) #define C_028000_SLICE_TILE_MAX 0xC00003FF #define R_028004_DB_DEPTH_VIEW 0x028004 #define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0) #define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF) #define C_028004_SLICE_START 0xFFFFF800 #define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13) #define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF) #define C_028004_SLICE_MAX 0xFF001FFF #define R_028800_DB_DEPTH_CONTROL 0x028800 #define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0) #define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1) #define C_028800_STENCIL_ENABLE 0xFFFFFFFE #define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1) #define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1) #define C_028800_Z_ENABLE 0xFFFFFFFD #define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2) #define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1) #define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB #define S_028800_ZFUNC(x) (((x) & 0x7) << 4) #define G_028800_ZFUNC(x) (((x) >> 4) & 0x7) #define C_028800_ZFUNC 0xFFFFFF8F #define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7) #define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1) #define C_028800_BACKFACE_ENABLE 0xFFFFFF7F #define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8) #define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7) #define C_028800_STENCILFUNC 0xFFFFF8FF #define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11) #define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7) #define C_028800_STENCILFAIL 0xFFFFC7FF #define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14) #define 
G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7) #define C_028800_STENCILZPASS 0xFFFE3FFF #define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17) #define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7) #define C_028800_STENCILZFAIL 0xFFF1FFFF #define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20) #define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7) #define C_028800_STENCILFUNC_BF 0xFF8FFFFF #define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23) #define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7) #define C_028800_STENCILFAIL_BF 0xFC7FFFFF #define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26) #define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7) #define C_028800_STENCILZPASS_BF 0xE3FFFFFF #define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29) #define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7) #define C_028800_STENCILZFAIL_BF 0x1FFFFFFF #endif Index: head/sys/dev/drm2/radeon/radeon_cp.c =================================================================== --- head/sys/dev/drm2/radeon/radeon_cp.c (revision 258779) +++ head/sys/dev/drm2/radeon/radeon_cp.c (revision 258780) @@ -1,2245 +1,2245 @@ /* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ /* * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * Copyright 2007 Advanced Micro Devices, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Kevin E. 
Martin * Gareth Hughes */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include "radeon_drv.h" #include "r300_reg.h" #define RADEON_FIFO_DEBUG 0 /* Firmware Names */ #define FIRMWARE_R100 "radeonkmsfw_R100_cp" #define FIRMWARE_R200 "radeonkmsfw_R200_cp" #define FIRMWARE_R300 "radeonkmsfw_R300_cp" #define FIRMWARE_R420 "radeonkmsfw_R420_cp" #define FIRMWARE_RS690 "radeonkmsfw_RS690_cp" #define FIRMWARE_RS600 "radeonkmsfw_RS600_cp" #define FIRMWARE_R520 "radeonkmsfw_R520_cp" static int radeon_do_cleanup_cp(struct drm_device * dev); static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off) { u32 val; if (dev_priv->flags & RADEON_IS_AGP) { val = DRM_READ32(dev_priv->ring_rptr, off); } else { val = *(((volatile u32 *) dev_priv->ring_rptr->handle) + (off / sizeof(u32))); val = le32_to_cpu(val); } return val; } u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv) { if (dev_priv->writeback_works) return radeon_read_ring_rptr(dev_priv, 0); else { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_CP_RB_RPTR); else return RADEON_READ(RADEON_CP_RB_RPTR); } } void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val) { if (dev_priv->flags & RADEON_IS_AGP) DRM_WRITE32(dev_priv->ring_rptr, off, val); else *(((volatile u32 *) dev_priv->ring_rptr->handle) + (off / sizeof(u32))) = cpu_to_le32(val); } void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val) { radeon_write_ring_rptr(dev_priv, 0, val); } u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index) { if (dev_priv->writeback_works) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(index)); else return radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(index)); } else { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_SCRATCH_REG0 + 4*index); else return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index); } } static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); ret = RADEON_READ(R520_MC_IND_DATA); RADEON_WRITE(R520_MC_IND_INDEX, 0); return ret; } static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); ret = RADEON_READ(RS480_NB_MC_DATA); RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); return ret; } static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); ret = RADEON_READ(RS690_MC_DATA); RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); return ret; } static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); ret = RADEON_READ(RS600_MC_DATA); return ret; } static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) return RS690_READ_MCIND(dev_priv, addr); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) return RS600_READ_MCIND(dev_priv, addr); else return RS480_READ_MCIND(dev_priv, addr); } u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) return RADEON_READ(R700_MC_VM_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= 
CHIP_R600) return RADEON_READ(R600_MC_VM_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); else return RADEON_READ(RADEON_MC_FB_LOCATION); } static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); else RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); } void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) { /*R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */ RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */ RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); else RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); } void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) { u32 agp_base_hi = upper_32_bits(agp_base); u32 agp_base_lo = agp_base & 0xffffffff; u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff; /* R6xx/R7xx must be aligned to a 4MB boundary */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { RS600_WRITE_MCIND(RS600_AGP_BASE, 
agp_base_lo); RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); } else { RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); } } void radeon_enable_bm(struct drm_radeon_private *dev_priv) { u32 tmp; /* Turn on bus mastering */ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { /* rs600/rs690/rs740 */ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); } /* PCIE cards appears to not need this */ } static int RADEON_READ_PLL(struct drm_device * dev, int addr) { drm_radeon_private_t *dev_priv = dev->dev_private; RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); return RADEON_READ(RADEON_CLOCK_CNTL_DATA); } static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) { RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); return RADEON_READ(RADEON_PCIE_DATA); } #if RADEON_FIFO_DEBUG static void radeon_status(drm_radeon_private_t * dev_priv) { printk("%s:\n", __func__); printk("RBBM_STATUS = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); printk("CP_RB_RTPR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR)); printk("CP_RB_WTPR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR)); printk("AIC_CNTL = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_CNTL)); printk("AIC_STAT = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_STAT)); printk("AIC_PT_BASE = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE)); printk("TLB_ADDR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR)); printk("TLB_DATA = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA)); } #endif /* ================================================================ * Engine, FIFO control */ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) { u32 tmp; int i; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT); tmp |= RADEON_RB3D_DC_FLUSH_ALL; RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp); for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) & RADEON_RB3D_DC_BUSY)) { return 0; } DRM_UDELAY(1); } } else { /* don't flush or purge cache here or lockup */ return 0; } #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) { int i; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; for (i = 0; i < dev_priv->usec_timeout; i++) { int slots = (RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK); if (slots >= entries) return 0; DRM_UDELAY(1); } 
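	/*
	 * Falling out of the polling loop above means the FIFO never
	 * reported the requested number of free slots: each iteration
	 * reads RBBM_STATUS, masks out the free-slot count, and backs
	 * off one microsecond, up to usec_timeout tries, before the
	 * failure path below returns -EBUSY.
	 */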
DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n", RADEON_READ(RADEON_RBBM_STATUS), RADEON_READ(R300_VAP_CNTL_STATUS)); #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) { int i, ret; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ret = radeon_do_wait_for_fifo(dev_priv, 64); if (ret) return ret; for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)) { radeon_do_pixcache_flush(dev_priv); return 0; } DRM_UDELAY(1); } DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n", RADEON_READ(RADEON_RBBM_STATUS), RADEON_READ(R300_VAP_CNTL_STATUS)); #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static void radeon_init_pipes(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; uint32_t gb_tile_config, gb_pipe_sel = 0; if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2); if ((z_pipe_sel & 3) == 3) dev_priv->num_z_pipes = 2; else dev_priv->num_z_pipes = 1; } else dev_priv->num_z_pipes = 1; /* RS4xx/RS6xx/R4xx/R5xx */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; /* SE cards have 1 pipe */ if ((dev->pci_device == 0x5e4c) || (dev->pci_device == 0x5e4f)) dev_priv->num_gb_pipes = 1; } else { /* R3xx */ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && dev->pci_device != 0x4144) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 && dev->pci_device != 0x4148)) { dev_priv->num_gb_pipes = 2; } else { /* RV3xx/R300 AD/R350 AH */ dev_priv->num_gb_pipes = 1; } } DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes); gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); switch (dev_priv->num_gb_pipes) { case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; default: case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; } if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); } RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); radeon_do_wait_for_idle(dev_priv); RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG); RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) | R300_DC_AUTOFLUSH_ENABLE | R300_DC_DC_DISABLE_IGNORE_PE)); } /* ================================================================ * CP control, initialization */ /* Load the microcode for the CP */ static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv) { const char *fw_name = NULL; int err; DRM_DEBUG("\n"); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) { DRM_INFO("Loading R100 Microcode\n"); fw_name = FIRMWARE_R100; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) || 
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) { DRM_INFO("Loading R200 Microcode\n"); fw_name = FIRMWARE_R200; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { DRM_INFO("Loading R300 Microcode\n"); fw_name = FIRMWARE_R300; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { DRM_INFO("Loading R400 Microcode\n"); fw_name = FIRMWARE_R420; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { DRM_INFO("Loading RS690/RS740 Microcode\n"); fw_name = FIRMWARE_RS690; } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { DRM_INFO("Loading RS600 Microcode\n"); fw_name = FIRMWARE_RS600; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) { DRM_INFO("Loading R500 Microcode\n"); fw_name = FIRMWARE_R520; } err = 0; dev_priv->me_fw = firmware_get(fw_name); if (dev_priv->me_fw == NULL) { err = -ENOENT; DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n", fw_name); } else if (dev_priv->me_fw->datasize % 8) { DRM_ERROR( "radeon_cp: Bogus length %zu in firmware \"%s\"\n", dev_priv->me_fw->datasize, fw_name); err = -EINVAL; firmware_put(dev_priv->me_fw, FIRMWARE_UNLOAD); dev_priv->me_fw = NULL; } return err; } static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv) { const __be32 *fw_data; int i, size; radeon_do_wait_for_idle(dev_priv); if (dev_priv->me_fw) { size = dev_priv->me_fw->datasize / 4; fw_data = (const __be32 *)dev_priv->me_fw->data; RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); for (i = 0; i < size; i += 2) { RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, be32_to_cpup(&fw_data[i])); RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, be32_to_cpup(&fw_data[i + 1])); } } } /* Flush any pending commands to the CP. This should only be used just * prior to a wait for idle, as it informs the engine that the command * stream is ending. */ static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) { DRM_DEBUG("\n"); #if 0 u32 tmp; - tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); + tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1U << 31); RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); #endif } /* Wait for the CP to go idle. */ int radeon_do_cp_idle(drm_radeon_private_t * dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(6); RADEON_PURGE_CACHE(); RADEON_PURGE_ZCACHE(); RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); COMMIT_RING(); return radeon_do_wait_for_idle(dev_priv); } /* Start the Command Processor. */ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); radeon_do_wait_for_idle(dev_priv); RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode); dev_priv->cp_running = 1; /* on r420, any DMA from CP to system memory while 2D is active * can cause a hang. 
The workaround is to queue a CP RESYNC token */ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) { BEGIN_RING(3); OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1)); OUT_RING(5); /* scratch reg 5 */ OUT_RING(0xdeadbeef); ADVANCE_RING(); COMMIT_RING(); } BEGIN_RING(8); /* isync can only be written through the CP on r5xx, so write it here */ OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI); RADEON_PURGE_CACHE(); RADEON_PURGE_ZCACHE(); RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); COMMIT_RING(); dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; } /* Reset the Command Processor. This will not flush any pending * commands, so you must wait for the CP command stream to complete * before calling this routine. */ static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) { u32 cur_read_ptr; DRM_DEBUG("\n"); cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); SET_RING_HEAD(dev_priv, cur_read_ptr); dev_priv->ring.tail = cur_read_ptr; } /* Stop the Command Processor. This will not flush any pending * commands, so you must flush the command stream and wait for the CP * to go idle before calling this routine. */ static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); /* finish the pending CP_RESYNC token */ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) { BEGIN_RING(2); OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); OUT_RING(R300_RB3D_DC_FINISH); ADVANCE_RING(); COMMIT_RING(); radeon_do_wait_for_idle(dev_priv); } RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); dev_priv->cp_running = 0; } /* Reset the engine. This will stop the CP if it is running.
*/ static int radeon_do_engine_reset(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset; DRM_DEBUG("\n"); radeon_do_pixcache_flush(dev_priv); if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { /* may need something similar for newer chips */ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | RADEON_FORCEON_MCLKA | RADEON_FORCEON_MCLKB | RADEON_FORCEON_YCLKA | RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC | RADEON_FORCEON_AIC)); } rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | RADEON_SOFT_RESET_CP | RADEON_SOFT_RESET_HI | RADEON_SOFT_RESET_SE | RADEON_SOFT_RESET_RE | RADEON_SOFT_RESET_PP | RADEON_SOFT_RESET_E2 | RADEON_SOFT_RESET_RB)); RADEON_READ(RADEON_RBBM_SOFT_RESET); RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & ~(RADEON_SOFT_RESET_CP | RADEON_SOFT_RESET_HI | RADEON_SOFT_RESET_SE | RADEON_SOFT_RESET_RE | RADEON_SOFT_RESET_PP | RADEON_SOFT_RESET_E2 | RADEON_SOFT_RESET_RB))); RADEON_READ(RADEON_RBBM_SOFT_RESET); if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); } /* setup the raster pipes */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) radeon_init_pipes(dev); /* Reset the CP ring */ radeon_do_cp_reset(dev_priv); /* The CP is no longer running after an engine reset */ dev_priv->cp_running = 0; /* Reset any pending vertex, indirect buffers */ radeon_freelist_reset(dev); return 0; } static void radeon_cp_init_ring_buffer(struct drm_device * dev, drm_radeon_private_t *dev_priv, struct drm_file *file_priv) { struct drm_radeon_master_private *master_priv; u32 ring_start, cur_read_ptr; /* Initialize the memory controller. With new memory map, the fb location * is not changed, it should have been properly initialized already. 
Part * of the problem is that the code below is bogus, assuming the GART is * always appended to the fb which is not necessarily the case */ if (!dev_priv->new_memmap) radeon_write_fb_location(dev_priv, ((dev_priv->gart_vm_start - 1) & 0xffff0000) | (dev_priv->fb_location >> 16)); #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { radeon_write_agp_base(dev_priv, dev->agp->base); radeon_write_agp_location(dev_priv, (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 0xffff0000) | (dev_priv->gart_vm_start >> 16))); ring_start = (dev_priv->cp_ring->offset - dev->agp->base + dev_priv->gart_vm_start); } else #endif ring_start = (dev_priv->cp_ring->offset - (unsigned long)dev->sg->vaddr + dev_priv->gart_vm_start); RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); /* Set the write pointer delay */ RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0); /* Initialize the ring buffer's read and write pointers */ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); SET_RING_HEAD(dev_priv, cur_read_ptr); dev_priv->ring.tail = cur_read_ptr; #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset - dev->agp->base + dev_priv->gart_vm_start); } else #endif { RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset - ((unsigned long) dev->sg->vaddr) + dev_priv->gart_vm_start); } /* Set ring buffer size */ #ifdef __BIG_ENDIAN RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_BUF_SWAP_32BIT | (dev_priv->ring.fetch_size_l2ow << 18) | (dev_priv->ring.rptr_update_l2qw << 8) | dev_priv->ring.size_l2qw); #else RADEON_WRITE(RADEON_CP_RB_CNTL, (dev_priv->ring.fetch_size_l2ow << 18) | (dev_priv->ring.rptr_update_l2qw << 8) | dev_priv->ring.size_l2qw); #endif /* Initialize the scratch register pointer. This will cause * the scratch register values to be written out to memory * whenever they are updated. 
* * We simply put this behind the ring read pointer, this works * with PCI GART as well as (whatever kind of) AGP GART */ RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) + RADEON_SCRATCH_REG_OFFSET); RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); radeon_enable_bm(dev_priv); radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0); RADEON_WRITE(RADEON_LAST_FRAME_REG, 0); radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0); RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0); radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0); RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); /* reset sarea copies of these */ master_priv = file_priv->masterp->driver_priv; if (master_priv->sarea_priv) { master_priv->sarea_priv->last_frame = 0; master_priv->sarea_priv->last_dispatch = 0; master_priv->sarea_priv->last_clear = 0; } radeon_do_wait_for_idle(dev_priv); /* Sync everything up */ RADEON_WRITE(RADEON_ISYNC_CNTL, (RADEON_ISYNC_ANY2D_IDLE3D | RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI)); } static void radeon_test_writeback(drm_radeon_private_t * dev_priv) { u32 tmp; /* Start with assuming that writeback doesn't work */ dev_priv->writeback_works = 0; /* Writeback doesn't seem to work everywhere, test it here and possibly * enable it if it appears to work */ radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0); RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { u32 val; val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1)); if (val == 0xdeadbeef) break; DRM_UDELAY(1); } if (tmp < dev_priv->usec_timeout) { dev_priv->writeback_works = 1; DRM_INFO("writeback test succeeded in %d usecs\n", tmp); } else { dev_priv->writeback_works = 0; DRM_INFO("writeback test failed\n"); } if (radeon_no_wb == 1) { dev_priv->writeback_works = 0; DRM_INFO("writeback forced off\n"); } if (!dev_priv->writeback_works) { /* Disable writeback to avoid unnecessary bus master transfer */ RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE); RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); } } /* Enable or disable IGP GART on the chip */ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) { u32 temp; if (on) { DRM_DEBUG("programming igp gart %08X %08lX %08X\n", dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr, dev_priv->gart_size); temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN)); else IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | RS480_VA_SIZE_32MB)); temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | RS480_TLB_ENABLE | RS480_GTW_LAC_EN | RS480_1LEVEL_GART)); temp = dev_priv->gart_info.bus_addr & 0xfffff000; temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4; IGP_WRITE_MCIND(RS480_GART_BASE, temp); temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL); IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS)); radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); dev_priv->gart_size = 32*1024*1024; temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 0xffff0000) | (dev_priv->gart_vm_start >> 16)); radeon_write_agp_location(dev_priv, temp); temp = 
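/*
 * radeon_test_writeback() above probes scratch write-back with a sentinel
 * value: clear the in-memory copy, poke 0xdeadbeef into the scratch
 * register, then poll the memory copy until it matches or the usec
 * timeout expires. The same probe condensed into a sketch;
 * radeon_probe_writeback() is a hypothetical name:
 */
static int radeon_probe_writeback(drm_radeon_private_t *dev_priv)
{
	int i;

	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
	RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (radeon_read_ring_rptr(dev_priv,
		    RADEON_SCRATCHOFF(1)) == 0xdeadbeef)
			return 1;	/* chip copied the value back to memory */
		DRM_UDELAY(1);
	}
	return 0;			/* no write-back observed */
}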
IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | RS480_VA_SIZE_32MB)); do { temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) break; DRM_UDELAY(1); } while (1); IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE); do { temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) break; DRM_UDELAY(1); } while (1); IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); } else { IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0); } } /* Enable or disable IGP GART on the chip */ static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on) { u32 temp; int i; if (on) { DRM_DEBUG("programming igp gart %08X %08lX %08X\n", dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr, dev_priv->gart_size); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); for (i = 0; i < 19; i++) IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i, (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | RS600_SYSTEM_ACCESS_MODE_IN_SYS | RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH | RS600_EFFECTIVE_L1_CACHE_SIZE(3) | RS600_ENABLE_FRAGMENT_PROCESSING | RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); /* disable all other contexts */ for (i = 1; i < 8; i++) IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); /* setup the page table aperture */ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, dev_priv->gart_info.bus_addr); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, dev_priv->gart_vm_start); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1)); IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); /* setup the system aperture */ IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start); IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1)); /* enable page tables */ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT)); temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1); IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES)); /* invalidate the cache */ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); } else { IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0); temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1); temp &= ~RS600_ENABLE_PAGE_TABLES; IGP_WRITE_MCIND(RS600_MC_CNTL1, temp); } } static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) { u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); if (on) { DRM_DEBUG("programming pcie %08X %08lX %08X\n", dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr, dev_priv->gart_size); RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, dev_priv->gart_vm_start); RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, dev_priv->gart_info.bus_addr); 
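/*
 * The RS480 invalidate handshake above spins in unbounded do/while loops
 * until RS480_GART_CACHE_INVALIDATE reads back clear. A bounded variant
 * of that poll, sketched with the driver's usec_timeout convention;
 * rs480_wait_cache_idle() is a hypothetical helper:
 */
static int rs480_wait_cache_idle(drm_radeon_private_t *dev_priv)
{
	u32 temp;
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
		if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
			return 0;	/* invalidate completed */
		DRM_UDELAY(1);
	}
	return -EBUSY;			/* cache never went idle */
}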
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, dev_priv->gart_vm_start); RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, dev_priv->gart_vm_start + dev_priv->gart_size - 1); radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, RADEON_PCIE_TX_GART_EN); } else { RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); } } /* Enable or disable PCI GART on the chip */ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) { u32 tmp; if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) || (dev_priv->flags & RADEON_IS_IGPGART)) { radeon_set_igpgart(dev_priv, on); return; } if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { rs600_set_igpgart(dev_priv, on); return; } if (dev_priv->flags & RADEON_IS_PCIE) { radeon_set_pciegart(dev_priv, on); return; } tmp = RADEON_READ(RADEON_AIC_CNTL); if (on) { RADEON_WRITE(RADEON_AIC_CNTL, tmp | RADEON_PCIGART_TRANSLATE_EN); /* set PCI GART page-table base address */ RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr); /* set address range for PCI address translate */ RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start); RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start + dev_priv->gart_size - 1); /* Turn off AGP aperture -- is this required for PCI GART? */ radeon_write_agp_location(dev_priv, 0xffffffc0); RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ } else { RADEON_WRITE(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); } } static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv) { struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info; struct radeon_virt_surface *vp; int i; for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) { if (!dev_priv->virt_surfaces[i].file_priv || dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV) break; } if (i >= 2 * RADEON_MAX_SURFACES) return -ENOMEM; vp = &dev_priv->virt_surfaces[i]; for (i = 0; i < RADEON_MAX_SURFACES; i++) { struct radeon_surface *sp = &dev_priv->surfaces[i]; if (sp->refcount) continue; vp->surface_index = i; vp->lower = gart_info->bus_addr; vp->upper = vp->lower + gart_info->table_size; vp->flags = 0; vp->file_priv = PCIGART_FILE_PRIV; sp->refcount = 1; sp->lower = vp->lower; sp->upper = vp->upper; sp->flags = 0; RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags); RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower); RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper); return 0; } return -ENOMEM; } static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = file_priv->masterp->driver_priv; DRM_DEBUG("\n"); /* if we require new memory map but we don't have it fail */ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { DRM_DEBUG("Forcing AGP card to PCI mode\n"); dev_priv->flags &= ~RADEON_IS_AGP; } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) && !init->is_pci) { DRM_DEBUG("Restoring AGP flag\n"); dev_priv->flags |= RADEON_IS_AGP; } if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { DRM_ERROR("PCI GART memory not allocated!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } 
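/*
 * radeon_set_pcigart() above fans out to four GART back ends based on
 * chip family and flags. The same decision restated as a small helper for
 * illustration; radeon_gart_backend() is hypothetical and not part of
 * this change:
 */
static const char *radeon_gart_backend(drm_radeon_private_t *dev_priv)
{
	u32 fam = dev_priv->flags & RADEON_FAMILY_MASK;

	if (fam == CHIP_RS690 || fam == CHIP_RS740 ||
	    (dev_priv->flags & RADEON_IS_IGPGART))
		return "igp";		/* RS480-style IGP GART */
	if (fam == CHIP_RS600)
		return "rs600";		/* RS600 page tables */
	if (dev_priv->flags & RADEON_IS_PCIE)
		return "pcie";		/* PCIE TX GART */
	return "pci";			/* classic AIC PCI GART */
}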
dev_priv->usec_timeout = init->usec_timeout; if (dev_priv->usec_timeout < 1 || dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { DRM_DEBUG("TIMEOUT problem!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } /* Enable vblank on CRTC1 for older X servers */ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; switch(init->func) { case RADEON_INIT_R200_CP: dev_priv->microcode_version = UCODE_R200; break; case RADEON_INIT_R300_CP: dev_priv->microcode_version = UCODE_R300; break; default: dev_priv->microcode_version = UCODE_R100; } dev_priv->do_boxes = 0; dev_priv->cp_mode = init->cp_mode; /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring * read pointer. */ if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); radeon_do_cleanup_cp(dev); return -EINVAL; } switch (init->fb_bpp) { case 16: dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; break; case 32: default: dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; break; } dev_priv->front_offset = init->front_offset; dev_priv->front_pitch = init->front_pitch; dev_priv->back_offset = init->back_offset; dev_priv->back_pitch = init->back_pitch; switch (init->depth_bpp) { case 16: dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; break; case 32: default: dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; break; } dev_priv->depth_offset = init->depth_offset; dev_priv->depth_pitch = init->depth_pitch; /* Hardware state for depth clears. Remove this if/when we no * longer clear the depth buffer with a 3D rectangle. Hard-code * all values to prevent unwanted 3D state from slipping through * and screwing with the clear operation. */ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | (dev_priv->color_fmt << 10) | (dev_priv->microcode_version == UCODE_R100 ? 
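/*
 * The fb_bpp and depth_bpp switches above reduce to small bpp-to-format
 * mappings, with 32 bpp doubling as the default. A hedged restatement as
 * helpers; both function names are hypothetical:
 */
static u32 radeon_color_fmt_for_bpp(int fb_bpp)
{
	return (fb_bpp == 16) ? RADEON_COLOR_FORMAT_RGB565 :
	    RADEON_COLOR_FORMAT_ARGB8888;	/* 32 bpp and default */
}

static u32 radeon_depth_fmt_for_bpp(int depth_bpp)
{
	return (depth_bpp == 16) ? RADEON_DEPTH_FORMAT_16BIT_INT_Z :
	    RADEON_DEPTH_FORMAT_24BIT_INT_Z;	/* 32 bpp and default */
}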
RADEON_ZBLOCK16 : 0)); dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | RADEON_Z_TEST_ALWAYS | RADEON_STENCIL_TEST_ALWAYS | RADEON_STENCIL_S_FAIL_REPLACE | RADEON_STENCIL_ZPASS_REPLACE | RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE); dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | RADEON_BFACE_SOLID | RADEON_FFACE_SOLID | RADEON_FLAT_SHADE_VTX_LAST | RADEON_DIFFUSE_SHADE_FLAT | RADEON_ALPHA_SHADE_FLAT | RADEON_SPECULAR_SHADE_FLAT | RADEON_FOG_SHADE_FLAT | RADEON_VTX_PIX_CENTER_OGL | RADEON_ROUND_MODE_TRUNC | RADEON_ROUND_PREC_8TH_PIX); dev_priv->ring_offset = init->ring_offset; dev_priv->ring_rptr_offset = init->ring_rptr_offset; dev_priv->buffers_offset = init->buffers_offset; dev_priv->gart_textures_offset = init->gart_textures_offset; master_priv->sarea = drm_getsarea(dev); if (!master_priv->sarea) { DRM_ERROR("could not find sarea!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); if (!dev_priv->cp_ring) { DRM_ERROR("could not find cp ring region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); if (!dev_priv->ring_rptr) { DRM_ERROR("could not find ring read pointer!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } if (init->gart_textures_offset) { dev_priv->gart_textures = drm_core_findmap(dev, init->gart_textures_offset); if (!dev_priv->gart_textures) { DRM_ERROR("could not find GART texture region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } } #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { drm_core_ioremap_wc(dev_priv->cp_ring, dev); drm_core_ioremap_wc(dev_priv->ring_rptr, dev); drm_core_ioremap_wc(dev->agp_buffer_map, dev); if (!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || !dev->agp_buffer_map->handle) { DRM_ERROR("could not find ioremap agp regions!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } } else #endif { dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset; dev_priv->ring_rptr->handle = (void *)(unsigned long)dev_priv->ring_rptr->offset; dev->agp_buffer_map->handle = (void *)(unsigned long)dev->agp_buffer_map->offset; DRM_DEBUG("dev_priv->cp_ring->handle %p\n", dev_priv->cp_ring->handle); DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", dev_priv->ring_rptr->handle); DRM_DEBUG("dev->agp_buffer_map->handle %p\n", dev->agp_buffer_map->handle); } dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; dev_priv->fb_size = ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) - dev_priv->fb_location; dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | ((dev_priv->front_offset + dev_priv->fb_location) >> 10)); dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) | ((dev_priv->back_offset + dev_priv->fb_location) >> 10)); dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) | ((dev_priv->depth_offset + dev_priv->fb_location) >> 10)); dev_priv->gart_size = init->gart_size; /* New let's set the memory map ... */ if (dev_priv->new_memmap) { u32 base = 0; DRM_INFO("Setting GART location based on new memory map\n"); /* If using AGP, try to locate the AGP aperture at the same * location in the card and on the bus, though we have to * align it down. 
*/ #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { base = dev->agp->base; /* Check if valid */ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", dev->agp->base); base = 0; } } #endif /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ if (base == 0) { base = dev_priv->fb_location + dev_priv->fb_size; if (base < dev_priv->fb_location || ((base + dev_priv->gart_size) & 0xfffffffful) < base) base = dev_priv->fb_location - dev_priv->gart_size; } dev_priv->gart_vm_start = base & 0xffc00000u; if (dev_priv->gart_vm_start != base) DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", base, dev_priv->gart_vm_start); } else { DRM_INFO("Setting GART location based on old memory map\n"); dev_priv->gart_vm_start = dev_priv->fb_location + RADEON_READ(RADEON_CONFIG_APER_SIZE); } #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset - dev->agp->base + dev_priv->gart_vm_start); else #endif dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset - (unsigned long)dev->sg->vaddr + dev_priv->gart_vm_start); DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start); DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n", dev_priv->gart_buffers_offset); dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle; dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle + init->ring_size / sizeof(u32)); dev_priv->ring.size = init->ring_size; dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); dev_priv->ring.fetch_size = /* init->fetch_size */ 32; dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); } else #endif { u32 sctrl; int ret; dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); /* if we have an offset set from userspace */ if (dev_priv->pcigart_offset_set) { dev_priv->gart_info.bus_addr = (resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location; dev_priv->gart_info.mapping.offset = dev_priv->pcigart_offset + dev_priv->fb_aper_offset; dev_priv->gart_info.mapping.size = dev_priv->gart_info.table_size; drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = dev_priv->gart_info.mapping.handle; if (dev_priv->flags & RADEON_IS_PCIE) dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; else dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB; DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", dev_priv->gart_info.addr, dev_priv->pcigart_offset); } else { if (dev_priv->flags & RADEON_IS_IGPGART) dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; else dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; dev_priv->gart_info.addr = NULL; dev_priv->gart_info.bus_addr = 0; if (dev_priv->flags & RADEON_IS_PCIE) { DRM_ERROR ("Cannot use PCI Express without GART in FB memory\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } } sctrl = RADEON_READ(RADEON_SURFACE_CNTL); RADEON_WRITE(RADEON_SURFACE_CNTL, 0); if 
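/*
 * The new-memmap placement above puts the GART after the framebuffer and
 * aligns the result down with mask 0xffc00000, i.e. to a 4 MB boundary.
 * A sketch of just that fallback arithmetic, mirroring the fields used in
 * this file; radeon_place_gart() is hypothetical:
 */
static u32 radeon_place_gart(drm_radeon_private_t *dev_priv)
{
	u32 base = dev_priv->fb_location + dev_priv->fb_size;

	/* if the region would wrap past 4 GB, fall back below the fb */
	if (base < dev_priv->fb_location ||
	    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
		base = dev_priv->fb_location - dev_priv->gart_size;

	return base & 0xffc00000u;	/* align down to 4 MB */
}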
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) ret = r600_page_table_init(dev); else ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info); RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl); if (!ret) { DRM_ERROR("failed to init PCI GART!\n"); radeon_do_cleanup_cp(dev); return -ENOMEM; } ret = radeon_setup_pcigart_surface(dev_priv); if (ret) { DRM_ERROR("failed to setup GART surface!\n"); if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) r600_page_table_cleanup(dev, &dev_priv->gart_info); else drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info); radeon_do_cleanup_cp(dev); return ret; } /* Turn on PCI GART */ radeon_set_pcigart(dev_priv, 1); } if (!dev_priv->me_fw) { int err = radeon_cp_init_microcode(dev_priv); if (err) { DRM_ERROR("Failed to load firmware!\n"); radeon_do_cleanup_cp(dev); return err; } } radeon_cp_load_microcode(dev_priv); radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); dev_priv->last_buf = 0; radeon_do_engine_reset(dev); radeon_test_writeback(dev_priv); return 0; } static int radeon_do_cleanup_cp(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. */ if (dev->irq_enabled) drm_irq_uninstall(dev); #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { if (dev_priv->cp_ring != NULL) { drm_core_ioremapfree(dev_priv->cp_ring, dev); dev_priv->cp_ring = NULL; } if (dev_priv->ring_rptr != NULL) { drm_core_ioremapfree(dev_priv->ring_rptr, dev); dev_priv->ring_rptr = NULL; } if (dev->agp_buffer_map != NULL) { drm_core_ioremapfree(dev->agp_buffer_map, dev); dev->agp_buffer_map = NULL; } } else #endif { if (dev_priv->gart_info.bus_addr) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) r600_page_table_cleanup(dev, &dev_priv->gart_info); else { if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) DRM_ERROR("failed to cleanup PCI GART!\n"); } } if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = NULL; } } /* only clear to the start of flags */ memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); return 0; } /* This code will reinit the Radeon CP hardware after a resume from disc. * AFAIK, it would be very difficult to pickle the state at suspend time, so * here we make sure that all Radeon hardware initialisation is re-done without * affecting running applications. * * Charl P. 
Botha */
static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("Called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);

	dev_priv->have_z_offset = 0;
	radeon_do_engine_reset(dev);
	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);

	DRM_DEBUG("radeon_do_resume_cp() complete\n");

	return 0;
}

int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (init->func == RADEON_INIT_R300_CP)
		r300_init_reg_flags(dev);

	switch (init->func) {
	case RADEON_INIT_CP:
	case RADEON_INIT_R200_CP:
	case RADEON_INIT_R300_CP:
		return radeon_do_init_cp(dev, init, file_priv);
	case RADEON_INIT_R600_CP:
		return r600_do_init_cp(dev, init, file_priv);
		break;
	case RADEON_CLEANUP_CP:
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			return r600_do_cleanup_cp(dev);
		else
			return radeon_do_cleanup_cp(dev);
	}

	return -EINVAL;
}

int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (dev_priv->cp_running) {
		DRM_DEBUG("while CP running\n");
		return 0;
	}
	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
		DRM_DEBUG("called with bogus CP mode (%d)\n",
			  dev_priv->cp_mode);
		return 0;
	}

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_do_cp_start(dev_priv);
	else
		radeon_do_cp_start(dev_priv);

	return 0;
}

/* Stop the CP. The engine must have been idled before calling this
 * routine.
 */
int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_cp_stop_t *stop = data;
	int ret;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv->cp_running)
		return 0;

	/* Flush any pending CP commands. This ensures any outstanding
	 * commands are executed by the engine before we turn it off.
	 */
	if (stop->flush) {
		radeon_do_cp_flush(dev_priv);
	}

	/* If we fail to make the engine go idle, we return an error
	 * code so that the DRM ioctl wrapper can try again.
	 */
	if (stop->idle) {
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			ret = r600_do_cp_idle(dev_priv);
		else
			ret = radeon_do_cp_idle(dev_priv);
		if (ret)
			return ret;
	}

	/* Finally, we can turn off the CP. If the engine isn't idle,
	 * we will get some dropped triangles as they won't be fully
	 * rendered before the CP is shut down.
*/ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_cp_stop(dev_priv); else radeon_do_cp_stop(dev_priv); /* Reset the engine */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_engine_reset(dev); else radeon_do_engine_reset(dev); return 0; } void radeon_do_release(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; int i, ret; if (dev_priv) { if (dev_priv->cp_running) { /* Stop the cp */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { while ((ret = r600_do_cp_idle(dev_priv)) != 0) { DRM_DEBUG("radeon_do_cp_idle %d\n", ret); #ifdef __linux__ schedule(); #else tsleep(&ret, PZERO, "rdnrel", 1); #endif } } else { while ((ret = radeon_do_cp_idle(dev_priv)) != 0) { DRM_DEBUG("radeon_do_cp_idle %d\n", ret); #ifdef __linux__ schedule(); #else tsleep(&ret, PZERO, "rdnrel", 1); #endif } } if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { r600_do_cp_stop(dev_priv); r600_do_engine_reset(dev); } else { radeon_do_cp_stop(dev_priv); radeon_do_engine_reset(dev); } } if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) { /* Disable *all* interrupts */ if (dev_priv->mmio) /* remove this after permanent addmaps */ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); if (dev_priv->mmio) { /* remove all surfaces */ for (i = 0; i < RADEON_MAX_SURFACES; i++) { RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0); RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, 0); RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, 0); } } } /* Free memory heap structures */ radeon_mem_takedown(&(dev_priv->gart_heap)); radeon_mem_takedown(&(dev_priv->fb_heap)); /* deallocate kernel resources */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_cleanup_cp(dev); else radeon_do_cleanup_cp(dev); if (dev_priv->me_fw != NULL) { firmware_put(dev_priv->me_fw, FIRMWARE_UNLOAD); dev_priv->me_fw = NULL; } if (dev_priv->pfp_fw != NULL) { firmware_put(dev_priv->pfp_fw, FIRMWARE_UNLOAD); dev_priv->pfp_fw = NULL; } } } /* Just reset the CP ring. Called as part of an X Server engine reset. */ int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { DRM_DEBUG("called before init done\n"); return -EINVAL; } if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) r600_do_cp_reset(dev_priv); else radeon_do_cp_reset(dev_priv); /* The CP is no longer running after an engine reset */ dev_priv->cp_running = 0; return 0; } int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return r600_do_cp_idle(dev_priv); else return radeon_do_cp_idle(dev_priv); } /* Added by Charl P. Botha to call radeon_do_resume_cp(). 
*/ int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return r600_do_resume_cp(dev, file_priv); else return radeon_do_resume_cp(dev, file_priv); } int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return r600_do_engine_reset(dev); else return radeon_do_engine_reset(dev); } /* ================================================================ * Fullscreen mode */ /* KW: Deprecated to say the least: */ int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) { return 0; } /* ================================================================ * Freelist management */ /* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through * bufs until freelist code is used. Note this hides a problem with * the scratch register * (used to keep track of last buffer * completed) being written to before * the last buffer has actually * completed rendering. * * KW: It's also a good way to find free buffers quickly. * * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't * sleep. However, bugs in older versions of radeon_accel.c mean that * we essentially have to do this, else old clients will break. * * However, it does leave open a potential deadlock where all the * buffers are held by other clients, which can't release them because * they can't get the lock. */ struct drm_buf *radeon_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; struct drm_buf *buf; int i, t; int start; if (++dev_priv->last_buf >= dma->buf_count) dev_priv->last_buf = 0; start = dev_priv->last_buf; for (t = 0; t < dev_priv->usec_timeout; t++) { u32 done_age = GET_SCRATCH(dev_priv, 1); DRM_DEBUG("done_age = %d\n", done_age); for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[start]; buf_priv = buf->dev_private; if (buf->file_priv == NULL || (buf->pending && buf_priv->age <= done_age)) { dev_priv->stats.requested_bufs++; buf->pending = 0; return buf; } if (++start >= dma->buf_count) start = 0; } if (t) { DRM_UDELAY(1); dev_priv->stats.freelist_loops++; } } return NULL; } void radeon_freelist_reset(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; int i; dev_priv->last_buf = 0; for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; buf_priv->age = 0; } } /* ================================================================ * CP command submission */ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) { drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; u32 last_head = GET_RING_HEAD(dev_priv); for (i = 0; i < dev_priv->usec_timeout; i++) { u32 head = GET_RING_HEAD(dev_priv); ring->space = (head - ring->tail) * sizeof(u32); if (ring->space <= 0) ring->space += ring->size; if (ring->space > n) return 0; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; if (head != last_head) i = 0; last_head = head; DRM_UDELAY(1); } /* FIXME: This return value is ignored in the BEGIN_RING macro! 
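/*
 * radeon_wait_ring() above computes free space as
 * (head - tail) * sizeof(u32), adding ring->size when the result is not
 * positive to handle wrap-around. For example, with a 64 KB ring,
 * head = 100 and tail = 200 (in dwords): (100 - 200) * 4 = -400, plus
 * 65536 gives 65136 bytes free. The same arithmetic as a standalone
 * sketch; radeon_ring_space() is a hypothetical name:
 */
static int radeon_ring_space(drm_radeon_ring_buffer_t *ring, u32 head)
{
	int space = (head - ring->tail) * sizeof(u32);

	if (space <= 0)
		space += ring->size;	/* head wrapped behind the tail */
	return space;
}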
*/ #if RADEON_FIFO_DEBUG radeon_status(dev_priv); DRM_ERROR("failed!\n"); #endif return -EBUSY; } static int radeon_cp_get_buffers(struct drm_device *dev, struct drm_file *file_priv, struct drm_dma * d) { int i; struct drm_buf *buf; for (i = d->granted_count; i < d->request_count; i++) { buf = radeon_freelist_get(dev); if (!buf) return -EBUSY; /* NOTE: broken client */ buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) return -EFAULT; d->granted_count++; } return 0; } int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int ret = 0; struct drm_dma *d = data; LOCK_TEST_WITH_RETURN(dev, file_priv); /* Please don't send us buffers. */ if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. */ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } d->granted_count = 0; if (d->request_count) { ret = radeon_cp_get_buffers(dev, file_priv, d); } return ret; } int radeon_driver_load(struct drm_device *dev, unsigned long flags) { drm_radeon_private_t *dev_priv; int ret = 0; dev_priv = malloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (dev_priv == NULL) return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->flags = flags; switch (flags & RADEON_FAMILY_MASK) { case CHIP_R100: case CHIP_RV200: case CHIP_R200: case CHIP_R300: case CHIP_R350: case CHIP_R420: case CHIP_R423: case CHIP_RV410: case CHIP_RV515: case CHIP_R520: case CHIP_RV570: case CHIP_R580: dev_priv->flags |= RADEON_HAS_HIERZ; break; default: /* all other chips have no hierarchical z buffer */ break; } pci_enable_busmaster(dev->device); if (drm_device_is_agp(dev)) dev_priv->flags |= RADEON_IS_AGP; else if (drm_device_is_pcie(dev)) dev_priv->flags |= RADEON_IS_PCIE; else dev_priv->flags |= RADEON_IS_PCI; ret = drm_addmap(dev, drm_get_resource_start(dev, 2), drm_get_resource_len(dev, 2), _DRM_REGISTERS, _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); if (ret != 0) return ret; ret = drm_vblank_init(dev, 2); if (ret) { radeon_driver_unload(dev); return ret; } DRM_DEBUG("%s card detected\n", ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? 
"PCIE" : "PCI")))); return ret; } int radeon_master_create(struct drm_device *dev, struct drm_master *master) { struct drm_radeon_master_private *master_priv; unsigned long sareapage; int ret; master_priv = malloc(sizeof(*master_priv), DRM_MEM_DRIVER, M_ZERO | M_WAITOK); if (!master_priv) return -ENOMEM; /* prebuild the SAREA */ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &master_priv->sarea); if (ret) { DRM_ERROR("SAREA setup failed\n"); free(master_priv, DRM_MEM_DRIVER); return ret; } master_priv->sarea_priv = (drm_radeon_sarea_t *)((char *)master_priv->sarea->handle) + sizeof(struct drm_sarea); master_priv->sarea_priv->pfCurrentPage = 0; master->driver_priv = master_priv; return 0; } void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) { struct drm_radeon_master_private *master_priv = master->driver_priv; if (!master_priv) return; if (master_priv->sarea_priv && master_priv->sarea_priv->pfCurrentPage != 0) radeon_cp_dispatch_flip(dev, master); master_priv->sarea_priv = NULL; if (master_priv->sarea) #ifdef __linux__ drm_rmmap_locked(dev, master_priv->sarea); #else drm_rmmap(dev, master_priv->sarea); #endif free(master_priv, DRM_MEM_DRIVER); master->driver_priv = NULL; } /* Create mappings for registers and framebuffer so userland doesn't necessarily * have to find them. */ int radeon_driver_firstopen(struct drm_device *dev) { int ret; drm_local_map_t *map; drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); ret = drm_addmap(dev, dev_priv->fb_aper_offset, drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map); if (ret != 0) return ret; return 0; } int radeon_driver_unload(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); drm_rmmap(dev, dev_priv->mmio); free(dev_priv, DRM_MEM_DRIVER); dev->dev_private = NULL; return 0; } void radeon_commit_ring(drm_radeon_private_t *dev_priv) { int i; u32 *ring; int tail_aligned; /* check if the ring is padded out to 16-dword alignment */ tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1); if (tail_aligned) { int num_p2 = RADEON_RING_ALIGN - tail_aligned; ring = dev_priv->ring.start; /* pad with some CP_PACKET2 */ for (i = 0; i < num_p2; i++) ring[dev_priv->ring.tail + i] = CP_PACKET2(); dev_priv->ring.tail += i; dev_priv->ring.space -= num_p2 * sizeof(u32); } dev_priv->ring.tail &= dev_priv->ring.tail_mask; DRM_MEMORYBARRIER(); GET_RING_HEAD( dev_priv ); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail); /* read from PCI bus to ensure correct posting */ RADEON_READ(R600_CP_RB_RPTR); } else { RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail); /* read from PCI bus to ensure correct posting */ RADEON_READ(RADEON_CP_RB_RPTR); } } Index: head/sys/dev/drm2/radeon/radeon_drv.h =================================================================== --- head/sys/dev/drm2/radeon/radeon_drv.h (revision 258779) +++ head/sys/dev/drm2/radeon/radeon_drv.h (revision 258780) @@ -1,2165 +1,2165 @@ /* radeon_drv.h -- Private header for radeon driver -*- linux-c -*- * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Kevin E. Martin * Gareth Hughes */ #include __FBSDID("$FreeBSD$"); #ifndef __RADEON_DRV_H__ #define __RADEON_DRV_H__ #include "radeon_family.h" /* General customization: */ #define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." #define DRIVER_NAME "radeon" #define DRIVER_DESC "ATI Radeon" #define DRIVER_DATE "20080528" /* Interface history: * * 1.1 - ?? * 1.2 - Add vertex2 ioctl (keith) * - Add stencil capability to clear ioctl (gareth, keith) * - Increase MAX_TEXTURE_LEVELS (brian) * 1.3 - Add cmdbuf ioctl (keith) * - Add support for new radeon packets (keith) * - Add getparam ioctl (keith) * - Add flip-buffers ioctl, deprecate fullscreen foo (keith). * 1.4 - Add scratch registers to get_param ioctl. * 1.5 - Add r200 packets to cmdbuf ioctl * - Add r200 function to init ioctl * - Add 'scalar2' instruction to cmdbuf * 1.6 - Add static GART memory manager * Add irq handler (won't be turned on unless X server knows to) * Add irq ioctls and irq_active getparam. * Add wait command for cmdbuf ioctl * Add GART offset query for getparam * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5] * and R200_PP_CUBIC_OFFSET_F1_[0..5]. * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) * 1.8 - Remove need to call cleanup ioctls on last client exit (keith) * Add 'GET' queries for starting additional clients on different VT's. * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl. * Add texture rectangle support for r100. * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which * clients use to tell the DRM where they think the framebuffer is * located in the card's address space * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color * and GL_EXT_blend_[func|equation]_separate on r200 * 1.12- Add R300 CP microcode support - this just loads the CP on r300 * (No 3D support yet - just microcode loading). * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters * - Add hyperz support, add hyperz flags to clear ioctl. * 1.14- Add support for color tiling * - Add R100/R200 surface allocation/free support * 1.15- Add support for texture micro tiling * - Add support for r100 cube maps * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear * texture filtering on r200 * 1.17- Add initial support for R300 (3D). 
* 1.18- Add support for GL_ATI_fragment_shader, new packets * R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) * 1.19- Add support for gart table in FB memory and PCIE r300 * 1.20- Add support for r300 texrect * 1.21- Add support for card type getparam * 1.22- Add support for texture cache flushes (R300_TX_CNTL) * 1.23- Add new radeon memory map work from benh * 1.24- Add general-purpose packet for manipulating scratch registers (r300) * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL, * new packet type) * 1.26- Add support for variable size PCI(E) gart aperture * 1.27- Add support for IGP GART * 1.28- Add support for VBL on CRTC2 * 1.29- R500 3D cmd buffer support * 1.30- Add support for occlusion queries * 1.31- Add support for num Z pipes from GET_PARAM * 1.32- fixes for rv740 setup * 1.33- Add r6xx/r7xx const buffer support */ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 33 #define DRIVER_PATCHLEVEL 0 enum radeon_cp_microcode_version { UCODE_R100, UCODE_R200, UCODE_R300, }; typedef struct drm_radeon_freelist { unsigned int age; struct drm_buf *buf; struct drm_radeon_freelist *next; struct drm_radeon_freelist *prev; } drm_radeon_freelist_t; typedef struct drm_radeon_ring_buffer { u32 *start; u32 *end; int size; int size_l2qw; int rptr_update; /* Double Words */ int rptr_update_l2qw; /* log2 Quad Words */ int fetch_size; /* Double Words */ int fetch_size_l2ow; /* log2 Oct Words */ u32 tail; u32 tail_mask; int space; int high_mark; } drm_radeon_ring_buffer_t; typedef struct drm_radeon_depth_clear_t { u32 rb3d_cntl; u32 rb3d_zstencilcntl; u32 se_cntl; } drm_radeon_depth_clear_t; struct drm_radeon_driver_file_fields { int64_t radeon_fb_delta; }; struct mem_block { struct mem_block *next; struct mem_block *prev; int start; int size; struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ }; struct radeon_surface { int refcount; u32 lower; u32 upper; u32 flags; }; struct radeon_virt_surface { int surface_index; u32 lower; u32 upper; u32 flags; struct drm_file *file_priv; #define PCIGART_FILE_PRIV ((void *) -1L) }; #define RADEON_FLUSH_EMITED (1 << 0) #define RADEON_PURGE_EMITED (1 << 1) struct drm_radeon_master_private { drm_local_map_t *sarea; drm_radeon_sarea_t *sarea_priv; }; typedef struct drm_radeon_private { drm_radeon_ring_buffer_t ring; u32 fb_location; u32 fb_size; int new_memmap; int gart_size; u32 gart_vm_start; unsigned long gart_buffers_offset; int cp_mode; int cp_running; drm_radeon_freelist_t *head; drm_radeon_freelist_t *tail; int last_buf; int writeback_works; int usec_timeout; int microcode_version; struct { u32 boxes; int freelist_timeouts; int freelist_loops; int requested_bufs; int last_frame_reads; int last_clear_reads; int clears; int texture_uploads; } stats; int do_boxes; int page_flipping; u32 color_fmt; unsigned int front_offset; unsigned int front_pitch; unsigned int back_offset; unsigned int back_pitch; u32 depth_fmt; unsigned int depth_offset; unsigned int depth_pitch; u32 front_pitch_offset; u32 back_pitch_offset; u32 depth_pitch_offset; drm_radeon_depth_clear_t depth_clear; unsigned long ring_offset; unsigned long ring_rptr_offset; unsigned long buffers_offset; unsigned long gart_textures_offset; drm_local_map_t *sarea; drm_local_map_t *cp_ring; drm_local_map_t *ring_rptr; drm_local_map_t *gart_textures; struct mem_block *gart_heap; struct mem_block *fb_heap; /* SW interrupt */ wait_queue_head_t 
swi_queue; atomic_t swi_emitted; int vblank_crtc; uint32_t irq_enable_reg; uint32_t r500_disp_irq_reg; struct radeon_surface surfaces[RADEON_MAX_SURFACES]; struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES]; unsigned long pcigart_offset; unsigned int pcigart_offset_set; struct drm_ati_pcigart_info gart_info; u32 scratch_ages[5]; int have_z_offset; /* starting from here on, data is preserved across an open */ uint32_t flags; /* see radeon_chip_flags */ resource_size_t fb_aper_offset; int num_gb_pipes; int num_z_pipes; int track_flush; drm_local_map_t *mmio; /* r6xx/r7xx pipe/shader config */ int r600_max_pipes; int r600_max_tile_pipes; int r600_max_simds; int r600_max_backends; int r600_max_gprs; int r600_max_threads; int r600_max_stack_entries; int r600_max_hw_contexts; int r600_max_gs_threads; int r600_sx_max_export_size; int r600_sx_max_export_pos_size; int r600_sx_max_export_smx_size; int r600_sq_num_cf_insts; int r700_sx_num_of_sets; int r700_sc_prim_fifo_size; int r700_sc_hiz_tile_fifo_size; int r700_sc_earlyz_tile_fifo_fize; int r600_group_size; int r600_npipes; int r600_nbanks; struct sx cs_mutex; u32 cs_id_scnt; u32 cs_id_wcnt; /* r6xx/r7xx drm blit vertex buffer */ struct drm_buf *blit_vb; /* firmware */ const struct firmware *me_fw, *pfp_fw; } drm_radeon_private_t; typedef struct drm_radeon_buf_priv { u32 age; } drm_radeon_buf_priv_t; struct drm_buffer; typedef struct drm_radeon_kcmd_buffer { int bufsz; struct drm_buffer *buffer; int nbox; struct drm_clip_rect __user *boxes; } drm_radeon_kcmd_buffer_t; extern int radeon_no_wb; extern struct drm_ioctl_desc radeon_ioctls[]; extern int radeon_max_ioctl; extern u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv); extern void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val); #define GET_RING_HEAD(dev_priv) radeon_get_ring_head(dev_priv) #define SET_RING_HEAD(dev_priv, val) radeon_set_ring_head(dev_priv, val) /* Check whether the given hardware address is inside the framebuffer or the * GART area. 
*/ static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv, u64 off) { u32 fb_start = dev_priv->fb_location; u32 fb_end = fb_start + dev_priv->fb_size - 1; u32 gart_start = dev_priv->gart_vm_start; u32 gart_end = gart_start + dev_priv->gart_size - 1; return ((off >= fb_start && off <= fb_end) || (off >= gart_start && off <= gart_end)); } /* radeon_state.c */ extern void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf); /* radeon_cp.c */ extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv); extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc); extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base); extern void radeon_freelist_reset(struct drm_device * dev); extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv); extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); extern int radeon_presetup(struct drm_device *dev); extern int radeon_driver_postcleanup(struct drm_device *dev); extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void radeon_mem_takedown(struct mem_block **heap); extern void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap); extern void radeon_enable_bm(struct drm_radeon_private *dev_priv); extern u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off); extern void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val); /* radeon_irq.c */ extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state); extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void radeon_do_release(struct drm_device * dev); extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); extern int radeon_enable_vblank(struct drm_device *dev, int crtc); extern void radeon_disable_vblank(struct drm_device *dev, int crtc); extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); extern void radeon_driver_irq_preinstall(struct drm_device * dev); extern int radeon_driver_irq_postinstall(struct drm_device *dev); extern void radeon_driver_irq_uninstall(struct drm_device * dev); extern void radeon_enable_interrupt(struct drm_device *dev); extern 
int radeon_vblank_crtc_get(struct drm_device *dev); extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); extern int radeon_driver_unload(struct drm_device *dev); extern int radeon_driver_firstopen(struct drm_device *dev); extern void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); extern void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv); extern void radeon_driver_lastclose(struct drm_device * dev); extern int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv); extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern int radeon_master_create(struct drm_device *dev, struct drm_master *master); extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master); extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master); /* r300_cmdbuf.c */ extern void r300_init_reg_flags(struct drm_device *dev); extern int r300_do_cp_cmdbuf(struct drm_device *dev, struct drm_file *file_priv, drm_radeon_kcmd_buffer_t *cmdbuf); /* r600_cp.c */ extern int r600_do_engine_reset(struct drm_device *dev); extern int r600_do_cleanup_cp(struct drm_device *dev); extern int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, struct drm_file *file_priv); extern int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv); extern int r600_do_cp_idle(drm_radeon_private_t *dev_priv); extern void r600_do_cp_start(drm_radeon_private_t *dev_priv); extern void r600_do_cp_reset(drm_radeon_private_t *dev_priv); extern void r600_do_cp_stop(drm_radeon_private_t *dev_priv); extern int r600_cp_dispatch_indirect(struct drm_device *dev, struct drm_buf *buf, int start, int end); extern int r600_page_table_init(struct drm_device *dev); extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); extern int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv); extern void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv); extern int r600_cp_dispatch_texture(struct drm_device *dev, struct drm_file *file_priv, drm_radeon_texture_t *tex, drm_radeon_tex_image_t *image); /* r600_blit.c */ extern int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv); extern void r600_done_blit_copy(struct drm_device *dev); extern void r600_blit_copy(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int size_bytes); extern void r600_blit_swap(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int sx, int sy, int dx, int dy, int w, int h, int src_pitch, int dst_pitch, int cpp); /* atpx handler */ void radeon_register_atpx_handler(void); void radeon_unregister_atpx_handler(void); /* Flags for stats.boxes */ #define RADEON_BOX_DMA_IDLE 0x1 #define RADEON_BOX_RING_FULL 0x2 #define RADEON_BOX_FLIP 0x4 #define RADEON_BOX_WAIT_IDLE 0x8 #define RADEON_BOX_TEXTURE_LOAD 0x10 /* Register definitions, register access macros and drmAddMap constants * for Radeon kernel driver. 
*/ #define RADEON_MM_INDEX 0x0000 #define RADEON_MM_DATA 0x0004 #define RADEON_AGP_COMMAND 0x0f60 #define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ # define RADEON_AGP_ENABLE (1<<8) #define RADEON_AUX_SCISSOR_CNTL 0x26f0 # define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) # define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) # define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26) # define RADEON_SCISSOR_0_ENABLE (1 << 28) # define RADEON_SCISSOR_1_ENABLE (1 << 29) # define RADEON_SCISSOR_2_ENABLE (1 << 30) /* * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx) * don't have an explicit bus mastering disable bit. It's handled * by the PCI D-states. PMI_BM_DIS disables D-state bus master * handling, not bus mastering itself. */ #define RADEON_BUS_CNTL 0x0030 /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ # define RADEON_BUS_MASTER_DIS (1 << 6) /* rs600/rs690/rs740 */ # define RS600_BUS_MASTER_DIS (1 << 14) # define RS600_MSI_REARM (1 << 20) /* see RS400_MSI_REARM in AIC_CNTL for rs480 */ #define RADEON_BUS_CNTL1 0x0034 # define RADEON_PMI_BM_DIS (1 << 2) # define RADEON_PMI_INT_DIS (1 << 3) #define RV370_BUS_CNTL 0x004c # define RV370_PMI_BM_DIS (1 << 5) # define RV370_PMI_INT_DIS (1 << 6) #define RADEON_MSI_REARM_EN 0x0160 /* rv370/rv380, rv410, r423/r430/r480, r5xx */ # define RV370_MSI_REARM_EN (1 << 0) #define RADEON_CLOCK_CNTL_DATA 0x000c # define RADEON_PLL_WR_EN (1 << 7) #define RADEON_CLOCK_CNTL_INDEX 0x0008 #define RADEON_CONFIG_APER_SIZE 0x0108 #define RADEON_CONFIG_MEMSIZE 0x00f8 #define RADEON_CRTC_OFFSET 0x0224 #define RADEON_CRTC_OFFSET_CNTL 0x0228 # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) #define RADEON_CRTC2_OFFSET 0x0324 #define RADEON_CRTC2_OFFSET_CNTL 0x0328 #define RADEON_PCIE_INDEX 0x0030 #define RADEON_PCIE_DATA 0x0034 #define RADEON_PCIE_TX_GART_CNTL 0x10 # define RADEON_PCIE_TX_GART_EN (1 << 0) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1) # define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3) # define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3) # define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5) # define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8) #define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11 #define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12 #define RADEON_PCIE_TX_GART_BASE 0x13 #define RADEON_PCIE_TX_GART_START_LO 0x14 #define RADEON_PCIE_TX_GART_START_HI 0x15 #define RADEON_PCIE_TX_GART_END_LO 0x16 #define RADEON_PCIE_TX_GART_END_HI 0x17 #define RS480_NB_MC_INDEX 0x168 # define RS480_NB_MC_IND_WR_EN (1 << 8) #define RS480_NB_MC_DATA 0x16c #define RS690_MC_INDEX 0x78 # define RS690_MC_INDEX_MASK 0x1ff # define RS690_MC_INDEX_WR_EN (1 << 9) # define RS690_MC_INDEX_WR_ACK 0x7f #define RS690_MC_DATA 0x7c /* MC indirect registers */ #define RS480_MC_MISC_CNTL 0x18 # define RS480_DISABLE_GTW (1 << 1) /* switch between MCIND GART and MM GART registers. 
0 = mmgart, 1 = mcind gart */ # define RS480_GART_INDEX_REG_EN (1 << 12) # define RS690_BLOCK_GFX_D3_EN (1 << 14) #define RS480_K8_FB_LOCATION 0x1e #define RS480_GART_FEATURE_ID 0x2b # define RS480_HANG_EN (1 << 11) # define RS480_TLB_ENABLE (1 << 18) # define RS480_P2P_ENABLE (1 << 19) # define RS480_GTW_LAC_EN (1 << 25) # define RS480_2LEVEL_GART (0 << 30) # define RS480_1LEVEL_GART (1 << 30) -# define RS480_PDC_EN (1 << 31) +# define RS480_PDC_EN (1U << 31) #define RS480_GART_BASE 0x2c #define RS480_GART_CACHE_CNTRL 0x2e # define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */ #define RS480_AGP_ADDRESS_SPACE_SIZE 0x38 # define RS480_GART_EN (1 << 0) # define RS480_VA_SIZE_32MB (0 << 1) # define RS480_VA_SIZE_64MB (1 << 1) # define RS480_VA_SIZE_128MB (2 << 1) # define RS480_VA_SIZE_256MB (3 << 1) # define RS480_VA_SIZE_512MB (4 << 1) # define RS480_VA_SIZE_1GB (5 << 1) # define RS480_VA_SIZE_2GB (6 << 1) #define RS480_AGP_MODE_CNTL 0x39 # define RS480_POST_GART_Q_SIZE (1 << 18) # define RS480_NONGART_SNOOP (1 << 19) # define RS480_AGP_RD_BUF_SIZE (1 << 20) # define RS480_REQ_TYPE_SNOOP_SHIFT 22 # define RS480_REQ_TYPE_SNOOP_MASK 0x3 # define RS480_REQ_TYPE_SNOOP_DIS (1 << 24) #define RS480_MC_MISC_UMA_CNTL 0x5f #define RS480_MC_MCLK_CNTL 0x7a #define RS480_MC_UMA_DUALCH_CNTL 0x86 #define RS690_MC_FB_LOCATION 0x100 #define RS690_MC_AGP_LOCATION 0x101 #define RS690_MC_AGP_BASE 0x102 #define RS690_MC_AGP_BASE_2 0x103 #define RS600_MC_INDEX 0x70 # define RS600_MC_ADDR_MASK 0xffff # define RS600_MC_IND_SEQ_RBS_0 (1 << 16) # define RS600_MC_IND_SEQ_RBS_1 (1 << 17) # define RS600_MC_IND_SEQ_RBS_2 (1 << 18) # define RS600_MC_IND_SEQ_RBS_3 (1 << 19) # define RS600_MC_IND_AIC_RBS (1 << 20) # define RS600_MC_IND_CITF_ARB0 (1 << 21) # define RS600_MC_IND_CITF_ARB1 (1 << 22) # define RS600_MC_IND_WR_EN (1 << 23) #define RS600_MC_DATA 0x74 #define RS600_MC_STATUS 0x0 # define RS600_MC_IDLE (1 << 1) #define RS600_MC_FB_LOCATION 0x4 #define RS600_MC_AGP_LOCATION 0x5 #define RS600_AGP_BASE 0x6 #define RS600_AGP_BASE_2 0x7 #define RS600_MC_CNTL1 0x9 # define RS600_ENABLE_PAGE_TABLES (1 << 26) #define RS600_MC_PT0_CNTL 0x100 # define RS600_ENABLE_PT (1 << 0) # define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15) # define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21) # define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28) # define RS600_INVALIDATE_L2_CACHE (1 << 29) #define RS600_MC_PT0_CONTEXT0_CNTL 0x102 # define RS600_ENABLE_PAGE_TABLE (1 << 0) # define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1) #define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112 #define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114 #define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c #define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c #define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c #define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c #define RS600_MC_PT0_CLIENT0_CNTL 0x16c # define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0) # define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1) # define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8) # define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8) # define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8) # define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8) # define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8) # define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10) # define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10) # define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11) # define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14) # define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15) # define 
RS600_INVALIDATE_L1_TLB (1 << 20) #define R520_MC_IND_INDEX 0x70 #define R520_MC_IND_WR_EN (1 << 24) #define R520_MC_IND_DATA 0x74 #define RV515_MC_FB_LOCATION 0x01 #define RV515_MC_AGP_LOCATION 0x02 #define RV515_MC_AGP_BASE 0x03 #define RV515_MC_AGP_BASE_2 0x04 #define R520_MC_FB_LOCATION 0x04 #define R520_MC_AGP_LOCATION 0x05 #define R520_MC_AGP_BASE 0x06 #define R520_MC_AGP_BASE_2 0x07 #define RADEON_MPP_TB_CONFIG 0x01c0 #define RADEON_MEM_CNTL 0x0140 #define RADEON_MEM_SDRAM_MODE_REG 0x0158 #define RADEON_AGP_BASE_2 0x015c /* r200+ only */ #define RS480_AGP_BASE_2 0x0164 #define RADEON_AGP_BASE 0x0170 /* pipe config regs */ #define R400_GB_PIPE_SELECT 0x402c #define RV530_GB_PIPE_SELECT2 0x4124 #define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ #define R300_GB_TILE_CONFIG 0x4018 # define R300_ENABLE_TILING (1 << 0) # define R300_PIPE_COUNT_RV350 (0 << 1) # define R300_PIPE_COUNT_R300 (3 << 1) # define R300_PIPE_COUNT_R420_3P (6 << 1) # define R300_PIPE_COUNT_R420 (7 << 1) # define R300_TILE_SIZE_8 (0 << 4) # define R300_TILE_SIZE_16 (1 << 4) # define R300_TILE_SIZE_32 (2 << 4) # define R300_SUBPIXEL_1_12 (0 << 16) # define R300_SUBPIXEL_1_16 (1 << 16) #define R300_DST_PIPE_CONFIG 0x170c -# define R300_PIPE_AUTO_CONFIG (1 << 31) +# define R300_PIPE_AUTO_CONFIG (1U << 31) #define R300_RB2D_DSTCACHE_MODE 0x3428 # define R300_DC_AUTOFLUSH_ENABLE (1 << 8) # define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17) #define RADEON_RB3D_COLOROFFSET 0x1c40 #define RADEON_RB3D_COLORPITCH 0x1c48 #define RADEON_SRC_X_Y 0x1590 #define RADEON_DP_GUI_MASTER_CNTL 0x146c # define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) # define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) # define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4) # define RADEON_GMC_BRUSH_NONE (15 << 4) # define RADEON_GMC_DST_16BPP (4 << 8) # define RADEON_GMC_DST_24BPP (5 << 8) # define RADEON_GMC_DST_32BPP (6 << 8) # define RADEON_GMC_DST_DATATYPE_SHIFT 8 # define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12) # define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24) # define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24) # define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28) # define RADEON_GMC_WR_MSK_DIS (1 << 30) # define RADEON_ROP3_S 0x00cc0000 # define RADEON_ROP3_P 0x00f00000 #define RADEON_DP_WRITE_MASK 0x16cc #define RADEON_SRC_PITCH_OFFSET 0x1428 #define RADEON_DST_PITCH_OFFSET 0x142c #define RADEON_DST_PITCH_OFFSET_C 0x1c80 # define RADEON_DST_TILE_LINEAR (0 << 30) # define RADEON_DST_TILE_MACRO (1 << 30) # define RADEON_DST_TILE_MICRO (2U << 30) # define RADEON_DST_TILE_BOTH (3U << 30) #define RADEON_SCRATCH_REG0 0x15e0 #define RADEON_SCRATCH_REG1 0x15e4 #define RADEON_SCRATCH_REG2 0x15e8 #define RADEON_SCRATCH_REG3 0x15ec #define RADEON_SCRATCH_REG4 0x15f0 #define RADEON_SCRATCH_REG5 0x15f4 #define RADEON_SCRATCH_UMSK 0x0770 #define RADEON_SCRATCH_ADDR 0x0774 #define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x)) extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); #define GET_SCRATCH(dev_priv, x) radeon_get_scratch(dev_priv, x) #define R600_SCRATCH_REG0 0x8500 #define R600_SCRATCH_REG1 0x8504 #define R600_SCRATCH_REG2 0x8508 #define R600_SCRATCH_REG3 0x850c #define R600_SCRATCH_REG4 0x8510 #define R600_SCRATCH_REG5 0x8514 #define R600_SCRATCH_REG6 0x8518 #define R600_SCRATCH_REG7 0x851c #define R600_SCRATCH_UMSK 0x8540 #define R600_SCRATCH_ADDR 0x8544 #define R600_SCRATCHOFF(x) (R600_SCRATCH_REG_OFFSET + 4*(x)) #define RADEON_GEN_INT_CNTL 0x0040 # define RADEON_CRTC_VBLANK_MASK (1 << 0) # define RADEON_CRTC2_VBLANK_MASK (1 << 9) # define 
RADEON_GUI_IDLE_INT_ENABLE (1 << 19) # define RADEON_SW_INT_ENABLE (1 << 25) #define RADEON_GEN_INT_STATUS 0x0044 # define RADEON_CRTC_VBLANK_STAT (1 << 0) # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) # define RADEON_CRTC2_VBLANK_STAT (1 << 9) # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) # define RADEON_SW_INT_FIRE (1 << 26) # define R500_DISPLAY_INT_STATUS (1 << 0) #define RADEON_HOST_PATH_CNTL 0x0130 # define RADEON_HDP_SOFT_RESET (1 << 26) # define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) # define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28) #define RADEON_ISYNC_CNTL 0x1724 # define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0) # define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1) # define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2) # define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3) # define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) # define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) #define RADEON_RBBM_GUICNTL 0x172c # define RADEON_HOST_DATA_SWAP_NONE (0 << 0) # define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) # define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) # define RADEON_HOST_DATA_SWAP_HDW (3 << 0) #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_FB_LOCATION 0x0148 #define RADEON_MCLK_CNTL 0x0012 # define RADEON_FORCEON_MCLKA (1 << 16) # define RADEON_FORCEON_MCLKB (1 << 17) # define RADEON_FORCEON_YCLKA (1 << 18) # define RADEON_FORCEON_YCLKB (1 << 19) # define RADEON_FORCEON_MC (1 << 20) # define RADEON_FORCEON_AIC (1 << 21) #define RADEON_PP_BORDER_COLOR_0 0x1d40 #define RADEON_PP_BORDER_COLOR_1 0x1d44 #define RADEON_PP_BORDER_COLOR_2 0x1d48 #define RADEON_PP_CNTL 0x1c38 # define RADEON_SCISSOR_ENABLE (1 << 1) #define RADEON_PP_LUM_MATRIX 0x1d00 #define RADEON_PP_MISC 0x1c14 #define RADEON_PP_ROT_MATRIX_0 0x1d58 #define RADEON_PP_TXFILTER_0 0x1c54 #define RADEON_PP_TXOFFSET_0 0x1c5c #define RADEON_PP_TXFILTER_1 0x1c6c #define RADEON_PP_TXFILTER_2 0x1c84 #define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */ #define R300_DSTCACHE_CTLSTAT 0x1714 # define R300_RB2D_DC_FLUSH (3 << 0) # define R300_RB2D_DC_FREE (3 << 2) # define R300_RB2D_DC_FLUSH_ALL 0xf -# define R300_RB2D_DC_BUSY (1 << 31) +# define R300_RB2D_DC_BUSY (1U << 31) #define RADEON_RB3D_CNTL 0x1c3c # define RADEON_ALPHA_BLEND_ENABLE (1 << 0) # define RADEON_PLANE_MASK_ENABLE (1 << 1) # define RADEON_DITHER_ENABLE (1 << 2) # define RADEON_ROUND_ENABLE (1 << 3) # define RADEON_SCALE_DITHER_ENABLE (1 << 4) # define RADEON_DITHER_INIT (1 << 5) # define RADEON_ROP_ENABLE (1 << 6) # define RADEON_STENCIL_ENABLE (1 << 7) # define RADEON_Z_ENABLE (1 << 8) # define RADEON_ZBLOCK16 (1 << 15) #define RADEON_RB3D_DEPTHOFFSET 0x1c24 #define RADEON_RB3D_DEPTHCLEARVALUE 0x3230 #define RADEON_RB3D_DEPTHPITCH 0x1c28 #define RADEON_RB3D_PLANEMASK 0x1d84 #define RADEON_RB3D_STENCILREFMASK 0x1d7c #define RADEON_RB3D_ZCACHE_MODE 0x3250 #define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254 # define RADEON_RB3D_ZC_FLUSH (1 << 0) # define RADEON_RB3D_ZC_FREE (1 << 2) # define RADEON_RB3D_ZC_FLUSH_ALL 0x5 -# define RADEON_RB3D_ZC_BUSY (1 << 31) +# define RADEON_RB3D_ZC_BUSY (1U << 31) #define R300_ZB_ZCACHE_CTLSTAT 0x4f18 # define R300_ZC_FLUSH (1 << 0) # define R300_ZC_FREE (1 << 1) -# define R300_ZC_BUSY (1 << 31) +# define R300_ZC_BUSY (1U << 31) #define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c # define RADEON_RB3D_DC_FLUSH (3 << 0) # define RADEON_RB3D_DC_FREE (3 << 2) # define RADEON_RB3D_DC_FLUSH_ALL 0xf -# define RADEON_RB3D_DC_BUSY (1 << 31) +# define 
RADEON_RB3D_DC_BUSY (1U << 31) #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c # define R300_RB3D_DC_FLUSH (2 << 0) # define R300_RB3D_DC_FREE (2 << 2) # define R300_RB3D_DC_FINISH (1 << 4) #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) # define RADEON_Z_HIERARCHY_ENABLE (1 << 8) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) # define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) # define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) # define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) # define RADEON_Z_COMPRESSION_ENABLE (1 << 28) # define RADEON_FORCE_Z_DIRTY (1 << 29) # define RADEON_Z_WRITE_ENABLE (1 << 30) -# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) +# define RADEON_Z_DECOMPRESSION_ENABLE (1U << 31) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) # define RADEON_SOFT_RESET_HI (1 << 1) # define RADEON_SOFT_RESET_SE (1 << 2) # define RADEON_SOFT_RESET_RE (1 << 3) # define RADEON_SOFT_RESET_PP (1 << 4) # define RADEON_SOFT_RESET_E2 (1 << 5) # define RADEON_SOFT_RESET_RB (1 << 6) # define RADEON_SOFT_RESET_HDP (1 << 7) /* * 6:0 Available slots in the FIFO * 8 Host Interface active * 9 CP request active * 10 FIFO request active * 11 Host Interface retry active * 12 CP retry active * 13 FIFO retry active * 14 FIFO pipeline busy * 15 Event engine busy * 16 CP command stream busy * 17 2D engine busy * 18 2D portion of render backend busy * 20 3D setup engine busy * 26 GA engine busy * 27 CBA 2D engine busy * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or * command stream queue not empty or Ring Buffer not empty */ #define RADEON_RBBM_STATUS 0x0e40 /* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */ /* #define RADEON_RBBM_STATUS 0x1740 */ /* bits 6:0 are dword slots available in the cmd fifo */ # define RADEON_RBBM_FIFOCNT_MASK 0x007f # define RADEON_HIRQ_ON_RBB (1 << 8) # define RADEON_CPRQ_ON_RBB (1 << 9) # define RADEON_CFRQ_ON_RBB (1 << 10) # define RADEON_HIRQ_IN_RTBUF (1 << 11) # define RADEON_CPRQ_IN_RTBUF (1 << 12) # define RADEON_CFRQ_IN_RTBUF (1 << 13) # define RADEON_PIPE_BUSY (1 << 14) # define RADEON_ENG_EV_BUSY (1 << 15) # define RADEON_CP_CMDSTRM_BUSY (1 << 16) # define RADEON_E2_BUSY (1 << 17) # define RADEON_RB2D_BUSY (1 << 18) # define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */ # define RADEON_VAP_BUSY (1 << 20) # define RADEON_RE_BUSY (1 << 21) /* not used on r300 */ # define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */ # define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */ # define RADEON_PB_BUSY (1 << 24) /* not used on r300 */ # define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */ # define RADEON_GA_BUSY (1 << 26) # define RADEON_CBA2D_BUSY (1 << 27) -# define RADEON_RBBM_ACTIVE (1 << 31) +# define RADEON_RBBM_ACTIVE (1U << 31) #define RADEON_RE_LINE_PATTERN 0x1cd0 #define RADEON_RE_MISC 0x26c4 #define RADEON_RE_TOP_LEFT 0x26c0 #define RADEON_RE_WIDTH_HEIGHT 0x1c44 #define RADEON_RE_STIPPLE_ADDR 0x1cc8 #define RADEON_RE_STIPPLE_DATA 0x1ccc #define RADEON_SCISSOR_TL_0 0x1cd8 #define RADEON_SCISSOR_BR_0 0x1cdc #define RADEON_SCISSOR_TL_1 0x1ce0 #define RADEON_SCISSOR_BR_1 0x1ce4 #define RADEON_SCISSOR_TL_2 0x1ce8 #define RADEON_SCISSOR_BR_2 0x1cec #define RADEON_SE_COORD_FMT 0x1c50 #define RADEON_SE_CNTL 0x1c4c # define RADEON_FFACE_CULL_CW (0 << 0) # define RADEON_BFACE_SOLID (3 << 1) # define RADEON_FFACE_SOLID (3 << 3) # define RADEON_FLAT_SHADE_VTX_LAST (3 << 6) # define RADEON_DIFFUSE_SHADE_FLAT (1 << 8) # define 
RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8) # define RADEON_ALPHA_SHADE_FLAT (1 << 10) # define RADEON_ALPHA_SHADE_GOURAUD (2 << 10) # define RADEON_SPECULAR_SHADE_FLAT (1 << 12) # define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12) # define RADEON_FOG_SHADE_FLAT (1 << 14) # define RADEON_FOG_SHADE_GOURAUD (2 << 14) # define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24) # define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25) # define RADEON_VTX_PIX_CENTER_OGL (1 << 27) # define RADEON_ROUND_MODE_TRUNC (0 << 28) # define RADEON_ROUND_PREC_8TH_PIX (1 << 30) #define RADEON_SE_CNTL_STATUS 0x2140 #define RADEON_SE_LINE_WIDTH 0x1db8 #define RADEON_SE_VPORT_XSCALE 0x1d98 #define RADEON_SE_ZBIAS_FACTOR 0x1db0 #define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 #define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 #define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200 # define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16 # define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28 #define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204 #define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208 # define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16 #define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C #define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 #define RADEON_SURFACE_ACCESS_CLR 0x0bfc #define RADEON_SURFACE_CNTL 0x0b00 # define RADEON_SURF_TRANSLATION_DIS (1 << 8) # define RADEON_NONSURF_AP0_SWP_MASK (3 << 20) # define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20) # define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20) # define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20) # define RADEON_NONSURF_AP1_SWP_MASK (3 << 22) # define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22) # define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22) # define RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22) #define RADEON_SURFACE0_INFO 0x0b0c # define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0) # define RADEON_SURF_TILE_MODE_MASK (3 << 16) # define RADEON_SURF_TILE_MODE_MACRO (0 << 16) # define RADEON_SURF_TILE_MODE_MICRO (1 << 16) # define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16) # define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16) #define RADEON_SURFACE0_LOWER_BOUND 0x0b04 #define RADEON_SURFACE0_UPPER_BOUND 0x0b08 # define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0) #define RADEON_SURFACE1_INFO 0x0b1c #define RADEON_SURFACE1_LOWER_BOUND 0x0b14 #define RADEON_SURFACE1_UPPER_BOUND 0x0b18 #define RADEON_SURFACE2_INFO 0x0b2c #define RADEON_SURFACE2_LOWER_BOUND 0x0b24 #define RADEON_SURFACE2_UPPER_BOUND 0x0b28 #define RADEON_SURFACE3_INFO 0x0b3c #define RADEON_SURFACE3_LOWER_BOUND 0x0b34 #define RADEON_SURFACE3_UPPER_BOUND 0x0b38 #define RADEON_SURFACE4_INFO 0x0b4c #define RADEON_SURFACE4_LOWER_BOUND 0x0b44 #define RADEON_SURFACE4_UPPER_BOUND 0x0b48 #define RADEON_SURFACE5_INFO 0x0b5c #define RADEON_SURFACE5_LOWER_BOUND 0x0b54 #define RADEON_SURFACE5_UPPER_BOUND 0x0b58 #define RADEON_SURFACE6_INFO 0x0b6c #define RADEON_SURFACE6_LOWER_BOUND 0x0b64 #define RADEON_SURFACE6_UPPER_BOUND 0x0b68 #define RADEON_SURFACE7_INFO 0x0b7c #define RADEON_SURFACE7_LOWER_BOUND 0x0b74 #define RADEON_SURFACE7_UPPER_BOUND 0x0b78 #define RADEON_SW_SEMAPHORE 0x013c #define RADEON_WAIT_UNTIL 0x1720 # define RADEON_WAIT_CRTC_PFLIP (1 << 0) # define RADEON_WAIT_2D_IDLE (1 << 14) # define RADEON_WAIT_3D_IDLE (1 << 15) # define RADEON_WAIT_2D_IDLECLEAN (1 << 16) # define RADEON_WAIT_3D_IDLECLEAN (1 << 17) # define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) #define RADEON_RB3D_ZMASKOFFSET 0x3234 #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c # define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) # define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) /* CP registers */ #define RADEON_CP_ME_RAM_ADDR 0x07d4 #define RADEON_CP_ME_RAM_RADDR 
0x07d8 #define RADEON_CP_ME_RAM_DATAH 0x07dc #define RADEON_CP_ME_RAM_DATAL 0x07e0 #define RADEON_CP_RB_BASE 0x0700 #define RADEON_CP_RB_CNTL 0x0704 # define RADEON_BUF_SWAP_32BIT (2 << 16) # define RADEON_RB_NO_UPDATE (1 << 27) -# define RADEON_RB_RPTR_WR_ENA (1 << 31) +# define RADEON_RB_RPTR_WR_ENA (1U << 31) #define RADEON_CP_RB_RPTR_ADDR 0x070c #define RADEON_CP_RB_RPTR 0x0710 #define RADEON_CP_RB_WPTR 0x0714 #define RADEON_CP_RB_WPTR_DELAY 0x0718 # define RADEON_PRE_WRITE_TIMER_SHIFT 0 # define RADEON_PRE_WRITE_LIMIT_SHIFT 23 #define RADEON_CP_IB_BASE 0x0738 #define RADEON_CP_CSQ_CNTL 0x0740 # define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0) # define RADEON_CSQ_PRIDIS_INDDIS (0 << 28) # define RADEON_CSQ_PRIPIO_INDDIS (1 << 28) # define RADEON_CSQ_PRIBM_INDDIS (2 << 28) # define RADEON_CSQ_PRIPIO_INDBM (3 << 28) # define RADEON_CSQ_PRIBM_INDBM (4 << 28) # define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) #define R300_CP_RESYNC_ADDR 0x0778 #define R300_CP_RESYNC_DATA 0x077c #define RADEON_AIC_CNTL 0x01d0 # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) # define RS400_MSI_REARM (1 << 3) #define RADEON_AIC_STAT 0x01d4 #define RADEON_AIC_PT_BASE 0x01d8 #define RADEON_AIC_LO_ADDR 0x01dc #define RADEON_AIC_HI_ADDR 0x01e0 #define RADEON_AIC_TLB_ADDR 0x01e4 #define RADEON_AIC_TLB_DATA 0x01e8 /* CP command packets */ #define RADEON_CP_PACKET0 0x00000000 # define RADEON_ONE_REG_WR (1 << 15) #define RADEON_CP_PACKET1 0x40000000 #define RADEON_CP_PACKET2 0x80000000 #define RADEON_CP_PACKET3 0xC0000000 # define RADEON_CP_NOP 0x00001000 # define RADEON_CP_NEXT_CHAR 0x00001900 # define RADEON_CP_PLY_NEXTSCAN 0x00001D00 # define RADEON_CP_SET_SCISSORS 0x00001E00 /* GEN_INDX_PRIM is unsupported starting with R300 */ # define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 # define RADEON_WAIT_FOR_IDLE 0x00002600 # define RADEON_3D_DRAW_VBUF 0x00002800 # define RADEON_3D_DRAW_IMMD 0x00002900 # define RADEON_3D_DRAW_INDX 0x00002A00 # define RADEON_CP_LOAD_PALETTE 0x00002C00 # define RADEON_3D_LOAD_VBPNTR 0x00002F00 # define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 # define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 # define RADEON_3D_CLEAR_ZMASK 0x00003200 # define RADEON_CP_INDX_BUFFER 0x00003300 # define RADEON_CP_3D_DRAW_VBUF_2 0x00003400 # define RADEON_CP_3D_DRAW_IMMD_2 0x00003500 # define RADEON_CP_3D_DRAW_INDX_2 0x00003600 # define RADEON_3D_CLEAR_HIZ 0x00003700 # define RADEON_CP_3D_CLEAR_CMASK 0x00003802 # define RADEON_CNTL_HOSTDATA_BLT 0x00009400 # define RADEON_CNTL_PAINT_MULTI 0x00009A00 # define RADEON_CNTL_BITBLT_MULTI 0x00009B00 # define RADEON_CNTL_SET_SCISSORS 0xC0001E00 # define R600_IT_INDIRECT_BUFFER_END 0x00001700 # define R600_IT_SET_PREDICATION 0x00002000 # define R600_IT_REG_RMW 0x00002100 # define R600_IT_COND_EXEC 0x00002200 # define R600_IT_PRED_EXEC 0x00002300 # define R600_IT_START_3D_CMDBUF 0x00002400 # define R600_IT_DRAW_INDEX_2 0x00002700 # define R600_IT_CONTEXT_CONTROL 0x00002800 # define R600_IT_DRAW_INDEX_IMMD_BE 0x00002900 # define R600_IT_INDEX_TYPE 0x00002A00 # define R600_IT_DRAW_INDEX 0x00002B00 # define R600_IT_DRAW_INDEX_AUTO 0x00002D00 # define R600_IT_DRAW_INDEX_IMMD 0x00002E00 # define R600_IT_NUM_INSTANCES 0x00002F00 # define R600_IT_STRMOUT_BUFFER_UPDATE 0x00003400 # define R600_IT_INDIRECT_BUFFER_MP 0x00003800 # define R600_IT_MEM_SEMAPHORE 0x00003900 # define R600_IT_MPEG_INDEX 0x00003A00 # define R600_IT_WAIT_REG_MEM 0x00003C00 # define R600_IT_MEM_WRITE 0x00003D00 # define R600_IT_INDIRECT_BUFFER 0x00003200 # define R600_IT_SURFACE_SYNC 0x00004300 # define 
R600_CB0_DEST_BASE_ENA (1 << 6) # define R600_TC_ACTION_ENA (1 << 23) # define R600_VC_ACTION_ENA (1 << 24) # define R600_CB_ACTION_ENA (1 << 25) # define R600_DB_ACTION_ENA (1 << 26) # define R600_SH_ACTION_ENA (1 << 27) # define R600_SMX_ACTION_ENA (1 << 28) # define R600_IT_ME_INITIALIZE 0x00004400 # define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) # define R600_IT_COND_WRITE 0x00004500 # define R600_IT_EVENT_WRITE 0x00004600 # define R600_IT_EVENT_WRITE_EOP 0x00004700 # define R600_IT_ONE_REG_WRITE 0x00005700 # define R600_IT_SET_CONFIG_REG 0x00006800 # define R600_SET_CONFIG_REG_OFFSET 0x00008000 # define R600_SET_CONFIG_REG_END 0x0000ac00 # define R600_IT_SET_CONTEXT_REG 0x00006900 # define R600_SET_CONTEXT_REG_OFFSET 0x00028000 # define R600_SET_CONTEXT_REG_END 0x00029000 # define R600_IT_SET_ALU_CONST 0x00006A00 # define R600_SET_ALU_CONST_OFFSET 0x00030000 # define R600_SET_ALU_CONST_END 0x00032000 # define R600_IT_SET_BOOL_CONST 0x00006B00 # define R600_SET_BOOL_CONST_OFFSET 0x0003e380 # define R600_SET_BOOL_CONST_END 0x00040000 # define R600_IT_SET_LOOP_CONST 0x00006C00 # define R600_SET_LOOP_CONST_OFFSET 0x0003e200 # define R600_SET_LOOP_CONST_END 0x0003e380 # define R600_IT_SET_RESOURCE 0x00006D00 # define R600_SET_RESOURCE_OFFSET 0x00038000 # define R600_SET_RESOURCE_END 0x0003c000 # define R600_SQ_TEX_VTX_INVALID_TEXTURE 0x0 # define R600_SQ_TEX_VTX_INVALID_BUFFER 0x1 # define R600_SQ_TEX_VTX_VALID_TEXTURE 0x2 # define R600_SQ_TEX_VTX_VALID_BUFFER 0x3 # define R600_IT_SET_SAMPLER 0x00006E00 # define R600_SET_SAMPLER_OFFSET 0x0003c000 # define R600_SET_SAMPLER_END 0x0003cff0 # define R600_IT_SET_CTL_CONST 0x00006F00 # define R600_SET_CTL_CONST_OFFSET 0x0003cff0 # define R600_SET_CTL_CONST_END 0x0003e200 # define R600_IT_SURFACE_BASE_UPDATE 0x00007300 #define RADEON_CP_PACKET_MASK 0xC0000000 #define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 #define RADEON_CP_PACKET0_REG_MASK 0x000007ff #define RADEON_CP_PACKET1_REG0_MASK 0x000007ff #define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 -#define RADEON_VTX_Z_PRESENT (1 << 31) +#define RADEON_VTX_Z_PRESENT (1U << 31) #define RADEON_VTX_PKCOLOR_PRESENT (1 << 3) #define RADEON_PRIM_TYPE_NONE (0 << 0) #define RADEON_PRIM_TYPE_POINT (1 << 0) #define RADEON_PRIM_TYPE_LINE (2 << 0) #define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0) #define RADEON_PRIM_TYPE_TRI_LIST (4 << 0) #define RADEON_PRIM_TYPE_TRI_FAN (5 << 0) #define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0) #define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0) #define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) #define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) #define RADEON_PRIM_TYPE_MASK 0xf #define RADEON_PRIM_WALK_IND (1 << 4) #define RADEON_PRIM_WALK_LIST (2 << 4) #define RADEON_PRIM_WALK_RING (3 << 4) #define RADEON_COLOR_ORDER_BGRA (0 << 6) #define RADEON_COLOR_ORDER_RGBA (1 << 6) #define RADEON_MAOS_ENABLE (1 << 7) #define RADEON_VTX_FMT_R128_MODE (0 << 8) #define RADEON_VTX_FMT_RADEON_MODE (1 << 8) #define RADEON_NUM_VERTICES_SHIFT 16 #define RADEON_COLOR_FORMAT_CI8 2 #define RADEON_COLOR_FORMAT_ARGB1555 3 #define RADEON_COLOR_FORMAT_RGB565 4 #define RADEON_COLOR_FORMAT_ARGB8888 6 #define RADEON_COLOR_FORMAT_RGB332 7 #define RADEON_COLOR_FORMAT_RGB8 9 #define RADEON_COLOR_FORMAT_ARGB4444 15 #define RADEON_TXFORMAT_I8 0 #define RADEON_TXFORMAT_AI88 1 #define RADEON_TXFORMAT_RGB332 2 #define RADEON_TXFORMAT_ARGB1555 3 #define RADEON_TXFORMAT_RGB565 4 #define RADEON_TXFORMAT_ARGB4444 5 #define RADEON_TXFORMAT_ARGB8888 6 #define RADEON_TXFORMAT_RGBA8888 7 
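/*
 * Context for the (1 << 31) -> (1U << 31) hunks throughout this diff
 * (RADEON_VTX_Z_PRESENT just above, RADEON_RBBM_ACTIVE, R600_GUI_ACTIVE,
 * and the other BUSY/ENA bits): a plain "1" is a signed int, so shifting
 * it into bit 31 leaves the representable signed range, and the resulting
 * negative value sign-extends whenever the mask is widened -- the same
 * reason the RADEON_DST_TILE_MICRO/_BOTH defines already use 2U << 30 and
 * 3U << 30.  A minimal stand-alone sketch of the difference (hypothetical
 * example, not part of these headers), assuming a typical two's-complement
 * compiler:
 */
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	/* int shift into the sign bit: formally undefined in ISO C; in
	 * practice it yields INT_MIN, which widens to 0xffffffff80000000. */
	uint64_t bad = 1 << 31;
	/* unsigned shift: well-defined, stays 0x0000000080000000. */
	uint64_t good = 1U << 31;

	printf("1 << 31  -> %#" PRIx64 "\n", bad);
	printf("1U << 31 -> %#" PRIx64 "\n", good);
	return (0);
}
/*
 * The unsigned form also keeps tests of these bits against 32-bit
 * register reads free of surprise sign conversions when the value
 * passes through a wider type.
 */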
#define RADEON_TXFORMAT_Y8 8 #define RADEON_TXFORMAT_VYUY422 10 #define RADEON_TXFORMAT_YVYU422 11 #define RADEON_TXFORMAT_DXT1 12 #define RADEON_TXFORMAT_DXT23 14 #define RADEON_TXFORMAT_DXT45 15 #define R200_PP_TXCBLEND_0 0x2f00 #define R200_PP_TXCBLEND_1 0x2f10 #define R200_PP_TXCBLEND_2 0x2f20 #define R200_PP_TXCBLEND_3 0x2f30 #define R200_PP_TXCBLEND_4 0x2f40 #define R200_PP_TXCBLEND_5 0x2f50 #define R200_PP_TXCBLEND_6 0x2f60 #define R200_PP_TXCBLEND_7 0x2f70 #define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268 #define R200_PP_TFACTOR_0 0x2ee0 #define R200_SE_VTX_FMT_0 0x2088 #define R200_SE_VAP_CNTL 0x2080 #define R200_SE_TCL_MATRIX_SEL_0 0x2230 #define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8 #define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0 #define R200_PP_TXFILTER_5 0x2ca0 #define R200_PP_TXFILTER_4 0x2c80 #define R200_PP_TXFILTER_3 0x2c60 #define R200_PP_TXFILTER_2 0x2c40 #define R200_PP_TXFILTER_1 0x2c20 #define R200_PP_TXFILTER_0 0x2c00 #define R200_PP_TXOFFSET_5 0x2d78 #define R200_PP_TXOFFSET_4 0x2d60 #define R200_PP_TXOFFSET_3 0x2d48 #define R200_PP_TXOFFSET_2 0x2d30 #define R200_PP_TXOFFSET_1 0x2d18 #define R200_PP_TXOFFSET_0 0x2d00 #define R200_PP_CUBIC_FACES_0 0x2c18 #define R200_PP_CUBIC_FACES_1 0x2c38 #define R200_PP_CUBIC_FACES_2 0x2c58 #define R200_PP_CUBIC_FACES_3 0x2c78 #define R200_PP_CUBIC_FACES_4 0x2c98 #define R200_PP_CUBIC_FACES_5 0x2cb8 #define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 #define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 #define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c #define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 #define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 #define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c #define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 #define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 #define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 #define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c #define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 #define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 #define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c #define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 #define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 #define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c #define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 #define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 #define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 #define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c #define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 #define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 #define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c #define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 #define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 #define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c #define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 #define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 #define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 #define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c #define R200_RE_AUX_SCISSOR_CNTL 0x26f0 #define R200_SE_VTE_CNTL 0x20b0 #define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 #define R200_PP_TAM_DEBUG3 0x2d9c #define R200_PP_CNTL_X 0x2cc4 #define R200_SE_VAP_CNTL_STATUS 0x2140 #define R200_RE_SCISSOR_TL_0 0x1cd8 #define R200_RE_SCISSOR_TL_1 0x1ce0 #define R200_RE_SCISSOR_TL_2 0x1ce8 #define R200_RB3D_DEPTHXY_OFFSET 0x1d60 #define R200_RE_AUX_SCISSOR_CNTL 0x26f0 #define R200_SE_VTX_STATE_CNTL 0x2180 #define R200_RE_POINTSIZE 0x2648 #define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254 #define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */ #define RADEON_PP_TEX_SIZE_1 0x1d0c #define RADEON_PP_TEX_SIZE_2 0x1d14 #define RADEON_PP_CUBIC_FACES_0 0x1d24 #define RADEON_PP_CUBIC_FACES_1 0x1d28 #define RADEON_PP_CUBIC_FACES_2 0x1d2c #define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */ #define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00 #define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14 #define RADEON_SE_TCL_STATE_FLUSH 
0x2284 #define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001 #define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000 #define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012 #define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100 #define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200 #define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001 #define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002 #define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b #define R200_3D_DRAW_IMMD_2 0xC0003500 #define R200_SE_VTX_FMT_1 0x208c #define R200_RE_CNTL 0x1c50 #define R200_RB3D_BLENDCOLOR 0x3218 #define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4 #define R200_PP_TRI_PERF 0x2cf8 #define R200_PP_AFS_0 0x2f80 #define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ #define R200_VAP_PVS_CNTL_1 0x22D0 #define RADEON_CRTC_CRNT_FRAME 0x0214 #define RADEON_CRTC2_CRNT_FRAME 0x0314 #define R500_D1CRTC_STATUS 0x609c #define R500_D2CRTC_STATUS 0x689c #define R500_CRTC_V_BLANK (1<<0) #define R500_D1CRTC_FRAME_COUNT 0x60a4 #define R500_D2CRTC_FRAME_COUNT 0x68a4 #define R500_D1MODE_V_COUNTER 0x6530 #define R500_D2MODE_V_COUNTER 0x6d30 #define R500_D1MODE_VBLANK_STATUS 0x6534 #define R500_D2MODE_VBLANK_STATUS 0x6d34 #define R500_VBLANK_OCCURED (1<<0) #define R500_VBLANK_ACK (1<<4) #define R500_VBLANK_STAT (1<<12) #define R500_VBLANK_INT (1<<16) #define R500_DxMODE_INT_MASK 0x6540 #define R500_D1MODE_INT_MASK (1<<0) #define R500_D2MODE_INT_MASK (1<<8) #define R500_DISP_INTERRUPT_STATUS 0x7edc #define R500_D1_VBLANK_INTERRUPT (1 << 4) #define R500_D2_VBLANK_INTERRUPT (1 << 5) /* R6xx/R7xx registers */ #define R600_MC_VM_FB_LOCATION 0x2180 #define R600_MC_VM_AGP_TOP 0x2184 #define R600_MC_VM_AGP_BOT 0x2188 #define R600_MC_VM_AGP_BASE 0x218c #define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190 #define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194 #define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198 #define R700_MC_VM_FB_LOCATION 0x2024 #define R700_MC_VM_AGP_TOP 0x2028 #define R700_MC_VM_AGP_BOT 0x202c #define R700_MC_VM_AGP_BASE 0x2030 #define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 #define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c #define R600_MCD_RD_A_CNTL 0x219c #define R600_MCD_RD_B_CNTL 0x21a0 #define R600_MCD_WR_A_CNTL 0x21a4 #define R600_MCD_WR_B_CNTL 0x21a8 #define R600_MCD_RD_SYS_CNTL 0x2200 #define R600_MCD_WR_SYS_CNTL 0x2214 #define R600_MCD_RD_GFX_CNTL 0x21fc #define R600_MCD_RD_HDP_CNTL 0x2204 #define R600_MCD_RD_PDMA_CNTL 0x2208 #define R600_MCD_RD_SEM_CNTL 0x220c #define R600_MCD_WR_GFX_CNTL 0x2210 #define R600_MCD_WR_HDP_CNTL 0x2218 #define R600_MCD_WR_PDMA_CNTL 0x221c #define R600_MCD_WR_SEM_CNTL 0x2220 # define R600_MCD_L1_TLB (1 << 0) # define R600_MCD_L1_FRAG_PROC (1 << 1) # define R600_MCD_L1_STRICT_ORDERING (1 << 2) # define R600_MCD_SYSTEM_ACCESS_MODE_MASK (3 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS (2 << 6) # define R600_MCD_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6) # define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8) # define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8) # define R600_MCD_SEMAPHORE_MODE (1 << 10) # define R600_MCD_WAIT_L2_QUERY (1 << 11) # define R600_MCD_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 12) # define R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15) #define R700_MC_VM_MD_L1_TLB0_CNTL 0x2654 #define R700_MC_VM_MD_L1_TLB1_CNTL 0x2658 #define R700_MC_VM_MD_L1_TLB2_CNTL 0x265c #define R700_MC_VM_MB_L1_TLB0_CNTL 0x2234 #define 
R700_MC_VM_MB_L1_TLB1_CNTL 0x2238 #define R700_MC_VM_MB_L1_TLB2_CNTL 0x223c #define R700_MC_VM_MB_L1_TLB3_CNTL 0x2240 # define R700_ENABLE_L1_TLB (1 << 0) # define R700_ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) # define R700_SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) # define R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) # define R700_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 15) # define R700_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 18) #define R700_MC_ARB_RAMCFG 0x2760 # define R700_NOOFBANK_SHIFT 0 # define R700_NOOFBANK_MASK 0x3 # define R700_NOOFRANK_SHIFT 2 # define R700_NOOFRANK_MASK 0x1 # define R700_NOOFROWS_SHIFT 3 # define R700_NOOFROWS_MASK 0x7 # define R700_NOOFCOLS_SHIFT 6 # define R700_NOOFCOLS_MASK 0x3 # define R700_CHANSIZE_SHIFT 8 # define R700_CHANSIZE_MASK 0x1 # define R700_BURSTLENGTH_SHIFT 9 # define R700_BURSTLENGTH_MASK 0x1 #define R600_RAMCFG 0x2408 # define R600_NOOFBANK_SHIFT 0 # define R600_NOOFBANK_MASK 0x1 # define R600_NOOFRANK_SHIFT 1 # define R600_NOOFRANK_MASK 0x1 # define R600_NOOFROWS_SHIFT 2 # define R600_NOOFROWS_MASK 0x7 # define R600_NOOFCOLS_SHIFT 5 # define R600_NOOFCOLS_MASK 0x3 # define R600_CHANSIZE_SHIFT 7 # define R600_CHANSIZE_MASK 0x1 # define R600_BURSTLENGTH_SHIFT 8 # define R600_BURSTLENGTH_MASK 0x1 #define R600_VM_L2_CNTL 0x1400 # define R600_VM_L2_CACHE_EN (1 << 0) # define R600_VM_L2_FRAG_PROC (1 << 1) # define R600_VM_ENABLE_PTE_CACHE_LRU_W (1 << 9) # define R600_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 13) # define R700_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 14) #define R600_VM_L2_CNTL2 0x1404 # define R600_VM_L2_CNTL2_INVALIDATE_ALL_L1_TLBS (1 << 0) # define R600_VM_L2_CNTL2_INVALIDATE_L2_CACHE (1 << 1) #define R600_VM_L2_CNTL3 0x1408 # define R600_VM_L2_CNTL3_BANK_SELECT_0(x) ((x) << 0) # define R600_VM_L2_CNTL3_BANK_SELECT_1(x) ((x) << 5) # define R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 10) # define R700_VM_L2_CNTL3_BANK_SELECT(x) ((x) << 0) # define R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 6) #define R600_VM_L2_STATUS 0x140c #define R600_VM_CONTEXT0_CNTL 0x1410 # define R600_VM_ENABLE_CONTEXT (1 << 0) # define R600_VM_PAGE_TABLE_DEPTH_FLAT (0 << 1) #define R600_VM_CONTEXT0_CNTL2 0x1430 #define R600_VM_CONTEXT0_REQUEST_RESPONSE 0x1470 #define R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490 #define R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14b0 #define R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574 #define R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594 #define R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x15b4 #define R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c #define R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c #define R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157c #define R600_HDP_HOST_PATH_CNTL 0x2c00 #define R600_GRBM_CNTL 0x8000 # define R600_GRBM_READ_TIMEOUT(x) ((x) << 0) #define R600_GRBM_STATUS 0x8010 # define R600_CMDFIFO_AVAIL_MASK 0x1f # define R700_CMDFIFO_AVAIL_MASK 0xf -# define R600_GUI_ACTIVE (1 << 31) +# define R600_GUI_ACTIVE (1U << 31) #define R600_GRBM_STATUS2 0x8014 #define R600_GRBM_SOFT_RESET 0x8020 # define R600_SOFT_RESET_CP (1 << 0) #define R600_WAIT_UNTIL 0x8040 #define R600_CP_SEM_WAIT_TIMER 0x85bc #define R600_CP_ME_CNTL 0x86d8 # define R600_CP_ME_HALT (1 << 28) #define R600_CP_QUEUE_THRESHOLDS 0x8760 # define R600_ROQ_IB1_START(x) ((x) << 0) # define R600_ROQ_IB2_START(x) ((x) << 8) #define R600_CP_MEQ_THRESHOLDS 0x8764 # define R700_STQ_SPLIT(x) ((x) << 0) # define R600_MEQ_END(x) ((x) << 16) # define R600_ROQ_END(x) ((x) << 24) #define R600_CP_PERFMON_CNTL 0x87fc #define R600_CP_RB_BASE 0xc100 #define R600_CP_RB_CNTL 0xc104 # 
define R600_RB_BUFSZ(x) ((x) << 0) # define R600_RB_BLKSZ(x) ((x) << 8) # define R600_BUF_SWAP_32BIT (2 << 16) # define R600_RB_NO_UPDATE (1 << 27) -# define R600_RB_RPTR_WR_ENA (1 << 31) +# define R600_RB_RPTR_WR_ENA (1U << 31) #define R600_CP_RB_RPTR_WR 0xc108 #define R600_CP_RB_RPTR_ADDR 0xc10c #define R600_CP_RB_RPTR_ADDR_HI 0xc110 #define R600_CP_RB_WPTR 0xc114 #define R600_CP_RB_WPTR_ADDR 0xc118 #define R600_CP_RB_WPTR_ADDR_HI 0xc11c #define R600_CP_RB_RPTR 0x8700 #define R600_CP_RB_WPTR_DELAY 0x8704 #define R600_CP_PFP_UCODE_ADDR 0xc150 #define R600_CP_PFP_UCODE_DATA 0xc154 #define R600_CP_ME_RAM_RADDR 0xc158 #define R600_CP_ME_RAM_WADDR 0xc15c #define R600_CP_ME_RAM_DATA 0xc160 #define R600_CP_DEBUG 0xc1fc #define R600_PA_CL_ENHANCE 0x8a14 # define R600_CLIP_VTX_REORDER_ENA (1 << 0) # define R600_NUM_CLIP_SEQ(x) ((x) << 1) #define R600_PA_SC_LINE_STIPPLE_STATE 0x8b10 #define R600_PA_SC_MULTI_CHIP_CNTL 0x8b20 #define R700_PA_SC_FORCE_EOV_MAX_CNTS 0x8b24 # define R700_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) # define R700_FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) #define R600_PA_SC_AA_SAMPLE_LOCS_2S 0x8b40 #define R600_PA_SC_AA_SAMPLE_LOCS_4S 0x8b44 #define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8b48 #define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8b4c # define R600_S0_X(x) ((x) << 0) # define R600_S0_Y(x) ((x) << 4) # define R600_S1_X(x) ((x) << 8) # define R600_S1_Y(x) ((x) << 12) # define R600_S2_X(x) ((x) << 16) # define R600_S2_Y(x) ((x) << 20) # define R600_S3_X(x) ((x) << 24) # define R600_S3_Y(x) ((x) << 28) # define R600_S4_X(x) ((x) << 0) # define R600_S4_Y(x) ((x) << 4) # define R600_S5_X(x) ((x) << 8) # define R600_S5_Y(x) ((x) << 12) # define R600_S6_X(x) ((x) << 16) # define R600_S6_Y(x) ((x) << 20) # define R600_S7_X(x) ((x) << 24) # define R600_S7_Y(x) ((x) << 28) #define R600_PA_SC_FIFO_SIZE 0x8bd0 # define R600_SC_PRIM_FIFO_SIZE(x) ((x) << 0) # define R600_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 8) # define R600_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 16) #define R700_PA_SC_FIFO_SIZE_R7XX 0x8bcc # define R700_SC_PRIM_FIFO_SIZE(x) ((x) << 0) # define R700_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) # define R700_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) #define R600_PA_SC_ENHANCE 0x8bf0 # define R600_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) # define R600_FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12) #define R600_PA_SC_CLIPRECT_RULE 0x2820c #define R700_PA_SC_EDGERULE 0x28230 #define R600_PA_SC_LINE_STIPPLE 0x28a0c #define R600_PA_SC_MODE_CNTL 0x28a4c #define R600_PA_SC_AA_CONFIG 0x28c04 #define R600_SX_EXPORT_BUFFER_SIZES 0x900c # define R600_COLOR_BUFFER_SIZE(x) ((x) << 0) # define R600_POSITION_BUFFER_SIZE(x) ((x) << 8) # define R600_SMX_BUFFER_SIZE(x) ((x) << 16) #define R600_SX_DEBUG_1 0x9054 # define R600_SMX_EVENT_RELEASE (1 << 0) # define R600_ENABLE_NEW_SMX_ADDRESS (1 << 16) #define R700_SX_DEBUG_1 0x9058 # define R700_ENABLE_NEW_SMX_ADDRESS (1 << 16) #define R600_SX_MISC 0x28350 #define R600_DB_DEBUG 0x9830 -# define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) +# define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE (1U << 31) #define R600_DB_WATERMARKS 0x9838 # define R600_DEPTH_FREE(x) ((x) << 0) # define R600_DEPTH_FLUSH(x) ((x) << 5) # define R600_DEPTH_PENDING_FREE(x) ((x) << 15) # define R600_DEPTH_CACHELINE_FREE(x) ((x) << 20) #define R700_DB_DEBUG3 0x98b0 # define R700_DB_CLK_OFF_DELAY(x) ((x) << 11) #define RV700_DB_DEBUG4 0x9b8c # define RV700_DISABLE_TILE_COVERED_FOR_PS_ITER (1 << 6) #define R600_VGT_CACHE_INVALIDATION 0x88c4 # define R600_CACHE_INVALIDATION(x) ((x) << 0) # define R600_VC_ONLY 0 # define 
R600_TC_ONLY 1 # define R600_VC_AND_TC 2 # define R700_AUTO_INVLD_EN(x) ((x) << 6) # define R700_NO_AUTO 0 # define R700_ES_AUTO 1 # define R700_GS_AUTO 2 # define R700_ES_AND_GS_AUTO 3 #define R600_VGT_GS_PER_ES 0x88c8 #define R600_VGT_ES_PER_GS 0x88cc #define R600_VGT_GS_PER_VS 0x88e8 #define R600_VGT_GS_VERTEX_REUSE 0x88d4 #define R600_VGT_NUM_INSTANCES 0x8974 #define R600_VGT_STRMOUT_EN 0x28ab0 #define R600_VGT_EVENT_INITIATOR 0x28a90 # define R600_CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) #define R600_VGT_VERTEX_REUSE_BLOCK_CNTL 0x28c58 # define R600_VTX_REUSE_DEPTH_MASK 0xff #define R600_VGT_OUT_DEALLOC_CNTL 0x28c5c # define R600_DEALLOC_DIST_MASK 0x7f #define R600_CB_COLOR0_BASE 0x28040 #define R600_CB_COLOR1_BASE 0x28044 #define R600_CB_COLOR2_BASE 0x28048 #define R600_CB_COLOR3_BASE 0x2804c #define R600_CB_COLOR4_BASE 0x28050 #define R600_CB_COLOR5_BASE 0x28054 #define R600_CB_COLOR6_BASE 0x28058 #define R600_CB_COLOR7_BASE 0x2805c #define R600_CB_COLOR7_FRAG 0x280fc #define R600_CB_COLOR0_SIZE 0x28060 #define R600_CB_COLOR0_VIEW 0x28080 #define R600_CB_COLOR0_INFO 0x280a0 #define R600_CB_COLOR0_TILE 0x280c0 #define R600_CB_COLOR0_FRAG 0x280e0 #define R600_CB_COLOR0_MASK 0x28100 #define AVIVO_D1MODE_VLINE_START_END 0x6538 #define AVIVO_D2MODE_VLINE_START_END 0x6d38 #define R600_CP_COHER_BASE 0x85f8 #define R600_DB_DEPTH_BASE 0x2800c #define R600_SQ_PGM_START_FS 0x28894 #define R600_SQ_PGM_START_ES 0x28880 #define R600_SQ_PGM_START_VS 0x28858 #define R600_SQ_PGM_RESOURCES_VS 0x28868 #define R600_SQ_PGM_CF_OFFSET_VS 0x288d0 #define R600_SQ_PGM_START_GS 0x2886c #define R600_SQ_PGM_START_PS 0x28840 #define R600_SQ_PGM_RESOURCES_PS 0x28850 #define R600_SQ_PGM_EXPORTS_PS 0x28854 #define R600_SQ_PGM_CF_OFFSET_PS 0x288cc #define R600_VGT_DMA_BASE 0x287e8 #define R600_VGT_DMA_BASE_HI 0x287e4 #define R600_VGT_STRMOUT_BASE_OFFSET_0 0x28b10 #define R600_VGT_STRMOUT_BASE_OFFSET_1 0x28b14 #define R600_VGT_STRMOUT_BASE_OFFSET_2 0x28b18 #define R600_VGT_STRMOUT_BASE_OFFSET_3 0x28b1c #define R600_VGT_STRMOUT_BASE_OFFSET_HI_0 0x28b44 #define R600_VGT_STRMOUT_BASE_OFFSET_HI_1 0x28b48 #define R600_VGT_STRMOUT_BASE_OFFSET_HI_2 0x28b4c #define R600_VGT_STRMOUT_BASE_OFFSET_HI_3 0x28b50 #define R600_VGT_STRMOUT_BUFFER_BASE_0 0x28ad8 #define R600_VGT_STRMOUT_BUFFER_BASE_1 0x28ae8 #define R600_VGT_STRMOUT_BUFFER_BASE_2 0x28af8 #define R600_VGT_STRMOUT_BUFFER_BASE_3 0x28b08 #define R600_VGT_STRMOUT_BUFFER_OFFSET_0 0x28adc #define R600_VGT_STRMOUT_BUFFER_OFFSET_1 0x28aec #define R600_VGT_STRMOUT_BUFFER_OFFSET_2 0x28afc #define R600_VGT_STRMOUT_BUFFER_OFFSET_3 0x28b0c #define R600_VGT_PRIMITIVE_TYPE 0x8958 #define R600_PA_SC_SCREEN_SCISSOR_TL 0x28030 #define R600_PA_SC_GENERIC_SCISSOR_TL 0x28240 #define R600_PA_SC_WINDOW_SCISSOR_TL 0x28204 #define R600_TC_CNTL 0x9608 # define R600_TC_L2_SIZE(x) ((x) << 5) # define R600_L2_DISABLE_LATE_HIT (1 << 9) #define R600_ARB_POP 0x2418 # define R600_ENABLE_TC128 (1 << 30) #define R600_ARB_GDEC_RD_CNTL 0x246c #define R600_TA_CNTL_AUX 0x9508 # define R600_DISABLE_CUBE_WRAP (1 << 0) # define R600_DISABLE_CUBE_ANISO (1 << 1) # define R700_GETLOD_SELECT(x) ((x) << 2) # define R600_SYNC_GRADIENT (1 << 24) # define R600_SYNC_WALKER (1 << 25) # define R600_SYNC_ALIGNER (1 << 26) # define R600_BILINEAR_PRECISION_6_BIT (0 << 31) -# define R600_BILINEAR_PRECISION_8_BIT (1 << 31) +# define R600_BILINEAR_PRECISION_8_BIT (1U << 31) #define R700_TCP_CNTL 0x9610 #define R600_SMX_DC_CTL0 0xa020 # define R700_USE_HASH_FUNCTION (1 << 0) # define R700_CACHE_DEPTH(x) ((x) << 1) # define 
R700_FLUSH_ALL_ON_EVENT (1 << 10) # define R700_STALL_ON_EVENT (1 << 11) #define R700_SMX_EVENT_CTL 0xa02c # define R700_ES_FLUSH_CTL(x) ((x) << 0) # define R700_GS_FLUSH_CTL(x) ((x) << 3) # define R700_ACK_FLUSH_CTL(x) ((x) << 6) # define R700_SYNC_FLUSH_CTL (1 << 8) #define R600_SQ_CONFIG 0x8c00 # define R600_VC_ENABLE (1 << 0) # define R600_EXPORT_SRC_C (1 << 1) # define R600_DX9_CONSTS (1 << 2) # define R600_ALU_INST_PREFER_VECTOR (1 << 3) # define R600_DX10_CLAMP (1 << 4) # define R600_CLAUSE_SEQ_PRIO(x) ((x) << 8) # define R600_PS_PRIO(x) ((x) << 24) # define R600_VS_PRIO(x) ((x) << 26) # define R600_GS_PRIO(x) ((x) << 28) # define R600_ES_PRIO(x) ((x) << 30) #define R600_SQ_GPR_RESOURCE_MGMT_1 0x8c04 # define R600_NUM_PS_GPRS(x) ((x) << 0) # define R600_NUM_VS_GPRS(x) ((x) << 16) # define R700_DYN_GPR_ENABLE (1 << 27) # define R600_NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) #define R600_SQ_GPR_RESOURCE_MGMT_2 0x8c08 # define R600_NUM_GS_GPRS(x) ((x) << 0) # define R600_NUM_ES_GPRS(x) ((x) << 16) #define R600_SQ_THREAD_RESOURCE_MGMT 0x8c0c # define R600_NUM_PS_THREADS(x) ((x) << 0) # define R600_NUM_VS_THREADS(x) ((x) << 8) # define R600_NUM_GS_THREADS(x) ((x) << 16) # define R600_NUM_ES_THREADS(x) ((x) << 24) #define R600_SQ_STACK_RESOURCE_MGMT_1 0x8c10 # define R600_NUM_PS_STACK_ENTRIES(x) ((x) << 0) # define R600_NUM_VS_STACK_ENTRIES(x) ((x) << 16) #define R600_SQ_STACK_RESOURCE_MGMT_2 0x8c14 # define R600_NUM_GS_STACK_ENTRIES(x) ((x) << 0) # define R600_NUM_ES_STACK_ENTRIES(x) ((x) << 16) #define R600_SQ_MS_FIFO_SIZES 0x8cf0 # define R600_CACHE_FIFO_SIZE(x) ((x) << 0) # define R600_FETCH_FIFO_HIWATER(x) ((x) << 8) # define R600_DONE_FIFO_HIWATER(x) ((x) << 16) # define R600_ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_0 0x8db0 # define R700_SIMDA_RING0(x) ((x) << 0) # define R700_SIMDA_RING1(x) ((x) << 8) # define R700_SIMDB_RING0(x) ((x) << 16) # define R700_SIMDB_RING1(x) ((x) << 24) #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_1 0x8db4 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_2 0x8db8 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_3 0x8dbc #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_4 0x8dc0 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_5 0x8dc4 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_6 0x8dc8 #define R700_SQ_DYN_GPR_SIZE_SIMD_AB_7 0x8dcc #define R600_SPI_PS_IN_CONTROL_0 0x286cc # define R600_NUM_INTERP(x) ((x) << 0) # define R600_POSITION_ENA (1 << 8) # define R600_POSITION_CENTROID (1 << 9) # define R600_POSITION_ADDR(x) ((x) << 10) # define R600_PARAM_GEN(x) ((x) << 15) # define R600_PARAM_GEN_ADDR(x) ((x) << 19) # define R600_BARYC_SAMPLE_CNTL(x) ((x) << 26) # define R600_PERSP_GRADIENT_ENA (1 << 28) # define R600_LINEAR_GRADIENT_ENA (1 << 29) # define R600_POSITION_SAMPLE (1 << 30) -# define R600_BARYC_AT_SAMPLE_ENA (1 << 31) +# define R600_BARYC_AT_SAMPLE_ENA (1U << 31) #define R600_SPI_PS_IN_CONTROL_1 0x286d0 # define R600_GEN_INDEX_PIX (1 << 0) # define R600_GEN_INDEX_PIX_ADDR(x) ((x) << 1) # define R600_FRONT_FACE_ENA (1 << 8) # define R600_FRONT_FACE_CHAN(x) ((x) << 9) # define R600_FRONT_FACE_ALL_BITS (1 << 11) # define R600_FRONT_FACE_ADDR(x) ((x) << 12) # define R600_FOG_ADDR(x) ((x) << 17) # define R600_FIXED_PT_POSITION_ENA (1 << 24) # define R600_FIXED_PT_POSITION_ADDR(x) ((x) << 25) # define R700_POSITION_ULC (1 << 30) #define R600_SPI_INPUT_Z 0x286d8 #define R600_SPI_CONFIG_CNTL 0x9100 # define R600_GPR_WRITE_PRIORITY(x) ((x) << 0) # define R600_DISABLE_INTERP_1 (1 << 5) #define R600_SPI_CONFIG_CNTL_1 0x913c # define R600_VTX_DONE_DELAY(x) ((x) << 0) # define 
R600_INTERP_ONE_PRIM_PER_ROW (1 << 4) #define R600_GB_TILING_CONFIG 0x98f0 # define R600_PIPE_TILING(x) ((x) << 1) # define R600_BANK_TILING(x) ((x) << 4) # define R600_GROUP_SIZE(x) ((x) << 6) # define R600_ROW_TILING(x) ((x) << 8) # define R600_BANK_SWAPS(x) ((x) << 11) # define R600_SAMPLE_SPLIT(x) ((x) << 14) # define R600_BACKEND_MAP(x) ((x) << 16) #define R600_DCP_TILING_CONFIG 0x6ca0 #define R600_HDP_TILING_CONFIG 0x2f3c #define R600_CC_RB_BACKEND_DISABLE 0x98f4 #define R700_CC_SYS_RB_BACKEND_DISABLE 0x3f88 # define R600_BACKEND_DISABLE(x) ((x) << 16) #define R600_CC_GC_SHADER_PIPE_CONFIG 0x8950 #define R600_GC_USER_SHADER_PIPE_CONFIG 0x8954 # define R600_INACTIVE_QD_PIPES(x) ((x) << 8) # define R600_INACTIVE_QD_PIPES_MASK (0xff << 8) # define R600_INACTIVE_SIMDS(x) ((x) << 16) # define R600_INACTIVE_SIMDS_MASK (0xff << 16) #define R700_CGTS_SYS_TCC_DISABLE 0x3f90 #define R700_CGTS_USER_SYS_TCC_DISABLE 0x3f94 #define R700_CGTS_TCC_DISABLE 0x9148 #define R700_CGTS_USER_TCC_DISABLE 0x914c /* Constants */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0 #define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1 #define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2 #define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3 #define RADEON_LAST_DISPATCH 1 #define R600_LAST_FRAME_REG R600_SCRATCH_REG0 #define R600_LAST_DISPATCH_REG R600_SCRATCH_REG1 #define R600_LAST_CLEAR_REG R600_SCRATCH_REG2 #define R600_LAST_SWI_REG R600_SCRATCH_REG3 #define RADEON_MAX_VB_AGE 0x7fffffff #define RADEON_MAX_VB_VERTS (0xffff) #define RADEON_RING_HIGH_MARK 128 #define RADEON_PCIGART_TABLE_SIZE (32*1024) #define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) #define RADEON_WRITE(reg, val) \ do { \ if (reg < 0x10000) { \ DRM_WRITE32(dev_priv->mmio, (reg), (val)); \ } else { \ DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, (reg)); \ DRM_WRITE32(dev_priv->mmio, RADEON_MM_DATA, (val)); \ } \ } while (0) #define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) #define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) #define RADEON_WRITE_PLL(addr, val) \ do { \ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \ ((addr) & 0x1f) | RADEON_PLL_WR_EN ); \ RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \ } while (0) #define RADEON_WRITE_PCIE(addr, val) \ do { \ RADEON_WRITE8(RADEON_PCIE_INDEX, \ ((addr) & 0xff)); \ RADEON_WRITE(RADEON_PCIE_DATA, (val)); \ } while (0) #define R500_WRITE_MCIND(addr, val) \ do { \ RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \ RADEON_WRITE(R520_MC_IND_DATA, (val)); \ RADEON_WRITE(R520_MC_IND_INDEX, 0); \ } while (0) #define RS480_WRITE_MCIND(addr, val) \ do { \ RADEON_WRITE(RS480_NB_MC_INDEX, \ ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \ RADEON_WRITE(RS480_NB_MC_DATA, (val)); \ RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); \ } while (0) #define RS690_WRITE_MCIND(addr, val) \ do { \ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \ RADEON_WRITE(RS690_MC_DATA, val); \ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \ } while (0) #define RS600_WRITE_MCIND(addr, val) \ do { \ RADEON_WRITE(RS600_MC_INDEX, RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | ((addr) & RS600_MC_ADDR_MASK)); \ RADEON_WRITE(RS600_MC_DATA, val); \ } while (0) #define IGP_WRITE_MCIND(addr, val) \ do { \ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || \ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) \ RS690_WRITE_MCIND(addr, val); \ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) \ 
RS600_WRITE_MCIND(addr, val); \ else \ RS480_WRITE_MCIND(addr, val); \ } while (0) #define CP_PACKET0( reg, n ) \ (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2)) #define CP_PACKET0_TABLE( reg, n ) \ (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2)) #define CP_PACKET1( reg0, reg1 ) \ (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2)) #define CP_PACKET2() \ (RADEON_CP_PACKET2) #define CP_PACKET3( pkt, n ) \ (RADEON_CP_PACKET3 | (pkt) | ((n) << 16)) /* ================================================================ * Engine control helper macros */ #define RADEON_WAIT_UNTIL_2D_IDLE() do { \ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ RADEON_WAIT_HOST_IDLECLEAN) ); \ } while (0) #define RADEON_WAIT_UNTIL_3D_IDLE() do { \ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ OUT_RING( (RADEON_WAIT_3D_IDLECLEAN | \ RADEON_WAIT_HOST_IDLECLEAN) ); \ } while (0) #define RADEON_WAIT_UNTIL_IDLE() do { \ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ RADEON_WAIT_3D_IDLECLEAN | \ RADEON_WAIT_HOST_IDLECLEAN) ); \ } while (0) #define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do { \ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ OUT_RING( RADEON_WAIT_CRTC_PFLIP ); \ } while (0) #define RADEON_FLUSH_CACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ OUT_RING(RADEON_RB3D_DC_FLUSH); \ } else { \ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ OUT_RING(R300_RB3D_DC_FLUSH); \ } \ } while (0) #define RADEON_PURGE_CACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \ } else { \ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); \ } \ } while (0) #define RADEON_FLUSH_ZCACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ OUT_RING(RADEON_RB3D_ZC_FLUSH); \ } else { \ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ OUT_RING(R300_ZC_FLUSH); \ } \ } while (0) #define RADEON_PURGE_ZCACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \ } else { \ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \ } \ } while (0) /* ================================================================ * Misc helper macros */ /* Perfbox functionality only. 
*/ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ do { \ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \ u32 head = GET_RING_HEAD( dev_priv ); \ if (head == dev_priv->ring.tail) \ dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \ } \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ do { \ struct drm_radeon_master_private *master_priv = file_priv->masterp->driver_priv;\ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ int __ret; \ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) \ __ret = r600_do_cp_idle(dev_priv); \ else \ __ret = radeon_do_cp_idle(dev_priv); \ if ( __ret ) return __ret; \ sarea_priv->last_dispatch = 0; \ radeon_freelist_reset( dev ); \ } \ } while (0) #define RADEON_DISPATCH_AGE( age ) do { \ OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \ OUT_RING( age ); \ } while (0) #define RADEON_FRAME_AGE( age ) do { \ OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \ OUT_RING( age ); \ } while (0) #define RADEON_CLEAR_AGE( age ) do { \ OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \ OUT_RING( age ); \ } while (0) #define R600_DISPATCH_AGE(age) do { \ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \ OUT_RING((R600_LAST_DISPATCH_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \ OUT_RING(age); \ } while (0) #define R600_FRAME_AGE(age) do { \ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \ OUT_RING((R600_LAST_FRAME_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \ OUT_RING(age); \ } while (0) #define R600_CLEAR_AGE(age) do { \ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \ OUT_RING((R600_LAST_CLEAR_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \ OUT_RING(age); \ } while (0) /* ================================================================ * Ring control */ #define RADEON_VERBOSE 0 #define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring; #define RADEON_RING_ALIGN 16 #define BEGIN_RING( n ) do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ } \ _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \ _align_nr += n; \ if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \ COMMIT_RING(); \ radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \ } \ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ring = dev_priv->ring.start; \ write = dev_priv->ring.tail; \ mask = dev_priv->ring.tail_mask; \ } while (0) #define ADVANCE_RING() do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ write, dev_priv->ring.tail ); \ } \ if (((dev_priv->ring.tail + _nr) & mask) != write) { \ DRM_ERROR( \ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ((dev_priv->ring.tail + _nr) & mask), \ write, __LINE__); \ } else \ dev_priv->ring.tail = write; \ } while (0) extern void radeon_commit_ring(drm_radeon_private_t *dev_priv); #define COMMIT_RING() do { \ radeon_commit_ring(dev_priv); \ } while(0) #define OUT_RING( x ) do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ (unsigned int)(x), write ); \ } \ ring[write++] = (x); \ write &= mask; \ } while (0) #define OUT_RING_REG( reg, val ) do { \ OUT_RING( CP_PACKET0( reg, 0 ) ); \ OUT_RING( val ); \ } while (0) #define OUT_RING_TABLE( tab, sz ) do { \ int _size = (sz); \ int *_tab = (int *)(tab); \ \ if (write + _size > mask) { \ int _i = (mask+1) - write; \ _size -= _i; \ while (_i > 0 ) { \ *(int *)(ring + write) = *_tab++; \ write++; \ _i--; \ } \ write = 0; \ _tab += _i; \ } \ while (_size > 0) { \ *(ring + 
write) = *_tab++; \ write++; \ _size--; \ } \ write &= mask; \ } while (0) /** * Copy given number of dwords from drm buffer to the ring buffer. */ #define OUT_RING_DRM_BUFFER(buf, sz) do { \ int _size = (sz) * 4; \ struct drm_buffer *_buf = (buf); \ int _part_size; \ while (_size > 0) { \ _part_size = _size; \ \ if (write + _part_size/4 > mask) \ _part_size = ((mask + 1) - write)*4; \ \ if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \ _part_size = PAGE_SIZE - drm_buffer_index(_buf);\ \ \ \ memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \ [drm_buffer_index(_buf)], _part_size); \ \ _size -= _part_size; \ write = (write + _part_size/4) & mask; \ drm_buffer_advance(_buf, _part_size); \ } \ } while (0) #endif /* __RADEON_DRV_H__ */ Index: head/sys/dev/drm2/radeon/radeon_reg.h =================================================================== --- head/sys/dev/drm2/radeon/radeon_reg.h (revision 258779) +++ head/sys/dev/drm2/radeon/radeon_reg.h (revision 258780) @@ -1,3713 +1,3713 @@ /* * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and * VA Linux Systems Inc., Fremont, California. * * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation on the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* * Authors: * Kevin E. Martin * Rickard E. Faith * Alan Hourihane * * References: * * !!!! FIXME !!!! * RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical * Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April * 1999. * * !!!! FIXME !!!! * RAGE 128 Software Development Manual (Technical Reference Manual P/N * SDK-G04000 Rev. 0.01), ATI Technologies: June 1999. * */ /* !!!! FIXME !!!! NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT * ON THE RADEON. A FULL AUDIT OF THIS CODE IS NEEDED! 
*/ #include __FBSDID("$FreeBSD$"); #ifndef _RADEON_REG_H_ #define _RADEON_REG_H_ #include "r300_reg.h" #include "r500_reg.h" #include "r600_reg.h" #include "evergreen_reg.h" #include "ni_reg.h" #include "si_reg.h" #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_AGP_START_MASK 0x0000FFFF #define RADEON_MC_AGP_START_SHIFT 0 #define RADEON_MC_AGP_TOP_MASK 0xFFFF0000 #define RADEON_MC_AGP_TOP_SHIFT 16 #define RADEON_MC_FB_LOCATION 0x0148 #define RADEON_MC_FB_START_MASK 0x0000FFFF #define RADEON_MC_FB_START_SHIFT 0 #define RADEON_MC_FB_TOP_MASK 0xFFFF0000 #define RADEON_MC_FB_TOP_SHIFT 16 #define RADEON_AGP_BASE_2 0x015c /* r200+ only */ #define RADEON_AGP_BASE 0x0170 #define ATI_DATATYPE_VQ 0 #define ATI_DATATYPE_CI4 1 #define ATI_DATATYPE_CI8 2 #define ATI_DATATYPE_ARGB1555 3 #define ATI_DATATYPE_RGB565 4 #define ATI_DATATYPE_RGB888 5 #define ATI_DATATYPE_ARGB8888 6 #define ATI_DATATYPE_RGB332 7 #define ATI_DATATYPE_Y8 8 #define ATI_DATATYPE_RGB8 9 #define ATI_DATATYPE_CI16 10 #define ATI_DATATYPE_VYUY_422 11 #define ATI_DATATYPE_YVYU_422 12 #define ATI_DATATYPE_AYUV_444 14 #define ATI_DATATYPE_ARGB4444 15 /* Registers for 2D/Video/Overlay */ #define RADEON_ADAPTER_ID 0x0f2c /* PCI */ #define RADEON_AGP_BASE 0x0170 #define RADEON_AGP_CNTL 0x0174 # define RADEON_AGP_APER_SIZE_256MB (0x00 << 0) # define RADEON_AGP_APER_SIZE_128MB (0x20 << 0) # define RADEON_AGP_APER_SIZE_64MB (0x30 << 0) # define RADEON_AGP_APER_SIZE_32MB (0x38 << 0) # define RADEON_AGP_APER_SIZE_16MB (0x3c << 0) # define RADEON_AGP_APER_SIZE_8MB (0x3e << 0) # define RADEON_AGP_APER_SIZE_4MB (0x3f << 0) # define RADEON_AGP_APER_SIZE_MASK (0x3f << 0) #define RADEON_STATUS_PCI_CONFIG 0x06 # define RADEON_CAP_LIST 0x100000 #define RADEON_CAPABILITIES_PTR_PCI_CONFIG 0x34 /* offset in PCI config*/ # define RADEON_CAP_PTR_MASK 0xfc /* mask off reserved bits of CAP_PTR */ # define RADEON_CAP_ID_NULL 0x00 /* End of capability list */ # define RADEON_CAP_ID_AGP 0x02 /* AGP capability ID */ # define RADEON_CAP_ID_EXP 0x10 /* PCI Express */ #define RADEON_AGP_COMMAND 0x0f60 /* PCI */ #define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/ # define RADEON_AGP_ENABLE (1<<8) #define RADEON_AGP_PLL_CNTL 0x000b /* PLL */ #define RADEON_AGP_STATUS 0x0f5c /* PCI */ # define RADEON_AGP_1X_MODE 0x01 # define RADEON_AGP_2X_MODE 0x02 # define RADEON_AGP_4X_MODE 0x04 # define RADEON_AGP_FW_MODE 0x10 # define RADEON_AGP_MODE_MASK 0x17 # define RADEON_AGPv3_MODE 0x08 # define RADEON_AGPv3_4X_MODE 0x01 # define RADEON_AGPv3_8X_MODE 0x02 #define RADEON_ATTRDR 0x03c1 /* VGA */ #define RADEON_ATTRDW 0x03c0 /* VGA */ #define RADEON_ATTRX 0x03c0 /* VGA */ #define RADEON_AUX_SC_CNTL 0x1660 # define RADEON_AUX1_SC_EN (1 << 0) # define RADEON_AUX1_SC_MODE_OR (0 << 1) # define RADEON_AUX1_SC_MODE_NAND (1 << 1) # define RADEON_AUX2_SC_EN (1 << 2) # define RADEON_AUX2_SC_MODE_OR (0 << 3) # define RADEON_AUX2_SC_MODE_NAND (1 << 3) # define RADEON_AUX3_SC_EN (1 << 4) # define RADEON_AUX3_SC_MODE_OR (0 << 5) # define RADEON_AUX3_SC_MODE_NAND (1 << 5) #define RADEON_AUX1_SC_BOTTOM 0x1670 #define RADEON_AUX1_SC_LEFT 0x1664 #define RADEON_AUX1_SC_RIGHT 0x1668 #define RADEON_AUX1_SC_TOP 0x166c #define RADEON_AUX2_SC_BOTTOM 0x1680 #define RADEON_AUX2_SC_LEFT 0x1674 #define RADEON_AUX2_SC_RIGHT 0x1678 #define RADEON_AUX2_SC_TOP 0x167c #define RADEON_AUX3_SC_BOTTOM 0x1690 #define RADEON_AUX3_SC_LEFT 0x1684 #define RADEON_AUX3_SC_RIGHT 0x1688 #define RADEON_AUX3_SC_TOP 0x168c #define RADEON_AUX_WINDOW_HORZ_CNTL 0x02d8 #define 
RADEON_AUX_WINDOW_VERT_CNTL 0x02dc #define RADEON_BASE_CODE 0x0f0b #define RADEON_BIOS_0_SCRATCH 0x0010 # define RADEON_FP_PANEL_SCALABLE (1 << 16) # define RADEON_FP_PANEL_SCALE_EN (1 << 17) # define RADEON_FP_CHIP_SCALE_EN (1 << 18) # define RADEON_DRIVER_BRIGHTNESS_EN (1 << 26) # define RADEON_DISPLAY_ROT_MASK (3 << 28) # define RADEON_DISPLAY_ROT_00 (0 << 28) # define RADEON_DISPLAY_ROT_90 (1 << 28) # define RADEON_DISPLAY_ROT_180 (2 << 28) # define RADEON_DISPLAY_ROT_270 (3 << 28) #define RADEON_BIOS_1_SCRATCH 0x0014 #define RADEON_BIOS_2_SCRATCH 0x0018 #define RADEON_BIOS_3_SCRATCH 0x001c #define RADEON_BIOS_4_SCRATCH 0x0020 # define RADEON_CRT1_ATTACHED_MASK (3 << 0) # define RADEON_CRT1_ATTACHED_MONO (1 << 0) # define RADEON_CRT1_ATTACHED_COLOR (2 << 0) # define RADEON_LCD1_ATTACHED (1 << 2) # define RADEON_DFP1_ATTACHED (1 << 3) # define RADEON_TV1_ATTACHED_MASK (3 << 4) # define RADEON_TV1_ATTACHED_COMP (1 << 4) # define RADEON_TV1_ATTACHED_SVIDEO (2 << 4) # define RADEON_CRT2_ATTACHED_MASK (3 << 8) # define RADEON_CRT2_ATTACHED_MONO (1 << 8) # define RADEON_CRT2_ATTACHED_COLOR (2 << 8) # define RADEON_DFP2_ATTACHED (1 << 11) #define RADEON_BIOS_5_SCRATCH 0x0024 # define RADEON_LCD1_ON (1 << 0) # define RADEON_CRT1_ON (1 << 1) # define RADEON_TV1_ON (1 << 2) # define RADEON_DFP1_ON (1 << 3) # define RADEON_CRT2_ON (1 << 5) # define RADEON_CV1_ON (1 << 6) # define RADEON_DFP2_ON (1 << 7) # define RADEON_LCD1_CRTC_MASK (1 << 8) # define RADEON_LCD1_CRTC_SHIFT 8 # define RADEON_CRT1_CRTC_MASK (1 << 9) # define RADEON_CRT1_CRTC_SHIFT 9 # define RADEON_TV1_CRTC_MASK (1 << 10) # define RADEON_TV1_CRTC_SHIFT 10 # define RADEON_DFP1_CRTC_MASK (1 << 11) # define RADEON_DFP1_CRTC_SHIFT 11 # define RADEON_CRT2_CRTC_MASK (1 << 12) # define RADEON_CRT2_CRTC_SHIFT 12 # define RADEON_CV1_CRTC_MASK (1 << 13) # define RADEON_CV1_CRTC_SHIFT 13 # define RADEON_DFP2_CRTC_MASK (1 << 14) # define RADEON_DFP2_CRTC_SHIFT 14 # define RADEON_ACC_REQ_LCD1 (1 << 16) # define RADEON_ACC_REQ_CRT1 (1 << 17) # define RADEON_ACC_REQ_TV1 (1 << 18) # define RADEON_ACC_REQ_DFP1 (1 << 19) # define RADEON_ACC_REQ_CRT2 (1 << 21) # define RADEON_ACC_REQ_TV2 (1 << 22) # define RADEON_ACC_REQ_DFP2 (1 << 23) #define RADEON_BIOS_6_SCRATCH 0x0028 # define RADEON_ACC_MODE_CHANGE (1 << 2) # define RADEON_EXT_DESKTOP_MODE (1 << 3) # define RADEON_LCD_DPMS_ON (1 << 20) # define RADEON_CRT_DPMS_ON (1 << 21) # define RADEON_TV_DPMS_ON (1 << 22) # define RADEON_DFP_DPMS_ON (1 << 23) # define RADEON_DPMS_MASK (3 << 24) # define RADEON_DPMS_ON (0 << 24) # define RADEON_DPMS_STANDBY (1 << 24) # define RADEON_DPMS_SUSPEND (2 << 24) # define RADEON_DPMS_OFF (3 << 24) # define RADEON_SCREEN_BLANKING (1 << 26) # define RADEON_DRIVER_CRITICAL (1 << 27) # define RADEON_DISPLAY_SWITCHING_DIS (1 << 30) #define RADEON_BIOS_7_SCRATCH 0x002c # define RADEON_SYS_HOTKEY (1 << 10) # define RADEON_DRV_LOADED (1 << 12) #define RADEON_BIOS_ROM 0x0f30 /* PCI */ #define RADEON_BIST 0x0f0f /* PCI */ #define RADEON_BRUSH_DATA0 0x1480 #define RADEON_BRUSH_DATA1 0x1484 #define RADEON_BRUSH_DATA10 0x14a8 #define RADEON_BRUSH_DATA11 0x14ac #define RADEON_BRUSH_DATA12 0x14b0 #define RADEON_BRUSH_DATA13 0x14b4 #define RADEON_BRUSH_DATA14 0x14b8 #define RADEON_BRUSH_DATA15 0x14bc #define RADEON_BRUSH_DATA16 0x14c0 #define RADEON_BRUSH_DATA17 0x14c4 #define RADEON_BRUSH_DATA18 0x14c8 #define RADEON_BRUSH_DATA19 0x14cc #define RADEON_BRUSH_DATA2 0x1488 #define RADEON_BRUSH_DATA20 0x14d0 #define RADEON_BRUSH_DATA21 0x14d4 #define RADEON_BRUSH_DATA22 0x14d8 #define 
RADEON_BRUSH_DATA23 0x14dc #define RADEON_BRUSH_DATA24 0x14e0 #define RADEON_BRUSH_DATA25 0x14e4 #define RADEON_BRUSH_DATA26 0x14e8 #define RADEON_BRUSH_DATA27 0x14ec #define RADEON_BRUSH_DATA28 0x14f0 #define RADEON_BRUSH_DATA29 0x14f4 #define RADEON_BRUSH_DATA3 0x148c #define RADEON_BRUSH_DATA30 0x14f8 #define RADEON_BRUSH_DATA31 0x14fc #define RADEON_BRUSH_DATA32 0x1500 #define RADEON_BRUSH_DATA33 0x1504 #define RADEON_BRUSH_DATA34 0x1508 #define RADEON_BRUSH_DATA35 0x150c #define RADEON_BRUSH_DATA36 0x1510 #define RADEON_BRUSH_DATA37 0x1514 #define RADEON_BRUSH_DATA38 0x1518 #define RADEON_BRUSH_DATA39 0x151c #define RADEON_BRUSH_DATA4 0x1490 #define RADEON_BRUSH_DATA40 0x1520 #define RADEON_BRUSH_DATA41 0x1524 #define RADEON_BRUSH_DATA42 0x1528 #define RADEON_BRUSH_DATA43 0x152c #define RADEON_BRUSH_DATA44 0x1530 #define RADEON_BRUSH_DATA45 0x1534 #define RADEON_BRUSH_DATA46 0x1538 #define RADEON_BRUSH_DATA47 0x153c #define RADEON_BRUSH_DATA48 0x1540 #define RADEON_BRUSH_DATA49 0x1544 #define RADEON_BRUSH_DATA5 0x1494 #define RADEON_BRUSH_DATA50 0x1548 #define RADEON_BRUSH_DATA51 0x154c #define RADEON_BRUSH_DATA52 0x1550 #define RADEON_BRUSH_DATA53 0x1554 #define RADEON_BRUSH_DATA54 0x1558 #define RADEON_BRUSH_DATA55 0x155c #define RADEON_BRUSH_DATA56 0x1560 #define RADEON_BRUSH_DATA57 0x1564 #define RADEON_BRUSH_DATA58 0x1568 #define RADEON_BRUSH_DATA59 0x156c #define RADEON_BRUSH_DATA6 0x1498 #define RADEON_BRUSH_DATA60 0x1570 #define RADEON_BRUSH_DATA61 0x1574 #define RADEON_BRUSH_DATA62 0x1578 #define RADEON_BRUSH_DATA63 0x157c #define RADEON_BRUSH_DATA7 0x149c #define RADEON_BRUSH_DATA8 0x14a0 #define RADEON_BRUSH_DATA9 0x14a4 #define RADEON_BRUSH_SCALE 0x1470 #define RADEON_BRUSH_Y_X 0x1474 #define RADEON_BUS_CNTL 0x0030 # define RADEON_BUS_MASTER_DIS (1 << 6) # define RADEON_BUS_BIOS_DIS_ROM (1 << 12) # define RS600_BUS_MASTER_DIS (1 << 14) # define RS600_MSI_REARM (1 << 20) /* rs600/rs690/rs740 */ # define RADEON_BUS_RD_DISCARD_EN (1 << 24) # define RADEON_BUS_RD_ABORT_EN (1 << 25) # define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28) # define RADEON_BUS_WRT_BURST (1 << 29) # define RADEON_BUS_READ_BURST (1 << 30) #define RADEON_BUS_CNTL1 0x0034 # define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) #define RV370_BUS_CNTL 0x004c # define RV370_BUS_BIOS_DIS_ROM (1 << 2) /* rv370/rv380, rv410, r423/r430/r480, r5xx */ #define RADEON_MSI_REARM_EN 0x0160 # define RV370_MSI_REARM_EN (1 << 0) /* #define RADEON_PCIE_INDEX 0x0030 */ /* #define RADEON_PCIE_DATA 0x0034 */ #define RADEON_PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE */ # define RADEON_PCIE_LC_LINK_WIDTH_SHIFT 0 # define RADEON_PCIE_LC_LINK_WIDTH_MASK 0x7 # define RADEON_PCIE_LC_LINK_WIDTH_X0 0 # define RADEON_PCIE_LC_LINK_WIDTH_X1 1 # define RADEON_PCIE_LC_LINK_WIDTH_X2 2 # define RADEON_PCIE_LC_LINK_WIDTH_X4 3 # define RADEON_PCIE_LC_LINK_WIDTH_X8 4 # define RADEON_PCIE_LC_LINK_WIDTH_X12 5 # define RADEON_PCIE_LC_LINK_WIDTH_X16 6 # define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT 4 # define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK 0x70 # define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) # define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) # define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) # define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) # define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9) # define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10) # define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11) # define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12) # define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13) #define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c #define 
R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c #define RADEON_CACHE_CNTL 0x1724 #define RADEON_CACHE_LINE 0x0f0c /* PCI */ #define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */ #define RADEON_CAPABILITIES_PTR 0x0f34 /* PCI */ #define RADEON_CLK_PIN_CNTL 0x0001 /* PLL */ # define RADEON_DONT_USE_XTALIN (1 << 4) # define RADEON_SCLK_DYN_START_CNTL (1 << 15) #define RADEON_CLOCK_CNTL_DATA 0x000c #define RADEON_CLOCK_CNTL_INDEX 0x0008 # define RADEON_PLL_WR_EN (1 << 7) # define RADEON_PLL_DIV_SEL (3 << 8) # define RADEON_PLL2_DIV_SEL_MASK (~(3 << 8)) #define RADEON_CLK_PWRMGT_CNTL 0x0014 # define RADEON_ENGIN_DYNCLK_MODE (1 << 12) # define RADEON_ACTIVE_HILO_LAT_MASK (3 << 13) # define RADEON_ACTIVE_HILO_LAT_SHIFT 13 # define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12) # define RADEON_MC_BUSY (1 << 16) # define RADEON_DLL_READY (1 << 19) # define RADEON_CG_NO1_DEBUG_0 (1 << 24) # define RADEON_CG_NO1_DEBUG_MASK (0x1f << 24) # define RADEON_DYN_STOP_MODE_MASK (7 << 21) # define RADEON_TVPLL_PWRMGT_OFF (1 << 30) -# define RADEON_TVCLK_TURNOFF (1 << 31) +# define RADEON_TVCLK_TURNOFF (1U << 31) #define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ # define RADEON_PM_MODE_SEL (1 << 13) # define RADEON_TCL_BYPASS_DISABLE (1 << 20) #define RADEON_CLR_CMP_CLR_3D 0x1a24 #define RADEON_CLR_CMP_CLR_DST 0x15c8 #define RADEON_CLR_CMP_CLR_SRC 0x15c4 #define RADEON_CLR_CMP_CNTL 0x15c0 # define RADEON_SRC_CMP_EQ_COLOR (4 << 0) # define RADEON_SRC_CMP_NEQ_COLOR (5 << 0) # define RADEON_CLR_CMP_SRC_SOURCE (1 << 24) #define RADEON_CLR_CMP_MASK 0x15cc # define RADEON_CLR_CMP_MSK 0xffffffff #define RADEON_CLR_CMP_MASK_3D 0x1A28 #define RADEON_COMMAND 0x0f04 /* PCI */ #define RADEON_COMPOSITE_SHADOW_ID 0x1a0c #define RADEON_CONFIG_APER_0_BASE 0x0100 #define RADEON_CONFIG_APER_1_BASE 0x0104 #define RADEON_CONFIG_APER_SIZE 0x0108 #define RADEON_CONFIG_BONDS 0x00e8 #define RADEON_CONFIG_CNTL 0x00e0 # define RADEON_CFG_VGA_RAM_EN (1 << 8) # define RADEON_CFG_VGA_IO_DIS (1 << 9) # define RADEON_CFG_ATI_REV_A11 (0 << 16) # define RADEON_CFG_ATI_REV_A12 (1 << 16) # define RADEON_CFG_ATI_REV_A13 (2 << 16) # define RADEON_CFG_ATI_REV_ID_MASK (0xf << 16) #define RADEON_CONFIG_MEMSIZE 0x00f8 #define RADEON_CONFIG_MEMSIZE_EMBEDDED 0x0114 #define RADEON_CONFIG_REG_1_BASE 0x010c #define RADEON_CONFIG_REG_APER_SIZE 0x0110 #define RADEON_CONFIG_XSTRAP 0x00e4 #define RADEON_CONSTANT_COLOR_C 0x1d34 # define RADEON_CONSTANT_COLOR_MASK 0x00ffffff # define RADEON_CONSTANT_COLOR_ONE 0x00ffffff # define RADEON_CONSTANT_COLOR_ZERO 0x00000000 #define RADEON_CRC_CMDFIFO_ADDR 0x0740 #define RADEON_CRC_CMDFIFO_DOUT 0x0744 #define RADEON_GRPH_BUFFER_CNTL 0x02f0 # define RADEON_GRPH_START_REQ_MASK (0x7f) # define RADEON_GRPH_START_REQ_SHIFT 0 # define RADEON_GRPH_STOP_REQ_MASK (0x7f<<8) # define RADEON_GRPH_STOP_REQ_SHIFT 8 # define RADEON_GRPH_CRITICAL_POINT_MASK (0x7f<<16) # define RADEON_GRPH_CRITICAL_POINT_SHIFT 16 # define RADEON_GRPH_CRITICAL_CNTL (1<<28) # define RADEON_GRPH_BUFFER_SIZE (1<<29) # define RADEON_GRPH_CRITICAL_AT_SOF (1<<30) # define RADEON_GRPH_STOP_CNTL (1<<31) #define RADEON_GRPH2_BUFFER_CNTL 0x03f0 # define RADEON_GRPH2_START_REQ_MASK (0x7f) # define RADEON_GRPH2_START_REQ_SHIFT 0 # define RADEON_GRPH2_STOP_REQ_MASK (0x7f<<8) # define RADEON_GRPH2_STOP_REQ_SHIFT 8 # define RADEON_GRPH2_CRITICAL_POINT_MASK (0x7f<<16) # define RADEON_GRPH2_CRITICAL_POINT_SHIFT 16 # define RADEON_GRPH2_CRITICAL_CNTL (1<<28) # define RADEON_GRPH2_BUFFER_SIZE (1<<29) # define RADEON_GRPH2_CRITICAL_AT_SOF (1<<30) # define RADEON_GRPH2_STOP_CNTL (1<<31) #define 
RADEON_CRTC_CRNT_FRAME 0x0214 #define RADEON_CRTC_EXT_CNTL 0x0054 # define RADEON_CRTC_VGA_XOVERSCAN (1 << 0) # define RADEON_VGA_ATI_LINEAR (1 << 3) # define RADEON_XCRT_CNT_EN (1 << 6) # define RADEON_CRTC_HSYNC_DIS (1 << 8) # define RADEON_CRTC_VSYNC_DIS (1 << 9) # define RADEON_CRTC_DISPLAY_DIS (1 << 10) # define RADEON_CRTC_SYNC_TRISTAT (1 << 11) # define RADEON_CRTC_CRT_ON (1 << 15) #define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055 # define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0) # define RADEON_CRTC_VSYNC_DIS_BYTE (1 << 1) # define RADEON_CRTC_DISPLAY_DIS_BYTE (1 << 2) #define RADEON_CRTC_GEN_CNTL 0x0050 # define RADEON_CRTC_DBL_SCAN_EN (1 << 0) # define RADEON_CRTC_INTERLACE_EN (1 << 1) # define RADEON_CRTC_CSYNC_EN (1 << 4) # define RADEON_CRTC_ICON_EN (1 << 15) # define RADEON_CRTC_CUR_EN (1 << 16) # define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17) # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) # define RADEON_CRTC_CUR_MODE_SHIFT 20 # define RADEON_CRTC_CUR_MODE_MONO 0 # define RADEON_CRTC_CUR_MODE_24BPP 2 # define RADEON_CRTC_EXT_DISP_EN (1 << 24) # define RADEON_CRTC_EN (1 << 25) # define RADEON_CRTC_DISP_REQ_EN_B (1 << 26) #define RADEON_CRTC2_GEN_CNTL 0x03f8 # define RADEON_CRTC2_DBL_SCAN_EN (1 << 0) # define RADEON_CRTC2_INTERLACE_EN (1 << 1) # define RADEON_CRTC2_SYNC_TRISTAT (1 << 4) # define RADEON_CRTC2_HSYNC_TRISTAT (1 << 5) # define RADEON_CRTC2_VSYNC_TRISTAT (1 << 6) # define RADEON_CRTC2_CRT2_ON (1 << 7) # define RADEON_CRTC2_PIX_WIDTH_SHIFT 8 # define RADEON_CRTC2_PIX_WIDTH_MASK (0xf << 8) # define RADEON_CRTC2_ICON_EN (1 << 15) # define RADEON_CRTC2_CUR_EN (1 << 16) # define RADEON_CRTC2_CUR_MODE_MASK (7 << 20) # define RADEON_CRTC2_DISP_DIS (1 << 23) # define RADEON_CRTC2_EN (1 << 25) # define RADEON_CRTC2_DISP_REQ_EN_B (1 << 26) # define RADEON_CRTC2_CSYNC_EN (1 << 27) # define RADEON_CRTC2_HSYNC_DIS (1 << 28) # define RADEON_CRTC2_VSYNC_DIS (1 << 29) #define RADEON_CRTC_MORE_CNTL 0x27c # define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2) # define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3) # define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4) # define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5) #define RADEON_CRTC_GUI_TRIG_VLINE 0x0218 #define RADEON_CRTC_H_SYNC_STRT_WID 0x0204 # define RADEON_CRTC_H_SYNC_STRT_PIX (0x07 << 0) # define RADEON_CRTC_H_SYNC_STRT_CHAR (0x3ff << 3) # define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3 # define RADEON_CRTC_H_SYNC_WID (0x3f << 16) # define RADEON_CRTC_H_SYNC_WID_SHIFT 16 # define RADEON_CRTC_H_SYNC_POL (1 << 23) #define RADEON_CRTC2_H_SYNC_STRT_WID 0x0304 # define RADEON_CRTC2_H_SYNC_STRT_PIX (0x07 << 0) # define RADEON_CRTC2_H_SYNC_STRT_CHAR (0x3ff << 3) # define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3 # define RADEON_CRTC2_H_SYNC_WID (0x3f << 16) # define RADEON_CRTC2_H_SYNC_WID_SHIFT 16 # define RADEON_CRTC2_H_SYNC_POL (1 << 23) #define RADEON_CRTC_H_TOTAL_DISP 0x0200 # define RADEON_CRTC_H_TOTAL (0x03ff << 0) # define RADEON_CRTC_H_TOTAL_SHIFT 0 # define RADEON_CRTC_H_DISP (0x01ff << 16) # define RADEON_CRTC_H_DISP_SHIFT 16 #define RADEON_CRTC2_H_TOTAL_DISP 0x0300 # define RADEON_CRTC2_H_TOTAL (0x03ff << 0) # define RADEON_CRTC2_H_TOTAL_SHIFT 0 # define RADEON_CRTC2_H_DISP (0x01ff << 16) # define RADEON_CRTC2_H_DISP_SHIFT 16 #define RADEON_CRTC_OFFSET_RIGHT 0x0220 #define RADEON_CRTC_OFFSET 0x0224 # define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30) # define RADEON_CRTC_OFFSET__OFFSET_LOCK (1<<31) #define RADEON_CRTC2_OFFSET 0x0324 # define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30) # define RADEON_CRTC2_OFFSET__OFFSET_LOCK (1<<31) #define 
RADEON_CRTC_OFFSET_CNTL 0x0228 # define RADEON_CRTC_TILE_LINE_SHIFT 0 # define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT 4 # define R300_CRTC_X_Y_MODE_EN_RIGHT (1 << 6) # define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK (3 << 7) # define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO (0 << 7) # define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7) # define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7) # define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS (3 << 7) # define R300_CRTC_X_Y_MODE_EN (1 << 9) # define R300_CRTC_MICRO_TILE_BUFFER_MASK (3 << 10) # define R300_CRTC_MICRO_TILE_BUFFER_AUTO (0 << 10) # define R300_CRTC_MICRO_TILE_BUFFER_SINGLE (1 << 10) # define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE (2 << 10) # define R300_CRTC_MICRO_TILE_BUFFER_DIS (3 << 10) # define R300_CRTC_MICRO_TILE_EN_RIGHT (1 << 12) # define R300_CRTC_MICRO_TILE_EN (1 << 13) # define R300_CRTC_MACRO_TILE_EN_RIGHT (1 << 14) # define R300_CRTC_MACRO_TILE_EN (1 << 15) # define RADEON_CRTC_TILE_EN_RIGHT (1 << 14) # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) # define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28) # define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29) #define R300_CRTC_TILE_X0_Y0 0x0350 #define R300_CRTC2_TILE_X0_Y0 0x0358 #define RADEON_CRTC2_OFFSET_CNTL 0x0328 # define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16) # define RADEON_CRTC2_TILE_EN (1 << 15) #define RADEON_CRTC_PITCH 0x022c # define RADEON_CRTC_PITCH__SHIFT 0 # define RADEON_CRTC_PITCH__RIGHT_SHIFT 16 #define RADEON_CRTC2_PITCH 0x032c #define RADEON_CRTC_STATUS 0x005c # define RADEON_CRTC_VBLANK_CUR (1 << 0) # define RADEON_CRTC_VBLANK_SAVE (1 << 1) # define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1) #define RADEON_CRTC2_STATUS 0x03fc # define RADEON_CRTC2_VBLANK_CUR (1 << 0) # define RADEON_CRTC2_VBLANK_SAVE (1 << 1) # define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1) #define RADEON_CRTC_V_SYNC_STRT_WID 0x020c # define RADEON_CRTC_V_SYNC_STRT (0x7ff << 0) # define RADEON_CRTC_V_SYNC_STRT_SHIFT 0 # define RADEON_CRTC_V_SYNC_WID (0x1f << 16) # define RADEON_CRTC_V_SYNC_WID_SHIFT 16 # define RADEON_CRTC_V_SYNC_POL (1 << 23) #define RADEON_CRTC2_V_SYNC_STRT_WID 0x030c # define RADEON_CRTC2_V_SYNC_STRT (0x7ff << 0) # define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0 # define RADEON_CRTC2_V_SYNC_WID (0x1f << 16) # define RADEON_CRTC2_V_SYNC_WID_SHIFT 16 # define RADEON_CRTC2_V_SYNC_POL (1 << 23) #define RADEON_CRTC_V_TOTAL_DISP 0x0208 # define RADEON_CRTC_V_TOTAL (0x07ff << 0) # define RADEON_CRTC_V_TOTAL_SHIFT 0 # define RADEON_CRTC_V_DISP (0x07ff << 16) # define RADEON_CRTC_V_DISP_SHIFT 16 #define RADEON_CRTC2_V_TOTAL_DISP 0x0308 # define RADEON_CRTC2_V_TOTAL (0x07ff << 0) # define RADEON_CRTC2_V_TOTAL_SHIFT 0 # define RADEON_CRTC2_V_DISP (0x07ff << 16) # define RADEON_CRTC2_V_DISP_SHIFT 16 #define RADEON_CRTC_VLINE_CRNT_VLINE 0x0210 # define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) #define RADEON_CRTC2_CRNT_FRAME 0x0314 #define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 #define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 #define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ #define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ #define RADEON_CUR_CLR0 0x026c #define RADEON_CUR_CLR1 0x0270 #define RADEON_CUR_HORZ_VERT_OFF 0x0268 #define RADEON_CUR_HORZ_VERT_POSN 0x0264 #define RADEON_CUR_OFFSET 0x0260 -# define RADEON_CUR_LOCK (1 << 31) +# define RADEON_CUR_LOCK (1U << 31) #define RADEON_CUR2_CLR0 0x036c #define RADEON_CUR2_CLR1 0x0370 #define RADEON_CUR2_HORZ_VERT_OFF 0x0368 #define RADEON_CUR2_HORZ_VERT_POSN 0x0364 
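
Every -/+ pair in this hunk, like RADEON_CUR_LOCK just above and RADEON_CUR2_LOCK just below, makes the same one-character fix: with a 32-bit int, (1 << 31) shifts a one into the sign bit, which is undefined behavior for signed integers in C, and even where a compiler tolerates it the result is the negative value INT_MIN, which sign-extends when widened to 64 bits. (1U << 31) is the well-defined unsigned constant 0x80000000. A minimal standalone sketch of the difference follows; the CURSOR_LOCK_* names and the main() harness are illustrative, not part of the diff:

#include <stdint.h>
#include <stdio.h>

#define CURSOR_LOCK_BAD		(1 << 31)	/* UB: shift into the sign bit */
#define CURSOR_LOCK_GOOD	(1U << 31)	/* well-defined 0x80000000 */

int
main(void)
{
	uint64_t wide;

	/* The unsigned form sets only bit 31 when widened to 64 bits. */
	wide = 0;
	wide |= CURSOR_LOCK_GOOD;
	printf("good: 0x%016jx\n", (uintmax_t)wide);	/* 0x0000000080000000 */

	/*
	 * On typical two's-complement compilers the signed form evaluates
	 * to INT_MIN and sign-extends, setting bits 31..63 as well.
	 */
	wide = 0;
	wide |= CURSOR_LOCK_BAD;
	printf("bad:  0x%016jx\n", (uintmax_t)wide);	/* 0xffffffff80000000 */
	return (0);
}
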
#define RADEON_CUR2_OFFSET 0x0360 -# define RADEON_CUR2_LOCK (1 << 31) +# define RADEON_CUR2_LOCK (1U << 31) #define RADEON_DAC_CNTL 0x0058 # define RADEON_DAC_RANGE_CNTL (3 << 0) # define RADEON_DAC_RANGE_CNTL_PS2 (2 << 0) # define RADEON_DAC_RANGE_CNTL_MASK 0x03 # define RADEON_DAC_BLANKING (1 << 2) # define RADEON_DAC_CMP_EN (1 << 3) # define RADEON_DAC_CMP_OUTPUT (1 << 7) # define RADEON_DAC_8BIT_EN (1 << 8) # define RADEON_DAC_TVO_EN (1 << 10) # define RADEON_DAC_VGA_ADR_EN (1 << 13) # define RADEON_DAC_PDWN (1 << 15) # define RADEON_DAC_MASK_ALL (0xff << 24) #define RADEON_DAC_CNTL2 0x007c # define RADEON_DAC2_TV_CLK_SEL (0 << 1) # define RADEON_DAC2_DAC_CLK_SEL (1 << 0) # define RADEON_DAC2_DAC2_CLK_SEL (1 << 1) # define RADEON_DAC2_PALETTE_ACC_CTL (1 << 5) # define RADEON_DAC2_CMP_EN (1 << 7) # define RADEON_DAC2_CMP_OUT_R (1 << 8) # define RADEON_DAC2_CMP_OUT_G (1 << 9) # define RADEON_DAC2_CMP_OUT_B (1 << 10) # define RADEON_DAC2_CMP_OUTPUT (1 << 11) #define RADEON_DAC_EXT_CNTL 0x0280 # define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0) # define RADEON_DAC2_FORCE_DATA_EN (1 << 1) # define RADEON_DAC_FORCE_BLANK_OFF_EN (1 << 4) # define RADEON_DAC_FORCE_DATA_EN (1 << 5) # define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6) # define RADEON_DAC_FORCE_DATA_SEL_R (0 << 6) # define RADEON_DAC_FORCE_DATA_SEL_G (1 << 6) # define RADEON_DAC_FORCE_DATA_SEL_B (2 << 6) # define RADEON_DAC_FORCE_DATA_SEL_RGB (3 << 6) # define RADEON_DAC_FORCE_DATA_MASK 0x0003ff00 # define RADEON_DAC_FORCE_DATA_SHIFT 8 #define RADEON_DAC_MACRO_CNTL 0x0d04 # define RADEON_DAC_PDWN_R (1 << 16) # define RADEON_DAC_PDWN_G (1 << 17) # define RADEON_DAC_PDWN_B (1 << 18) #define RADEON_DISP_PWR_MAN 0x0d08 # define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0) # define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4) # define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8) # define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8) # define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8) # define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8) # define RADEON_DISP_D3_RST (1 << 16) # define RADEON_DISP_D3_REG_RST (1 << 17) # define RADEON_DISP_D3_GRPH_RST (1 << 18) # define RADEON_DISP_D3_SUBPIC_RST (1 << 19) # define RADEON_DISP_D3_OV0_RST (1 << 20) # define RADEON_DISP_D1D2_GRPH_RST (1 << 21) # define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22) # define RADEON_DISP_D1D2_OV0_RST (1 << 23) # define RADEON_DIG_TMDS_ENABLE_RST (1 << 24) # define RADEON_TV_ENABLE_RST (1 << 25) # define RADEON_AUTO_PWRUP_EN (1 << 26) #define RADEON_TV_DAC_CNTL 0x088c # define RADEON_TV_DAC_NBLANK (1 << 0) # define RADEON_TV_DAC_NHOLD (1 << 1) # define RADEON_TV_DAC_PEDESTAL (1 << 2) # define RADEON_TV_MONITOR_DETECT_EN (1 << 4) # define RADEON_TV_DAC_CMPOUT (1 << 5) # define RADEON_TV_DAC_STD_MASK (3 << 8) # define RADEON_TV_DAC_STD_PAL (0 << 8) # define RADEON_TV_DAC_STD_NTSC (1 << 8) # define RADEON_TV_DAC_STD_PS2 (2 << 8) # define RADEON_TV_DAC_STD_RS343 (3 << 8) # define RADEON_TV_DAC_BGSLEEP (1 << 6) # define RADEON_TV_DAC_BGADJ_MASK (0xf << 16) # define RADEON_TV_DAC_BGADJ_SHIFT 16 # define RADEON_TV_DAC_DACADJ_MASK (0xf << 20) # define RADEON_TV_DAC_DACADJ_SHIFT 20 # define RADEON_TV_DAC_RDACPD (1 << 24) # define RADEON_TV_DAC_GDACPD (1 << 25) # define RADEON_TV_DAC_BDACPD (1 << 26) # define RADEON_TV_DAC_RDACDET (1 << 29) # define RADEON_TV_DAC_GDACDET (1 << 30) -# define RADEON_TV_DAC_BDACDET (1 << 31) +# define RADEON_TV_DAC_BDACDET (1U << 31) # define R420_TV_DAC_DACADJ_MASK (0x1f << 20) # define R420_TV_DAC_RDACPD (1 << 25) # define R420_TV_DAC_GDACPD (1 << 26) # define R420_TV_DAC_BDACPD (1 
<< 27) # define R420_TV_DAC_TVENABLE (1 << 28) #define RADEON_DISP_HW_DEBUG 0x0d14 # define RADEON_CRT2_DISP1_SEL (1 << 5) #define RADEON_DISP_OUTPUT_CNTL 0x0d64 # define RADEON_DISP_DAC_SOURCE_MASK 0x03 # define RADEON_DISP_DAC2_SOURCE_MASK 0x0c # define RADEON_DISP_DAC_SOURCE_CRTC2 0x01 # define RADEON_DISP_DAC_SOURCE_RMX 0x02 # define RADEON_DISP_DAC_SOURCE_LTU 0x03 # define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04 # define RADEON_DISP_TVDAC_SOURCE_MASK (0x03 << 2) # define RADEON_DISP_TVDAC_SOURCE_CRTC 0x0 # define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2) # define RADEON_DISP_TVDAC_SOURCE_RMX (0x02 << 2) # define RADEON_DISP_TVDAC_SOURCE_LTU (0x03 << 2) # define RADEON_DISP_TRANS_MATRIX_MASK (0x03 << 4) # define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4) # define RADEON_DISP_TRANS_MATRIX_GRAPHICS (0x01 << 4) # define RADEON_DISP_TRANS_MATRIX_VIDEO (0x02 << 4) # define RADEON_DISP_TV_SOURCE_CRTC (1 << 16) /* crtc1 or crtc2 */ # define RADEON_DISP_TV_SOURCE_LTU (0 << 16) /* linear transform unit */ #define RADEON_DISP_TV_OUT_CNTL 0x0d6c # define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16) # define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16) #define RADEON_DAC_CRC_SIG 0x02cc #define RADEON_DAC_DATA 0x03c9 /* VGA */ #define RADEON_DAC_MASK 0x03c6 /* VGA */ #define RADEON_DAC_R_INDEX 0x03c7 /* VGA */ #define RADEON_DAC_W_INDEX 0x03c8 /* VGA */ #define RADEON_DDA_CONFIG 0x02e0 #define RADEON_DDA_ON_OFF 0x02e4 #define RADEON_DEFAULT_OFFSET 0x16e0 #define RADEON_DEFAULT_PITCH 0x16e4 #define RADEON_DEFAULT_SC_BOTTOM_RIGHT 0x16e8 # define RADEON_DEFAULT_SC_RIGHT_MAX (0x1fff << 0) # define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16) #define RADEON_DESTINATION_3D_CLR_CMP_VAL 0x1820 #define RADEON_DESTINATION_3D_CLR_CMP_MSK 0x1824 #define RADEON_DEVICE_ID 0x0f02 /* PCI */ #define RADEON_DISP_MISC_CNTL 0x0d00 # define RADEON_SOFT_RESET_GRPH_PP (1 << 0) #define RADEON_DISP_MERGE_CNTL 0x0d60 # define RADEON_DISP_ALPHA_MODE_MASK 0x03 # define RADEON_DISP_ALPHA_MODE_KEY 0 # define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1 # define RADEON_DISP_ALPHA_MODE_GLOBAL 2 # define RADEON_DISP_RGB_OFFSET_EN (1 << 8) # define RADEON_DISP_GRPH_ALPHA_MASK (0xff << 16) # define RADEON_DISP_OV0_ALPHA_MASK (0xff << 24) # define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9) #define RADEON_DISP2_MERGE_CNTL 0x0d68 # define RADEON_DISP2_RGB_OFFSET_EN (1 << 8) #define RADEON_DISP_LIN_TRANS_GRPH_A 0x0d80 #define RADEON_DISP_LIN_TRANS_GRPH_B 0x0d84 #define RADEON_DISP_LIN_TRANS_GRPH_C 0x0d88 #define RADEON_DISP_LIN_TRANS_GRPH_D 0x0d8c #define RADEON_DISP_LIN_TRANS_GRPH_E 0x0d90 #define RADEON_DISP_LIN_TRANS_GRPH_F 0x0d98 #define RADEON_DP_BRUSH_BKGD_CLR 0x1478 #define RADEON_DP_BRUSH_FRGD_CLR 0x147c #define RADEON_DP_CNTL 0x16c0 # define RADEON_DST_X_LEFT_TO_RIGHT (1 << 0) # define RADEON_DST_Y_TOP_TO_BOTTOM (1 << 1) # define RADEON_DP_DST_TILE_LINEAR (0 << 3) # define RADEON_DP_DST_TILE_MACRO (1 << 3) # define RADEON_DP_DST_TILE_MICRO (2 << 3) # define RADEON_DP_DST_TILE_BOTH (3 << 3) #define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0 # define RADEON_DST_Y_MAJOR (1 << 2) # define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15) -# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31) +# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1U << 31) #define RADEON_DP_DATATYPE 0x16c4 # define RADEON_HOST_BIG_ENDIAN_EN (1 << 29) #define RADEON_DP_GUI_MASTER_CNTL 0x146c # define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) # define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) # define RADEON_GMC_SRC_CLIPPING (1 << 2) # define RADEON_GMC_DST_CLIPPING (1 << 3) # define 
RADEON_GMC_BRUSH_DATATYPE_MASK (0x0f << 4) # define RADEON_GMC_BRUSH_8X8_MONO_FG_BG (0 << 4) # define RADEON_GMC_BRUSH_8X8_MONO_FG_LA (1 << 4) # define RADEON_GMC_BRUSH_1X8_MONO_FG_BG (4 << 4) # define RADEON_GMC_BRUSH_1X8_MONO_FG_LA (5 << 4) # define RADEON_GMC_BRUSH_32x1_MONO_FG_BG (6 << 4) # define RADEON_GMC_BRUSH_32x1_MONO_FG_LA (7 << 4) # define RADEON_GMC_BRUSH_32x32_MONO_FG_BG (8 << 4) # define RADEON_GMC_BRUSH_32x32_MONO_FG_LA (9 << 4) # define RADEON_GMC_BRUSH_8x8_COLOR (10 << 4) # define RADEON_GMC_BRUSH_1X8_COLOR (12 << 4) # define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4) # define RADEON_GMC_BRUSH_NONE (15 << 4) # define RADEON_GMC_DST_8BPP_CI (2 << 8) # define RADEON_GMC_DST_15BPP (3 << 8) # define RADEON_GMC_DST_16BPP (4 << 8) # define RADEON_GMC_DST_24BPP (5 << 8) # define RADEON_GMC_DST_32BPP (6 << 8) # define RADEON_GMC_DST_8BPP_RGB (7 << 8) # define RADEON_GMC_DST_Y8 (8 << 8) # define RADEON_GMC_DST_RGB8 (9 << 8) # define RADEON_GMC_DST_VYUY (11 << 8) # define RADEON_GMC_DST_YVYU (12 << 8) # define RADEON_GMC_DST_AYUV444 (14 << 8) # define RADEON_GMC_DST_ARGB4444 (15 << 8) # define RADEON_GMC_DST_DATATYPE_MASK (0x0f << 8) # define RADEON_GMC_DST_DATATYPE_SHIFT 8 # define RADEON_GMC_SRC_DATATYPE_MASK (3 << 12) # define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12) # define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12) # define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12) # define RADEON_GMC_BYTE_PIX_ORDER (1 << 14) # define RADEON_GMC_BYTE_MSB_TO_LSB (0 << 14) # define RADEON_GMC_BYTE_LSB_TO_MSB (1 << 14) # define RADEON_GMC_CONVERSION_TEMP (1 << 15) # define RADEON_GMC_CONVERSION_TEMP_6500 (0 << 15) # define RADEON_GMC_CONVERSION_TEMP_9300 (1 << 15) # define RADEON_GMC_ROP3_MASK (0xff << 16) # define RADEON_DP_SRC_SOURCE_MASK (7 << 24) # define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24) # define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24) # define RADEON_GMC_3D_FCN_EN (1 << 27) # define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28) # define RADEON_GMC_AUX_CLIP_DIS (1 << 29) # define RADEON_GMC_WR_MSK_DIS (1 << 30) # define RADEON_GMC_LD_BRUSH_Y_X (1 << 31) # define RADEON_ROP3_ZERO 0x00000000 # define RADEON_ROP3_DSa 0x00880000 # define RADEON_ROP3_SDna 0x00440000 # define RADEON_ROP3_S 0x00cc0000 # define RADEON_ROP3_DSna 0x00220000 # define RADEON_ROP3_D 0x00aa0000 # define RADEON_ROP3_DSx 0x00660000 # define RADEON_ROP3_DSo 0x00ee0000 # define RADEON_ROP3_DSon 0x00110000 # define RADEON_ROP3_DSxn 0x00990000 # define RADEON_ROP3_Dn 0x00550000 # define RADEON_ROP3_SDno 0x00dd0000 # define RADEON_ROP3_Sn 0x00330000 # define RADEON_ROP3_DSno 0x00bb0000 # define RADEON_ROP3_DSan 0x00770000 # define RADEON_ROP3_ONE 0x00ff0000 # define RADEON_ROP3_DPa 0x00a00000 # define RADEON_ROP3_PDna 0x00500000 # define RADEON_ROP3_P 0x00f00000 # define RADEON_ROP3_DPna 0x000a0000 # define RADEON_ROP3_D 0x00aa0000 # define RADEON_ROP3_DPx 0x005a0000 # define RADEON_ROP3_DPo 0x00fa0000 # define RADEON_ROP3_DPon 0x00050000 # define RADEON_ROP3_PDxn 0x00a50000 # define RADEON_ROP3_PDno 0x00f50000 # define RADEON_ROP3_Pn 0x000f0000 # define RADEON_ROP3_DPno 0x00af0000 # define RADEON_ROP3_DPan 0x005f0000 #define RADEON_DP_GUI_MASTER_CNTL_C 0x1c84 #define RADEON_DP_MIX 0x16c8 #define RADEON_DP_SRC_BKGD_CLR 0x15dc #define RADEON_DP_SRC_FRGD_CLR 0x15d8 #define RADEON_DP_WRITE_MASK 0x16cc #define RADEON_DST_BRES_DEC 0x1630 #define RADEON_DST_BRES_ERR 0x1628 #define RADEON_DST_BRES_INC 0x162c #define RADEON_DST_BRES_LNTH 0x1634 #define RADEON_DST_BRES_LNTH_SUB 0x1638 #define RADEON_DST_HEIGHT 0x1410 #define 
RADEON_DST_HEIGHT_WIDTH 0x143c #define RADEON_DST_HEIGHT_WIDTH_8 0x158c #define RADEON_DST_HEIGHT_WIDTH_BW 0x15b4 #define RADEON_DST_HEIGHT_Y 0x15a0 #define RADEON_DST_LINE_START 0x1600 #define RADEON_DST_LINE_END 0x1604 #define RADEON_DST_LINE_PATCOUNT 0x1608 # define RADEON_BRES_CNTL_SHIFT 8 #define RADEON_DST_OFFSET 0x1404 #define RADEON_DST_PITCH 0x1408 #define RADEON_DST_PITCH_OFFSET 0x142c #define RADEON_DST_PITCH_OFFSET_C 0x1c80 # define RADEON_PITCH_SHIFT 21 # define RADEON_DST_TILE_LINEAR (0 << 30) # define RADEON_DST_TILE_MACRO (1 << 30) # define RADEON_DST_TILE_MICRO (2U << 30) # define RADEON_DST_TILE_BOTH (3U << 30) #define RADEON_DST_WIDTH 0x140c #define RADEON_DST_WIDTH_HEIGHT 0x1598 #define RADEON_DST_WIDTH_X 0x1588 #define RADEON_DST_WIDTH_X_INCY 0x159c #define RADEON_DST_X 0x141c #define RADEON_DST_X_SUB 0x15a4 #define RADEON_DST_X_Y 0x1594 #define RADEON_DST_Y 0x1420 #define RADEON_DST_Y_SUB 0x15a8 #define RADEON_DST_Y_X 0x1438 #define RADEON_FCP_CNTL 0x0910 # define RADEON_FCP0_SRC_PCICLK 0 # define RADEON_FCP0_SRC_PCLK 1 # define RADEON_FCP0_SRC_PCLKb 2 # define RADEON_FCP0_SRC_HREF 3 # define RADEON_FCP0_SRC_GND 4 # define RADEON_FCP0_SRC_HREFb 5 #define RADEON_FLUSH_1 0x1704 #define RADEON_FLUSH_2 0x1708 #define RADEON_FLUSH_3 0x170c #define RADEON_FLUSH_4 0x1710 #define RADEON_FLUSH_5 0x1714 #define RADEON_FLUSH_6 0x1718 #define RADEON_FLUSH_7 0x171c #define RADEON_FOG_3D_TABLE_START 0x1810 #define RADEON_FOG_3D_TABLE_END 0x1814 #define RADEON_FOG_3D_TABLE_DENSITY 0x181c #define RADEON_FOG_TABLE_INDEX 0x1a14 #define RADEON_FOG_TABLE_DATA 0x1a18 #define RADEON_FP_CRTC_H_TOTAL_DISP 0x0250 #define RADEON_FP_CRTC_V_TOTAL_DISP 0x0254 # define RADEON_FP_CRTC_H_TOTAL_MASK 0x000003ff # define RADEON_FP_CRTC_H_DISP_MASK 0x01ff0000 # define RADEON_FP_CRTC_V_TOTAL_MASK 0x00000fff # define RADEON_FP_CRTC_V_DISP_MASK 0x0fff0000 # define RADEON_FP_H_SYNC_STRT_CHAR_MASK 0x00001ff8 # define RADEON_FP_H_SYNC_WID_MASK 0x003f0000 # define RADEON_FP_V_SYNC_STRT_MASK 0x00000fff # define RADEON_FP_V_SYNC_WID_MASK 0x001f0000 # define RADEON_FP_CRTC_H_TOTAL_SHIFT 0x00000000 # define RADEON_FP_CRTC_H_DISP_SHIFT 0x00000010 # define RADEON_FP_CRTC_V_TOTAL_SHIFT 0x00000000 # define RADEON_FP_CRTC_V_DISP_SHIFT 0x00000010 # define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003 # define RADEON_FP_H_SYNC_WID_SHIFT 0x00000010 # define RADEON_FP_V_SYNC_STRT_SHIFT 0x00000000 # define RADEON_FP_V_SYNC_WID_SHIFT 0x00000010 #define RADEON_FP_GEN_CNTL 0x0284 # define RADEON_FP_FPON (1 << 0) # define RADEON_FP_BLANK_EN (1 << 1) # define RADEON_FP_TMDS_EN (1 << 2) # define RADEON_FP_PANEL_FORMAT (1 << 3) # define RADEON_FP_EN_TMDS (1 << 7) # define RADEON_FP_DETECT_SENSE (1 << 8) # define RADEON_FP_DETECT_INT_POL (1 << 9) # define R200_FP_SOURCE_SEL_MASK (3 << 10) # define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) # define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) # define R200_FP_SOURCE_SEL_RMX (2 << 10) # define R200_FP_SOURCE_SEL_TRANS (3 << 10) # define RADEON_FP_SEL_CRTC1 (0 << 13) # define RADEON_FP_SEL_CRTC2 (1 << 13) # define R300_HPD_SEL(x) ((x) << 13) # define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) # define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) # define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) # define RADEON_FP_CRTC_USE_SHADOW_VEND (1 << 18) # define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20) # define RADEON_FP_DFP_SYNC_SEL (1 << 21) # define RADEON_FP_CRTC_LOCK_8DOT (1 << 22) # define RADEON_FP_CRT_SYNC_SEL (1 << 23) # define RADEON_FP_USE_SHADOW_EN (1 << 24) # define RADEON_FP_CRT_SYNC_ALT (1 << 26) 
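
The RADEON_FP_CRTC_* group just above pairs each multi-bit field with a MASK and a SHIFT, the usual building blocks for read-modify-write access to packed register fields. The sketch below shows that idiom end to end; the #define values are copied from this header, while reg_read()/reg_write() and the fake backing array are hypothetical stand-ins for the driver's real MMIO accessors:

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from this header. */
#define RADEON_FP_CRTC_H_TOTAL_DISP	0x0250
#define RADEON_FP_CRTC_H_DISP_MASK	0x01ff0000
#define RADEON_FP_CRTC_H_DISP_SHIFT	0x00000010

/* Hypothetical MMIO stand-ins so the sketch runs standalone. */
static uint32_t fake_regs[0x1000];

static uint32_t reg_read(uint32_t off)		{ return fake_regs[off / 4]; }
static void reg_write(uint32_t off, uint32_t v)	{ fake_regs[off / 4] = v; }

int
main(void)
{
	uint32_t v;

	/* Read-modify-write: clear the H_DISP field, then insert a value. */
	v = reg_read(RADEON_FP_CRTC_H_TOTAL_DISP);
	v &= ~RADEON_FP_CRTC_H_DISP_MASK;
	v |= (0x63u << RADEON_FP_CRTC_H_DISP_SHIFT) & RADEON_FP_CRTC_H_DISP_MASK;
	reg_write(RADEON_FP_CRTC_H_TOTAL_DISP, v);

	/* Extract the field again: (v & MASK) >> SHIFT recovers 0x63. */
	v = reg_read(RADEON_FP_CRTC_H_TOTAL_DISP);
	printf("H_DISP = 0x%x\n",
	    (v & RADEON_FP_CRTC_H_DISP_MASK) >> RADEON_FP_CRTC_H_DISP_SHIFT);
	return (0);
}
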
#define RADEON_FP2_GEN_CNTL 0x0288 # define RADEON_FP2_BLANK_EN (1 << 1) # define RADEON_FP2_ON (1 << 2) # define RADEON_FP2_PANEL_FORMAT (1 << 3) # define RADEON_FP2_DETECT_SENSE (1 << 8) # define RADEON_FP2_DETECT_INT_POL (1 << 9) # define R200_FP2_SOURCE_SEL_MASK (3 << 10) # define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) # define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) # define R200_FP2_SOURCE_SEL_RMX (2 << 10) # define R200_FP2_SOURCE_SEL_TRANS_UNIT (3 << 10) # define RADEON_FP2_SRC_SEL_MASK (3 << 13) # define RADEON_FP2_SRC_SEL_CRTC2 (1 << 13) # define RADEON_FP2_FP_POL (1 << 16) # define RADEON_FP2_LP_POL (1 << 17) # define RADEON_FP2_SCK_POL (1 << 18) # define RADEON_FP2_LCD_CNTL_MASK (7 << 19) # define RADEON_FP2_PAD_FLOP_EN (1 << 22) # define RADEON_FP2_CRC_EN (1 << 23) # define RADEON_FP2_CRC_READ_EN (1 << 24) # define RADEON_FP2_DVO_EN (1 << 25) # define RADEON_FP2_DVO_RATE_SEL_SDR (1 << 26) # define R200_FP2_DVO_RATE_SEL_SDR (1 << 27) # define R300_FP2_DVO_CLOCK_MODE_SINGLE (1 << 28) # define R300_FP2_DVO_DUAL_CHANNEL_EN (1 << 29) #define RADEON_FP_H_SYNC_STRT_WID 0x02c4 #define RADEON_FP_H2_SYNC_STRT_WID 0x03c4 #define RADEON_FP_HORZ_STRETCH 0x028c #define RADEON_FP_HORZ2_STRETCH 0x038c # define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff # define RADEON_HORZ_STRETCH_RATIO_MAX 4096 # define RADEON_HORZ_PANEL_SIZE (0x1ff << 16) # define RADEON_HORZ_PANEL_SHIFT 16 # define RADEON_HORZ_STRETCH_PIXREP (0 << 25) # define RADEON_HORZ_STRETCH_BLEND (1 << 26) # define RADEON_HORZ_STRETCH_ENABLE (1 << 25) # define RADEON_HORZ_AUTO_RATIO (1 << 27) # define RADEON_HORZ_FP_LOOP_STRETCH (0x7 << 28) # define RADEON_HORZ_AUTO_RATIO_INC (1 << 31) #define RADEON_FP_HORZ_VERT_ACTIVE 0x0278 #define RADEON_FP_V_SYNC_STRT_WID 0x02c8 #define RADEON_FP_VERT_STRETCH 0x0290 #define RADEON_FP_V2_SYNC_STRT_WID 0x03c8 #define RADEON_FP_VERT2_STRETCH 0x0390 # define RADEON_VERT_PANEL_SIZE (0xfff << 12) # define RADEON_VERT_PANEL_SHIFT 12 # define RADEON_VERT_STRETCH_RATIO_MASK 0xfff # define RADEON_VERT_STRETCH_RATIO_SHIFT 0 # define RADEON_VERT_STRETCH_RATIO_MAX 4096 # define RADEON_VERT_STRETCH_ENABLE (1 << 25) # define RADEON_VERT_STRETCH_LINEREP (0 << 26) # define RADEON_VERT_STRETCH_BLEND (1 << 26) # define RADEON_VERT_AUTO_RATIO_EN (1 << 27) # define RADEON_VERT_AUTO_RATIO_INC (1 << 31) # define RADEON_VERT_STRETCH_RESERVED 0x71000000 #define RS400_FP_2ND_GEN_CNTL 0x0384 # define RS400_FP_2ND_ON (1 << 0) # define RS400_FP_2ND_BLANK_EN (1 << 1) # define RS400_TMDS_2ND_EN (1 << 2) # define RS400_PANEL_FORMAT_2ND (1 << 3) # define RS400_FP_2ND_EN_TMDS (1 << 7) # define RS400_FP_2ND_DETECT_SENSE (1 << 8) # define RS400_FP_2ND_SOURCE_SEL_MASK (3 << 10) # define RS400_FP_2ND_SOURCE_SEL_CRTC1 (0 << 10) # define RS400_FP_2ND_SOURCE_SEL_CRTC2 (1 << 10) # define RS400_FP_2ND_SOURCE_SEL_RMX (2 << 10) # define RS400_FP_2ND_DETECT_EN (1 << 12) # define RS400_HPD_2ND_SEL (1 << 13) #define RS400_FP2_2_GEN_CNTL 0x0388 # define RS400_FP2_2_BLANK_EN (1 << 1) # define RS400_FP2_2_ON (1 << 2) # define RS400_FP2_2_PANEL_FORMAT (1 << 3) # define RS400_FP2_2_DETECT_SENSE (1 << 8) # define RS400_FP2_2_SOURCE_SEL_MASK (3 << 10) # define RS400_FP2_2_SOURCE_SEL_CRTC1 (0 << 10) # define RS400_FP2_2_SOURCE_SEL_CRTC2 (1 << 10) # define RS400_FP2_2_SOURCE_SEL_RMX (2 << 10) # define RS400_FP2_2_DVO2_EN (1 << 25) #define RS400_TMDS2_CNTL 0x0394 #define RS400_TMDS2_TRANSMITTER_CNTL 0x03a4 # define RS400_TMDS2_PLLEN (1 << 0) # define RS400_TMDS2_PLLRST (1 << 1) #define RADEON_GEN_INT_CNTL 0x0040 # define RADEON_CRTC_VBLANK_MASK (1 << 0) # define 
RADEON_FP_DETECT_MASK (1 << 4) # define RADEON_CRTC2_VBLANK_MASK (1 << 9) # define RADEON_FP2_DETECT_MASK (1 << 10) # define RADEON_GUI_IDLE_MASK (1 << 19) # define RADEON_SW_INT_ENABLE (1 << 25) #define RADEON_GEN_INT_STATUS 0x0044 # define AVIVO_DISPLAY_INT_STATUS (1 << 0) # define RADEON_CRTC_VBLANK_STAT (1 << 0) # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) # define RADEON_FP_DETECT_STAT (1 << 4) # define RADEON_FP_DETECT_STAT_ACK (1 << 4) # define RADEON_CRTC2_VBLANK_STAT (1 << 9) # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_FP2_DETECT_STAT (1 << 10) # define RADEON_FP2_DETECT_STAT_ACK (1 << 10) # define RADEON_GUI_IDLE_STAT (1 << 19) # define RADEON_GUI_IDLE_STAT_ACK (1 << 19) # define RADEON_SW_INT_FIRE (1 << 26) # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) #define RADEON_GENENB 0x03c3 /* VGA */ #define RADEON_GENFC_RD 0x03ca /* VGA */ #define RADEON_GENFC_WT 0x03da /* VGA, 0x03ba */ #define RADEON_GENMO_RD 0x03cc /* VGA */ #define RADEON_GENMO_WT 0x03c2 /* VGA */ #define RADEON_GENS0 0x03c2 /* VGA */ #define RADEON_GENS1 0x03da /* VGA, 0x03ba */ #define RADEON_GPIO_MONID 0x0068 /* DDC interface via I2C */ /* DDC3 */ #define RADEON_GPIO_MONIDB 0x006c #define RADEON_GPIO_CRT2_DDC 0x006c #define RADEON_GPIO_DVI_DDC 0x0064 /* DDC2 */ #define RADEON_GPIO_VGA_DDC 0x0060 /* DDC1 */ # define RADEON_GPIO_A_0 (1 << 0) # define RADEON_GPIO_A_1 (1 << 1) # define RADEON_GPIO_Y_0 (1 << 8) # define RADEON_GPIO_Y_1 (1 << 9) # define RADEON_GPIO_Y_SHIFT_0 8 # define RADEON_GPIO_Y_SHIFT_1 9 # define RADEON_GPIO_EN_0 (1 << 16) # define RADEON_GPIO_EN_1 (1 << 17) # define RADEON_GPIO_MASK_0 (1 << 24) /*??*/ # define RADEON_GPIO_MASK_1 (1 << 25) /*??*/ #define RADEON_GRPH8_DATA 0x03cf /* VGA */ #define RADEON_GRPH8_IDX 0x03ce /* VGA */ #define RADEON_GUI_SCRATCH_REG0 0x15e0 #define RADEON_GUI_SCRATCH_REG1 0x15e4 #define RADEON_GUI_SCRATCH_REG2 0x15e8 #define RADEON_GUI_SCRATCH_REG3 0x15ec #define RADEON_GUI_SCRATCH_REG4 0x15f0 #define RADEON_GUI_SCRATCH_REG5 0x15f4 #define RADEON_HEADER 0x0f0e /* PCI */ #define RADEON_HOST_DATA0 0x17c0 #define RADEON_HOST_DATA1 0x17c4 #define RADEON_HOST_DATA2 0x17c8 #define RADEON_HOST_DATA3 0x17cc #define RADEON_HOST_DATA4 0x17d0 #define RADEON_HOST_DATA5 0x17d4 #define RADEON_HOST_DATA6 0x17d8 #define RADEON_HOST_DATA7 0x17dc #define RADEON_HOST_DATA_LAST 0x17e0 #define RADEON_HOST_PATH_CNTL 0x0130 # define RADEON_HP_LIN_RD_CACHE_DIS (1 << 24) # define RADEON_HDP_READ_BUFFER_INVALIDATE (1 << 27) # define RADEON_HDP_SOFT_RESET (1 << 26) # define RADEON_HDP_APER_CNTL (1 << 23) #define RADEON_HTOTAL_CNTL 0x0009 /* PLL */ # define RADEON_HTOT_CNTL_VGA_EN (1 << 28) #define RADEON_HTOTAL2_CNTL 0x002e /* PLL */ /* Multimedia I2C bus */ #define RADEON_I2C_CNTL_0 0x0090 # define RADEON_I2C_DONE (1 << 0) # define RADEON_I2C_NACK (1 << 1) # define RADEON_I2C_HALT (1 << 2) # define RADEON_I2C_SOFT_RST (1 << 5) # define RADEON_I2C_DRIVE_EN (1 << 6) # define RADEON_I2C_DRIVE_SEL (1 << 7) # define RADEON_I2C_START (1 << 8) # define RADEON_I2C_STOP (1 << 9) # define RADEON_I2C_RECEIVE (1 << 10) # define RADEON_I2C_ABORT (1 << 11) # define RADEON_I2C_GO (1 << 12) # define RADEON_I2C_PRESCALE_SHIFT 16 #define RADEON_I2C_CNTL_1 0x0094 # define RADEON_I2C_DATA_COUNT_SHIFT 0 # define RADEON_I2C_ADDR_COUNT_SHIFT 4 # define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 # define RADEON_I2C_SEL (1 << 16) # define RADEON_I2C_EN (1 << 17) # define RADEON_I2C_TIME_LIMIT_SHIFT 24 #define RADEON_I2C_DATA 0x0098 #define RADEON_DVI_I2C_CNTL_0 0x02e0 # 
define R200_DVI_I2C_PIN_SEL(x) ((x) << 3) # define R200_SEL_DDC1 0 /* depends on asic */ # define R200_SEL_DDC2 1 /* depends on asic */ # define R200_SEL_DDC3 2 /* depends on asic */ # define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13) # define RADEON_SW_CAN_USE_DVI_I2C (1 << 13) # define RADEON_SW_DONE_USING_DVI_I2C (1 << 14) # define RADEON_HW_NEEDS_DVI_I2C (1 << 14) # define RADEON_ABORT_HW_DVI_I2C (1 << 15) # define RADEON_HW_USING_DVI_I2C (1 << 15) #define RADEON_DVI_I2C_CNTL_1 0x02e4 #define RADEON_DVI_I2C_DATA 0x02e8 #define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ #define RADEON_INTERRUPT_PIN 0x0f3d /* PCI */ #define RADEON_IO_BASE 0x0f14 /* PCI */ #define RADEON_LATENCY 0x0f0d /* PCI */ #define RADEON_LEAD_BRES_DEC 0x1608 #define RADEON_LEAD_BRES_LNTH 0x161c #define RADEON_LEAD_BRES_LNTH_SUB 0x1624 #define RADEON_LVDS_GEN_CNTL 0x02d0 # define RADEON_LVDS_ON (1 << 0) # define RADEON_LVDS_DISPLAY_DIS (1 << 1) # define RADEON_LVDS_PANEL_TYPE (1 << 2) # define RADEON_LVDS_PANEL_FORMAT (1 << 3) # define RADEON_LVDS_NO_FM (0 << 4) # define RADEON_LVDS_2_GREY (1 << 4) # define RADEON_LVDS_4_GREY (2 << 4) # define RADEON_LVDS_RST_FM (1 << 6) # define RADEON_LVDS_EN (1 << 7) # define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8 # define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8) # define RADEON_LVDS_BL_MOD_EN (1 << 16) # define RADEON_LVDS_BL_CLK_SEL (1 << 17) # define RADEON_LVDS_DIGON (1 << 18) # define RADEON_LVDS_BLON (1 << 19) # define RADEON_LVDS_FP_POL_LOW (1 << 20) # define RADEON_LVDS_LP_POL_LOW (1 << 21) # define RADEON_LVDS_DTM_POL_LOW (1 << 22) # define RADEON_LVDS_SEL_CRTC2 (1 << 23) # define RADEON_LVDS_FPDI_EN (1 << 27) # define RADEON_LVDS_HSYNC_DELAY_SHIFT 28 #define RADEON_LVDS_PLL_CNTL 0x02d4 # define RADEON_HSYNC_DELAY_SHIFT 28 # define RADEON_HSYNC_DELAY_MASK (0xf << 28) # define RADEON_LVDS_PLL_EN (1 << 16) # define RADEON_LVDS_PLL_RESET (1 << 17) # define R300_LVDS_SRC_SEL_MASK (3 << 18) # define R300_LVDS_SRC_SEL_CRTC1 (0 << 18) # define R300_LVDS_SRC_SEL_CRTC2 (1 << 18) # define R300_LVDS_SRC_SEL_RMX (2 << 18) #define RADEON_LVDS_SS_GEN_CNTL 0x02ec # define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT 16 # define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT 20 #define RADEON_MAX_LATENCY 0x0f3f /* PCI */ #define RADEON_DISPLAY_BASE_ADDR 0x23c #define RADEON_DISPLAY2_BASE_ADDR 0x33c #define RADEON_OV0_BASE_ADDR 0x43c #define RADEON_NB_TOM 0x15c #define R300_MC_INIT_MISC_LAT_TIMER 0x180 # define R300_MC_DISP0R_INIT_LAT_SHIFT 8 # define R300_MC_DISP0R_INIT_LAT_MASK 0xf # define R300_MC_DISP1R_INIT_LAT_SHIFT 12 # define R300_MC_DISP1R_INIT_LAT_MASK 0xf #define RADEON_MCLK_CNTL 0x0012 /* PLL */ # define RADEON_MCLKA_SRC_SEL_MASK 0x7 # define RADEON_FORCEON_MCLKA (1 << 16) # define RADEON_FORCEON_MCLKB (1 << 17) # define RADEON_FORCEON_YCLKA (1 << 18) # define RADEON_FORCEON_YCLKB (1 << 19) # define RADEON_FORCEON_MC (1 << 20) # define RADEON_FORCEON_AIC (1 << 21) # define R300_DISABLE_MC_MCLKA (1 << 21) # define R300_DISABLE_MC_MCLKB (1 << 21) #define RADEON_MCLK_MISC 0x001f /* PLL */ # define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12) # define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) # define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) # define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) #define RADEON_GPIOPAD_MASK 0x0198 #define RADEON_GPIOPAD_A 0x019c #define RADEON_GPIOPAD_EN 0x01a0 #define RADEON_GPIOPAD_Y 0x01a4 #define RADEON_MDGPIO_MASK 0x01a8 #define RADEON_MDGPIO_A 0x01ac #define RADEON_MDGPIO_EN 0x01b0 #define RADEON_MDGPIO_Y 0x01b4 #define RADEON_MEM_ADDR_CONFIG 0x0148 #define RADEON_MEM_BASE 0x0f10 /* PCI */ #define 
RADEON_MEM_CNTL 0x0140 # define RADEON_MEM_NUM_CHANNELS_MASK 0x01 # define RADEON_MEM_USE_B_CH_ONLY (1 << 1) # define RV100_HALF_MODE (1 << 3) # define R300_MEM_NUM_CHANNELS_MASK 0x03 # define R300_MEM_USE_CD_CH_ONLY (1 << 2) #define RADEON_MEM_TIMING_CNTL 0x0144 /* EXT_MEM_CNTL */ #define RADEON_MEM_INIT_LAT_TIMER 0x0154 #define RADEON_MEM_INTF_CNTL 0x014c #define RADEON_MEM_SDRAM_MODE_REG 0x0158 # define RADEON_SDRAM_MODE_MASK 0xffff0000 # define RADEON_B3MEM_RESET_MASK 0x6fffffff # define RADEON_MEM_CFG_TYPE_DDR (1 << 30) #define RADEON_MEM_STR_CNTL 0x0150 # define RADEON_MEM_PWRUP_COMPL_A (1 << 0) # define RADEON_MEM_PWRUP_COMPL_B (1 << 1) # define R300_MEM_PWRUP_COMPL_C (1 << 2) # define R300_MEM_PWRUP_COMPL_D (1 << 3) # define RADEON_MEM_PWRUP_COMPLETE 0x03 # define R300_MEM_PWRUP_COMPLETE 0x0f #define RADEON_MC_STATUS 0x0150 # define RADEON_MC_IDLE (1 << 2) # define R300_MC_IDLE (1 << 4) #define RADEON_MEM_VGA_RP_SEL 0x003c #define RADEON_MEM_VGA_WP_SEL 0x0038 #define RADEON_MIN_GRANT 0x0f3e /* PCI */ #define RADEON_MM_DATA 0x0004 #define RADEON_MM_INDEX 0x0000 -# define RADEON_MM_APER (1 << 31) +# define RADEON_MM_APER (1U << 31) #define RADEON_MPLL_CNTL 0x000e /* PLL */ #define RADEON_MPP_TB_CONFIG 0x01c0 /* ? */ #define RADEON_MPP_GP_CONFIG 0x01c8 /* ? */ #define RADEON_SEPROM_CNTL1 0x01c0 # define RADEON_SCK_PRESCALE_SHIFT 24 # define RADEON_SCK_PRESCALE_MASK (0xff << 24) #define R300_MC_IND_INDEX 0x01f8 # define R300_MC_IND_ADDR_MASK 0x3f # define R300_MC_IND_WR_EN (1 << 8) #define R300_MC_IND_DATA 0x01fc #define R300_MC_READ_CNTL_AB 0x017c # define R300_MEM_RBS_POSITION_A_MASK 0x03 #define R300_MC_READ_CNTL_CD_mcind 0x24 # define R300_MEM_RBS_POSITION_C_MASK 0x03 #define RADEON_N_VIF_COUNT 0x0248 #define RADEON_OV0_AUTO_FLIP_CNTL 0x0470 # define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM 0x00000007 # define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD 0x00000008 # define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD 0x00000010 # define RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020 # define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE 0x00000040 # define RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT 0x00000300 # define RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN 0x00010000 # define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN 0x00040000 # define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN 0x00080000 # define RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE 0x00800000 #define RADEON_OV0_COLOUR_CNTL 0x04E0 #define RADEON_OV0_DEINTERLACE_PATTERN 0x0474 #define RADEON_OV0_EXCLUSIVE_HORZ 0x0408 # define RADEON_EXCL_HORZ_START_MASK 0x000000ff # define RADEON_EXCL_HORZ_END_MASK 0x0000ff00 # define RADEON_EXCL_HORZ_BACK_PORCH_MASK 0x00ff0000 # define RADEON_EXCL_HORZ_EXCLUSIVE_EN 0x80000000 #define RADEON_OV0_EXCLUSIVE_VERT 0x040C # define RADEON_EXCL_VERT_START_MASK 0x000003ff # define RADEON_EXCL_VERT_END_MASK 0x03ff0000 #define RADEON_OV0_FILTER_CNTL 0x04A0 # define RADEON_FILTER_PROGRAMMABLE_COEF 0x0 # define RADEON_FILTER_HC_COEF_HORZ_Y 0x1 # define RADEON_FILTER_HC_COEF_HORZ_UV 0x2 # define RADEON_FILTER_HC_COEF_VERT_Y 0x4 # define RADEON_FILTER_HC_COEF_VERT_UV 0x8 # define RADEON_FILTER_HARDCODED_COEF 0xf # define RADEON_FILTER_COEF_MASK 0xf #define RADEON_OV0_FOUR_TAP_COEF_0 0x04B0 #define RADEON_OV0_FOUR_TAP_COEF_1 0x04B4 #define RADEON_OV0_FOUR_TAP_COEF_2 0x04B8 #define RADEON_OV0_FOUR_TAP_COEF_3 0x04BC #define RADEON_OV0_FOUR_TAP_COEF_4 0x04C0 #define RADEON_OV0_FLAG_CNTL 0x04DC #define RADEON_OV0_GAMMA_000_00F 0x0d40 #define RADEON_OV0_GAMMA_010_01F 0x0d44 #define RADEON_OV0_GAMMA_020_03F 
0x0d48 #define RADEON_OV0_GAMMA_040_07F 0x0d4c #define RADEON_OV0_GAMMA_080_0BF 0x0e00 #define RADEON_OV0_GAMMA_0C0_0FF 0x0e04 #define RADEON_OV0_GAMMA_100_13F 0x0e08 #define RADEON_OV0_GAMMA_140_17F 0x0e0c #define RADEON_OV0_GAMMA_180_1BF 0x0e10 #define RADEON_OV0_GAMMA_1C0_1FF 0x0e14 #define RADEON_OV0_GAMMA_200_23F 0x0e18 #define RADEON_OV0_GAMMA_240_27F 0x0e1c #define RADEON_OV0_GAMMA_280_2BF 0x0e20 #define RADEON_OV0_GAMMA_2C0_2FF 0x0e24 #define RADEON_OV0_GAMMA_300_33F 0x0e28 #define RADEON_OV0_GAMMA_340_37F 0x0e2c #define RADEON_OV0_GAMMA_380_3BF 0x0d50 #define RADEON_OV0_GAMMA_3C0_3FF 0x0d54 #define RADEON_OV0_GRAPHICS_KEY_CLR_LOW 0x04EC #define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH 0x04F0 #define RADEON_OV0_H_INC 0x0480 #define RADEON_OV0_KEY_CNTL 0x04F4 # define RADEON_VIDEO_KEY_FN_MASK 0x00000003L # define RADEON_VIDEO_KEY_FN_FALSE 0x00000000L # define RADEON_VIDEO_KEY_FN_TRUE 0x00000001L # define RADEON_VIDEO_KEY_FN_EQ 0x00000002L # define RADEON_VIDEO_KEY_FN_NE 0x00000003L # define RADEON_GRAPHIC_KEY_FN_MASK 0x00000030L # define RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L # define RADEON_GRAPHIC_KEY_FN_TRUE 0x00000010L # define RADEON_GRAPHIC_KEY_FN_EQ 0x00000020L # define RADEON_GRAPHIC_KEY_FN_NE 0x00000030L # define RADEON_CMP_MIX_MASK 0x00000100L # define RADEON_CMP_MIX_OR 0x00000000L # define RADEON_CMP_MIX_AND 0x00000100L #define RADEON_OV0_LIN_TRANS_A 0x0d20 #define RADEON_OV0_LIN_TRANS_B 0x0d24 #define RADEON_OV0_LIN_TRANS_C 0x0d28 #define RADEON_OV0_LIN_TRANS_D 0x0d2c #define RADEON_OV0_LIN_TRANS_E 0x0d30 #define RADEON_OV0_LIN_TRANS_F 0x0d34 #define RADEON_OV0_P1_BLANK_LINES_AT_TOP 0x0430 # define RADEON_P1_BLNK_LN_AT_TOP_M1_MASK 0x00000fffL # define RADEON_P1_ACTIVE_LINES_M1 0x0fff0000L #define RADEON_OV0_P1_H_ACCUM_INIT 0x0488 #define RADEON_OV0_P1_V_ACCUM_INIT 0x0428 # define RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L # define RADEON_OV0_P1_V_ACCUM_INIT_MASK 0x01ff8000L #define RADEON_OV0_P1_X_START_END 0x0494 #define RADEON_OV0_P2_X_START_END 0x0498 #define RADEON_OV0_P23_BLANK_LINES_AT_TOP 0x0434 # define RADEON_P23_BLNK_LN_AT_TOP_M1_MASK 0x000007ffL # define RADEON_P23_ACTIVE_LINES_M1 0x07ff0000L #define RADEON_OV0_P23_H_ACCUM_INIT 0x048C #define RADEON_OV0_P23_V_ACCUM_INIT 0x042C #define RADEON_OV0_P3_X_START_END 0x049C #define RADEON_OV0_REG_LOAD_CNTL 0x0410 # define RADEON_REG_LD_CTL_LOCK 0x00000001L # define RADEON_REG_LD_CTL_VBLANK_DURING_LOCK 0x00000002L # define RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L # define RADEON_REG_LD_CTL_LOCK_READBACK 0x00000008L # define RADEON_REG_LD_CTL_FLIP_READBACK 0x00000010L #define RADEON_OV0_SCALE_CNTL 0x0420 # define RADEON_SCALER_HORZ_PICK_NEAREST 0x00000004L # define RADEON_SCALER_VERT_PICK_NEAREST 0x00000008L # define RADEON_SCALER_SIGNED_UV 0x00000010L # define RADEON_SCALER_GAMMA_SEL_MASK 0x00000060L # define RADEON_SCALER_GAMMA_SEL_BRIGHT 0x00000000L # define RADEON_SCALER_GAMMA_SEL_G22 0x00000020L # define RADEON_SCALER_GAMMA_SEL_G18 0x00000040L # define RADEON_SCALER_GAMMA_SEL_G14 0x00000060L # define RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L # define RADEON_SCALER_SURFAC_FORMAT 0x00000f00L # define RADEON_SCALER_SOURCE_15BPP 0x00000300L # define RADEON_SCALER_SOURCE_16BPP 0x00000400L # define RADEON_SCALER_SOURCE_32BPP 0x00000600L # define RADEON_SCALER_SOURCE_YUV9 0x00000900L # define RADEON_SCALER_SOURCE_YUV12 0x00000A00L # define RADEON_SCALER_SOURCE_VYUY422 0x00000B00L # define RADEON_SCALER_SOURCE_YVYU422 0x00000C00L # define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L # define 
RADEON_SCALER_TEMPORAL_DEINT 0x00002000L # define RADEON_SCALER_CRTC_SEL 0x00004000L # define RADEON_SCALER_SMART_SWITCH 0x00008000L # define RADEON_SCALER_BURST_PER_PLANE 0x007F0000L # define RADEON_SCALER_DOUBLE_BUFFER 0x01000000L # define RADEON_SCALER_DIS_LIMIT 0x08000000L # define RADEON_SCALER_LIN_TRANS_BYPASS 0x10000000L # define RADEON_SCALER_INT_EMU 0x20000000L # define RADEON_SCALER_ENABLE 0x40000000L # define RADEON_SCALER_SOFT_RESET 0x80000000L #define RADEON_OV0_STEP_BY 0x0484 #define RADEON_OV0_TEST 0x04F8 #define RADEON_OV0_V_INC 0x0424 #define RADEON_OV0_VID_BUF_PITCH0_VALUE 0x0460 #define RADEON_OV0_VID_BUF_PITCH1_VALUE 0x0464 #define RADEON_OV0_VID_BUF0_BASE_ADRS 0x0440 # define RADEON_VIF_BUF0_PITCH_SEL 0x00000001L # define RADEON_VIF_BUF0_TILE_ADRS 0x00000002L # define RADEON_VIF_BUF0_BASE_ADRS_MASK 0x03fffff0L # define RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L #define RADEON_OV0_VID_BUF1_BASE_ADRS 0x0444 # define RADEON_VIF_BUF1_PITCH_SEL 0x00000001L # define RADEON_VIF_BUF1_TILE_ADRS 0x00000002L # define RADEON_VIF_BUF1_BASE_ADRS_MASK 0x03fffff0L # define RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L #define RADEON_OV0_VID_BUF2_BASE_ADRS 0x0448 # define RADEON_VIF_BUF2_PITCH_SEL 0x00000001L # define RADEON_VIF_BUF2_TILE_ADRS 0x00000002L # define RADEON_VIF_BUF2_BASE_ADRS_MASK 0x03fffff0L # define RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L #define RADEON_OV0_VID_BUF3_BASE_ADRS 0x044C #define RADEON_OV0_VID_BUF4_BASE_ADRS 0x0450 #define RADEON_OV0_VID_BUF5_BASE_ADRS 0x0454 #define RADEON_OV0_VIDEO_KEY_CLR_HIGH 0x04E8 #define RADEON_OV0_VIDEO_KEY_CLR_LOW 0x04E4 #define RADEON_OV0_Y_X_START 0x0400 #define RADEON_OV0_Y_X_END 0x0404 #define RADEON_OV1_Y_X_START 0x0600 #define RADEON_OV1_Y_X_END 0x0604 #define RADEON_OVR_CLR 0x0230 #define RADEON_OVR_WID_LEFT_RIGHT 0x0234 #define RADEON_OVR_WID_TOP_BOTTOM 0x0238 #define RADEON_OVR2_CLR 0x0330 #define RADEON_OVR2_WID_LEFT_RIGHT 0x0334 #define RADEON_OVR2_WID_TOP_BOTTOM 0x0338 /* first capture unit */ #define RADEON_CAP0_BUF0_OFFSET 0x0920 #define RADEON_CAP0_BUF1_OFFSET 0x0924 #define RADEON_CAP0_BUF0_EVEN_OFFSET 0x0928 #define RADEON_CAP0_BUF1_EVEN_OFFSET 0x092C #define RADEON_CAP0_BUF_PITCH 0x0930 #define RADEON_CAP0_V_WINDOW 0x0934 #define RADEON_CAP0_H_WINDOW 0x0938 #define RADEON_CAP0_VBI0_OFFSET 0x093C #define RADEON_CAP0_VBI1_OFFSET 0x0940 #define RADEON_CAP0_VBI_V_WINDOW 0x0944 #define RADEON_CAP0_VBI_H_WINDOW 0x0948 #define RADEON_CAP0_PORT_MODE_CNTL 0x094C #define RADEON_CAP0_TRIG_CNTL 0x0950 #define RADEON_CAP0_DEBUG 0x0954 #define RADEON_CAP0_CONFIG 0x0958 # define RADEON_CAP0_CONFIG_CONTINUOS 0x00000001 # define RADEON_CAP0_CONFIG_START_FIELD_EVEN 0x00000002 # define RADEON_CAP0_CONFIG_START_BUF_GET 0x00000004 # define RADEON_CAP0_CONFIG_START_BUF_SET 0x00000008 # define RADEON_CAP0_CONFIG_BUF_TYPE_ALT 0x00000010 # define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME 0x00000020 # define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040 # define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE 0x00000080 # define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE 0x00000100 # define RADEON_CAP0_CONFIG_MIRROR_EN 0x00000200 # define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN 0x00000400 # define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV 0x00000800 # define RADEON_CAP0_CONFIG_ANC_DECODE_EN 0x00001000 # define RADEON_CAP0_CONFIG_VBI_EN 0x00002000 # define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN 0x00004000 # define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000 # define RADEON_CAP0_CONFIG_FAKE_FIELD_EN 0x00010000 # define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE 
0x00020000 # define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000 # define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2 0x00080000 # define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4 0x00100000 # define RADEON_CAP0_CONFIG_VERT_DIVIDE_2 0x00200000 # define RADEON_CAP0_CONFIG_VERT_DIVIDE_4 0x00400000 # define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE 0x00000000 # define RADEON_CAP0_CONFIG_FORMAT_CCIR656 0x00800000 # define RADEON_CAP0_CONFIG_FORMAT_ZV 0x01000000 # define RADEON_CAP0_CONFIG_FORMAT_VIP 0x01800000 # define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT 0x02000000 # define RADEON_CAP0_CONFIG_HORZ_DECIMATOR 0x04000000 # define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422 0x00000000 # define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422 0x20000000 # define RADEON_CAP0_CONFIG_VBI_DIVIDE_2 0x40000000 # define RADEON_CAP0_CONFIG_VBI_DIVIDE_4 0x80000000 #define RADEON_CAP0_ANC_ODD_OFFSET 0x095C #define RADEON_CAP0_ANC_EVEN_OFFSET 0x0960 #define RADEON_CAP0_ANC_H_WINDOW 0x0964 #define RADEON_CAP0_VIDEO_SYNC_TEST 0x0968 #define RADEON_CAP0_ONESHOT_BUF_OFFSET 0x096C #define RADEON_CAP0_BUF_STATUS 0x0970 /* #define RADEON_CAP0_DWNSC_XRATIO 0x0978 */ /* #define RADEON_CAP0_XSHARPNESS 0x097C */ #define RADEON_CAP0_VBI2_OFFSET 0x0980 #define RADEON_CAP0_VBI3_OFFSET 0x0984 #define RADEON_CAP0_ANC2_OFFSET 0x0988 #define RADEON_CAP0_ANC3_OFFSET 0x098C #define RADEON_VID_BUFFER_CONTROL 0x0900 /* second capture unit */ #define RADEON_CAP1_BUF0_OFFSET 0x0990 #define RADEON_CAP1_BUF1_OFFSET 0x0994 #define RADEON_CAP1_BUF0_EVEN_OFFSET 0x0998 #define RADEON_CAP1_BUF1_EVEN_OFFSET 0x099C #define RADEON_CAP1_BUF_PITCH 0x09A0 #define RADEON_CAP1_V_WINDOW 0x09A4 #define RADEON_CAP1_H_WINDOW 0x09A8 #define RADEON_CAP1_VBI_ODD_OFFSET 0x09AC #define RADEON_CAP1_VBI_EVEN_OFFSET 0x09B0 #define RADEON_CAP1_VBI_V_WINDOW 0x09B4 #define RADEON_CAP1_VBI_H_WINDOW 0x09B8 #define RADEON_CAP1_PORT_MODE_CNTL 0x09BC #define RADEON_CAP1_TRIG_CNTL 0x09C0 #define RADEON_CAP1_DEBUG 0x09C4 #define RADEON_CAP1_CONFIG 0x09C8 #define RADEON_CAP1_ANC_ODD_OFFSET 0x09CC #define RADEON_CAP1_ANC_EVEN_OFFSET 0x09D0 #define RADEON_CAP1_ANC_H_WINDOW 0x09D4 #define RADEON_CAP1_VIDEO_SYNC_TEST 0x09D8 #define RADEON_CAP1_ONESHOT_BUF_OFFSET 0x09DC #define RADEON_CAP1_BUF_STATUS 0x09E0 #define RADEON_CAP1_DWNSC_XRATIO 0x09E8 #define RADEON_CAP1_XSHARPNESS 0x09EC /* misc multimedia registers */ #define RADEON_IDCT_RUNS 0x1F80 #define RADEON_IDCT_LEVELS 0x1F84 #define RADEON_IDCT_CONTROL 0x1FBC #define RADEON_IDCT_AUTH_CONTROL 0x1F88 #define RADEON_IDCT_AUTH 0x1F8C #define RADEON_P2PLL_CNTL 0x002a /* P2PLL */ # define RADEON_P2PLL_RESET (1 << 0) # define RADEON_P2PLL_SLEEP (1 << 1) # define RADEON_P2PLL_PVG_MASK (7 << 11) # define RADEON_P2PLL_PVG_SHIFT 11 # define RADEON_P2PLL_ATOMIC_UPDATE_EN (1 << 16) # define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17) # define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC (1 << 18) #define RADEON_P2PLL_DIV_0 0x002c # define RADEON_P2PLL_FB0_DIV_MASK 0x07ff # define RADEON_P2PLL_POST0_DIV_MASK 0x00070000 #define RADEON_P2PLL_REF_DIV 0x002B /* PLL */ # define RADEON_P2PLL_REF_DIV_MASK 0x03ff # define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */ # define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */ # define R300_PPLL_REF_DIV_ACC_MASK (0x3ff << 18) # define R300_PPLL_REF_DIV_ACC_SHIFT 18 #define RADEON_PALETTE_DATA 0x00b4 #define RADEON_PALETTE_30_DATA 0x00b8 #define RADEON_PALETTE_INDEX 0x00b0 #define RADEON_PCI_GART_PAGE 0x017c #define RADEON_PIXCLKS_CNTL 0x002d # define RADEON_PIX2CLK_SRC_SEL_MASK 0x03 # define RADEON_PIX2CLK_SRC_SEL_CPUCLK 0x00 # 
define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01 # define RADEON_PIX2CLK_SRC_SEL_BYTECLK 0x02 # define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03 # define RADEON_PIX2CLK_ALWAYS_ONb (1<<6) # define RADEON_PIX2CLK_DAC_ALWAYS_ONb (1<<7) # define RADEON_PIXCLK_TV_SRC_SEL (1 << 8) # define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9) # define R300_DVOCLK_ALWAYS_ONb (1 << 10) # define RADEON_PIXCLK_BLEND_ALWAYS_ONb (1 << 11) # define RADEON_PIXCLK_GV_ALWAYS_ONb (1 << 12) # define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13) # define R300_PIXCLK_DVO_ALWAYS_ONb (1 << 13) # define RADEON_PIXCLK_LVDS_ALWAYS_ONb (1 << 14) # define RADEON_PIXCLK_TMDS_ALWAYS_ONb (1 << 15) # define R300_PIXCLK_TRANS_ALWAYS_ONb (1 << 16) # define R300_PIXCLK_TVO_ALWAYS_ONb (1 << 17) # define R300_P2G2CLK_ALWAYS_ONb (1 << 18) # define R300_P2G2CLK_DAC_ALWAYS_ONb (1 << 19) # define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23) #define RADEON_PLANE_3D_MASK_C 0x1d44 #define RADEON_PLL_TEST_CNTL 0x0013 /* PLL */ # define RADEON_PLL_MASK_READ_B (1 << 9) #define RADEON_PMI_CAP_ID 0x0f5c /* PCI */ #define RADEON_PMI_DATA 0x0f63 /* PCI */ #define RADEON_PMI_NXT_CAP_PTR 0x0f5d /* PCI */ #define RADEON_PMI_PMC_REG 0x0f5e /* PCI */ #define RADEON_PMI_PMCSR_REG 0x0f60 /* PCI */ #define RADEON_PMI_REGISTER 0x0f5c /* PCI */ #define RADEON_PPLL_CNTL 0x0002 /* PLL */ # define RADEON_PPLL_RESET (1 << 0) # define RADEON_PPLL_SLEEP (1 << 1) # define RADEON_PPLL_PVG_MASK (7 << 11) # define RADEON_PPLL_PVG_SHIFT 11 # define RADEON_PPLL_ATOMIC_UPDATE_EN (1 << 16) # define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17) # define RADEON_PPLL_ATOMIC_UPDATE_VSYNC (1 << 18) #define RADEON_PPLL_DIV_0 0x0004 /* PLL */ #define RADEON_PPLL_DIV_1 0x0005 /* PLL */ #define RADEON_PPLL_DIV_2 0x0006 /* PLL */ #define RADEON_PPLL_DIV_3 0x0007 /* PLL */ # define RADEON_PPLL_FB3_DIV_MASK 0x07ff # define RADEON_PPLL_POST3_DIV_MASK 0x00070000 #define RADEON_PPLL_REF_DIV 0x0003 /* PLL */ # define RADEON_PPLL_REF_DIV_MASK 0x03ff # define RADEON_PPLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */ # define RADEON_PPLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */ #define RADEON_PWR_MNGMT_CNTL_STATUS 0x0f60 /* PCI */ #define RADEON_RBBM_GUICNTL 0x172c # define RADEON_HOST_DATA_SWAP_NONE (0 << 0) # define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) # define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) # define RADEON_HOST_DATA_SWAP_HDW (3 << 0) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) # define RADEON_SOFT_RESET_HI (1 << 1) # define RADEON_SOFT_RESET_SE (1 << 2) # define RADEON_SOFT_RESET_RE (1 << 3) # define RADEON_SOFT_RESET_PP (1 << 4) # define RADEON_SOFT_RESET_E2 (1 << 5) # define RADEON_SOFT_RESET_RB (1 << 6) # define RADEON_SOFT_RESET_HDP (1 << 7) #define RADEON_RBBM_STATUS 0x0e40 # define RADEON_RBBM_FIFOCNT_MASK 0x007f -# define RADEON_RBBM_ACTIVE (1 << 31) +# define RADEON_RBBM_ACTIVE (1U << 31) #define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c # define RADEON_RB2D_DC_FLUSH (3 << 0) # define RADEON_RB2D_DC_FREE (3 << 2) # define RADEON_RB2D_DC_FLUSH_ALL 0xf -# define RADEON_RB2D_DC_BUSY (1 << 31) +# define RADEON_RB2D_DC_BUSY (1U << 31) #define RADEON_RB2D_DSTCACHE_MODE 0x3428 #define RADEON_DSTCACHE_CTLSTAT 0x1714 #define RADEON_RB3D_ZCACHE_MODE 0x3250 #define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254 # define RADEON_RB3D_ZC_FLUSH_ALL 0x5 #define RADEON_RB3D_DSTCACHE_MODE 0x3258 # define RADEON_RB3D_DC_CACHE_ENABLE (0) # define RADEON_RB3D_DC_2D_CACHE_DISABLE (1) # define RADEON_RB3D_DC_3D_CACHE_DISABLE (2) # define RADEON_RB3D_DC_CACHE_DISABLE (3) # define 
RADEON_RB3D_DC_2D_CACHE_LINESIZE_128 (1 << 2) # define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128 (2 << 2) # define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH (1 << 8) # define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH (2 << 8) # define R200_RB3D_DC_2D_CACHE_AUTOFREE (1 << 10) # define R200_RB3D_DC_3D_CACHE_AUTOFREE (2 << 10) # define RADEON_RB3D_DC_FORCE_RMW (1 << 16) # define RADEON_RB3D_DC_DISABLE_RI_FILL (1 << 24) # define RADEON_RB3D_DC_DISABLE_RI_READ (1 << 25) #define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325C # define RADEON_RB3D_DC_FLUSH (3 << 0) # define RADEON_RB3D_DC_FREE (3 << 2) # define RADEON_RB3D_DC_FLUSH_ALL 0xf -# define RADEON_RB3D_DC_BUSY (1 << 31) +# define RADEON_RB3D_DC_BUSY (1U << 31) #define RADEON_REG_BASE 0x0f18 /* PCI */ #define RADEON_REGPROG_INF 0x0f09 /* PCI */ #define RADEON_REVISION_ID 0x0f08 /* PCI */ #define RADEON_SC_BOTTOM 0x164c #define RADEON_SC_BOTTOM_RIGHT 0x16f0 #define RADEON_SC_BOTTOM_RIGHT_C 0x1c8c #define RADEON_SC_LEFT 0x1640 #define RADEON_SC_RIGHT 0x1644 #define RADEON_SC_TOP 0x1648 #define RADEON_SC_TOP_LEFT 0x16ec #define RADEON_SC_TOP_LEFT_C 0x1c88 # define RADEON_SC_SIGN_MASK_LO 0x8000 # define RADEON_SC_SIGN_MASK_HI 0x80000000 #define RADEON_M_SPLL_REF_FB_DIV 0x000a /* PLL */ # define RADEON_M_SPLL_REF_DIV_SHIFT 0 # define RADEON_M_SPLL_REF_DIV_MASK 0xff # define RADEON_MPLL_FB_DIV_SHIFT 8 # define RADEON_MPLL_FB_DIV_MASK 0xff # define RADEON_SPLL_FB_DIV_SHIFT 16 # define RADEON_SPLL_FB_DIV_MASK 0xff #define RADEON_SPLL_CNTL 0x000c /* PLL */ # define RADEON_SPLL_SLEEP (1 << 0) # define RADEON_SPLL_RESET (1 << 1) # define RADEON_SPLL_PCP_MASK 0x7 # define RADEON_SPLL_PCP_SHIFT 8 # define RADEON_SPLL_PVG_MASK 0x7 # define RADEON_SPLL_PVG_SHIFT 11 # define RADEON_SPLL_PDC_MASK 0x3 # define RADEON_SPLL_PDC_SHIFT 14 #define RADEON_SCLK_CNTL 0x000d /* PLL */ # define RADEON_SCLK_SRC_SEL_MASK 0x0007 # define RADEON_DYN_STOP_LAT_MASK 0x00007ff8 # define RADEON_CP_MAX_DYN_STOP_LAT 0x0008 # define RADEON_SCLK_FORCEON_MASK 0xffff8000 # define RADEON_SCLK_FORCE_DISP2 (1<<15) # define RADEON_SCLK_FORCE_CP (1<<16) # define RADEON_SCLK_FORCE_HDP (1<<17) # define RADEON_SCLK_FORCE_DISP1 (1<<18) # define RADEON_SCLK_FORCE_TOP (1<<19) # define RADEON_SCLK_FORCE_E2 (1<<20) # define RADEON_SCLK_FORCE_SE (1<<21) # define RADEON_SCLK_FORCE_IDCT (1<<22) # define RADEON_SCLK_FORCE_VIP (1<<23) # define RADEON_SCLK_FORCE_RE (1<<24) # define RADEON_SCLK_FORCE_PB (1<<25) # define RADEON_SCLK_FORCE_TAM (1<<26) # define RADEON_SCLK_FORCE_TDM (1<<27) # define RADEON_SCLK_FORCE_RB (1<<28) # define RADEON_SCLK_FORCE_TV_SCLK (1<<29) # define RADEON_SCLK_FORCE_SUBPIC (1<<30) # define RADEON_SCLK_FORCE_OV0 (1<<31) # define R300_SCLK_FORCE_VAP (1<<21) # define R300_SCLK_FORCE_SR (1<<25) # define R300_SCLK_FORCE_PX (1<<26) # define R300_SCLK_FORCE_TX (1<<27) # define R300_SCLK_FORCE_US (1<<28) # define R300_SCLK_FORCE_SU (1<<30) #define R300_SCLK_CNTL2 0x1e /* PLL */ # define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10) # define R300_SCLK_GA_MAX_DYN_STOP_LAT (1<<11) # define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12) # define R300_SCLK_FORCE_TCL (1<<13) # define R300_SCLK_FORCE_CBA (1<<14) # define R300_SCLK_FORCE_GA (1<<15) #define RADEON_SCLK_MORE_CNTL 0x0035 /* PLL */ # define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007 # define RADEON_SCLK_MORE_FORCEON 0x0700 #define RADEON_SDRAM_MODE_REG 0x0158 #define RADEON_SEQ8_DATA 0x03c5 /* VGA */ #define RADEON_SEQ8_IDX 0x03c4 /* VGA */ #define RADEON_SNAPSHOT_F_COUNT 0x0244 #define RADEON_SNAPSHOT_VH_COUNTS 0x0240 #define RADEON_SNAPSHOT_VIF_COUNT 0x024c #define 
RADEON_SRC_OFFSET 0x15ac #define RADEON_SRC_PITCH 0x15b0 #define RADEON_SRC_PITCH_OFFSET 0x1428 #define RADEON_SRC_SC_BOTTOM 0x165c #define RADEON_SRC_SC_BOTTOM_RIGHT 0x16f4 #define RADEON_SRC_SC_RIGHT 0x1654 #define RADEON_SRC_X 0x1414 #define RADEON_SRC_X_Y 0x1590 #define RADEON_SRC_Y 0x1418 #define RADEON_SRC_Y_X 0x1434 #define RADEON_STATUS 0x0f06 /* PCI */ #define RADEON_SUBPIC_CNTL 0x0540 /* ? */ #define RADEON_SUB_CLASS 0x0f0a /* PCI */ #define RADEON_SURFACE_CNTL 0x0b00 # define RADEON_SURF_TRANSLATION_DIS (1 << 8) # define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20) # define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21) # define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22) # define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23) #define RADEON_SURFACE0_INFO 0x0b0c # define RADEON_SURF_TILE_COLOR_MACRO (0 << 16) # define RADEON_SURF_TILE_COLOR_BOTH (1 << 16) # define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16) # define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16) # define R200_SURF_TILE_NONE (0 << 16) # define R200_SURF_TILE_COLOR_MACRO (1 << 16) # define R200_SURF_TILE_COLOR_MICRO (2 << 16) # define R200_SURF_TILE_COLOR_BOTH (3 << 16) # define R200_SURF_TILE_DEPTH_32BPP (4 << 16) # define R200_SURF_TILE_DEPTH_16BPP (5 << 16) # define R300_SURF_TILE_NONE (0 << 16) # define R300_SURF_TILE_COLOR_MACRO (1 << 16) # define R300_SURF_TILE_DEPTH_32BPP (2 << 16) # define RADEON_SURF_AP0_SWP_16BPP (1 << 20) # define RADEON_SURF_AP0_SWP_32BPP (1 << 21) # define RADEON_SURF_AP1_SWP_16BPP (1 << 22) # define RADEON_SURF_AP1_SWP_32BPP (1 << 23) #define RADEON_SURFACE0_LOWER_BOUND 0x0b04 #define RADEON_SURFACE0_UPPER_BOUND 0x0b08 #define RADEON_SURFACE1_INFO 0x0b1c #define RADEON_SURFACE1_LOWER_BOUND 0x0b14 #define RADEON_SURFACE1_UPPER_BOUND 0x0b18 #define RADEON_SURFACE2_INFO 0x0b2c #define RADEON_SURFACE2_LOWER_BOUND 0x0b24 #define RADEON_SURFACE2_UPPER_BOUND 0x0b28 #define RADEON_SURFACE3_INFO 0x0b3c #define RADEON_SURFACE3_LOWER_BOUND 0x0b34 #define RADEON_SURFACE3_UPPER_BOUND 0x0b38 #define RADEON_SURFACE4_INFO 0x0b4c #define RADEON_SURFACE4_LOWER_BOUND 0x0b44 #define RADEON_SURFACE4_UPPER_BOUND 0x0b48 #define RADEON_SURFACE5_INFO 0x0b5c #define RADEON_SURFACE5_LOWER_BOUND 0x0b54 #define RADEON_SURFACE5_UPPER_BOUND 0x0b58 #define RADEON_SURFACE6_INFO 0x0b6c #define RADEON_SURFACE6_LOWER_BOUND 0x0b64 #define RADEON_SURFACE6_UPPER_BOUND 0x0b68 #define RADEON_SURFACE7_INFO 0x0b7c #define RADEON_SURFACE7_LOWER_BOUND 0x0b74 #define RADEON_SURFACE7_UPPER_BOUND 0x0b78 #define RADEON_SW_SEMAPHORE 0x013c #define RADEON_TEST_DEBUG_CNTL 0x0120 #define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001 #define RADEON_TEST_DEBUG_MUX 0x0124 #define RADEON_TEST_DEBUG_OUT 0x012c #define RADEON_TMDS_PLL_CNTL 0x02a8 #define RADEON_TMDS_TRANSMITTER_CNTL 0x02a4 # define RADEON_TMDS_TRANSMITTER_PLLEN 1 # define RADEON_TMDS_TRANSMITTER_PLLRST 2 #define RADEON_TRAIL_BRES_DEC 0x1614 #define RADEON_TRAIL_BRES_ERR 0x160c #define RADEON_TRAIL_BRES_INC 0x1610 #define RADEON_TRAIL_X 0x1618 #define RADEON_TRAIL_X_SUB 0x1620 #define RADEON_VCLK_ECP_CNTL 0x0008 /* PLL */ # define RADEON_VCLK_SRC_SEL_MASK 0x03 # define RADEON_VCLK_SRC_SEL_CPUCLK 0x00 # define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01 # define RADEON_VCLK_SRC_SEL_BYTECLK 0x02 # define RADEON_VCLK_SRC_SEL_PPLLCLK 0x03 # define RADEON_PIXCLK_ALWAYS_ONb (1<<6) # define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7) # define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23) #define RADEON_VENDOR_ID 0x0f00 /* PCI */ #define RADEON_VGA_DDA_CONFIG 0x02e8 #define RADEON_VGA_DDA_ON_OFF 0x02ec #define RADEON_VID_BUFFER_CONTROL 
0x0900 #define RADEON_VIDEOMUX_CNTL 0x0190 /* VIP bus */ #define RADEON_VIPH_CH0_DATA 0x0c00 #define RADEON_VIPH_CH1_DATA 0x0c04 #define RADEON_VIPH_CH2_DATA 0x0c08 #define RADEON_VIPH_CH3_DATA 0x0c0c #define RADEON_VIPH_CH0_ADDR 0x0c10 #define RADEON_VIPH_CH1_ADDR 0x0c14 #define RADEON_VIPH_CH2_ADDR 0x0c18 #define RADEON_VIPH_CH3_ADDR 0x0c1c #define RADEON_VIPH_CH0_SBCNT 0x0c20 #define RADEON_VIPH_CH1_SBCNT 0x0c24 #define RADEON_VIPH_CH2_SBCNT 0x0c28 #define RADEON_VIPH_CH3_SBCNT 0x0c2c #define RADEON_VIPH_CH0_ABCNT 0x0c30 #define RADEON_VIPH_CH1_ABCNT 0x0c34 #define RADEON_VIPH_CH2_ABCNT 0x0c38 #define RADEON_VIPH_CH3_ABCNT 0x0c3c #define RADEON_VIPH_CONTROL 0x0c40 # define RADEON_VIP_BUSY 0 # define RADEON_VIP_IDLE 1 # define RADEON_VIP_RESET 2 # define RADEON_VIPH_EN (1 << 21) #define RADEON_VIPH_DV_LAT 0x0c44 #define RADEON_VIPH_BM_CHUNK 0x0c48 #define RADEON_VIPH_DV_INT 0x0c4c #define RADEON_VIPH_TIMEOUT_STAT 0x0c50 #define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010 #define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK 0x00000010 #define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000 #define RADEON_VIPH_REG_DATA 0x0084 #define RADEON_VIPH_REG_ADDR 0x0080 #define RADEON_WAIT_UNTIL 0x1720 # define RADEON_WAIT_CRTC_PFLIP (1 << 0) # define RADEON_WAIT_RE_CRTC_VLINE (1 << 1) # define RADEON_WAIT_FE_CRTC_VLINE (1 << 2) # define RADEON_WAIT_CRTC_VLINE (1 << 3) # define RADEON_WAIT_DMA_VID_IDLE (1 << 8) # define RADEON_WAIT_DMA_GUI_IDLE (1 << 9) # define RADEON_WAIT_CMDFIFO (1 << 10) /* wait for CMDFIFO_ENTRIES */ # define RADEON_WAIT_OV0_FLIP (1 << 11) # define RADEON_WAIT_AGP_FLUSH (1 << 13) # define RADEON_WAIT_2D_IDLE (1 << 14) # define RADEON_WAIT_3D_IDLE (1 << 15) # define RADEON_WAIT_2D_IDLECLEAN (1 << 16) # define RADEON_WAIT_3D_IDLECLEAN (1 << 17) # define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) # define RADEON_CMDFIFO_ENTRIES_SHIFT 10 # define RADEON_CMDFIFO_ENTRIES_MASK 0x7f # define RADEON_WAIT_VAP_IDLE (1 << 28) # define RADEON_WAIT_BOTH_CRTC_PFLIP (1 << 30) # define RADEON_ENG_DISPLAY_SELECT_CRTC0 (0 << 31) -# define RADEON_ENG_DISPLAY_SELECT_CRTC1 (1 << 31) +# define RADEON_ENG_DISPLAY_SELECT_CRTC1 (1U << 31) #define RADEON_X_MPLL_REF_FB_DIV 0x000a /* PLL */ #define RADEON_XCLK_CNTL 0x000d /* PLL */ #define RADEON_XDLL_CNTL 0x000c /* PLL */ #define RADEON_XPLL_CNTL 0x000b /* PLL */ /* Registers for 3D/TCL */ #define RADEON_PP_BORDER_COLOR_0 0x1d40 #define RADEON_PP_BORDER_COLOR_1 0x1d44 #define RADEON_PP_BORDER_COLOR_2 0x1d48 #define RADEON_PP_CNTL 0x1c38 # define RADEON_STIPPLE_ENABLE (1 << 0) # define RADEON_SCISSOR_ENABLE (1 << 1) # define RADEON_PATTERN_ENABLE (1 << 2) # define RADEON_SHADOW_ENABLE (1 << 3) # define RADEON_TEX_ENABLE_MASK (0xf << 4) # define RADEON_TEX_0_ENABLE (1 << 4) # define RADEON_TEX_1_ENABLE (1 << 5) # define RADEON_TEX_2_ENABLE (1 << 6) # define RADEON_TEX_3_ENABLE (1 << 7) # define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12) # define RADEON_TEX_BLEND_0_ENABLE (1 << 12) # define RADEON_TEX_BLEND_1_ENABLE (1 << 13) # define RADEON_TEX_BLEND_2_ENABLE (1 << 14) # define RADEON_TEX_BLEND_3_ENABLE (1 << 15) # define RADEON_PLANAR_YUV_ENABLE (1 << 20) # define RADEON_SPECULAR_ENABLE (1 << 21) # define RADEON_FOG_ENABLE (1 << 22) # define RADEON_ALPHA_TEST_ENABLE (1 << 23) # define RADEON_ANTI_ALIAS_NONE (0 << 24) # define RADEON_ANTI_ALIAS_LINE (1 << 24) # define RADEON_ANTI_ALIAS_POLY (2 << 24) # define RADEON_ANTI_ALIAS_LINE_POLY (3 << 24) # define RADEON_BUMP_MAP_ENABLE (1 << 26) # define RADEON_BUMPED_MAP_T0 (0 << 27) # define RADEON_BUMPED_MAP_T1 (1 
<< 27) # define RADEON_BUMPED_MAP_T2 (2 << 27) # define RADEON_TEX_3D_ENABLE_0 (1 << 29) # define RADEON_TEX_3D_ENABLE_1 (1 << 30) -# define RADEON_MC_ENABLE (1 << 31) +# define RADEON_MC_ENABLE (1U << 31) #define RADEON_PP_FOG_COLOR 0x1c18 # define RADEON_FOG_COLOR_MASK 0x00ffffff # define RADEON_FOG_VERTEX (0 << 24) # define RADEON_FOG_TABLE (1 << 24) # define RADEON_FOG_USE_DEPTH (0 << 25) # define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25) # define RADEON_FOG_USE_SPEC_ALPHA (3 << 25) #define RADEON_PP_LUM_MATRIX 0x1d00 #define RADEON_PP_MISC 0x1c14 # define RADEON_REF_ALPHA_MASK 0x000000ff # define RADEON_ALPHA_TEST_FAIL (0 << 8) # define RADEON_ALPHA_TEST_LESS (1 << 8) # define RADEON_ALPHA_TEST_LEQUAL (2 << 8) # define RADEON_ALPHA_TEST_EQUAL (3 << 8) # define RADEON_ALPHA_TEST_GEQUAL (4 << 8) # define RADEON_ALPHA_TEST_GREATER (5 << 8) # define RADEON_ALPHA_TEST_NEQUAL (6 << 8) # define RADEON_ALPHA_TEST_PASS (7 << 8) # define RADEON_ALPHA_TEST_OP_MASK (7 << 8) # define RADEON_CHROMA_FUNC_FAIL (0 << 16) # define RADEON_CHROMA_FUNC_PASS (1 << 16) # define RADEON_CHROMA_FUNC_NEQUAL (2 << 16) # define RADEON_CHROMA_FUNC_EQUAL (3 << 16) # define RADEON_CHROMA_KEY_NEAREST (0 << 18) # define RADEON_CHROMA_KEY_ZERO (1 << 18) # define RADEON_SHADOW_ID_AUTO_INC (1 << 20) # define RADEON_SHADOW_FUNC_EQUAL (0 << 21) # define RADEON_SHADOW_FUNC_NEQUAL (1 << 21) # define RADEON_SHADOW_PASS_1 (0 << 22) # define RADEON_SHADOW_PASS_2 (1 << 22) # define RADEON_RIGHT_HAND_CUBE_D3D (0 << 24) # define RADEON_RIGHT_HAND_CUBE_OGL (1 << 24) #define RADEON_PP_ROT_MATRIX_0 0x1d58 #define RADEON_PP_ROT_MATRIX_1 0x1d5c #define RADEON_PP_TXFILTER_0 0x1c54 #define RADEON_PP_TXFILTER_1 0x1c6c #define RADEON_PP_TXFILTER_2 0x1c84 # define RADEON_MAG_FILTER_NEAREST (0 << 0) # define RADEON_MAG_FILTER_LINEAR (1 << 0) # define RADEON_MAG_FILTER_MASK (1 << 0) # define RADEON_MIN_FILTER_NEAREST (0 << 1) # define RADEON_MIN_FILTER_LINEAR (1 << 1) # define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1) # define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1) # define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1) # define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1) # define RADEON_MIN_FILTER_ANISO_NEAREST (8 << 1) # define RADEON_MIN_FILTER_ANISO_LINEAR (9 << 1) # define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1) # define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1) # define RADEON_MIN_FILTER_MASK (15 << 1) # define RADEON_MAX_ANISO_1_TO_1 (0 << 5) # define RADEON_MAX_ANISO_2_TO_1 (1 << 5) # define RADEON_MAX_ANISO_4_TO_1 (2 << 5) # define RADEON_MAX_ANISO_8_TO_1 (3 << 5) # define RADEON_MAX_ANISO_16_TO_1 (4 << 5) # define RADEON_MAX_ANISO_MASK (7 << 5) # define RADEON_LOD_BIAS_MASK (0xff << 8) # define RADEON_LOD_BIAS_SHIFT 8 # define RADEON_MAX_MIP_LEVEL_MASK (0x0f << 16) # define RADEON_MAX_MIP_LEVEL_SHIFT 16 # define RADEON_YUV_TO_RGB (1 << 20) # define RADEON_YUV_TEMPERATURE_COOL (0 << 21) # define RADEON_YUV_TEMPERATURE_HOT (1 << 21) # define RADEON_YUV_TEMPERATURE_MASK (1 << 21) # define RADEON_WRAPEN_S (1 << 22) # define RADEON_CLAMP_S_WRAP (0 << 23) # define RADEON_CLAMP_S_MIRROR (1 << 23) # define RADEON_CLAMP_S_CLAMP_LAST (2 << 23) # define RADEON_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23) # define RADEON_CLAMP_S_CLAMP_BORDER (4 << 23) # define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23) # define RADEON_CLAMP_S_CLAMP_GL (6 << 23) # define RADEON_CLAMP_S_MIRROR_CLAMP_GL (7 << 23) # define RADEON_CLAMP_S_MASK (7 << 23) # define RADEON_WRAPEN_T (1 << 26) # define RADEON_CLAMP_T_WRAP (0 << 27) # define 
RADEON_CLAMP_T_MIRROR (1 << 27) # define RADEON_CLAMP_T_CLAMP_LAST (2 << 27) # define RADEON_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27) # define RADEON_CLAMP_T_CLAMP_BORDER (4 << 27) # define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27) # define RADEON_CLAMP_T_CLAMP_GL (6 << 27) # define RADEON_CLAMP_T_MIRROR_CLAMP_GL (7 << 27) # define RADEON_CLAMP_T_MASK (7 << 27) # define RADEON_BORDER_MODE_OGL (0 << 31) # define RADEON_BORDER_MODE_D3D (1 << 31) #define RADEON_PP_TXFORMAT_0 0x1c58 #define RADEON_PP_TXFORMAT_1 0x1c70 #define RADEON_PP_TXFORMAT_2 0x1c88 # define RADEON_TXFORMAT_I8 (0 << 0) # define RADEON_TXFORMAT_AI88 (1 << 0) # define RADEON_TXFORMAT_RGB332 (2 << 0) # define RADEON_TXFORMAT_ARGB1555 (3 << 0) # define RADEON_TXFORMAT_RGB565 (4 << 0) # define RADEON_TXFORMAT_ARGB4444 (5 << 0) # define RADEON_TXFORMAT_ARGB8888 (6 << 0) # define RADEON_TXFORMAT_RGBA8888 (7 << 0) # define RADEON_TXFORMAT_Y8 (8 << 0) # define RADEON_TXFORMAT_VYUY422 (10 << 0) # define RADEON_TXFORMAT_YVYU422 (11 << 0) # define RADEON_TXFORMAT_DXT1 (12 << 0) # define RADEON_TXFORMAT_DXT23 (14 << 0) # define RADEON_TXFORMAT_DXT45 (15 << 0) # define RADEON_TXFORMAT_SHADOW16 (16 << 0) # define RADEON_TXFORMAT_SHADOW32 (17 << 0) # define RADEON_TXFORMAT_DUDV88 (18 << 0) # define RADEON_TXFORMAT_LDUDV655 (19 << 0) # define RADEON_TXFORMAT_LDUDUV8888 (20 << 0) # define RADEON_TXFORMAT_FORMAT_MASK (31 << 0) # define RADEON_TXFORMAT_FORMAT_SHIFT 0 # define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5) # define RADEON_TXFORMAT_ALPHA_IN_MAP (1 << 6) # define RADEON_TXFORMAT_NON_POWER2 (1 << 7) # define RADEON_TXFORMAT_WIDTH_MASK (15 << 8) # define RADEON_TXFORMAT_WIDTH_SHIFT 8 # define RADEON_TXFORMAT_HEIGHT_MASK (15 << 12) # define RADEON_TXFORMAT_HEIGHT_SHIFT 12 # define RADEON_TXFORMAT_F5_WIDTH_MASK (15 << 16) # define RADEON_TXFORMAT_F5_WIDTH_SHIFT 16 # define RADEON_TXFORMAT_F5_HEIGHT_MASK (15 << 20) # define RADEON_TXFORMAT_F5_HEIGHT_SHIFT 20 # define RADEON_TXFORMAT_ST_ROUTE_STQ0 (0 << 24) # define RADEON_TXFORMAT_ST_ROUTE_MASK (3 << 24) # define RADEON_TXFORMAT_ST_ROUTE_STQ1 (1 << 24) # define RADEON_TXFORMAT_ST_ROUTE_STQ2 (2 << 24) # define RADEON_TXFORMAT_ENDIAN_NO_SWAP (0 << 26) # define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP (1 << 26) # define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP (2 << 26) # define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3 << 26) # define RADEON_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) # define RADEON_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) # define RADEON_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) # define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1 << 31) #define RADEON_PP_CUBIC_FACES_0 0x1d24 #define RADEON_PP_CUBIC_FACES_1 0x1d28 #define RADEON_PP_CUBIC_FACES_2 0x1d2c # define RADEON_FACE_WIDTH_1_SHIFT 0 # define RADEON_FACE_HEIGHT_1_SHIFT 4 # define RADEON_FACE_WIDTH_1_MASK (0xf << 0) # define RADEON_FACE_HEIGHT_1_MASK (0xf << 4) # define RADEON_FACE_WIDTH_2_SHIFT 8 # define RADEON_FACE_HEIGHT_2_SHIFT 12 # define RADEON_FACE_WIDTH_2_MASK (0xf << 8) # define RADEON_FACE_HEIGHT_2_MASK (0xf << 12) # define RADEON_FACE_WIDTH_3_SHIFT 16 # define RADEON_FACE_HEIGHT_3_SHIFT 20 # define RADEON_FACE_WIDTH_3_MASK (0xf << 16) # define RADEON_FACE_HEIGHT_3_MASK (0xf << 20) # define RADEON_FACE_WIDTH_4_SHIFT 24 # define RADEON_FACE_HEIGHT_4_SHIFT 28 # define RADEON_FACE_WIDTH_4_MASK (0xf << 24) # define RADEON_FACE_HEIGHT_4_MASK (0xf << 28) #define RADEON_PP_TXOFFSET_0 0x1c5c #define RADEON_PP_TXOFFSET_1 0x1c74 #define RADEON_PP_TXOFFSET_2 0x1c8c # define RADEON_TXO_ENDIAN_NO_SWAP (0 << 0) # define RADEON_TXO_ENDIAN_BYTE_SWAP (1 << 0) # define 
RADEON_TXO_ENDIAN_WORD_SWAP (2 << 0) # define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0) # define RADEON_TXO_MACRO_LINEAR (0 << 2) # define RADEON_TXO_MACRO_TILE (1 << 2) # define RADEON_TXO_MICRO_LINEAR (0 << 3) # define RADEON_TXO_MICRO_TILE_X2 (1 << 3) # define RADEON_TXO_MICRO_TILE_OPT (2 << 3) # define RADEON_TXO_OFFSET_MASK 0xffffffe0 # define RADEON_TXO_OFFSET_SHIFT 5 #define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */ #define RADEON_PP_CUBIC_OFFSET_T0_1 0x1dd4 #define RADEON_PP_CUBIC_OFFSET_T0_2 0x1dd8 #define RADEON_PP_CUBIC_OFFSET_T0_3 0x1ddc #define RADEON_PP_CUBIC_OFFSET_T0_4 0x1de0 #define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00 #define RADEON_PP_CUBIC_OFFSET_T1_1 0x1e04 #define RADEON_PP_CUBIC_OFFSET_T1_2 0x1e08 #define RADEON_PP_CUBIC_OFFSET_T1_3 0x1e0c #define RADEON_PP_CUBIC_OFFSET_T1_4 0x1e10 #define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14 #define RADEON_PP_CUBIC_OFFSET_T2_1 0x1e18 #define RADEON_PP_CUBIC_OFFSET_T2_2 0x1e1c #define RADEON_PP_CUBIC_OFFSET_T2_3 0x1e20 #define RADEON_PP_CUBIC_OFFSET_T2_4 0x1e24 #define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */ #define RADEON_PP_TEX_SIZE_1 0x1d0c #define RADEON_PP_TEX_SIZE_2 0x1d14 # define RADEON_TEX_USIZE_MASK (0x7ff << 0) # define RADEON_TEX_USIZE_SHIFT 0 # define RADEON_TEX_VSIZE_MASK (0x7ff << 16) # define RADEON_TEX_VSIZE_SHIFT 16 # define RADEON_SIGNED_RGB_MASK (1 << 30) # define RADEON_SIGNED_RGB_SHIFT 30 -# define RADEON_SIGNED_ALPHA_MASK (1 << 31) +# define RADEON_SIGNED_ALPHA_MASK (1U << 31) # define RADEON_SIGNED_ALPHA_SHIFT 31 #define RADEON_PP_TEX_PITCH_0 0x1d08 /* NPOT */ #define RADEON_PP_TEX_PITCH_1 0x1d10 /* NPOT */ #define RADEON_PP_TEX_PITCH_2 0x1d18 /* NPOT */ /* note: bits 13-5: 32 byte aligned stride of texture map */ #define RADEON_PP_TXCBLEND_0 0x1c60 #define RADEON_PP_TXCBLEND_1 0x1c78 #define RADEON_PP_TXCBLEND_2 0x1c90 # define RADEON_COLOR_ARG_A_SHIFT 0 # define RADEON_COLOR_ARG_A_MASK (0x1f << 0) # define RADEON_COLOR_ARG_A_ZERO (0 << 0) # define RADEON_COLOR_ARG_A_CURRENT_COLOR (2 << 0) # define RADEON_COLOR_ARG_A_CURRENT_ALPHA (3 << 0) # define RADEON_COLOR_ARG_A_DIFFUSE_COLOR (4 << 0) # define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA (5 << 0) # define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6 << 0) # define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7 << 0) # define RADEON_COLOR_ARG_A_TFACTOR_COLOR (8 << 0) # define RADEON_COLOR_ARG_A_TFACTOR_ALPHA (9 << 0) # define RADEON_COLOR_ARG_A_T0_COLOR (10 << 0) # define RADEON_COLOR_ARG_A_T0_ALPHA (11 << 0) # define RADEON_COLOR_ARG_A_T1_COLOR (12 << 0) # define RADEON_COLOR_ARG_A_T1_ALPHA (13 << 0) # define RADEON_COLOR_ARG_A_T2_COLOR (14 << 0) # define RADEON_COLOR_ARG_A_T2_ALPHA (15 << 0) # define RADEON_COLOR_ARG_A_T3_COLOR (16 << 0) # define RADEON_COLOR_ARG_A_T3_ALPHA (17 << 0) # define RADEON_COLOR_ARG_B_SHIFT 5 # define RADEON_COLOR_ARG_B_MASK (0x1f << 5) # define RADEON_COLOR_ARG_B_ZERO (0 << 5) # define RADEON_COLOR_ARG_B_CURRENT_COLOR (2 << 5) # define RADEON_COLOR_ARG_B_CURRENT_ALPHA (3 << 5) # define RADEON_COLOR_ARG_B_DIFFUSE_COLOR (4 << 5) # define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA (5 << 5) # define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6 << 5) # define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7 << 5) # define RADEON_COLOR_ARG_B_TFACTOR_COLOR (8 << 5) # define RADEON_COLOR_ARG_B_TFACTOR_ALPHA (9 << 5) # define RADEON_COLOR_ARG_B_T0_COLOR (10 << 5) # define RADEON_COLOR_ARG_B_T0_ALPHA (11 << 5) # define RADEON_COLOR_ARG_B_T1_COLOR (12 << 5) # define RADEON_COLOR_ARG_B_T1_ALPHA (13 << 5) # define RADEON_COLOR_ARG_B_T2_COLOR (14 << 5) # define RADEON_COLOR_ARG_B_T2_ALPHA (15 << 
5) # define RADEON_COLOR_ARG_B_T3_COLOR (16 << 5) # define RADEON_COLOR_ARG_B_T3_ALPHA (17 << 5) # define RADEON_COLOR_ARG_C_SHIFT 10 # define RADEON_COLOR_ARG_C_MASK (0x1f << 10) # define RADEON_COLOR_ARG_C_ZERO (0 << 10) # define RADEON_COLOR_ARG_C_CURRENT_COLOR (2 << 10) # define RADEON_COLOR_ARG_C_CURRENT_ALPHA (3 << 10) # define RADEON_COLOR_ARG_C_DIFFUSE_COLOR (4 << 10) # define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA (5 << 10) # define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6 << 10) # define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7 << 10) # define RADEON_COLOR_ARG_C_TFACTOR_COLOR (8 << 10) # define RADEON_COLOR_ARG_C_TFACTOR_ALPHA (9 << 10) # define RADEON_COLOR_ARG_C_T0_COLOR (10 << 10) # define RADEON_COLOR_ARG_C_T0_ALPHA (11 << 10) # define RADEON_COLOR_ARG_C_T1_COLOR (12 << 10) # define RADEON_COLOR_ARG_C_T1_ALPHA (13 << 10) # define RADEON_COLOR_ARG_C_T2_COLOR (14 << 10) # define RADEON_COLOR_ARG_C_T2_ALPHA (15 << 10) # define RADEON_COLOR_ARG_C_T3_COLOR (16 << 10) # define RADEON_COLOR_ARG_C_T3_ALPHA (17 << 10) # define RADEON_COMP_ARG_A (1 << 15) # define RADEON_COMP_ARG_A_SHIFT 15 # define RADEON_COMP_ARG_B (1 << 16) # define RADEON_COMP_ARG_B_SHIFT 16 # define RADEON_COMP_ARG_C (1 << 17) # define RADEON_COMP_ARG_C_SHIFT 17 # define RADEON_BLEND_CTL_MASK (7 << 18) # define RADEON_BLEND_CTL_ADD (0 << 18) # define RADEON_BLEND_CTL_SUBTRACT (1 << 18) # define RADEON_BLEND_CTL_ADDSIGNED (2 << 18) # define RADEON_BLEND_CTL_BLEND (3 << 18) # define RADEON_BLEND_CTL_DOT3 (4 << 18) # define RADEON_SCALE_SHIFT 21 # define RADEON_SCALE_MASK (3 << 21) # define RADEON_SCALE_1X (0 << 21) # define RADEON_SCALE_2X (1 << 21) # define RADEON_SCALE_4X (2 << 21) # define RADEON_CLAMP_TX (1 << 23) # define RADEON_T0_EQ_TCUR (1 << 24) # define RADEON_T1_EQ_TCUR (1 << 25) # define RADEON_T2_EQ_TCUR (1 << 26) # define RADEON_T3_EQ_TCUR (1 << 27) # define RADEON_COLOR_ARG_MASK 0x1f # define RADEON_COMP_ARG_SHIFT 15 #define RADEON_PP_TXABLEND_0 0x1c64 #define RADEON_PP_TXABLEND_1 0x1c7c #define RADEON_PP_TXABLEND_2 0x1c94 # define RADEON_ALPHA_ARG_A_SHIFT 0 # define RADEON_ALPHA_ARG_A_MASK (0xf << 0) # define RADEON_ALPHA_ARG_A_ZERO (0 << 0) # define RADEON_ALPHA_ARG_A_CURRENT_ALPHA (1 << 0) # define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA (2 << 0) # define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3 << 0) # define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA (4 << 0) # define RADEON_ALPHA_ARG_A_T0_ALPHA (5 << 0) # define RADEON_ALPHA_ARG_A_T1_ALPHA (6 << 0) # define RADEON_ALPHA_ARG_A_T2_ALPHA (7 << 0) # define RADEON_ALPHA_ARG_A_T3_ALPHA (8 << 0) # define RADEON_ALPHA_ARG_B_SHIFT 4 # define RADEON_ALPHA_ARG_B_MASK (0xf << 4) # define RADEON_ALPHA_ARG_B_ZERO (0 << 4) # define RADEON_ALPHA_ARG_B_CURRENT_ALPHA (1 << 4) # define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA (2 << 4) # define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3 << 4) # define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA (4 << 4) # define RADEON_ALPHA_ARG_B_T0_ALPHA (5 << 4) # define RADEON_ALPHA_ARG_B_T1_ALPHA (6 << 4) # define RADEON_ALPHA_ARG_B_T2_ALPHA (7 << 4) # define RADEON_ALPHA_ARG_B_T3_ALPHA (8 << 4) # define RADEON_ALPHA_ARG_C_SHIFT 8 # define RADEON_ALPHA_ARG_C_MASK (0xf << 8) # define RADEON_ALPHA_ARG_C_ZERO (0 << 8) # define RADEON_ALPHA_ARG_C_CURRENT_ALPHA (1 << 8) # define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA (2 << 8) # define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3 << 8) # define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA (4 << 8) # define RADEON_ALPHA_ARG_C_T0_ALPHA (5 << 8) # define RADEON_ALPHA_ARG_C_T1_ALPHA (6 << 8) # define RADEON_ALPHA_ARG_C_T2_ALPHA (7 << 8) # define 
RADEON_ALPHA_ARG_C_T3_ALPHA (8 << 8) # define RADEON_DOT_ALPHA_DONT_REPLICATE (1 << 9) # define RADEON_ALPHA_ARG_MASK 0xf #define RADEON_PP_TFACTOR_0 0x1c68 #define RADEON_PP_TFACTOR_1 0x1c80 #define RADEON_PP_TFACTOR_2 0x1c98 #define RADEON_RB3D_BLENDCNTL 0x1c20 # define RADEON_COMB_FCN_MASK (3 << 12) # define RADEON_COMB_FCN_ADD_CLAMP (0 << 12) # define RADEON_COMB_FCN_ADD_NOCLAMP (1 << 12) # define RADEON_COMB_FCN_SUB_CLAMP (2 << 12) # define RADEON_COMB_FCN_SUB_NOCLAMP (3 << 12) # define RADEON_SRC_BLEND_GL_ZERO (32 << 16) # define RADEON_SRC_BLEND_GL_ONE (33 << 16) # define RADEON_SRC_BLEND_GL_SRC_COLOR (34 << 16) # define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16) # define RADEON_SRC_BLEND_GL_DST_COLOR (36 << 16) # define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16) # define RADEON_SRC_BLEND_GL_SRC_ALPHA (38 << 16) # define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16) # define RADEON_SRC_BLEND_GL_DST_ALPHA (40 << 16) # define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16) # define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16) # define RADEON_SRC_BLEND_MASK (63 << 16) # define RADEON_DST_BLEND_GL_ZERO (32 << 24) # define RADEON_DST_BLEND_GL_ONE (33 << 24) # define RADEON_DST_BLEND_GL_SRC_COLOR (34 << 24) # define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24) # define RADEON_DST_BLEND_GL_DST_COLOR (36 << 24) # define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24) # define RADEON_DST_BLEND_GL_SRC_ALPHA (38 << 24) # define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24) # define RADEON_DST_BLEND_GL_DST_ALPHA (40 << 24) # define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24) # define RADEON_DST_BLEND_MASK (63 << 24) #define RADEON_RB3D_CNTL 0x1c3c # define RADEON_ALPHA_BLEND_ENABLE (1 << 0) # define RADEON_PLANE_MASK_ENABLE (1 << 1) # define RADEON_DITHER_ENABLE (1 << 2) # define RADEON_ROUND_ENABLE (1 << 3) # define RADEON_SCALE_DITHER_ENABLE (1 << 4) # define RADEON_DITHER_INIT (1 << 5) # define RADEON_ROP_ENABLE (1 << 6) # define RADEON_STENCIL_ENABLE (1 << 7) # define RADEON_Z_ENABLE (1 << 8) # define RADEON_DEPTHXY_OFFSET_ENABLE (1 << 9) # define RADEON_RB3D_COLOR_FORMAT_SHIFT 10 # define RADEON_COLOR_FORMAT_ARGB1555 3 # define RADEON_COLOR_FORMAT_RGB565 4 # define RADEON_COLOR_FORMAT_ARGB8888 6 # define RADEON_COLOR_FORMAT_RGB332 7 # define RADEON_COLOR_FORMAT_Y8 8 # define RADEON_COLOR_FORMAT_RGB8 9 # define RADEON_COLOR_FORMAT_YUV422_VYUY 11 # define RADEON_COLOR_FORMAT_YUV422_YVYU 12 # define RADEON_COLOR_FORMAT_aYUV444 14 # define RADEON_COLOR_FORMAT_ARGB4444 15 # define RADEON_CLRCMP_FLIP_ENABLE (1 << 14) #define RADEON_RB3D_COLOROFFSET 0x1c40 # define RADEON_COLOROFFSET_MASK 0xfffffff0 #define RADEON_RB3D_COLORPITCH 0x1c48 # define RADEON_COLORPITCH_MASK 0x00001ff8 # define RADEON_COLOR_TILE_ENABLE (1 << 16) # define RADEON_COLOR_MICROTILE_ENABLE (1 << 17) # define RADEON_COLOR_ENDIAN_NO_SWAP (0 << 18) # define RADEON_COLOR_ENDIAN_WORD_SWAP (1 << 18) # define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18) #define RADEON_RB3D_DEPTHOFFSET 0x1c24 #define RADEON_RB3D_DEPTHPITCH 0x1c28 # define RADEON_DEPTHPITCH_MASK 0x00001ff8 # define RADEON_DEPTH_ENDIAN_NO_SWAP (0 << 18) # define RADEON_DEPTH_ENDIAN_WORD_SWAP (1 << 18) # define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) #define RADEON_RB3D_PLANEMASK 0x1d84 #define RADEON_RB3D_ROPCNTL 0x1d80 # define RADEON_ROP_MASK (15 << 8) # define RADEON_ROP_CLEAR (0 << 8) # define RADEON_ROP_NOR (1 << 8) # define RADEON_ROP_AND_INVERTED (2 << 8) # define RADEON_ROP_COPY_INVERTED (3 << 8) # define
RADEON_ROP_AND_REVERSE (4 << 8) # define RADEON_ROP_INVERT (5 << 8) # define RADEON_ROP_XOR (6 << 8) # define RADEON_ROP_NAND (7 << 8) # define RADEON_ROP_AND (8 << 8) # define RADEON_ROP_EQUIV (9 << 8) # define RADEON_ROP_NOOP (10 << 8) # define RADEON_ROP_OR_INVERTED (11 << 8) # define RADEON_ROP_COPY (12 << 8) # define RADEON_ROP_OR_REVERSE (13 << 8) # define RADEON_ROP_OR (14 << 8) # define RADEON_ROP_SET (15 << 8) #define RADEON_RB3D_STENCILREFMASK 0x1d7c # define RADEON_STENCIL_REF_SHIFT 0 # define RADEON_STENCIL_REF_MASK (0xff << 0) # define RADEON_STENCIL_MASK_SHIFT 16 # define RADEON_STENCIL_VALUE_MASK (0xff << 16) # define RADEON_STENCIL_WRITEMASK_SHIFT 24 # define RADEON_STENCIL_WRITE_MASK (0xff << 24) #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c # define RADEON_DEPTH_FORMAT_MASK (0xf << 0) # define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) # define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) # define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3 << 0) # define RADEON_DEPTH_FORMAT_32BIT_INT_Z (4 << 0) # define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5 << 0) # define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7 << 0) # define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9 << 0) # define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 << 0) # define RADEON_Z_TEST_NEVER (0 << 4) # define RADEON_Z_TEST_LESS (1 << 4) # define RADEON_Z_TEST_LEQUAL (2 << 4) # define RADEON_Z_TEST_EQUAL (3 << 4) # define RADEON_Z_TEST_GEQUAL (4 << 4) # define RADEON_Z_TEST_GREATER (5 << 4) # define RADEON_Z_TEST_NEQUAL (6 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_STENCIL_TEST_NEVER (0 << 12) # define RADEON_STENCIL_TEST_LESS (1 << 12) # define RADEON_STENCIL_TEST_LEQUAL (2 << 12) # define RADEON_STENCIL_TEST_EQUAL (3 << 12) # define RADEON_STENCIL_TEST_GEQUAL (4 << 12) # define RADEON_STENCIL_TEST_GREATER (5 << 12) # define RADEON_STENCIL_TEST_NEQUAL (6 << 12) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) # define RADEON_STENCIL_TEST_MASK (0x7 << 12) # define RADEON_STENCIL_FAIL_KEEP (0 << 16) # define RADEON_STENCIL_FAIL_ZERO (1 << 16) # define RADEON_STENCIL_FAIL_REPLACE (2 << 16) # define RADEON_STENCIL_FAIL_INC (3 << 16) # define RADEON_STENCIL_FAIL_DEC (4 << 16) # define RADEON_STENCIL_FAIL_INVERT (5 << 16) # define RADEON_STENCIL_FAIL_MASK (0x7 << 16) # define RADEON_STENCIL_ZPASS_KEEP (0 << 20) # define RADEON_STENCIL_ZPASS_ZERO (1 << 20) # define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) # define RADEON_STENCIL_ZPASS_INC (3 << 20) # define RADEON_STENCIL_ZPASS_DEC (4 << 20) # define RADEON_STENCIL_ZPASS_INVERT (5 << 20) # define RADEON_STENCIL_ZPASS_MASK (0x7 << 20) # define RADEON_STENCIL_ZFAIL_KEEP (0 << 24) # define RADEON_STENCIL_ZFAIL_ZERO (1 << 24) # define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) # define RADEON_STENCIL_ZFAIL_INC (3 << 24) # define RADEON_STENCIL_ZFAIL_DEC (4 << 24) # define RADEON_STENCIL_ZFAIL_INVERT (5 << 24) # define RADEON_STENCIL_ZFAIL_MASK (0x7 << 24) # define RADEON_Z_COMPRESSION_ENABLE (1 << 28) # define RADEON_FORCE_Z_DIRTY (1 << 29) # define RADEON_Z_WRITE_ENABLE (1 << 30) #define RADEON_RE_LINE_PATTERN 0x1cd0 # define RADEON_LINE_PATTERN_MASK 0x0000ffff # define RADEON_LINE_REPEAT_COUNT_SHIFT 16 # define RADEON_LINE_PATTERN_START_SHIFT 24 # define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28) # define RADEON_LINE_PATTERN_BIG_BIT_ORDER (1 << 28) # define RADEON_LINE_PATTERN_AUTO_RESET (1 << 29) #define RADEON_RE_LINE_STATE 0x1cd4 # define RADEON_LINE_CURRENT_PTR_SHIFT 0 # define RADEON_LINE_CURRENT_COUNT_SHIFT 8 #define RADEON_RE_MISC 0x26c4 # define 
RADEON_STIPPLE_COORD_MASK 0x1f # define RADEON_STIPPLE_X_OFFSET_SHIFT 0 # define RADEON_STIPPLE_X_OFFSET_MASK (0x1f << 0) # define RADEON_STIPPLE_Y_OFFSET_SHIFT 8 # define RADEON_STIPPLE_Y_OFFSET_MASK (0x1f << 8) # define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16) # define RADEON_STIPPLE_BIG_BIT_ORDER (1 << 16) #define RADEON_RE_SOLID_COLOR 0x1c1c #define RADEON_RE_TOP_LEFT 0x26c0 # define RADEON_RE_LEFT_SHIFT 0 # define RADEON_RE_TOP_SHIFT 16 #define RADEON_RE_WIDTH_HEIGHT 0x1c44 # define RADEON_RE_WIDTH_SHIFT 0 # define RADEON_RE_HEIGHT_SHIFT 16 #define RADEON_RB3D_ZPASS_DATA 0x3290 #define RADEON_RB3D_ZPASS_ADDR 0x3294 #define RADEON_SE_CNTL 0x1c4c # define RADEON_FFACE_CULL_CW (0 << 0) # define RADEON_FFACE_CULL_CCW (1 << 0) # define RADEON_FFACE_CULL_DIR_MASK (1 << 0) # define RADEON_BFACE_CULL (0 << 1) # define RADEON_BFACE_SOLID (3 << 1) # define RADEON_FFACE_CULL (0 << 3) # define RADEON_FFACE_SOLID (3 << 3) # define RADEON_FFACE_CULL_MASK (3 << 3) # define RADEON_BADVTX_CULL_DISABLE (1 << 5) # define RADEON_FLAT_SHADE_VTX_0 (0 << 6) # define RADEON_FLAT_SHADE_VTX_1 (1 << 6) # define RADEON_FLAT_SHADE_VTX_2 (2 << 6) # define RADEON_FLAT_SHADE_VTX_LAST (3 << 6) # define RADEON_DIFFUSE_SHADE_SOLID (0 << 8) # define RADEON_DIFFUSE_SHADE_FLAT (1 << 8) # define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8) # define RADEON_DIFFUSE_SHADE_MASK (3 << 8) # define RADEON_ALPHA_SHADE_SOLID (0 << 10) # define RADEON_ALPHA_SHADE_FLAT (1 << 10) # define RADEON_ALPHA_SHADE_GOURAUD (2 << 10) # define RADEON_ALPHA_SHADE_MASK (3 << 10) # define RADEON_SPECULAR_SHADE_SOLID (0 << 12) # define RADEON_SPECULAR_SHADE_FLAT (1 << 12) # define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12) # define RADEON_SPECULAR_SHADE_MASK (3 << 12) # define RADEON_FOG_SHADE_SOLID (0 << 14) # define RADEON_FOG_SHADE_FLAT (1 << 14) # define RADEON_FOG_SHADE_GOURAUD (2 << 14) # define RADEON_FOG_SHADE_MASK (3 << 14) # define RADEON_ZBIAS_ENABLE_POINT (1 << 16) # define RADEON_ZBIAS_ENABLE_LINE (1 << 17) # define RADEON_ZBIAS_ENABLE_TRI (1 << 18) # define RADEON_WIDELINE_ENABLE (1 << 20) # define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24) # define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25) # define RADEON_VTX_PIX_CENTER_D3D (0 << 27) # define RADEON_VTX_PIX_CENTER_OGL (1 << 27) # define RADEON_ROUND_MODE_TRUNC (0 << 28) # define RADEON_ROUND_MODE_ROUND (1 << 28) # define RADEON_ROUND_MODE_ROUND_EVEN (2 << 28) # define RADEON_ROUND_MODE_ROUND_ODD (3 << 28) # define RADEON_ROUND_PREC_16TH_PIX (0 << 30) # define RADEON_ROUND_PREC_8TH_PIX (1 << 30) # define RADEON_ROUND_PREC_4TH_PIX (2 << 30) # define RADEON_ROUND_PREC_HALF_PIX (3 << 30) #define R200_RE_CNTL 0x1c50 # define R200_STIPPLE_ENABLE 0x1 # define R200_SCISSOR_ENABLE 0x2 # define R200_PATTERN_ENABLE 0x4 # define R200_PERSPECTIVE_ENABLE 0x8 # define R200_POINT_SMOOTH 0x20 # define R200_VTX_STQ0_D3D 0x00010000 # define R200_VTX_STQ1_D3D 0x00040000 # define R200_VTX_STQ2_D3D 0x00100000 # define R200_VTX_STQ3_D3D 0x00400000 # define R200_VTX_STQ4_D3D 0x01000000 # define R200_VTX_STQ5_D3D 0x04000000 #define RADEON_SE_CNTL_STATUS 0x2140 # define RADEON_VC_NO_SWAP (0 << 0) # define RADEON_VC_16BIT_SWAP (1 << 0) # define RADEON_VC_32BIT_SWAP (2 << 0) # define RADEON_VC_HALF_DWORD_SWAP (3 << 0) # define RADEON_TCL_BYPASS (1 << 8) #define RADEON_SE_COORD_FMT 0x1c50 # define RADEON_VTX_XY_PRE_MULT_1_OVER_W0 (1 << 0) # define RADEON_VTX_Z_PRE_MULT_1_OVER_W0 (1 << 1) # define RADEON_VTX_ST0_NONPARAMETRIC (1 << 8) # define RADEON_VTX_ST1_NONPARAMETRIC (1 << 9) # define RADEON_VTX_ST2_NONPARAMETRIC (1 << 10) # 
define RADEON_VTX_ST3_NONPARAMETRIC (1 << 11) # define RADEON_VTX_W0_NORMALIZE (1 << 12) # define RADEON_VTX_W0_IS_NOT_1_OVER_W0 (1 << 16) # define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17) # define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19) # define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21) # define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23) # define RADEON_TEX1_W_ROUTING_USE_W0 (0 << 26) # define RADEON_TEX1_W_ROUTING_USE_Q1 (1 << 26) #define RADEON_SE_LINE_WIDTH 0x1db8 #define RADEON_SE_TCL_LIGHT_MODEL_CTL 0x226c # define RADEON_LIGHTING_ENABLE (1 << 0) # define RADEON_LIGHT_IN_MODELSPACE (1 << 1) # define RADEON_LOCAL_VIEWER (1 << 2) # define RADEON_NORMALIZE_NORMALS (1 << 3) # define RADEON_RESCALE_NORMALS (1 << 4) # define RADEON_SPECULAR_LIGHTS (1 << 5) # define RADEON_DIFFUSE_SPECULAR_COMBINE (1 << 6) # define RADEON_LIGHT_ALPHA (1 << 7) # define RADEON_LOCAL_LIGHT_VEC_GL (1 << 8) # define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9) # define RADEON_LM_SOURCE_STATE_PREMULT 0 # define RADEON_LM_SOURCE_STATE_MULT 1 # define RADEON_LM_SOURCE_VERTEX_DIFFUSE 2 # define RADEON_LM_SOURCE_VERTEX_SPECULAR 3 # define RADEON_EMISSIVE_SOURCE_SHIFT 16 # define RADEON_AMBIENT_SOURCE_SHIFT 18 # define RADEON_DIFFUSE_SOURCE_SHIFT 20 # define RADEON_SPECULAR_SOURCE_SHIFT 22 #define RADEON_SE_TCL_MATERIAL_AMBIENT_RED 0x2220 #define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN 0x2224 #define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE 0x2228 #define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA 0x222c #define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED 0x2230 #define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN 0x2234 #define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE 0x2238 #define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA 0x223c #define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 #define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214 #define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE 0x2218 #define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c #define RADEON_SE_TCL_MATERIAL_SPECULAR_RED 0x2240 #define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN 0x2244 #define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE 0x2248 #define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA 0x224c #define RADEON_SE_TCL_MATRIX_SELECT_0 0x225c # define RADEON_MODELVIEW_0_SHIFT 0 # define RADEON_MODELVIEW_1_SHIFT 4 # define RADEON_MODELVIEW_2_SHIFT 8 # define RADEON_MODELVIEW_3_SHIFT 12 # define RADEON_IT_MODELVIEW_0_SHIFT 16 # define RADEON_IT_MODELVIEW_1_SHIFT 20 # define RADEON_IT_MODELVIEW_2_SHIFT 24 # define RADEON_IT_MODELVIEW_3_SHIFT 28 #define RADEON_SE_TCL_MATRIX_SELECT_1 0x2260 # define RADEON_MODELPROJECT_0_SHIFT 0 # define RADEON_MODELPROJECT_1_SHIFT 4 # define RADEON_MODELPROJECT_2_SHIFT 8 # define RADEON_MODELPROJECT_3_SHIFT 12 # define RADEON_TEXMAT_0_SHIFT 16 # define RADEON_TEXMAT_1_SHIFT 20 # define RADEON_TEXMAT_2_SHIFT 24 # define RADEON_TEXMAT_3_SHIFT 28 #define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 # define RADEON_TCL_VTX_W0 (1 << 0) # define RADEON_TCL_VTX_FP_DIFFUSE (1 << 1) # define RADEON_TCL_VTX_FP_ALPHA (1 << 2) # define RADEON_TCL_VTX_PK_DIFFUSE (1 << 3) # define RADEON_TCL_VTX_FP_SPEC (1 << 4) # define RADEON_TCL_VTX_FP_FOG (1 << 5) # define RADEON_TCL_VTX_PK_SPEC (1 << 6) # define RADEON_TCL_VTX_ST0 (1 << 7) # define RADEON_TCL_VTX_ST1 (1 << 8) # define RADEON_TCL_VTX_Q1 (1 << 9) # define RADEON_TCL_VTX_ST2 (1 << 10) # define RADEON_TCL_VTX_Q2 (1 << 11) # define RADEON_TCL_VTX_ST3 (1 << 12) # define RADEON_TCL_VTX_Q3 (1 << 13) # define RADEON_TCL_VTX_Q0 (1 << 14) # define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15 # define RADEON_TCL_VTX_NORM0 (1 << 18) # define RADEON_TCL_VTX_XY1 
(1 << 27) # define RADEON_TCL_VTX_Z1 (1 << 28) # define RADEON_TCL_VTX_W1 (1 << 29) # define RADEON_TCL_VTX_NORM1 (1 << 30) -# define RADEON_TCL_VTX_Z0 (1 << 31) +# define RADEON_TCL_VTX_Z0 (1U << 31) #define RADEON_SE_TCL_OUTPUT_VTX_SEL 0x2258 # define RADEON_TCL_COMPUTE_XYZW (1 << 0) # define RADEON_TCL_COMPUTE_DIFFUSE (1 << 1) # define RADEON_TCL_COMPUTE_SPECULAR (1 << 2) # define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3) # define RADEON_TCL_FORCE_INORDER_PROC (1 << 4) # define RADEON_TCL_TEX_INPUT_TEX_0 0 # define RADEON_TCL_TEX_INPUT_TEX_1 1 # define RADEON_TCL_TEX_INPUT_TEX_2 2 # define RADEON_TCL_TEX_INPUT_TEX_3 3 # define RADEON_TCL_TEX_COMPUTED_TEX_0 8 # define RADEON_TCL_TEX_COMPUTED_TEX_1 9 # define RADEON_TCL_TEX_COMPUTED_TEX_2 10 # define RADEON_TCL_TEX_COMPUTED_TEX_3 11 # define RADEON_TCL_TEX_0_OUTPUT_SHIFT 16 # define RADEON_TCL_TEX_1_OUTPUT_SHIFT 20 # define RADEON_TCL_TEX_2_OUTPUT_SHIFT 24 # define RADEON_TCL_TEX_3_OUTPUT_SHIFT 28 #define RADEON_SE_TCL_PER_LIGHT_CTL_0 0x2270 # define RADEON_LIGHT_0_ENABLE (1 << 0) # define RADEON_LIGHT_0_ENABLE_AMBIENT (1 << 1) # define RADEON_LIGHT_0_ENABLE_SPECULAR (1 << 2) # define RADEON_LIGHT_0_IS_LOCAL (1 << 3) # define RADEON_LIGHT_0_IS_SPOT (1 << 4) # define RADEON_LIGHT_0_DUAL_CONE (1 << 5) # define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN (1 << 6) # define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 << 7) # define RADEON_LIGHT_0_SHIFT 0 # define RADEON_LIGHT_1_ENABLE (1 << 16) # define RADEON_LIGHT_1_ENABLE_AMBIENT (1 << 17) # define RADEON_LIGHT_1_ENABLE_SPECULAR (1 << 18) # define RADEON_LIGHT_1_IS_LOCAL (1 << 19) # define RADEON_LIGHT_1_IS_SPOT (1 << 20) # define RADEON_LIGHT_1_DUAL_CONE (1 << 21) # define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN (1 << 22) # define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23) # define RADEON_LIGHT_1_SHIFT 16 #define RADEON_SE_TCL_PER_LIGHT_CTL_1 0x2274 # define RADEON_LIGHT_2_SHIFT 0 # define RADEON_LIGHT_3_SHIFT 16 #define RADEON_SE_TCL_PER_LIGHT_CTL_2 0x2278 # define RADEON_LIGHT_4_SHIFT 0 # define RADEON_LIGHT_5_SHIFT 16 #define RADEON_SE_TCL_PER_LIGHT_CTL_3 0x227c # define RADEON_LIGHT_6_SHIFT 0 # define RADEON_LIGHT_7_SHIFT 16 #define RADEON_SE_TCL_SHININESS 0x2250 #define RADEON_SE_TCL_TEXTURE_PROC_CTL 0x2268 # define RADEON_TEXGEN_TEXMAT_0_ENABLE (1 << 0) # define RADEON_TEXGEN_TEXMAT_1_ENABLE (1 << 1) # define RADEON_TEXGEN_TEXMAT_2_ENABLE (1 << 2) # define RADEON_TEXGEN_TEXMAT_3_ENABLE (1 << 3) # define RADEON_TEXMAT_0_ENABLE (1 << 4) # define RADEON_TEXMAT_1_ENABLE (1 << 5) # define RADEON_TEXMAT_2_ENABLE (1 << 6) # define RADEON_TEXMAT_3_ENABLE (1 << 7) # define RADEON_TEXGEN_INPUT_MASK 0xf # define RADEON_TEXGEN_INPUT_TEXCOORD_0 0 # define RADEON_TEXGEN_INPUT_TEXCOORD_1 1 # define RADEON_TEXGEN_INPUT_TEXCOORD_2 2 # define RADEON_TEXGEN_INPUT_TEXCOORD_3 3 # define RADEON_TEXGEN_INPUT_OBJ 4 # define RADEON_TEXGEN_INPUT_EYE 5 # define RADEON_TEXGEN_INPUT_EYE_NORMAL 6 # define RADEON_TEXGEN_INPUT_EYE_REFLECT 7 # define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8 # define RADEON_TEXGEN_0_INPUT_SHIFT 16 # define RADEON_TEXGEN_1_INPUT_SHIFT 20 # define RADEON_TEXGEN_2_INPUT_SHIFT 24 # define RADEON_TEXGEN_3_INPUT_SHIFT 28 #define RADEON_SE_TCL_UCP_VERT_BLEND_CTL 0x2264 # define RADEON_UCP_IN_CLIP_SPACE (1 << 0) # define RADEON_UCP_IN_MODEL_SPACE (1 << 1) # define RADEON_UCP_ENABLE_0 (1 << 2) # define RADEON_UCP_ENABLE_1 (1 << 3) # define RADEON_UCP_ENABLE_2 (1 << 4) # define RADEON_UCP_ENABLE_3 (1 << 5) # define RADEON_UCP_ENABLE_4 (1 << 6) # define RADEON_UCP_ENABLE_5 (1 << 7) # define RADEON_TCL_FOG_MASK (3 
<< 8) # define RADEON_TCL_FOG_DISABLE (0 << 8) # define RADEON_TCL_FOG_EXP (1 << 8) # define RADEON_TCL_FOG_EXP2 (2 << 8) # define RADEON_TCL_FOG_LINEAR (3 << 8) # define RADEON_RNG_BASED_FOG (1 << 10) # define RADEON_LIGHT_TWOSIDE (1 << 11) # define RADEON_BLEND_OP_COUNT_MASK (7 << 12) # define RADEON_BLEND_OP_COUNT_SHIFT 12 # define RADEON_POSITION_BLEND_OP_ENABLE (1 << 16) # define RADEON_NORMAL_BLEND_OP_ENABLE (1 << 17) # define RADEON_VERTEX_BLEND_SRC_0_PRIMARY (1 << 18) # define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18) # define RADEON_VERTEX_BLEND_SRC_1_PRIMARY (1 << 19) # define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19) # define RADEON_VERTEX_BLEND_SRC_2_PRIMARY (1 << 20) # define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20) # define RADEON_VERTEX_BLEND_SRC_3_PRIMARY (1 << 21) # define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21) # define RADEON_VERTEX_BLEND_WGT_MINUS_ONE (1 << 22) # define RADEON_CULL_FRONT_IS_CW (0 << 28) # define RADEON_CULL_FRONT_IS_CCW (1 << 28) # define RADEON_CULL_FRONT (1 << 29) # define RADEON_CULL_BACK (1 << 30) -# define RADEON_FORCE_W_TO_ONE (1 << 31) +# define RADEON_FORCE_W_TO_ONE (1U << 31) #define RADEON_SE_VPORT_XSCALE 0x1d98 #define RADEON_SE_VPORT_XOFFSET 0x1d9c #define RADEON_SE_VPORT_YSCALE 0x1da0 #define RADEON_SE_VPORT_YOFFSET 0x1da4 #define RADEON_SE_VPORT_ZSCALE 0x1da8 #define RADEON_SE_VPORT_ZOFFSET 0x1dac #define RADEON_SE_ZBIAS_FACTOR 0x1db0 #define RADEON_SE_ZBIAS_CONSTANT 0x1db4 #define RADEON_SE_VTX_FMT 0x2080 # define RADEON_SE_VTX_FMT_XY 0x00000000 # define RADEON_SE_VTX_FMT_W0 0x00000001 # define RADEON_SE_VTX_FMT_FPCOLOR 0x00000002 # define RADEON_SE_VTX_FMT_FPALPHA 0x00000004 # define RADEON_SE_VTX_FMT_PKCOLOR 0x00000008 # define RADEON_SE_VTX_FMT_FPSPEC 0x00000010 # define RADEON_SE_VTX_FMT_FPFOG 0x00000020 # define RADEON_SE_VTX_FMT_PKSPEC 0x00000040 # define RADEON_SE_VTX_FMT_ST0 0x00000080 # define RADEON_SE_VTX_FMT_ST1 0x00000100 # define RADEON_SE_VTX_FMT_Q1 0x00000200 # define RADEON_SE_VTX_FMT_ST2 0x00000400 # define RADEON_SE_VTX_FMT_Q2 0x00000800 # define RADEON_SE_VTX_FMT_ST3 0x00001000 # define RADEON_SE_VTX_FMT_Q3 0x00002000 # define RADEON_SE_VTX_FMT_Q0 0x00004000 # define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK 0x00038000 # define RADEON_SE_VTX_FMT_N0 0x00040000 # define RADEON_SE_VTX_FMT_XY1 0x08000000 # define RADEON_SE_VTX_FMT_Z1 0x10000000 # define RADEON_SE_VTX_FMT_W1 0x20000000 # define RADEON_SE_VTX_FMT_N1 0x40000000 # define RADEON_SE_VTX_FMT_Z 0x80000000 #define RADEON_SE_VF_CNTL 0x2084 # define RADEON_VF_PRIM_TYPE_POINT_LIST 1 # define RADEON_VF_PRIM_TYPE_LINE_LIST 2 # define RADEON_VF_PRIM_TYPE_LINE_STRIP 3 # define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST 4 # define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN 5 # define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP 6 # define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG 7 # define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST 8 # define RADEON_VF_PRIM_TYPE_POINT_LIST_3 9 # define RADEON_VF_PRIM_TYPE_LINE_LIST_3 10 # define RADEON_VF_PRIM_TYPE_SPIRIT_LIST 11 # define RADEON_VF_PRIM_TYPE_LINE_LOOP 12 # define RADEON_VF_PRIM_TYPE_QUAD_LIST 13 # define RADEON_VF_PRIM_TYPE_QUAD_STRIP 14 # define RADEON_VF_PRIM_TYPE_POLYGON 15 # define RADEON_VF_PRIM_WALK_STATE (0<<4) # define RADEON_VF_PRIM_WALK_INDEX (1<<4) # define RADEON_VF_PRIM_WALK_LIST (2<<4) # define RADEON_VF_PRIM_WALK_DATA (3<<4) # define RADEON_VF_COLOR_ORDER_RGBA (1<<6) # define RADEON_VF_RADEON_MODE (1<<8) # define RADEON_VF_TCL_OUTPUT_CTL_ENA (1<<9) # define RADEON_VF_PROG_STREAM_ENA (1<<10) # define RADEON_VF_INDEX_SIZE_SHIFT 11 # 
define RADEON_VF_NUM_VERTICES_SHIFT 16 #define RADEON_SE_PORT_DATA0 0x2000 #define R200_SE_VAP_CNTL 0x2080 # define R200_VAP_TCL_ENABLE 0x00000001 # define R200_VAP_SINGLE_BUF_STATE_ENABLE 0x00000010 # define R200_VAP_FORCE_W_TO_ONE 0x00010000 # define R200_VAP_D3D_TEX_DEFAULT 0x00020000 # define R200_VAP_VF_MAX_VTX_NUM__SHIFT 18 # define R200_VAP_VF_MAX_VTX_NUM (9 << 18) # define R200_VAP_DX_CLIP_SPACE_DEF 0x00400000 #define R200_VF_MAX_VTX_INDX 0x210c #define R200_VF_MIN_VTX_INDX 0x2110 #define R200_SE_VTE_CNTL 0x20b0 # define R200_VPORT_X_SCALE_ENA 0x00000001 # define R200_VPORT_X_OFFSET_ENA 0x00000002 # define R200_VPORT_Y_SCALE_ENA 0x00000004 # define R200_VPORT_Y_OFFSET_ENA 0x00000008 # define R200_VPORT_Z_SCALE_ENA 0x00000010 # define R200_VPORT_Z_OFFSET_ENA 0x00000020 # define R200_VTX_XY_FMT 0x00000100 # define R200_VTX_Z_FMT 0x00000200 # define R200_VTX_W0_FMT 0x00000400 # define R200_VTX_W0_NORMALIZE 0x00000800 # define R200_VTX_ST_DENORMALIZED 0x00001000 #define R200_SE_VAP_CNTL_STATUS 0x2140 # define R200_VC_NO_SWAP (0 << 0) # define R200_VC_16BIT_SWAP (1 << 0) # define R200_VC_32BIT_SWAP (2 << 0) #define R200_PP_TXFILTER_0 0x2c00 #define R200_PP_TXFILTER_1 0x2c20 #define R200_PP_TXFILTER_2 0x2c40 #define R200_PP_TXFILTER_3 0x2c60 #define R200_PP_TXFILTER_4 0x2c80 #define R200_PP_TXFILTER_5 0x2ca0 # define R200_MAG_FILTER_NEAREST (0 << 0) # define R200_MAG_FILTER_LINEAR (1 << 0) # define R200_MAG_FILTER_MASK (1 << 0) # define R200_MIN_FILTER_NEAREST (0 << 1) # define R200_MIN_FILTER_LINEAR (1 << 1) # define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1) # define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1) # define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1) # define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1) # define R200_MIN_FILTER_ANISO_NEAREST (8 << 1) # define R200_MIN_FILTER_ANISO_LINEAR (9 << 1) # define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1) # define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1) # define R200_MIN_FILTER_MASK (15 << 1) # define R200_MAX_ANISO_1_TO_1 (0 << 5) # define R200_MAX_ANISO_2_TO_1 (1 << 5) # define R200_MAX_ANISO_4_TO_1 (2 << 5) # define R200_MAX_ANISO_8_TO_1 (3 << 5) # define R200_MAX_ANISO_16_TO_1 (4 << 5) # define R200_MAX_ANISO_MASK (7 << 5) # define R200_MAX_MIP_LEVEL_MASK (0x0f << 16) # define R200_MAX_MIP_LEVEL_SHIFT 16 # define R200_YUV_TO_RGB (1 << 20) # define R200_YUV_TEMPERATURE_COOL (0 << 21) # define R200_YUV_TEMPERATURE_HOT (1 << 21) # define R200_YUV_TEMPERATURE_MASK (1 << 21) # define R200_WRAPEN_S (1 << 22) # define R200_CLAMP_S_WRAP (0 << 23) # define R200_CLAMP_S_MIRROR (1 << 23) # define R200_CLAMP_S_CLAMP_LAST (2 << 23) # define R200_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23) # define R200_CLAMP_S_CLAMP_BORDER (4 << 23) # define R200_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23) # define R200_CLAMP_S_CLAMP_GL (6 << 23) # define R200_CLAMP_S_MIRROR_CLAMP_GL (7 << 23) # define R200_CLAMP_S_MASK (7 << 23) # define R200_WRAPEN_T (1 << 26) # define R200_CLAMP_T_WRAP (0 << 27) # define R200_CLAMP_T_MIRROR (1 << 27) # define R200_CLAMP_T_CLAMP_LAST (2 << 27) # define R200_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27) # define R200_CLAMP_T_CLAMP_BORDER (4 << 27) # define R200_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27) # define R200_CLAMP_T_CLAMP_GL (6 << 27) # define R200_CLAMP_T_MIRROR_CLAMP_GL (7 << 27) # define R200_CLAMP_T_MASK (7 << 27) # define R200_KILL_LT_ZERO (1 << 30) # define R200_BORDER_MODE_OGL (0 << 31) # define R200_BORDER_MODE_D3D (1 << 31) #define R200_PP_TXFORMAT_0 0x2c04 #define R200_PP_TXFORMAT_1 0x2c24 #define 
R200_PP_TXFORMAT_2 0x2c44 #define R200_PP_TXFORMAT_3 0x2c64 #define R200_PP_TXFORMAT_4 0x2c84 #define R200_PP_TXFORMAT_5 0x2ca4 # define R200_TXFORMAT_I8 (0 << 0) # define R200_TXFORMAT_AI88 (1 << 0) # define R200_TXFORMAT_RGB332 (2 << 0) # define R200_TXFORMAT_ARGB1555 (3 << 0) # define R200_TXFORMAT_RGB565 (4 << 0) # define R200_TXFORMAT_ARGB4444 (5 << 0) # define R200_TXFORMAT_ARGB8888 (6 << 0) # define R200_TXFORMAT_RGBA8888 (7 << 0) # define R200_TXFORMAT_Y8 (8 << 0) # define R200_TXFORMAT_AVYU4444 (9 << 0) # define R200_TXFORMAT_VYUY422 (10 << 0) # define R200_TXFORMAT_YVYU422 (11 << 0) # define R200_TXFORMAT_DXT1 (12 << 0) # define R200_TXFORMAT_DXT23 (14 << 0) # define R200_TXFORMAT_DXT45 (15 << 0) # define R200_TXFORMAT_DVDU88 (18 << 0) # define R200_TXFORMAT_LDVDU655 (19 << 0) # define R200_TXFORMAT_LDVDU8888 (20 << 0) # define R200_TXFORMAT_GR1616 (21 << 0) # define R200_TXFORMAT_ABGR8888 (22 << 0) # define R200_TXFORMAT_BGR111110 (23 << 0) # define R200_TXFORMAT_FORMAT_MASK (31 << 0) # define R200_TXFORMAT_FORMAT_SHIFT 0 # define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6) # define R200_TXFORMAT_NON_POWER2 (1 << 7) # define R200_TXFORMAT_WIDTH_MASK (15 << 8) # define R200_TXFORMAT_WIDTH_SHIFT 8 # define R200_TXFORMAT_HEIGHT_MASK (15 << 12) # define R200_TXFORMAT_HEIGHT_SHIFT 12 # define R200_TXFORMAT_F5_WIDTH_MASK (15 << 16) /* cube face 5 */ # define R200_TXFORMAT_F5_WIDTH_SHIFT 16 # define R200_TXFORMAT_F5_HEIGHT_MASK (15 << 20) # define R200_TXFORMAT_F5_HEIGHT_SHIFT 20 # define R200_TXFORMAT_ST_ROUTE_STQ0 (0 << 24) # define R200_TXFORMAT_ST_ROUTE_STQ1 (1 << 24) # define R200_TXFORMAT_ST_ROUTE_STQ2 (2 << 24) # define R200_TXFORMAT_ST_ROUTE_STQ3 (3 << 24) # define R200_TXFORMAT_ST_ROUTE_STQ4 (4 << 24) # define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24) # define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24) # define R200_TXFORMAT_ST_ROUTE_SHIFT 24 # define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27) # define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) # define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) # define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) #define R200_PP_TXFORMAT_X_0 0x2c08 #define R200_PP_TXFORMAT_X_1 0x2c28 #define R200_PP_TXFORMAT_X_2 0x2c48 #define R200_PP_TXFORMAT_X_3 0x2c68 #define R200_PP_TXFORMAT_X_4 0x2c88 #define R200_PP_TXFORMAT_X_5 0x2ca8 #define R200_PP_TXSIZE_0 0x2c0c /* NPOT only */ #define R200_PP_TXSIZE_1 0x2c2c /* NPOT only */ #define R200_PP_TXSIZE_2 0x2c4c /* NPOT only */ #define R200_PP_TXSIZE_3 0x2c6c /* NPOT only */ #define R200_PP_TXSIZE_4 0x2c8c /* NPOT only */ #define R200_PP_TXSIZE_5 0x2cac /* NPOT only */ #define R200_PP_TXPITCH_0 0x2c10 /* NPOT only */ #define R200_PP_TXPITCH_1 0x2c30 /* NPOT only */ #define R200_PP_TXPITCH_2 0x2c50 /* NPOT only */ #define R200_PP_TXPITCH_3 0x2c70 /* NPOT only */ #define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */ #define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */ #define R200_PP_CUBIC_FACES_0 0x2c18 #define R200_PP_CUBIC_FACES_1 0x2c38 #define R200_PP_CUBIC_FACES_2 0x2c58 #define R200_PP_CUBIC_FACES_3 0x2c78 #define R200_PP_CUBIC_FACES_4 0x2c98 #define R200_PP_CUBIC_FACES_5 0x2cb8 #define R200_PP_TXOFFSET_0 0x2d00 # define R200_TXO_ENDIAN_NO_SWAP (0 << 0) # define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0) # define R200_TXO_ENDIAN_WORD_SWAP (2 << 0) # define R200_TXO_ENDIAN_HALFDW_SWAP (3 << 0) # define R200_TXO_MACRO_LINEAR (0 << 2) # define R200_TXO_MACRO_TILE (1 << 2) # define R200_TXO_MICRO_LINEAR (0 << 3) # define R200_TXO_MICRO_TILE (1 << 3) # define R200_TXO_OFFSET_MASK 0xffffffe0 # define R200_TXO_OFFSET_SHIFT 5 #define 
R200_PP_CUBIC_OFFSET_F1_0 0x2d04 #define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 #define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c #define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 #define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 #define R200_PP_TXOFFSET_1 0x2d18 #define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c #define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 #define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 #define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 #define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c #define R200_PP_TXOFFSET_2 0x2d30 #define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 #define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 #define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c #define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 #define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 #define R200_PP_TXOFFSET_3 0x2d48 #define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c #define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 #define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 #define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 #define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c #define R200_PP_TXOFFSET_4 0x2d60 #define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 #define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 #define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c #define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 #define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 #define R200_PP_TXOFFSET_5 0x2d78 #define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c #define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 #define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 #define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 #define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c #define R200_PP_TFACTOR_0 0x2ee0 #define R200_PP_TFACTOR_1 0x2ee4 #define R200_PP_TFACTOR_2 0x2ee8 #define R200_PP_TFACTOR_3 0x2eec #define R200_PP_TFACTOR_4 0x2ef0 #define R200_PP_TFACTOR_5 0x2ef4 #define R200_PP_TXCBLEND_0 0x2f00 # define R200_TXC_ARG_A_ZERO (0) # define R200_TXC_ARG_A_CURRENT_COLOR (2) # define R200_TXC_ARG_A_CURRENT_ALPHA (3) # define R200_TXC_ARG_A_DIFFUSE_COLOR (4) # define R200_TXC_ARG_A_DIFFUSE_ALPHA (5) # define R200_TXC_ARG_A_SPECULAR_COLOR (6) # define R200_TXC_ARG_A_SPECULAR_ALPHA (7) # define R200_TXC_ARG_A_TFACTOR_COLOR (8) # define R200_TXC_ARG_A_TFACTOR_ALPHA (9) # define R200_TXC_ARG_A_R0_COLOR (10) # define R200_TXC_ARG_A_R0_ALPHA (11) # define R200_TXC_ARG_A_R1_COLOR (12) # define R200_TXC_ARG_A_R1_ALPHA (13) # define R200_TXC_ARG_A_R2_COLOR (14) # define R200_TXC_ARG_A_R2_ALPHA (15) # define R200_TXC_ARG_A_R3_COLOR (16) # define R200_TXC_ARG_A_R3_ALPHA (17) # define R200_TXC_ARG_A_R4_COLOR (18) # define R200_TXC_ARG_A_R4_ALPHA (19) # define R200_TXC_ARG_A_R5_COLOR (20) # define R200_TXC_ARG_A_R5_ALPHA (21) # define R200_TXC_ARG_A_TFACTOR1_COLOR (26) # define R200_TXC_ARG_A_TFACTOR1_ALPHA (27) # define R200_TXC_ARG_A_MASK (31 << 0) # define R200_TXC_ARG_A_SHIFT 0 # define R200_TXC_ARG_B_ZERO (0 << 5) # define R200_TXC_ARG_B_CURRENT_COLOR (2 << 5) # define R200_TXC_ARG_B_CURRENT_ALPHA (3 << 5) # define R200_TXC_ARG_B_DIFFUSE_COLOR (4 << 5) # define R200_TXC_ARG_B_DIFFUSE_ALPHA (5 << 5) # define R200_TXC_ARG_B_SPECULAR_COLOR (6 << 5) # define R200_TXC_ARG_B_SPECULAR_ALPHA (7 << 5) # define R200_TXC_ARG_B_TFACTOR_COLOR (8 << 5) # define R200_TXC_ARG_B_TFACTOR_ALPHA (9 << 5) # define R200_TXC_ARG_B_R0_COLOR (10 << 5) # define R200_TXC_ARG_B_R0_ALPHA (11 << 5) # define R200_TXC_ARG_B_R1_COLOR (12 << 5) # define R200_TXC_ARG_B_R1_ALPHA (13 << 5) # define R200_TXC_ARG_B_R2_COLOR (14 << 5) # define R200_TXC_ARG_B_R2_ALPHA (15 << 5) # define R200_TXC_ARG_B_R3_COLOR (16 << 5) # define R200_TXC_ARG_B_R3_ALPHA (17 << 5) # define R200_TXC_ARG_B_R4_COLOR (18 << 5) # define R200_TXC_ARG_B_R4_ALPHA (19 << 5) # define R200_TXC_ARG_B_R5_COLOR (20 << 5) # define R200_TXC_ARG_B_R5_ALPHA (21 << 5) # define 
R200_TXC_ARG_B_TFACTOR1_COLOR (26 << 5) # define R200_TXC_ARG_B_TFACTOR1_ALPHA (27 << 5) # define R200_TXC_ARG_B_MASK (31 << 5) # define R200_TXC_ARG_B_SHIFT 5 # define R200_TXC_ARG_C_ZERO (0 << 10) # define R200_TXC_ARG_C_CURRENT_COLOR (2 << 10) # define R200_TXC_ARG_C_CURRENT_ALPHA (3 << 10) # define R200_TXC_ARG_C_DIFFUSE_COLOR (4 << 10) # define R200_TXC_ARG_C_DIFFUSE_ALPHA (5 << 10) # define R200_TXC_ARG_C_SPECULAR_COLOR (6 << 10) # define R200_TXC_ARG_C_SPECULAR_ALPHA (7 << 10) # define R200_TXC_ARG_C_TFACTOR_COLOR (8 << 10) # define R200_TXC_ARG_C_TFACTOR_ALPHA (9 << 10) # define R200_TXC_ARG_C_R0_COLOR (10 << 10) # define R200_TXC_ARG_C_R0_ALPHA (11 << 10) # define R200_TXC_ARG_C_R1_COLOR (12 << 10) # define R200_TXC_ARG_C_R1_ALPHA (13 << 10) # define R200_TXC_ARG_C_R2_COLOR (14 << 10) # define R200_TXC_ARG_C_R2_ALPHA (15 << 10) # define R200_TXC_ARG_C_R3_COLOR (16 << 10) # define R200_TXC_ARG_C_R3_ALPHA (17 << 10) # define R200_TXC_ARG_C_R4_COLOR (18 << 10) # define R200_TXC_ARG_C_R4_ALPHA (19 << 10) # define R200_TXC_ARG_C_R5_COLOR (20 << 10) # define R200_TXC_ARG_C_R5_ALPHA (21 << 10) # define R200_TXC_ARG_C_TFACTOR1_COLOR (26 << 10) # define R200_TXC_ARG_C_TFACTOR1_ALPHA (27 << 10) # define R200_TXC_ARG_C_MASK (31 << 10) # define R200_TXC_ARG_C_SHIFT 10 # define R200_TXC_COMP_ARG_A (1 << 16) # define R200_TXC_COMP_ARG_A_SHIFT (16) # define R200_TXC_BIAS_ARG_A (1 << 17) # define R200_TXC_SCALE_ARG_A (1 << 18) # define R200_TXC_NEG_ARG_A (1 << 19) # define R200_TXC_COMP_ARG_B (1 << 20) # define R200_TXC_COMP_ARG_B_SHIFT (20) # define R200_TXC_BIAS_ARG_B (1 << 21) # define R200_TXC_SCALE_ARG_B (1 << 22) # define R200_TXC_NEG_ARG_B (1 << 23) # define R200_TXC_COMP_ARG_C (1 << 24) # define R200_TXC_COMP_ARG_C_SHIFT (24) # define R200_TXC_BIAS_ARG_C (1 << 25) # define R200_TXC_SCALE_ARG_C (1 << 26) # define R200_TXC_NEG_ARG_C (1 << 27) # define R200_TXC_OP_MADD (0 << 28) # define R200_TXC_OP_CND0 (2 << 28) # define R200_TXC_OP_LERP (3 << 28) # define R200_TXC_OP_DOT3 (4 << 28) # define R200_TXC_OP_DOT4 (5 << 28) # define R200_TXC_OP_CONDITIONAL (6 << 28) # define R200_TXC_OP_DOT2_ADD (7 << 28) # define R200_TXC_OP_MASK (7 << 28) #define R200_PP_TXCBLEND2_0 0x2f04 # define R200_TXC_TFACTOR_SEL_SHIFT 0 # define R200_TXC_TFACTOR_SEL_MASK 0x7 # define R200_TXC_TFACTOR1_SEL_SHIFT 4 # define R200_TXC_TFACTOR1_SEL_MASK (0x7 << 4) # define R200_TXC_SCALE_SHIFT 8 # define R200_TXC_SCALE_MASK (7 << 8) # define R200_TXC_SCALE_1X (0 << 8) # define R200_TXC_SCALE_2X (1 << 8) # define R200_TXC_SCALE_4X (2 << 8) # define R200_TXC_SCALE_8X (3 << 8) # define R200_TXC_SCALE_INV2 (5 << 8) # define R200_TXC_SCALE_INV4 (6 << 8) # define R200_TXC_SCALE_INV8 (7 << 8) # define R200_TXC_CLAMP_SHIFT 12 # define R200_TXC_CLAMP_MASK (3 << 12) # define R200_TXC_CLAMP_WRAP (0 << 12) # define R200_TXC_CLAMP_0_1 (1 << 12) # define R200_TXC_CLAMP_8_8 (2 << 12) # define R200_TXC_OUTPUT_REG_MASK (7 << 16) # define R200_TXC_OUTPUT_REG_NONE (0 << 16) # define R200_TXC_OUTPUT_REG_R0 (1 << 16) # define R200_TXC_OUTPUT_REG_R1 (2 << 16) # define R200_TXC_OUTPUT_REG_R2 (3 << 16) # define R200_TXC_OUTPUT_REG_R3 (4 << 16) # define R200_TXC_OUTPUT_REG_R4 (5 << 16) # define R200_TXC_OUTPUT_REG_R5 (6 << 16) # define R200_TXC_OUTPUT_MASK_MASK (7 << 20) # define R200_TXC_OUTPUT_MASK_RGB (0 << 20) # define R200_TXC_OUTPUT_MASK_RG (1 << 20) # define R200_TXC_OUTPUT_MASK_RB (2 << 20) # define R200_TXC_OUTPUT_MASK_R (3 << 20) # define R200_TXC_OUTPUT_MASK_GB (4 << 20) # define R200_TXC_OUTPUT_MASK_G (5 << 20) # define 
R200_TXC_OUTPUT_MASK_B (6 << 20) # define R200_TXC_OUTPUT_MASK_NONE (7 << 20) # define R200_TXC_REPL_NORMAL 0 # define R200_TXC_REPL_RED 1 # define R200_TXC_REPL_GREEN 2 # define R200_TXC_REPL_BLUE 3 # define R200_TXC_REPL_ARG_A_SHIFT 26 # define R200_TXC_REPL_ARG_A_MASK (3 << 26) # define R200_TXC_REPL_ARG_B_SHIFT 28 # define R200_TXC_REPL_ARG_B_MASK (3 << 28) # define R200_TXC_REPL_ARG_C_SHIFT 30 # define R200_TXC_REPL_ARG_C_MASK (3 << 30) #define R200_PP_TXABLEND_0 0x2f08 # define R200_TXA_ARG_A_ZERO (0) # define R200_TXA_ARG_A_CURRENT_ALPHA (2) /* guess */ # define R200_TXA_ARG_A_CURRENT_BLUE (3) /* guess */ # define R200_TXA_ARG_A_DIFFUSE_ALPHA (4) # define R200_TXA_ARG_A_DIFFUSE_BLUE (5) # define R200_TXA_ARG_A_SPECULAR_ALPHA (6) # define R200_TXA_ARG_A_SPECULAR_BLUE (7) # define R200_TXA_ARG_A_TFACTOR_ALPHA (8) # define R200_TXA_ARG_A_TFACTOR_BLUE (9) # define R200_TXA_ARG_A_R0_ALPHA (10) # define R200_TXA_ARG_A_R0_BLUE (11) # define R200_TXA_ARG_A_R1_ALPHA (12) # define R200_TXA_ARG_A_R1_BLUE (13) # define R200_TXA_ARG_A_R2_ALPHA (14) # define R200_TXA_ARG_A_R2_BLUE (15) # define R200_TXA_ARG_A_R3_ALPHA (16) # define R200_TXA_ARG_A_R3_BLUE (17) # define R200_TXA_ARG_A_R4_ALPHA (18) # define R200_TXA_ARG_A_R4_BLUE (19) # define R200_TXA_ARG_A_R5_ALPHA (20) # define R200_TXA_ARG_A_R5_BLUE (21) # define R200_TXA_ARG_A_TFACTOR1_ALPHA (26) # define R200_TXA_ARG_A_TFACTOR1_BLUE (27) # define R200_TXA_ARG_A_MASK (31 << 0) # define R200_TXA_ARG_A_SHIFT 0 # define R200_TXA_ARG_B_ZERO (0 << 5) # define R200_TXA_ARG_B_CURRENT_ALPHA (2 << 5) /* guess */ # define R200_TXA_ARG_B_CURRENT_BLUE (3 << 5) /* guess */ # define R200_TXA_ARG_B_DIFFUSE_ALPHA (4 << 5) # define R200_TXA_ARG_B_DIFFUSE_BLUE (5 << 5) # define R200_TXA_ARG_B_SPECULAR_ALPHA (6 << 5) # define R200_TXA_ARG_B_SPECULAR_BLUE (7 << 5) # define R200_TXA_ARG_B_TFACTOR_ALPHA (8 << 5) # define R200_TXA_ARG_B_TFACTOR_BLUE (9 << 5) # define R200_TXA_ARG_B_R0_ALPHA (10 << 5) # define R200_TXA_ARG_B_R0_BLUE (11 << 5) # define R200_TXA_ARG_B_R1_ALPHA (12 << 5) # define R200_TXA_ARG_B_R1_BLUE (13 << 5) # define R200_TXA_ARG_B_R2_ALPHA (14 << 5) # define R200_TXA_ARG_B_R2_BLUE (15 << 5) # define R200_TXA_ARG_B_R3_ALPHA (16 << 5) # define R200_TXA_ARG_B_R3_BLUE (17 << 5) # define R200_TXA_ARG_B_R4_ALPHA (18 << 5) # define R200_TXA_ARG_B_R4_BLUE (19 << 5) # define R200_TXA_ARG_B_R5_ALPHA (20 << 5) # define R200_TXA_ARG_B_R5_BLUE (21 << 5) # define R200_TXA_ARG_B_TFACTOR1_ALPHA (26 << 5) # define R200_TXA_ARG_B_TFACTOR1_BLUE (27 << 5) # define R200_TXA_ARG_B_MASK (31 << 5) # define R200_TXA_ARG_B_SHIFT 5 # define R200_TXA_ARG_C_ZERO (0 << 10) # define R200_TXA_ARG_C_CURRENT_ALPHA (2 << 10) /* guess */ # define R200_TXA_ARG_C_CURRENT_BLUE (3 << 10) /* guess */ # define R200_TXA_ARG_C_DIFFUSE_ALPHA (4 << 10) # define R200_TXA_ARG_C_DIFFUSE_BLUE (5 << 10) # define R200_TXA_ARG_C_SPECULAR_ALPHA (6 << 10) # define R200_TXA_ARG_C_SPECULAR_BLUE (7 << 10) # define R200_TXA_ARG_C_TFACTOR_ALPHA (8 << 10) # define R200_TXA_ARG_C_TFACTOR_BLUE (9 << 10) # define R200_TXA_ARG_C_R0_ALPHA (10 << 10) # define R200_TXA_ARG_C_R0_BLUE (11 << 10) # define R200_TXA_ARG_C_R1_ALPHA (12 << 10) # define R200_TXA_ARG_C_R1_BLUE (13 << 10) # define R200_TXA_ARG_C_R2_ALPHA (14 << 10) # define R200_TXA_ARG_C_R2_BLUE (15 << 10) # define R200_TXA_ARG_C_R3_ALPHA (16 << 10) # define R200_TXA_ARG_C_R3_BLUE (17 << 10) # define R200_TXA_ARG_C_R4_ALPHA (18 << 10) # define R200_TXA_ARG_C_R4_BLUE (19 << 10) # define R200_TXA_ARG_C_R5_ALPHA (20 << 10) # define R200_TXA_ARG_C_R5_BLUE (21 
<< 10) # define R200_TXA_ARG_C_TFACTOR1_ALPHA (26 << 10) # define R200_TXA_ARG_C_TFACTOR1_BLUE (27 << 10) # define R200_TXA_ARG_C_MASK (31 << 10) # define R200_TXA_ARG_C_SHIFT 10 # define R200_TXA_COMP_ARG_A (1 << 16) # define R200_TXA_COMP_ARG_A_SHIFT (16) # define R200_TXA_BIAS_ARG_A (1 << 17) # define R200_TXA_SCALE_ARG_A (1 << 18) # define R200_TXA_NEG_ARG_A (1 << 19) # define R200_TXA_COMP_ARG_B (1 << 20) # define R200_TXA_COMP_ARG_B_SHIFT (20) # define R200_TXA_BIAS_ARG_B (1 << 21) # define R200_TXA_SCALE_ARG_B (1 << 22) # define R200_TXA_NEG_ARG_B (1 << 23) # define R200_TXA_COMP_ARG_C (1 << 24) # define R200_TXA_COMP_ARG_C_SHIFT (24) # define R200_TXA_BIAS_ARG_C (1 << 25) # define R200_TXA_SCALE_ARG_C (1 << 26) # define R200_TXA_NEG_ARG_C (1 << 27) # define R200_TXA_OP_MADD (0 << 28) # define R200_TXA_OP_CND0 (2 << 28) # define R200_TXA_OP_LERP (3 << 28) # define R200_TXA_OP_CONDITIONAL (6 << 28) # define R200_TXA_OP_MASK (7 << 28) #define R200_PP_TXABLEND2_0 0x2f0c # define R200_TXA_TFACTOR_SEL_SHIFT 0 # define R200_TXA_TFACTOR_SEL_MASK 0x7 # define R200_TXA_TFACTOR1_SEL_SHIFT 4 # define R200_TXA_TFACTOR1_SEL_MASK (0x7 << 4) # define R200_TXA_SCALE_SHIFT 8 # define R200_TXA_SCALE_MASK (7 << 8) # define R200_TXA_SCALE_1X (0 << 8) # define R200_TXA_SCALE_2X (1 << 8) # define R200_TXA_SCALE_4X (2 << 8) # define R200_TXA_SCALE_8X (3 << 8) # define R200_TXA_SCALE_INV2 (5 << 8) # define R200_TXA_SCALE_INV4 (6 << 8) # define R200_TXA_SCALE_INV8 (7 << 8) # define R200_TXA_CLAMP_SHIFT 12 # define R200_TXA_CLAMP_MASK (3 << 12) # define R200_TXA_CLAMP_WRAP (0 << 12) # define R200_TXA_CLAMP_0_1 (1 << 12) # define R200_TXA_CLAMP_8_8 (2 << 12) # define R200_TXA_OUTPUT_REG_MASK (7 << 16) # define R200_TXA_OUTPUT_REG_NONE (0 << 16) # define R200_TXA_OUTPUT_REG_R0 (1 << 16) # define R200_TXA_OUTPUT_REG_R1 (2 << 16) # define R200_TXA_OUTPUT_REG_R2 (3 << 16) # define R200_TXA_OUTPUT_REG_R3 (4 << 16) # define R200_TXA_OUTPUT_REG_R4 (5 << 16) # define R200_TXA_OUTPUT_REG_R5 (6 << 16) # define R200_TXA_DOT_ALPHA (1 << 20) # define R200_TXA_REPL_NORMAL 0 # define R200_TXA_REPL_RED 1 # define R200_TXA_REPL_GREEN 2 # define R200_TXA_REPL_ARG_A_SHIFT 26 # define R200_TXA_REPL_ARG_A_MASK (3 << 26) # define R200_TXA_REPL_ARG_B_SHIFT 28 # define R200_TXA_REPL_ARG_B_MASK (3 << 28) # define R200_TXA_REPL_ARG_C_SHIFT 30 # define R200_TXA_REPL_ARG_C_MASK (3 << 30) #define R200_SE_VTX_FMT_0 0x2088 # define R200_VTX_XY 0 /* always have xy */ # define R200_VTX_Z0 (1<<0) # define R200_VTX_W0 (1<<1) # define R200_VTX_WEIGHT_COUNT_SHIFT (2) # define R200_VTX_PV_MATRIX_SEL (1<<5) # define R200_VTX_N0 (1<<6) # define R200_VTX_POINT_SIZE (1<<7) # define R200_VTX_DISCRETE_FOG (1<<8) # define R200_VTX_SHININESS_0 (1<<9) # define R200_VTX_SHININESS_1 (1<<10) # define R200_VTX_COLOR_NOT_PRESENT 0 # define R200_VTX_PK_RGBA 1 # define R200_VTX_FP_RGB 2 # define R200_VTX_FP_RGBA 3 # define R200_VTX_COLOR_MASK 3 # define R200_VTX_COLOR_0_SHIFT 11 # define R200_VTX_COLOR_1_SHIFT 13 # define R200_VTX_COLOR_2_SHIFT 15 # define R200_VTX_COLOR_3_SHIFT 17 # define R200_VTX_COLOR_4_SHIFT 19 # define R200_VTX_COLOR_5_SHIFT 21 # define R200_VTX_COLOR_6_SHIFT 23 # define R200_VTX_COLOR_7_SHIFT 25 # define R200_VTX_XY1 (1<<28) # define R200_VTX_Z1 (1<<29) # define R200_VTX_W1 (1<<30) # define R200_VTX_N1 (1<<31) #define R200_SE_VTX_FMT_1 0x208c # define R200_VTX_TEX0_COMP_CNT_SHIFT 0 # define R200_VTX_TEX1_COMP_CNT_SHIFT 3 # define R200_VTX_TEX2_COMP_CNT_SHIFT 6 # define R200_VTX_TEX3_COMP_CNT_SHIFT 9 # define R200_VTX_TEX4_COMP_CNT_SHIFT 
12 # define R200_VTX_TEX5_COMP_CNT_SHIFT 15 #define R200_SE_TCL_OUTPUT_VTX_FMT_0 0x2090 #define R200_SE_TCL_OUTPUT_VTX_FMT_1 0x2094 #define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 # define R200_OUTPUT_XYZW (1<<0) # define R200_OUTPUT_COLOR_0 (1<<8) # define R200_OUTPUT_COLOR_1 (1<<9) # define R200_OUTPUT_TEX_0 (1<<16) # define R200_OUTPUT_TEX_1 (1<<17) # define R200_OUTPUT_TEX_2 (1<<18) # define R200_OUTPUT_TEX_3 (1<<19) # define R200_OUTPUT_TEX_4 (1<<20) # define R200_OUTPUT_TEX_5 (1<<21) # define R200_OUTPUT_TEX_MASK (0x3f<<16) # define R200_OUTPUT_DISCRETE_FOG (1<<24) # define R200_OUTPUT_PT_SIZE (1<<25) # define R200_FORCE_INORDER_PROC (1<<31) #define R200_PP_CNTL_X 0x2cc4 #define R200_PP_TXMULTI_CTL_0 0x2c1c #define R200_PP_TXMULTI_CTL_1 0x2c3c #define R200_PP_TXMULTI_CTL_2 0x2c5c #define R200_PP_TXMULTI_CTL_3 0x2c7c #define R200_PP_TXMULTI_CTL_4 0x2c9c #define R200_PP_TXMULTI_CTL_5 0x2cbc #define R200_SE_VTX_STATE_CNTL 0x2180 # define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16) /* Registers for CP and Microcode Engine */ #define RADEON_CP_ME_RAM_ADDR 0x07d4 #define RADEON_CP_ME_RAM_RADDR 0x07d8 #define RADEON_CP_ME_RAM_DATAH 0x07dc #define RADEON_CP_ME_RAM_DATAL 0x07e0 #define RADEON_CP_RB_BASE 0x0700 #define RADEON_CP_RB_CNTL 0x0704 # define RADEON_RB_BUFSZ_SHIFT 0 # define RADEON_RB_BUFSZ_MASK (0x3f << 0) # define RADEON_RB_BLKSZ_SHIFT 8 # define RADEON_RB_BLKSZ_MASK (0x3f << 8) # define RADEON_BUF_SWAP_32BIT (2 << 16) # define RADEON_MAX_FETCH_SHIFT 18 # define RADEON_MAX_FETCH_MASK (0x3 << 18) # define RADEON_RB_NO_UPDATE (1 << 27) -# define RADEON_RB_RPTR_WR_ENA (1 << 31) +# define RADEON_RB_RPTR_WR_ENA (1U << 31) #define RADEON_CP_RB_RPTR_ADDR 0x070c #define RADEON_CP_RB_RPTR 0x0710 #define RADEON_CP_RB_WPTR 0x0714 #define RADEON_CP_RB_RPTR_WR 0x071c #define RADEON_SCRATCH_UMSK 0x0770 #define RADEON_SCRATCH_ADDR 0x0774 #define R600_CP_RB_BASE 0xc100 #define R600_CP_RB_CNTL 0xc104 # define R600_RB_BUFSZ(x) ((x) << 0) # define R600_RB_BLKSZ(x) ((x) << 8) # define R600_RB_NO_UPDATE (1 << 27) -# define R600_RB_RPTR_WR_ENA (1 << 31) +# define R600_RB_RPTR_WR_ENA (1U << 31) #define R600_CP_RB_RPTR_WR 0xc108 #define R600_CP_RB_RPTR_ADDR 0xc10c #define R600_CP_RB_RPTR_ADDR_HI 0xc110 #define R600_CP_RB_WPTR 0xc114 #define R600_CP_RB_WPTR_ADDR 0xc118 #define R600_CP_RB_WPTR_ADDR_HI 0xc11c #define R600_CP_RB_RPTR 0x8700 #define R600_CP_RB_WPTR_DELAY 0x8704 #define RADEON_CP_IB_BASE 0x0738 #define RADEON_CP_IB_BUFSZ 0x073c #define RADEON_CP_CSQ_CNTL 0x0740 # define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0) # define RADEON_CSQ_PRIDIS_INDDIS (0 << 28) # define RADEON_CSQ_PRIPIO_INDDIS (1 << 28) # define RADEON_CSQ_PRIBM_INDDIS (2 << 28) # define RADEON_CSQ_PRIPIO_INDBM (3 << 28) # define RADEON_CSQ_PRIBM_INDBM (4 << 28) # define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) #define R300_CP_RESYNC_ADDR 0x778 #define R300_CP_RESYNC_DATA 0x77c #define RADEON_CP_CSQ_STAT 0x07f8 # define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0) # define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8) # define RADEON_CSQ_RPTR_INDIRECT_MASK (0xff << 16) # define RADEON_CSQ_WPTR_INDIRECT_MASK (0xff << 24) #define RADEON_CP_CSQ2_STAT 0x07fc #define RADEON_CP_CSQ_ADDR 0x07f0 #define RADEON_CP_CSQ_DATA 0x07f4 #define RADEON_CP_CSQ_APER_PRIMARY 0x1000 #define RADEON_CP_CSQ_APER_INDIRECT 0x1300 #define RADEON_CP_RB_WPTR_DELAY 0x0718 # define RADEON_PRE_WRITE_TIMER_SHIFT 0 # define RADEON_PRE_WRITE_LIMIT_SHIFT 23 #define RADEON_CP_CSQ_MODE 0x0744 # define RADEON_INDIRECT2_START_SHIFT 0 # define RADEON_INDIRECT2_START_MASK (0x7f << 0) # define 
RADEON_INDIRECT1_START_SHIFT 8 # define RADEON_INDIRECT1_START_MASK (0x7f << 8) #define RADEON_AIC_CNTL 0x01d0 # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) # define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1) # define RS400_MSI_REARM (1 << 3) /* rs400/rs480 */ #define RADEON_AIC_LO_ADDR 0x01dc #define RADEON_AIC_PT_BASE 0x01d8 #define RADEON_AIC_HI_ADDR 0x01e0 /* Constants */ /* #define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0 */ /* #define RADEON_LAST_CLEAR_REG RADEON_GUI_SCRATCH_REG2 */ /* CP packet types */ #define RADEON_CP_PACKET0 0x00000000 #define RADEON_CP_PACKET1 0x40000000 #define RADEON_CP_PACKET2 0x80000000 #define RADEON_CP_PACKET3 0xC0000000 # define RADEON_CP_PACKET_MASK 0xC0000000 # define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 # define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) # define RADEON_CP_PACKET0_REG_MASK 0x000007ff # define R300_CP_PACKET0_REG_MASK 0x00001fff # define R600_CP_PACKET0_REG_MASK 0x0000ffff # define RADEON_CP_PACKET1_REG0_MASK 0x000007ff # define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 #define RADEON_CP_PACKET0_ONE_REG_WR 0x00008000 #define RADEON_CP_PACKET3_NOP 0xC0001000 #define RADEON_CP_PACKET3_NEXT_CHAR 0xC0001900 #define RADEON_CP_PACKET3_PLY_NEXTSCAN 0xC0001D00 #define RADEON_CP_PACKET3_SET_SCISSORS 0xC0001E00 #define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM 0xC0002300 #define RADEON_CP_PACKET3_LOAD_MICROCODE 0xC0002400 #define RADEON_CP_PACKET3_WAIT_FOR_IDLE 0xC0002600 #define RADEON_CP_PACKET3_3D_DRAW_VBUF 0xC0002800 #define RADEON_CP_PACKET3_3D_DRAW_IMMD 0xC0002900 #define RADEON_CP_PACKET3_3D_DRAW_INDX 0xC0002A00 #define RADEON_CP_PACKET3_LOAD_PALETTE 0xC0002C00 #define R200_CP_PACKET3_3D_DRAW_IMMD_2 0xc0003500 #define RADEON_CP_PACKET3_3D_LOAD_VBPNTR 0xC0002F00 #define RADEON_CP_PACKET3_CNTL_PAINT 0xC0009100 #define RADEON_CP_PACKET3_CNTL_BITBLT 0xC0009200 #define RADEON_CP_PACKET3_CNTL_SMALLTEXT 0xC0009300 #define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT 0xC0009400 #define RADEON_CP_PACKET3_CNTL_POLYLINE 0xC0009500 #define RADEON_CP_PACKET3_CNTL_POLYSCANLINES 0xC0009800 #define RADEON_CP_PACKET3_CNTL_PAINT_MULTI 0xC0009A00 #define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00 #define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT 0xC0009C00 #define RADEON_CP_VC_FRMT_XY 0x00000000 #define RADEON_CP_VC_FRMT_W0 0x00000001 #define RADEON_CP_VC_FRMT_FPCOLOR 0x00000002 #define RADEON_CP_VC_FRMT_FPALPHA 0x00000004 #define RADEON_CP_VC_FRMT_PKCOLOR 0x00000008 #define RADEON_CP_VC_FRMT_FPSPEC 0x00000010 #define RADEON_CP_VC_FRMT_FPFOG 0x00000020 #define RADEON_CP_VC_FRMT_PKSPEC 0x00000040 #define RADEON_CP_VC_FRMT_ST0 0x00000080 #define RADEON_CP_VC_FRMT_ST1 0x00000100 #define RADEON_CP_VC_FRMT_Q1 0x00000200 #define RADEON_CP_VC_FRMT_ST2 0x00000400 #define RADEON_CP_VC_FRMT_Q2 0x00000800 #define RADEON_CP_VC_FRMT_ST3 0x00001000 #define RADEON_CP_VC_FRMT_Q3 0x00002000 #define RADEON_CP_VC_FRMT_Q0 0x00004000 #define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK 0x00038000 #define RADEON_CP_VC_FRMT_N0 0x00040000 #define RADEON_CP_VC_FRMT_XY1 0x08000000 #define RADEON_CP_VC_FRMT_Z1 0x10000000 #define RADEON_CP_VC_FRMT_W1 0x20000000 #define RADEON_CP_VC_FRMT_N1 0x40000000 #define RADEON_CP_VC_FRMT_Z 0x80000000 #define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000 #define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001 #define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002 #define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP 0x00000003 #define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004 #define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005 #define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP
0x00000006 #define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2 0x00000007 #define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST 0x00000008 #define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009 #define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST 0x0000000a #define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010 #define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020 #define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030 #define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA 0x00000000 #define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA 0x00000040 #define RADEON_CP_VC_CNTL_MAOS_ENABLE 0x00000080 #define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE 0x00000000 #define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE 0x00000100 #define RADEON_CP_VC_CNTL_TCL_DISABLE 0x00000000 #define RADEON_CP_VC_CNTL_TCL_ENABLE 0x00000200 #define RADEON_CP_VC_CNTL_NUM_SHIFT 16 #define RADEON_VS_MATRIX_0_ADDR 0 #define RADEON_VS_MATRIX_1_ADDR 4 #define RADEON_VS_MATRIX_2_ADDR 8 #define RADEON_VS_MATRIX_3_ADDR 12 #define RADEON_VS_MATRIX_4_ADDR 16 #define RADEON_VS_MATRIX_5_ADDR 20 #define RADEON_VS_MATRIX_6_ADDR 24 #define RADEON_VS_MATRIX_7_ADDR 28 #define RADEON_VS_MATRIX_8_ADDR 32 #define RADEON_VS_MATRIX_9_ADDR 36 #define RADEON_VS_MATRIX_10_ADDR 40 #define RADEON_VS_MATRIX_11_ADDR 44 #define RADEON_VS_MATRIX_12_ADDR 48 #define RADEON_VS_MATRIX_13_ADDR 52 #define RADEON_VS_MATRIX_14_ADDR 56 #define RADEON_VS_MATRIX_15_ADDR 60 #define RADEON_VS_LIGHT_AMBIENT_ADDR 64 #define RADEON_VS_LIGHT_DIFFUSE_ADDR 72 #define RADEON_VS_LIGHT_SPECULAR_ADDR 80 #define RADEON_VS_LIGHT_DIRPOS_ADDR 88 #define RADEON_VS_LIGHT_HWVSPOT_ADDR 96 #define RADEON_VS_LIGHT_ATTENUATION_ADDR 104 #define RADEON_VS_MATRIX_EYE2CLIP_ADDR 112 #define RADEON_VS_UCP_ADDR 116 #define RADEON_VS_GLOBAL_AMBIENT_ADDR 122 #define RADEON_VS_FOG_PARAM_ADDR 123 #define RADEON_VS_EYE_VECTOR_ADDR 124 #define RADEON_SS_LIGHT_DCD_ADDR 0 #define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR 8 #define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR 16 #define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR 24 #define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR 32 #define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR 48 #define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR 49 #define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR 50 #define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR 51 #define RADEON_SS_SHININESS 60 #define RADEON_TV_MASTER_CNTL 0x0800 # define RADEON_TV_ASYNC_RST (1 << 0) # define RADEON_CRT_ASYNC_RST (1 << 1) # define RADEON_RESTART_PHASE_FIX (1 << 3) # define RADEON_TV_FIFO_ASYNC_RST (1 << 4) # define RADEON_VIN_ASYNC_RST (1 << 5) # define RADEON_AUD_ASYNC_RST (1 << 6) # define RADEON_DVS_ASYNC_RST (1 << 7) # define RADEON_CRT_FIFO_CE_EN (1 << 9) # define RADEON_TV_FIFO_CE_EN (1 << 10) # define RADEON_RE_SYNC_NOW_SEL_MASK (3 << 14) # define RADEON_TVCLK_ALWAYS_ONb (1 << 30) -# define RADEON_TV_ON (1 << 31) +# define RADEON_TV_ON (1U << 31) #define RADEON_TV_PRE_DAC_MUX_CNTL 0x0888 # define RADEON_Y_RED_EN (1 << 0) # define RADEON_C_GRN_EN (1 << 1) # define RADEON_CMP_BLU_EN (1 << 2) # define RADEON_DAC_DITHER_EN (1 << 3) # define RADEON_RED_MX_FORCE_DAC_DATA (6 << 4) # define RADEON_GRN_MX_FORCE_DAC_DATA (6 << 8) # define RADEON_BLU_MX_FORCE_DAC_DATA (6 << 12) # define RADEON_TV_FORCE_DAC_DATA_SHIFT 16 #define RADEON_TV_RGB_CNTL 0x0804 # define RADEON_SWITCH_TO_BLUE (1 << 4) # define RADEON_RGB_DITHER_EN (1 << 5) # define RADEON_RGB_SRC_SEL_MASK (3 << 8) # define RADEON_RGB_SRC_SEL_CRTC1 (0 << 8) # define RADEON_RGB_SRC_SEL_RMX (1 << 8) # define RADEON_RGB_SRC_SEL_CRTC2 (2 << 8) # define RADEON_RGB_CONVERT_BY_PASS (1 << 10) # define RADEON_UVRAM_READ_MARGIN_SHIFT 16 # 
define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20 # define RADEON_RGB_ATTEN_SEL(x) ((x) << 24) # define RADEON_TVOUT_SCALE_EN (1 << 26) # define RADEON_RGB_ATTEN_VAL(x) ((x) << 28) #define RADEON_TV_SYNC_CNTL 0x0808 # define RADEON_SYNC_OE (1 << 0) # define RADEON_SYNC_OUT (1 << 1) # define RADEON_SYNC_IN (1 << 2) # define RADEON_SYNC_PUB (1 << 3) # define RADEON_SYNC_PD (1 << 4) # define RADEON_TV_SYNC_IO_DRIVE (1 << 5) #define RADEON_TV_HTOTAL 0x080c #define RADEON_TV_HDISP 0x0810 #define RADEON_TV_HSTART 0x0818 #define RADEON_TV_HCOUNT 0x081C #define RADEON_TV_VTOTAL 0x0820 #define RADEON_TV_VDISP 0x0824 #define RADEON_TV_VCOUNT 0x0828 #define RADEON_TV_FTOTAL 0x082c #define RADEON_TV_FCOUNT 0x0830 #define RADEON_TV_FRESTART 0x0834 #define RADEON_TV_HRESTART 0x0838 #define RADEON_TV_VRESTART 0x083c #define RADEON_TV_HOST_READ_DATA 0x0840 #define RADEON_TV_HOST_WRITE_DATA 0x0844 #define RADEON_TV_HOST_RD_WT_CNTL 0x0848 # define RADEON_HOST_FIFO_RD (1 << 12) # define RADEON_HOST_FIFO_RD_ACK (1 << 13) # define RADEON_HOST_FIFO_WT (1 << 14) # define RADEON_HOST_FIFO_WT_ACK (1 << 15) #define RADEON_TV_VSCALER_CNTL1 0x084c # define RADEON_UV_INC_MASK 0xffff # define RADEON_UV_INC_SHIFT 0 # define RADEON_Y_W_EN (1 << 24) # define RADEON_RESTART_FIELD (1 << 29) /* restart on field 0 */ # define RADEON_Y_DEL_W_SIG_SHIFT 26 #define RADEON_TV_TIMING_CNTL 0x0850 # define RADEON_H_INC_MASK 0xfff # define RADEON_H_INC_SHIFT 0 # define RADEON_REQ_Y_FIRST (1 << 19) # define RADEON_FORCE_BURST_ALWAYS (1 << 21) # define RADEON_UV_POST_SCALE_BYPASS (1 << 23) # define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24 #define RADEON_TV_VSCALER_CNTL2 0x0854 # define RADEON_DITHER_MODE (1 << 0) # define RADEON_Y_OUTPUT_DITHER_EN (1 << 1) # define RADEON_UV_OUTPUT_DITHER_EN (1 << 2) # define RADEON_UV_TO_BUF_DITHER_EN (1 << 3) #define RADEON_TV_Y_FALL_CNTL 0x0858 # define RADEON_Y_FALL_PING_PONG (1 << 16) # define RADEON_Y_COEF_EN (1 << 17) #define RADEON_TV_Y_RISE_CNTL 0x085c # define RADEON_Y_RISE_PING_PONG (1 << 16) #define RADEON_TV_Y_SAW_TOOTH_CNTL 0x0860 #define RADEON_TV_UPSAMP_AND_GAIN_CNTL 0x0864 # define RADEON_YUPSAMP_EN (1 << 0) # define RADEON_UVUPSAMP_EN (1 << 2) #define RADEON_TV_GAIN_LIMIT_SETTINGS 0x0868 # define RADEON_Y_GAIN_LIMIT_SHIFT 0 # define RADEON_UV_GAIN_LIMIT_SHIFT 16 #define RADEON_TV_LINEAR_GAIN_SETTINGS 0x086c # define RADEON_Y_GAIN_SHIFT 0 # define RADEON_UV_GAIN_SHIFT 16 #define RADEON_TV_MODULATOR_CNTL1 0x0870 # define RADEON_YFLT_EN (1 << 2) # define RADEON_UVFLT_EN (1 << 3) # define RADEON_ALT_PHASE_EN (1 << 6) # define RADEON_SYNC_TIP_LEVEL (1 << 7) # define RADEON_BLANK_LEVEL_SHIFT 8 # define RADEON_SET_UP_LEVEL_SHIFT 16 # define RADEON_SLEW_RATE_LIMIT (1 << 23) # define RADEON_CY_FILT_BLEND_SHIFT 28 #define RADEON_TV_MODULATOR_CNTL2 0x0874 # define RADEON_TV_U_BURST_LEVEL_MASK 0x1ff # define RADEON_TV_V_BURST_LEVEL_MASK 0x1ff # define RADEON_TV_V_BURST_LEVEL_SHIFT 16 #define RADEON_TV_CRC_CNTL 0x0890 #define RADEON_TV_UV_ADR 0x08ac # define RADEON_MAX_UV_ADR_MASK 0x000000ff # define RADEON_MAX_UV_ADR_SHIFT 0 # define RADEON_TABLE1_BOT_ADR_MASK 0x0000ff00 # define RADEON_TABLE1_BOT_ADR_SHIFT 8 # define RADEON_TABLE3_TOP_ADR_MASK 0x00ff0000 # define RADEON_TABLE3_TOP_ADR_SHIFT 16 # define RADEON_HCODE_TABLE_SEL_MASK 0x06000000 # define RADEON_HCODE_TABLE_SEL_SHIFT 25 # define RADEON_VCODE_TABLE_SEL_MASK 0x18000000 # define RADEON_VCODE_TABLE_SEL_SHIFT 27 # define RADEON_TV_MAX_FIFO_ADDR 0x1a7 # define RADEON_TV_MAX_FIFO_ADDR_INTERNAL 0x1ff #define RADEON_TV_PLL_FINE_CNTL 0x0020 /* PLL 
*/ #define RADEON_TV_PLL_CNTL 0x0021 /* PLL */ # define RADEON_TV_M0LO_MASK 0xff # define RADEON_TV_M0HI_MASK 0x7 # define RADEON_TV_M0HI_SHIFT 18 # define RADEON_TV_N0LO_MASK 0x1ff # define RADEON_TV_N0LO_SHIFT 8 # define RADEON_TV_N0HI_MASK 0x3 # define RADEON_TV_N0HI_SHIFT 21 # define RADEON_TV_P_MASK 0xf # define RADEON_TV_P_SHIFT 24 # define RADEON_TV_SLIP_EN (1 << 23) # define RADEON_TV_DTO_EN (1 << 28) #define RADEON_TV_PLL_CNTL1 0x0022 /* PLL */ # define RADEON_TVPLL_RESET (1 << 1) # define RADEON_TVPLL_SLEEP (1 << 3) # define RADEON_TVPLL_REFCLK_SEL (1 << 4) # define RADEON_TVPCP_SHIFT 8 # define RADEON_TVPCP_MASK (7 << 8) # define RADEON_TVPVG_SHIFT 11 # define RADEON_TVPVG_MASK (7 << 11) # define RADEON_TVPDC_SHIFT 14 # define RADEON_TVPDC_MASK (3 << 14) -# define RADEON_TVPLL_TEST_DIS (1 << 31) +# define RADEON_TVPLL_TEST_DIS (1U << 31) # define RADEON_TVCLK_SRC_SEL_TVPLL (1 << 30) #define RS400_DISP2_REQ_CNTL1 0xe30 # define RS400_DISP2_START_REQ_LEVEL_SHIFT 0 # define RS400_DISP2_START_REQ_LEVEL_MASK 0x3ff # define RS400_DISP2_STOP_REQ_LEVEL_SHIFT 12 # define RS400_DISP2_STOP_REQ_LEVEL_MASK 0x3ff # define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT 22 # define RS400_DISP2_ALLOW_FID_LEVEL_MASK 0x3ff #define RS400_DISP2_REQ_CNTL2 0xe34 # define RS400_DISP2_CRITICAL_POINT_START_SHIFT 12 # define RS400_DISP2_CRITICAL_POINT_START_MASK 0x3ff # define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT 22 # define RS400_DISP2_CRITICAL_POINT_STOP_MASK 0x3ff #define RS400_DMIF_MEM_CNTL1 0xe38 # define RS400_DISP2_START_ADR_SHIFT 0 # define RS400_DISP2_START_ADR_MASK 0x3ff # define RS400_DISP1_CRITICAL_POINT_START_SHIFT 12 # define RS400_DISP1_CRITICAL_POINT_START_MASK 0x3ff # define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT 22 # define RS400_DISP1_CRITICAL_POINT_STOP_MASK 0x3ff #define RS400_DISP1_REQ_CNTL1 0xe3c # define RS400_DISP1_START_REQ_LEVEL_SHIFT 0 # define RS400_DISP1_START_REQ_LEVEL_MASK 0x3ff # define RS400_DISP1_STOP_REQ_LEVEL_SHIFT 12 # define RS400_DISP1_STOP_REQ_LEVEL_MASK 0x3ff # define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT 22 # define RS400_DISP1_ALLOW_FID_LEVEL_MASK 0x3ff #define RADEON_PCIE_INDEX 0x0030 #define RADEON_PCIE_DATA 0x0034 #define RADEON_PCIE_TX_GART_CNTL 0x10 # define RADEON_PCIE_TX_GART_EN (1 << 0) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1) # define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3) # define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3) # define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5) # define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8) #define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11 #define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12 #define RADEON_PCIE_TX_GART_BASE 0x13 #define RADEON_PCIE_TX_GART_START_LO 0x14 #define RADEON_PCIE_TX_GART_START_HI 0x15 #define RADEON_PCIE_TX_GART_END_LO 0x16 #define RADEON_PCIE_TX_GART_END_HI 0x17 #define RADEON_PCIE_TX_GART_ERROR 0x18 #define RADEON_SCRATCH_REG0 0x15e0 #define RADEON_SCRATCH_REG1 0x15e4 #define RADEON_SCRATCH_REG2 0x15e8 #define RADEON_SCRATCH_REG3 0x15ec #define RADEON_SCRATCH_REG4 0x15f0 #define RADEON_SCRATCH_REG5 0x15f4 #define RV530_GB_PIPE_SELECT2 0x4124 #endif Index: head/sys/dev/drm2/radeon/rv770d.h =================================================================== --- head/sys/dev/drm2/radeon/rv770d.h (revision 258779) +++ head/sys/dev/drm2/radeon/rv770d.h (revision 258780) @@ -1,673 +1,673 @@ /* * Copyright 2009 Advanced Micro Devices, Inc. 
* Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #ifndef RV770_H #define RV770_H #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #define R7XX_MAX_SH_GPRS 256 #define R7XX_MAX_TEMP_GPRS 16 #define R7XX_MAX_SH_THREADS 256 #define R7XX_MAX_SH_STACK_ENTRIES 4096 #define R7XX_MAX_BACKENDS 8 #define R7XX_MAX_BACKENDS_MASK 0xff #define R7XX_MAX_SIMDS 16 #define R7XX_MAX_SIMDS_MASK 0xffff #define R7XX_MAX_PIPES 8 #define R7XX_MAX_PIPES_MASK 0xff /* Registers */ #define CB_COLOR0_BASE 0x28040 #define CB_COLOR1_BASE 0x28044 #define CB_COLOR2_BASE 0x28048 #define CB_COLOR3_BASE 0x2804C #define CB_COLOR4_BASE 0x28050 #define CB_COLOR5_BASE 0x28054 #define CB_COLOR6_BASE 0x28058 #define CB_COLOR7_BASE 0x2805C #define CB_COLOR7_FRAG 0x280FC #define CC_GC_SHADER_PIPE_CONFIG 0x8950 #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 #define CGTS_SYS_TCC_DISABLE 0x3F90 #define CGTS_TCC_DISABLE 0x9148 #define CGTS_USER_SYS_TCC_DISABLE 0x3F94 #define CGTS_USER_TCC_DISABLE 0x914C #define CONFIG_MEMSIZE 0x5428 #define CP_ME_CNTL 0x86D8 #define CP_ME_HALT (1<<28) #define CP_PFP_HALT (1<<26) #define CP_ME_RAM_DATA 0xC160 #define CP_ME_RAM_RADDR 0xC158 #define CP_ME_RAM_WADDR 0xC15C #define CP_MEQ_THRESHOLDS 0x8764 #define STQ_SPLIT(x) ((x) << 0) #define CP_PERFMON_CNTL 0x87FC #define CP_PFP_UCODE_ADDR 0xC150 #define CP_PFP_UCODE_DATA 0xC154 #define CP_QUEUE_THRESHOLDS 0x8760 #define ROQ_IB1_START(x) ((x) << 0) #define ROQ_IB2_START(x) ((x) << 8) #define CP_RB_CNTL 0xC104 #define RB_BUFSZ(x) ((x) << 0) #define RB_BLKSZ(x) ((x) << 8) #define RB_NO_UPDATE (1 << 27) -#define RB_RPTR_WR_ENA (1 << 31) +#define RB_RPTR_WR_ENA (1U << 31) #define BUF_SWAP_32BIT (2 << 16) #define CP_RB_RPTR 0x8700 #define CP_RB_RPTR_ADDR 0xC10C #define CP_RB_RPTR_ADDR_HI 0xC110 #define CP_RB_RPTR_WR 0xC108 #define CP_RB_WPTR 0xC114 #define CP_RB_WPTR_ADDR 0xC118 #define CP_RB_WPTR_ADDR_HI 0xC11C #define CP_RB_WPTR_DELAY 0x8704 #define CP_SEM_WAIT_TIMER 0x85BC #define DB_DEBUG3 0x98B0 #define DB_CLK_OFF_DELAY(x) ((x) << 11) #define DB_DEBUG4 0x9B8C #define DISABLE_TILE_COVERED_FOR_PS_ITER (1 << 6) #define DCP_TILING_CONFIG 0x6CA0 #define PIPE_TILING(x) ((x) << 1) #define BANK_TILING(x) ((x) << 4) #define GROUP_SIZE(x) ((x) << 6) #define ROW_TILING(x) ((x) << 8) #define BANK_SWAPS(x) ((x) << 11) #define SAMPLE_SPLIT(x) ((x) << 14) #define BACKEND_MAP(x) ((x) << 16) #define GB_TILING_CONFIG 0x98F0 #define
PIPE_TILING__SHIFT 1 #define PIPE_TILING__MASK 0x0000000e #define DMA_TILING_CONFIG 0x3ec8 #define DMA_TILING_CONFIG2 0xd0b8 #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES_MASK 0x0000FF00 #define INACTIVE_QD_PIPES_SHIFT 8 #define INACTIVE_SIMDS(x) ((x) << 16) #define INACTIVE_SIMDS_MASK 0x00FF0000 #define GRBM_CNTL 0x8000 #define GRBM_READ_TIMEOUT(x) ((x) << 0) #define GRBM_SOFT_RESET 0x8020 #define SOFT_RESET_CP (1<<0) #define GRBM_STATUS 0x8010 #define CMDFIFO_AVAIL_MASK 0x0000000F #define GUI_ACTIVE (1<<31) #define GRBM_STATUS2 0x8014 #define CG_MULT_THERMAL_STATUS 0x740 #define ASIC_T(x) ((x) << 16) #define ASIC_T_MASK 0x3FF0000 #define ASIC_T_SHIFT 16 #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_TILING_CONFIG 0x2F3C #define HDP_DEBUG1 0x2F34 #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 #define MC_SHARED_CHREMAP 0x2008 #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000003 #define NOOFRANK_SHIFT 2 #define NOOFRANK_MASK 0x00000004 #define NOOFROWS_SHIFT 3 #define NOOFROWS_MASK 0x00000038 #define NOOFCOLS_SHIFT 6 #define NOOFCOLS_MASK 0x000000C0 #define CHANSIZE_SHIFT 8 #define CHANSIZE_MASK 0x00000100 #define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_MASK 0x00000200 #define CHANSIZE_OVERRIDE (1 << 11) #define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 #define MC_VM_FB_LOCATION 0x2024 #define MC_VM_MB_L1_TLB0_CNTL 0x2234 #define MC_VM_MB_L1_TLB1_CNTL 0x2238 #define MC_VM_MB_L1_TLB2_CNTL 0x223C #define MC_VM_MB_L1_TLB3_CNTL 0x2240 #define ENABLE_L1_TLB (1 << 0) #define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) #define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) #define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) #define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) #define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) #define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) #define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15) #define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18) #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C #define MC_VM_MD_L1_TLB3_CNTL 0x2698 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 #define PA_CL_ENHANCE 0x8A14 #define CLIP_VTX_REORDER_ENA (1 << 0) #define NUM_CLIP_SEQ(x) ((x) << 1) #define PA_SC_AA_CONFIG 0x28C04 #define PA_SC_CLIPRECT_RULE 0x2820C #define PA_SC_EDGERULE 0x28230 #define PA_SC_FIFO_SIZE 0x8BCC #define SC_PRIM_FIFO_SIZE(x) ((x) << 0) #define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) #define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 #define FORCE_EOV_MAX_CLK_CNT(x) ((x)<<0) #define FORCE_EOV_MAX_REZ_CNT(x) ((x)<<16) #define PA_SC_LINE_STIPPLE 0x28A0C #define PA_SC_LINE_STIPPLE_STATE 0x8B10 #define PA_SC_MODE_CNTL 0x28A4C #define PA_SC_MULTI_CHIP_CNTL 0x8B20 #define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define SCRATCH_REG2 0x8508 #define SCRATCH_REG3 0x850C #define SCRATCH_REG4 0x8510 #define SCRATCH_REG5 0x8514 #define SCRATCH_REG6 0x8518 #define SCRATCH_REG7 0x851C #define SCRATCH_UMSK 0x8540 #define SCRATCH_ADDR 0x8544 #define SMX_SAR_CTL0 0xA008 #define SMX_DC_CTL0 0xA020 #define USE_HASH_FUNCTION (1 << 0) #define CACHE_DEPTH(x) ((x) << 1) #define FLUSH_ALL_ON_EVENT (1 << 10) 
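/*
 * Illustrative aside (a sketch, not part of the upstream header): the -/+
 * hunks throughout this revision all make the same one-character fix.  On
 * ILP32/LP64 targets int is 32 bits wide, so (1 << 31) shifts a set bit
 * into the sign position, which is undefined behavior in C, and the
 * negative value it produces in practice sign-extends when widened;
 * (1U << 31) keeps the mask unsigned.  For example:
 *
 *	#include <stdint.h>
 *	uint64_t bad  = (1 << 31);	// widens to 0xffffffff80000000
 *	uint64_t good = (1U << 31);	// stays     0x0000000080000000
 */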
#define STALL_ON_EVENT (1 << 11) #define SMX_EVENT_CTL 0xA02C #define ES_FLUSH_CTL(x) ((x) << 0) #define GS_FLUSH_CTL(x) ((x) << 3) #define ACK_FLUSH_CTL(x) ((x) << 6) #define SYNC_FLUSH_CTL (1 << 8) #define SPI_CONFIG_CNTL 0x9100 #define GPR_WRITE_PRIORITY(x) ((x) << 0) #define DISABLE_INTERP_1 (1 << 5) #define SPI_CONFIG_CNTL_1 0x913C #define VTX_DONE_DELAY(x) ((x) << 0) #define INTERP_ONE_PRIM_PER_ROW (1 << 4) #define SPI_INPUT_Z 0x286D8 #define SPI_PS_IN_CONTROL_0 0x286CC #define NUM_INTERP(x) ((x)<<0) #define POSITION_ENA (1<<8) #define POSITION_CENTROID (1<<9) #define POSITION_ADDR(x) ((x)<<10) #define PARAM_GEN(x) ((x)<<15) #define PARAM_GEN_ADDR(x) ((x)<<19) #define BARYC_SAMPLE_CNTL(x) ((x)<<26) #define PERSP_GRADIENT_ENA (1<<28) #define LINEAR_GRADIENT_ENA (1<<29) #define POSITION_SAMPLE (1<<30) #define BARYC_AT_SAMPLE_ENA (1<<31) #define SQ_CONFIG 0x8C00 #define VC_ENABLE (1 << 0) #define EXPORT_SRC_C (1 << 1) #define DX9_CONSTS (1 << 2) #define ALU_INST_PREFER_VECTOR (1 << 3) #define DX10_CLAMP (1 << 4) #define CLAUSE_SEQ_PRIO(x) ((x) << 8) #define PS_PRIO(x) ((x) << 24) #define VS_PRIO(x) ((x) << 26) #define GS_PRIO(x) ((x) << 28) #define SQ_DYN_GPR_SIZE_SIMD_AB_0 0x8DB0 #define SIMDA_RING0(x) ((x)<<0) #define SIMDA_RING1(x) ((x)<<8) #define SIMDB_RING0(x) ((x)<<16) #define SIMDB_RING1(x) ((x)<<24) #define SQ_DYN_GPR_SIZE_SIMD_AB_1 0x8DB4 #define SQ_DYN_GPR_SIZE_SIMD_AB_2 0x8DB8 #define SQ_DYN_GPR_SIZE_SIMD_AB_3 0x8DBC #define SQ_DYN_GPR_SIZE_SIMD_AB_4 0x8DC0 #define SQ_DYN_GPR_SIZE_SIMD_AB_5 0x8DC4 #define SQ_DYN_GPR_SIZE_SIMD_AB_6 0x8DC8 #define SQ_DYN_GPR_SIZE_SIMD_AB_7 0x8DCC #define ES_PRIO(x) ((x) << 30) #define SQ_GPR_RESOURCE_MGMT_1 0x8C04 #define NUM_PS_GPRS(x) ((x) << 0) #define NUM_VS_GPRS(x) ((x) << 16) #define DYN_GPR_ENABLE (1 << 27) #define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) #define SQ_GPR_RESOURCE_MGMT_2 0x8C08 #define NUM_GS_GPRS(x) ((x) << 0) #define NUM_ES_GPRS(x) ((x) << 16) #define SQ_MS_FIFO_SIZES 0x8CF0 #define CACHE_FIFO_SIZE(x) ((x) << 0) #define FETCH_FIFO_HIWATER(x) ((x) << 8) #define DONE_FIFO_HIWATER(x) ((x) << 16) #define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) #define SQ_STACK_RESOURCE_MGMT_1 0x8C10 #define NUM_PS_STACK_ENTRIES(x) ((x) << 0) #define NUM_VS_STACK_ENTRIES(x) ((x) << 16) #define SQ_STACK_RESOURCE_MGMT_2 0x8C14 #define NUM_GS_STACK_ENTRIES(x) ((x) << 0) #define NUM_ES_STACK_ENTRIES(x) ((x) << 16) #define SQ_THREAD_RESOURCE_MGMT 0x8C0C #define NUM_PS_THREADS(x) ((x) << 0) #define NUM_VS_THREADS(x) ((x) << 8) #define NUM_GS_THREADS(x) ((x) << 16) #define NUM_ES_THREADS(x) ((x) << 24) #define SX_DEBUG_1 0x9058 #define ENABLE_NEW_SMX_ADDRESS (1 << 16) #define SX_EXPORT_BUFFER_SIZES 0x900C #define COLOR_BUFFER_SIZE(x) ((x) << 0) #define POSITION_BUFFER_SIZE(x) ((x) << 8) #define SMX_BUFFER_SIZE(x) ((x) << 16) #define SX_MISC 0x28350 #define TA_CNTL_AUX 0x9508 #define DISABLE_CUBE_WRAP (1 << 0) #define DISABLE_CUBE_ANISO (1 << 1) #define SYNC_GRADIENT (1 << 24) #define SYNC_WALKER (1 << 25) #define SYNC_ALIGNER (1 << 26) #define BILINEAR_PRECISION_6_BIT (0 << 31) -#define BILINEAR_PRECISION_8_BIT (1 << 31) +#define BILINEAR_PRECISION_8_BIT (1U << 31) #define TCP_CNTL 0x9610 #define TCP_CHAN_STEER 0x9614 #define VC_ENHANCE 0x9714 #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x)<<0) #define VC_ONLY 0 #define TC_ONLY 1 #define VC_AND_TC 2 #define AUTO_INVLD_EN(x) ((x) << 6) #define NO_AUTO 0 #define ES_AUTO 1 #define GS_AUTO 2 #define ES_AND_GS_AUTO 3 #define VGT_ES_PER_GS 0x88CC #define VGT_GS_PER_ES 0x88C8 #define 
VGT_GS_PER_VS 0x88E8 #define VGT_GS_VERTEX_REUSE 0x88D4 #define VGT_NUM_INSTANCES 0x8974 #define VGT_OUT_DEALLOC_CNTL 0x28C5C #define DEALLOC_DIST_MASK 0x0000007F #define VGT_STRMOUT_EN 0x28AB0 #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 #define VTX_REUSE_DEPTH_MASK 0x000000FF #define VM_CONTEXT0_CNTL 0x1410 #define ENABLE_CONTEXT (1 << 0) #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 #define VM_L2_CNTL 0x1400 #define ENABLE_L2_CACHE (1 << 0) #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) #define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) #define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14) #define VM_L2_CNTL2 0x1404 #define INVALIDATE_ALL_L1_TLBS (1 << 0) #define INVALIDATE_L2_CACHE (1 << 1) #define VM_L2_CNTL3 0x1408 #define BANK_SELECT(x) ((x) << 0) #define CACHE_UPDATE_MODE(x) ((x) << 6) #define VM_L2_STATUS 0x140C #define L2_BUSY (1 << 0) #define WAIT_UNTIL 0x8040 /* async DMA */ #define DMA_RB_RPTR 0xd008 #define DMA_RB_WPTR 0xd00c /* async DMA packets */ #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ (((t) & 0x1) << 23) | \ (((s) & 0x1) << 22) | \ (((n) & 0xFFFF) << 0)) /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 #define DMA_PACKET_INDIRECT_BUFFER 0x4 #define DMA_PACKET_SEMAPHORE 0x5 #define DMA_PACKET_FENCE 0x6 #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_CONSTANT_FILL 0xd #define DMA_PACKET_NOP 0xf #define SRBM_STATUS 0x0E50 /* DCE 3.2 HDMI */ #define HDMI_CONTROL 0x7400 # define HDMI_KEEPOUT_MODE (1 << 0) # define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */ # define HDMI_ERROR_ACK (1 << 8) # define HDMI_ERROR_MASK (1 << 9) #define HDMI_STATUS 0x7404 # define HDMI_ACTIVE_AVMUTE (1 << 0) # define HDMI_AUDIO_PACKET_ERROR (1 << 16) # define HDMI_VBI_PACKET_ERROR (1 << 20) #define HDMI_AUDIO_PACKET_CONTROL 0x7408 # define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4) # define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) #define HDMI_ACR_PACKET_CONTROL 0x740c # define HDMI_ACR_SEND (1 << 0) # define HDMI_ACR_CONT (1 << 1) # define HDMI_ACR_SELECT(x) (((x) & 3) << 4) # define HDMI_ACR_HW 0 # define HDMI_ACR_32 1 # define HDMI_ACR_44 2 # define HDMI_ACR_48 3 # define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ # define HDMI_ACR_AUTO_SEND (1 << 12) #define HDMI_VBI_PACKET_CONTROL 0x7410 # define HDMI_NULL_SEND (1 << 0) # define HDMI_GC_SEND (1 << 4) # define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */ #define HDMI_INFOFRAME_CONTROL0 0x7414 # define HDMI_AVI_INFO_SEND (1 << 0) # define HDMI_AVI_INFO_CONT (1 << 1) # define HDMI_AUDIO_INFO_SEND (1 << 4) # define HDMI_AUDIO_INFO_CONT (1 << 5) # define HDMI_MPEG_INFO_SEND (1 << 8) # define HDMI_MPEG_INFO_CONT (1 << 9) #define HDMI_INFOFRAME_CONTROL1 0x7418 # define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) # define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) # define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) #define HDMI_GENERIC_PACKET_CONTROL 0x741c # define HDMI_GENERIC0_SEND (1 << 0) # define HDMI_GENERIC0_CONT (1 << 1) # define HDMI_GENERIC1_SEND (1 << 4) # define HDMI_GENERIC1_CONT (1 << 5) # define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16) # define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24) #define HDMI_GC 0x7428 # define HDMI_GC_AVMUTE (1 << 0) #define AFMT_AUDIO_PACKET_CONTROL2 0x742c # 
define AFMT_AUDIO_LAYOUT_OVRD (1 << 0) # define AFMT_AUDIO_LAYOUT_SELECT (1 << 1) # define AFMT_60958_CS_SOURCE (1 << 4) # define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8) # define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16) #define AFMT_AVI_INFO0 0x7454 # define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define AFMT_AVI_INFO_S(x) (((x) & 3) << 8) # define AFMT_AVI_INFO_B(x) (((x) & 3) << 10) # define AFMT_AVI_INFO_A(x) (((x) & 1) << 12) # define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13) # define AFMT_AVI_INFO_Y_RGB 0 # define AFMT_AVI_INFO_Y_YCBCR422 1 # define AFMT_AVI_INFO_Y_YCBCR444 2 # define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8) # define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16) # define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20) # define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22) # define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16) # define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24) # define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26) # define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28) # define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31) # define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24) #define AFMT_AVI_INFO1 0x7458 # define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */ # define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */ # define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16) #define AFMT_AVI_INFO2 0x745c # define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0) # define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16) #define AFMT_AVI_INFO3 0x7460 # define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0) # define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24) #define AFMT_MPEG_INFO0 0x7464 # define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8) # define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16) # define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24) #define AFMT_MPEG_INFO1 0x7468 # define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0) # define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8) # define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12) #define AFMT_GENERIC0_HDR 0x746c #define AFMT_GENERIC0_0 0x7470 #define AFMT_GENERIC0_1 0x7474 #define AFMT_GENERIC0_2 0x7478 #define AFMT_GENERIC0_3 0x747c #define AFMT_GENERIC0_4 0x7480 #define AFMT_GENERIC0_5 0x7484 #define AFMT_GENERIC0_6 0x7488 #define AFMT_GENERIC1_HDR 0x748c #define AFMT_GENERIC1_0 0x7490 #define AFMT_GENERIC1_1 0x7494 #define AFMT_GENERIC1_2 0x7498 #define AFMT_GENERIC1_3 0x749c #define AFMT_GENERIC1_4 0x74a0 #define AFMT_GENERIC1_5 0x74a4 #define AFMT_GENERIC1_6 0x74a8 #define HDMI_ACR_32_0 0x74ac # define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12) #define HDMI_ACR_32_1 0x74b0 # define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0) #define HDMI_ACR_44_0 0x74b4 # define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12) #define HDMI_ACR_44_1 0x74b8 # define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0) #define HDMI_ACR_48_0 0x74bc # define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12) #define HDMI_ACR_48_1 0x74c0 # define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0) #define HDMI_ACR_STATUS_0 0x74c4 #define HDMI_ACR_STATUS_1 0x74c8 #define AFMT_AUDIO_INFO0 0x74cc # define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0) # define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8) # define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16) #define AFMT_AUDIO_INFO1 0x74d0 # define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0) # define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11) # define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15) # define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8) #define 
AFMT_60958_0 0x74d4 # define AFMT_60958_CS_A(x) (((x) & 1) << 0) # define AFMT_60958_CS_B(x) (((x) & 1) << 1) # define AFMT_60958_CS_C(x) (((x) & 1) << 2) # define AFMT_60958_CS_D(x) (((x) & 3) << 3) # define AFMT_60958_CS_MODE(x) (((x) & 3) << 6) # define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) # define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) # define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) # define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) # define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) #define AFMT_60958_1 0x74d8 # define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) # define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) # define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16) # define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18) # define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) #define AFMT_AUDIO_CRC_CONTROL 0x74dc # define AFMT_AUDIO_CRC_EN (1 << 0) #define AFMT_RAMP_CONTROL0 0x74e0 # define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) -# define AFMT_RAMP_DATA_SIGN (1 << 31) +# define AFMT_RAMP_DATA_SIGN (1U << 31) #define AFMT_RAMP_CONTROL1 0x74e4 # define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0) # define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24) #define AFMT_RAMP_CONTROL2 0x74e8 # define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0) #define AFMT_RAMP_CONTROL3 0x74ec # define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0) #define AFMT_60958_2 0x74f0 # define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0) # define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4) # define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8) # define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12) # define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16) # define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20) #define AFMT_STATUS 0x7600 # define AFMT_AUDIO_ENABLE (1 << 4) # define AFMT_AZ_FORMAT_WTRIG (1 << 28) # define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29) # define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30) #define AFMT_AUDIO_PACKET_CONTROL 0x7604 # define AFMT_AUDIO_SAMPLE_SEND (1 << 0) # define AFMT_AUDIO_TEST_EN (1 << 12) # define AFMT_AUDIO_CHANNEL_SWAP (1 << 24) # define AFMT_60958_CS_UPDATE (1 << 26) # define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27) # define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28) # define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) # define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) #define AFMT_VBI_PACKET_CONTROL 0x7608 # define AFMT_GENERIC0_UPDATE (1 << 2) #define AFMT_INFOFRAME_CONTROL0 0x760c # define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ # define AFMT_AUDIO_INFO_UPDATE (1 << 7) # define AFMT_MPEG_INFO_UPDATE (1 << 10) #define AFMT_GENERIC0_7 0x7610 /* second instance starts at 0x7800 */ #define HDMI_OFFSET0 (0x7400 - 0x7400) #define HDMI_OFFSET1 (0x7800 - 0x7400) /* DCE3.2 ELD audio interface */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */ 
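/*
 * Usage sketch (not part of the upstream header, shown only to make the
 * field-packing macros concrete): the DMA_PACKET() macro defined earlier
 * in this file assembles an async DMA packet header from its cmd/t/s/n
 * fields, e.g. a constant-fill header covering 16 dwords:
 *
 *	uint32_t hdr = DMA_PACKET(DMA_PACKET_CONSTANT_FILL, 0, 0, 16);
 *	// ((0xd & 0xf) << 28) | (16 & 0xffff) == 0xd0000010
 */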
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */ #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */ # define MAX_CHANNELS(x) (((x) & 0x7) << 0) /* max channels minus one. 7 = 8 channels */ # define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8) # define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16) # define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */ /* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO * bit0 = 32 kHz * bit1 = 44.1 kHz * bit2 = 48 kHz * bit3 = 88.2 kHz * bit4 = 96 kHz * bit5 = 176.4 kHz * bit6 = 192 kHz */ #define AZ_HOT_PLUG_CONTROL 0x7300 # define AZ_FORCE_CODEC_WAKE (1 << 0) # define PIN0_JACK_DETECTION_ENABLE (1 << 4) # define PIN1_JACK_DETECTION_ENABLE (1 << 5) # define PIN2_JACK_DETECTION_ENABLE (1 << 6) # define PIN3_JACK_DETECTION_ENABLE (1 << 7) # define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8) # define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9) # define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10) # define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11) # define CODEC_HOT_PLUG_ENABLE (1 << 12) # define PIN0_AUDIO_ENABLED (1 << 24) # define PIN1_AUDIO_ENABLED (1 << 25) # define PIN2_AUDIO_ENABLED (1 << 26) # define PIN3_AUDIO_ENABLED (1 << 27) -# define AUDIO_ENABLED (1 << 31) +# define AUDIO_ENABLED (1U << 31) #define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 #define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 #define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c #define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c /* PCIE link stuff */ #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ # define LC_LINK_WIDTH_SHIFT 0 # define LC_LINK_WIDTH_MASK 0x7 # define LC_LINK_WIDTH_X0 0 # define LC_LINK_WIDTH_X1 1 # define LC_LINK_WIDTH_X2 2 # define LC_LINK_WIDTH_X4 3 # define LC_LINK_WIDTH_X8 4 # define LC_LINK_WIDTH_X16 6 # define LC_LINK_WIDTH_RD_SHIFT 4 # define LC_LINK_WIDTH_RD_MASK 0x70 # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) # define LC_RECONFIG_NOW (1 << 8) # define LC_RENEGOTIATION_SUPPORT (1 << 9) # define LC_RENEGOTIATE_EN (1 << 10) # define LC_SHORT_RECONFIG_EN (1 << 11) # define LC_UPCONFIGURE_SUPPORT (1 << 12) # define LC_UPCONFIGURE_DIS (1 << 13) #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ # define LC_GEN2_EN_STRAP (1 << 0) # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 # define LC_CURRENT_DATA_RATE (1 << 11) # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) #define MM_CFGREGS_CNTL 0x544c # define MM_WR_TO_CFG_EN (1 << 3) #define LINK_CNTL2 0x88 /* F0 */ # define TARGET_LINK_SPEED_MASK (0xf << 0) # define SELECTABLE_DEEMPHASIS (1 << 6) #endif Index: head/sys/dev/drm2/radeon/sid.h =================================================================== --- head/sys/dev/drm2/radeon/sid.h (revision 258779) +++ head/sys/dev/drm2/radeon/sid.h (revision 258780) @@ -1,1065 +1,1065 @@ /* * Copyright 2011 
Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Alex Deucher */ #ifndef SI_H #define SI_H #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #define TAHITI_RB_BITMAP_WIDTH_PER_SH 2 #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 #define CG_MULT_THERMAL_STATUS 0x714 #define ASIC_MAX_TEMP(x) ((x) << 0) #define ASIC_MAX_TEMP_MASK 0x000001ff #define ASIC_MAX_TEMP_SHIFT 0 #define CTF_TEMP(x) ((x) << 9) #define CTF_TEMP_MASK 0x0003fe00 #define CTF_TEMP_SHIFT 9 #define SI_MAX_SH_GPRS 256 #define SI_MAX_TEMP_GPRS 16 #define SI_MAX_SH_THREADS 256 #define SI_MAX_SH_STACK_ENTRIES 4096 #define SI_MAX_FRC_EOV_CNT 16384 #define SI_MAX_BACKENDS 8 #define SI_MAX_BACKENDS_MASK 0xFF #define SI_MAX_BACKENDS_PER_SE_MASK 0x0F #define SI_MAX_SIMDS 12 #define SI_MAX_SIMDS_MASK 0x0FFF #define SI_MAX_SIMDS_PER_SE_MASK 0x00FF #define SI_MAX_PIPES 8 #define SI_MAX_PIPES_MASK 0xFF #define SI_MAX_PIPES_PER_SIMD_MASK 0x3F #define SI_MAX_LDS_NUM 0xFFFF #define SI_MAX_TCC 16 #define SI_MAX_TCC_MASK 0xFFFF #define VGA_HDP_CONTROL 0x328 #define VGA_MEMORY_DISABLE (1 << 4) #define DMIF_ADDR_CONFIG 0xBD4 #define SRBM_STATUS 0xE50 #define SRBM_SOFT_RESET 0x0E60 #define SOFT_RESET_BIF (1 << 1) #define SOFT_RESET_DC (1 << 5) #define SOFT_RESET_DMA1 (1 << 6) #define SOFT_RESET_GRBM (1 << 8) #define SOFT_RESET_HDP (1 << 9) #define SOFT_RESET_IH (1 << 10) #define SOFT_RESET_MC (1 << 11) #define SOFT_RESET_ROM (1 << 14) #define SOFT_RESET_SEM (1 << 15) #define SOFT_RESET_VMC (1 << 17) #define SOFT_RESET_DMA (1 << 20) #define SOFT_RESET_TST (1 << 21) #define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) #define CC_SYS_RB_BACKEND_DISABLE 0xe80 #define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 #define VM_L2_CNTL 0x1400 #define ENABLE_L2_CACHE (1 << 0) #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) #define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2) #define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4) #define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) #define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10) #define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15) #define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19) #define VM_L2_CNTL2 0x1404 #define INVALIDATE_ALL_L1_TLBS (1 << 0) #define INVALIDATE_L2_CACHE (1 << 1) #define INVALIDATE_CACHE_MODE(x) ((x) << 26) #define INVALIDATE_PTE_AND_PDE_CACHES 0 #define INVALIDATE_ONLY_PTE_CACHES 1 #define INVALIDATE_ONLY_PDE_CACHES 2 #define VM_L2_CNTL3 0x1408 #define BANK_SELECT(x) ((x) << 0) #define
L2_CACHE_UPDATE_MODE(x) ((x) << 6) #define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15) #define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20) #define VM_L2_STATUS 0x140C #define L2_BUSY (1 << 0) #define VM_CONTEXT0_CNTL 0x1410 #define ENABLE_CONTEXT (1 << 0) #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) #define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3) #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) #define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6) #define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7) #define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9) #define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10) #define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12) #define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13) #define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15) #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) #define VM_CONTEXT1_CNTL 0x1414 #define VM_CONTEXT0_CNTL2 0x1430 #define VM_CONTEXT1_CNTL2 0x1434 #define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x1438 #define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x143c #define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x1440 #define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x1444 #define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x1448 #define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x144c #define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450 #define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454 #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC #define VM_INVALIDATE_REQUEST 0x1478 #define VM_INVALIDATE_RESPONSE 0x147c #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 #define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c #define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x1540 #define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x1544 #define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x1548 #define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x154c #define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x1550 #define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x1554 #define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x1558 #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c #define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x1560 #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C #define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580 #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x0000f000 #define MC_SHARED_CHREMAP 0x2008 #define MC_VM_FB_LOCATION 0x2024 #define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_MX_L1_TLB_CNTL 0x2064 #define ENABLE_L1_TLB (1 << 0) #define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) #define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) #define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) #define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) #define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) #define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) #define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6) #define MC_SHARED_BLACKOUT_CNTL 0x20ac #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000003 #define NOOFRANK_SHIFT 2 #define NOOFRANK_MASK 0x00000004 #define NOOFROWS_SHIFT 3 #define NOOFROWS_MASK 0x00000038 #define NOOFCOLS_SHIFT 6 #define NOOFCOLS_MASK 0x000000C0 #define CHANSIZE_SHIFT 8 #define CHANSIZE_MASK 0x00000100 #define CHANSIZE_OVERRIDE (1 << 11) #define NOOFGROUPS_SHIFT 12 #define 
NOOFGROUPS_MASK 0x00001000 #define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808 #define TRAIN_DONE_D0 (1 << 30) -#define TRAIN_DONE_D1 (1 << 31) +#define TRAIN_DONE_D1 (1U << 31) #define MC_SEQ_SUP_CNTL 0x28c8 #define RUN_MASK (1 << 0) #define MC_SEQ_SUP_PGM 0x28cc #define MC_IO_PAD_CNTL_D0 0x29d0 #define MEM_FALL_OUT_CMD (1 << 8) #define MC_SEQ_IO_DEBUG_INDEX 0x2a44 #define MC_SEQ_IO_DEBUG_DATA 0x2a48 #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_ADDR_CONFIG 0x2F48 #define HDP_MISC_CNTL 0x2F4C #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ # define IH_RB_FULL_DRAIN_ENABLE (1 << 6) # define IH_WPTR_WRITEBACK_ENABLE (1 << 8) # define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ # define IH_WPTR_OVERFLOW_ENABLE (1 << 16) -# define IH_WPTR_OVERFLOW_CLEAR (1 << 31) +# define IH_WPTR_OVERFLOW_CLEAR (1U << 31) #define IH_RB_BASE 0x3e04 #define IH_RB_RPTR 0x3e08 #define IH_RB_WPTR 0x3e0c # define RB_OVERFLOW (1 << 0) # define WPTR_OFFSET_MASK 0x3fffc #define IH_RB_WPTR_ADDR_HI 0x3e10 #define IH_RB_WPTR_ADDR_LO 0x3e14 #define IH_CNTL 0x3e18 # define ENABLE_INTR (1 << 0) # define IH_MC_SWAP(x) ((x) << 1) # define IH_MC_SWAP_NONE 0 # define IH_MC_SWAP_16BIT 1 # define IH_MC_SWAP_32BIT 2 # define IH_MC_SWAP_64BIT 3 # define RPTR_REARM (1 << 4) # define MC_WRREQ_CREDIT(x) ((x) << 15) # define MC_WR_CLEAN_CNT(x) ((x) << 20) # define MC_VMID(x) ((x) << 25) #define CONFIG_MEMSIZE 0x5428 #define INTERRUPT_CNTL 0x5468 # define IH_DUMMY_RD_OVERRIDE (1 << 0) # define IH_DUMMY_RD_EN (1 << 1) # define IH_REQ_NONSNOOP_EN (1 << 3) # define GEN_IH_INT_EN (1 << 8) #define INTERRUPT_CNTL2 0x546c #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 #define BIF_FB_EN 0x5490 #define FB_READ_EN (1 << 0) #define FB_WRITE_EN (1 << 1) #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define DC_LB_MEMORY_SPLIT 0x6b0c #define DC_LB_MEMORY_CONFIG(x) ((x) << 20) #define PRIORITY_A_CNT 0x6b18 #define PRIORITY_MARK_MASK 0x7fff #define PRIORITY_OFF (1 << 16) #define PRIORITY_ALWAYS_ON (1 << 20) #define PRIORITY_B_CNT 0x6b1c #define DPG_PIPE_ARBITRATION_CONTROL3 0x6cc8 # define LATENCY_WATERMARK_MASK(x) ((x) << 16) #define DPG_PIPE_LATENCY_CONTROL 0x6ccc # define LATENCY_LOW_WATERMARK(x) ((x) << 0) # define LATENCY_HIGH_WATERMARK(x) ((x) << 16) /* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */ #define VLINE_STATUS 0x6bb8 # define VLINE_OCCURRED (1 << 0) # define VLINE_ACK (1 << 4) # define VLINE_STAT (1 << 12) # define VLINE_INTERRUPT (1 << 16) # define VLINE_INTERRUPT_TYPE (1 << 17) /* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */ #define VBLANK_STATUS 0x6bbc # define VBLANK_OCCURRED (1 << 0) # define VBLANK_ACK (1 << 4) # define VBLANK_STAT (1 << 12) # define VBLANK_INTERRUPT (1 << 16) # define VBLANK_INTERRUPT_TYPE (1 << 17) /* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */ #define INT_MASK 0x6b40 # define VBLANK_INT_MASK (1 << 0) # define VLINE_INT_MASK (1 << 4) #define DISP_INTERRUPT_STATUS 0x60f4 # define LB_D1_VLINE_INTERRUPT (1 << 2) # define LB_D1_VBLANK_INTERRUPT (1 << 3) # define DC_HPD1_INTERRUPT (1 << 17) # define DC_HPD1_RX_INTERRUPT (1 << 18) # define DACA_AUTODETECT_INTERRUPT (1 << 22) # define DACB_AUTODETECT_INTERRUPT (1 << 23) # define DC_I2C_SW_DONE_INTERRUPT (1 << 24) # define DC_I2C_HW_DONE_INTERRUPT (1 << 25) #define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8 # define LB_D2_VLINE_INTERRUPT (1 << 2) # define 
LB_D2_VBLANK_INTERRUPT (1 << 3) # define DC_HPD2_INTERRUPT (1 << 17) # define DC_HPD2_RX_INTERRUPT (1 << 18) # define DISP_TIMER_INTERRUPT (1 << 24) #define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc # define LB_D3_VLINE_INTERRUPT (1 << 2) # define LB_D3_VBLANK_INTERRUPT (1 << 3) # define DC_HPD3_INTERRUPT (1 << 17) # define DC_HPD3_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100 # define LB_D4_VLINE_INTERRUPT (1 << 2) # define LB_D4_VBLANK_INTERRUPT (1 << 3) # define DC_HPD4_INTERRUPT (1 << 17) # define DC_HPD4_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c # define LB_D5_VLINE_INTERRUPT (1 << 2) # define LB_D5_VBLANK_INTERRUPT (1 << 3) # define DC_HPD5_INTERRUPT (1 << 17) # define DC_HPD5_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150 # define LB_D6_VLINE_INTERRUPT (1 << 2) # define LB_D6_VBLANK_INTERRUPT (1 << 3) # define DC_HPD6_INTERRUPT (1 << 17) # define DC_HPD6_RX_INTERRUPT (1 << 18) /* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ #define GRPH_INT_STATUS 0x6858 # define GRPH_PFLIP_INT_OCCURRED (1 << 0) # define GRPH_PFLIP_INT_CLEAR (1 << 8) /* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ #define GRPH_INT_CONTROL 0x685c # define GRPH_PFLIP_INT_MASK (1 << 0) # define GRPH_PFLIP_INT_TYPE (1 << 8) #define DACA_AUTODETECT_INT_CONTROL 0x66c8 #define DC_HPD1_INT_STATUS 0x601c #define DC_HPD2_INT_STATUS 0x6028 #define DC_HPD3_INT_STATUS 0x6034 #define DC_HPD4_INT_STATUS 0x6040 #define DC_HPD5_INT_STATUS 0x604c #define DC_HPD6_INT_STATUS 0x6058 # define DC_HPDx_INT_STATUS (1 << 0) # define DC_HPDx_SENSE (1 << 1) # define DC_HPDx_RX_INT_STATUS (1 << 8) #define DC_HPD1_INT_CONTROL 0x6020 #define DC_HPD2_INT_CONTROL 0x602c #define DC_HPD3_INT_CONTROL 0x6038 #define DC_HPD4_INT_CONTROL 0x6044 #define DC_HPD5_INT_CONTROL 0x6050 #define DC_HPD6_INT_CONTROL 0x605c # define DC_HPDx_INT_ACK (1 << 0) # define DC_HPDx_INT_POLARITY (1 << 8) # define DC_HPDx_INT_EN (1 << 16) # define DC_HPDx_RX_INT_ACK (1 << 20) # define DC_HPDx_RX_INT_EN (1 << 24) #define DC_HPD1_CONTROL 0x6024 #define DC_HPD2_CONTROL 0x6030 #define DC_HPD3_CONTROL 0x603c #define DC_HPD4_CONTROL 0x6048 #define DC_HPD5_CONTROL 0x6054 #define DC_HPD6_CONTROL 0x6060 # define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) # define DC_HPDx_EN (1 << 28) /* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ #define CRTC_STATUS_FRAME_COUNT 0x6e98 #define GRBM_CNTL 0x8000 #define GRBM_READ_TIMEOUT(x) ((x) << 0) #define GRBM_STATUS2 0x8008 #define RLC_RQ_PENDING (1 << 0) #define RLC_BUSY (1 << 8) #define TC_BUSY (1 << 9) #define GRBM_STATUS 0x8010 #define CMDFIFO_AVAIL_MASK 0x0000000F #define RING2_RQ_PENDING (1 << 4) #define SRBM_RQ_PENDING (1 << 5) #define RING1_RQ_PENDING (1 << 6) #define CF_RQ_PENDING (1 << 7) #define PF_RQ_PENDING (1 << 8) #define GDS_DMA_RQ_PENDING (1 << 9) #define GRBM_EE_BUSY (1 << 10) #define DB_CLEAN (1 << 12) #define CB_CLEAN (1 << 13) #define TA_BUSY (1 << 14) #define GDS_BUSY (1 << 15) #define VGT_BUSY (1 << 17) #define IA_BUSY_NO_DMA (1 << 18) #define IA_BUSY (1 << 19) #define SX_BUSY (1 << 20) #define SPI_BUSY (1 << 22) #define BCI_BUSY (1 << 23) #define SC_BUSY (1 << 24) #define PA_BUSY (1 << 25) #define DB_BUSY (1 << 26) #define CP_COHERENCY_BUSY (1 << 28) #define CP_BUSY (1 << 29) #define CB_BUSY (1 << 30) -#define GUI_ACTIVE (1 << 31) +#define GUI_ACTIVE (1U << 31) #define GRBM_STATUS_SE0 0x8014 #define GRBM_STATUS_SE1 0x8018 #define SE_DB_CLEAN (1 << 1) #define SE_CB_CLEAN (1 << 2) #define 
SE_BCI_BUSY (1 << 22) #define SE_VGT_BUSY (1 << 23) #define SE_PA_BUSY (1 << 24) #define SE_TA_BUSY (1 << 25) #define SE_SX_BUSY (1 << 26) #define SE_SPI_BUSY (1 << 27) #define SE_SC_BUSY (1 << 29) #define SE_DB_BUSY (1 << 30) -#define SE_CB_BUSY (1 << 31) +#define SE_CB_BUSY (1U << 31) #define GRBM_SOFT_RESET 0x8020 #define SOFT_RESET_CP (1 << 0) #define SOFT_RESET_CB (1 << 1) #define SOFT_RESET_RLC (1 << 2) #define SOFT_RESET_DB (1 << 3) #define SOFT_RESET_GDS (1 << 4) #define SOFT_RESET_PA (1 << 5) #define SOFT_RESET_SC (1 << 6) #define SOFT_RESET_BCI (1 << 7) #define SOFT_RESET_SPI (1 << 8) #define SOFT_RESET_SX (1 << 10) #define SOFT_RESET_TC (1 << 11) #define SOFT_RESET_TA (1 << 12) #define SOFT_RESET_VGT (1 << 14) #define SOFT_RESET_IA (1 << 15) #define GRBM_GFX_INDEX 0x802C #define INSTANCE_INDEX(x) ((x) << 0) #define SH_INDEX(x) ((x) << 8) #define SE_INDEX(x) ((x) << 16) #define SH_BROADCAST_WRITES (1 << 29) #define INSTANCE_BROADCAST_WRITES (1 << 30) -#define SE_BROADCAST_WRITES (1 << 31) +#define SE_BROADCAST_WRITES (1U << 31) #define GRBM_INT_CNTL 0x8060 # define RDERR_INT_ENABLE (1 << 0) # define GUI_IDLE_INT_ENABLE (1 << 19) #define CP_STRMOUT_CNTL 0x84FC #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define SCRATCH_REG2 0x8508 #define SCRATCH_REG3 0x850C #define SCRATCH_REG4 0x8510 #define SCRATCH_REG5 0x8514 #define SCRATCH_REG6 0x8518 #define SCRATCH_REG7 0x851C #define SCRATCH_UMSK 0x8540 #define SCRATCH_ADDR 0x8544 #define CP_SEM_WAIT_TIMER 0x85BC #define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8 #define CP_ME_CNTL 0x86D8 #define CP_CE_HALT (1 << 24) #define CP_PFP_HALT (1 << 26) #define CP_ME_HALT (1 << 28) #define CP_COHER_CNTL2 0x85E8 #define CP_RB2_RPTR 0x86f8 #define CP_RB1_RPTR 0x86fc #define CP_RB0_RPTR 0x8700 #define CP_RB_WPTR_DELAY 0x8704 #define CP_QUEUE_THRESHOLDS 0x8760 #define ROQ_IB1_START(x) ((x) << 0) #define ROQ_IB2_START(x) ((x) << 8) #define CP_MEQ_THRESHOLDS 0x8764 #define MEQ1_START(x) ((x) << 0) #define MEQ2_START(x) ((x) << 8) #define CP_PERFMON_CNTL 0x87FC #define VGT_VTX_VECT_EJECT_REG 0x88B0 #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x) << 0) #define VC_ONLY 0 #define TC_ONLY 1 #define VC_AND_TC 2 #define AUTO_INVLD_EN(x) ((x) << 6) #define NO_AUTO 0 #define ES_AUTO 1 #define GS_AUTO 2 #define ES_AND_GS_AUTO 3 #define VGT_ESGS_RING_SIZE 0x88C8 #define VGT_GSVS_RING_SIZE 0x88CC #define VGT_GS_VERTEX_REUSE 0x88D4 #define VGT_PRIMITIVE_TYPE 0x8958 #define VGT_INDEX_TYPE 0x895C #define VGT_NUM_INDICES 0x8970 #define VGT_NUM_INSTANCES 0x8974 #define VGT_TF_RING_SIZE 0x8988 #define VGT_HS_OFFCHIP_PARAM 0x89B0 #define VGT_TF_MEMORY_BASE 0x89B8 #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc #define INACTIVE_CUS_MASK 0xFFFF0000 #define INACTIVE_CUS_SHIFT 16 #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 #define PA_CL_ENHANCE 0x8A14 #define CLIP_VTX_REORDER_ENA (1 << 0) #define NUM_CLIP_SEQ(x) ((x) << 1) #define PA_SU_LINE_STIPPLE_VALUE 0x8A60 #define PA_SC_LINE_STIPPLE_STATE 0x8B10 #define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) #define PA_SC_FIFO_SIZE 0x8BCC #define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0) #define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6) #define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15) #define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23) #define PA_SC_ENHANCE 0x8BF0 #define SQ_CONFIG 0x8C00 #define SQC_CACHES 0x8C08 #define SX_DEBUG_1 0x9060 #define SPI_STATIC_THREAD_MGMT_1 0x90E0 #define SPI_STATIC_THREAD_MGMT_2 0x90E4 #define 
SPI_STATIC_THREAD_MGMT_3 0x90E8 #define SPI_PS_MAX_WAVE_ID 0x90EC #define SPI_CONFIG_CNTL 0x9100 #define SPI_CONFIG_CNTL_1 0x913C #define VTX_DONE_DELAY(x) ((x) << 0) #define INTERP_ONE_PRIM_PER_ROW (1 << 4) #define CGTS_TCC_DISABLE 0x9148 #define CGTS_USER_TCC_DISABLE 0x914C #define TCC_DISABLE_MASK 0xFFFF0000 #define TCC_DISABLE_SHIFT 16 #define TA_CNTL_AUX 0x9508 #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) #define GB_ADDR_CONFIG 0x98F8 #define NUM_PIPES(x) ((x) << 0) #define NUM_PIPES_MASK 0x00000007 #define NUM_PIPES_SHIFT 0 #define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) #define PIPE_INTERLEAVE_SIZE_MASK 0x00000070 #define PIPE_INTERLEAVE_SIZE_SHIFT 4 #define NUM_SHADER_ENGINES(x) ((x) << 12) #define NUM_SHADER_ENGINES_MASK 0x00003000 #define NUM_SHADER_ENGINES_SHIFT 12 #define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) #define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000 #define SHADER_ENGINE_TILE_SIZE_SHIFT 16 #define NUM_GPUS(x) ((x) << 20) #define NUM_GPUS_MASK 0x00700000 #define NUM_GPUS_SHIFT 20 #define MULTI_GPU_TILE_SIZE(x) ((x) << 24) #define MULTI_GPU_TILE_SIZE_MASK 0x03000000 #define MULTI_GPU_TILE_SIZE_SHIFT 24 #define ROW_SIZE(x) ((x) << 28) #define ROW_SIZE_MASK 0x30000000 #define ROW_SIZE_SHIFT 28 #define GB_TILE_MODE0 0x9910 # define MICRO_TILE_MODE(x) ((x) << 0) # define ADDR_SURF_DISPLAY_MICRO_TILING 0 # define ADDR_SURF_THIN_MICRO_TILING 1 # define ADDR_SURF_DEPTH_MICRO_TILING 2 # define ARRAY_MODE(x) ((x) << 2) # define ARRAY_LINEAR_GENERAL 0 # define ARRAY_LINEAR_ALIGNED 1 # define ARRAY_1D_TILED_THIN1 2 # define ARRAY_2D_TILED_THIN1 4 # define PIPE_CONFIG(x) ((x) << 6) # define ADDR_SURF_P2 0 # define ADDR_SURF_P4_8x16 4 # define ADDR_SURF_P4_16x16 5 # define ADDR_SURF_P4_16x32 6 # define ADDR_SURF_P4_32x32 7 # define ADDR_SURF_P8_16x16_8x16 8 # define ADDR_SURF_P8_16x32_8x16 9 # define ADDR_SURF_P8_32x32_8x16 10 # define ADDR_SURF_P8_16x32_16x16 11 # define ADDR_SURF_P8_32x32_16x16 12 # define ADDR_SURF_P8_32x32_16x32 13 # define ADDR_SURF_P8_32x64_32x32 14 # define TILE_SPLIT(x) ((x) << 11) # define ADDR_SURF_TILE_SPLIT_64B 0 # define ADDR_SURF_TILE_SPLIT_128B 1 # define ADDR_SURF_TILE_SPLIT_256B 2 # define ADDR_SURF_TILE_SPLIT_512B 3 # define ADDR_SURF_TILE_SPLIT_1KB 4 # define ADDR_SURF_TILE_SPLIT_2KB 5 # define ADDR_SURF_TILE_SPLIT_4KB 6 # define BANK_WIDTH(x) ((x) << 14) # define ADDR_SURF_BANK_WIDTH_1 0 # define ADDR_SURF_BANK_WIDTH_2 1 # define ADDR_SURF_BANK_WIDTH_4 2 # define ADDR_SURF_BANK_WIDTH_8 3 # define BANK_HEIGHT(x) ((x) << 16) # define ADDR_SURF_BANK_HEIGHT_1 0 # define ADDR_SURF_BANK_HEIGHT_2 1 # define ADDR_SURF_BANK_HEIGHT_4 2 # define ADDR_SURF_BANK_HEIGHT_8 3 # define MACRO_TILE_ASPECT(x) ((x) << 18) # define ADDR_SURF_MACRO_ASPECT_1 0 # define ADDR_SURF_MACRO_ASPECT_2 1 # define ADDR_SURF_MACRO_ASPECT_4 2 # define ADDR_SURF_MACRO_ASPECT_8 3 # define NUM_BANKS(x) ((x) << 20) # define ADDR_SURF_2_BANK 0 # define ADDR_SURF_4_BANK 1 # define ADDR_SURF_8_BANK 2 # define ADDR_SURF_16_BANK 3 #define CB_PERFCOUNTER0_SELECT0 0x9a20 #define CB_PERFCOUNTER0_SELECT1 0x9a24 #define CB_PERFCOUNTER1_SELECT0 0x9a28 #define CB_PERFCOUNTER1_SELECT1 0x9a2c #define CB_PERFCOUNTER2_SELECT0 0x9a30 #define CB_PERFCOUNTER2_SELECT1 0x9a34 #define CB_PERFCOUNTER3_SELECT0 0x9a38 #define CB_PERFCOUNTER3_SELECT1 0x9a3c #define GC_USER_RB_BACKEND_DISABLE 0x9B7C #define BACKEND_DISABLE_MASK 0x00FF0000 #define BACKEND_DISABLE_SHIFT 16 #define TCP_CHAN_STEER_LO 0xac0c #define TCP_CHAN_STEER_HI 0xac10 #define CP_RB0_BASE 0xC100 #define CP_RB0_CNTL 0xC104 
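The recurring `-`/`+` pairs in these headers all make the same one-character fix: with a 32-bit int, 1 << 31 shifts a bit into the sign position, which is undefined behavior in C99/C11 and, where it happens to "work", produces a negative value that sign-extends when mixed with wider or unsigned types. 1U << 31 performs the shift in unsigned arithmetic and always yields 0x80000000. A minimal stand-alone sketch of the difference (not part of the patch; the program is illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		/*
		 * Shifting into the sign bit of a 32-bit int is undefined;
		 * where it "works" it yields INT_MIN, which sign-extends
		 * when widened to 64 bits.
		 */
		uint64_t bad = (uint64_t)(1 << 31);	/* 0xffffffff80000000 */

		/* Unsigned shift: well-defined, no sign extension. */
		uint64_t good = (uint64_t)(1U << 31);	/* 0x0000000080000000 */

		printf("%jx %jx\n", (uintmax_t)bad, (uintmax_t)good);
		return (0);
	}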
#define RB_BUFSZ(x) ((x) << 0)
#define RB_BLKSZ(x) ((x) << 8)
#define BUF_SWAP_32BIT (2 << 16)
#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
+#define RB_RPTR_WR_ENA (1U << 31)
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
#define CP_PFP_UCODE_ADDR 0xC150
#define CP_PFP_UCODE_DATA 0xC154
#define CP_ME_RAM_RADDR 0xC158
#define CP_ME_RAM_WADDR 0xC15C
#define CP_ME_RAM_DATA 0xC160
#define CP_CE_UCODE_ADDR 0xC168
#define CP_CE_UCODE_DATA 0xC16C
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
#define CP_RB1_RPTR_ADDR_HI 0xC18C
#define CP_RB1_WPTR 0xC190
#define CP_RB2_BASE 0xC194
#define CP_RB2_CNTL 0xC198
#define CP_RB2_RPTR_ADDR 0xC19C
#define CP_RB2_RPTR_ADDR_HI 0xC1A0
#define CP_RB2_WPTR 0xC1A4
#define CP_INT_CNTL_RING0 0xC1A8
#define CP_INT_CNTL_RING1 0xC1AC
#define CP_INT_CNTL_RING2 0xC1B0
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
# define TIME_STAMP_INT_ENABLE (1 << 26)
# define CP_RINGID2_INT_ENABLE (1 << 29)
# define CP_RINGID1_INT_ENABLE (1 << 30)
-# define CP_RINGID0_INT_ENABLE (1 << 31)
+# define CP_RINGID0_INT_ENABLE (1U << 31)
#define CP_INT_STATUS_RING0 0xC1B4
#define CP_INT_STATUS_RING1 0xC1B8
#define CP_INT_STATUS_RING2 0xC1BC
# define WAIT_MEM_SEM_INT_STAT (1 << 21)
# define TIME_STAMP_INT_STAT (1 << 26)
# define CP_RINGID2_INT_STAT (1 << 29)
# define CP_RINGID1_INT_STAT (1 << 30)
-# define CP_RINGID0_INT_STAT (1 << 31)
+# define CP_RINGID0_INT_STAT (1U << 31)
#define CP_DEBUG 0xC1FC
#define RLC_CNTL 0xC300
# define RLC_ENABLE (1 << 0)
#define RLC_RL_BASE 0xC304
#define RLC_RL_SIZE 0xC308
#define RLC_LB_CNTL 0xC30C
#define RLC_SAVE_AND_RESTORE_BASE 0xC310
#define RLC_LB_CNTR_MAX 0xC314
#define RLC_LB_CNTR_INIT 0xC318
#define RLC_CLEAR_STATE_RESTORE_BASE 0xC320
#define RLC_UCODE_ADDR 0xC32C
#define RLC_UCODE_DATA 0xC330
#define RLC_GPU_CLOCK_COUNT_LSB 0xC338
#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
#define PA_SC_RASTER_CONFIG 0x28350
# define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1
# define RASTER_CONFIG_RB_MAP_2 2
# define RASTER_CONFIG_RB_MAP_3 3
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
# define CACHE_FLUSH_TS (4 << 0)
# define CACHE_FLUSH (6 << 0)
# define CS_PARTIAL_FLUSH (7 << 0)
# define VGT_STREAMOUT_RESET (10 << 0)
# define END_OF_PIPE_INCR_DE (11 << 0)
# define END_OF_PIPE_IB_END (12 << 0)
# define RST_PIX_CNT (13 << 0)
# define VS_PARTIAL_FLUSH (15 << 0)
# define PS_PARTIAL_FLUSH (16 << 0)
# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
# define ZPASS_DONE (21 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
# define PERFCOUNTER_START (23 << 0)
# define PERFCOUNTER_STOP (24 << 0)
# define PIPELINESTAT_START (25 << 0)
# define PIPELINESTAT_STOP (26 << 0)
# define PERFCOUNTER_SAMPLE (27 << 0)
# define SAMPLE_PIPELINESTAT (30 << 0)
# define SAMPLE_STREAMOUTSTATS (32 << 0)
# define RESET_VTX_CNT (33 << 0)
# define VGT_FLUSH (36 << 0)
# define BOTTOM_OF_PIPE_TS (40 << 0)
# define DB_CACHE_FLUSH_AND_INV (42 << 0)
# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
# define FLUSH_AND_INV_DB_META (44 << 0)
# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
# define FLUSH_AND_INV_CB_META (46 << 0)
# define CS_DONE (47 << 0)
# define PS_DONE (48 << 0)
# define
FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0) # define THREAD_TRACE_START (51 << 0) # define THREAD_TRACE_STOP (52 << 0) # define THREAD_TRACE_FLUSH (54 << 0) # define THREAD_TRACE_FINISH (55 << 0) /* * PM4 */ #define PACKET_TYPE0 0 #define PACKET_TYPE1 1 #define PACKET_TYPE2 2 #define PACKET_TYPE3 3 #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ (((reg) >> 2) & 0xFFFF) | \ ((n) & 0x3FFF) << 16) #define CP_PACKET2 0x80000000 #define PACKET2_PAD_SHIFT 0 #define PACKET2_PAD_MASK (0x3fffffff << 0) #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ (((op) & 0xFF) << 8) | \ ((n) & 0x3FFF) << 16) #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) /* Packet 3 types */ #define PACKET3_NOP 0x10 #define PACKET3_SET_BASE 0x11 #define PACKET3_BASE_INDEX(x) ((x) << 0) #define GDS_PARTITION_BASE 2 #define CE_PARTITION_BASE 3 #define PACKET3_CLEAR_STATE 0x12 #define PACKET3_INDEX_BUFFER_SIZE 0x13 #define PACKET3_DISPATCH_DIRECT 0x15 #define PACKET3_DISPATCH_INDIRECT 0x16 #define PACKET3_ALLOC_GDS 0x1B #define PACKET3_WRITE_GDS_RAM 0x1C #define PACKET3_ATOMIC_GDS 0x1D #define PACKET3_ATOMIC 0x1E #define PACKET3_OCCLUSION_QUERY 0x1F #define PACKET3_SET_PREDICATION 0x20 #define PACKET3_REG_RMW 0x21 #define PACKET3_COND_EXEC 0x22 #define PACKET3_PRED_EXEC 0x23 #define PACKET3_DRAW_INDIRECT 0x24 #define PACKET3_DRAW_INDEX_INDIRECT 0x25 #define PACKET3_INDEX_BASE 0x26 #define PACKET3_DRAW_INDEX_2 0x27 #define PACKET3_CONTEXT_CONTROL 0x28 #define PACKET3_INDEX_TYPE 0x2A #define PACKET3_DRAW_INDIRECT_MULTI 0x2C #define PACKET3_DRAW_INDEX_AUTO 0x2D #define PACKET3_DRAW_INDEX_IMMD 0x2E #define PACKET3_NUM_INSTANCES 0x2F #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 #define PACKET3_INDIRECT_BUFFER_CONST 0x31 #define PACKET3_INDIRECT_BUFFER 0x32 #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 #define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 #define PACKET3_WRITE_DATA 0x37 #define WRITE_DATA_DST_SEL(x) ((x) << 8) /* 0 - register * 1 - memory (sync - via GRBM) * 2 - tc/l2 * 3 - gds * 4 - reserved * 5 - memory (async - direct) */ #define WR_ONE_ADDR (1 << 16) #define WR_CONFIRM (1 << 20) #define WRITE_DATA_ENGINE_SEL(x) ((x) << 30) /* 0 - me * 1 - pfp * 2 - ce */ #define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38 #define PACKET3_MEM_SEMAPHORE 0x39 #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_COPY_DW 0x3B #define PACKET3_WAIT_REG_MEM 0x3C #define PACKET3_MEM_WRITE 0x3D #define PACKET3_COPY_DATA 0x40 #define PACKET3_CP_DMA 0x41 /* 1. header * 2. SRC_ADDR_LO or DATA [31:0] * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] | * SRC_ADDR_HI [7:0] * 4. DST_ADDR_LO [31:0] * 5. DST_ADDR_HI [7:0] * 6. 
COMMAND [30:21] | BYTE_COUNT [20:0] */ # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) /* 0 - SRC_ADDR * 1 - GDS */ # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) /* 0 - ME * 1 - PFP */ # define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29) /* 0 - SRC_ADDR * 1 - GDS * 2 - DATA */ -# define PACKET3_CP_DMA_CP_SYNC (1 << 31) +# define PACKET3_CP_DMA_CP_SYNC (1U << 31) /* COMMAND */ # define PACKET3_CP_DMA_DIS_WC (1 << 21) # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) /* 0 - none * 1 - 8 in 16 * 2 - 8 in 32 * 3 - 8 in 64 */ # define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24) /* 0 - none * 1 - 8 in 16 * 2 - 8 in 32 * 3 - 8 in 64 */ # define PACKET3_CP_DMA_CMD_SAS (1 << 26) /* 0 - memory * 1 - register */ # define PACKET3_CP_DMA_CMD_DAS (1 << 27) /* 0 - memory * 1 - register */ # define PACKET3_CP_DMA_CMD_SAIC (1 << 28) # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) # define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30) #define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_DEST_BASE_0_ENA (1 << 0) # define PACKET3_DEST_BASE_1_ENA (1 << 1) # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) # define PACKET3_DB_DEST_BASE_ENA (1 << 14) # define PACKET3_DEST_BASE_2_ENA (1 << 19) # define PACKET3_DEST_BASE_3_ENA (1 << 21) # define PACKET3_TCL1_ACTION_ENA (1 << 22) # define PACKET3_TC_ACTION_ENA (1 << 23) # define PACKET3_CB_ACTION_ENA (1 << 25) # define PACKET3_DB_ACTION_ENA (1 << 26) # define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27) # define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29) #define PACKET3_ME_INITIALIZE 0x44 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 #define PACKET3_EVENT_WRITE 0x46 #define EVENT_TYPE(x) ((x) << 0) #define EVENT_INDEX(x) ((x) << 8) /* 0 - any non-TS event * 1 - ZPASS_DONE * 2 - SAMPLE_PIPELINESTAT * 3 - SAMPLE_STREAMOUTSTAT* * 4 - *S_PARTIAL_FLUSH * 5 - EOP events * 6 - EOS events * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT */ #define INV_L2 (1 << 20) /* INV TC L2 cache when EVENT_INDEX = 7 */ #define PACKET3_EVENT_WRITE_EOP 0x47 #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data * 2 - send 64bit data * 3 - send 64bit counter value */ #define INT_SEL(x) ((x) << 24) /* 0 - none * 1 - interrupt only (DATA_SEL = 0) * 2 - interrupt when data write is confirmed */ #define PACKET3_EVENT_WRITE_EOS 0x48 #define PACKET3_PREAMBLE_CNTL 0x4A # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) #define PACKET3_ONE_REG_WRITE 0x57 #define PACKET3_LOAD_CONFIG_REG 0x5F #define PACKET3_LOAD_CONTEXT_REG 0x60 #define PACKET3_LOAD_SH_REG 0x61 #define PACKET3_SET_CONFIG_REG 0x68 #define PACKET3_SET_CONFIG_REG_START 0x00008000 #define PACKET3_SET_CONFIG_REG_END 0x0000b000 #define PACKET3_SET_CONTEXT_REG 0x69 #define PACKET3_SET_CONTEXT_REG_START 0x00028000 #define PACKET3_SET_CONTEXT_REG_END 0x00029000 #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 #define PACKET3_SET_RESOURCE_INDIRECT 0x74 #define PACKET3_SET_SH_REG 0x76 #define PACKET3_SET_SH_REG_START 0x0000b000 #define PACKET3_SET_SH_REG_END 0x0000c000 #define PACKET3_SET_SH_REG_OFFSET 0x77 #define PACKET3_ME_WRITE 0x7A #define PACKET3_SCRATCH_RAM_WRITE 0x7D #define PACKET3_SCRATCH_RAM_READ 0x7E #define 
PACKET3_CE_WRITE 0x7F #define PACKET3_LOAD_CONST_RAM 0x80 #define PACKET3_WRITE_CONST_RAM 0x81 #define PACKET3_WRITE_CONST_RAM_OFFSET 0x82 #define PACKET3_DUMP_CONST_RAM 0x83 #define PACKET3_INCREMENT_CE_COUNTER 0x84 #define PACKET3_INCREMENT_DE_COUNTER 0x85 #define PACKET3_WAIT_ON_CE_COUNTER 0x86 #define PACKET3_WAIT_ON_DE_COUNTER 0x87 #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 #define PACKET3_SET_CE_DE_COUNTERS 0x89 #define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A #define PACKET3_SWITCH_BUFFER 0x8B /* ASYNC DMA - first instance at 0xd000, second at 0xd800 */ #define DMA0_REGISTER_OFFSET 0x0 /* not a register */ #define DMA1_REGISTER_OFFSET 0x800 /* not a register */ #define DMA_RB_CNTL 0xd000 # define DMA_RB_ENABLE (1 << 0) # define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ # define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) # define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ # define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ #define DMA_RB_BASE 0xd004 #define DMA_RB_RPTR 0xd008 #define DMA_RB_WPTR 0xd00c #define DMA_RB_RPTR_ADDR_HI 0xd01c #define DMA_RB_RPTR_ADDR_LO 0xd020 #define DMA_IB_CNTL 0xd024 # define DMA_IB_ENABLE (1 << 0) # define DMA_IB_SWAP_ENABLE (1 << 4) #define DMA_IB_RPTR 0xd028 #define DMA_CNTL 0xd02c # define TRAP_ENABLE (1 << 0) # define SEM_INCOMPLETE_INT_ENABLE (1 << 1) # define SEM_WAIT_INT_ENABLE (1 << 2) # define DATA_SWAP_ENABLE (1 << 3) # define FENCE_SWAP_ENABLE (1 << 4) # define CTXEMPTY_INT_ENABLE (1 << 28) #define DMA_STATUS_REG 0xd034 # define DMA_IDLE (1 << 0) #define DMA_TILING_CONFIG 0xd0b8 #define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \ (((b) & 0x1) << 26) | \ (((t) & 0x1) << 23) | \ (((s) & 0x1) << 22) | \ (((n) & 0xFFFFF) << 0)) #define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \ (((vmid) & 0xF) << 20) | \ (((n) & 0xFFFFF) << 0)) #define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \ (1 << 26) | \ (1 << 21) | \ (((n) & 0xFFFFF) << 0)) /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 #define DMA_PACKET_INDIRECT_BUFFER 0x4 #define DMA_PACKET_SEMAPHORE 0x5 #define DMA_PACKET_FENCE 0x6 #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_SRBM_WRITE 0x9 #define DMA_PACKET_CONSTANT_FILL 0xd #define DMA_PACKET_NOP 0xf #endif Index: head/sys/dev/drm2/ttm/ttm_bo.c =================================================================== --- head/sys/dev/drm2/ttm/ttm_bo.c (revision 258779) +++ head/sys/dev/drm2/ttm/ttm_bo.c (revision 258780) @@ -1,1885 +1,1885 @@ /************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: Thomas Hellstrom */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #define TTM_ASSERT_LOCKED(param) #define TTM_DEBUG(fmt, arg...) #define TTM_BO_HASH_ORDER 13 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob); MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects"); static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) { int i; for (i = 0; i <= TTM_PL_PRIV5; i++) if (flags & (1 << i)) { *mem_type = i; return 0; } return -EINVAL; } static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; printf(" has_type: %d\n", man->has_type); printf(" use_type: %d\n", man->use_type); printf(" flags: 0x%08X\n", man->flags); printf(" gpu_offset: 0x%08lX\n", man->gpu_offset); printf(" size: %ju\n", (uintmax_t)man->size); printf(" available_caching: 0x%08X\n", man->available_caching); printf(" default_caching: 0x%08X\n", man->default_caching); if (mem_type != TTM_PL_SYSTEM) (*man->func->debug)(man, TTM_PFX); } static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { int i, ret, mem_type; printf("No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { ret = ttm_mem_type_from_flags(placement->placement[i], &mem_type); if (ret) return; printf(" placement[%d]=0x%08X (%d)\n", i, placement->placement[i], mem_type); ttm_mem_type_debug(bo->bdev, mem_type); } } #if 0 static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob, char *buffer) { return snprintf(buffer, PAGE_SIZE, "%lu\n", (unsigned long) atomic_read(&glob->bo_count)); } #endif static inline uint32_t ttm_bo_type_flags(unsigned type) { return 1 << (type); } static void ttm_bo_release_list(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; size_t acc_size = bo->acc_size; MPASS(atomic_read(&bo->list_kref) == 0); MPASS(atomic_read(&bo->kref) == 0); MPASS(atomic_read(&bo->cpu_writers) == 0); MPASS(bo->sync_obj == NULL); MPASS(bo->mem.mm_node == NULL); MPASS(list_empty(&bo->lru)); MPASS(list_empty(&bo->ddestroy)); if (bo->ttm) ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); if (bo->destroy) bo->destroy(bo); else { free(bo, M_TTM_BO); } ttm_mem_global_free(bdev->glob->mem_glob, acc_size); } static int ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible) { const char *wmsg; int flags, ret; ret = 0; if (interruptible) { flags = PCATCH; wmsg = "ttbowi"; } else { flags = 0; wmsg = "ttbowu"; } while (ttm_bo_is_reserved(bo)) { ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0); if (ret != 0) break; } return (ret); } void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; MPASS(ttm_bo_is_reserved(bo)); if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { MPASS(list_empty(&bo->lru)); man = &bdev->man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); 
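		/* Each list a bo is put on holds its own list_kref
		 * reference; see ttm_bo_del_from_lru(). */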
		refcount_acquire(&bo->list_kref);
		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			refcount_acquire(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
	return put_count;
}

int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
    bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence)
{
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
-			if (unlikely(sequence - bo->val_seq < (1 << 31)))
+			if (unlikely(sequence - bo->val_seq < (1U << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bool wake_up = false;
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
-		if (unlikely((bo->val_seq - sequence < (1 << 31))
+		if (unlikely((bo->val_seq - sequence < (1U << 31))
			     || !bo->seq_valid))
			wake_up = true;

		/*
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
		 * val_seq. However this would only happen if seq_valid was
		 * written before val_seq was, and just means some slightly
		 * increased cpu usage
		 */
		bo->val_seq = sequence;
		bo->seq_valid = true;
		if (wake_up)
			wakeup(bo);
	} else {
		bo->seq_valid = false;
	}

	return 0;
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
    bool never_free)
{
	u_int old;

	old = atomic_fetchadd_int(&bo->list_kref, -count);
	if (old <= count) {
		if (never_free)
			panic("ttm_bo_ref_buf");
		ttm_bo_release_list(bo);
	}
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
    bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	mtx_lock(&bo->glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
	    sequence);
	if (likely(ret == 0)) {
		put_count = ttm_bo_del_from_lru(bo);
		mtx_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	} else
		mtx_unlock(&bo->glob->lru_lock);

	return ret;
}

int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
    bool interruptible, uint32_t sequence)
{
	bool wake_up = false;
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		if (bo->seq_valid && sequence == bo->val_seq) {
			DRM_ERROR(
			    "%s: bo->seq_valid && sequence == bo->val_seq",
			    __func__);
		}

		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

-	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+	if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid)
		wake_up = true;

	/**
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
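	 * The unsigned comparison (bo->val_seq - sequence < (1U << 31))
	 * is serial-number arithmetic: it is true exactly when val_seq
	 * is "ahead of" (or equal to) sequence modulo 2^32, so the test
	 * keeps working when the 32-bit sequence counter wraps around.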
*/ bo->val_seq = sequence; bo->seq_valid = true; if (wake_up) wakeup(bo); return 0; } int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, bool interruptible, uint32_t sequence) { struct ttm_bo_global *glob = bo->glob; int put_count, ret; mtx_lock(&glob->lru_lock); ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence); if (likely(!ret)) { put_count = ttm_bo_del_from_lru(bo); mtx_unlock(&glob->lru_lock); ttm_bo_list_ref_sub(bo, put_count, true); } else mtx_unlock(&glob->lru_lock); return ret; } void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) { ttm_bo_add_to_lru(bo); atomic_set(&bo->reserved, 0); wakeup(bo); } void ttm_bo_unreserve(struct ttm_buffer_object *bo) { struct ttm_bo_global *glob = bo->glob; mtx_lock(&glob->lru_lock); ttm_bo_unreserve_locked(bo); mtx_unlock(&glob->lru_lock); } /* * Call bo->mutex locked. */ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; int ret = 0; uint32_t page_flags = 0; TTM_ASSERT_LOCKED(&bo->mutex); bo->ttm = NULL; if (bdev->need_dma32) page_flags |= TTM_PAGE_FLAG_DMA32; switch (bo->type) { case ttm_bo_type_device: if (zero_alloc) page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; case ttm_bo_type_kernel: bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, page_flags, glob->dummy_read_page); if (unlikely(bo->ttm == NULL)) ret = -ENOMEM; break; case ttm_bo_type_sg: bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, page_flags | TTM_PAGE_FLAG_SG, glob->dummy_read_page); if (unlikely(bo->ttm == NULL)) { ret = -ENOMEM; break; } bo->ttm->sg = bo->sg; break; default: printf("[TTM] Illegal buffer object type\n"); ret = -EINVAL; break; } return ret; } static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool evict, bool interruptible, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; int ret = 0; if (old_is_pci || new_is_pci || ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { ret = ttm_mem_io_lock(old_man, true); if (unlikely(ret != 0)) goto out_err; ttm_bo_unmap_virtual_locked(bo); ttm_mem_io_unlock(old_man); } /* * Create and bind a ttm if required. 
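	 * Only memory types without TTM_MEMTYPE_FLAG_FIXED are backed by
	 * system pages and need a ttm; fixed apertures are not.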
*/ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { if (bo->ttm == NULL) { bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); ret = ttm_bo_add_ttm(bo, zero); if (ret) goto out_err; } ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); if (ret) goto out_err; if (mem->mem_type != TTM_PL_SYSTEM) { ret = ttm_tt_bind(bo->ttm, mem); if (ret) goto out_err; } if (bo->mem.mem_type == TTM_PL_SYSTEM) { if (bdev->driver->move_notify) bdev->driver->move_notify(bo, mem); bo->mem = *mem; mem->mm_node = NULL; goto moved; } } if (bdev->driver->move_notify) bdev->driver->move_notify(bo, mem); if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); else ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); if (ret) { if (bdev->driver->move_notify) { struct ttm_mem_reg tmp_mem = *mem; *mem = bo->mem; bo->mem = tmp_mem; bdev->driver->move_notify(bo, mem); bo->mem = *mem; *mem = tmp_mem; } goto out_err; } moved: if (bo->evicted) { ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); if (ret) printf("[TTM] Can not flush read caches\n"); bo->evicted = false; } if (bo->mem.mm_node) { bo->offset = (bo->mem.start << PAGE_SHIFT) + bdev->man[bo->mem.mem_type].gpu_offset; bo->cur_placement = bo->mem.placement; } else bo->offset = 0; return 0; out_err: new_man = &bdev->man[bo->mem.mem_type]; if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } return ret; } /** * Call bo::reserved. * Will release GPU memory type usage on destruction. * This is the place to put in driver specific hooks to release * driver private resources. * Will release the bo::reserved lock. */ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { if (bo->bdev->driver->move_notify) bo->bdev->driver->move_notify(bo, NULL); if (bo->ttm) { ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); wakeup(&bo); /* * Since the final reference to this bo may not be dropped by * the current task we have to put a memory barrier here to make * sure the changes done in this function are always visible. * * This function only needs protection against the final kref_put. */ mb(); } static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; struct ttm_bo_driver *driver = bdev->driver; void *sync_obj = NULL; int put_count; int ret; mtx_lock(&glob->lru_lock); ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); mtx_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, true); if (!ret && !bo->sync_obj) { mtx_unlock(&bdev->fence_lock); put_count = ttm_bo_del_from_lru(bo); mtx_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); ttm_bo_list_ref_sub(bo, put_count, true); return; } if (bo->sync_obj) sync_obj = driver->sync_obj_ref(bo->sync_obj); mtx_unlock(&bdev->fence_lock); if (!ret) { atomic_set(&bo->reserved, 0); wakeup(bo); } refcount_acquire(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); mtx_unlock(&glob->lru_lock); if (sync_obj) { driver->sync_obj_flush(sync_obj); driver->sync_obj_unref(&sync_obj); } taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq, ((hz / 100) < 1) ? 
1 : hz / 100); } /** * function ttm_bo_cleanup_refs_and_unlock * If bo idle, remove from delayed- and lru lists, and unref. * If not idle, do nothing. * * Must be called with lru_lock and reservation held, this function * will drop both before returning. * * @interruptible Any sleeps should occur interruptibly. * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. */ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_driver *driver = bdev->driver; struct ttm_bo_global *glob = bo->glob; int put_count; int ret; mtx_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, true); if (ret && !no_wait_gpu) { void *sync_obj; /* * Take a reference to the fence and unreserve, * at this point the buffer should be dead, so * no new sync objects can be attached. */ sync_obj = driver->sync_obj_ref(bo->sync_obj); mtx_unlock(&bdev->fence_lock); atomic_set(&bo->reserved, 0); wakeup(bo); mtx_unlock(&glob->lru_lock); ret = driver->sync_obj_wait(sync_obj, false, interruptible); driver->sync_obj_unref(&sync_obj); if (ret) return ret; /* * remove sync_obj with ttm_bo_wait, the wait should be * finished, and no new wait object should have been added. */ mtx_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, true); mtx_unlock(&bdev->fence_lock); if (ret) return ret; mtx_lock(&glob->lru_lock); ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); /* * We raced, and lost, someone else holds the reservation now, * and is probably busy in ttm_bo_cleanup_memtype_use. * * Even if it's not the case, because we finished waiting any * delayed destruction would succeed, so just return success * here. */ if (ret) { mtx_unlock(&glob->lru_lock); return 0; } } else mtx_unlock(&bdev->fence_lock); if (ret || unlikely(list_empty(&bo->ddestroy))) { atomic_set(&bo->reserved, 0); wakeup(bo); mtx_unlock(&glob->lru_lock); return ret; } put_count = ttm_bo_del_from_lru(bo); list_del_init(&bo->ddestroy); ++put_count; mtx_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); ttm_bo_list_ref_sub(bo, put_count, true); return 0; } /** * Traverse the delayed list, and call ttm_bo_cleanup_refs on all * encountered buffers. 
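 * A reference is taken on the following entry before the lru_lock is
 * dropped, so the walk stays safe while individual buffers are being
 * reserved and destroyed.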
*/ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { struct ttm_bo_global *glob = bdev->glob; struct ttm_buffer_object *entry = NULL; int ret = 0; mtx_lock(&glob->lru_lock); if (list_empty(&bdev->ddestroy)) goto out_unlock; entry = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object, ddestroy); refcount_acquire(&entry->list_kref); for (;;) { struct ttm_buffer_object *nentry = NULL; if (entry->ddestroy.next != &bdev->ddestroy) { nentry = list_first_entry(&entry->ddestroy, struct ttm_buffer_object, ddestroy); refcount_acquire(&nentry->list_kref); } ret = ttm_bo_reserve_nolru(entry, false, true, false, 0); if (remove_all && ret) { ret = ttm_bo_reserve_nolru(entry, false, false, false, 0); } if (!ret) ret = ttm_bo_cleanup_refs_and_unlock(entry, false, !remove_all); else mtx_unlock(&glob->lru_lock); if (refcount_release(&entry->list_kref)) ttm_bo_release_list(entry); entry = nentry; if (ret || !entry) goto out; mtx_lock(&glob->lru_lock); if (list_empty(&entry->ddestroy)) break; } out_unlock: mtx_unlock(&glob->lru_lock); out: if (entry && refcount_release(&entry->list_kref)) ttm_bo_release_list(entry); return ret; } static void ttm_bo_delayed_workqueue(void *arg, int pending __unused) { struct ttm_bo_device *bdev = arg; if (ttm_bo_delayed_delete(bdev, false)) { taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq, ((hz / 100) < 1) ? 1 : hz / 100); } } static void ttm_bo_release(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; rw_wlock(&bdev->vm_lock); if (likely(bo->vm_node != NULL)) { RB_REMOVE(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo); drm_mm_put_block(bo->vm_node); bo->vm_node = NULL; } rw_wunlock(&bdev->vm_lock); ttm_mem_io_lock(man, false); ttm_mem_io_free_vm(bo); ttm_mem_io_unlock(man); ttm_bo_cleanup_refs_or_queue(bo); if (refcount_release(&bo->list_kref)) ttm_bo_release_list(bo); } void ttm_bo_unref(struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo = *p_bo; *p_bo = NULL; if (refcount_release(&bo->kref)) ttm_bo_release(bo); } int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) { int pending; taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending); if (pending) taskqueue_drain_timeout(taskqueue_thread, &bdev->wq); return (pending); } void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) { if (resched) { taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq, ((hz / 100) < 1) ? 
1 : hz / 100); } } static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg evict_mem; struct ttm_placement placement; int ret = 0; mtx_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); mtx_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { if (ret != -ERESTART) { printf("[TTM] Failed to expire sync object before buffer eviction\n"); } goto out; } MPASS(ttm_bo_is_reserved(bo)); evict_mem = bo->mem; evict_mem.mm_node = NULL; evict_mem.bus.io_reserved_vm = false; evict_mem.bus.io_reserved_count = 0; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 0; placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, no_wait_gpu); if (ret) { if (ret != -ERESTART) { printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n", bo); ttm_bo_mem_space_debug(bo, &placement); } goto out; } ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait_gpu); if (ret) { if (ret != -ERESTART) printf("[TTM] Buffer eviction failed\n"); ttm_bo_mem_put(bo, &evict_mem); goto out; } bo->evicted = true; out: return ret; } static int ttm_mem_evict_first(struct ttm_bo_device *bdev, uint32_t mem_type, bool interruptible, bool no_wait_gpu) { struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_buffer_object *bo; int ret = -EBUSY, put_count; mtx_lock(&glob->lru_lock); list_for_each_entry(bo, &man->lru, lru) { ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); if (!ret) break; } if (ret) { mtx_unlock(&glob->lru_lock); return ret; } refcount_acquire(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, no_wait_gpu); if (refcount_release(&bo->list_kref)) ttm_bo_release_list(bo); return ret; } put_count = ttm_bo_del_from_lru(bo); mtx_unlock(&glob->lru_lock); MPASS(ret == 0); ttm_bo_list_ref_sub(bo, put_count, true); ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); ttm_bo_unreserve(bo); if (refcount_release(&bo->list_kref)) ttm_bo_release_list(bo); return ret; } void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; if (mem->mm_node) (*man->func->put_node)(man, mem); } /** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. */ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, uint32_t mem_type, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; int ret; do { ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret != 0)) return ret; if (mem->mm_node) break; ret = ttm_mem_evict_first(bdev, mem_type, interruptible, no_wait_gpu); if (unlikely(ret != 0)) return ret; } while (1); if (mem->mm_node == NULL) return -ENOMEM; mem->mem_type = mem_type; return 0; } static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, uint32_t cur_placement, uint32_t proposed_placement) { uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; /** * Keep current caching if possible. 
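	 * Otherwise fall back, in order, to the manager's default
	 * caching, then CACHED, then WC, and finally UNCACHED.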
*/ if ((cur_placement & caching) != 0) result |= (cur_placement & caching); else if ((man->default_caching & caching) != 0) result |= man->default_caching; else if ((TTM_PL_FLAG_CACHED & caching) != 0) result |= TTM_PL_FLAG_CACHED; else if ((TTM_PL_FLAG_WC & caching) != 0) result |= TTM_PL_FLAG_WC; else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) result |= TTM_PL_FLAG_UNCACHED; return result; } static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, uint32_t mem_type, uint32_t proposed_placement, uint32_t *masked_placement) { uint32_t cur_flags = ttm_bo_type_flags(mem_type); if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) return false; if ((proposed_placement & man->available_caching) == 0) return false; cur_flags |= (proposed_placement & man->available_caching); *masked_placement = cur_flags; return true; } /** * Creates space for memory region @mem according to its type. * * This function first searches for free space in compatible memory types in * the priority order defined by the driver. If free space isn't found, then * ttm_bo_mem_force_space is attempted in priority order to evict and find * space. */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; uint32_t mem_type = TTM_PL_SYSTEM; uint32_t cur_flags = 0; bool type_found = false; bool type_ok = false; bool has_erestartsys = false; int i, ret; mem->mm_node = NULL; for (i = 0; i < placement->num_placement; ++i) { ret = ttm_mem_type_from_flags(placement->placement[i], &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; type_ok = ttm_bo_mt_compatible(man, mem_type, placement->placement[i], &cur_flags); if (!type_ok) continue; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); /* * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ ttm_flag_masked(&cur_flags, placement->placement[i], ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) break; if (man->has_type && man->use_type) { type_found = true; ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret)) return ret; } if (mem->mm_node) break; } if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { mem->mem_type = mem_type; mem->placement = cur_flags; return 0; } if (!type_found) return -EINVAL; for (i = 0; i < placement->num_busy_placement; ++i) { ret = ttm_mem_type_from_flags(placement->busy_placement[i], &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; if (!man->has_type) continue; if (!ttm_bo_mt_compatible(man, mem_type, placement->busy_placement[i], &cur_flags)) continue; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); /* * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ ttm_flag_masked(&cur_flags, placement->busy_placement[i], ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) { mem->mem_type = mem_type; mem->placement = cur_flags; mem->mm_node = NULL; return 0; } ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, interruptible, no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; return 0; } if (ret == -ERESTART) has_erestartsys = true; } ret = (has_erestartsys) ? 
-ERESTART : -ENOMEM; return ret; } static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_gpu) { int ret = 0; struct ttm_mem_reg mem; struct ttm_bo_device *bdev = bo->bdev; MPASS(ttm_bo_is_reserved(bo)); /* * FIXME: It's possible to pipeline buffer moves. * Have the driver move function wait for idle when necessary, * instead of doing it here. */ mtx_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); mtx_unlock(&bdev->fence_lock); if (ret) return ret; mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; mem.bus.io_reserved_vm = false; mem.bus.io_reserved_count = 0; /* * Determine where to move the buffer. */ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_gpu); if (ret) goto out_unlock; ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_gpu); out_unlock: if (ret && mem.mm_node) ttm_bo_mem_put(bo, &mem); return ret; } static int ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem) { int i; if (mem->mm_node && placement->lpfn != 0 && (mem->start < placement->fpfn || mem->start + mem->num_pages > placement->lpfn)) return -1; for (i = 0; i < placement->num_placement; i++) { if ((placement->placement[i] & mem->placement & TTM_PL_MASK_CACHING) && (placement->placement[i] & mem->placement & TTM_PL_MASK_MEM)) return i; } return -1; } int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_gpu) { int ret; MPASS(ttm_bo_is_reserved(bo)); /* Check that range is valid */ if (placement->lpfn || placement->fpfn) if (placement->fpfn > placement->lpfn || (placement->lpfn - placement->fpfn) < bo->num_pages) return -EINVAL; /* * Check whether we need to move buffer. */ ret = ttm_bo_mem_compat(placement, &bo->mem); if (ret < 0) { ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_gpu); if (ret) return ret; } else { /* * Use the access and other non-mapping-related flag bits from * the compatible memory placement flags to the active flags */ ttm_flag_masked(&bo->mem.placement, placement->placement[ret], ~TTM_PL_MASK_MEMTYPE); } /* * We might need to add a TTM. 
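	 * A buffer placed in TTM_PL_SYSTEM has no backing pages until a
	 * ttm is attached, so allocate one here if it is still missing.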
*/ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ret = ttm_bo_add_ttm(bo, true); if (ret) return ret; } return 0; } int ttm_bo_check_placement(struct ttm_buffer_object *bo, struct ttm_placement *placement) { MPASS(!((placement->fpfn || placement->lpfn) && (bo->mem.num_pages > (placement->lpfn - placement->fpfn)))); return 0; } int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, bool interruptible, struct vm_object *persistent_swap_storage, size_t acc_size, struct sg_table *sg, void (*destroy) (struct ttm_buffer_object *)) { int ret = 0; unsigned long num_pages; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); if (ret) { printf("[TTM] Out of kernel memory\n"); if (destroy) (*destroy)(bo); else free(bo, M_TTM_BO); return -ENOMEM; } num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (num_pages == 0) { printf("[TTM] Illegal buffer object size\n"); if (destroy) (*destroy)(bo); else free(bo, M_TTM_BO); ttm_mem_global_free(mem_glob, acc_size); return -EINVAL; } bo->destroy = destroy; refcount_init(&bo->kref, 1); refcount_init(&bo->list_kref, 1); atomic_set(&bo->cpu_writers, 0); atomic_set(&bo->reserved, 1); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); INIT_LIST_HEAD(&bo->io_reserve_lru); bo->bdev = bdev; bo->glob = bdev->glob; bo->type = type; bo->num_pages = num_pages; bo->mem.size = num_pages << PAGE_SHIFT; bo->mem.mem_type = TTM_PL_SYSTEM; bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; bo->mem.bus.io_reserved_vm = false; bo->mem.bus.io_reserved_count = 0; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); bo->seq_valid = false; bo->persistent_swap_storage = persistent_swap_storage; bo->acc_size = acc_size; bo->sg = sg; atomic_inc(&bo->glob->bo_count); ret = ttm_bo_check_placement(bo, placement); if (unlikely(ret != 0)) goto out_err; /* * For ttm_bo_type_device buffers, allocate * address space from the device. 
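	 * (The check below also covers ttm_bo_type_sg objects, which get
	 * an address-space node here as well.)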
*/ if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) { ret = ttm_bo_setup_vm(bo); if (ret) goto out_err; } ret = ttm_bo_validate(bo, placement, interruptible, false); if (ret) goto out_err; ttm_bo_unreserve(bo); return 0; out_err: ttm_bo_unreserve(bo); ttm_bo_unref(&bo); return ret; } size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, unsigned struct_size) { unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; size_t size = 0; size += ttm_round_pot(struct_size); size += PAGE_ALIGN(npages * sizeof(void *)); size += ttm_round_pot(sizeof(struct ttm_tt)); return size; } size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, unsigned struct_size) { unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; size_t size = 0; size += ttm_round_pot(struct_size); size += PAGE_ALIGN(npages * sizeof(void *)); size += PAGE_ALIGN(npages * sizeof(dma_addr_t)); size += ttm_round_pot(sizeof(struct ttm_dma_tt)); return size; } int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, bool interruptible, struct vm_object *persistent_swap_storage, struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo; size_t acc_size; int ret; bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO); acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, interruptible, persistent_swap_storage, acc_size, NULL, NULL); if (likely(ret == 0)) *p_bo = bo; return ret; } static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, unsigned mem_type, bool allow_errors) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; int ret; /* * Can't use standard list traversal since we're unlocking. 
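	 * The list can change while the lock is dropped, so restart from
	 * the head after every eviction.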
*/ mtx_lock(&glob->lru_lock); while (!list_empty(&man->lru)) { mtx_unlock(&glob->lru_lock); ret = ttm_mem_evict_first(bdev, mem_type, false, false); if (ret) { if (allow_errors) { return ret; } else { printf("[TTM] Cleanup eviction failed\n"); } } mtx_lock(&glob->lru_lock); } mtx_unlock(&glob->lru_lock); return 0; } int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct ttm_mem_type_manager *man; int ret = -EINVAL; if (mem_type >= TTM_NUM_MEM_TYPES) { printf("[TTM] Illegal memory type %d\n", mem_type); return ret; } man = &bdev->man[mem_type]; if (!man->has_type) { printf("[TTM] Trying to take down uninitialized memory manager type %u\n", mem_type); return ret; } man->use_type = false; man->has_type = false; ret = 0; if (mem_type > 0) { ttm_bo_force_list_clean(bdev, mem_type, false); ret = (*man->func->takedown)(man); } return ret; } int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { printf("[TTM] Illegal memory manager memory type %u\n", mem_type); return -EINVAL; } if (!man->has_type) { printf("[TTM] Memory type %u has not been initialized\n", mem_type); return 0; } return ttm_bo_force_list_clean(bdev, mem_type, true); } int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, unsigned long p_size) { int ret = -EINVAL; struct ttm_mem_type_manager *man; MPASS(type < TTM_NUM_MEM_TYPES); man = &bdev->man[type]; MPASS(!man->has_type); man->io_reserve_fastpath = true; man->use_io_reserve_lru = false; sx_init(&man->io_reserve_mutex, "ttmman"); INIT_LIST_HEAD(&man->io_reserve_lru); ret = bdev->driver->init_mem_type(bdev, type, man); if (ret) return ret; man->bdev = bdev; ret = 0; if (type != TTM_PL_SYSTEM) { ret = (*man->func->init)(man, p_size); if (ret) return ret; } man->has_type = true; man->use_type = true; man->size = p_size; INIT_LIST_HEAD(&man->lru); return 0; } static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob) { ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); vm_page_free(glob->dummy_read_page); } void ttm_bo_global_release(struct drm_global_reference *ref) { struct ttm_bo_global *glob = ref->object; if (refcount_release(&glob->kobj_ref)) ttm_bo_global_kobj_release(glob); } int ttm_bo_global_init(struct drm_global_reference *ref) { struct ttm_bo_global_ref *bo_ref = container_of(ref, struct ttm_bo_global_ref, ref); struct ttm_bo_global *glob = ref->object; int ret; sx_init(&glob->device_list_mutex, "ttmdlm"); mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF); glob->mem_glob = bo_ref->mem_glob; glob->dummy_read_page = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ, 1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE); if (unlikely(glob->dummy_read_page == NULL)) { ret = -ENOMEM; goto out_no_drp; } INIT_LIST_HEAD(&glob->swap_lru); INIT_LIST_HEAD(&glob->device_list); ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); if (unlikely(ret != 0)) { printf("[TTM] Could not register buffer object swapout\n"); goto out_no_shrink; } atomic_set(&glob->bo_count, 0); refcount_init(&glob->kobj_ref, 1); return (0); out_no_shrink: vm_page_free(glob->dummy_read_page); out_no_drp: free(glob, M_DRM_GLOBAL); return ret; } int ttm_bo_device_release(struct ttm_bo_device *bdev) { int ret = 0; unsigned i = TTM_NUM_MEM_TYPES; struct ttm_mem_type_manager *man; struct ttm_bo_global *glob = bdev->glob; while (i--) { man = &bdev->man[i]; if 
(man->has_type) { man->use_type = false; if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { ret = -EBUSY; printf("[TTM] DRM memory manager type %d is not clean\n", i); } man->has_type = false; } } sx_xlock(&glob->device_list_mutex); list_del(&bdev->device_list); sx_xunlock(&glob->device_list_mutex); if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL)) taskqueue_drain_timeout(taskqueue_thread, &bdev->wq); while (ttm_bo_delayed_delete(bdev, true)) ; mtx_lock(&glob->lru_lock); if (list_empty(&bdev->ddestroy)) TTM_DEBUG("Delayed destroy list was clean\n"); if (list_empty(&bdev->man[0].lru)) TTM_DEBUG("Swap list was clean\n"); mtx_unlock(&glob->lru_lock); MPASS(drm_mm_clean(&bdev->addr_space_mm)); rw_wlock(&bdev->vm_lock); drm_mm_takedown(&bdev->addr_space_mm); rw_wunlock(&bdev->vm_lock); return ret; } int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob, struct ttm_bo_driver *driver, uint64_t file_page_offset, bool need_dma32) { int ret = -EINVAL; rw_init(&bdev->vm_lock, "ttmvml"); bdev->driver = driver; memset(bdev->man, 0, sizeof(bdev->man)); /* * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. */ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); if (unlikely(ret != 0)) goto out_no_sys; RB_INIT(&bdev->addr_space_rb); ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); if (unlikely(ret != 0)) goto out_no_addr_mm; TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0, ttm_bo_delayed_workqueue, bdev); INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = NULL; bdev->glob = glob; bdev->need_dma32 = need_dma32; bdev->val_seq = 0; mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF); sx_xlock(&glob->device_list_mutex); list_add_tail(&bdev->device_list, &glob->device_list); sx_xunlock(&glob->device_list_mutex); return 0; out_no_addr_mm: ttm_bo_clean_mm(bdev, 0); out_no_sys: return ret; } /* * buffer object vm functions. */ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) { if (mem->mem_type == TTM_PL_SYSTEM) return false; if (man->flags & TTM_MEMTYPE_FLAG_CMA) return false; if (mem->placement & TTM_PL_FLAG_CACHED) return false; } return true; } void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) { ttm_bo_release_mmap(bo); ttm_mem_io_free_vm(bo); } void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; ttm_mem_io_lock(man, false); ttm_bo_unmap_virtual_locked(bo); ttm_mem_io_unlock(man); } static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; /* The caller acquired bdev->vm_lock. */ RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo); } /** * ttm_bo_setup_vm: * * @bo: the buffer to allocate address space for * * Allocate address space in the drm device so that applications * can mmap the buffer and access the contents. This only * applies to ttm_bo_type_device objects as others are not * placed in the drm device address space. 
*/ static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; int ret; retry_pre_get: ret = drm_mm_pre_get(&bdev->addr_space_mm); if (unlikely(ret != 0)) return ret; rw_wlock(&bdev->vm_lock); bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, bo->mem.num_pages, 0, 0); if (unlikely(bo->vm_node == NULL)) { ret = -ENOMEM; goto out_unlock; } bo->vm_node = drm_mm_get_block_atomic(bo->vm_node, bo->mem.num_pages, 0); if (unlikely(bo->vm_node == NULL)) { rw_wunlock(&bdev->vm_lock); goto retry_pre_get; } ttm_bo_vm_insert_rb(bo); rw_wunlock(&bdev->vm_lock); bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; return 0; out_unlock: rw_wunlock(&bdev->vm_lock); return ret; } int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) { struct ttm_bo_driver *driver = bo->bdev->driver; struct ttm_bo_device *bdev = bo->bdev; void *sync_obj; int ret = 0; if (likely(bo->sync_obj == NULL)) return 0; while (bo->sync_obj) { if (driver->sync_obj_signaled(bo->sync_obj)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); mtx_unlock(&bdev->fence_lock); driver->sync_obj_unref(&tmp_obj); mtx_lock(&bdev->fence_lock); continue; } if (no_wait) return -EBUSY; sync_obj = driver->sync_obj_ref(bo->sync_obj); mtx_unlock(&bdev->fence_lock); ret = driver->sync_obj_wait(sync_obj, lazy, interruptible); if (unlikely(ret != 0)) { driver->sync_obj_unref(&sync_obj); mtx_lock(&bdev->fence_lock); return ret; } mtx_lock(&bdev->fence_lock); if (likely(bo->sync_obj == sync_obj)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); mtx_unlock(&bdev->fence_lock); driver->sync_obj_unref(&sync_obj); driver->sync_obj_unref(&tmp_obj); mtx_lock(&bdev->fence_lock); } else { mtx_unlock(&bdev->fence_lock); driver->sync_obj_unref(&sync_obj); mtx_lock(&bdev->fence_lock); } } return 0; } int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) { struct ttm_bo_device *bdev = bo->bdev; int ret = 0; /* * Using ttm_bo_reserve makes sure the lru lists are updated. */ ret = ttm_bo_reserve(bo, true, no_wait, false, 0); if (unlikely(ret != 0)) return ret; mtx_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, true, no_wait); mtx_unlock(&bdev->fence_lock); if (likely(ret == 0)) atomic_inc(&bo->cpu_writers); ttm_bo_unreserve(bo); return ret; } void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) { atomic_dec(&bo->cpu_writers); } /** * A buffer object shrink method that tries to swap out the first * buffer object on the bo_global::swap_lru list. */ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) { struct ttm_bo_global *glob = container_of(shrink, struct ttm_bo_global, shrink); struct ttm_buffer_object *bo; int ret = -EBUSY; int put_count; uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); mtx_lock(&glob->lru_lock); list_for_each_entry(bo, &glob->swap_lru, swap) { ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); if (!ret) break; } if (ret) { mtx_unlock(&glob->lru_lock); return ret; } refcount_acquire(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); if (refcount_release(&bo->list_kref)) ttm_bo_release_list(bo); return ret; } put_count = ttm_bo_del_from_lru(bo); mtx_unlock(&glob->lru_lock); ttm_bo_list_ref_sub(bo, put_count, true); /** * Wait for GPU, then move to system cached. 
*/ mtx_lock(&bo->bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, false); mtx_unlock(&bo->bdev->fence_lock); if (unlikely(ret != 0)) goto out; if ((bo->mem.placement & swap_placement) != swap_placement) { struct ttm_mem_reg evict_mem; evict_mem = bo->mem; evict_mem.mm_node = NULL; evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; evict_mem.mem_type = TTM_PL_SYSTEM; ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false); if (unlikely(ret != 0)) goto out; } ttm_bo_unmap_virtual(bo); /** * Swap out. Buffer will be swapped in again as soon as * anyone tries to access a ttm page. */ if (bo->bdev->driver->swap_notify) bo->bdev->driver->swap_notify(bo); ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); out: /** * * Unreserve without putting on LRU to avoid swapping out an * already swapped buffer. */ atomic_set(&bo->reserved, 0); wakeup(bo); if (refcount_release(&bo->list_kref)) ttm_bo_release_list(bo); return ret; } void ttm_bo_swapout_all(struct ttm_bo_device *bdev) { while (ttm_bo_swapout(&bdev->glob->shrink) == 0) ; } Index: head/sys/dev/e1000/e1000_82575.h =================================================================== --- head/sys/dev/e1000/e1000_82575.h (revision 258779) +++ head/sys/dev/e1000/e1000_82575.h (revision 258780) @@ -1,519 +1,519 @@ /****************************************************************************** Copyright (c) 2001-2013, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_DEF1_DEF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_OFF1_ON2)) /* * Receive Address Register Count * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * These entries are also used for MAC-based filtering. */ /* * For 82576, there are an additional set of RARs that begin at an offset * separate from the first set of RARs. 
*/ #define E1000_RAR_ENTRIES_82575 16 #define E1000_RAR_ENTRIES_82576 24 #define E1000_RAR_ENTRIES_82580 24 #define E1000_RAR_ENTRIES_I350 32 #define E1000_SW_SYNCH_MB 0x00000100 #define E1000_STAT_DEV_RST_SET 0x00100000 #define E1000_CTRL_DEV_RST 0x20000000 #ifdef E1000_BIT_FIELDS struct e1000_adv_data_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ union { u32 data; struct { u32 datalen:16; /* Data buffer length */ u32 rsvd:4; u32 dtyp:4; /* Descriptor type */ u32 dcmd:8; /* Descriptor command */ } config; } lower; union { u32 data; struct { u32 status:4; /* Descriptor status */ u32 idx:4; u32 popts:6; /* Packet Options */ u32 paylen:18; /* Payload length */ } options; } upper; }; #define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ #define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ #define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ #define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ #define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ #define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ #define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ #define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ #define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ #define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ #define E1000_ADV_DCMD_RS 0x8 /* Report Status */ #define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ #define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ /* Extended Device Control */ #define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ struct e1000_adv_context_desc { union { u32 ip_config; struct { u32 iplen:9; u32 maclen:7; u32 vlan_tag:16; } fields; } ip_setup; u32 seq_num; union { u64 l4_config; struct { u32 mkrloc:9; u32 tucmd:11; u32 dtyp:4; u32 adv:8; u32 rsvd:4; u32 idx:4; u32 l4len:8; u32 mss:16; } fields; } l4_setup; }; #endif /* SRRCTL bit definitions */ #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ #define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 #define E1000_SRRCTL_TIMESTAMP 0x40000000 #define E1000_SRRCTL_DROP_EN 0x80000000 #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 #define E1000_TX_HEAD_WB_ENABLE 0x1 #define E1000_TX_SEQNUM_WB_ENABLE 0x2 #define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 #define E1000_MRQC_ENABLE_VMDQ 0x00000003 #define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 #define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 #define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 #define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 #define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ E1000_VMRCTL_MIRROR_PORT_SHIFT) #define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) #define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) #define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) #define E1000_EICR_TX_QUEUE ( \ E1000_EICR_TX_QUEUE0 | \ E1000_EICR_TX_QUEUE1 | \ E1000_EICR_TX_QUEUE2 | \ E1000_EICR_TX_QUEUE3) #define E1000_EICR_RX_QUEUE ( \ E1000_EICR_RX_QUEUE0 | \ 
E1000_EICR_RX_QUEUE1 | \ E1000_EICR_RX_QUEUE2 | \ E1000_EICR_RX_QUEUE3) #define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE #define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE #define EIMS_ENABLE_MASK ( \ E1000_EIMS_RX_QUEUE | \ E1000_EIMS_TX_QUEUE | \ E1000_EIMS_TCP_TIMER | \ E1000_EIMS_OTHER) /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ #define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ #define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ #define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ #define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ #define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ #define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ #define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ #define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ #define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ #define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { union { __le32 data; struct { __le16 pkt_info; /*RSS type, Pkt type*/ /* Split Header, header buffer len */ __le16 hdr_info; } hs_rss; } lo_dword; union { __le32 rss; /* RSS Hash */ struct { __le16 ip_id; /* IP id */ __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { __le32 status_error; /* ext status/error */ __le16 length; /* Packet length */ __le16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; #define E1000_RXDADV_RSSTYPE_MASK 0x0000000F #define E1000_RXDADV_RSSTYPE_SHIFT 12 #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 #define E1000_RXDADV_SPLITHEADER_EN 0x00001000 #define E1000_RXDADV_SPH 0x8000 #define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ #define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ #define E1000_RXDADV_ERR_HBO 0x00800000 /* RSS Hash results */ #define E1000_RXDADV_RSSTYPE_NONE 0x00000000 #define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 #define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 #define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 #define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 #define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 #define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 #define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 #define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 #define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 /* RSS Packet Types as indicated in the receive descriptor */ #define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 #define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 #define E1000_RXDADV_PKTTYPE_NONE 0x00000000 #define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ #define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ #define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ #define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ #define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ #define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ #define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ #define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ #define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ #define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ #define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ #define 
E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ #define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ #define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ /* LinkSec results */ /* Security Processing bit Indication */ #define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 #define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 #define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 #define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 #define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 #define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 #define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 #define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 #define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 #define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { struct { __le64 buffer_addr; /* Address of descriptor's data buf */ __le32 cmd_type_len; __le32 olinfo_status; } read; struct { __le64 rsvd; /* Reserved */ __le32 nxtseq_seed; __le32 status; } wb; }; /* Adv Transmit Descriptor Config Masks */ #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ #define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ #define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ #define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ #define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ #define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ #define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ #define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ /* 1st & Last TSO-full iSCSI PDU*/ #define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 #define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { __le32 vlan_macip_lens; __le32 seqnum_seed; __le32 type_tucmd_mlhl; __le32 mss_l4len_idx; }; #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ #define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ #define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ #define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ #define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ /* IPSec Encrypt Enable for ESP */ #define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* Req requires Markers and CRC */ #define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ /* Adv ctxt IPSec SA IDX mask */ #define 
E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF /* Adv ctxt IPSec ESP len mask */ #define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF /* Additional Transmit Descriptor Control definitions */ #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ #define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ /* Tx Queue Arbitration Priority 0=low, 1=high */ #define E1000_TXDCTL_PRIORITY 0x08000000 /* Additional Receive Descriptor Control definitions */ #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ #define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */ /* Direct Cache Access (DCA) definitions */ #define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ #define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ #define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ #define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ #define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ #define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ #define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ #define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ #define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ #define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ /* Additional interrupt register bit definitions */ #define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ #define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ #define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ /* ETQF register bit definitions */ #define E1000_ETQF_FILTER_ENABLE (1 << 26) #define E1000_ETQF_IMM_INT (1 << 29) #define E1000_ETQF_1588 (1 << 30) -#define E1000_ETQF_QUEUE_ENABLE (1 << 31) +#define E1000_ETQF_QUEUE_ENABLE (1U << 31) /* * ETQF filter list: one static filter per filter consumer. This is * to avoid filter collisions later. Add new filters * here!! 
* * Current filters: * EAPOL 802.1x (0x888e): Filter 0 */ #define E1000_ETQF_FILTER_EAPOL 0 #define E1000_FTQF_VF_BP 0x00008000 #define E1000_FTQF_1588_TIME_STAMP 0x08000000 #define E1000_FTQF_MASK 0xF0000000 #define E1000_FTQF_MASK_PROTO_BP 0x10000000 #define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 #define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 #define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 #define E1000_NVM_APME_82575 0x0400 #define MAX_NUM_VFS 7 #define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 #define E1000_DTXSWC_LLE_SHIFT 16 -#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1U << 31) /* global VF LB enable */ /* Easy defines for setting default pool, would normally be left a zero */ #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 #define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) /* Other useful VMD_CTL register defines */ #define E1000_VT_CTL_IGNORE_MAC (1 << 28) #define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) #define E1000_VT_CTL_VM_REPL_EN (1 << 30) /* Per VM Offload register setup */ #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ #define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ #define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ #define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ #define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ #define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ #define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ #define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ #define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ #define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ #define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ #define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */ #define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ #define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ #define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ #define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ #define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ #define E1000_VLVF_ARRAY_SIZE 32 #define E1000_VLVF_VLANID_MASK 0x00000FFF #define E1000_VLVF_POOLSEL_SHIFT 12 #define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) #define E1000_VLVF_LVLAN 0x00100000 #define E1000_VLVF_VLANID_ENABLE 0x80000000 #define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ #define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ #define E1000_IOVCTL 0x05BBC #define E1000_IOVCTL_REUSE_VFQ 0x00000001 #define E1000_RPLOLR_STRVLAN 0x40000000 #define E1000_RPLOLR_STRCRC 0x80000000 #define E1000_TCTL_EXT_COLD 0x000FFC00 #define E1000_TCTL_EXT_COLD_SHIFT 10 #define E1000_DTXCTL_8023LL 0x0004 #define E1000_DTXCTL_VLAN_ADDED 0x0008 #define E1000_DTXCTL_OOS_ENABLE 0x0010 #define E1000_DTXCTL_MDP_EN 0x0020 #define E1000_DTXCTL_SPOOF_INT 0x0040 #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) #define ALL_QUEUES 0xFFFF /* Rx packet buffer size defines */ #define E1000_RXPBS_SIZE_MASK_82576 0x0000007F void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw,
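/*
 * Note on the (1 << 31) -> (1U << 31) changes in this revision: when
 * int is 32 bits wide, shifting a 1 into bit 31 overflows the signed
 * type, which is undefined behavior (C99 6.5.7p4); the same shift on
 * an unsigned int is well defined.  A minimal sketch of the
 * difference (the variable names are illustrative only):
 *
 *	unsigned int ok  = 1U << 31;	yields 0x80000000, well defined
 *	int          bad = 1 << 31;	undefined behavior on ILP32/LP64
 */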
bool enable); void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); enum e1000_promisc_type { e1000_promisc_disabled = 0, /* all promisc modes disabled */ e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ e1000_promisc_enabled = 3, /* both uni and multicast promisc */ e1000_num_promisc_types }; void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); void e1000_rlpml_set_vf(struct e1000_hw *, u16); s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type); u16 e1000_rxpbs_adjust_82580(u32 data); s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); s32 e1000_set_eee_i350(struct e1000_hw *); s32 e1000_set_eee_i354(struct e1000_hw *); s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); /* I2C SDA and SCL timing parameters for standard mode */ #define E1000_I2C_T_HD_STA 4 #define E1000_I2C_T_LOW 5 #define E1000_I2C_T_HIGH 4 #define E1000_I2C_T_SU_STA 5 #define E1000_I2C_T_HD_DATA 5 #define E1000_I2C_T_SU_DATA 1 #define E1000_I2C_T_RISE 1 #define E1000_I2C_T_FALL 1 #define E1000_I2C_T_SU_STO 4 #define E1000_I2C_T_BUF 5 s32 e1000_set_i2c_bb(struct e1000_hw *hw); s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); void e1000_i2c_bus_clear(struct e1000_hw *hw); #endif /* _E1000_82575_H_ */ Index: head/sys/dev/e1000/e1000_ich8lan.c =================================================================== --- head/sys/dev/e1000/e1000_ich8lan.c (revision 258779) +++ head/sys/dev/e1000/e1000_ich8lan.c (revision 258780) @@ -1,5063 +1,5063 @@ /****************************************************************************** Copyright (c) 2001-2013, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ /* 82562G 10/100 Network Connection * 82562G-2 10/100 Network Connection * 82562GT 10/100 Network Connection * 82562GT-2 10/100 Network Connection * 82562V 10/100 Network Connection * 82562V-2 10/100 Network Connection * 82566DC-2 Gigabit Network Connection * 82566DC Gigabit Network Connection * 82566DM-2 Gigabit Network Connection * 82566DM Gigabit Network Connection * 82566MC Gigabit Network Connection * 82566MM Gigabit Network Connection * 82567LM Gigabit Network Connection * 82567LF Gigabit Network Connection * 82567V Gigabit Network Connection * 82567LM-2 Gigabit Network Connection * 82567LF-2 Gigabit Network Connection * 82567V-2 Gigabit Network Connection * 82567LF-3 Gigabit Network Connection * 82567LM-3 Gigabit Network Connection * 82567LM-4 Gigabit Network Connection * 82577LM Gigabit Network Connection * 82577LC Gigabit Network Connection * 82578DM Gigabit Network Connection * 82578DC Gigabit Network Connection * 82579LM Gigabit Network Connection * 82579V Gigabit Network Connection * Ethernet Connection I217-LM * Ethernet Connection I217-V * Ethernet Connection I218-V * Ethernet Connection I218-LM */ #include "e1000_api.h" static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); static void e1000_release_swflag_ich8lan(struct e1000_hw *hw); static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); static void e1000_release_nvm_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count); static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw); static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active); static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active); static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw); static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw); static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data); static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw); static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw); static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw); static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, u16 *duplex); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); static s32 
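/*
 * The flash register unions below pair a bitfield view with the raw
 * register value, so a register can be read once and decoded in place.
 * A minimal usage sketch, modeled on how this file reads HSFSTS (the
 * local variable name hsfsts is illustrative):
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcinprog)
 *		...a flash cycle is still in progress...
 */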
e1000_led_on_pchlan(struct e1000_hw *hw); static s32 e1000_led_off_pchlan(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 *data); static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data); static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 byte); static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr); /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { struct ich8_hsfsts { u16 flcdone:1; /* bit 0 Flash Cycle Done */ u16 flcerr:1; /* bit 1 Flash Cycle Error */ u16 dael:1; /* bit 2 Direct Access error Log */ u16 berasesz:2; /* bit 4:3 Sector Erase Size */ u16 flcinprog:1; /* bit 5 flash cycle in Progress */ u16 reserved1:2; /* bit 7:6 Reserved */ u16 reserved2:6; /* bit 13:8 Reserved */ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ } hsf_status; u16 regval; }; /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ /* Offset 06h FLCTL */ union ich8_hws_flash_ctrl { struct ich8_hsflctl { u16 flcgo:1; /* 0 Flash Cycle Go */ u16 flcycle:2; /* 2:1 Flash Cycle */ u16 reserved:5; /* 7:3 Reserved */ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ u16 flockdn:6; /* 15:10 Reserved */ } hsf_ctrl; u16 regval; }; /* ICH Flash Region Access Permissions */ union ich8_hws_flash_regacc { struct ich8_flracc { u32 grra:8; /* 0:7 GbE region Read Access */ u32 grwa:8; /* 8:15 GbE region Write Access */ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ } hsf_flregacc; u16 regval; }; /** * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers * @hw: pointer to the HW structure * * Test access to the PHY registers by reading the PHY ID registers. If * the PHY ID is already known (e.g. resume path) compare it with known ID, * otherwise assume the read PHY ID is correct if it is valid. * * Assumes the sw/fw/hw semaphore is already acquired.
**/ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { u16 phy_reg = 0; u32 phy_id = 0; s32 ret_val = 0; u16 retry_count; u32 mac_reg = 0; for (retry_count = 0; retry_count < 2; retry_count++) { ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) continue; phy_id = (u32)(phy_reg << 16); ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) { phy_id = 0; continue; } phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); break; } if (hw->phy.id) { if (hw->phy.id == phy_id) goto out; } else if (phy_id) { hw->phy.id = phy_id; hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); goto out; } /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ if (hw->mac.type < e1000_pch_lpt) { hw->phy.ops.release(hw); ret_val = e1000_set_mdio_slow_mode_hv(hw); if (!ret_val) ret_val = e1000_get_phy_id(hw); hw->phy.ops.acquire(hw); } if (ret_val) return FALSE; out: if (hw->mac.type == e1000_pch_lpt) { /* Unforce SMBus mode in PHY */ hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg); phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg); /* Unforce SMBus mode in MAC */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); } return TRUE; } /** * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value * @hw: pointer to the HW structure * * Toggling the LANPHYPC pin value fully power-cycles the PHY and is * used to reset the PHY to a quiescent state when necessary. **/ void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) { u32 mac_reg; DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt"); /* Set Phy Config Counter to 50msec */ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3); mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg); /* Toggle LANPHYPC Value bit */ mac_reg = E1000_READ_REG(hw, E1000_CTRL); mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); E1000_WRITE_FLUSH(hw); usec_delay(10); mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); E1000_WRITE_FLUSH(hw); if (hw->mac.type < e1000_pch_lpt) { msec_delay(50); } else { u16 count = 20; do { msec_delay(5); } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); msec_delay(30); } } /** * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure * * Workarounds/flow necessary for PHY initialization during driver load * and resume paths. **/ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) { u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM); s32 ret_val; DEBUGFUNC("e1000_init_phy_workarounds_pchlan"); /* Gate automatic PHY configuration by hardware on managed and * non-managed 82579 and newer adapters. */ e1000_gate_hw_phy_config_ich8lan(hw, TRUE); ret_val = hw->phy.ops.acquire(hw); if (ret_val) { DEBUGOUT("Failed to initialize PHY flow\n"); goto out; } /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is * inaccessible and resetting the PHY is not blocked, toggle the * LANPHYPC Value bit to force the interconnect to PCIe mode. */ switch (hw->mac.type) { case e1000_pch_lpt: if (e1000_phy_is_accessible_pchlan(hw)) break; /* Before toggling LANPHYPC, see if PHY is accessible by * forcing MAC to SMBus mode first. 
*/ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); /* Wait 50 milliseconds for MAC to finish any retries * that it might be trying to perform from previous * attempts to acknowledge any phy read requests. */ msec_delay(50); /* fall-through */ case e1000_pch2lan: if (e1000_phy_is_accessible_pchlan(hw)) break; /* fall-through */ case e1000_pchlan: if ((hw->mac.type == e1000_pchlan) && (fwsm & E1000_ICH_FWSM_FW_VALID)) break; if (hw->phy.ops.check_reset_block(hw)) { DEBUGOUT("Required LANPHYPC toggle blocked by ME\n"); ret_val = -E1000_ERR_PHY; break; } /* Toggle LANPHYPC Value bit */ e1000_toggle_lanphypc_pch_lpt(hw); if (hw->mac.type >= e1000_pch_lpt) { if (e1000_phy_is_accessible_pchlan(hw)) break; /* Toggling LANPHYPC brings the PHY out of SMBus mode * so ensure that the MAC is also out of SMBus mode */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); if (e1000_phy_is_accessible_pchlan(hw)) break; ret_val = -E1000_ERR_PHY; } break; default: break; } hw->phy.ops.release(hw); if (!ret_val) { /* Check to see if able to reset PHY. Print error if not */ if (hw->phy.ops.check_reset_block(hw)) { ERROR_REPORT("Reset blocked by ME\n"); goto out; } /* Reset the PHY before any access to it. Doing so, ensures * that the PHY is in a known good state before we read/write * PHY registers. The generic reset is sufficient here, * because we haven't determined the PHY type yet. */ ret_val = e1000_phy_hw_reset_generic(hw); if (ret_val) goto out; /* On a successful reset, possibly need to wait for the PHY * to quiesce to an accessible state before returning control * to the calling function. If the PHY does not quiesce, then * return E1000E_BLK_PHY_RESET, as this is the condition that * the PHY is in. */ ret_val = hw->phy.ops.check_reset_block(hw); if (ret_val) ERROR_REPORT("ME blocked access to PHY after reset\n"); } out: /* Ungate automatic PHY configuration on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(fwsm & E1000_ICH_FWSM_FW_VALID)) { msec_delay(10); e1000_gate_hw_phy_config_ich8lan(hw, FALSE); } return ret_val; } /** * e1000_init_phy_params_pchlan - Initialize PHY function pointers * @hw: pointer to the HW structure * * Initialize family-specific PHY parameters and function pointers. 
**/ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; DEBUGFUNC("e1000_init_phy_params_pchlan"); phy->addr = 1; phy->reset_delay_us = 100; phy->ops.acquire = e1000_acquire_swflag_ich8lan; phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; phy->ops.set_page = e1000_set_page_igp; phy->ops.read_reg = e1000_read_phy_reg_hv; phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; phy->ops.release = e1000_release_swflag_ich8lan; phy->ops.reset = e1000_phy_hw_reset_ich8lan; phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.write_reg = e1000_write_phy_reg_hv; phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->id = e1000_phy_unknown; ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) return ret_val; if (phy->id == e1000_phy_unknown) switch (hw->mac.type) { default: ret_val = e1000_get_phy_id(hw); if (ret_val) return ret_val; if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) break; /* fall-through */ case e1000_pch2lan: case e1000_pch_lpt: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; ret_val = e1000_get_phy_id(hw); if (ret_val) return ret_val; break; } phy->type = e1000_get_phy_type_from_id(phy->id); switch (phy->type) { case e1000_phy_82577: case e1000_phy_82579: case e1000_phy_i217: phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; phy->ops.get_cable_length = e1000_get_cable_length_82577; phy->ops.get_info = e1000_get_phy_info_82577; phy->ops.commit = e1000_phy_sw_reset_generic; break; case e1000_phy_82578: phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; phy->ops.get_cable_length = e1000_get_cable_length_m88; phy->ops.get_info = e1000_get_phy_info_m88; break; default: ret_val = -E1000_ERR_PHY; break; } return ret_val; } /** * e1000_init_phy_params_ich8lan - Initialize PHY function pointers * @hw: pointer to the HW structure * * Initialize family-specific PHY parameters and function pointers. 
**/ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 i = 0; DEBUGFUNC("e1000_init_phy_params_ich8lan"); phy->addr = 1; phy->reset_delay_us = 100; phy->ops.acquire = e1000_acquire_swflag_ich8lan; phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; phy->ops.get_cable_length = e1000_get_cable_length_igp_2; phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; phy->ops.read_reg = e1000_read_phy_reg_igp; phy->ops.release = e1000_release_swflag_ich8lan; phy->ops.reset = e1000_phy_hw_reset_ich8lan; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan; phy->ops.write_reg = e1000_write_phy_reg_igp; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; /* We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again */ ret_val = e1000_determine_phy_address(hw); if (ret_val) { phy->ops.write_reg = e1000_write_phy_reg_bm; phy->ops.read_reg = e1000_read_phy_reg_bm; ret_val = e1000_determine_phy_address(hw); if (ret_val) { DEBUGOUT("Cannot determine PHY addr. Erroring out\n"); return ret_val; } } phy->id = 0; while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) && (i++ < 100)) { msec_delay(1); ret_val = e1000_get_phy_id(hw); if (ret_val) return ret_val; } /* Verify phy id */ switch (phy->id) { case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked; phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked; phy->ops.get_info = e1000_get_phy_info_igp; phy->ops.check_polarity = e1000_check_polarity_igp; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy->type = e1000_phy_ife; phy->autoneg_mask = E1000_ALL_NOT_GIG; phy->ops.get_info = e1000_get_phy_info_ife; phy->ops.check_polarity = e1000_check_polarity_ife; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; break; case BME1000_E_PHY_ID: phy->type = e1000_phy_bm; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg = e1000_read_phy_reg_bm; phy->ops.write_reg = e1000_write_phy_reg_bm; phy->ops.commit = e1000_phy_sw_reset_generic; phy->ops.get_info = e1000_get_phy_info_m88; phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; default: return -E1000_ERR_PHY; break; } return E1000_SUCCESS; } /** * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers * @hw: pointer to the HW structure * * Initialize family-specific NVM parameters and function * pointers. **/ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; DEBUGFUNC("e1000_init_nvm_params_ich8lan"); /* Can't read flash registers if the register set isn't mapped. */ if (!hw->flash_address) { DEBUGOUT("ERROR: Flash registers not mapped\n"); return -E1000_ERR_CONFIG; } nvm->type = e1000_nvm_flash_sw; gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); /* sector_X_addr is a "sector"-aligned address (4096 bytes) * Add 1 to sector_end_addr since this sector is included in * the overall size. 
*/ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; /* flash_base_addr is byte-aligned */ nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; /* find total size of the NVM, then cut in half since the total * size represents two separate NVM banks. */ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) << FLASH_SECTOR_ADDR_SHIFT); nvm->flash_bank_size /= 2; /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); nvm->word_size = E1000_SHADOW_RAM_WORDS; /* Clear shadow ram */ for (i = 0; i < nvm->word_size; i++) { dev_spec->shadow_ram[i].modified = FALSE; dev_spec->shadow_ram[i].value = 0xFFFF; } E1000_MUTEX_INIT(&dev_spec->nvm_mutex); E1000_MUTEX_INIT(&dev_spec->swflag_mutex); /* Function Pointers */ nvm->ops.acquire = e1000_acquire_nvm_ich8lan; nvm->ops.release = e1000_release_nvm_ich8lan; nvm->ops.read = e1000_read_nvm_ich8lan; nvm->ops.update = e1000_update_nvm_checksum_ich8lan; nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan; nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan; nvm->ops.write = e1000_write_nvm_ich8lan; return E1000_SUCCESS; } /** * e1000_init_mac_params_ich8lan - Initialize MAC function pointers * @hw: pointer to the HW structure * * Initialize family-specific MAC parameters and function * pointers. **/ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; DEBUGFUNC("e1000_init_mac_params_ich8lan"); /* Set media type function pointer */ hw->phy.media_type = e1000_media_type_copper; /* Set mta register count */ mac->mta_reg_count = 32; /* Set rar entry count */ mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; if (mac->type == e1000_ich8lan) mac->rar_entry_count--; /* Set if part includes ASF firmware */ mac->asf_firmware_present = TRUE; /* FWSM register */ mac->has_fwsm = TRUE; /* ARC subsystem not supported */ mac->arc_subsystem_valid = FALSE; /* Adaptive IFS supported */ mac->adaptive_ifs = TRUE; /* Function pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_ich8lan; /* function id */ mac->ops.set_lan_id = e1000_set_lan_id_single_port; /* reset */ mac->ops.reset_hw = e1000_reset_hw_ich8lan; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_ich8lan; /* link setup */ mac->ops.setup_link = e1000_setup_link_ich8lan; /* physical interface setup */ mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan; /* check for link */ mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan; /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan; /* LED and other operations */ switch (mac->type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_generic; /* blink LED */ mac->ops.blink_led = e1000_blink_led_generic; /* setup LED */ mac->ops.setup_led = e1000_setup_led_generic; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_ich8lan; mac->ops.led_off = e1000_led_off_ich8lan; break; case e1000_pch2lan: mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch2lan; /* fall-through */ case e1000_pch_lpt: /* multicast address 
update for pch2 */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_pch2lan; case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_pchlan; /* setup LED */ mac->ops.setup_led = e1000_setup_led_pchlan; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_pchlan; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_pchlan; mac->ops.led_off = e1000_led_off_pchlan; break; default: break; } if (mac->type == e1000_pch_lpt) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt; mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt; } /* Enable PCS Lock-loss workaround for ICH8 */ if (mac->type == e1000_ich8lan) e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE); return E1000_SUCCESS; } /** * __e1000_access_emi_reg_locked - Read/write EMI register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: pointer to value to read/write from/to the EMI address * @read: boolean flag to indicate read or write * * This helper function assumes the SW/FW/HW Semaphore is already acquired. **/ static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, u16 *data, bool read) { s32 ret_val; DEBUGFUNC("__e1000_access_emi_reg_locked"); ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address); if (ret_val) return ret_val; if (read) ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA, data); else ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, *data); return ret_val; } /** * e1000_read_emi_reg_locked - Read Extended Management Interface register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: value to be read from the EMI address * * Assumes the SW/FW/HW Semaphore is already acquired. **/ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) { DEBUGFUNC("e1000_read_emi_reg_locked"); return __e1000_access_emi_reg_locked(hw, addr, data, TRUE); } /** * e1000_write_emi_reg_locked - Write Extended Management Interface register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: value to be written to the EMI address * * Assumes the SW/FW/HW Semaphore is already acquired. **/ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) { DEBUGFUNC("e1000_write_emi_reg_locked"); return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE); } /** * e1000_set_eee_pchlan - Enable/disable EEE support * @hw: pointer to the HW structure * * Enable/disable EEE based on setting in dev_spec structure, the duplex of * the link and the EEE capabilities of the link partner. The LPI Control * register bits will remain set only if/when link is up. * * EEE LPI must not be asserted earlier than one second after link is up. * On 82579, EEE LPI should not be enabled until such time otherwise there * can be link issues with some switches. Other devices can have EEE LPI * enabled immediately upon link up since they have a timer in hardware which * prevents LPI from being asserted too early.
**/ s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val; u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; DEBUGFUNC("e1000_set_eee_pchlan"); switch (hw->phy.type) { case e1000_phy_82579: lpa = I82579_EEE_LP_ABILITY; pcs_status = I82579_EEE_PCS_STATUS; adv_addr = I82579_EEE_ADVERTISEMENT; break; case e1000_phy_i217: lpa = I217_EEE_LP_ABILITY; pcs_status = I217_EEE_PCS_STATUS; adv_addr = I217_EEE_ADVERTISEMENT; break; default: return E1000_SUCCESS; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); if (ret_val) goto release; /* Clear bits that enable EEE in various speeds */ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; /* Enable EEE if not disabled by user */ if (!dev_spec->eee_disable) { /* Save off link partner's EEE ability */ ret_val = e1000_read_emi_reg_locked(hw, lpa, &dev_spec->eee_lp_ability); if (ret_val) goto release; /* Read EEE advertisement */ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); if (ret_val) goto release; /* Enable EEE only for speeds in which the link partner is * EEE capable and for which we advertise EEE. */ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data); if (data & NWAY_LPAR_100TX_FD_CAPS) lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; else /* EEE is not supported in 100Half, so ignore * partner's EEE in 100 ability if full-duplex * is not advertised. */ dev_spec->eee_lp_ability &= ~I82579_EEE_100_SUPPORTED; } } /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); if (ret_val) goto release; ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP * @hw: pointer to the HW structure * @link: link up bool flag * * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications * preventing further DMA write requests. Work around the issue by disabling * the de-assertion of the clock request when in 1Gbps mode. * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link * speeds in order to avoid Tx hangs.
**/ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) { u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); u32 status = E1000_READ_REG(hw, E1000_STATUS); s32 ret_val = E1000_SUCCESS; u16 reg; if (link && (status & E1000_STATUS_SPEED_1000)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, &reg); if (ret_val) goto release; ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, reg & ~E1000_KMRNCTRLSTA_K1_ENABLE); if (ret_val) goto release; usec_delay(10); E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, reg); release: hw->phy.ops.release(hw); } else { /* clear FEXTNVM6 bit 8 on link down or 10/100 */ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; if (!link || ((status & E1000_STATUS_SPEED_100) && (status & E1000_STATUS_FD))) goto update_fextnvm6; ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg); if (ret_val) return ret_val; /* Clear link status transmit timeout */ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; if (status & E1000_STATUS_SPEED_100) { /* Set inband Tx timeout to 5x10us for 100Half */ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; /* Do not extend the K1 entry latency for 100Half */ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; } else { /* Set inband Tx timeout to 50x10us for 10Full/Half */ reg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; /* Extend the K1 entry latency for 10 Mbps */ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; } ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg); if (ret_val) return ret_val; update_fextnvm6: E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); } return ret_val; } static u64 e1000_ltr2ns(u16 ltr) { u32 value, scale; /* Determine the latency in nsec based on the LTR value & scale */ value = ltr & E1000_LTRV_VALUE_MASK; scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT; return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR)); } /** * e1000_platform_pm_pch_lpt - Set platform power management values * @hw: pointer to the HW structure * @link: bool indicating link status * * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed * when link is up (which must not exceed the maximum latency supported * by the platform), otherwise specify there is no LTR requirement. * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop * latencies in the LTR Extended Capability Structure in the PCIe Extended * Capability register set, on this device LTR is set by writing the * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and * setting the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) * message to the PMC. * * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF) * high-water mark.
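* * A purely illustrative example (hypothetical numbers, not from any datasheet): with a 24 KB Rx allocation (rxa = 24), a 1522-byte max frame and a 1000 Mb/s link, lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000 = 172256 ns. The encoding loop below divides by 2^5 with round-up until the value fits in 10 bits: 172256 -> 5383 -> 169 at scale = 2, so lat_enc = (2 << E1000_LTRV_SCALE_SHIFT) | 169, i.e. 169 units of 2^10 ns (~173 us).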
**/ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; u16 lat_enc = 0; /* latency encoded */ s32 obff_hwm = 0; DEBUGFUNC("e1000_platform_pm_pch_lpt"); if (link) { u16 speed, duplex, scale = 0; u16 max_snoop, max_nosnoop; u16 max_ltr_enc; /* max LTR latency encoded */ s64 lat_ns; /* latency (ns) */ s64 value; u32 rxa; if (!hw->mac.max_frame_size) { DEBUGOUT("max_frame_size not set.\n"); return -E1000_ERR_CONFIG; } hw->mac.ops.get_link_up_info(hw, &speed, &duplex); if (!speed) { DEBUGOUT("Speed not set.\n"); return -E1000_ERR_CONFIG; } /* Rx Packet Buffer Allocation size (KB) */ rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK; /* Determine the maximum latency tolerated by the device. * * Per the PCIe spec, the tolerated latencies are encoded as * a 3-bit encoded scale (only 0-5 are valid) multiplied by * a 10-bit value (0-1023) to provide a range from 1 ns to * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, * 1=2^5ns, 2=2^10ns,...5=2^25ns. */ lat_ns = ((s64)rxa * 1024 - (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000; if (lat_ns < 0) lat_ns = 0; else lat_ns /= speed; value = lat_ns; while (value > E1000_LTRV_VALUE_MASK) { scale++; value = E1000_DIVIDE_ROUND_UP(value, (1 << 5)); } if (scale > E1000_LTRV_SCALE_MAX) { DEBUGOUT1("Invalid LTR latency scale %d\n", scale); return -E1000_ERR_CONFIG; } lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value); /* Determine the maximum latency tolerated by the platform */ e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop); e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop); if (lat_enc > max_ltr_enc) { lat_enc = max_ltr_enc; lat_ns = e1000_ltr2ns(max_ltr_enc); } if (lat_ns) { lat_ns *= speed * 1000; lat_ns /= 8; lat_ns /= 1000000000; obff_hwm = (s32)(rxa - lat_ns); } if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) { DEBUGOUT1("Invalid high water mark %d\n", obff_hwm); return -E1000_ERR_CONFIG; } } /* Set Snoop and No-Snoop latencies the same */ reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); E1000_WRITE_REG(hw, E1000_LTRV, reg); /* Set OBFF high water mark */ reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK; reg |= obff_hwm; E1000_WRITE_REG(hw, E1000_SVT, reg); /* Enable OBFF */ reg = E1000_READ_REG(hw, E1000_SVCR); reg |= E1000_SVCR_OFF_EN; /* Always unblock interrupts to the CPU even when the system is * in OBFF mode. This ensures that small round-robin traffic * (like ping) does not get dropped or experience long latency. */ reg |= E1000_SVCR_OFF_MASKINT; E1000_WRITE_REG(hw, E1000_SVCR, reg); return E1000_SUCCESS; } /** * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer * @hw: pointer to the HW structure * @itr: interrupt throttling rate * * Configure OBFF with the updated interrupt rate. 
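* * Concretely, the conversion below is timer = (itr & E1000_ITR_MASK) * E1000_ITR_MULT / 1000, i.e. the masked ITR value scaled into microseconds; a result that no longer fits in E1000_ITR_MASK is rejected with -E1000_ERR_CONFIG rather than silently truncated.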
**/ static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr) { u32 svcr; s32 timer; DEBUGFUNC("e1000_set_obff_timer_pch_lpt"); /* Convert ITR value into microseconds for OBFF timer */ timer = itr & E1000_ITR_MASK; timer = (timer * E1000_ITR_MULT) / 1000; if ((timer < 0) || (timer > E1000_ITR_MASK)) { DEBUGOUT1("Invalid OBFF timer %d\n", timer); return -E1000_ERR_CONFIG; } svcr = E1000_READ_REG(hw, E1000_SVCR); svcr &= ~E1000_SVCR_OFF_TIMER_MASK; svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT; E1000_WRITE_REG(hw, E1000_SVCR, svcr); return E1000_SUCCESS; } /** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see if the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; bool link; u16 phy_reg; DEBUGFUNC("e1000_check_for_copper_link_ich8lan"); /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) return E1000_SUCCESS; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (hw->mac.type == e1000_pchlan) { ret_val = e1000_k1_gig_workaround_hv(hw, link); if (ret_val) return ret_val; } /* When connected at 10Mbps half-duplex, 82579 parts are excessively * aggressive, resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY. */ if ((hw->mac.type == e1000_pch2lan) && link) { u32 reg; reg = E1000_READ_REG(hw, E1000_STATUS); if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { reg = E1000_READ_REG(hw, E1000_TIPG); reg &= ~E1000_TIPG_IPGT_MASK; reg |= 0xFF; E1000_WRITE_REG(hw, E1000_TIPG, reg); /* Reduce Rx latency in analog PHY */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); hw->phy.ops.release(hw); if (ret_val) return ret_val; } } /* Work-around I218 hang issue */ if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; } if (hw->mac.type == e1000_pch_lpt) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) * Optimized Buffer Flush/Fill (OBFF) */ ret_val = e1000_platform_pm_pch_lpt(hw, link); if (ret_val) return ret_val; } /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; if (!link) return E1000_SUCCESS; /* No link detected */ mac->get_link_status = FALSE; switch (hw->mac.type) { case e1000_pch2lan: ret_val = e1000_k1_workaround_lv(hw); if (ret_val) return ret_val; /* fall-thru */ case e1000_pchlan: if (hw->phy.type == e1000_phy_82578) { ret_val = e1000_link_stall_workaround_hv(hw); if (ret_val) return ret_val; } /* Workaround for PCHx parts in half-duplex: * Set the number of preambles removed from the packet * when it is passed from the PHY to the MAC to prevent * the MAC from misinterpreting the packet type.
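* In half-duplex the preamble-count field is set to 1 below; in full-duplex the field is simply left at zero after being masked off.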
*/ hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); break; default: break; } /* Check if there was DownShift, must be checked * immediately after link-up */ e1000_check_downshift_generic(hw); /* Enable/Disable EEE after link up */ if (hw->phy.type > e1000_phy_82579) { ret_val = e1000_set_eee_pchlan(hw); if (ret_val) return ret_val; } /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) return -E1000_ERR_CONFIG; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); /* Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) DEBUGOUT("Error configuring flow control\n"); return ret_val; } /** * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers * @hw: pointer to the HW structure * * Initialize family-specific function pointers for PHY, MAC, and NVM. **/ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_ich8lan"); hw->mac.ops.init_params = e1000_init_mac_params_ich8lan; hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan; switch (hw->mac.type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: hw->phy.ops.init_params = e1000_init_phy_params_ich8lan; break; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: hw->phy.ops.init_params = e1000_init_phy_params_pchlan; break; default: break; } } /** * e1000_acquire_nvm_ich8lan - Acquire NVM mutex * @hw: pointer to the HW structure * * Acquires the mutex for performing NVM operations. **/ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_acquire_nvm_ich8lan"); E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex); return E1000_SUCCESS; } /** * e1000_release_nvm_ich8lan - Release NVM mutex * @hw: pointer to the HW structure * * Releases the mutex used while performing NVM operations. **/ static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_release_nvm_ich8lan"); E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex); return; } /** * e1000_acquire_swflag_ich8lan - Acquire software control flag * @hw: pointer to the HW structure * * Acquires the software control flag for performing PHY and select * MAC CSR accesses. 
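* * The acquire below is a two-phase handshake: poll until EXTCNF_CTRL.SWFLAG reads back clear, set it, then poll again until the set sticks; if firmware or hardware holds the flag, the bit never latches and the function gives up with -E1000_ERR_CONFIG.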
**/ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) { u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_acquire_swflag_ich8lan"); E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex); while (timeout) { extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) break; msec_delay_irq(1); timeout--; } if (!timeout) { DEBUGOUT("SW has already locked the resource.\n"); ret_val = -E1000_ERR_CONFIG; goto out; } timeout = SW_FLAG_TIMEOUT; extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); while (timeout) { extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) break; msec_delay_irq(1); timeout--; } if (!timeout) { DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); ret_val = -E1000_ERR_CONFIG; goto out; } out: if (ret_val) E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); return ret_val; } /** * e1000_release_swflag_ich8lan - Release software control flag * @hw: pointer to the HW structure * * Releases the software control flag for performing PHY and select * MAC CSR accesses. **/ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) { u32 extcnf_ctrl; DEBUGFUNC("e1000_release_swflag_ich8lan"); extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); } else { DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n"); } E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); return; } /** * e1000_check_mng_mode_ich8lan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has any manageability enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts. **/ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) { u32 fwsm; DEBUGFUNC("e1000_check_mng_mode_ich8lan"); fwsm = E1000_READ_REG(hw, E1000_FWSM); return ((fwsm & E1000_ICH_FWSM_FW_VALID) && ((fwsm & E1000_FWSM_MODE_MASK) == (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))); } /** * e1000_check_mng_mode_pchlan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has iAMT enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts. **/ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) { u32 fwsm; DEBUGFUNC("e1000_check_mng_mode_pchlan"); fwsm = E1000_READ_REG(hw, E1000_FWSM); return (fwsm & E1000_ICH_FWSM_FW_VALID) && (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** * e1000_rar_set_pch2lan - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address array register at index to the address passed * in by addr. For 82579, RAR[0] is the base address register that is to * contain the MAC address but RAR[1-6] are reserved for manageability (ME). * Use SHRA[0-3] in place of those reserved for ME. 
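* * Byte-order example (the MAC address is purely illustrative): for 00:1b:21:aa:bb:cc the packing below yields rar_low = 0xaa211b00 and rar_high = 0x0000ccbb, before the AV bit is OR'd into rar_high.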
**/ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; DEBUGFUNC("e1000_rar_set_pch2lan"); /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; if (index == 0) { E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); E1000_WRITE_FLUSH(hw); return; } /* RAR[1-6] are owned by manageability. Skip those and program the * next address into the SHRA register array. */ if (index < (u32) (hw->mac.rar_entry_count - 6)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); if (ret_val) goto out; E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high); E1000_WRITE_FLUSH(hw); e1000_release_swflag_ich8lan(hw); /* verify the register updates */ if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) && (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high)) return; DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", (index - 1), E1000_READ_REG(hw, E1000_FWSM)); } out: DEBUGOUT1("Failed to write receive address at index %d\n", index); } /** * e1000_rar_set_pch_lpt - Set receive address registers * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address register array at index to the address passed * in by addr. For LPT, RAR[0] is the base address register that is to * contain the MAC address. SHRA[0-10] are the shared receive address * registers that are shared between the Host and manageability engine (ME). **/ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; u32 wlock_mac; DEBUGFUNC("e1000_rar_set_pch_lpt"); /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; if (index == 0) { E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); E1000_WRITE_FLUSH(hw); return; } /* The manageability engine (ME) can lock certain SHRAR registers that * it is using - those registers are unavailable for use. 
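* The FWSM.WLOCK_MAC field is interpreted below as follows: 0 means no SHRAR registers are locked, 1 means all of them are locked, and any other value N permits writes only to indices no greater than N.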
*/ if (index < hw->mac.rar_entry_count) { wlock_mac = E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_WLOCK_MAC_MASK; wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; /* Check if all SHRAR registers are locked */ if (wlock_mac == 1) goto out; if ((wlock_mac == 0) || (index <= wlock_mac)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); if (ret_val) goto out; E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1), rar_high); E1000_WRITE_FLUSH(hw); e1000_release_swflag_ich8lan(hw); /* verify the register updates */ if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) && (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high)) return; } } out: DEBUGOUT1("Failed to write receive address at index %d\n", index); } /** * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program * * Updates entire Multicast Table Array of the PCH2 MAC and PHY. * The caller must have a packed mc_addr_list of multicast addresses. **/ static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count) { u16 phy_reg = 0; int i; s32 ret_val; DEBUGFUNC("e1000_update_mc_addr_list_pch2lan"); e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); if (ret_val) goto release; for (i = 0; i < hw->mac.mta_reg_count; i++) { hw->phy.ops.write_reg_page(hw, BM_MTA(i), (u16)(hw->mac.mta_shadow[i] & 0xFFFF)); hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1), (u16)((hw->mac.mta_shadow[i] >> 16) & 0xFFFF)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); release: hw->phy.ops.release(hw); } /** * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Checks if firmware is blocking the reset of the PHY. * This is a function pointer entry point only called by * reset routines. **/ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) { u32 fwsm; bool blocked = FALSE; int i = 0; DEBUGFUNC("e1000_check_reset_block_ich8lan"); do { fwsm = E1000_READ_REG(hw, E1000_FWSM); if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) { blocked = TRUE; msec_delay(10); continue; } blocked = FALSE; } while (blocked && (i++ < 10)); return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS; } /** * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states * @hw: pointer to the HW structure * * Assumes semaphore already acquired. 
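* * The address itself comes from hardware straps: the low bits of the STRAP register are shifted into HV_SMB_ADDR, and on i217 the two SMBus frequency strap bits are restored into the PHY as well.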
* **/ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) { u16 phy_data; u32 strap = E1000_READ_REG(hw, E1000_STRAP); u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> E1000_STRAP_SMT_FREQ_SHIFT; s32 ret_val; strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); if (ret_val) return ret_val; phy_data &= ~HV_SMB_ADDR_MASK; phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; if (hw->phy.type == e1000_phy_i217) { /* Restore SMBus frequency */ if (freq--) { phy_data &= ~HV_SMB_ADDR_FREQ_MASK; phy_data |= (freq & (1 << 0)) << HV_SMB_ADDR_FREQ_LOW_SHIFT; phy_data |= (freq & (1 << 1)) << (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); } else { DEBUGOUT("Unsupported SMB frequency in PHY\n"); } } return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); } /** * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * * SW should configure the LCD from the NVM extended configuration region * as a workaround for certain parts. **/ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; s32 ret_val = E1000_SUCCESS; u16 word_addr, reg_data, reg_addr, phy_page = 0; DEBUGFUNC("e1000_sw_lcd_config_ich8lan"); /* Initialize the PHY from the NVM on ICH platforms. This * is needed due to an issue where the NVM configuration is * not properly autoloaded after power transitions. * Therefore, after each PHY reset, we will load the * configuration data out of the NVM manually. */ switch (hw->mac.type) { case e1000_ich8lan: if (phy->type != e1000_phy_igp_3) return ret_val; if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) || (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) { sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; break; } /* Fall-thru */ case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: return ret_val; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; data = E1000_READ_REG(hw, E1000_FEXTNVM); if (!(data & sw_cfg_mask)) goto release; /* Make sure HW does not configure LCD from PHY * extended configuration before SW configuration */ data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if ((hw->mac.type < e1000_pch2lan) && (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) goto release; cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE); cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; if (!cnf_size) goto release; cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; if (((hw->mac.type == e1000_pchlan) && !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || (hw->mac.type > e1000_pchlan)) { /* HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. * When both NVM bits are cleared, SW will configure * them instead. */ ret_val = e1000_write_smbus_addr(hw); if (ret_val) goto release; data = E1000_READ_REG(hw, E1000_LEDCTL); ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, (u16)data); if (ret_val) goto release; } /* Configure LCD from extended configuration region. 
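* Each entry in that region is a (data, address) word pair; an address word equal to IGP01E1000_PHY_PAGE_SELECT switches the PHY page used for the following writes instead of being written itself.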
*/ /* cnf_base_addr is in DWORD */ word_addr = (u16)(cnf_base_addr << 1); for (i = 0; i < cnf_size; i++) { ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1, &reg_data); if (ret_val) goto release; ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1), 1, &reg_addr); if (ret_val) goto release; /* Save off the PHY page for future writes. */ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { phy_page = reg_data; continue; } reg_addr &= PHY_REG_MASK; reg_addr |= phy_page; ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, reg_data); if (ret_val) goto release; } release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_gig_workaround_hv - K1 Si workaround * @hw: pointer to the HW structure * @link: link up bool flag * * If K1 is enabled for 1Gbps, the MAC might stall when transitioning * from a lower speed. This workaround disables K1 whenever link is at 1Gig. * If link is down, the function will restore the default K1 setting located * in the NVM. **/ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) { s32 ret_val = E1000_SUCCESS; u16 status_reg = 0; bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; DEBUGFUNC("e1000_k1_gig_workaround_hv"); if (hw->mac.type != e1000_pchlan) return E1000_SUCCESS; /* Wrap the whole flow with the sw flag */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ if (link) { if (hw->phy.type == e1000_phy_82578) { ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, &status_reg); if (ret_val) goto release; status_reg &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_MASK); if (status_reg == (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) k1_enable = FALSE; } if (hw->phy.type == e1000_phy_82577) { ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, &status_reg); if (ret_val) goto release; status_reg &= (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE | HV_M_STATUS_SPEED_MASK); if (status_reg == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE | HV_M_STATUS_SPEED_1000)) k1_enable = FALSE; } /* Link stall fix for link up */ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 0x0100); if (ret_val) goto release; } else { /* Link stall fix for link down */ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 0x4100); if (ret_val) goto release; } ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_configure_k1_ich8lan - Configure K1 power state * @hw: pointer to the HW structure * @k1_enable: K1 state to configure * * Configure the K1 power state based on the provided parameter. * Assumes semaphore already acquired.
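* After the K1 enable bit is updated over KMRN, the MAC speed is briefly forced (FRCSPD together with SPD_BYPS) and then restored, cycling the link configuration so the new K1 setting takes effect.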
* * Success returns 0, Failure returns -E1000_ERR_PHY (-2) **/ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) { s32 ret_val; u32 ctrl_reg = 0; u32 ctrl_ext = 0; u32 reg = 0; u16 kmrn_reg = 0; DEBUGFUNC("e1000_configure_k1_ich8lan"); ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, &kmrn_reg); if (ret_val) return ret_val; if (k1_enable) kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; else kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, kmrn_reg); if (ret_val) return ret_val; usec_delay(20); ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); reg |= E1000_CTRL_FRCSPD; E1000_WRITE_REG(hw, E1000_CTRL, reg); E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); E1000_WRITE_FLUSH(hw); usec_delay(20); E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); usec_delay(20); return E1000_SUCCESS; } /** * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * @d0_state: boolean if entering d0 or d3 device state * * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are * collectively called OEM bits. The OEM Write Enable bit and SW Config bit * in NVM determines whether HW should configure LPLU and Gbe Disable. **/ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) { s32 ret_val = 0; u32 mac_reg; u16 oem_reg; DEBUGFUNC("e1000_oem_bits_config_ich8lan"); if (hw->mac.type < e1000_pchlan) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; if (hw->mac.type == e1000_pchlan) { mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) goto release; } mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM); if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) goto release; mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL); ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) goto release; oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); if (d0_state) { if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) oem_reg |= HV_OEM_BITS_LPLU; } else { if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU)) oem_reg |= HV_OEM_BITS_LPLU; } /* Set Restart auto-neg to activate the bits */ if ((d0_state || (hw->mac.type != e1000_pchlan)) && !hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode * @hw: pointer to the HW structure **/ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) { s32 ret_val; u16 data; DEBUGFUNC("e1000_set_mdio_slow_mode_hv"); ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data); if (ret_val) return ret_val; data |= HV_KMRN_MDIO_SLOW; ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data); return ret_val; } /** * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be * done after every PHY reset. 
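* * These workarounds are specific to the pchlan MAC with 82577/82578 PHYs; on any other MAC type the function below returns immediately.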
**/ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 phy_data; DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan"); if (hw->mac.type != e1000_pchlan) return E1000_SUCCESS; /* Set MDIO slow mode before any other MDIO access */ if (hw->phy.type == e1000_phy_82577) { ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; } if (((hw->phy.type == e1000_phy_82577) && ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { /* Disable generation of early preamble */ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431); if (ret_val) return ret_val; /* Preamble tuning for SSC */ ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); if (ret_val) return ret_val; } if (hw->phy.type == e1000_phy_82578) { /* Return registers to default by doing a soft reset then * writing 0x3140 to the control register. */ if (hw->phy.revision < 2) { e1000_phy_sw_reset_generic(hw); ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, 0x3140); } } /* Select page 0 */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; hw->phy.addr = 1; ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); hw->phy.ops.release(hw); if (ret_val) return ret_val; /* Configure the K1 Si workaround during phy reset assuming there is * link so that it disables K1 if link is in 1Gbps. */ ret_val = e1000_k1_gig_workaround_hv(hw, TRUE); if (ret_val) return ret_val; /* Workaround for link disconnects on a busy hub in half duplex */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); if (ret_val) goto release; ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); if (ret_val) goto release; /* set MSE higher to enable link to stay up when noise is high */ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY * @hw: pointer to the HW structure **/ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) { u32 mac_reg; u16 i, phy_reg = 0; s32 ret_val; DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); if (ret_val) goto release; /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ for (i = 0; i < (hw->mac.rar_entry_count); i++) { mac_reg = E1000_READ_REG(hw, E1000_RAL(i)); hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF)); mac_reg = E1000_READ_REG(hw, E1000_RAH(i)); hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), (u16)((mac_reg & E1000_RAH_AV) >> 16)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); release: hw->phy.ops.release(hw); } static u32 e1000_calc_rx_da_crc(u8 mac[]) { u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ u32 i, j, mask, crc; DEBUGFUNC("e1000_calc_rx_da_crc"); crc = 0xffffffff; for (i = 0; i < 6; i++) { crc = crc ^ mac[i]; for (j = 8; j > 0; j--) { mask = (crc & 1) * (-1); crc = (crc >> 1) ^ (poly & mask); } } return ~crc; } /** * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation * with 82579 PHY * @hw: pointer to the HW structure * @enable: flag to enable/disable workaround 
when enabling/disabling jumbos **/ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) { s32 ret_val = E1000_SUCCESS; u16 phy_reg, data; u32 mac_reg; u16 i; DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan"); if (hw->mac.type < e1000_pch2lan) return E1000_SUCCESS; /* disable Rx path while enabling/disabling workaround */ hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); if (ret_val) return ret_val; if (enable) { /* Write Rx addresses (rar_entry_count for RAL/H, and * SHRAL/H) and initial CRC values to the MAC */ for (i = 0; i < hw->mac.rar_entry_count; i++) { u8 mac_addr[ETH_ADDR_LEN] = {0}; u32 addr_high, addr_low; addr_high = E1000_READ_REG(hw, E1000_RAH(i)); if (!(addr_high & E1000_RAH_AV)) continue; addr_low = E1000_READ_REG(hw, E1000_RAL(i)); mac_addr[0] = (addr_low & 0xFF); mac_addr[1] = ((addr_low >> 8) & 0xFF); mac_addr[2] = ((addr_low >> 16) & 0xFF); mac_addr[3] = ((addr_low >> 24) & 0xFF); mac_addr[4] = (addr_high & 0xFF); mac_addr[5] = ((addr_high >> 8) & 0xFF); E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), e1000_calc_rx_da_crc(mac_addr)); } /* Write Rx addresses to the PHY */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); /* Enable jumbo frame workaround in the MAC */ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); mac_reg &= ~(1 << 14); mac_reg |= (7 << 15); E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); mac_reg = E1000_READ_REG(hw, E1000_RCTL); mac_reg |= E1000_RCTL_SECRC; E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) return ret_val; ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data | (1 << 0)); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) return ret_val; /* Enable jumbo frame workaround in the PHY */ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); data |= (0x37 << 5); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); data &= ~(1 << 13); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (0x1A << 2); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); if (ret_val) return ret_val; ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10)); if (ret_val) return ret_val; } else { /* Write MAC register values back to h/w defaults */ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); mac_reg &= ~(0xF << 14); E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); mac_reg = E1000_READ_REG(hw, E1000_RCTL); mac_reg &= ~E1000_RCTL_SECRC; E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) return ret_val; ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data & ~(1 << 0)); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000_write_kmrn_reg_generic(hw, 
E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) return ret_val; /* Write PHY register values back to h/w defaults */ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); data |= (1 << 13); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (0x8 << 2); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); if (ret_val) return ret_val; ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10)); if (ret_val) return ret_val; } /* re-enable Rx path after enabling/disabling workaround */ return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); } /** * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be * done after every PHY reset. **/ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan"); if (hw->mac.type != e1000_pch2lan) return E1000_SUCCESS; /* Set MDIO slow mode before any other MDIO access */ ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* set MSE higher to enable link to stay up when noise is high */ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); if (ret_val) goto release; /* drop link after the MSE threshold has been reached 5 times */ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * * Workaround to set the K1 beacon duration for 82579 parts **/ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 status_reg = 0; u32 mac_reg; u16 phy_reg; DEBUGFUNC("e1000_k1_workaround_lv"); if (hw->mac.type != e1000_pch2lan) return E1000_SUCCESS; /* Set K1 beacon duration based on 1Gbps speed or otherwise */ ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg); if (ret_val) return ret_val; if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg); if (ret_val) return ret_val; if (status_reg & HV_M_STATUS_SPEED_1000) { u16 pm_phy_reg; mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; /* LV 1G Packet drop issue workaround */ ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL, &pm_phy_reg); if (ret_val) return ret_val; pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, pm_phy_reg); if (ret_val) return ret_val; } else { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; } E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg); } return ret_val; } /** * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware * @hw: pointer to the HW structure * @gate: boolean set to TRUE to gate, FALSE to ungate * * Gate/ungate the automatic PHY
configuration via hardware; perform * the configuration via software instead. **/ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) { u32 extcnf_ctrl; DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan"); if (hw->mac.type < e1000_pch2lan) return; extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (gate) extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; else extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); } /** * e1000_lan_init_done_ich8lan - Check for PHY config completion * @hw: pointer to the HW structure * * Check the appropriate indication that the MAC has finished configuring the * PHY after a software reset. **/ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) { u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; DEBUGFUNC("e1000_lan_init_done_ich8lan"); /* Wait for basic configuration to complete before proceeding */ do { data = E1000_READ_REG(hw, E1000_STATUS); data &= E1000_STATUS_LAN_INIT_DONE; usec_delay(100); } while ((!data) && --loop); /* If basic configuration is incomplete before the above loop * count reaches 0, loading the configuration from NVM will * leave the PHY in a bad state possibly resulting in no link. */ if (loop == 0) DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n"); /* Clear the Init Done bit for the next init event */ data = E1000_READ_REG(hw, E1000_STATUS); data &= ~E1000_STATUS_LAN_INIT_DONE; E1000_WRITE_REG(hw, E1000_STATUS, data); } /** * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset * @hw: pointer to the HW structure **/ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 reg; DEBUGFUNC("e1000_post_phy_reset_ich8lan"); if (hw->phy.ops.check_reset_block(hw)) return E1000_SUCCESS; /* Allow time for h/w to get to quiescent state after reset */ msec_delay(10); /* Perform any necessary post-reset workarounds */ switch (hw->mac.type) { case e1000_pchlan: ret_val = e1000_hv_phy_workarounds_ich8lan(hw); if (ret_val) return ret_val; break; case e1000_pch2lan: ret_val = e1000_lv_phy_workarounds_ich8lan(hw); if (ret_val) return ret_val; break; default: break; } /* Clear the host wakeup bit after lcd reset */ if (hw->mac.type >= e1000_pchlan) { hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg); reg &= ~BM_WUC_HOST_WU_BIT; hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg); } /* Configure the LCD with the extended configuration region in NVM */ ret_val = e1000_sw_lcd_config_ich8lan(hw); if (ret_val) return ret_val; /* Configure the LCD with the OEM bits in NVM */ ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE); if (hw->mac.type == e1000_pch2lan) { /* Ungate automatic PHY configuration on non-managed 82579 */ if (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) { msec_delay(10); e1000_gate_hw_phy_config_ich8lan(hw, FALSE); } /* Set EEE LPI Update Timer to 200usec */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_UPDATE_TIMER, 0x1387); hw->phy.ops.release(hw); } return ret_val; } /** * e1000_phy_hw_reset_ich8lan - Performs a PHY reset * @hw: pointer to the HW structure * * Resets the PHY. * This is a function pointer entry point called by drivers * or other shared routines.
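* * On a non-managed 82579 (no firmware-valid indication in FWSM), automatic PHY configuration is gated off before the reset below and ungated again in the post-reset path.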
**/ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_phy_hw_reset_ich8lan"); /* Gate automatic PHY configuration by hardware on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, TRUE); ret_val = e1000_phy_hw_reset_generic(hw); if (ret_val) return ret_val; return e1000_post_phy_reset_ich8lan(hw); } /** * e1000_set_lplu_state_pchlan - Set Low Power Link Up state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU state according to the active flag. For PCH, if the OEM write * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set * the phy speed. This function will manually set the LPLU bit and restart * auto-neg as hw would do. D3 and D0 LPLU will call the same function * since it configures the same bit. **/ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) { s32 ret_val; u16 oem_reg; DEBUGFUNC("e1000_set_lplu_state_pchlan"); ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg); if (ret_val) return ret_val; if (active) oem_reg |= HV_OEM_BITS_LPLU; else oem_reg &= ~HV_OEM_BITS_LPLU; if (!hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg); } /** * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU D0 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 phy_ctrl; s32 ret_val = E1000_SUCCESS; u16 data; DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); if (phy->type == e1000_phy_ife) return E1000_SUCCESS; phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); if (active) { phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else { phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained.
*/ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } return E1000_SUCCESS; } /** * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU D3 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 phy_ctrl; s32 ret_val = E1000_SUCCESS; u16 data; DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); if (!active) { phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); } return ret_val; } /** * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 * @hw: pointer to the HW structure * @bank: pointer to the variable that returns the active bank * * Reads signature byte from the NVM using the flash access registers. * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
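* * For parts without a usable EECD indication, the signature byte is read directly: act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1 addresses the high byte of word 0x13, and a bank is treated as valid when that byte, masked with E1000_ICH_NVM_VALID_SIG_MASK, equals E1000_ICH_NVM_SIG_VALUE (the 10b pattern in bits 15:14).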
**/ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) { u32 eecd; struct e1000_nvm_info *nvm = &hw->nvm; u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; u8 sig_byte = 0; s32 ret_val; DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan"); switch (hw->mac.type) { case e1000_ich8lan: case e1000_ich9lan: eecd = E1000_READ_REG(hw, E1000_EECD); if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == E1000_EECD_SEC1VAL_VALID_MASK) { if (eecd & E1000_EECD_SEC1VAL) *bank = 1; else *bank = 0; return E1000_SUCCESS; } DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n"); /* fall-thru */ default: /* set bank to 0 in case flash read fails */ *bank = 0; /* Check bank 0 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 0; return E1000_SUCCESS; } /* Check bank 1 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + bank1_offset, &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 1; return E1000_SUCCESS; } DEBUGOUT("ERROR: No valid NVM bank present\n"); return -E1000_ERR_NVM; } } /** * e1000_read_nvm_ich8lan - Read word(s) from the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. * @words: Size of data to read in words * @data: Pointer to the word(s) to read at offset. * * Reads a word(s) from the NVM using the flash access registers. **/ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 act_offset; s32 ret_val = E1000_SUCCESS; u32 bank = 0; u16 i, word; DEBUGFUNC("e1000_read_nvm_ich8lan"); if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } nvm->ops.acquire(hw); ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val != E1000_SUCCESS) { DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); bank = 0; } act_offset = (bank) ? nvm->flash_bank_size : 0; act_offset += offset; ret_val = E1000_SUCCESS; for (i = 0; i < words; i++) { if (dev_spec->shadow_ram[offset+i].modified) { data[i] = dev_spec->shadow_ram[offset+i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, act_offset + i, &word); if (ret_val) break; data[i] = word; } } nvm->ops.release(hw); out: if (ret_val) DEBUGOUT1("NVM read error: %d\n", ret_val); return ret_val; } /** * e1000_flash_cycle_init_ich8lan - Initialize flash * @hw: pointer to the HW structure * * This function does initial flash setup so that a new read/write/erase cycle * can be started. **/ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) { union ich8_hws_flash_status hsfsts; s32 ret_val = -E1000_ERR_NVM; DEBUGFUNC("e1000_flash_cycle_init_ich8lan"); hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); /* Check if the flash descriptor is valid */ if (!hsfsts.hsf_status.fldesvalid) { DEBUGOUT("Flash descriptor invalid. 
SW Sequencing must be used.\n"); return -E1000_ERR_NVM; } /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or * FDONE bit should be changed in the hardware so that it * is 1 after hardware reset, which can then be used as an * indication whether a cycle is in progress or has been * completed. */ if (!hsfsts.hsf_status.flcinprog) { /* There is no cycle running at present, * so we can start a cycle. * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = E1000_SUCCESS; } else { s32 i; /* Otherwise poll for sometime so the current * cycle has a chance to end before giving up. */ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (!hsfsts.hsf_status.flcinprog) { ret_val = E1000_SUCCESS; break; } usec_delay(1); } if (ret_val == E1000_SUCCESS) { /* Successful in waiting for previous cycle to timeout, * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); } else { DEBUGOUT("Flash controller busy, cannot get access\n"); } } return ret_val; } /** * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) * @hw: pointer to the HW structure * @timeout: maximum time to wait for completion * * This function starts a flash cycle and waits for its completion. **/ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) { union ich8_hws_flash_ctrl hsflctl; union ich8_hws_flash_status hsfsts; u32 i = 0; DEBUGFUNC("e1000_flash_cycle_ich8lan"); /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ do { hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcdone) break; usec_delay(1); } while (i++ < timeout); if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) return E1000_SUCCESS; return -E1000_ERR_NVM; } /** * e1000_read_flash_word_ich8lan - Read word from flash * @hw: pointer to the HW structure * @offset: offset to data location * @data: pointer to the location for storing the data * * Reads the flash word at offset into data. Offset is converted * to bytes before read. **/ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data) { DEBUGFUNC("e1000_read_flash_word_ich8lan"); if (!data) return -E1000_ERR_NVM; /* Must convert offset into bytes. */ offset <<= 1; return e1000_read_flash_data_ich8lan(hw, offset, 2, data); } /** * e1000_read_flash_byte_ich8lan - Read byte from flash * @hw: pointer to the HW structure * @offset: The offset of the byte to read. * @data: Pointer to a byte to store the value read. * * Reads a single byte from the NVM using the flash access registers. 
**/ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 *data) { s32 ret_val; u16 word = 0; ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); if (ret_val) return ret_val; *data = (u8)word; return E1000_SUCCESS; } /** * e1000_read_flash_data_ich8lan - Read byte or word from NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the byte or word to read. * @size: Size of data to read, 1=byte 2=word * @data: Pointer to the word to store the value read. * * Reads a byte or word from the NVM using the flash access registers. **/ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; u32 flash_data = 0; s32 ret_val = -E1000_ERR_NVM; u8 count = 0; DEBUGFUNC("e1000_read_flash_data_ich8lan"); if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { usec_delay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val != E1000_SUCCESS) break; hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_READ_COMMAND_TIMEOUT); /* Check if FCERR is set to 1; if so, clear it * and try the whole sequence a few more times, else * read in (shift in) the Flash Data0; the data is * returned least significant byte first */ if (ret_val == E1000_SUCCESS) { flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); if (size == 1) *data = (u8)(flash_data & 0x000000FF); else if (size == 2) *data = (u16)(flash_data & 0x0000FFFF); break; } else { /* If we've gotten here, then things are probably * completely hosed, but if the error condition is * detected, it won't hurt to give it another try... * ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) { /* Repeat for some time before giving up. */ continue; } else if (!hsfsts.hsf_status.flcdone) { DEBUGOUT("Timeout error - flash cycle did not complete.\n"); break; } } } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); return ret_val; } /** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure * @offset: The offset (in words) of the word(s) to write. * @words: Size of data to write in words * @data: Pointer to the word(s) to write at offset. * * Writes the word(s) to the shadow RAM; they are committed to the flash * when the NVM checksum is updated. 
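 * A hedged usage sketch (offset and value are illustrative; hw->nvm.ops
 * is the same ops table exercised elsewhere in this file):
 *
 *	u16 val = 0x1234;
 *	hw->nvm.ops.write(hw, offset, 1, &val);	/* stage in shadow RAM */
 *	hw->nvm.ops.update(hw);			/* commit to the flash */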
**/ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u16 i; DEBUGFUNC("e1000_write_nvm_ich8lan"); if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } nvm->ops.acquire(hw); for (i = 0; i < words; i++) { dev_spec->shadow_ram[offset+i].modified = TRUE; dev_spec->shadow_ram[offset+i].value = data[i]; } nvm->ops.release(hw); return E1000_SUCCESS; } /** * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, * which writes the checksum to the shadow ram. The changes in the shadow * ram are then committed to the EEPROM by processing each bank at a time * checking for the modified bit and writing only the pending changes. * After a successful commit, the shadow ram is cleared and is ready for * future writes. **/ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; u16 data; DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); ret_val = e1000_update_nvm_checksum_generic(hw); if (ret_val) goto out; if (nvm->type != e1000_nvm_flash_sw) goto out; nvm->ops.acquire(hw); /* We're writing to the opposite bank so if we're on bank 1, * write to bank 0 etc. We also need to erase the segment that * is going to be written */ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val != E1000_SUCCESS) { DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); bank = 0; } if (bank == 0) { new_bank_offset = nvm->flash_bank_size; old_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); if (ret_val) goto release; } else { old_bank_offset = nvm->flash_bank_size; new_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); if (ret_val) goto release; } for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { /* Determine whether to write the value stored * in the other NVM bank or a modified value stored * in the shadow RAM */ if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, i + old_bank_offset, &data); if (ret_val) break; } /* If the word is 0x13, then make sure the signature bits * (15:14) are 11b until the commit has completed. * This will allow us to write 10b which indicates the * signature is valid. We want to do this after the write * has completed so that we don't mark the segment valid * while the write is still in progress */ if (i == E1000_ICH_NVM_SIG_WORD) data |= E1000_ICH_NVM_SIG_MASK; /* Convert offset to bytes. */ act_offset = (i + new_bank_offset) << 1; usec_delay(100); /* Write the bytes to the new bank. */ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, (u8)data); if (ret_val) break; usec_delay(100); ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset + 1, (u8)(data >> 8)); if (ret_val) break; } /* Don't bother writing the segment valid bits if sector * programming failed. 
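 * To recap the commit sequence above: the inactive bank was erased, each
 * of the E1000_SHADOW_RAM_WORDS words was copied over (taking the shadow
 * RAM value when modified, the old bank's value otherwise), and the
 * signature word was kept at 11b; the signature fix-up below then marks
 * the new bank valid and the old bank invalid.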
*/ if (ret_val) { DEBUGOUT("Flash commit failed.\n"); goto release; } /* Finally, validate the new segment by setting bits 15:14 * to 10b in word 0x13. This can be done without an * erase because these bits start out as 11b and we only * need to clear bit 14 */ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); if (ret_val) goto release; data &= 0xBFFF; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, (u8)(data >> 8)); if (ret_val) goto release; /* And invalidate the previously valid segment by writing 0 to * the high byte of its signature word (0x13). This can be * done without an erase because flash erase sets all bits * to 1's. We can write 1's to 0's without an erase */ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); if (ret_val) goto release; /* Great! Everything worked, we can now clear the cached entries. */ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { dev_spec->shadow_ram[i].modified = FALSE; dev_spec->shadow_ram[i].value = 0xFFFF; } release: nvm->ops.release(hw); /* Reload the EEPROM, or else modifications will not appear * until after the next adapter reset. */ if (!ret_val) { nvm->ops.reload(hw); msec_delay(10); } out: if (ret_val) DEBUGOUT1("NVM update error: %d\n", ret_val); return ret_val; } /** * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum * @hw: pointer to the HW structure * * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. * If the bit is 0, the EEPROM was modified but the checksum was not * recalculated, in which case we need to calculate the checksum and set bit 6. **/ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 data; u16 word; u16 valid_csum_mask; DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan"); /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, * the checksum needs to be fixed. This bit is an indication that * the NVM was prepared by OEM software and did not calculate * the checksum...a likely scenario. */ switch (hw->mac.type) { case e1000_pch_lpt: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; default: word = NVM_FUTURE_INIT_WORD1; valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; break; } ret_val = hw->nvm.ops.read(hw, word, 1, &data); if (ret_val) return ret_val; if (!(data & valid_csum_mask)) { data |= valid_csum_mask; ret_val = hw->nvm.ops.write(hw, word, 1, &data); if (ret_val) return ret_val; ret_val = hw->nvm.ops.update(hw); if (ret_val) return ret_val; } return e1000_validate_nvm_checksum_generic(hw); } /** * e1000_write_flash_data_ich8lan - Writes bytes to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the byte/word to write. * @size: Size of data to write, 1=byte 2=word * @data: The byte(s) to write to the NVM. * * Writes one/two bytes to the NVM using the flash access registers. 
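 * For example, writing a single byte of value 0xA5 at byte offset 0x10
 * (a sketch; the offset and value are made up):
 *
 *	ret_val = e1000_write_flash_data_ich8lan(hw, 0x10, 1, 0xA5);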
**/ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; u32 flash_data = 0; s32 ret_val; u8 count = 0; DEBUGFUNC("e1000_write_flash_data_ich8lan"); if (size < 1 || size > 2 || data > size * 0xff || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { usec_delay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val != E1000_SUCCESS) break; hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); if (size == 1) flash_data = (u32)data & 0x00FF; else flash_data = (u32)data; E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); /* Check if FCERR is set to 1; if so, clear it * and try the whole sequence a few more times, else we are done */ ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_WRITE_COMMAND_TIMEOUT); if (ret_val == E1000_SUCCESS) break; /* If we're here, then things are most likely * completely hosed, but if the error condition * is detected, it won't hurt to give it another * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) /* Repeat for some time before giving up. */ continue; if (!hsfsts.hsf_status.flcdone) { DEBUGOUT("Timeout error - flash cycle did not complete.\n"); break; } } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); return ret_val; } /** * e1000_write_flash_byte_ich8lan - Write a single byte to NVM * @hw: pointer to the HW structure * @offset: The index of the byte to write. * @data: The byte to write to the NVM. * * Writes a single byte to the NVM using the flash access registers. **/ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 data) { u16 word = (u16)data; DEBUGFUNC("e1000_write_flash_byte_ich8lan"); return e1000_write_flash_data_ich8lan(hw, offset, 1, word); } /** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure * @offset: The offset of the byte to write. * @byte: The byte to write to the NVM. * * Writes a single byte to the NVM using the flash access registers. * Goes through a retry algorithm before giving up. **/ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 byte) { s32 ret_val; u16 program_retries; DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (!ret_val) return ret_val; for (program_retries = 0; program_retries < 100; program_retries++) { DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); usec_delay(100); ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (ret_val == E1000_SUCCESS) break; } if (program_retries == 100) return -E1000_ERR_NVM; return E1000_SUCCESS; } /** * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM * @hw: pointer to the HW structure * @bank: 0 for first bank, 1 for second bank, etc. * * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. * Bank N starts at byte address 4096 * N + flash_reg_addr. 
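 * With the common 4 KB erase-sector size this comes down to one erase
 * cycle per bank; for bank 1 the cycle is issued at flash_base_addr plus
 * flash_bank_size * 2 bytes (the bank size is kept in 16-bit words,
 * hence the doubling in the code below).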
**/ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) { struct e1000_nvm_info *nvm = &hw->nvm; union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; /* bank size is in 16bit words - adjust to bytes */ u32 flash_bank_size = nvm->flash_bank_size * 2; s32 ret_val; s32 count = 0; s32 j, iteration, sector_size; DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); /* Determine HW Sector size: Read BERASE bits of hw flash status * register * 00: The Hw sector is 256 bytes, hence we need to erase 16 * consecutive sectors. The start index for the nth Hw sector * can be calculated as = bank * 4096 + n * 256 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. * The start index for the nth Hw sector can be calculated * as = bank * 4096 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 * (ich9 only, otherwise error condition) * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 */ switch (hsfsts.hsf_status.berasesz) { case 0: /* Hw sector size 256 */ sector_size = ICH_FLASH_SEG_SIZE_256; iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; break; case 1: sector_size = ICH_FLASH_SEG_SIZE_4K; iteration = 1; break; case 2: sector_size = ICH_FLASH_SEG_SIZE_8K; iteration = 1; break; case 3: sector_size = ICH_FLASH_SEG_SIZE_64K; iteration = 1; break; default: return -E1000_ERR_NVM; } /* Start with the base address, then add the sector offset. */ flash_linear_addr = hw->nvm.flash_base_addr; flash_linear_addr += (bank) ? flash_bank_size : 0; for (j = 0; j < iteration; j++) { do { u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) return ret_val; /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); /* Write the last 24 bits of an index within the * block into Flash Linear address field in Flash * Address. */ flash_linear_addr += (j * sector_size); E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, timeout); if (ret_val == E1000_SUCCESS) break; /* Check if FCERR is set to 1. If 1, * clear it and try the whole sequence * a few more times, else we are done */ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) /* repeat for some time before giving up */ continue; else if (!hsfsts.hsf_status.flcdone) return ret_val; } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); } return E1000_SUCCESS; } /** * e1000_valid_led_default_ich8lan - Set the default LED settings * @hw: pointer to the HW structure * @data: Pointer to the LED settings * * Reads the LED default settings from the NVM to data. If the NVM LED * setting is all 0's or F's, set the LED default to a valid LED default * setting. 
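 * In other words, the NVM word is trusted only when it is neither
 * ID_LED_RESERVED_0000 nor ID_LED_RESERVED_FFFF; otherwise
 * ID_LED_DEFAULT_ICH8LAN is substituted.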
**/ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) { s32 ret_val; DEBUGFUNC("e1000_valid_led_default_ich8lan"); ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT_ICH8LAN; return E1000_SUCCESS; } /** * e1000_id_led_init_pchlan - store LED configurations * @hw: pointer to the HW structure * * PCH does not control LEDs via the LEDCTL register, rather it uses * the PHY LED configuration register. * * PCH also does not have an "always on" or "always off" mode which * complicates the ID feature. Instead of using the "on" mode to indicate * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()), * use "link_up" mode. The LEDs will still ID on request if there is no * link based on logic in e1000_led_[on|off]_pchlan(). **/ static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; u16 data, i, temp, shift; DEBUGFUNC("e1000_id_led_init_pchlan"); /* Get default ID LED modes */ ret_val = hw->nvm.ops.valid_led_default(hw, &data); if (ret_val) return ret_val; mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); mac->ledctl_mode1 = mac->ledctl_default; mac->ledctl_mode2 = mac->ledctl_default; for (i = 0; i < 4; i++) { temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; shift = (i * 5); switch (temp) { case ID_LED_ON1_DEF2: case ID_LED_ON1_ON2: case ID_LED_ON1_OFF2: mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode1 |= (ledctl_on << shift); break; case ID_LED_OFF1_DEF2: case ID_LED_OFF1_ON2: case ID_LED_OFF1_OFF2: mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode1 |= (ledctl_off << shift); break; default: /* Do nothing */ break; } switch (temp) { case ID_LED_DEF1_ON2: case ID_LED_ON1_ON2: case ID_LED_OFF1_ON2: mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode2 |= (ledctl_on << shift); break; case ID_LED_DEF1_OFF2: case ID_LED_ON1_OFF2: case ID_LED_OFF1_OFF2: mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode2 |= (ledctl_off << shift); break; default: /* Do nothing */ break; } } return E1000_SUCCESS; } /** * e1000_get_bus_info_ich8lan - Get/Set the bus type and width * @hw: pointer to the HW structure * * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability * register, so the bus width is hard coded. **/ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; s32 ret_val; DEBUGFUNC("e1000_get_bus_info_ich8lan"); ret_val = e1000_get_bus_info_pcie_generic(hw); /* ICH devices are "PCI Express"-ish. They have * a configuration space, but do not contain * PCI Express Capability registers, so bus width * must be hardcoded. */ if (bus->width == e1000_bus_width_unknown) bus->width = e1000_bus_width_pcie_x1; return ret_val; } /** * e1000_reset_hw_ich8lan - Reset the hardware * @hw: pointer to the HW structure * * Does a full reset of the hardware which includes a reset of the PHY and * MAC. 
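 * Concretely, the global reset below is triggered by setting
 * E1000_CTRL_RST in the CTRL register, with E1000_CTRL_PHY_RST OR'd in
 * when the PHY is not reset-blocked, so that MAC and PHY are reset
 * together.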
**/ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u16 kum_cfg; u32 ctrl, reg; s32 ret_val; DEBUGFUNC("e1000_reset_hw_ich8lan"); /* Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000_disable_pcie_master_generic(hw); if (ret_val) DEBUGOUT("PCI-E Master disable polling has failed.\n"); DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); /* Disable the Transmit and Receive units. Then delay to allow * any pending transactions to complete before we hit the MAC * with the global reset. */ E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); msec_delay(10); /* Workaround for ICH8 bit corruption issue in FIFO memory */ if (hw->mac.type == e1000_ich8lan) { /* Set Tx and Rx buffer allocation to 8k apiece. */ E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K); /* Set Packet Buffer Size to 16k. */ E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K); } if (hw->mac.type == e1000_pchlan) { /* Save the NVM K1 bit setting */ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); if (ret_val) return ret_val; if (kum_cfg & E1000_NVM_K1_ENABLE) dev_spec->nvm_k1_enabled = TRUE; else dev_spec->nvm_k1_enabled = FALSE; } ctrl = E1000_READ_REG(hw, E1000_CTRL); if (!hw->phy.ops.check_reset_block(hw)) { /* Full-chip reset requires MAC and PHY reset at the same * time to make sure the interface between MAC and the * external PHY is reset. */ ctrl |= E1000_CTRL_PHY_RST; /* Gate automatic PHY configuration by hardware on * non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, TRUE); } ret_val = e1000_acquire_swflag_ich8lan(hw); DEBUGOUT("Issuing a global reset to ich8lan\n"); E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST)); /* cannot issue a flush here because it hangs the hardware */ msec_delay(20); /* Set Phy Config Counter to 50msec */ if (hw->mac.type == e1000_pch2lan) { reg = E1000_READ_REG(hw, E1000_FEXTNVM3); reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg); } if (!ret_val) E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); if (ctrl & E1000_CTRL_PHY_RST) { ret_val = hw->phy.ops.get_cfg_done(hw); if (ret_val) return ret_val; ret_val = e1000_post_phy_reset_ich8lan(hw); if (ret_val) return ret_val; } /* For PCH, this write will make sure that any noise * will be detected as a CRC error and be dropped rather than show up * as a bad packet to the DMA engine. 
*/ if (hw->mac.type == e1000_pchlan) E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_READ_REG(hw, E1000_ICR); reg = E1000_READ_REG(hw, E1000_KABGTXD); reg |= E1000_KABGTXD_BGSQLBIAS; E1000_WRITE_REG(hw, E1000_KABGTXD, reg); return E1000_SUCCESS; } /** * e1000_init_hw_ich8lan - Initialize the hardware * @hw: pointer to the HW structure * * Prepares the hardware for transmit and receive by doing the following: * - initialize hardware bits * - initialize LED identification * - setup receive address registers * - setup flow control * - setup transmit descriptors * - clear statistics **/ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 ctrl_ext, txdctl, snoop; s32 ret_val; u16 i; DEBUGFUNC("e1000_init_hw_ich8lan"); e1000_initialize_hw_bits_ich8lan(hw); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); /* An error is not fatal and we should not stop init due to this */ if (ret_val) DEBUGOUT("Error initializing identification LED\n"); /* Setup the receive address. */ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* The 82578 Rx buffer will stall if wakeup is enabled in host and * the ME. Disable wakeup by clearing the host wakeup bit. * Reset the phy after disabling host wakeup to reset the Rx buffer. */ if (hw->phy.type == e1000_phy_82578) { hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i); i &= ~BM_WUC_HOST_WU_BIT; hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i); ret_val = e1000_phy_hw_reset_ich8lan(hw); if (ret_val) return ret_val; } /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); /* Set the transmit descriptor write-back policy for both queues */ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB); txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | E1000_TXDCTL_MAX_TX_DESC_PREFETCH); E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB); txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | E1000_TXDCTL_MAX_TX_DESC_PREFETCH); E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); /* ICH8 has opposite polarity of no_snoop bits. * By default, we should use snoop behavior. */ if (mac->type == e1000_ich8lan) snoop = PCIE_ICH8_SNOOP_ALL; else snoop = (u32) ~(PCIE_NO_SNOOP_ALL); e1000_set_pcie_no_snoop_generic(hw, snoop); ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_RO_DIS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_ich8lan(hw); return ret_val; } /** * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits * @hw: pointer to the HW structure * * Sets/Clears required hardware bits necessary for correctly setting up the * hardware for transmit and receive. 
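 * Each step follows the same read-modify-write pattern, e.g. for the
 * transmit descriptor control registers:
 *
 *	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
 *	reg |= (1 << 22);
 *	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);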
**/ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) { u32 reg; DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); /* Extended Device Control */ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); reg |= (1 << 22); /* Enable PHY low-power state when MAC is at D3 w/o WoL */ if (hw->mac.type >= e1000_pchlan) reg |= E1000_CTRL_EXT_PHYPDEN; E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Transmit Descriptor Control 0 */ reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); reg |= (1 << 22); E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); reg |= (1 << 22); E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = E1000_READ_REG(hw, E1000_TARC(0)); if (hw->mac.type == e1000_ich8lan) reg |= (1 << 28) | (1 << 29); reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); E1000_WRITE_REG(hw, E1000_TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = E1000_READ_REG(hw, E1000_TARC(1)); if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) reg &= ~(1 << 28); else reg |= (1 << 28); reg |= (1 << 24) | (1 << 26) | (1 << 30); E1000_WRITE_REG(hw, E1000_TARC(1), reg); /* Device Status */ if (hw->mac.type == e1000_ich8lan) { reg = E1000_READ_REG(hw, E1000_STATUS); - reg &= ~(1 << 31); + reg &= ~(1U << 31); E1000_WRITE_REG(hw, E1000_STATUS, reg); } /* work-around descriptor data corruption issue during nfs v2 udp * traffic, just disable the nfs filtering capability */ reg = E1000_READ_REG(hw, E1000_RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); /* Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. */ if (hw->mac.type == e1000_ich8lan) reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); E1000_WRITE_REG(hw, E1000_RFCTL, reg); /* Enable ECC on Lynxpoint */ if (hw->mac.type == e1000_pch_lpt) { reg = E1000_READ_REG(hw, E1000_PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; E1000_WRITE_REG(hw, E1000_PBECCSTS, reg); reg = E1000_READ_REG(hw, E1000_CTRL); reg |= E1000_CTRL_MEHE; E1000_WRITE_REG(hw, E1000_CTRL, reg); } return; } /** * e1000_setup_link_ich8lan - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_setup_link_ich8lan"); if (hw->phy.ops.check_reset_block(hw)) return E1000_SUCCESS; /* ICH parts do not have a word in the NVM to determine * the default flow control setting, so we explicitly * set it to full. */ if (hw->fc.requested_mode == e1000_fc_default) hw->fc.requested_mode = e1000_fc_full; /* Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Continue to configure the copper link. 
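 * (setup_physical_interface dispatches to one of the copper link setup
 * routines below, e.g. e1000_setup_copper_link_ich8lan() or
 * e1000_setup_copper_link_pch_lpt(), depending on the MAC type.)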
*/ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), hw->fc.pause_time); if (ret_val) return ret_val; } return e1000_set_fc_watermarks_generic(hw); } /** * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface * @hw: pointer to the HW structure * * Configures the kumeran interface to the PHY to wait the appropriate time * when polling the PHY, then call the generic setup_copper_link to finish * configuring the copper link. **/ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u16 reg_data; DEBUGFUNC("e1000_setup_copper_link_ich8lan"); ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Set the mac to wait the maximum time between each iteration * and increase the max iterations when polling the phy; * this fixes erroneous timeouts at 10Mbps. */ ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, &reg_data); if (ret_val) return ret_val; reg_data |= 0x3F; ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, reg_data); if (ret_val) return ret_val; switch (hw->phy.type) { case e1000_phy_igp_3: ret_val = e1000_copper_link_setup_igp(hw); if (ret_val) return ret_val; break; case e1000_phy_bm: case e1000_phy_82578: ret_val = e1000_copper_link_setup_m88(hw); if (ret_val) return ret_val; break; case e1000_phy_82577: case e1000_phy_82579: ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; break; case e1000_phy_ife: ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &reg_data); if (ret_val) return ret_val; reg_data &= ~IFE_PMC_AUTO_MDIX; switch (hw->phy.mdix) { case 1: reg_data &= ~IFE_PMC_FORCE_MDIX; break; case 2: reg_data |= IFE_PMC_FORCE_MDIX; break; case 0: default: reg_data |= IFE_PMC_AUTO_MDIX; break; } ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, reg_data); if (ret_val) return ret_val; break; default: break; } return e1000_setup_copper_link_generic(hw); } /** * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface * @hw: pointer to the HW structure * * Calls the PHY specific link setup function and then calls the * generic setup_copper_link to finish configuring the link for * Lynxpoint PCH devices **/ static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; DEBUGFUNC("e1000_setup_copper_link_pch_lpt"); ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; return e1000_setup_copper_link_generic(hw); } /** * e1000_get_link_up_info_ich8lan - Get current link speed and duplex * @hw: pointer to the HW structure * @speed: pointer to store current link speed * @duplex: pointer to store the current link duplex * * Calls the generic get_speed_and_duplex to retrieve the current link * information and then calls the Kumeran lock loss workaround for links at * gigabit speeds. 
**/ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, u16 *duplex) { s32 ret_val; DEBUGFUNC("e1000_get_link_up_info_ich8lan"); ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); if (ret_val) return ret_val; if ((hw->mac.type == e1000_ich8lan) && (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); } return ret_val; } /** * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround * @hw: pointer to the HW structure * * Work-around for 82566 Kumeran PCS lock loss: * On link status change (i.e. PCI reset, speed change) and link is up and * speed is gigabit: * 0) if workaround is optionally disabled do nothing * 1) wait 1ms for Kumeran link to come up * 2) check Kumeran Diagnostic register PCS lock loss bit * 3) if not set the link is locked (all is good), otherwise... * 4) reset the PHY * 5) repeat up to 10 times * Note: this is only called for IGP3 copper when speed is 1gb. **/ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; u16 i, data; bool link; DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan"); if (!dev_spec->kmrn_lock_loss_workaround_enabled) return E1000_SUCCESS; /* Make sure link is up before proceeding. If not, just return. * Attempting this while the link is negotiating fouled up link * stability */ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (!link) return E1000_SUCCESS; for (i = 0; i < 10; i++) { /* read once to clear */ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); if (ret_val) return ret_val; /* and again to get new status */ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); if (ret_val) return ret_val; /* check for PCS lock */ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) return E1000_SUCCESS; /* Issue PHY reset */ hw->phy.ops.reset(hw); msec_delay_irq(5); } /* Disable GigE link negotiation */ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); /* Call gig speed drop workaround on Gig disable before accessing * any PHY registers */ e1000_gig_downshift_workaround_ich8lan(hw); /* unable to acquire PCS lock */ return -E1000_ERR_PHY; } /** * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state * @hw: pointer to the HW structure * @state: boolean value used to set the current Kumeran workaround state * * If ICH8, set the current Kumeran workaround state (TRUE = enabled, * FALSE = disabled). 
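 * A minimal usage sketch:
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);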
**/ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan"); if (hw->mac.type != e1000_ich8lan) { DEBUGOUT("Workaround applies to ICH8 only.\n"); return; } dev_spec->kmrn_lock_loss_workaround_enabled = state; return; } /** * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 * @hw: pointer to the HW structure * * Workaround for 82566 power-down on D3 entry: * 1) disable gigabit link * 2) write VR power-down enable * 3) read it back * Continue if successful, else issue LCD reset and repeat **/ void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) { u32 reg; u16 data; u8 retry = 0; DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan"); if (hw->phy.type != e1000_phy_igp_3) return; /* Try the workaround twice (if needed) */ do { /* Disable link */ reg = E1000_READ_REG(hw, E1000_PHY_CTRL); reg |= (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg); /* Call gig speed drop workaround on Gig disable before * accessing any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); /* Write VR power-down enable */ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; hw->phy.ops.write_reg(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); /* Read it back and test */ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) break; /* Issue PHY reset and repeat at most one more time */ reg = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST); retry++; } while (retry); } /** * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working * @hw: pointer to the HW structure * * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC), * LPLU, Gig disable, MDIC PHY reset): * 1) Set Kumeran Near-end loopback * 2) Clear Kumeran Near-end loopback * Should only be called for ICH8[m] devices with any 1G Phy. **/ void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 reg_data; DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan"); if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife)) return; ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, &reg_data); if (ret_val) return; reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); if (ret_val) return; reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); } /** * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx * @hw: pointer to the HW structure * * During S0 to Sx transition, it is possible the link remains at gig * instead of negotiating to a lower speed. Before going to Sx, set * 'Gig Disable' to force link speed negotiation to a lower speed based on * the LPLU setting in the NVM or custom setting. For PCH and newer parts, * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also * needs to be written. * Parts that support (and are linked to a partner which supports) EEE in * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power * than 10Mbps w/o EEE. 
**/ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; DEBUGFUNC("e1000_suspend_workarounds_ich8lan"); phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; if (hw->phy.type == e1000_phy_i217) { u16 phy_reg, device_id = hw->device_id; if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; if (!dev_spec->eee_disable) { u16 eee_advert; ret_val = e1000_read_emi_reg_locked(hw, I217_EEE_ADVERTISEMENT, &eee_advert); if (ret_val) goto release; /* Disable LPLU if both link partners support 100BaseT * EEE and 100Full is advertised on both ends of the * link, and enable Auto Enable LPI since there will * be no driver to enable LPI while in Sx. */ if ((eee_advert & I82579_EEE_100_SUPPORTED) && (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) && (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU); /* Set Auto Enable LPI after link up */ hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); } } /* For i217 Intel Rapid Start Technology support, * when the system is going into Sx and no manageability engine * is present, the driver must configure proxy to reset only on * power good. LPI (Low Power Idle) state must also reset only * on power good, as well as the MTA (Multicast table array). * The SMBus release must also be disabled on LCD reset. */ if (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Enable proxy to reset only on power good. */ hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL, &phy_reg); phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, phy_reg); /* Set bit enable LPI (EEE) to reset only on * power good. */ hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg); phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg); /* Disable the SMB release on LCD reset. */ hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg); phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); } /* Enable MTA to reset for Intel Rapid Start Technology * Support */ hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg); phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); release: hw->phy.ops.release(hw); } out: E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); if (hw->mac.type >= e1000_pchlan) { e1000_oem_bits_config_ich8lan(hw, FALSE); /* Reset PHY to activate OEM bits on 82577/8 */ if (hw->mac.type == e1000_pchlan) e1000_phy_hw_reset_generic(hw); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; e1000_write_smbus_addr(hw); hw->phy.ops.release(hw); } return; } /** * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 * @hw: pointer to the HW structure * * During Sx to S0 transitions on non-managed devices or managed devices * on which PHY resets are not blocked, if the PHY registers cannot be * accessed properly by the s/w, toggle the LANPHYPC value to power cycle * the PHY. 
* On i217, setup Intel Rapid Start Technology. **/ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_resume_workarounds_pchlan"); if (hw->mac.type < e1000_pch2lan) return; ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) { DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val); return; } /* For i217 Intel Rapid Start Technology support, when the system * is transitioning from Sx and no manageability engine is present, * configure SMBus to restore on reset, disable proxy, and enable * the reset on MTA (Multicast table array). */ if (hw->phy.type == e1000_phy_i217) { u16 phy_reg; ret_val = hw->phy.ops.acquire(hw); if (ret_val) { DEBUGOUT("Failed to setup iRST\n"); return; } /* Clear Auto Enable LPI after link up */ hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); if (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Restore clear on SMB if no manageability engine * is present */ ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg); if (ret_val) goto release; phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); /* Disable Proxy */ hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0); } /* Enable reset on MTA */ ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg); if (ret_val) goto release; phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); release: if (ret_val) DEBUGOUT1("Error %d in resume workarounds\n", ret_val); hw->phy.ops.release(hw); } } /** * e1000_cleanup_led_ich8lan - Restore the default LED operation * @hw: pointer to the HW structure * * Return the LED back to the default configuration. **/ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_cleanup_led_ich8lan"); if (hw->phy.type == e1000_phy_ife) return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); return E1000_SUCCESS; } /** * e1000_led_on_ich8lan - Turn LEDs on * @hw: pointer to the HW structure * * Turn on the LEDs. **/ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_led_on_ich8lan"); if (hw->phy.type == e1000_phy_ife) return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); return E1000_SUCCESS; } /** * e1000_led_off_ich8lan - Turn LEDs off * @hw: pointer to the HW structure * * Turn off the LEDs. **/ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_led_off_ich8lan"); if (hw->phy.type == e1000_phy_ife) return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); return E1000_SUCCESS; } /** * e1000_setup_led_pchlan - Configures SW controllable LED * @hw: pointer to the HW structure * * This prepares the SW controllable LED for use. **/ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) { DEBUGFUNC("e1000_setup_led_pchlan"); return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); } /** * e1000_cleanup_led_pchlan - Restore the default LED operation * @hw: pointer to the HW structure * * Return the LED back to the default configuration. 
**/ static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) { DEBUGFUNC("e1000_cleanup_led_pchlan"); return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); } /** * e1000_led_on_pchlan - Turn LEDs on * @hw: pointer to the HW structure * * Turn on the LEDs. **/ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) { u16 data = (u16)hw->mac.ledctl_mode2; u32 i, led; DEBUGFUNC("e1000_led_on_pchlan"); /* If no link, then turn LED on by setting the invert bit * for each LED whose mode is "link_up" in ledctl_mode2. */ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { for (i = 0; i < 3; i++) { led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; if ((led & E1000_PHY_LED0_MODE_MASK) != E1000_LEDCTL_MODE_LINK_UP) continue; if (led & E1000_PHY_LED0_IVRT) data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); else data |= (E1000_PHY_LED0_IVRT << (i * 5)); } } return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); } /** * e1000_led_off_pchlan - Turn LEDs off * @hw: pointer to the HW structure * * Turn off the LEDs. **/ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) { u16 data = (u16)hw->mac.ledctl_mode1; u32 i, led; DEBUGFUNC("e1000_led_off_pchlan"); /* If no link, then turn LED off by clearing the invert bit * for each LED whose mode is "link_up" in ledctl_mode1. */ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { for (i = 0; i < 3; i++) { led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; if ((led & E1000_PHY_LED0_MODE_MASK) != E1000_LEDCTL_MODE_LINK_UP) continue; if (led & E1000_PHY_LED0_IVRT) data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); else data |= (E1000_PHY_LED0_IVRT << (i * 5)); } } return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); } /** * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset * @hw: pointer to the HW structure * * Read appropriate register for the config done bit for completion status * and configure the PHY through s/w for EEPROM-less parts. * * NOTE: some silicon which is EEPROM-less will fail trying to read the * config done bit, so only an error is logged and initialization continues. * If we were to return with an error, EEPROM-less silicon would not be able * to be reset or change link. **/ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u32 bank = 0; u32 status; DEBUGFUNC("e1000_get_cfg_done_ich8lan"); e1000_get_cfg_done_generic(hw); /* Wait for indication from h/w that it has completed basic config */ if (hw->mac.type >= e1000_ich10lan) { e1000_lan_init_done_ich8lan(hw); } else { ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) { /* When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. 
*/ DEBUGOUT("Auto Read Done did not complete\n"); ret_val = E1000_SUCCESS; } } /* Clear PHY Reset Asserted bit */ status = E1000_READ_REG(hw, E1000_STATUS); if (status & E1000_STATUS_PHYRA) E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA); else DEBUGOUT("PHY Reset Asserted not set - needs delay\n"); /* If EEPROM is not marked present, init the IGP 3 PHY manually */ if (hw->mac.type <= e1000_ich9lan) { if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && (hw->phy.type == e1000_phy_igp_3)) { e1000_phy_init_script_igp3(hw); } } else { if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { /* Maybe we should do a basic PHY config */ DEBUGOUT("EEPROM not present\n"); ret_val = -E1000_ERR_CONFIG; } } return ret_val; } /** * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. **/ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) { /* If the management interface is not enabled, then power down */ if (!(hw->mac.ops.check_mng_mode(hw) || hw->phy.ops.check_reset_block(hw))) e1000_power_down_phy_copper(hw); return; } /** * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters * @hw: pointer to the HW structure * * Clears hardware counters specific to the silicon family and calls * clear_hw_cntrs_generic to clear all general purpose counters. **/ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) { u16 phy_data; s32 ret_val; DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_ALGNERRC); E1000_READ_REG(hw, E1000_RXERRC); E1000_READ_REG(hw, E1000_TNCRS); E1000_READ_REG(hw, E1000_CEXTERR); E1000_READ_REG(hw, E1000_TSCTC); E1000_READ_REG(hw, E1000_TSCTFC); E1000_READ_REG(hw, E1000_MGTPRC); E1000_READ_REG(hw, E1000_MGTPDC); E1000_READ_REG(hw, E1000_MGTPTC); E1000_READ_REG(hw, E1000_IAC); E1000_READ_REG(hw, E1000_ICRXOC); /* Clear PHY statistics registers */ if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = hw->phy.ops.set_page(hw, HV_STATS_PAGE << IGP_PAGE_SHIFT); if (ret_val) goto release; hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); release: hw->phy.ops.release(hw); } } Index: head/sys/dev/e1000/e1000_regs.h =================================================================== --- head/sys/dev/e1000/e1000_regs.h (revision 258779) +++ head/sys/dev/e1000/e1000_regs.h (revision 258780) @@ -1,684 +1,684 @@ /****************************************************************************** Copyright (c) 
2001-2013, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ #define E1000_CTRL 0x00000 /* Device Control - RW */ #define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ #define E1000_STATUS 0x00008 /* Device Status - RO */ #define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ #define E1000_EERD 0x00014 /* EEPROM Read - RW */ #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ #define E1000_FLA 0x0001C /* Flash Access - RW */ #define E1000_MDIC 0x00020 /* MDI Control - RW */ #define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ #define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ #define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ #define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ #define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ #define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ #define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ #define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ #define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ #define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ #define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ #define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ #define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ #define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ #define E1000_ICR 0x000C0 /* 
Interrupt Cause Read - R/clr */ #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ #define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ #define E1000_SVCR 0x000F0 #define E1000_SVT 0x000F4 #define E1000_LPIC 0x000FC /* Low Power IDLE control */ #define E1000_RCTL 0x00100 /* Rx Control - RW */ #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ #define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ #define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ #define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ #define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ #define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) #define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */ #define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ #define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ #define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ #define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ #define E1000_TCTL 0x00400 /* Tx Control - RW */ #define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ #define E1000_TIPG 0x00410 /* Tx Inter-packet gap - RW */ #define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ #define E1000_LEDMUX 0x08130 /* LED MUX Control */ #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ #define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ #define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ #define E1000_FLASHT 0x01028 /* FLASH Timer Register */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLSWCTL 0x01030 /* FLASH control register */ #define E1000_FLSWDATA 0x01034 /* FLASH data register */ #define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ #define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ #define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ #define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ #define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ #define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ #define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ #define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ #define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ #define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ #define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ #define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ #define E1000_FRTIMER 0x01048 /* Free 
Running Timer - RW */ #define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ #define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ #define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ #define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ #define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ #define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ #define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ #define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ #define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ #define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ /* Split and Replication Rx Control - RW */ #define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ #define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ #define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ #define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ #define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ #define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ #define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ #define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ #define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ #define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ #define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ #define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ #define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ #define E1000_I210_FLMNGCTL 0x12038 #define E1000_I210_FLMNGDATA 0x1203C #define E1000_I210_FLMNGCNT 0x12040 #define E1000_I210_FLSWCTL 0x12048 #define E1000_I210_FLSWDATA 0x1204C #define E1000_I210_FLSWCNT 0x12050 #define E1000_I210_FLA 0x1201C #define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) #define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ /* QAV Tx mode control register */ #define E1000_I210_TQAVCTRL 0x3570 /* QAV Tx mode control register bitfields masks */ /* QAV enable */ #define E1000_TQAVCTRL_MODE (1 << 0) /* Fetching arbitration type */ #define E1000_TQAVCTRL_FETCH_ARB (1 << 4) /* Fetching timer enable */ #define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) /* Launch arbitration type */ #define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) /* Launch timer enable */ #define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) /* SP waits for SR enable */ #define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) /* Fetching timer correction */ #define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 #define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) /* High credit registers where _n can be 0 or 1. */ #define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) /* Queues fetch arbitration priority control register */ #define E1000_I210_TQAVARBCTRL 0x3574 /* Queues priority masks where _n and _p can be 0-3. 
*/ #define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * _n)) /* QAV Tx mode control registers where _n can be 0 or 1. */ #define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) /* QAV Tx mode control register bitfields masks */ #define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ #define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ -#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */ +#define E1000_TQAVCC_QUEUE_MODE (1U << 31) /* SP vs. SR Tx mode */ /* Good transmitted packets counter registers */ #define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) /* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ #define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n)) #define E1000_MMDAC 13 /* MMD Access Control */ #define E1000_MMDAAD 14 /* MMD Access Address/Data */ /* Convenience macros * * Note: "_n" is the queue number of the register to be written to. * * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ #define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ (0x0C000 + ((_n) * 0x40))) #define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ (0x0C004 + ((_n) * 0x40))) #define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ (0x0C008 + ((_n) * 0x40))) #define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ (0x0C00C + ((_n) * 0x40))) #define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ (0x0C010 + ((_n) * 0x40))) #define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ (0x0C014 + ((_n) * 0x40))) #define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) #define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ (0x0C018 + ((_n) * 0x40))) #define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ (0x0C028 + ((_n) * 0x40))) #define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ (0x0C030 + ((_n) * 0x40))) #define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ (0x0E000 + ((_n) * 0x40))) #define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ (0x0E004 + ((_n) * 0x40))) #define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ (0x0E008 + ((_n) * 0x40))) #define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ (0x0E010 + ((_n) * 0x40))) #define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ (0x0E014 + ((_n) * 0x40))) #define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) #define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ (0x0E018 + ((_n) * 0x40))) #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ (0x0E028 + ((_n) * 0x40))) #define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ (0x0E038 + ((_n) * 0x40))) #define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ (0x0E03C + ((_n) * 0x40))) #define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) #define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ #define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ #define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ #define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ (0x054E0 + ((_i - 16) * 8))) #define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ (0x054E4 + ((_i - 16) * 8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) #define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ #define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ #define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ /* Same as TXPBS, renamed for newer Si - RW */ #define E1000_ITPBS 0x03404 #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ #define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ #define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ #define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ #define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ #define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ #define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ #define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ #define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ /* DMA Tx Max Total Allow Size Reqs - RW */ #define E1000_DTXMXSZRQ 0x03540 #define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ #define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ #define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ #define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ #define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ #define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ #define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ #define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ #define E1000_COLC 0x04028 /* Collision Count - R/clr */ #define E1000_DC 0x04030 /* Defer Count - R/clr */ #define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ #define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ #define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ #define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ #define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ #define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ #define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ #define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ #define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ #define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ #define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ #define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ #define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ #define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ #define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ #define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ 
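The only functional change in this e1000_regs.h hunk is E1000_TQAVCC_QUEUE_MODE above, where (1 << 31) becomes (1U << 31); the other files in this revision get the same treatment. A minimal sketch of the rationale, assuming only a target where int is 32 bits (true for the ILP32 and LP64 models FreeBSD uses); the DEMO_* macro names and the program itself are illustrative, not part of e1000_regs.h:

#include <stdio.h>
#include <stdint.h>

/*
 * With a 32-bit int, the literal 1 is signed, so (1 << 31) shifts a one
 * into the sign bit.  C99 6.5.7 makes that undefined behavior; in practice
 * the result is a negative int that sign-extends when widened to 64 bits.
 * (1U << 31) is an unsigned shift and is fully defined.
 */
#define	DEMO_QUEUE_MODE_BAD	(1 << 31)	/* undefined: shift into sign bit */
#define	DEMO_QUEUE_MODE_OK	(1U << 31)	/* defined: 0x80000000u */

int
main(void)
{
	uint64_t bad = DEMO_QUEUE_MODE_BAD;	/* typically 0xffffffff80000000 */
	uint64_t ok = DEMO_QUEUE_MODE_OK;	/* always 0x0000000080000000 */

	printf("bad=0x%016jx ok=0x%016jx\n", (uintmax_t)bad, (uintmax_t)ok);
	return (0);
}

The same reasoning applies to every hunk in this revision, and only bit 31 needs the U suffix: shifts into bits 0 through 30 remain representable in a 32-bit int, so the existing (1 << n) defines for lower bits are left untouched.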
#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ #define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ #define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ #define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ #define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ #define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ #define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ #define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ #define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ #define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ #define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ #define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ #define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ #define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ #define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ #define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ #define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ #define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ #define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ #define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ #define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ #define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ #define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ #define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ #define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ #define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ #define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ #define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ #define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ #define E1000_IAC 0x04100 /* Interrupt Assertion Count */ #define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ #define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ #define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ #define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ #define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ #define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ #define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ #define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ #define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ #define E1000_VFGPRC 0x00F10 #define E1000_VFGORC 0x00F18 #define E1000_VFMPRC 0x00F3C #define E1000_VFGPTC 0x00F14 #define E1000_VFGOTC 0x00F34 #define E1000_VFGOTLBC 0x00F50 #define E1000_VFGPTLBC 0x00F44 #define E1000_VFGORLBC 0x00F48 #define E1000_VFGPRLBC 0x00F40 /* Virtualization statistical counters */ #define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) #define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) #define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) #define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) #define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) #define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) #define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) #define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) #define 
E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) /* LinkSec */ #define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ #define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ #define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ #define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ #define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ #define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ #define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ #define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ #define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ #define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ #define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ #define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ #define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ #define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ #define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ #define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ #define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ #define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ #define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ #define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ #define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ #define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ #define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ #define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ #define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ #define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ #define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ #define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ #define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ #define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ /* LinkSec Tx 128-bit Key 0 - WO */ #define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */ #define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) #define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ #define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ /* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit * key - RW. 
*/ #define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) #define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ #define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ #define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ #define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ /* IPSec Rx IPv4/v6 Address - RW */ #define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */ #define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) #define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ #define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ /* IPSec Tx 128-bit Key - RW */ #define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) #define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ #define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ #define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ #define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ #define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ #define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ #define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ #define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ #define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ #define E1000_RPTHC 0x04104 /* Rx Packets To Host */ #define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ #define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ #define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ #define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ #define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ #define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ #define E1000_LENERRS 0x04138 /* Length Errors Count */ #define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ #define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ #define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ #define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ #define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ #define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ #define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ #define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ #define E1000_RA 0x05400 /* Receive Address - RW Array */ #define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ #define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ #define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ #define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ #define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ #define E1000_WUC 0x05800 /* Wakeup Control - RW */ #define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ #define E1000_WUS 0x05810 /* Wakeup Status - RO */ #define E1000_MANC 0x05820 /* Management Control - RW */ #define E1000_IPAV 0x05838 /* IP Address Valid - RW */ #define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ #define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ #define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ #define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ #define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 
1's to clear */ #define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ #define E1000_HOST_IF 0x08800 /* Host Interface */ #define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ /* Flexible Host Filter Table */ #define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Ext Flexible Host Filter Table */ #define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) #define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ #define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ /* Management Decision Filters */ #define E1000_MDEF(_n) (0x05890 + (4 * (_n))) #define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ #define E1000_CCMCTL 0x05B48 /* CCM Control Register */ #define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ #define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ #define E1000_GCR 0x05B00 /* PCI-Ex Control */ #define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ #define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ #define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ #define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ #define E1000_SWSM 0x05B50 /* SW Semaphore */ #define E1000_FWSM 0x05B54 /* FW Semaphore */ /* Driver-only SW semaphore (not used by BOOT agents) */ #define E1000_SWSM2 0x05B58 #define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ #define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ #define E1000_UFUSE 0x05B78 /* UFUSE - RO */ #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ #define E1000_HICR 0x08F00 /* Host Interface Control */ #define E1000_FWSTS 0x08F0C /* FW Status */ /* RSS registers */ #define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ #define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ #define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ #define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ #define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ #define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ #define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ #define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ /* VT Registers */ #define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ #define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ #define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ #define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ #define E1000_VFRE 0x00C8C /* VF Receive Enables */ #define E1000_VFTE 0x00C90 /* VF Transmit Enables */ #define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ #define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ #define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ #define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ #define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ #define E1000_IOVTCL 0x05BBC /* IOV Control Register */ #define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ #define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ #define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ #define E1000_MDFB 0x03558 /* Malicious Driver free block */ #define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ #define E1000_TXSWC 0x05ACC /* Tx Switch Control */ #define E1000_SCCRL 0x05DB0 /* 
Storm Control Control */ #define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ #define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ /* These act per VF so an array friendly macro is used */ #define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) #define E1000_VFVMBMEM(_n) (0x00800 + (_n)) #define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) /* VLAN Virtual Machine Filter - RW */ #define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) #define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) #define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ #define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ #define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ #define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ #define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ #define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ #define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ #define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ #define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ #define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ #define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ #define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ #define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ #define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ #define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ #define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ #define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ #define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ /* Filtering Registers */ #define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ #define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ #define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ #define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ #define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ #define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ #define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ #define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ #define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ #define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ #define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ #define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ /* Tx Desc plane TC Rate-scheduler config */ #define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */ #define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */ #define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */ #define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */ #define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */ #define E1000_RTTPTCRS(_n) (0x34A0 
+ ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */ #define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */ #define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */ #define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/ #define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */ #define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) #define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ #define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ #define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ #define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ #define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ #define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ #define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ #define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ #define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ #define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ #define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ #define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ #define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ #define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ /* DMA Coalescing registers */ #define E1000_DMACR 0x02508 /* Control Register */ #define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ #define E1000_DMCTLX 0x02514 /* Time to Lx Request */ #define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ #define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ #define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ /* PCIe Parity Status Register */ #define E1000_PCIEERRSTS 0x05BA8 #define E1000_PROXYS 0x5F64 /* Proxying Status */ #define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ /* Thermal sensor configuration and status registers */ #define E1000_THMJT 0x08100 /* Junction Temperature */ #define E1000_THLOWTC 0x08104 /* Low Threshold Control */ #define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ #define E1000_THHIGHTC 0x0810C /* High Threshold Control */ #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ /* Energy Efficient Ethernet "EEE" registers */ #define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ #define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ #define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ #define E1000_EEE_SU 0x0E34 /* EEE Setup */ #define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ #define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ /* OS2BMC Registers */ #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ #define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ #define E1000_DOBFFCTL 0x3F24 /* DMA OBFF Control Register */ #endif Index: head/sys/dev/etherswitch/arswitch/arswitchreg.h =================================================================== --- head/sys/dev/etherswitch/arswitch/arswitchreg.h (revision 258779) +++ head/sys/dev/etherswitch/arswitch/arswitchreg.h (revision 258780) @@ -1,506 +1,506 @@ /*- * Copyright (c) 2011 Aleksandr Rybalko. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __AR8X16_SWITCHREG_H__ #define __AR8X16_SWITCHREG_H__ /* XXX doesn't belong here; stolen shamelessly from ath_hal/ah_internal.h */ /* * Register manipulation macros that expect bit field defines * to follow the convention that an _S suffix is appended for * a shift count, while the field mask has no suffix. */ #define SM(_v, _f) (((_v) << _f##_S) & (_f)) #define MS(_v, _f) (((_v) & (_f)) >> _f##_S) /* Atheros specific MII registers */ #define MII_ATH_DBG_ADDR 0x1d #define MII_ATH_DBG_DATA 0x1e #define AR8X16_REG_MASK_CTRL 0x0000 #define AR8X16_MASK_CTRL_REV_MASK 0x000000ff #define AR8X16_MASK_CTRL_VER_MASK 0x0000ff00 #define AR8X16_MASK_CTRL_VER_SHIFT 8 -#define AR8X16_MASK_CTRL_SOFT_RESET (1 << 31) +#define AR8X16_MASK_CTRL_SOFT_RESET (1U << 31) #define AR8X16_REG_MODE 0x0008 /* DIR-615 E4 U-Boot */ #define AR8X16_MODE_DIR_615_UBOOT 0x8d1003e0 /* From Ubiquiti RSPRO */ #define AR8X16_MODE_RGMII_PORT4_ISO 0x81461bea #define AR8X16_MODE_RGMII_PORT4_SWITCH 0x01261be2 /* AVM Fritz!Box 7390 */ #define AR8X16_MODE_GMII 0x010e5b71 /* from avm_cpmac/linux_ar_reg.h */ #define AR8X16_MODE_RESERVED 0x000e1b20 #define AR8X16_MODE_MAC0_GMII_EN (1u << 0) #define AR8X16_MODE_MAC0_RGMII_EN (1u << 1) #define AR8X16_MODE_PHY4_GMII_EN (1u << 2) #define AR8X16_MODE_PHY4_RGMII_EN (1u << 3) #define AR8X16_MODE_MAC0_MAC_MODE (1u << 4) #define AR8X16_MODE_RGMII_RXCLK_DELAY_EN (1u << 6) #define AR8X16_MODE_RGMII_TXCLK_DELAY_EN (1u << 7) #define AR8X16_MODE_MAC5_MAC_MODE (1u << 14) #define AR8X16_MODE_MAC5_PHY_MODE (1u << 15) #define AR8X16_MODE_TXDELAY_S0 (1u << 21) #define AR8X16_MODE_TXDELAY_S1 (1u << 22) #define AR8X16_MODE_RXDELAY_S0 (1u << 23) #define AR8X16_MODE_LED_OPEN_EN (1u << 24) #define AR8X16_MODE_SPI_EN (1u << 25) #define AR8X16_MODE_RXDELAY_S1 (1u << 26) #define AR8X16_MODE_POWER_ON_SEL (1u << 31) #define AR8X16_REG_ISR 0x0010 #define AR8X16_REG_IMR 0x0014 #define AR8X16_REG_SW_MAC_ADDR0 0x0020 #define AR8X16_REG_SW_MAC_ADDR1 0x0024 #define AR8X16_REG_FLOOD_MASK 0x002c #define AR8X16_FLOOD_MASK_BCAST_TO_CPU (1 << 26) #define AR8X16_REG_GLOBAL_CTRL 0x0030 #define AR8216_GLOBAL_CTRL_MTU_MASK 0x00000fff #define AR8216_GLOBAL_CTRL_MTU_MASK_S 0 #define AR8316_GLOBAL_CTRL_MTU_MASK 0x00007fff #define AR8316_GLOBAL_CTRL_MTU_MASK_S 0 #define 
AR8236_GLOBAL_CTRL_MTU_MASK 0x00007fff #define AR8236_GLOBAL_CTRL_MTU_MASK_S 0 #define AR7240_GLOBAL_CTRL_MTU_MASK 0x00003fff #define AR7240_GLOBAL_CTRL_MTU_MASK_S 0 #define AR8X16_REG_VLAN_CTRL 0x0040 #define AR8X16_VLAN_OP 0x00000007 #define AR8X16_VLAN_OP_NOOP 0x0 #define AR8X16_VLAN_OP_FLUSH 0x1 #define AR8X16_VLAN_OP_LOAD 0x2 #define AR8X16_VLAN_OP_PURGE 0x3 #define AR8X16_VLAN_OP_REMOVE_PORT 0x4 #define AR8X16_VLAN_OP_GET_NEXT 0x5 #define AR8X16_VLAN_OP_GET 0x6 #define AR8X16_VLAN_ACTIVE (1 << 3) #define AR8X16_VLAN_FULL (1 << 4) #define AR8X16_VLAN_PORT 0x00000f00 #define AR8X16_VLAN_PORT_SHIFT 8 #define AR8X16_VLAN_VID 0x0fff0000 #define AR8X16_VLAN_VID_SHIFT 16 #define AR8X16_VLAN_PRIO 0x70000000 #define AR8X16_VLAN_PRIO_SHIFT 28 -#define AR8X16_VLAN_PRIO_EN (1 << 31) +#define AR8X16_VLAN_PRIO_EN (1U << 31) #define AR8X16_REG_VLAN_DATA 0x0044 #define AR8X16_VLAN_MEMBER 0x0000003f #define AR8X16_VLAN_VALID (1 << 11) #define AR8X16_REG_ARL_CTRL0 0x0050 #define AR8X16_REG_ARL_CTRL1 0x0054 #define AR8X16_REG_ARL_CTRL2 0x0058 #define AR8X16_REG_AT_CTRL 0x005c #define AR8X16_AT_CTRL_ARP_EN (1 << 20) #define AR8X16_REG_IP_PRIORITY_1 0x0060 #define AR8X16_REG_IP_PRIORITY_2 0x0064 #define AR8X16_REG_IP_PRIORITY_3 0x0068 #define AR8X16_REG_IP_PRIORITY_4 0x006C #define AR8X16_REG_TAG_PRIO 0x0070 #define AR8X16_REG_SERVICE_TAG 0x0074 #define AR8X16_SERVICE_TAG_MASK 0x0000ffff #define AR8X16_REG_CPU_PORT 0x0078 #define AR8X16_MIRROR_PORT_SHIFT 4 #define AR8X16_MIRROR_PORT_MASK (0xf << AR8X16_MIRROR_PORT_SHIFT) #define AR8X16_CPU_MIRROR_PORT(_p) ((_p) << AR8X16_MIRROR_PORT_SHIFT) #define AR8X16_CPU_MIRROR_DIS AR8X16_CPU_MIRROR_PORT(0xf) #define AR8X16_CPU_PORT_EN (1 << 8) #define AR8X16_REG_MIB_FUNC0 0x0080 #define AR8X16_MIB_TIMER_MASK 0x0000ffff #define AR8X16_MIB_AT_HALF_EN (1 << 16) #define AR8X16_MIB_BUSY (1 << 17) #define AR8X16_MIB_FUNC_SHIFT 24 #define AR8X16_MIB_FUNC_NO_OP 0x0 #define AR8X16_MIB_FUNC_FLUSH 0x1 #define AR8X16_MIB_FUNC_CAPTURE 0x3 #define AR8X16_MIB_FUNC_XXX (1 << 30) /* 0x40000000 */ #define AR934X_MIB_ENABLE (1 << 30) #define AR8X16_REG_MDIO_HIGH_ADDR 0x0094 #define AR8X16_REG_MDIO_CTRL 0x0098 #define AR8X16_MDIO_CTRL_DATA_MASK 0x0000ffff #define AR8X16_MDIO_CTRL_REG_ADDR_SHIFT 16 #define AR8X16_MDIO_CTRL_PHY_ADDR_SHIFT 21 #define AR8X16_MDIO_CTRL_CMD_WRITE 0 #define AR8X16_MDIO_CTRL_CMD_READ (1 << 27) #define AR8X16_MDIO_CTRL_MASTER_EN (1 << 30) -#define AR8X16_MDIO_CTRL_BUSY (1 << 31) +#define AR8X16_MDIO_CTRL_BUSY (1U << 31) #define AR8X16_REG_PORT_BASE(_p) (0x0100 + (_p) * 0x0100) #define AR8X16_REG_PORT_STS(_p) (AR8X16_REG_PORT_BASE((_p)) + 0x0000) #define AR8X16_PORT_STS_SPEED_MASK 0x00000003 #define AR8X16_PORT_STS_SPEED_10 0 #define AR8X16_PORT_STS_SPEED_100 1 #define AR8X16_PORT_STS_SPEED_1000 2 #define AR8X16_PORT_STS_TXMAC (1 << 2) #define AR8X16_PORT_STS_RXMAC (1 << 3) #define AR8X16_PORT_STS_TXFLOW (1 << 4) #define AR8X16_PORT_STS_RXFLOW (1 << 5) #define AR8X16_PORT_STS_DUPLEX (1 << 6) #define AR8X16_PORT_STS_LINK_UP (1 << 8) #define AR8X16_PORT_STS_LINK_AUTO (1 << 9) #define AR8X16_PORT_STS_LINK_PAUSE (1 << 10) #define AR8X16_REG_PORT_CTRL(_p) (AR8X16_REG_PORT_BASE((_p)) + 0x0004) #define AR8X16_PORT_CTRL_STATE_MASK 0x00000007 #define AR8X16_PORT_CTRL_STATE_DISABLED 0 #define AR8X16_PORT_CTRL_STATE_BLOCK 1 #define AR8X16_PORT_CTRL_STATE_LISTEN 2 #define AR8X16_PORT_CTRL_STATE_LEARN 3 #define AR8X16_PORT_CTRL_STATE_FORWARD 4 #define AR8X16_PORT_CTRL_LEARN_LOCK (1 << 7) #define AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT 8 #define 
AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_KEEP 0 #define AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_STRIP 1 #define AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_ADD 2 #define AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_DOUBLE_TAG 3 #define AR8X16_PORT_CTRL_IGMP_SNOOP (1 << 10) #define AR8X16_PORT_CTRL_HEADER (1 << 11) #define AR8X16_PORT_CTRL_MAC_LOOP (1 << 12) #define AR8X16_PORT_CTRL_SINGLE_VLAN (1 << 13) #define AR8X16_PORT_CTRL_LEARN (1 << 14) #define AR8X16_PORT_CTRL_DOUBLE_TAG (1 << 15) #define AR8X16_PORT_CTRL_MIRROR_TX (1 << 16) #define AR8X16_PORT_CTRL_MIRROR_RX (1 << 17) #define AR8X16_REG_PORT_VLAN(_p) (AR8X16_REG_PORT_BASE((_p)) + 0x0008) #define AR8X16_PORT_VLAN_DEFAULT_ID_SHIFT 0 #define AR8X16_PORT_VLAN_DEST_PORTS_SHIFT 16 #define AR8X16_PORT_VLAN_MODE_MASK 0xc0000000 #define AR8X16_PORT_VLAN_MODE_SHIFT 30 #define AR8X16_PORT_VLAN_MODE_PORT_ONLY 0 #define AR8X16_PORT_VLAN_MODE_PORT_FALLBACK 1 #define AR8X16_PORT_VLAN_MODE_VLAN_ONLY 2 #define AR8X16_PORT_VLAN_MODE_SECURE 3 #define AR8X16_REG_PORT_RATE_LIM(_p) (AR8X16_REG_PORT_BASE((_p)) + 0x000c) #define AR8X16_PORT_RATE_LIM_128KB 0 #define AR8X16_PORT_RATE_LIM_256KB 1 #define AR8X16_PORT_RATE_LIM_512KB 2 #define AR8X16_PORT_RATE_LIM_1MB 3 #define AR8X16_PORT_RATE_LIM_2MB 4 #define AR8X16_PORT_RATE_LIM_4MB 5 #define AR8X16_PORT_RATE_LIM_8MB 6 #define AR8X16_PORT_RATE_LIM_16MB 7 #define AR8X16_PORT_RATE_LIM_32MB 8 #define AR8X16_PORT_RATE_LIM_64MB 9 #define AR8X16_PORT_RATE_LIM_IN_EN (1 << 24) #define AR8X16_PORT_RATE_LIM_OUT_EN (1 << 23) #define AR8X16_PORT_RATE_LIM_IN_MASK 0x000f0000 #define AR8X16_PORT_RATE_LIM_IN_SHIFT 16 #define AR8X16_PORT_RATE_LIM_OUT_MASK 0x0000000f #define AR8X16_PORT_RATE_LIM_OUT_SHIFT 0 #define AR8X16_REG_PORT_PRIORITY(_p) (AR8X16_REG_PORT_BASE((_p)) + 0x0010) #define AR8X16_REG_STATS_BASE(_p) (0x20000 + (_p) * 0x100) #define AR8X16_STATS_RXBROAD 0x0000 #define AR8X16_STATS_RXPAUSE 0x0004 #define AR8X16_STATS_RXMULTI 0x0008 #define AR8X16_STATS_RXFCSERR 0x000c #define AR8X16_STATS_RXALIGNERR 0x0010 #define AR8X16_STATS_RXRUNT 0x0014 #define AR8X16_STATS_RXFRAGMENT 0x0018 #define AR8X16_STATS_RX64BYTE 0x001c #define AR8X16_STATS_RX128BYTE 0x0020 #define AR8X16_STATS_RX256BYTE 0x0024 #define AR8X16_STATS_RX512BYTE 0x0028 #define AR8X16_STATS_RX1024BYTE 0x002c #define AR8X16_STATS_RX1518BYTE 0x0030 #define AR8X16_STATS_RXMAXBYTE 0x0034 #define AR8X16_STATS_RXTOOLONG 0x0038 #define AR8X16_STATS_RXGOODBYTE 0x003c #define AR8X16_STATS_RXBADBYTE 0x0044 #define AR8X16_STATS_RXOVERFLOW 0x004c #define AR8X16_STATS_FILTERED 0x0050 #define AR8X16_STATS_TXBROAD 0x0054 #define AR8X16_STATS_TXPAUSE 0x0058 #define AR8X16_STATS_TXMULTI 0x005c #define AR8X16_STATS_TXUNDERRUN 0x0060 #define AR8X16_STATS_TX64BYTE 0x0064 #define AR8X16_STATS_TX128BYTE 0x0068 #define AR8X16_STATS_TX256BYTE 0x006c #define AR8X16_STATS_TX512BYTE 0x0070 #define AR8X16_STATS_TX1024BYTE 0x0074 #define AR8X16_STATS_TX1518BYTE 0x0078 #define AR8X16_STATS_TXMAXBYTE 0x007c #define AR8X16_STATS_TXOVERSIZE 0x0080 #define AR8X16_STATS_TXBYTE 0x0084 #define AR8X16_STATS_TXCOLLISION 0x008c #define AR8X16_STATS_TXABORTCOL 0x0090 #define AR8X16_STATS_TXMULTICOL 0x0094 #define AR8X16_STATS_TXSINGLECOL 0x0098 #define AR8X16_STATS_TXEXCDEFER 0x009c #define AR8X16_STATS_TXDEFER 0x00a0 #define AR8X16_STATS_TXLATECOL 0x00a4 #define AR8X16_PORT_CPU 0 #define AR8X16_NUM_PORTS 6 #define AR8X16_NUM_PHYS 5 #define AR8X16_MAGIC 0xc000050e #define AR8X16_PHY_ID1 0x004d #define AR8X16_PHY_ID2 0xd041 #define AR8X16_PORT_MASK(_port) (1 << (_port)) #define AR8X16_PORT_MASK_ALL ((1<<AR8X16_NUM_PORTS)-1) [...] #endif /* __AR8X16_SWITCHREG_H__ */ Index: head/sys/arm/freescale/imx/if_ffecreg.h =================================================================== --- head/sys/arm/freescale/imx/if_ffecreg.h (revision 258779) +++ head/sys/arm/freescale/imx/if_ffecreg.h (revision 258780) /*- * Copyright (c) 2013 Ian Lepore * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifndef IF_FFECREG_H #define IF_FFECREG_H #include __FBSDID("$FreeBSD$"); /* * Hardware defines for Freescale Fast Ethernet Controller. */ /* * MAC registers. */ #define FEC_IER_REG 0x0004 #define FEC_IEM_REG 0x0008 -#define FEC_IER_HBERR (1 << 31) +#define FEC_IER_HBERR (1U << 31) #define FEC_IER_BABR (1 << 30) #define FEC_IER_BABT (1 << 29) #define FEC_IER_GRA (1 << 28) #define FEC_IER_TXF (1 << 27) #define FEC_IER_TXB (1 << 26) #define FEC_IER_RXF (1 << 25) #define FEC_IER_RXB (1 << 24) #define FEC_IER_MII (1 << 23) #define FEC_IER_EBERR (1 << 22) #define FEC_IER_LC (1 << 21) #define FEC_IER_RL (1 << 20) #define FEC_IER_UN (1 << 19) #define FEC_IER_PLR (1 << 18) #define FEC_IER_WAKEUP (1 << 17) #define FEC_IER_AVAIL (1 << 16) #define FEC_IER_TIMER (1 << 15) #define FEC_RDAR_REG 0x0010 #define FEC_RDAR_RDAR (1 << 24) #define FEC_TDAR_REG 0x0014 #define FEC_TDAR_TDAR (1 << 24) #define FEC_ECR_REG 0x0024 #define FEC_ECR_DBSWP (1 << 8) #define FEC_ECR_STOPEN (1 << 7) #define FEC_ECR_DBGEN (1 << 6) #define FEC_ECR_SPEED (1 << 5) #define FEC_ECR_EN1588 (1 << 4) #define FEC_ECR_SLEEP (1 << 3) #define FEC_ECR_MAGICEN (1 << 2) #define FEC_ECR_ETHEREN (1 << 1) #define FEC_ECR_RESET (1 << 0) #define FEC_MMFR_REG 0x0040 #define FEC_MMFR_ST_SHIFT 30 #define FEC_MMFR_ST_VALUE (0x01 << FEC_MMFR_ST_SHIFT) #define FEC_MMFR_OP_SHIFT 28 #define FEC_MMFR_OP_WRITE (0x01 << FEC_MMFR_OP_SHIFT) #define FEC_MMFR_OP_READ (0x02 << FEC_MMFR_OP_SHIFT) #define FEC_MMFR_PA_SHIFT 23 #define FEC_MMFR_PA_MASK (0x1f << FEC_MMFR_PA_SHIFT) #define FEC_MMFR_RA_SHIFT 18 #define FEC_MMFR_RA_MASK (0x1f << FEC_MMFR_RA_SHIFT) #define FEC_MMFR_TA_SHIFT 16 #define FEC_MMFR_TA_VALUE (0x02 << FEC_MMFR_TA_SHIFT) #define FEC_MMFR_DATA_SHIFT 0 #define FEC_MMFR_DATA_MASK (0xffff << FEC_MMFR_DATA_SHIFT) #define FEC_MSCR_REG 0x0044 #define FEC_MSCR_HOLDTIME_SHIFT 8 #define FEC_MSCR_HOLDTIME_MASK (0x07 << FEC_MSCR_HOLDTIME_SHIFT) #define FEC_MSCR_DIS_PRE (1 << 7) #define FEC_MSCR_MII_SPEED_SHIFT 1 #define FEC_MSCR_MII_SPEED_MASk (0x3f << FEC_MSCR_MII_SPEED_SHIFT) #define FEC_MIBC_REG 0x0064 -#define FEC_MIBC_DIS (1 << 31) +#define FEC_MIBC_DIS (1U << 31) #define FEC_MIBC_IDLE (1 << 30) #define FEC_MIBC_CLEAR (1 << 29) /* imx6 only */ #define FEC_RCR_REG 0x0084 -#define FEC_RCR_GRS (1 << 31) +#define 
FEC_RCR_GRS (1U << 31) #define FEC_RCR_NLC (1 << 30) #define FEC_RCR_MAX_FL_SHIFT 16 #define FEC_RCR_MAX_FL_MASK (0x3fff << FEC_RCR_MAX_FL_SHIFT) #define FEC_RCR_CFEN (1 << 15) #define FEC_RCR_CRCFWD (1 << 14) #define FEC_RCR_PAUFWD (1 << 13) #define FEC_RCR_PADEN (1 << 12) #define FEC_RCR_RMII_10T (1 << 9) #define FEC_RCR_RMII_MODE (1 << 8) #define FEC_RCR_RGMII_EN (1 << 6) #define FEC_RCR_FCE (1 << 5) #define FEC_RCR_BC_REJ (1 << 4) #define FEC_RCR_PROM (1 << 3) #define FEC_RCR_MII_MODE (1 << 2) #define FEC_RCR_DRT (1 << 1) #define FEC_RCR_LOOP (1 << 0) #define FEC_TCR_REG 0x00c4 #define FEC_TCR_ADDINS (1 << 9) #define FEC_TCR_ADDSEL_SHIFT 5 #define FEC_TCR_ADDSEL_MASK (0x07 << FEC_TCR_ADDSEL_SHIFT) #define FEC_TCR_RFC_PAUSE (1 << 4) #define FEC_TCR_TFC_PAUSE (1 << 3) #define FEC_TCR_FDEN (1 << 2) #define FEC_TCR_GTS (1 << 0) #define FEC_PALR_REG 0x00e4 #define FEC_PALR_PADDR1_SHIFT 0 #define FEC_PALR_PADDR1_MASK (0xffffffff << FEC_PALR_PADDR1_SHIFT) #define FEC_PAUR_REG 0x00e8 #define FEC_PAUR_PADDR2_SHIFT 16 #define FEC_PAUR_PADDR2_MASK (0xffff << FEC_PAUR_PADDR2_SHIFT) #define FEC_PAUR_TYPE_VALUE (0x8808) #define FEC_OPD_REG 0x00ec #define FEC_OPD_PAUSE_DUR_SHIFT 0 #define FEC_OPD_PAUSE_DUR_MASK (0xffff << FEC_OPD_PAUSE_DUR_SHIFT) #define FEC_IAUR_REG 0x0118 #define FEC_IALR_REG 0x011c #define FEC_GAUR_REG 0x0120 #define FEC_GALR_REG 0x0124 #define FEC_TFWR_REG 0x0144 #define FEC_TFWR_STRFWD (1 << 8) #define FEC_TFWR_TWFR_SHIFT 0 #define FEC_TFWR_TWFR_MASK (0x3f << FEC_TFWR_TWFR_SHIFT) #define FEC_TFWR_TWFR_128BYTE (0x02 << FEC_TFWR_TWFR_SHIFT) #define FEC_RDSR_REG 0x0180 #define FEC_TDSR_REG 0x0184 #define FEC_MRBR_REG 0x0188 #define FEC_MRBR_R_BUF_SIZE_SHIFT 0 #define FEC_MRBR_R_BUF_SIZE_MASK (0x3fff << FEC_MRBR_R_BUF_SIZE_SHIFT) #define FEC_RSFL_REG 0x0190 #define FEC_RSEM_REG 0x0194 #define FEC_RAEM_REG 0x0198 #define FEC_RAFL_REG 0x019c #define FEC_TSEM_REG 0x01a0 #define FEC_TAEM_REG 0x01a4 #define FEC_TAFL_REG 0x01a8 #define FEC_TIPG_REG 0x01ac #define FEC_FTRL_REG 0x01b0 #define FEC_TACC_REG 0x01c0 #define FEC_TACC_PROCHK (1 << 4) #define FEC_TACC_IPCHK (1 << 3) #define FEC_TACC_SHIFT16 (1 << 0) #define FEC_RACC_REG 0x01c4 #define FEC_RACC_SHIFT16 (1 << 7) #define FEC_RACC_LINEDIS (1 << 6) #define FEC_RACC_PRODIS (1 << 2) #define FEC_RACC_IPDIS (1 << 1) #define FEC_RACC_PADREM (1 << 0) /* * Statistics registers */ #define FEC_RMON_T_DROP 0x200 #define FEC_RMON_T_PACKETS 0x204 #define FEC_RMON_T_BC_PKT 0x208 #define FEC_RMON_T_MC_PKT 0x20C #define FEC_RMON_T_CRC_ALIGN 0x210 #define FEC_RMON_T_UNDERSIZE 0x214 #define FEC_RMON_T_OVERSIZE 0x218 #define FEC_RMON_T_FRAG 0x21C #define FEC_RMON_T_JAB 0x220 #define FEC_RMON_T_COL 0x224 #define FEC_RMON_T_P64 0x228 #define FEC_RMON_T_P65TO127 0x22C #define FEC_RMON_T_P128TO255 0x230 #define FEC_RMON_T_P256TO511 0x234 #define FEC_RMON_T_P512TO1023 0x238 #define FEC_RMON_T_P1024TO2047 0x23C #define FEC_RMON_T_P_GTE2048 0x240 #define FEC_RMON_T_OCTECTS 0x240 #define FEC_IEEE_T_DROP 0x248 #define FEC_IEEE_T_FRAME_OK 0x24C #define FEC_IEEE_T_1COL 0x250 #define FEC_IEEE_T_MCOL 0x254 #define FEC_IEEE_T_DEF 0x258 #define FEC_IEEE_T_LCOL 0x25C #define FEC_IEEE_T_EXCOL 0x260 #define FEC_IEEE_T_MACERR 0x264 #define FEC_IEEE_T_CSERR 0x268 #define FEC_IEEE_T_SQE 0x26C #define FEC_IEEE_T_FDXFC 0x270 #define FEC_IEEE_T_OCTETS_OK 0x274 #define FEC_RMON_R_PACKETS 0x284 #define FEC_RMON_R_BC_PKT 0x288 #define FEC_RMON_R_MC_PKT 0x28C #define FEC_RMON_R_CRC_ALIGN 0x290 #define FEC_RMON_R_UNDERSIZE 0x294 #define FEC_RMON_R_OVERSIZE 0x298 #define 
FEC_RMON_R_FRAG 0x29C #define FEC_RMON_R_JAB 0x2A0 #define FEC_RMON_R_RESVD_0 0x2A4 #define FEC_RMON_R_P64 0x2A8 #define FEC_RMON_R_P65TO127 0x2AC #define FEC_RMON_R_P128TO255 0x2B0 #define FEC_RMON_R_P256TO511 0x2B4 #define FEC_RMON_R_P512TO1023 0x2B8 #define FEC_RMON_R_P1024TO2047 0x2BC #define FEC_RMON_R_P_GTE2048 0x2C0 #define FEC_RMON_R_OCTETS 0x2C4 #define FEC_IEEE_R_DROP 0x2C8 #define FEC_IEEE_R_FRAME_OK 0x2CC #define FEC_IEEE_R_CRC 0x2D0 #define FEC_IEEE_R_ALIGN 0x2D4 #define FEC_IEEE_R_MACERR 0x2D8 #define FEC_IEEE_R_FDXFC 0x2DC #define FEC_IEEE_R_OCTETS_OK 0x2E0 #define FEC_MIIGSK_CFGR 0x300 #define FEC_MIIGSK_CFGR_FRCONT (1 << 6) /* Freq: 0=50MHz, 1=5MHz */ #define FEC_MIIGSK_CFGR_LBMODE (1 << 4) /* loopback mode */ #define FEC_MIIGSK_CFGR_EMODE (1 << 3) /* echo mode */ #define FEC_MIIGSK_CFGR_IF_MODE_MASK (0x3 << 0) #define FEC_MIIGSK_CFGR_IF_MODE_MII (0 << 0) #define FEC_MIIGSK_CFGR_IF_MODE_RMII (1 << 0) #define FEC_MIIGSK_ENR 0x308 #define FEC_MIIGSK_ENR_READY (1 << 2) #define FEC_MIIGSK_ENR_EN (1 << 1) /* * A hardware buffer descriptor. Rx and Tx buffers have the same descriptor * layout, but the bits in the flags field have different meanings. */ struct ffec_hwdesc { uint32_t flags_len; uint32_t buf_paddr; }; -#define FEC_TXDESC_READY (1 << 31) +#define FEC_TXDESC_READY (1U << 31) #define FEC_TXDESC_T01 (1 << 30) #define FEC_TXDESC_WRAP (1 << 29) #define FEC_TXDESC_T02 (1 << 28) #define FEC_TXDESC_L (1 << 27) #define FEC_TXDESC_TC (1 << 26) #define FEC_TXDESC_ABC (1 << 25) #define FEC_TXDESC_LEN_MASK (0xffff) -#define FEC_RXDESC_EMPTY (1 << 31) +#define FEC_RXDESC_EMPTY (1U << 31) #define FEC_RXDESC_R01 (1 << 30) #define FEC_RXDESC_WRAP (1 << 29) #define FEC_RXDESC_R02 (1 << 28) #define FEC_RXDESC_L (1 << 27) #define FEC_RXDESC_M (1 << 24) #define FEC_RXDESC_BC (1 << 23) #define FEC_RXDESC_MC (1 << 22) #define FEC_RXDESC_LG (1 << 21) #define FEC_RXDESC_NO (1 << 20) #define FEC_RXDESC_CR (1 << 18) #define FEC_RXDESC_OV (1 << 17) #define FEC_RXDESC_TR (1 << 16) #define FEC_RXDESC_LEN_MASK (0xffff) #define FEC_RXDESC_ERROR_BITS (FEC_RXDESC_LG | FEC_RXDESC_NO | \ FEC_RXDESC_OV | FEC_RXDESC_TR) /* * The hardware imposes alignment restrictions on various objects involved in * DMA transfers. These values are expressed in bytes (not bits). */ #define FEC_DESC_RING_ALIGN 16 #define FEC_RXBUF_ALIGN 16 #define FEC_TXBUF_ALIGN 16 #endif /* IF_FFECREG_H */ Index: head/sys/dev/firewire/firewire.c =================================================================== --- head/sys/dev/firewire/firewire.c (revision 258779) +++ head/sys/dev/firewire/firewire.c (revision 258780) @@ -1,2442 +1,2442 @@ /*- * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #if defined(__DragonFly__) || __FreeBSD_version < 500000 #include /* for DELAY() */ #endif #include /* used by smbus and newbus */ #include #ifdef __DragonFly__ #include "firewire.h" #include "firewirereg.h" #include "fwmem.h" #include "iec13213.h" #include "iec68113.h" #else #include #include #include #include #include #endif struct crom_src_buf { struct crom_src src; struct crom_chunk root; struct crom_chunk vendor; struct crom_chunk hw; }; int firewire_debug=0, try_bmr=1, hold_count=0; SYSCTL_INT(_debug, OID_AUTO, firewire_debug, CTLFLAG_RW, &firewire_debug, 0, "FireWire driver debug flag"); SYSCTL_NODE(_hw, OID_AUTO, firewire, CTLFLAG_RD, 0, "FireWire Subsystem"); SYSCTL_INT(_hw_firewire, OID_AUTO, try_bmr, CTLFLAG_RW, &try_bmr, 0, "Try to be a bus manager"); SYSCTL_INT(_hw_firewire, OID_AUTO, hold_count, CTLFLAG_RW, &hold_count, 0, "Number of bus resets before lost device information is removed"); MALLOC_DEFINE(M_FW, "firewire", "FireWire"); MALLOC_DEFINE(M_FWXFER, "fw_xfer", "XFER/FireWire"); #define FW_MAXASYRTY 4 devclass_t firewire_devclass; static void firewire_identify (driver_t *, device_t); static int firewire_probe (device_t); static int firewire_attach (device_t); static int firewire_detach (device_t); static int firewire_resume (device_t); static void firewire_xfer_timeout(void *, int); #if 0 static int firewire_shutdown (device_t); #endif static device_t firewire_add_child(device_t, u_int, const char *, int); static void fw_try_bmr (void *); static void fw_try_bmr_callback (struct fw_xfer *); static void fw_asystart (struct fw_xfer *); static int fw_get_tlabel (struct firewire_comm *, struct fw_xfer *); static void fw_bus_probe (struct firewire_comm *); static void fw_attach_dev (struct firewire_comm *); static void fw_bus_probe_thread(void *); #ifdef FW_VMACCESS static void fw_vmaccess (struct fw_xfer *); #endif static int fw_bmr (struct firewire_comm *); static void fw_dump_hdr(struct fw_pkt *, char *); static device_method_t firewire_methods[] = { /* Device interface */ DEVMETHOD(device_identify, firewire_identify), DEVMETHOD(device_probe, firewire_probe), DEVMETHOD(device_attach, firewire_attach), DEVMETHOD(device_detach, firewire_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, firewire_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_add_child, firewire_add_child), DEVMETHOD_END }; char *linkspeed[] = { "S100", "S200", "S400", "S800", "S1600", "S3200", "undef", "undef" }; static char *tcode_str[] = {
"WREQQ", "WREQB", "WRES", "undef", "RREQQ", "RREQB", "RRESQ", "RRESB", "CYCS", "LREQ", "STREAM", "LRES", "undef", "undef", "PHY", "undef" }; /* IEEE-1394a Table C-2 Gap count as a function of hops*/ #define MAX_GAPHOP 15 u_int gap_cnt[] = { 5, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40}; static driver_t firewire_driver = { "firewire", firewire_methods, sizeof(struct firewire_softc), }; /* * Lookup fwdev by node id. */ struct fw_device * fw_noderesolve_nodeid(struct firewire_comm *fc, int dst) { struct fw_device *fwdev; int s; s = splfw(); STAILQ_FOREACH(fwdev, &fc->devices, link) if (fwdev->dst == dst && fwdev->status != FWDEVINVAL) break; splx(s); return fwdev; } /* * Lookup fwdev by EUI64. */ struct fw_device * fw_noderesolve_eui64(struct firewire_comm *fc, struct fw_eui64 *eui) { struct fw_device *fwdev; int s; s = splfw(); FW_GLOCK(fc); STAILQ_FOREACH(fwdev, &fc->devices, link) if (FW_EUI64_EQUAL(fwdev->eui, *eui)) break; FW_GUNLOCK(fc); splx(s); if(fwdev == NULL) return NULL; if(fwdev->status == FWDEVINVAL) return NULL; return fwdev; } /* * Async. request procedure for userland application. */ int fw_asyreq(struct firewire_comm *fc, int sub, struct fw_xfer *xfer) { int err = 0; struct fw_xferq *xferq; int len; struct fw_pkt *fp; int tcode; struct tcode_info *info; if(xfer == NULL) return EINVAL; if(xfer->hand == NULL){ printf("hand == NULL\n"); return EINVAL; } fp = &xfer->send.hdr; tcode = fp->mode.common.tcode & 0xf; info = &fc->tcode[tcode]; if (info->flag == 0) { printf("invalid tcode=%x\n", tcode); return EINVAL; } /* XXX allow bus explore packets only after bus rest */ if ((fc->status < FWBUSEXPLORE) && ((tcode != FWTCODE_RREQQ) || (fp->mode.rreqq.dest_hi != 0xffff) || (fp->mode.rreqq.dest_lo < 0xf0000000) || (fp->mode.rreqq.dest_lo >= 0xf0001000))) { xfer->resp = EAGAIN; xfer->flag = FWXF_BUSY; return (EAGAIN); } if (info->flag & FWTI_REQ) xferq = fc->atq; else xferq = fc->ats; len = info->hdr_len; if (xfer->send.pay_len > MAXREC(fc->maxrec)) { printf("send.pay_len > maxrec\n"); return EINVAL; } if (info->flag & FWTI_BLOCK_STR) len = fp->mode.stream.len; else if (info->flag & FWTI_BLOCK_ASY) len = fp->mode.rresb.len; else len = 0; if (len != xfer->send.pay_len){ printf("len(%d) != send.pay_len(%d) %s(%x)\n", len, xfer->send.pay_len, tcode_str[tcode], tcode); return EINVAL; } if(xferq->start == NULL){ printf("xferq->start == NULL\n"); return EINVAL; } if(!(xferq->queued < xferq->maxq)){ device_printf(fc->bdev, "Discard a packet (queued=%d)\n", xferq->queued); return EAGAIN; } xfer->tl = -1; if (info->flag & FWTI_TLABEL) { if (fw_get_tlabel(fc, xfer) < 0) return EAGAIN; } xfer->resp = 0; xfer->fc = fc; xfer->q = xferq; fw_asystart(xfer); return err; } /* * Wakeup blocked process. */ void fw_xferwake(struct fw_xfer *xfer) { struct mtx *lock = &xfer->fc->wait_lock; mtx_lock(lock); xfer->flag |= FWXF_WAKE; mtx_unlock(lock); wakeup(xfer); return; } int fw_xferwait(struct fw_xfer *xfer) { struct mtx *lock = &xfer->fc->wait_lock; int err = 0; mtx_lock(lock); if ((xfer->flag & FWXF_WAKE) == 0) err = msleep((void *)xfer, lock, PWAIT|PCATCH, "fw_xferwait", 0); mtx_unlock(lock); return (err); } /* * Async. request with given xfer structure. 
 */
static void
fw_asystart(struct fw_xfer *xfer)
{
	struct firewire_comm *fc = xfer->fc;
	int s;

	s = splfw();
	/* Protect from interrupt/timeout */
	FW_GLOCK(fc);
	xfer->flag = FWXF_INQ;
	STAILQ_INSERT_TAIL(&xfer->q->q, xfer, link);
#if 0
	xfer->q->queued++;
#endif
	FW_GUNLOCK(fc);
	splx(s);
	/* XXX just queue for mbuf */
	if (xfer->mbuf == NULL)
		xfer->q->start(fc);
	return;
}

static void
firewire_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "firewire", -1);
}

static int
firewire_probe(device_t dev)
{
	device_set_desc(dev, "IEEE1394(FireWire) bus");
	return (0);
}

static void
firewire_xfer_timeout(void *arg, int pending)
{
	struct firewire_comm *fc = (struct firewire_comm *)arg;
	struct fw_xfer *xfer, *txfer;
	struct timeval tv;
	struct timeval split_timeout;
	STAILQ_HEAD(, fw_xfer) xfer_timeout;
	int i, s;

	split_timeout.tv_sec = 0;
	split_timeout.tv_usec = 200 * 1000;	/* 200 msec */

	microtime(&tv);
	timevalsub(&tv, &split_timeout);
	STAILQ_INIT(&xfer_timeout);

	s = splfw();
	mtx_lock(&fc->tlabel_lock);
	for (i = 0; i < 0x40; i++) {
		while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) {
			if ((xfer->flag & FWXF_SENT) == 0)
				/* not sent yet */
				break;
			if (timevalcmp(&xfer->tv, &tv, >))
				/* the rests are newer than this */
				break;
			device_printf(fc->bdev,
			    "split transaction timeout: "
			    "tl=0x%x flag=0x%02x\n", i, xfer->flag);
			fw_dump_hdr(&xfer->send.hdr, "send");
			xfer->resp = ETIMEDOUT;
			xfer->tl = -1;
			STAILQ_REMOVE_HEAD(&fc->tlabels[i], tlabel);
			STAILQ_INSERT_TAIL(&xfer_timeout, xfer, tlabel);
		}
	}
	mtx_unlock(&fc->tlabel_lock);
	splx(s);
	fc->timeout(fc);

	STAILQ_FOREACH_SAFE(xfer, &xfer_timeout, tlabel, txfer)
		xfer->hand(xfer);
}

#define WATCHDOG_HZ 10
static void
firewire_watchdog(void *arg)
{
	struct firewire_comm *fc;
	static int watchdog_clock = 0;

	fc = (struct firewire_comm *)arg;

	/*
	 * At boot stage, the device interrupt is disabled and
	 * we encounter timeouts easily. To avoid this, ignore
	 * the clock interrupt for a while.
	 */
	if (watchdog_clock > WATCHDOG_HZ * 15)
		taskqueue_enqueue(fc->taskqueue, &fc->task_timeout);
	else
		watchdog_clock++;

	callout_reset(&fc->timeout_callout, hz / WATCHDOG_HZ,
	    (void *)firewire_watchdog, (void *)fc);
}

/*
 * The attach routine.
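 * Allocates the CROM source buffer plus the topology and speed maps,
 * initializes the locks, callouts and the split-transaction timeout
 * task, starts the watchdog and the bus probe thread, and finally
 * requests an initial bus reset via fc->ibr().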
 */
static int
firewire_attach(device_t dev)
{
	int unit;
	struct firewire_softc *sc = device_get_softc(dev);
	device_t pa = device_get_parent(dev);
	struct firewire_comm *fc;

	fc = (struct firewire_comm *)device_get_softc(pa);
	sc->fc = fc;
	fc->status = FWBUSNOTREADY;

	unit = device_get_unit(dev);
	if (fc->nisodma > FWMAXNDMA)
		fc->nisodma = FWMAXNDMA;

	fwdev_makedev(sc);

	fc->crom_src_buf = (struct crom_src_buf *)malloc(
	    sizeof(struct crom_src_buf), M_FW, M_NOWAIT | M_ZERO);
	if (fc->crom_src_buf == NULL) {
		device_printf(fc->dev,
		    "%s: Malloc Failure crom src buff\n", __func__);
		return ENOMEM;
	}
	fc->topology_map = (struct fw_topology_map *)malloc(
	    sizeof(struct fw_topology_map), M_FW, M_NOWAIT | M_ZERO);
	if (fc->topology_map == NULL) {
		device_printf(fc->dev,
		    "%s: Malloc Failure topology map\n", __func__);
		free(fc->crom_src_buf, M_FW);
		return ENOMEM;
	}
	fc->speed_map = (struct fw_speed_map *)malloc(
	    sizeof(struct fw_speed_map), M_FW, M_NOWAIT | M_ZERO);
	if (fc->speed_map == NULL) {
		device_printf(fc->dev,
		    "%s: Malloc Failure speed map\n", __func__);
		free(fc->crom_src_buf, M_FW);
		free(fc->topology_map, M_FW);
		return ENOMEM;
	}

	mtx_init(&fc->wait_lock, "fwwait", NULL, MTX_DEF);
	mtx_init(&fc->tlabel_lock, "fwtlabel", NULL, MTX_DEF);
	CALLOUT_INIT(&fc->timeout_callout);
	CALLOUT_INIT(&fc->bmr_callout);
	CALLOUT_INIT(&fc->busprobe_callout);
	TASK_INIT(&fc->task_timeout, 0, firewire_xfer_timeout, (void *)fc);

	callout_reset(&sc->fc->timeout_callout, hz,
	    (void *)firewire_watchdog, (void *)sc->fc);

	/* create thread */
	kproc_create(fw_bus_probe_thread, (void *)fc, &fc->probe_thread,
	    0, 0, "fw%d_probe", unit);

	/* Locate our children */
	bus_generic_probe(dev);

	/* launch attachment of the added children */
	bus_generic_attach(dev);

	/* bus_reset */
	FW_GLOCK(fc);
	fw_busreset(fc, FWBUSNOTREADY);
	FW_GUNLOCK(fc);
	fc->ibr(fc);

	return 0;
}

/*
 * Attach it as child.
 */
static device_t
firewire_add_child(device_t dev, u_int order, const char *name, int unit)
{
	device_t child;
	struct firewire_softc *sc;

	sc = (struct firewire_softc *)device_get_softc(dev);
	child = device_add_child(dev, name, unit);
	if (child) {
		device_set_ivars(child, sc->fc);
		device_probe_and_attach(child);
	}

	return child;
}

static int
firewire_resume(device_t dev)
{
	struct firewire_softc *sc;

	sc = (struct firewire_softc *)device_get_softc(dev);
	sc->fc->status = FWBUSNOTREADY;

	bus_generic_resume(dev);

	return (0);
}

/*
 * Detach it.
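 * Tears down in roughly the reverse order of attach: stop the probe
 * thread, drain the AT queues, destroy the devfs nodes, detach the
 * children, stop the callouts, and free the remembered devices and
 * the maps before destroying the mutexes.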
*/ static int firewire_detach(device_t dev) { struct firewire_softc *sc; struct firewire_comm *fc; struct fw_device *fwdev, *fwdev_next; int err; sc = (struct firewire_softc *)device_get_softc(dev); fc = sc->fc; mtx_lock(&fc->wait_lock); fc->status = FWBUSDETACH; wakeup(fc); if (msleep(fc->probe_thread, &fc->wait_lock, PWAIT, "fwthr", hz * 60)) printf("firewire probe thread didn't die\n"); mtx_unlock(&fc->wait_lock); if (fc->arq !=0 && fc->arq->maxq > 0) fw_drain_txq(fc); if ((err = fwdev_destroydev(sc)) != 0) return err; if ((err = bus_generic_detach(dev)) != 0) return err; callout_stop(&fc->timeout_callout); callout_stop(&fc->bmr_callout); callout_stop(&fc->busprobe_callout); /* XXX xfer_free and untimeout on all xfers */ for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL; fwdev = fwdev_next) { fwdev_next = STAILQ_NEXT(fwdev, link); free(fwdev, M_FW); } free(fc->topology_map, M_FW); free(fc->speed_map, M_FW); free(fc->crom_src_buf, M_FW); mtx_destroy(&fc->tlabel_lock); mtx_destroy(&fc->wait_lock); return(0); } #if 0 static int firewire_shutdown( device_t dev ) { return 0; } #endif static void fw_xferq_drain(struct fw_xferq *xferq) { struct fw_xfer *xfer; while ((xfer = STAILQ_FIRST(&xferq->q)) != NULL) { STAILQ_REMOVE_HEAD(&xferq->q, link); #if 0 xferq->queued --; #endif xfer->resp = EAGAIN; xfer->flag = FWXF_SENTERR; fw_xfer_done(xfer); } } void fw_drain_txq(struct firewire_comm *fc) { struct fw_xfer *xfer, *txfer; STAILQ_HEAD(, fw_xfer) xfer_drain; int i; STAILQ_INIT(&xfer_drain); FW_GLOCK(fc); fw_xferq_drain(fc->atq); fw_xferq_drain(fc->ats); for(i = 0; i < fc->nisodma; i++) fw_xferq_drain(fc->it[i]); FW_GUNLOCK(fc); mtx_lock(&fc->tlabel_lock); for (i = 0; i < 0x40; i ++) while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) { if (firewire_debug) printf("tl=%d flag=%d\n", i, xfer->flag); xfer->tl = -1; xfer->resp = EAGAIN; STAILQ_REMOVE_HEAD(&fc->tlabels[i], tlabel); STAILQ_INSERT_TAIL(&xfer_drain, xfer, tlabel); } mtx_unlock(&fc->tlabel_lock); STAILQ_FOREACH_SAFE(xfer, &xfer_drain, tlabel, txfer) xfer->hand(xfer); } static void fw_reset_csr(struct firewire_comm *fc) { int i; CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14 ; CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); CSRARC(fc, NODE_IDS) = 0x3f; CSRARC(fc, TOPO_MAP + 8) = 0; fc->irm = -1; fc->max_node = -1; for(i = 2; i < 0x100/4 - 2 ; i++){ CSRARC(fc, SPED_MAP + i * 4) = 0; } CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14 ; CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); CSRARC(fc, RESET_START) = 0; CSRARC(fc, SPLIT_TIMEOUT_HI) = 0; CSRARC(fc, SPLIT_TIMEOUT_LO) = 800 << 19; CSRARC(fc, CYCLE_TIME) = 0x0; CSRARC(fc, BUS_TIME) = 0x0; CSRARC(fc, BUS_MGR_ID) = 0x3f; CSRARC(fc, BANDWIDTH_AV) = 4915; CSRARC(fc, CHANNELS_AV_HI) = 0xffffffff; CSRARC(fc, CHANNELS_AV_LO) = 0xffffffff; - CSRARC(fc, IP_CHANNELS) = (1 << 31); + CSRARC(fc, IP_CHANNELS) = (1U << 31); CSRARC(fc, CONF_ROM) = 0x04 << 24; CSRARC(fc, CONF_ROM + 4) = 0x31333934; /* means strings 1394 */ CSRARC(fc, CONF_ROM + 8) = 1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 0xff << 16 | 0x09 << 8; CSRARC(fc, CONF_ROM + 0xc) = 0; /* DV depend CSRs see blue book */ CSRARC(fc, oPCR) &= ~DV_BROADCAST_ON; CSRARC(fc, iPCR) &= ~DV_BROADCAST_ON; CSRARC(fc, STATE_CLEAR) &= ~(1 << 23 | 1 << 15 | 1 << 14 ); CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); } static void fw_init_crom(struct firewire_comm *fc) { struct crom_src *src; src = &fc->crom_src_buf->src; bzero(src, sizeof(struct crom_src)); /* BUS info sample */ src->hdr.info_len = 4; 
	src->businfo.bus_name = CSR_BUS_NAME_IEEE1394;
	src->businfo.irmc = 1;
	src->businfo.cmc = 1;
	src->businfo.isc = 1;
	src->businfo.bmc = 1;
	src->businfo.pmc = 0;
	src->businfo.cyc_clk_acc = 100;
	src->businfo.max_rec = fc->maxrec;
	src->businfo.max_rom = MAXROM_4;
#define FW_GENERATION_CHANGEABLE 2
	src->businfo.generation = FW_GENERATION_CHANGEABLE;
	src->businfo.link_spd = fc->speed;

	src->businfo.eui64.hi = fc->eui.hi;
	src->businfo.eui64.lo = fc->eui.lo;
	STAILQ_INIT(&src->chunk_list);

	fc->crom_src = src;
	fc->crom_root = &fc->crom_src_buf->root;
}

static void
fw_reset_crom(struct firewire_comm *fc)
{
	struct crom_src_buf *buf;
	struct crom_src *src;
	struct crom_chunk *root;

	buf = fc->crom_src_buf;
	src = fc->crom_src;
	root = fc->crom_root;

	STAILQ_INIT(&src->chunk_list);

	bzero(root, sizeof(struct crom_chunk));
	crom_add_chunk(src, NULL, root, 0);
	crom_add_entry(root, CSRKEY_NCAP, 0x0083c0); /* XXX */
	/* private company_id */
	crom_add_entry(root, CSRKEY_VENDOR, CSRVAL_VENDOR_PRIVATE);
#ifdef __DragonFly__
	crom_add_simple_text(src, root, &buf->vendor, "DragonFly Project");
	crom_add_entry(root, CSRKEY_HW, __DragonFly_cc_version);
#else
	crom_add_simple_text(src, root, &buf->vendor, "FreeBSD Project");
	crom_add_entry(root, CSRKEY_HW, __FreeBSD_version);
#endif
	mtx_lock(&prison0.pr_mtx);
	crom_add_simple_text(src, root, &buf->hw, prison0.pr_hostname);
	mtx_unlock(&prison0.pr_mtx);
}

/*
 * Called after bus reset.
 */
void
fw_busreset(struct firewire_comm *fc, uint32_t new_status)
{
	struct firewire_dev_comm *fdc;
	struct crom_src *src;
	device_t *devlistp;
	uint32_t *newrom;
	int i, devcnt;

	FW_GLOCK_ASSERT(fc);
	if (fc->status == FWBUSMGRELECT)
		callout_stop(&fc->bmr_callout);

	fc->status = new_status;
	fw_reset_csr(fc);

	if (fc->status == FWBUSNOTREADY)
		fw_init_crom(fc);

	fw_reset_crom(fc);

	if (device_get_children(fc->bdev, &devlistp, &devcnt) == 0) {
		for (i = 0; i < devcnt; i++)
			if (device_get_state(devlistp[i]) >= DS_ATTACHED) {
				fdc = device_get_softc(devlistp[i]);
				if (fdc->post_busreset != NULL)
					fdc->post_busreset(fdc);
			}
		free(devlistp, M_TEMP);
	}

	src = &fc->crom_src_buf->src;
	/*
	 * If the old config rom needs to be overwritten,
	 * bump the businfo.generation indicator to
	 * indicate that we need to be reprobed.
	 * See 1394a-2000 8.3.2.5.4 for more details.
	 * The generation starts at 2 and rolls over at 0xF
	 * back to 2.
	 *
	 * A generation of 0 indicates a device
	 * that is not 1394a-2000 compliant.
	 * A generation of 1 indicates a device that
	 * does not change its Bus Info Block or
	 * Configuration ROM.
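 * In practice the generation therefore cycles through 2..0xF; the
 * code below bumps it only when the freshly built ROM image differs
 * from the one currently exported in fc->config_rom.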
*/ #define FW_MAX_GENERATION 0xF newrom = malloc(CROMSIZE, M_FW, M_NOWAIT | M_ZERO); src = &fc->crom_src_buf->src; crom_load(src, newrom, CROMSIZE); if (bcmp(newrom, fc->config_rom, CROMSIZE) != 0) { if ( src->businfo.generation++ > FW_MAX_GENERATION ) src->businfo.generation = FW_GENERATION_CHANGEABLE; bcopy(newrom, (void *)fc->config_rom, CROMSIZE); } free(newrom, M_FW); } /* Call once after reboot */ void fw_init(struct firewire_comm *fc) { int i; #ifdef FW_VMACCESS struct fw_xfer *xfer; struct fw_bind *fwb; #endif fc->arq->queued = 0; fc->ars->queued = 0; fc->atq->queued = 0; fc->ats->queued = 0; fc->arq->buf = NULL; fc->ars->buf = NULL; fc->atq->buf = NULL; fc->ats->buf = NULL; fc->arq->flag = 0; fc->ars->flag = 0; fc->atq->flag = 0; fc->ats->flag = 0; STAILQ_INIT(&fc->atq->q); STAILQ_INIT(&fc->ats->q); for( i = 0 ; i < fc->nisodma ; i ++ ){ fc->it[i]->queued = 0; fc->ir[i]->queued = 0; fc->it[i]->start = NULL; fc->ir[i]->start = NULL; fc->it[i]->buf = NULL; fc->ir[i]->buf = NULL; fc->it[i]->flag = FWXFERQ_STREAM; fc->ir[i]->flag = FWXFERQ_STREAM; STAILQ_INIT(&fc->it[i]->q); STAILQ_INIT(&fc->ir[i]->q); } fc->arq->maxq = FWMAXQUEUE; fc->ars->maxq = FWMAXQUEUE; fc->atq->maxq = FWMAXQUEUE; fc->ats->maxq = FWMAXQUEUE; for( i = 0 ; i < fc->nisodma ; i++){ fc->ir[i]->maxq = FWMAXQUEUE; fc->it[i]->maxq = FWMAXQUEUE; } CSRARC(fc, TOPO_MAP) = 0x3f1 << 16; CSRARC(fc, TOPO_MAP + 4) = 1; CSRARC(fc, SPED_MAP) = 0x3f1 << 16; CSRARC(fc, SPED_MAP + 4) = 1; STAILQ_INIT(&fc->devices); /* Initialize Async handlers */ STAILQ_INIT(&fc->binds); for( i = 0 ; i < 0x40 ; i++){ STAILQ_INIT(&fc->tlabels[i]); } /* DV depend CSRs see blue book */ #if 0 CSRARC(fc, oMPR) = 0x3fff0001; /* # output channel = 1 */ CSRARC(fc, oPCR) = 0x8000007a; for(i = 4 ; i < 0x7c/4 ; i+=4){ CSRARC(fc, i + oPCR) = 0x8000007a; } CSRARC(fc, iMPR) = 0x00ff0001; /* # input channel = 1 */ CSRARC(fc, iPCR) = 0x803f0000; for(i = 4 ; i < 0x7c/4 ; i+=4){ CSRARC(fc, i + iPCR) = 0x0; } #endif fc->crom_src_buf = NULL; #ifdef FW_VMACCESS xfer = fw_xfer_alloc(); if(xfer == NULL) return; fwb = (struct fw_bind *)malloc(sizeof (struct fw_bind), M_FW, M_NOWAIT); if(fwb == NULL){ fw_xfer_free(xfer); return; } xfer->hand = fw_vmaccess; xfer->fc = fc; xfer->sc = NULL; fwb->start_hi = 0x2; fwb->start_lo = 0; fwb->addrlen = 0xffffffff; fwb->xfer = xfer; fw_bindadd(fc, fwb); #endif } #define BIND_CMP(addr, fwb) (((addr) < (fwb)->start)?-1:\ ((fwb)->end < (addr))?1:0) /* * To lookup bound process from IEEE1394 address. */ struct fw_bind * fw_bindlookup(struct firewire_comm *fc, uint16_t dest_hi, uint32_t dest_lo) { u_int64_t addr; struct fw_bind *tfw, *r = NULL; addr = ((u_int64_t)dest_hi << 32) | dest_lo; FW_GLOCK(fc); STAILQ_FOREACH(tfw, &fc->binds, fclist) if (BIND_CMP(addr, tfw) == 0) { r = tfw; break; } FW_GUNLOCK(fc); return(r); } /* * To bind IEEE1394 address block to process. */ int fw_bindadd(struct firewire_comm *fc, struct fw_bind *fwb) { struct fw_bind *tfw, *prev = NULL; int r = 0; if (fwb->start > fwb->end) { printf("%s: invalid range\n", __func__); return EINVAL; } FW_GLOCK(fc); STAILQ_FOREACH(tfw, &fc->binds, fclist) { if (fwb->end < tfw->start) break; prev = tfw; } if (prev == NULL) STAILQ_INSERT_HEAD(&fc->binds, fwb, fclist); else if (prev->end < fwb->start) STAILQ_INSERT_AFTER(&fc->binds, prev, fwb, fclist); else { printf("%s: bind failed\n", __func__); r = EBUSY; } FW_GUNLOCK(fc); return (r); } /* * To free IEEE1394 address block. 
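 * The binding is unlinked from fc->binds under FW_GLOCK(); returns 0
 * on success and 1 when the binding is not on the list.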
*/ int fw_bindremove(struct firewire_comm *fc, struct fw_bind *fwb) { #if 0 struct fw_xfer *xfer, *next; #endif struct fw_bind *tfw; int s; s = splfw(); FW_GLOCK(fc); STAILQ_FOREACH(tfw, &fc->binds, fclist) if (tfw == fwb) { STAILQ_REMOVE(&fc->binds, fwb, fw_bind, fclist); goto found; } printf("%s: no such binding\n", __func__); FW_GUNLOCK(fc); splx(s); return (1); found: #if 0 /* shall we do this? */ for (xfer = STAILQ_FIRST(&fwb->xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } STAILQ_INIT(&fwb->xferlist); #endif FW_GUNLOCK(fc); splx(s); return 0; } int fw_xferlist_add(struct fw_xferlist *q, struct malloc_type *type, int slen, int rlen, int n, struct firewire_comm *fc, void *sc, void (*hand)(struct fw_xfer *)) { int i, s; struct fw_xfer *xfer; for (i = 0; i < n; i++) { xfer = fw_xfer_alloc_buf(type, slen, rlen); if (xfer == NULL) return (n); xfer->fc = fc; xfer->sc = sc; xfer->hand = hand; s = splfw(); STAILQ_INSERT_TAIL(q, xfer, link); splx(s); } return (n); } void fw_xferlist_remove(struct fw_xferlist *q) { struct fw_xfer *xfer, *next; for (xfer = STAILQ_FIRST(q); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free_buf(xfer); } STAILQ_INIT(q); } /* * dump packet header */ static void fw_dump_hdr(struct fw_pkt *fp, char *prefix) { printf("%s: dst=0x%02x tl=0x%02x rt=%d tcode=0x%x pri=0x%x " "src=0x%03x\n", prefix, fp->mode.hdr.dst & 0x3f, fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tlrt & 3, fp->mode.hdr.tcode, fp->mode.hdr.pri, fp->mode.hdr.src); } /* * To free transaction label. */ static void fw_tl_free(struct firewire_comm *fc, struct fw_xfer *xfer) { struct fw_xfer *txfer; int s; s = splfw(); mtx_lock(&fc->tlabel_lock); if (xfer->tl < 0) { mtx_unlock(&fc->tlabel_lock); return; } #if 1 /* make sure the label is allocated */ STAILQ_FOREACH(txfer, &fc->tlabels[xfer->tl], tlabel) if(txfer == xfer) break; if (txfer == NULL) { printf("%s: the xfer is not in the queue " "(tlabel=%d, flag=0x%x)\n", __FUNCTION__, xfer->tl, xfer->flag); fw_dump_hdr(&xfer->send.hdr, "send"); fw_dump_hdr(&xfer->recv.hdr, "recv"); kdb_backtrace(); mtx_unlock(&fc->tlabel_lock); splx(s); return; } #endif STAILQ_REMOVE(&fc->tlabels[xfer->tl], xfer, fw_xfer, tlabel); xfer->tl = -1; mtx_unlock(&fc->tlabel_lock); splx(s); return; } /* * To obtain XFER structure by transaction label. */ static struct fw_xfer * fw_tl2xfer(struct firewire_comm *fc, int node, int tlabel, int tcode) { struct fw_xfer *xfer; int s = splfw(); int req; mtx_lock(&fc->tlabel_lock); STAILQ_FOREACH(xfer, &fc->tlabels[tlabel], tlabel) if(xfer->send.hdr.mode.hdr.dst == node) { mtx_unlock(&fc->tlabel_lock); splx(s); KASSERT(xfer->tl == tlabel, ("xfer->tl 0x%x != 0x%x", xfer->tl, tlabel)); /* extra sanity check */ req = xfer->send.hdr.mode.hdr.tcode; if (xfer->fc->tcode[req].valid_res != tcode) { printf("%s: invalid response tcode " "(0x%x for 0x%x)\n", __FUNCTION__, tcode, req); return(NULL); } if (firewire_debug > 2) printf("fw_tl2xfer: found tl=%d\n", tlabel); return(xfer); } mtx_unlock(&fc->tlabel_lock); if (firewire_debug > 1) printf("fw_tl2xfer: not found tl=%d\n", tlabel); splx(s); return(NULL); } /* * To allocate IEEE1394 XFER structure. 
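 * Allocated with M_NOWAIT, so this can fail and callers must check
 * for NULL; fw_xfer_alloc_buf() additionally allocates the send and
 * receive payload buffers.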
*/ struct fw_xfer * fw_xfer_alloc(struct malloc_type *type) { struct fw_xfer *xfer; xfer = malloc(sizeof(struct fw_xfer), type, M_NOWAIT | M_ZERO); if (xfer == NULL) return xfer; xfer->malloc = type; return xfer; } struct fw_xfer * fw_xfer_alloc_buf(struct malloc_type *type, int send_len, int recv_len) { struct fw_xfer *xfer; xfer = fw_xfer_alloc(type); if (xfer == NULL) return(NULL); xfer->send.pay_len = send_len; xfer->recv.pay_len = recv_len; if (send_len > 0) { xfer->send.payload = malloc(send_len, type, M_NOWAIT | M_ZERO); if (xfer->send.payload == NULL) { fw_xfer_free(xfer); return(NULL); } } if (recv_len > 0) { xfer->recv.payload = malloc(recv_len, type, M_NOWAIT); if (xfer->recv.payload == NULL) { if (xfer->send.payload != NULL) free(xfer->send.payload, type); fw_xfer_free(xfer); return(NULL); } } return(xfer); } /* * IEEE1394 XFER post process. */ void fw_xfer_done(struct fw_xfer *xfer) { if (xfer->hand == NULL) { printf("hand == NULL\n"); return; } if (xfer->fc == NULL) panic("fw_xfer_done: why xfer->fc is NULL?"); fw_tl_free(xfer->fc, xfer); xfer->hand(xfer); } void fw_xfer_unload(struct fw_xfer* xfer) { int s; if(xfer == NULL ) return; if(xfer->flag & FWXF_INQ){ printf("fw_xfer_free FWXF_INQ\n"); s = splfw(); FW_GLOCK(xfer->fc); STAILQ_REMOVE(&xfer->q->q, xfer, fw_xfer, link); #if 0 xfer->q->queued --; #endif FW_GUNLOCK(xfer->fc); splx(s); } if (xfer->fc != NULL) { /* * Ensure that any tlabel owner can't access this * xfer after it's freed. */ fw_tl_free(xfer->fc, xfer); #if 1 if(xfer->flag & FWXF_START) /* * This could happen if: * 1. We call fwohci_arcv() before fwohci_txd(). * 2. firewire_watch() is called. */ printf("fw_xfer_free FWXF_START\n"); #endif } xfer->flag = FWXF_INIT; xfer->resp = 0; } /* * To free IEEE1394 XFER structure. */ void fw_xfer_free_buf( struct fw_xfer* xfer) { if (xfer == NULL) { printf("%s: xfer == NULL\n", __func__); return; } fw_xfer_unload(xfer); if(xfer->send.payload != NULL){ free(xfer->send.payload, xfer->malloc); } if(xfer->recv.payload != NULL){ free(xfer->recv.payload, xfer->malloc); } free(xfer, xfer->malloc); } void fw_xfer_free( struct fw_xfer* xfer) { if (xfer == NULL) { printf("%s: xfer == NULL\n", __func__); return; } fw_xfer_unload(xfer); free(xfer, xfer->malloc); } void fw_asy_callback_free(struct fw_xfer *xfer) { #if 0 printf("asyreq done flag=0x%02x resp=%d\n", xfer->flag, xfer->resp); #endif fw_xfer_free(xfer); } /* * To configure PHY. */ static void fw_phy_config(struct firewire_comm *fc, int root_node, int gap_count) { struct fw_xfer *xfer; struct fw_pkt *fp; fc->status = FWBUSPHYCONF; xfer = fw_xfer_alloc(M_FWXFER); if (xfer == NULL) return; xfer->fc = fc; xfer->hand = fw_asy_callback_free; fp = &xfer->send.hdr; fp->mode.ld[1] = 0; if (root_node >= 0) fp->mode.ld[1] |= (root_node & 0x3f) << 24 | 1 << 23; if (gap_count >= 0) fp->mode.ld[1] |= 1 << 22 | (gap_count & 0x3f) << 16; fp->mode.ld[2] = ~fp->mode.ld[1]; /* XXX Dangerous, how to pass PHY packet to device driver */ fp->mode.common.tcode |= FWTCODE_PHY; if (firewire_debug) device_printf(fc->bdev, "%s: root_node=%d gap_count=%d\n", __func__, root_node, gap_count); fw_asyreq(fc, -1, xfer); } /* * Dump self ID. 
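 * Prints either the port fields of a sequel packet (page 0 or 1) or
 * the link/gap/speed/power summary of an initial self-ID packet.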
*/ static void fw_print_sid(uint32_t sid) { union fw_self_id *s; s = (union fw_self_id *) &sid; if ( s->p0.sequel ) { if ( s->p1.sequence_num == FW_SELF_ID_PAGE0 ) { printf("node:%d p3:%d p4:%d p5:%d p6:%d p7:%d" "p8:%d p9:%d p10:%d\n", s->p1.phy_id, s->p1.port3, s->p1.port4, s->p1.port5, s->p1.port6, s->p1.port7, s->p1.port8, s->p1.port9, s->p1.port10); } else if (s->p2.sequence_num == FW_SELF_ID_PAGE1 ){ printf("node:%d p11:%d p12:%d p13:%d p14:%d p15:%d\n", s->p2.phy_id, s->p2.port11, s->p2.port12, s->p2.port13, s->p2.port14, s->p2.port15); } else { printf("node:%d Unknown Self ID Page number %d\n", s->p1.phy_id, s->p1.sequence_num); } } else { printf("node:%d link:%d gap:%d spd:%d con:%d pwr:%d" " p0:%d p1:%d p2:%d i:%d m:%d\n", s->p0.phy_id, s->p0.link_active, s->p0.gap_count, s->p0.phy_speed, s->p0.contender, s->p0.power_class, s->p0.port0, s->p0.port1, s->p0.port2, s->p0.initiated_reset, s->p0.more_packets); } } /* * To receive self ID. */ void fw_sidrcv(struct firewire_comm* fc, uint32_t *sid, u_int len) { uint32_t *p; union fw_self_id *self_id; u_int i, j, node, c_port = 0, i_branch = 0; fc->sid_cnt = len /(sizeof(uint32_t) * 2); fc->max_node = fc->nodeid & 0x3f; CSRARC(fc, NODE_IDS) = ((uint32_t)fc->nodeid) << 16; fc->status = FWBUSCYMELECT; fc->topology_map->crc_len = 2; fc->topology_map->generation ++; fc->topology_map->self_id_count = 0; fc->topology_map->node_count = 0; fc->speed_map->generation ++; fc->speed_map->crc_len = 1 + (64*64 + 3) / 4; self_id = &fc->topology_map->self_id[0]; for(i = 0; i < fc->sid_cnt; i ++){ if (sid[1] != ~sid[0]) { device_printf(fc->bdev, "%s: ERROR invalid self-id packet\n", __func__); sid += 2; continue; } *self_id = *((union fw_self_id *)sid); fc->topology_map->crc_len++; if(self_id->p0.sequel == 0){ fc->topology_map->node_count ++; c_port = 0; if (firewire_debug) fw_print_sid(sid[0]); node = self_id->p0.phy_id; if(fc->max_node < node){ fc->max_node = self_id->p0.phy_id; } /* XXX I'm not sure this is the right speed_map */ fc->speed_map->speed[node][node] = self_id->p0.phy_speed; for (j = 0; j < node; j ++) { fc->speed_map->speed[j][node] = fc->speed_map->speed[node][j] = min(fc->speed_map->speed[j][j], self_id->p0.phy_speed); } if ((fc->irm == -1 || self_id->p0.phy_id > fc->irm) && (self_id->p0.link_active && self_id->p0.contender)) { fc->irm = self_id->p0.phy_id; } if(self_id->p0.port0 >= 0x2){ c_port++; } if(self_id->p0.port1 >= 0x2){ c_port++; } if(self_id->p0.port2 >= 0x2){ c_port++; } } if(c_port > 2){ i_branch += (c_port - 2); } sid += 2; self_id++; fc->topology_map->self_id_count ++; } /* CRC */ fc->topology_map->crc = fw_crc16( (uint32_t *)&fc->topology_map->generation, fc->topology_map->crc_len * 4); fc->speed_map->crc = fw_crc16( (uint32_t *)&fc->speed_map->generation, fc->speed_map->crc_len * 4); /* byteswap and copy to CSR */ p = (uint32_t *)fc->topology_map; for (i = 0; i <= fc->topology_map->crc_len; i++) CSRARC(fc, TOPO_MAP + i * 4) = htonl(*p++); p = (uint32_t *)fc->speed_map; CSRARC(fc, SPED_MAP) = htonl(*p++); CSRARC(fc, SPED_MAP + 4) = htonl(*p++); /* don't byte-swap uint8_t array */ bcopy(p, &CSRARC(fc, SPED_MAP + 8), (fc->speed_map->crc_len - 1)*4); fc->max_hop = fc->max_node - i_branch; device_printf(fc->bdev, "%d nodes, maxhop <= %d %s irm(%d) %s\n", fc->max_node + 1, fc->max_hop, (fc->irm == -1) ? "Not IRM capable" : "cable IRM", fc->irm, (fc->irm == fc->nodeid) ? 
" (me) " : ""); if (try_bmr && (fc->irm != -1) && (CSRARC(fc, BUS_MGR_ID) == 0x3f)) { if (fc->irm == fc->nodeid) { fc->status = FWBUSMGRDONE; CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, fc->irm); fw_bmr(fc); } else { fc->status = FWBUSMGRELECT; callout_reset(&fc->bmr_callout, hz/8, (void *)fw_try_bmr, (void *)fc); } } else fc->status = FWBUSMGRDONE; callout_reset(&fc->busprobe_callout, hz/4, (void *)fw_bus_probe, (void *)fc); } /* * To probe devices on the IEEE1394 bus. */ static void fw_bus_probe(struct firewire_comm *fc) { int s; struct fw_device *fwdev; s = splfw(); fc->status = FWBUSEXPLORE; /* Invalidate all devices, just after bus reset. */ if (firewire_debug) device_printf(fc->bdev, "%s:" "iterate and invalidate all nodes\n", __func__); STAILQ_FOREACH(fwdev, &fc->devices, link) if (fwdev->status != FWDEVINVAL) { fwdev->status = FWDEVINVAL; fwdev->rcnt = 0; if (firewire_debug) device_printf(fc->bdev, "%s:" "Invalidate Dev ID: %08x%08x\n", __func__, fwdev->eui.hi, fwdev->eui.lo); } else { if (firewire_debug) device_printf(fc->bdev, "%s:" "Dev ID: %08x%08x already invalid\n", __func__, fwdev->eui.hi, fwdev->eui.lo); } splx(s); wakeup((void *)fc); } static int fw_explore_read_quads(struct fw_device *fwdev, int offset, uint32_t *quad, int length) { struct fw_xfer *xfer; uint32_t tmp; int i, error; for (i = 0; i < length; i ++, offset += sizeof(uint32_t)) { xfer = fwmem_read_quad(fwdev, NULL, -1, 0xffff, 0xf0000000 | offset, (void *)&tmp, fw_xferwake); if (xfer == NULL) return (-1); fw_xferwait(xfer); if (xfer->resp == 0) quad[i] = ntohl(tmp); error = xfer->resp; fw_xfer_free(xfer); if (error) return (error); } return (0); } static int fw_explore_csrblock(struct fw_device *fwdev, int offset, int recur) { int err, i, off; struct csrdirectory *dir; struct csrreg *reg; dir = (struct csrdirectory *)&fwdev->csrrom[offset/sizeof(uint32_t)]; err = fw_explore_read_quads(fwdev, CSRROMOFF + offset, (uint32_t *)dir, 1); if (err) return (-1); offset += sizeof(uint32_t); reg = (struct csrreg *)&fwdev->csrrom[offset/sizeof(uint32_t)]; err = fw_explore_read_quads(fwdev, CSRROMOFF + offset, (uint32_t *)reg, dir->crc_len); if (err) return (-1); /* XXX check CRC */ off = CSRROMOFF + offset + sizeof(uint32_t) * (dir->crc_len - 1); if (fwdev->rommax < off) fwdev->rommax = off; if (recur == 0) return (0); for (i = 0; i < dir->crc_len; i ++, offset += sizeof(uint32_t)) { if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_D) recur = 1; else if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_L) recur = 0; else continue; off = offset + reg[i].val * sizeof(uint32_t); if (off > CROMSIZE) { printf("%s: invalid offset %d\n", __FUNCTION__, off); return(-1); } err = fw_explore_csrblock(fwdev, off, recur); if (err) return (-1); } return (0); } static int fw_explore_node(struct fw_device *dfwdev) { struct firewire_comm *fc; struct fw_device *fwdev, *pfwdev, *tfwdev; uint32_t *csr; struct csrhdr *hdr; struct bus_info *binfo; int err, node; uint32_t speed_test = 0; fc = dfwdev->fc; csr = dfwdev->csrrom; node = dfwdev->dst; /* First quad */ err = fw_explore_read_quads(dfwdev, CSRROMOFF, &csr[0], 1); if (err) { device_printf(fc->bdev, "%s: node%d: explore_read_quads failure\n", __func__, node); dfwdev->status = FWDEVINVAL; return (-1); } hdr = (struct csrhdr *)&csr[0]; if (hdr->info_len != 4) { if (firewire_debug) device_printf(fc->bdev, "%s: node%d: wrong bus info len(%d)\n", __func__, node, hdr->info_len); dfwdev->status = FWDEVINVAL; return (-1); } /* bus info */ err = fw_explore_read_quads(dfwdev, CSRROMOFF + 0x04, &csr[1], 4); if (err) { 
device_printf(fc->bdev, "%s: node%d: error reading 0x04\n", __func__, node); dfwdev->status = FWDEVINVAL; return (-1); } binfo = (struct bus_info *)&csr[1]; if (binfo->bus_name != CSR_BUS_NAME_IEEE1394) { device_printf(fc->bdev, "%s: node%d: invalid bus name 0x%08x\n", __func__, node, binfo->bus_name); dfwdev->status = FWDEVINVAL; return (-1); } if (firewire_debug) device_printf(fc->bdev, "%s: node(%d) BUS INFO BLOCK:\n" "irmc(%d) cmc(%d) isc(%d) bmc(%d) pmc(%d) " "cyc_clk_acc(%d) max_rec(%d) max_rom(%d) " "generation(%d) link_spd(%d)\n", __func__, node, binfo->irmc, binfo->cmc, binfo->isc, binfo->bmc, binfo->pmc, binfo->cyc_clk_acc, binfo->max_rec, binfo->max_rom, binfo->generation, binfo->link_spd); STAILQ_FOREACH(fwdev, &fc->devices, link) if (FW_EUI64_EQUAL(fwdev->eui, binfo->eui64)) break; if (fwdev == NULL) { /* new device */ fwdev = malloc(sizeof(struct fw_device), M_FW, M_NOWAIT | M_ZERO); if (fwdev == NULL) { device_printf(fc->bdev, "%s: node%d: no memory\n", __func__, node); return (-1); } fwdev->fc = fc; fwdev->eui = binfo->eui64; fwdev->dst = dfwdev->dst; fwdev->maxrec = dfwdev->maxrec; fwdev->status = dfwdev->status; /* * Pre-1394a-2000 didn't have link_spd in * the Bus Info block, so try and use the * speed map value. * 1394a-2000 compliant devices only use * the Bus Info Block link spd value, so * ignore the speed map alltogether. SWB */ if ( binfo->link_spd == FWSPD_S100 /* 0 */) { device_printf(fc->bdev, "%s: " "Pre 1394a-2000 detected\n", __func__); fwdev->speed = fc->speed_map->speed[fc->nodeid][node]; } else fwdev->speed = binfo->link_spd; /* * Test this speed with a read to the CSRROM. * If it fails, slow down the speed and retry. */ while (fwdev->speed > FWSPD_S100 /* 0 */) { err = fw_explore_read_quads(fwdev, CSRROMOFF, &speed_test, 1); if (err) { device_printf(fc->bdev, "%s: fwdev->speed(%s)" " decremented due to negotiation\n", __func__, linkspeed[fwdev->speed]); fwdev->speed--; } else break; } /* * If the fwdev is not found in the * fc->devices TAILQ, then we will add it. */ pfwdev = NULL; STAILQ_FOREACH(tfwdev, &fc->devices, link) { if (tfwdev->eui.hi > fwdev->eui.hi || (tfwdev->eui.hi == fwdev->eui.hi && tfwdev->eui.lo > fwdev->eui.lo)) break; pfwdev = tfwdev; } if (pfwdev == NULL) STAILQ_INSERT_HEAD(&fc->devices, fwdev, link); else STAILQ_INSERT_AFTER(&fc->devices, pfwdev, fwdev, link); device_printf(fc->bdev, "New %s device ID:%08x%08x\n", linkspeed[fwdev->speed], fwdev->eui.hi, fwdev->eui.lo); } else { fwdev->dst = node; fwdev->status = FWDEVINIT; /* unchanged ? */ if (bcmp(&csr[0], &fwdev->csrrom[0], sizeof(uint32_t) * 5) == 0) { if (firewire_debug) device_printf(fc->dev, "node%d: crom unchanged\n", node); return (0); } } bzero(&fwdev->csrrom[0], CROMSIZE); /* copy first quad and bus info block */ bcopy(&csr[0], &fwdev->csrrom[0], sizeof(uint32_t) * 5); fwdev->rommax = CSRROMOFF + sizeof(uint32_t) * 4; err = fw_explore_csrblock(fwdev, 0x14, 1); /* root directory */ if (err) { if (firewire_debug) device_printf(fc->dev, "%s: explore csrblock failed err(%d)\n", __func__, err); fwdev->status = FWDEVINVAL; fwdev->csrrom[0] = 0; } return (err); } /* * Find the self_id packet for a node, ignoring sequels. 
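 * Returns a pointer into the topology map, or NULL when no initial
 * self-ID packet is recorded for the node.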
*/ static union fw_self_id * fw_find_self_id(struct firewire_comm *fc, int node) { uint32_t i; union fw_self_id *s; for (i = 0; i < fc->topology_map->self_id_count; i++) { s = &fc->topology_map->self_id[i]; if (s->p0.sequel) continue; if (s->p0.phy_id == node) return s; } return 0; } static void fw_explore(struct firewire_comm *fc) { int node, err, s, i, todo, todo2, trys; char nodes[63]; struct fw_device dfwdev; union fw_self_id *fwsid; todo = 0; /* setup dummy fwdev */ dfwdev.fc = fc; dfwdev.speed = 0; dfwdev.maxrec = 8; /* 512 */ dfwdev.status = FWDEVINIT; for (node = 0; node <= fc->max_node; node ++) { /* We don't probe myself and linkdown nodes */ if (node == fc->nodeid) { if (firewire_debug) device_printf(fc->bdev, "%s:" "found myself node(%d) fc->nodeid(%d) fc->max_node(%d)\n", __func__, node, fc->nodeid, fc->max_node); continue; } else if (firewire_debug) { device_printf(fc->bdev, "%s:" "node(%d) fc->max_node(%d) found\n", __func__, node, fc->max_node); } fwsid = fw_find_self_id(fc, node); if (!fwsid || !fwsid->p0.link_active) { if (firewire_debug) device_printf(fc->bdev, "%s: node%d: link down\n", __func__, node); continue; } nodes[todo++] = node; } s = splfw(); for (trys = 0; todo > 0 && trys < 3; trys ++) { todo2 = 0; for (i = 0; i < todo; i ++) { dfwdev.dst = nodes[i]; err = fw_explore_node(&dfwdev); if (err) nodes[todo2++] = nodes[i]; if (firewire_debug) device_printf(fc->bdev, "%s: node %d, err = %d\n", __func__, node, err); } todo = todo2; } splx(s); } static void fw_bus_probe_thread(void *arg) { struct firewire_comm *fc; fc = (struct firewire_comm *)arg; mtx_lock(&fc->wait_lock); while (fc->status != FWBUSDETACH) { if (fc->status == FWBUSEXPLORE) { mtx_unlock(&fc->wait_lock); fw_explore(fc); fc->status = FWBUSEXPDONE; if (firewire_debug) printf("bus_explore done\n"); fw_attach_dev(fc); mtx_lock(&fc->wait_lock); } msleep((void *)fc, &fc->wait_lock, PWAIT|PCATCH, "-", 0); } mtx_unlock(&fc->wait_lock); kproc_exit(0); } /* * To attach sub-devices layer onto IEEE1394 bus. */ static void fw_attach_dev(struct firewire_comm *fc) { struct fw_device *fwdev, *next; int i, err; device_t *devlistp; int devcnt; struct firewire_dev_comm *fdc; for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL; fwdev = next) { next = STAILQ_NEXT(fwdev, link); if (fwdev->status == FWDEVINIT) { fwdev->status = FWDEVATTACHED; } else if (fwdev->status == FWDEVINVAL) { fwdev->rcnt ++; if (firewire_debug) device_printf(fc->bdev, "%s:" "fwdev->rcnt(%d), hold_count(%d)\n", __func__, fwdev->rcnt, hold_count); if (fwdev->rcnt > hold_count) { /* * Remove devices which have not been seen * for a while. */ device_printf(fc->bdev, "%s:" "Removing missing device ID:%08x%08x\n", __func__, fwdev->eui.hi, fwdev->eui.lo); STAILQ_REMOVE(&fc->devices, fwdev, fw_device, link); free(fwdev, M_FW); } } } err = device_get_children(fc->bdev, &devlistp, &devcnt); if( err == 0 ) { for( i = 0 ; i < devcnt ; i++){ if (device_get_state(devlistp[i]) >= DS_ATTACHED) { fdc = device_get_softc(devlistp[i]); if (fdc->post_explore != NULL) fdc->post_explore(fdc); } } free(devlistp, M_TEMP); } return; } /* * To allocate unique transaction label. 
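 * Labels are managed per destination node: the candidate is the label
 * after last_tlabel[dst], and it is taken only if no pending xfer to
 * the same destination still holds it. Returns the new label, or -1
 * when none is free.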
*/ static int fw_get_tlabel(struct firewire_comm *fc, struct fw_xfer *xfer) { u_int dst, new_tlabel; struct fw_xfer *txfer; int s; dst = xfer->send.hdr.mode.hdr.dst & 0x3f; s = splfw(); mtx_lock(&fc->tlabel_lock); new_tlabel = (fc->last_tlabel[dst] + 1) & 0x3f; STAILQ_FOREACH(txfer, &fc->tlabels[new_tlabel], tlabel) if ((txfer->send.hdr.mode.hdr.dst & 0x3f) == dst) break; if(txfer == NULL) { fc->last_tlabel[dst] = new_tlabel; STAILQ_INSERT_TAIL(&fc->tlabels[new_tlabel], xfer, tlabel); mtx_unlock(&fc->tlabel_lock); splx(s); xfer->tl = new_tlabel; xfer->send.hdr.mode.hdr.tlrt = new_tlabel << 2; if (firewire_debug > 1) printf("fw_get_tlabel: dst=%d tl=%d\n", dst, new_tlabel); return (new_tlabel); } mtx_unlock(&fc->tlabel_lock); splx(s); if (firewire_debug > 1) printf("fw_get_tlabel: no free tlabel\n"); return (-1); } static void fw_rcv_copy(struct fw_rcv_buf *rb) { struct fw_pkt *pkt; u_char *p; struct tcode_info *tinfo; u_int res, i, len, plen; rb->xfer->recv.spd = rb->spd; pkt = (struct fw_pkt *)rb->vec->iov_base; tinfo = &rb->fc->tcode[pkt->mode.hdr.tcode]; /* Copy header */ p = (u_char *)&rb->xfer->recv.hdr; bcopy(rb->vec->iov_base, p, tinfo->hdr_len); rb->vec->iov_base = (u_char *)rb->vec->iov_base + tinfo->hdr_len; rb->vec->iov_len -= tinfo->hdr_len; /* Copy payload */ p = (u_char *)rb->xfer->recv.payload; res = rb->xfer->recv.pay_len; /* special handling for RRESQ */ if (pkt->mode.hdr.tcode == FWTCODE_RRESQ && p != NULL && res >= sizeof(uint32_t)) { *(uint32_t *)p = pkt->mode.rresq.data; rb->xfer->recv.pay_len = sizeof(uint32_t); return; } if ((tinfo->flag & FWTI_BLOCK_ASY) == 0) return; plen = pkt->mode.rresb.len; for (i = 0; i < rb->nvec; i++, rb->vec++) { len = MIN(rb->vec->iov_len, plen); if (res < len) { device_printf(rb->fc->bdev, "%s:" " rcv buffer(%d) is %d bytes short.\n", __func__, rb->xfer->recv.pay_len, len - res); len = res; } bcopy(rb->vec->iov_base, p, len); p += len; res -= len; plen -= len; if (res == 0 || plen == 0) break; } rb->xfer->recv.pay_len -= res; } /* * Generic packet receiving process. 
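 * Response packets are matched to their originating xfer through
 * fw_tl2xfer(); request packets are dispatched through the bind
 * table, and a RESP_ADDRESS_ERROR reply is generated when no binding
 * covers the destination address.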
*/ void fw_rcv(struct fw_rcv_buf *rb) { struct fw_pkt *fp, *resfp; struct fw_bind *bind; int tcode; int i, len, oldstate; #if 0 { uint32_t *qld; int i; qld = (uint32_t *)buf; printf("spd %d len:%d\n", spd, len); for( i = 0 ; i <= len && i < 32; i+= 4){ printf("0x%08x ", ntohl(qld[i/4])); if((i % 16) == 15) printf("\n"); } if((i % 16) != 15) printf("\n"); } #endif fp = (struct fw_pkt *)rb->vec[0].iov_base; tcode = fp->mode.common.tcode; switch (tcode) { case FWTCODE_WRES: case FWTCODE_RRESQ: case FWTCODE_RRESB: case FWTCODE_LRES: rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src, fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tcode); if(rb->xfer == NULL) { device_printf(rb->fc->bdev, "%s: " "unknown response " "%s(%x) src=0x%x tl=0x%x rt=%d data=0x%x\n", __func__, tcode_str[tcode], tcode, fp->mode.hdr.src, fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tlrt & 3, fp->mode.rresq.data); #if 0 printf("try ad-hoc work around!!\n"); rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src, (fp->mode.hdr.tlrt >> 2)^3); if (rb->xfer == NULL) { printf("no use...\n"); return; } #else return; #endif } fw_rcv_copy(rb); if (rb->xfer->recv.hdr.mode.wres.rtcode != RESP_CMP) rb->xfer->resp = EIO; else rb->xfer->resp = 0; /* make sure the packet is drained in AT queue */ oldstate = rb->xfer->flag; rb->xfer->flag = FWXF_RCVD; switch (oldstate) { case FWXF_SENT: fw_xfer_done(rb->xfer); break; case FWXF_START: #if 0 if (firewire_debug) printf("not sent yet tl=%x\n", rb->xfer->tl); #endif break; default: device_printf(rb->fc->bdev, "%s: " "unexpected flag 0x%02x\n", __func__, rb->xfer->flag); } return; case FWTCODE_WREQQ: case FWTCODE_WREQB: case FWTCODE_RREQQ: case FWTCODE_RREQB: case FWTCODE_LREQ: bind = fw_bindlookup(rb->fc, fp->mode.rreqq.dest_hi, fp->mode.rreqq.dest_lo); if(bind == NULL){ device_printf(rb->fc->bdev, "%s: " "Unknown service addr 0x%04x:0x%08x %s(%x)" #if defined(__DragonFly__) || __FreeBSD_version < 500000 " src=0x%x data=%lx\n", #else " src=0x%x data=%x\n", #endif __func__, fp->mode.wreqq.dest_hi, fp->mode.wreqq.dest_lo, tcode_str[tcode], tcode, fp->mode.hdr.src, ntohl(fp->mode.wreqq.data)); if (rb->fc->status == FWBUSINIT) { device_printf(rb->fc->bdev, "%s: cannot respond(bus reset)!\n", __func__); return; } rb->xfer = fw_xfer_alloc(M_FWXFER); if(rb->xfer == NULL){ return; } rb->xfer->send.spd = rb->spd; rb->xfer->send.pay_len = 0; resfp = &rb->xfer->send.hdr; switch (tcode) { case FWTCODE_WREQQ: case FWTCODE_WREQB: resfp->mode.hdr.tcode = FWTCODE_WRES; break; case FWTCODE_RREQQ: resfp->mode.hdr.tcode = FWTCODE_RRESQ; break; case FWTCODE_RREQB: resfp->mode.hdr.tcode = FWTCODE_RRESB; break; case FWTCODE_LREQ: resfp->mode.hdr.tcode = FWTCODE_LRES; break; } resfp->mode.hdr.dst = fp->mode.hdr.src; resfp->mode.hdr.tlrt = fp->mode.hdr.tlrt; resfp->mode.hdr.pri = fp->mode.hdr.pri; resfp->mode.rresb.rtcode = RESP_ADDRESS_ERROR; resfp->mode.rresb.extcode = 0; resfp->mode.rresb.len = 0; /* rb->xfer->hand = fw_xferwake; */ rb->xfer->hand = fw_xfer_free; if(fw_asyreq(rb->fc, -1, rb->xfer)){ fw_xfer_free(rb->xfer); return; } return; } len = 0; for (i = 0; i < rb->nvec; i ++) len += rb->vec[i].iov_len; rb->xfer = STAILQ_FIRST(&bind->xferlist); if (rb->xfer == NULL) { device_printf(rb->fc->bdev, "%s: " "Discard a packet for this bind.\n", __func__); return; } STAILQ_REMOVE_HEAD(&bind->xferlist, link); fw_rcv_copy(rb); rb->xfer->hand(rb->xfer); return; #if 0 /* shouldn't happen ?? 
	   or for GASP */
	case FWTCODE_STREAM:
	{
		struct fw_xferq *xferq;

		xferq = rb->fc->ir[sub];
#if 0
		printf("stream rcv dma %d len %d off %d spd %d\n",
		    sub, len, off, spd);
#endif
		if (xferq->queued >= xferq->maxq) {
			printf("receive queue is full\n");
			return;
		}
		/* XXX get xfer from xfer queue, we don't need copy for
			per packet mode */
		rb->xfer = fw_xfer_alloc_buf(M_FWXFER, 0, /* XXX */
		    vec[0].iov_len);
		if (rb->xfer == NULL)
			return;
		fw_rcv_copy(rb);
		s = splfw();
		xferq->queued++;
		STAILQ_INSERT_TAIL(&xferq->q, rb->xfer, link);
		splx(s);
		sc = device_get_softc(rb->fc->bdev);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
		if (&xferq->rsel.si_pid != 0)
#else
		if (SEL_WAITING(&xferq->rsel))
#endif
			selwakeuppri(&xferq->rsel, FWPRI);
		if (xferq->flag & FWXFERQ_WAKEUP) {
			xferq->flag &= ~FWXFERQ_WAKEUP;
			wakeup((caddr_t)xferq);
		}
		if (xferq->flag & FWXFERQ_HANDLER) {
			xferq->hand(xferq);
		}
		return;
		break;
	}
#endif
	default:
		device_printf(rb->fc->bdev, "%s: unknown tcode %d\n",
		    __func__, tcode);
		break;
	}
}

/*
 * Post process for Bus Manager election process.
 */
static void
fw_try_bmr_callback(struct fw_xfer *xfer)
{
	struct firewire_comm *fc;
	int bmr;

	if (xfer == NULL)
		return;
	fc = xfer->fc;
	if (xfer->resp != 0)
		goto error;
	if (xfer->recv.payload == NULL)
		goto error;
	if (xfer->recv.hdr.mode.lres.rtcode != FWRCODE_COMPLETE)
		goto error;

	bmr = ntohl(xfer->recv.payload[0]);
	if (bmr == 0x3f)
		bmr = fc->nodeid;

	CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, bmr & 0x3f);
	fw_xfer_free_buf(xfer);
	fw_bmr(fc);
	return;

error:
	device_printf(fc->bdev, "bus manager election failed\n");
	fw_xfer_free_buf(xfer);
}

/*
 * To become a candidate in the Bus Manager election process.
 */
static void
fw_try_bmr(void *arg)
{
	struct fw_xfer *xfer;
	struct firewire_comm *fc = (struct firewire_comm *)arg;
	struct fw_pkt *fp;
	int err = 0;

	xfer = fw_xfer_alloc_buf(M_FWXFER, 8, 4);
	if (xfer == NULL)
		return;
	xfer->send.spd = 0;
	fc->status = FWBUSMGRELECT;

	fp = &xfer->send.hdr;
	fp->mode.lreq.dest_hi = 0xffff;
	fp->mode.lreq.tlrt = 0;
	fp->mode.lreq.tcode = FWTCODE_LREQ;
	fp->mode.lreq.pri = 0;
	fp->mode.lreq.src = 0;
	fp->mode.lreq.len = 8;
	fp->mode.lreq.extcode = EXTCODE_CMP_SWAP;
	fp->mode.lreq.dst = FWLOCALBUS | fc->irm;
	fp->mode.lreq.dest_lo = 0xf0000000 | BUS_MGR_ID;
	xfer->send.payload[0] = htonl(0x3f);
	xfer->send.payload[1] = htonl(fc->nodeid);
	xfer->hand = fw_try_bmr_callback;

	err = fw_asyreq(fc, -1, xfer);
	if (err) {
		fw_xfer_free_buf(xfer);
		return;
	}
	return;
}

#ifdef FW_VMACCESS
/*
 * Software implementation for physical memory block access.
 * XXX: Too slow, use for debug purposes only.
 */
static void
fw_vmaccess(struct fw_xfer *xfer)
{
	struct fw_pkt *rfp, *sfp = NULL;
	uint32_t *ld = (uint32_t *)xfer->recv.buf;

	printf("vmaccess spd:%2x len:%03x data:%08x %08x %08x %08x\n",
	    xfer->spd, xfer->recv.len, ntohl(ld[0]), ntohl(ld[1]),
	    ntohl(ld[2]), ntohl(ld[3]));
	printf("vmaccess data:%08x %08x %08x %08x\n",
	    ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
	if (xfer->resp != 0) {
		fw_xfer_free(xfer);
		return;
	}
	if (xfer->recv.buf == NULL) {
		fw_xfer_free(xfer);
		return;
	}
	rfp = (struct fw_pkt *)xfer->recv.buf;
	switch (rfp->mode.hdr.tcode) {
	/* XXX need fix for 64bit arch */
	case FWTCODE_WREQB:
		xfer->send.buf = malloc(12, M_FW, M_NOWAIT);
		xfer->send.len = 12;
		sfp = (struct fw_pkt *)xfer->send.buf;
		bcopy(rfp->mode.wreqb.payload,
		    (caddr_t)ntohl(rfp->mode.wreqb.dest_lo),
		    ntohs(rfp->mode.wreqb.len));
		sfp->mode.wres.tcode = FWTCODE_WRES;
		sfp->mode.wres.rtcode = 0;
		break;
	case FWTCODE_WREQQ:
		xfer->send.buf = malloc(12, M_FW, M_NOWAIT);
		xfer->send.len = 12;
		/* map the response header before filling it in */
		sfp = (struct fw_pkt *)xfer->send.buf;
		sfp->mode.wres.tcode = FWTCODE_WRES;
		*((uint32_t *)(ntohl(rfp->mode.wreqb.dest_lo))) =
		    rfp->mode.wreqq.data;
		sfp->mode.wres.rtcode = 0;
		break;
	case FWTCODE_RREQB:
		xfer->send.buf = malloc(16 + rfp->mode.rreqb.len,
		    M_FW, M_NOWAIT);
		xfer->send.len = 16 + ntohs(rfp->mode.rreqb.len);
		sfp = (struct fw_pkt *)xfer->send.buf;
		bcopy((caddr_t)ntohl(rfp->mode.rreqb.dest_lo),
		    sfp->mode.rresb.payload,
		    (uint16_t)ntohs(rfp->mode.rreqb.len));
		sfp->mode.rresb.tcode = FWTCODE_RRESB;
		sfp->mode.rresb.len = rfp->mode.rreqb.len;
		sfp->mode.rresb.rtcode = 0;
		sfp->mode.rresb.extcode = 0;
		break;
	case FWTCODE_RREQQ:
		xfer->send.buf = malloc(16, M_FW, M_NOWAIT);
		xfer->send.len = 16;
		sfp = (struct fw_pkt *)xfer->send.buf;
		sfp->mode.rresq.data =
		    *(uint32_t *)(ntohl(rfp->mode.rreqq.dest_lo));
		sfp->mode.wres.tcode = FWTCODE_RRESQ;
		sfp->mode.rresb.rtcode = 0;
		break;
	default:
		fw_xfer_free(xfer);
		return;
	}
	sfp->mode.hdr.dst = rfp->mode.hdr.src;
	xfer->dst = ntohs(rfp->mode.hdr.src);
	xfer->hand = fw_xfer_free;

	sfp->mode.hdr.tlrt = rfp->mode.hdr.tlrt;
	sfp->mode.hdr.pri = 0;

	fw_asyreq(xfer->fc, -1, xfer);
	/**/
	return;
}
#endif

/*
 * CRC16 check-sum for IEEE1394 register blocks.
 */
uint16_t
fw_crc16(uint32_t *ptr, uint32_t len)
{
	uint32_t i, sum, crc = 0;
	int shift;

	len = (len + 3) & ~3;
	for (i = 0; i < len; i += 4) {
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (ptr[i/4] >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
		}
		crc &= 0xffff;
	}
	return ((uint16_t)crc);
}

/*
 * Find the root node. If it is not
 * Cycle Master Capable, then we should
 * override this and become the Cycle
 * Master.
 */
static int
fw_bmr(struct firewire_comm *fc)
{
	struct fw_device fwdev;
	union fw_self_id *self_id;
	int cmstr;
	uint32_t quad;

	/* Check to see if the current root node is cycle master capable */
	self_id = fw_find_self_id(fc, fc->max_node);
	if (fc->max_node > 0) {
		/* XXX check cmc bit of businfo block rather than contender */
		if (self_id->p0.link_active && self_id->p0.contender)
			cmstr = fc->max_node;
		else {
			device_printf(fc->bdev,
			    "root node is not cycle master capable\n");
			/* XXX shall we be the cycle master? */
			cmstr = fc->nodeid;
			/* XXX need bus reset */
		}
	} else
		cmstr = -1;

	device_printf(fc->bdev, "bus manager %d %s\n",
	    CSRARC(fc, BUS_MGR_ID),
	    (CSRARC(fc, BUS_MGR_ID) == fc->nodeid) ?
"(me)" : ""); if(CSRARC(fc, BUS_MGR_ID) != fc->nodeid) { /* We are not the bus manager */ return(0); } /* Optimize gapcount */ if(fc->max_hop <= MAX_GAPHOP ) fw_phy_config(fc, cmstr, gap_cnt[fc->max_hop]); /* If we are the cycle master, nothing to do */ if (cmstr == fc->nodeid || cmstr == -1) return 0; /* Bus probe has not finished, make dummy fwdev for cmstr */ bzero(&fwdev, sizeof(fwdev)); fwdev.fc = fc; fwdev.dst = cmstr; fwdev.speed = 0; fwdev.maxrec = 8; /* 512 */ fwdev.status = FWDEVINIT; /* Set cmstr bit on the cycle master */ quad = htonl(1 << 8); fwmem_write_quad(&fwdev, NULL, 0/*spd*/, 0xffff, 0xf0000000 | STATE_SET, &quad, fw_asy_callback_free); return 0; } int fw_open_isodma(struct firewire_comm *fc, int tx) { struct fw_xferq **xferqa; struct fw_xferq *xferq; int i; if (tx) xferqa = &fc->it[0]; else xferqa = &fc->ir[0]; FW_GLOCK(fc); for (i = 0; i < fc->nisodma; i ++) { xferq = xferqa[i]; if ((xferq->flag & FWXFERQ_OPEN) == 0) { xferq->flag |= FWXFERQ_OPEN; break; } } if (i == fc->nisodma) { printf("no free dma channel (tx=%d)\n", tx); i = -1; } FW_GUNLOCK(fc); return (i); } static int fw_modevent(module_t mode, int type, void *data) { int err = 0; #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 static eventhandler_tag fwdev_ehtag = NULL; #endif switch (type) { case MOD_LOAD: #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 fwdev_ehtag = EVENTHANDLER_REGISTER(dev_clone, fwdev_clone, 0, 1000); #endif break; case MOD_UNLOAD: #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 if (fwdev_ehtag != NULL) EVENTHANDLER_DEREGISTER(dev_clone, fwdev_ehtag); #endif break; case MOD_SHUTDOWN: break; default: return (EOPNOTSUPP); } return (err); } #ifdef __DragonFly__ DECLARE_DUMMY_MODULE(firewire); #endif DRIVER_MODULE(firewire,fwohci,firewire_driver,firewire_devclass,fw_modevent,0); MODULE_VERSION(firewire, 1); Index: head/sys/dev/firewire/fwohci.c =================================================================== --- head/sys/dev/firewire/fwohci.c (revision 258779) +++ head/sys/dev/firewire/fwohci.c (revision 258780) @@ -1,2989 +1,2989 @@ /*- * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #define ATRQ_CH 0 #define ATRS_CH 1 #define ARRQ_CH 2 #define ARRS_CH 3 #define ITX_CH 4 #define IRX_CH 0x24 #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__DragonFly__) || __FreeBSD_version < 500000 #include /* for DELAY() */ #endif #ifdef __DragonFly__ #include "firewire.h" #include "firewirereg.h" #include "fwdma.h" #include "fwohcireg.h" #include "fwohcivar.h" #include "firewire_phy.h" #else #include #include #include #include #include #include #endif #undef OHCI_DEBUG static int nocyclemaster = 0; int firewire_phydma_enable = 1; SYSCTL_DECL(_hw_firewire); SYSCTL_INT(_hw_firewire, OID_AUTO, nocyclemaster, CTLFLAG_RW, &nocyclemaster, 0, "Do not send cycle start packets"); SYSCTL_INT(_hw_firewire, OID_AUTO, phydma_enable, CTLFLAG_RW, &firewire_phydma_enable, 1, "Allow physical request DMA from firewire"); TUNABLE_INT("hw.firewire.phydma_enable", &firewire_phydma_enable); static char dbcode[16][0x10]={"OUTM", "OUTL","INPM","INPL", "STOR","LOAD","NOP ","STOP",}; static char dbkey[8][0x10]={"ST0", "ST1","ST2","ST3", "UNDEF","REG","SYS","DEV"}; static char dbcond[4][0x10]={"NEV","C=1", "C=0", "ALL"}; char fwohcicode[32][0x20]={ "No stat","Undef","long","miss Ack err", "FIFO underrun","FIFO overrun","desc err", "data read err", "data write err","bus reset","timeout","tcode err", "Undef","Undef","unknown event","flushed", "Undef","ack complete","ack pend","Undef", "ack busy_X","ack busy_A","ack busy_B","Undef", "Undef","Undef","Undef","ack tardy", "Undef","ack data_err","ack type_err",""}; #define MAX_SPEED 3 extern char *linkspeed[]; uint32_t tagbit[4] = { 1 << 28, 1 << 29, 1 << 30, 1 << 31}; static struct tcode_info tinfo[] = { /* hdr_len block flag valid_response */ /* 0 WREQQ */ {16, FWTI_REQ | FWTI_TLABEL, FWTCODE_WRES}, /* 1 WREQB */ {16, FWTI_REQ | FWTI_TLABEL | FWTI_BLOCK_ASY, FWTCODE_WRES}, /* 2 WRES */ {12, FWTI_RES, 0xff}, /* 3 XXX */ { 0, 0, 0xff}, /* 4 RREQQ */ {12, FWTI_REQ | FWTI_TLABEL, FWTCODE_RRESQ}, /* 5 RREQB */ {16, FWTI_REQ | FWTI_TLABEL, FWTCODE_RRESB}, /* 6 RRESQ */ {16, FWTI_RES, 0xff}, /* 7 RRESB */ {16, FWTI_RES | FWTI_BLOCK_ASY, 0xff}, /* 8 CYCS */ { 0, 0, 0xff}, /* 9 LREQ */ {16, FWTI_REQ | FWTI_TLABEL | FWTI_BLOCK_ASY, FWTCODE_LRES}, /* a STREAM */ { 4, FWTI_REQ | FWTI_BLOCK_STR, 0xff}, /* b LRES */ {16, FWTI_RES | FWTI_BLOCK_ASY, 0xff}, /* c XXX */ { 0, 0, 0xff}, /* d XXX */ { 0, 0, 0xff}, /* e PHY */ {12, FWTI_REQ, 0xff}, /* f XXX */ { 0, 0, 0xff} }; #define OHCI_WRITE_SIGMASK 0xffff0000 #define OHCI_READ_SIGMASK 0xffff0000 #define OWRITE(sc, r, x) bus_space_write_4((sc)->bst, (sc)->bsh, (r), (x)) #define OREAD(sc, r) bus_space_read_4((sc)->bst, (sc)->bsh, (r)) static void fwohci_ibr (struct firewire_comm *); static void fwohci_db_init (struct fwohci_softc *, struct fwohci_dbch *); static void fwohci_db_free (struct fwohci_dbch *); static void fwohci_arcv (struct fwohci_softc *, struct fwohci_dbch *, int); static void fwohci_txd (struct fwohci_softc *, struct fwohci_dbch *); 
static void fwohci_start_atq (struct firewire_comm *); static void fwohci_start_ats (struct firewire_comm *); static void fwohci_start (struct fwohci_softc *, struct fwohci_dbch *); static uint32_t fwphy_wrdata ( struct fwohci_softc *, uint32_t, uint32_t); static uint32_t fwphy_rddata ( struct fwohci_softc *, uint32_t); static int fwohci_rx_enable (struct fwohci_softc *, struct fwohci_dbch *); static int fwohci_tx_enable (struct fwohci_softc *, struct fwohci_dbch *); static int fwohci_irx_enable (struct firewire_comm *, int); static int fwohci_irx_disable (struct firewire_comm *, int); #if BYTE_ORDER == BIG_ENDIAN static void fwohci_irx_post (struct firewire_comm *, uint32_t *); #endif static int fwohci_itxbuf_enable (struct firewire_comm *, int); static int fwohci_itx_disable (struct firewire_comm *, int); static void fwohci_timeout (void *); static void fwohci_set_intr (struct firewire_comm *, int); static int fwohci_add_rx_buf (struct fwohci_dbch *, struct fwohcidb_tr *, int, struct fwdma_alloc *); static int fwohci_add_tx_buf (struct fwohci_dbch *, struct fwohcidb_tr *, int); static void dump_db (struct fwohci_softc *, uint32_t); static void print_db (struct fwohcidb_tr *, struct fwohcidb *, uint32_t , uint32_t); static void dump_dma (struct fwohci_softc *, uint32_t); static uint32_t fwohci_cyctimer (struct firewire_comm *); static void fwohci_rbuf_update (struct fwohci_softc *, int); static void fwohci_tbuf_update (struct fwohci_softc *, int); void fwohci_txbufdb (struct fwohci_softc *, int , struct fw_bulkxfer *); static void fwohci_task_busreset(void *, int); static void fwohci_task_sid(void *, int); static void fwohci_task_dma(void *, int); /* * memory allocated for DMA programs */ #define DMA_PROG_ALLOC (8 * PAGE_SIZE) #define NDB FWMAXQUEUE #define OHCI_VERSION 0x00 #define OHCI_ATRETRY 0x08 #define OHCI_CROMHDR 0x18 #define OHCI_BUS_OPT 0x20 -#define OHCI_BUSIRMC (1 << 31) +#define OHCI_BUSIRMC (1U << 31) #define OHCI_BUSCMC (1 << 30) #define OHCI_BUSISC (1 << 29) #define OHCI_BUSBMC (1 << 28) #define OHCI_BUSPMC (1 << 27) #define OHCI_BUSFNC OHCI_BUSIRMC | OHCI_BUSCMC | OHCI_BUSISC |\ OHCI_BUSBMC | OHCI_BUSPMC #define OHCI_EUID_HI 0x24 #define OHCI_EUID_LO 0x28 #define OHCI_CROMPTR 0x34 #define OHCI_HCCCTL 0x50 #define OHCI_HCCCTLCLR 0x54 #define OHCI_AREQHI 0x100 #define OHCI_AREQHICLR 0x104 #define OHCI_AREQLO 0x108 #define OHCI_AREQLOCLR 0x10c #define OHCI_PREQHI 0x110 #define OHCI_PREQHICLR 0x114 #define OHCI_PREQLO 0x118 #define OHCI_PREQLOCLR 0x11c #define OHCI_PREQUPPER 0x120 #define OHCI_SID_BUF 0x64 #define OHCI_SID_CNT 0x68 -#define OHCI_SID_ERR (1 << 31) +#define OHCI_SID_ERR (1U << 31) #define OHCI_SID_CNT_MASK 0xffc #define OHCI_IT_STAT 0x90 #define OHCI_IT_STATCLR 0x94 #define OHCI_IT_MASK 0x98 #define OHCI_IT_MASKCLR 0x9c #define OHCI_IR_STAT 0xa0 #define OHCI_IR_STATCLR 0xa4 #define OHCI_IR_MASK 0xa8 #define OHCI_IR_MASKCLR 0xac #define OHCI_LNKCTL 0xe0 #define OHCI_LNKCTLCLR 0xe4 #define OHCI_PHYACCESS 0xec #define OHCI_CYCLETIMER 0xf0 #define OHCI_DMACTL(off) (off) #define OHCI_DMACTLCLR(off) (off + 4) #define OHCI_DMACMD(off) (off + 0xc) #define OHCI_DMAMATCH(off) (off + 0x10) #define OHCI_ATQOFF 0x180 #define OHCI_ATQCTL OHCI_ATQOFF #define OHCI_ATQCTLCLR (OHCI_ATQOFF + 4) #define OHCI_ATQCMD (OHCI_ATQOFF + 0xc) #define OHCI_ATQMATCH (OHCI_ATQOFF + 0x10) #define OHCI_ATSOFF 0x1a0 #define OHCI_ATSCTL OHCI_ATSOFF #define OHCI_ATSCTLCLR (OHCI_ATSOFF + 4) #define OHCI_ATSCMD (OHCI_ATSOFF + 0xc) #define OHCI_ATSMATCH (OHCI_ATSOFF + 0x10) #define OHCI_ARQOFF 0x1c0 
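/*
 * Note: bit-31 masks such as OHCI_BUSIRMC and OHCI_SID_ERR above are
 * spelled 1U << 31 because left-shifting a signed 1 into the sign bit
 * is undefined behavior in C.  Every DMA context below repeats the
 * same register layout: ContextControl at +0/+4 (set/clear),
 * CommandPtr at +0xc and ContextMatch at +0x10.
 */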
#define OHCI_ARQCTL OHCI_ARQOFF #define OHCI_ARQCTLCLR (OHCI_ARQOFF + 4) #define OHCI_ARQCMD (OHCI_ARQOFF + 0xc) #define OHCI_ARQMATCH (OHCI_ARQOFF + 0x10) #define OHCI_ARSOFF 0x1e0 #define OHCI_ARSCTL OHCI_ARSOFF #define OHCI_ARSCTLCLR (OHCI_ARSOFF + 4) #define OHCI_ARSCMD (OHCI_ARSOFF + 0xc) #define OHCI_ARSMATCH (OHCI_ARSOFF + 0x10) #define OHCI_ITOFF(CH) (0x200 + 0x10 * (CH)) #define OHCI_ITCTL(CH) (OHCI_ITOFF(CH)) #define OHCI_ITCTLCLR(CH) (OHCI_ITOFF(CH) + 4) #define OHCI_ITCMD(CH) (OHCI_ITOFF(CH) + 0xc) #define OHCI_IROFF(CH) (0x400 + 0x20 * (CH)) #define OHCI_IRCTL(CH) (OHCI_IROFF(CH)) #define OHCI_IRCTLCLR(CH) (OHCI_IROFF(CH) + 4) #define OHCI_IRCMD(CH) (OHCI_IROFF(CH) + 0xc) #define OHCI_IRMATCH(CH) (OHCI_IROFF(CH) + 0x10) d_ioctl_t fwohci_ioctl; /* * Communication with PHY device */ /* XXX need lock for phy access */ static uint32_t fwphy_wrdata( struct fwohci_softc *sc, uint32_t addr, uint32_t data) { uint32_t fun; addr &= 0xf; data &= 0xff; fun = (PHYDEV_WRCMD | (addr << PHYDEV_REGADDR) | (data << PHYDEV_WRDATA)); OWRITE(sc, OHCI_PHYACCESS, fun); DELAY(100); return(fwphy_rddata( sc, addr)); } static uint32_t fwohci_set_bus_manager(struct firewire_comm *fc, u_int node) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; int i; uint32_t bm; #define OHCI_CSR_DATA 0x0c #define OHCI_CSR_COMP 0x10 #define OHCI_CSR_CONT 0x14 #define OHCI_BUS_MANAGER_ID 0 OWRITE(sc, OHCI_CSR_DATA, node); OWRITE(sc, OHCI_CSR_COMP, 0x3f); OWRITE(sc, OHCI_CSR_CONT, OHCI_BUS_MANAGER_ID); for (i = 0; !(OREAD(sc, OHCI_CSR_CONT) & (1<<31)) && (i < 1000); i++) DELAY(10); bm = OREAD(sc, OHCI_CSR_DATA); if((bm & 0x3f) == 0x3f) bm = node; if (firewire_debug) device_printf(sc->fc.dev, "%s: %d->%d (loop=%d)\n", __func__, bm, node, i); return(bm); } static uint32_t fwphy_rddata(struct fwohci_softc *sc, u_int addr) { uint32_t fun, stat; u_int i, retry = 0; addr &= 0xf; #define MAX_RETRY 100 again: OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_REG_FAIL); fun = PHYDEV_RDCMD | (addr << PHYDEV_REGADDR); OWRITE(sc, OHCI_PHYACCESS, fun); for ( i = 0 ; i < MAX_RETRY ; i ++ ){ fun = OREAD(sc, OHCI_PHYACCESS); if ((fun & PHYDEV_RDCMD) == 0 && (fun & PHYDEV_RDDONE) != 0) break; DELAY(100); } if(i >= MAX_RETRY) { if (firewire_debug) device_printf(sc->fc.dev, "%s: failed(1).\n", __func__); if (++retry < MAX_RETRY) { DELAY(100); goto again; } } /* Make sure that SCLK is started */ stat = OREAD(sc, FWOHCI_INTSTAT); if ((stat & OHCI_INT_REG_FAIL) != 0 || ((fun >> PHYDEV_REGADDR) & 0xf) != addr) { if (firewire_debug) device_printf(sc->fc.dev, "%s: failed(2).\n", __func__); if (++retry < MAX_RETRY) { DELAY(100); goto again; } } if (firewire_debug > 1 || retry >= MAX_RETRY) device_printf(sc->fc.dev, "%s:: 0x%x loop=%d, retry=%d\n", __func__, addr, i, retry); #undef MAX_RETRY return((fun >> PHYDEV_RDDATA )& 0xff); } /* Device specific ioctl. 
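 * Exposes raw OHCI register reads/writes, DMA descriptor dumps and
 * PHY register access; mainly useful for debugging.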
 */
int
fwohci_ioctl (struct cdev *dev, u_long cmd, caddr_t data,
    int flag, fw_proc *td)
{
	struct firewire_softc *sc;
	struct fwohci_softc *fc;
	int unit = DEV2UNIT(dev);
	int err = 0;
	struct fw_reg_req_t *reg = (struct fw_reg_req_t *) data;
	uint32_t *dmach = (uint32_t *) data;

	sc = devclass_get_softc(firewire_devclass, unit);
	if(sc == NULL){
		return(EINVAL);
	}
	fc = (struct fwohci_softc *)sc->fc;

	if (!data)
		return(EINVAL);

	switch (cmd) {
	case FWOHCI_WRREG:
#define OHCI_MAX_REG 0x800
		if(reg->addr <= OHCI_MAX_REG){
			OWRITE(fc, reg->addr, reg->data);
			reg->data = OREAD(fc, reg->addr);
		}else{
			err = EINVAL;
		}
		break;
	case FWOHCI_RDREG:
		if(reg->addr <= OHCI_MAX_REG){
			reg->data = OREAD(fc, reg->addr);
		}else{
			err = EINVAL;
		}
		break;
/* Read DMA descriptors for debug */
	case DUMPDMA:
		if(*dmach <= OHCI_MAX_DMA_CH ){
			dump_dma(fc, *dmach);
			dump_db(fc, *dmach);
		}else{
			err = EINVAL;
		}
		break;
/* Read/Write Phy registers */
#define OHCI_MAX_PHY_REG 0xf
	case FWOHCI_RDPHYREG:
		if (reg->addr <= OHCI_MAX_PHY_REG)
			reg->data = fwphy_rddata(fc, reg->addr);
		else
			err = EINVAL;
		break;
	case FWOHCI_WRPHYREG:
		if (reg->addr <= OHCI_MAX_PHY_REG)
			reg->data = fwphy_wrdata(fc, reg->addr, reg->data);
		else
			err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}
	return err;
}

static int
fwohci_probe_phy(struct fwohci_softc *sc, device_t dev)
{
	uint32_t reg, reg2;
	int e1394a = 1;

	/*
	 * Probe PHY parameters:
	 * 0. probe the PHY version and whether it complies with 1394a.
	 * 1. probe the maximum speed supported by the PHY and the number
	 *    of ports supported by the core logic (which is not necessarily
	 *    the number of ports actually wired up on your PC).
	 */
	OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LPS);
	DELAY(500);

	reg = fwphy_rddata(sc, FW_PHY_SPD_REG);

	if((reg >> 5) != 7 ){
		sc->fc.mode &= ~FWPHYASYST;
		sc->fc.nport = reg & FW_PHY_NP;
		sc->fc.speed = reg & FW_PHY_SPD >> 6;
		if (sc->fc.speed > MAX_SPEED) {
			device_printf(dev, "invalid speed %d (fixed to %d).\n",
				sc->fc.speed, MAX_SPEED);
			sc->fc.speed = MAX_SPEED;
		}
		device_printf(dev,
			"Phy 1394 only %s, %d ports.\n",
			linkspeed[sc->fc.speed], sc->fc.nport);
	}else{
		reg2 = fwphy_rddata(sc, FW_PHY_ESPD_REG);
		sc->fc.mode |= FWPHYASYST;
		sc->fc.nport = reg & FW_PHY_NP;
		sc->fc.speed = (reg2 & FW_PHY_ESPD) >> 5;
		if (sc->fc.speed > MAX_SPEED) {
			device_printf(dev, "invalid speed %d (fixed to %d).\n",
				sc->fc.speed, MAX_SPEED);
			sc->fc.speed = MAX_SPEED;
		}
		device_printf(dev,
			"Phy 1394a available %s, %d ports.\n",
			linkspeed[sc->fc.speed], sc->fc.nport);

		/* check programPhyEnable */
		reg2 = fwphy_rddata(sc, 5);
#if 0
		if (e1394a && (OREAD(sc, OHCI_HCCCTL) & OHCI_HCC_PRPHY)) {
#else	/* XXX force to enable 1394a */
		if (e1394a) {
#endif
			if (firewire_debug)
				device_printf(dev,
					"Enable 1394a Enhancements\n");
			/* enable EAA EMC */
			reg2 |= 0x03;
			/* set aPhyEnhanceEnable */
			OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_PHYEN);
			OWRITE(sc, OHCI_HCCCTLCLR, OHCI_HCC_PRPHY);
		} else {
			/* to be safe */
			reg2 &= ~0x83;
		}
		reg2 = fwphy_wrdata(sc, 5, reg2);
	}

	reg = fwphy_rddata(sc, FW_PHY_SPD_REG);
	if((reg >> 5) == 7 ){
		reg = fwphy_rddata(sc, 4);
		reg |= 1 << 6;
		fwphy_wrdata(sc, 4, reg);
		reg = fwphy_rddata(sc, 4);
	}
	return 0;
}

void
fwohci_reset(struct fwohci_softc *sc, device_t dev)
{
	int i, max_rec, speed;
	uint32_t reg, reg2;
	struct fwohcidb_tr *db_tr;

	/* Disable interrupts */
	OWRITE(sc, FWOHCI_INTMASKCLR, ~0);

	/* Now stopping all DMA channels */
	OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN);
	OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN);
	OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
	OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);

	OWRITE(sc, OHCI_IR_MASKCLR, ~0);
	for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
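		/*
		 * Each pass of this loop clears the RUN bit of isochronous
		 * channel i's receive and transmit contexts, so no
		 * isochronous DMA survives the soft reset issued below.
		 * With the OHCI_IROFF()/OHCI_ITOFF() layout defined above,
		 * iteration 0, for example, expands to the two stores
		 *
		 *	OWRITE(sc, 0x400 + 4, OHCI_CNTL_DMA_RUN);   IR ctx 0
		 *	OWRITE(sc, 0x200 + 4, OHCI_CNTL_DMA_RUN);   IT ctx 0
		 *
		 * i.e. a write to each context's ContextControlClear.
		 */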
OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN); OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN); } /* FLUSH FIFO and reset Transmitter/Reciever */ OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_RESET); if (firewire_debug) device_printf(dev, "resetting OHCI..."); i = 0; while(OREAD(sc, OHCI_HCCCTL) & OHCI_HCC_RESET) { if (i++ > 100) break; DELAY(1000); } if (firewire_debug) printf("done (loop=%d)\n", i); /* Probe phy */ fwohci_probe_phy(sc, dev); /* Probe link */ reg = OREAD(sc, OHCI_BUS_OPT); reg2 = reg | OHCI_BUSFNC; max_rec = (reg & 0x0000f000) >> 12; speed = (reg & 0x00000007); device_printf(dev, "Link %s, max_rec %d bytes.\n", linkspeed[speed], MAXREC(max_rec)); /* XXX fix max_rec */ sc->fc.maxrec = sc->fc.speed + 8; if (max_rec != sc->fc.maxrec) { reg2 = (reg2 & 0xffff0fff) | (sc->fc.maxrec << 12); device_printf(dev, "max_rec %d -> %d\n", MAXREC(max_rec), MAXREC(sc->fc.maxrec)); } if (firewire_debug) device_printf(dev, "BUS_OPT 0x%x -> 0x%x\n", reg, reg2); OWRITE(sc, OHCI_BUS_OPT, reg2); /* Initialize registers */ OWRITE(sc, OHCI_CROMHDR, sc->fc.config_rom[0]); OWRITE(sc, OHCI_CROMPTR, sc->crom_dma.bus_addr); OWRITE(sc, OHCI_HCCCTLCLR, OHCI_HCC_BIGEND); OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_POSTWR); OWRITE(sc, OHCI_SID_BUF, sc->sid_dma.bus_addr); OWRITE(sc, OHCI_LNKCTL, OHCI_CNTL_SID); /* Enable link */ OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LINKEN); /* Force to start async RX DMA */ sc->arrq.xferq.flag &= ~FWXFERQ_RUNNING; sc->arrs.xferq.flag &= ~FWXFERQ_RUNNING; fwohci_rx_enable(sc, &sc->arrq); fwohci_rx_enable(sc, &sc->arrs); /* Initialize async TX */ OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN | OHCI_CNTL_DMA_DEAD); OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN | OHCI_CNTL_DMA_DEAD); /* AT Retries */ OWRITE(sc, FWOHCI_RETRY, /* CycleLimit PhyRespRetries ATRespRetries ATReqRetries */ (0xffff << 16 ) | (0x0f << 8) | (0x0f << 4) | 0x0f) ; sc->atrq.top = STAILQ_FIRST(&sc->atrq.db_trq); sc->atrs.top = STAILQ_FIRST(&sc->atrs.db_trq); sc->atrq.bottom = sc->atrq.top; sc->atrs.bottom = sc->atrs.top; for( i = 0, db_tr = sc->atrq.top; i < sc->atrq.ndb ; i ++, db_tr = STAILQ_NEXT(db_tr, link)){ db_tr->xfer = NULL; } for( i = 0, db_tr = sc->atrs.top; i < sc->atrs.ndb ; i ++, db_tr = STAILQ_NEXT(db_tr, link)){ db_tr->xfer = NULL; } /* Enable interrupts */ sc->intmask = (OHCI_INT_ERR | OHCI_INT_PHY_SID | OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS | OHCI_INT_DMA_PRRQ | OHCI_INT_DMA_PRRS | OHCI_INT_PHY_BUS_R | OHCI_INT_PW_ERR); sc->intmask |= OHCI_INT_DMA_IR | OHCI_INT_DMA_IT; sc->intmask |= OHCI_INT_CYC_LOST | OHCI_INT_PHY_INT; OWRITE(sc, FWOHCI_INTMASK, sc->intmask); fwohci_set_intr(&sc->fc, 1); } int fwohci_init(struct fwohci_softc *sc, device_t dev) { int i, mver; uint32_t reg; uint8_t ui[8]; /* OHCI version */ reg = OREAD(sc, OHCI_VERSION); mver = (reg >> 16) & 0xff; device_printf(dev, "OHCI version %x.%x (ROM=%d)\n", mver, reg & 0xff, (reg>>24) & 1); if (mver < 1 || mver > 9) { device_printf(dev, "invalid OHCI version\n"); return (ENXIO); } /* Available Isochronous DMA channel probe */ OWRITE(sc, OHCI_IT_MASK, 0xffffffff); OWRITE(sc, OHCI_IR_MASK, 0xffffffff); reg = OREAD(sc, OHCI_IT_MASK) & OREAD(sc, OHCI_IR_MASK); OWRITE(sc, OHCI_IT_MASKCLR, 0xffffffff); OWRITE(sc, OHCI_IR_MASKCLR, 0xffffffff); for (i = 0; i < 0x20; i++) if ((reg & (1 << i)) == 0) break; sc->fc.nisodma = i; device_printf(dev, "No. 
of Isochronous channels is %d.\n", i); if (i == 0) return (ENXIO); sc->fc.arq = &sc->arrq.xferq; sc->fc.ars = &sc->arrs.xferq; sc->fc.atq = &sc->atrq.xferq; sc->fc.ats = &sc->atrs.xferq; sc->arrq.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE); sc->arrs.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE); sc->atrq.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE); sc->atrs.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE); sc->arrq.xferq.start = NULL; sc->arrs.xferq.start = NULL; sc->atrq.xferq.start = fwohci_start_atq; sc->atrs.xferq.start = fwohci_start_ats; sc->arrq.xferq.buf = NULL; sc->arrs.xferq.buf = NULL; sc->atrq.xferq.buf = NULL; sc->atrs.xferq.buf = NULL; sc->arrq.xferq.dmach = -1; sc->arrs.xferq.dmach = -1; sc->atrq.xferq.dmach = -1; sc->atrs.xferq.dmach = -1; sc->arrq.ndesc = 1; sc->arrs.ndesc = 1; sc->atrq.ndesc = 8; /* equal to maximum of mbuf chains */ sc->atrs.ndesc = 2; sc->arrq.ndb = NDB; sc->arrs.ndb = NDB / 2; sc->atrq.ndb = NDB; sc->atrs.ndb = NDB / 2; for( i = 0 ; i < sc->fc.nisodma ; i ++ ){ sc->fc.it[i] = &sc->it[i].xferq; sc->fc.ir[i] = &sc->ir[i].xferq; sc->it[i].xferq.dmach = i; sc->ir[i].xferq.dmach = i; sc->it[i].ndb = 0; sc->ir[i].ndb = 0; } sc->fc.tcode = tinfo; sc->fc.dev = dev; sc->fc.config_rom = fwdma_malloc(&sc->fc, CROMSIZE, CROMSIZE, &sc->crom_dma, BUS_DMA_WAITOK | BUS_DMA_COHERENT); if(sc->fc.config_rom == NULL){ device_printf(dev, "config_rom alloc failed."); return ENOMEM; } #if 0 bzero(&sc->fc.config_rom[0], CROMSIZE); sc->fc.config_rom[1] = 0x31333934; sc->fc.config_rom[2] = 0xf000a002; sc->fc.config_rom[3] = OREAD(sc, OHCI_EUID_HI); sc->fc.config_rom[4] = OREAD(sc, OHCI_EUID_LO); sc->fc.config_rom[5] = 0; sc->fc.config_rom[0] = (4 << 24) | (5 << 16); sc->fc.config_rom[0] |= fw_crc16(&sc->fc.config_rom[1], 5*4); #endif /* SID recieve buffer must align 2^11 */ #define OHCI_SIDSIZE (1 << 11) sc->sid_buf = fwdma_malloc(&sc->fc, OHCI_SIDSIZE, OHCI_SIDSIZE, &sc->sid_dma, BUS_DMA_WAITOK | BUS_DMA_COHERENT); if (sc->sid_buf == NULL) { device_printf(dev, "sid_buf alloc failed."); return ENOMEM; } fwdma_malloc(&sc->fc, sizeof(uint32_t), sizeof(uint32_t), &sc->dummy_dma, BUS_DMA_WAITOK); if (sc->dummy_dma.v_addr == NULL) { device_printf(dev, "dummy_dma alloc failed."); return ENOMEM; } fwohci_db_init(sc, &sc->arrq); if ((sc->arrq.flags & FWOHCI_DBCH_INIT) == 0) return ENOMEM; fwohci_db_init(sc, &sc->arrs); if ((sc->arrs.flags & FWOHCI_DBCH_INIT) == 0) return ENOMEM; fwohci_db_init(sc, &sc->atrq); if ((sc->atrq.flags & FWOHCI_DBCH_INIT) == 0) return ENOMEM; fwohci_db_init(sc, &sc->atrs); if ((sc->atrs.flags & FWOHCI_DBCH_INIT) == 0) return ENOMEM; sc->fc.eui.hi = OREAD(sc, FWOHCIGUID_H); sc->fc.eui.lo = OREAD(sc, FWOHCIGUID_L); for( i = 0 ; i < 8 ; i ++) ui[i] = FW_EUI64_BYTE(&sc->fc.eui,i); device_printf(dev, "EUI64 %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", ui[0], ui[1], ui[2], ui[3], ui[4], ui[5], ui[6], ui[7]); sc->fc.ioctl = fwohci_ioctl; sc->fc.cyctimer = fwohci_cyctimer; sc->fc.set_bmr = fwohci_set_bus_manager; sc->fc.ibr = fwohci_ibr; sc->fc.irx_enable = fwohci_irx_enable; sc->fc.irx_disable = fwohci_irx_disable; sc->fc.itx_enable = fwohci_itxbuf_enable; sc->fc.itx_disable = fwohci_itx_disable; #if BYTE_ORDER == BIG_ENDIAN sc->fc.irx_post = fwohci_irx_post; #else sc->fc.irx_post = NULL; #endif sc->fc.itx_post = NULL; sc->fc.timeout = fwohci_timeout; sc->fc.poll = fwohci_poll; sc->fc.set_intr = fwohci_set_intr; sc->intmask = sc->irstat = sc->itstat = 0; /* Init task queue */ sc->fc.taskqueue = taskqueue_create_fast("fw_taskq", M_WAITOK, 
taskqueue_thread_enqueue, &sc->fc.taskqueue); taskqueue_start_threads(&sc->fc.taskqueue, 1, PI_NET, "fw%d_taskq", device_get_unit(dev)); TASK_INIT(&sc->fwohci_task_busreset, 2, fwohci_task_busreset, sc); TASK_INIT(&sc->fwohci_task_sid, 1, fwohci_task_sid, sc); TASK_INIT(&sc->fwohci_task_dma, 0, fwohci_task_dma, sc); fw_init(&sc->fc); fwohci_reset(sc, dev); return 0; } void fwohci_timeout(void *arg) { struct fwohci_softc *sc; sc = (struct fwohci_softc *)arg; } uint32_t fwohci_cyctimer(struct firewire_comm *fc) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; return(OREAD(sc, OHCI_CYCLETIMER)); } int fwohci_detach(struct fwohci_softc *sc, device_t dev) { int i; if (sc->sid_buf != NULL) fwdma_free(&sc->fc, &sc->sid_dma); if (sc->fc.config_rom != NULL) fwdma_free(&sc->fc, &sc->crom_dma); fwohci_db_free(&sc->arrq); fwohci_db_free(&sc->arrs); fwohci_db_free(&sc->atrq); fwohci_db_free(&sc->atrs); for( i = 0 ; i < sc->fc.nisodma ; i ++ ){ fwohci_db_free(&sc->it[i]); fwohci_db_free(&sc->ir[i]); } if (sc->fc.taskqueue != NULL) { taskqueue_drain(sc->fc.taskqueue, &sc->fwohci_task_busreset); taskqueue_drain(sc->fc.taskqueue, &sc->fwohci_task_sid); taskqueue_drain(sc->fc.taskqueue, &sc->fwohci_task_dma); taskqueue_drain(sc->fc.taskqueue, &sc->fc.task_timeout); taskqueue_free(sc->fc.taskqueue); sc->fc.taskqueue = NULL; } return 0; } #define LAST_DB(dbtr, db) do { \ struct fwohcidb_tr *_dbtr = (dbtr); \ int _cnt = _dbtr->dbcnt; \ db = &_dbtr->db[ (_cnt > 2) ? (_cnt -1) : 0]; \ } while (0) static void fwohci_execute_db(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct fwohcidb_tr *db_tr; struct fwohcidb *db; bus_dma_segment_t *s; int i; db_tr = (struct fwohcidb_tr *)arg; db = &db_tr->db[db_tr->dbcnt]; if (error) { if (firewire_debug || error != EFBIG) printf("fwohci_execute_db: error=%d\n", error); return; } for (i = 0; i < nseg; i++) { s = &segs[i]; FWOHCI_DMA_WRITE(db->db.desc.addr, s->ds_addr); FWOHCI_DMA_WRITE(db->db.desc.cmd, s->ds_len); FWOHCI_DMA_WRITE(db->db.desc.res, 0); db++; db_tr->dbcnt++; } } static void fwohci_execute_db2(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t size, int error) { fwohci_execute_db(arg, segs, nseg, error); } static void fwohci_start(struct fwohci_softc *sc, struct fwohci_dbch *dbch) { int i, s; int tcode, hdr_len, pl_off; int fsegment = -1; uint32_t off; struct fw_xfer *xfer; struct fw_pkt *fp; struct fwohci_txpkthdr *ohcifp; struct fwohcidb_tr *db_tr; struct fwohcidb *db; uint32_t *ld; struct tcode_info *info; static int maxdesc=0; FW_GLOCK_ASSERT(&sc->fc); if(&sc->atrq == dbch){ off = OHCI_ATQOFF; }else if(&sc->atrs == dbch){ off = OHCI_ATSOFF; }else{ return; } if (dbch->flags & FWOHCI_DBCH_FULL) return; s = splfw(); db_tr = dbch->top; txloop: xfer = STAILQ_FIRST(&dbch->xferq.q); if(xfer == NULL){ goto kick; } #if 0 if(dbch->xferq.queued == 0 ){ device_printf(sc->fc.dev, "TX queue empty\n"); } #endif STAILQ_REMOVE_HEAD(&dbch->xferq.q, link); db_tr->xfer = xfer; xfer->flag = FWXF_START; fp = &xfer->send.hdr; tcode = fp->mode.common.tcode; ohcifp = (struct fwohci_txpkthdr *) db_tr->db[1].db.immed; info = &tinfo[tcode]; hdr_len = pl_off = info->hdr_len; ld = &ohcifp->mode.ld[0]; ld[0] = ld[1] = ld[2] = ld[3] = 0; for( i = 0 ; i < pl_off ; i+= 4) ld[i/4] = fp->mode.ld[i/4]; ohcifp->mode.common.spd = xfer->send.spd & 0x7; if (tcode == FWTCODE_STREAM ){ hdr_len = 8; ohcifp->mode.stream.len = fp->mode.stream.len; } else if (tcode == FWTCODE_PHY) { hdr_len = 12; ld[1] = fp->mode.ld[1]; ld[2] = fp->mode.ld[2]; ohcifp->mode.common.spd = 0; 
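			/*
			 * PHY packets bypass normal 1394 transaction
			 * framing: on the wire they are one quadlet plus
			 * its bit-inverted copy, which the caller provides
			 * in ld[1]/ld[2] of the software header, roughly
			 *
			 *	fp->mode.ld[1] = q;	the PHY quadlet
			 *	fp->mode.ld[2] = ~q;	its complement
			 *
			 * (a sketch only; q is a hypothetical quadlet).
			 * The speed is forced to S100 above, and the tcode
			 * is rewritten below to the OHCI-private
			 * FWOHCITCODE_PHY so the link emits a raw PHY
			 * packet instead of an async transaction.
			 */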
ohcifp->mode.common.tcode = FWOHCITCODE_PHY; } else { ohcifp->mode.asycomm.dst = fp->mode.hdr.dst; ohcifp->mode.asycomm.srcbus = OHCI_ASYSRCBUS; ohcifp->mode.asycomm.tlrt |= FWRETRY_X; } db = &db_tr->db[0]; FWOHCI_DMA_WRITE(db->db.desc.cmd, OHCI_OUTPUT_MORE | OHCI_KEY_ST2 | hdr_len); FWOHCI_DMA_WRITE(db->db.desc.addr, 0); FWOHCI_DMA_WRITE(db->db.desc.res, 0); /* Specify bound timer of asy. responce */ if(&sc->atrs == dbch){ FWOHCI_DMA_WRITE(db->db.desc.res, (OREAD(sc, OHCI_CYCLETIMER) >> 12) + (1 << 13)); } #if BYTE_ORDER == BIG_ENDIAN if (tcode == FWTCODE_WREQQ || tcode == FWTCODE_RRESQ) hdr_len = 12; for (i = 0; i < hdr_len/4; i ++) FWOHCI_DMA_WRITE(ld[i], ld[i]); #endif again: db_tr->dbcnt = 2; db = &db_tr->db[db_tr->dbcnt]; if (xfer->send.pay_len > 0) { int err; /* handle payload */ if (xfer->mbuf == NULL) { err = bus_dmamap_load(dbch->dmat, db_tr->dma_map, &xfer->send.payload[0], xfer->send.pay_len, fwohci_execute_db, db_tr, /*flags*/0); } else { /* XXX we can handle only 6 (=8-2) mbuf chains */ err = bus_dmamap_load_mbuf(dbch->dmat, db_tr->dma_map, xfer->mbuf, fwohci_execute_db2, db_tr, /* flags */0); if (err == EFBIG) { struct mbuf *m0; if (firewire_debug) device_printf(sc->fc.dev, "EFBIG.\n"); m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m0 != NULL) { m_copydata(xfer->mbuf, 0, xfer->mbuf->m_pkthdr.len, mtod(m0, caddr_t)); m0->m_len = m0->m_pkthdr.len = xfer->mbuf->m_pkthdr.len; m_freem(xfer->mbuf); xfer->mbuf = m0; goto again; } device_printf(sc->fc.dev, "m_getcl failed.\n"); } } if (err) printf("dmamap_load: err=%d\n", err); bus_dmamap_sync(dbch->dmat, db_tr->dma_map, BUS_DMASYNC_PREWRITE); #if 0 /* OHCI_OUTPUT_MODE == 0 */ for (i = 2; i < db_tr->dbcnt; i++) FWOHCI_DMA_SET(db_tr->db[i].db.desc.cmd, OHCI_OUTPUT_MORE); #endif } if (maxdesc < db_tr->dbcnt) { maxdesc = db_tr->dbcnt; if (firewire_debug) device_printf(sc->fc.dev, "%s: maxdesc %d\n", __func__, maxdesc); } /* last db */ LAST_DB(db_tr, db); FWOHCI_DMA_SET(db->db.desc.cmd, OHCI_OUTPUT_LAST | OHCI_INTERRUPT_ALWAYS | OHCI_BRANCH_ALWAYS); FWOHCI_DMA_WRITE(db->db.desc.depend, STAILQ_NEXT(db_tr, link)->bus_addr); if(fsegment == -1 ) fsegment = db_tr->dbcnt; if (dbch->pdb_tr != NULL) { LAST_DB(dbch->pdb_tr, db); FWOHCI_DMA_SET(db->db.desc.depend, db_tr->dbcnt); } dbch->xferq.queued ++; dbch->pdb_tr = db_tr; db_tr = STAILQ_NEXT(db_tr, link); if(db_tr != dbch->bottom){ goto txloop; } else { device_printf(sc->fc.dev, "fwohci_start: lack of db_trq\n"); dbch->flags |= FWOHCI_DBCH_FULL; } kick: /* kick asy q */ fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD); fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE); if(dbch->xferq.flag & FWXFERQ_RUNNING) { OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_WAKE); } else { if (firewire_debug) device_printf(sc->fc.dev, "start AT DMA status=%x\n", OREAD(sc, OHCI_DMACTL(off))); OWRITE(sc, OHCI_DMACMD(off), dbch->top->bus_addr | fsegment); OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_RUN); dbch->xferq.flag |= FWXFERQ_RUNNING; } dbch->top = db_tr; splx(s); return; } static void fwohci_start_atq(struct firewire_comm *fc) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; FW_GLOCK(&sc->fc); fwohci_start( sc, &(sc->atrq)); FW_GUNLOCK(&sc->fc); return; } static void fwohci_start_ats(struct firewire_comm *fc) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; FW_GLOCK(&sc->fc); fwohci_start( sc, &(sc->atrs)); FW_GUNLOCK(&sc->fc); return; } void fwohci_txd(struct fwohci_softc *sc, struct fwohci_dbch *dbch) { int s, ch, err = 0; struct fwohcidb_tr *tr; struct fwohcidb *db; struct fw_xfer 
	*xfer;
	uint32_t off;
	u_int stat, status;
	int packets;
	struct firewire_comm *fc = (struct firewire_comm *)sc;

	if(&sc->atrq == dbch){
		off = OHCI_ATQOFF;
		ch = ATRQ_CH;
	}else if(&sc->atrs == dbch){
		off = OHCI_ATSOFF;
		ch = ATRS_CH;
	}else{
		return;
	}
	s = splfw();
	tr = dbch->bottom;
	packets = 0;
	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTREAD);
	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTWRITE);
	while(dbch->xferq.queued > 0){
		LAST_DB(tr, db);
		status = FWOHCI_DMA_READ(db->db.desc.res) >> OHCI_STATUS_SHIFT;
		if(!(status & OHCI_CNTL_DMA_ACTIVE)){
			if (fc->status != FWBUSINIT)
				/* maybe out of order?? */
				goto out;
		}
		bus_dmamap_sync(dbch->dmat, tr->dma_map,
			BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dbch->dmat, tr->dma_map);
#if 1
		if (firewire_debug > 1)
			dump_db(sc, ch);
#endif
		if(status & OHCI_CNTL_DMA_DEAD) {
			/* Stop DMA */
			OWRITE(sc, OHCI_DMACTLCLR(off), OHCI_CNTL_DMA_RUN);
			device_printf(sc->fc.dev, "force reset AT FIFO\n");
			OWRITE(sc, OHCI_HCCCTLCLR, OHCI_HCC_LINKEN);
			OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LPS | OHCI_HCC_LINKEN);
			OWRITE(sc, OHCI_DMACTLCLR(off), OHCI_CNTL_DMA_RUN);
		}
		stat = status & FWOHCIEV_MASK;
		switch(stat){
		case FWOHCIEV_ACKPEND:
		case FWOHCIEV_ACKCOMPL:
			err = 0;
			break;
		case FWOHCIEV_ACKBSA:
		case FWOHCIEV_ACKBSB:
		case FWOHCIEV_ACKBSX:
			device_printf(sc->fc.dev, "txd err=%2x %s\n",
					stat, fwohcicode[stat]);
			err = EBUSY;
			break;
		case FWOHCIEV_FLUSHED:
		case FWOHCIEV_ACKTARD:
			device_printf(sc->fc.dev, "txd err=%2x %s\n",
					stat, fwohcicode[stat]);
			err = EAGAIN;
			break;
		case FWOHCIEV_MISSACK:
		case FWOHCIEV_UNDRRUN:
		case FWOHCIEV_OVRRUN:
		case FWOHCIEV_DESCERR:
		case FWOHCIEV_DTRDERR:
		case FWOHCIEV_TIMEOUT:
		case FWOHCIEV_TCODERR:
		case FWOHCIEV_UNKNOWN:
		case FWOHCIEV_ACKDERR:
		case FWOHCIEV_ACKTERR:
		default:
			device_printf(sc->fc.dev, "txd err=%2x %s\n",
					stat, fwohcicode[stat]);
			err = EINVAL;
			break;
		}
		if (tr->xfer != NULL) {
			xfer = tr->xfer;
			if (xfer->flag & FWXF_RCVD) {
#if 0
				if (firewire_debug)
					printf("already rcvd\n");
#endif
				fw_xfer_done(xfer);
			} else {
				microtime(&xfer->tv);
				xfer->flag = FWXF_SENT;
				if (err == EBUSY) {
					xfer->flag = FWXF_BUSY;
					xfer->resp = err;
					xfer->recv.pay_len = 0;
					fw_xfer_done(xfer);
				} else if (stat != FWOHCIEV_ACKPEND) {
					if (stat != FWOHCIEV_ACKCOMPL)
						xfer->flag = FWXF_SENTERR;
					xfer->resp = err;
					xfer->recv.pay_len = 0;
					fw_xfer_done(xfer);
				}
			}
			/*
			 * The watchdog timer takes care of split
			 * transaction timeouts for the ACKPEND case.
			 */
		} else {
			printf("this shouldn't happen\n");
		}
		FW_GLOCK(fc);
		dbch->xferq.queued --;
		FW_GUNLOCK(fc);
		tr->xfer = NULL;
		packets ++;
		tr = STAILQ_NEXT(tr, link);
		dbch->bottom = tr;
		if (dbch->bottom == dbch->top) {
			/* we reached the end of the context program */
			if (firewire_debug && dbch->xferq.queued > 0)
				printf("queued > 0\n");
			break;
		}
	}
out:
	if ((dbch->flags & FWOHCI_DBCH_FULL) && packets > 0) {
		printf("make free slot\n");
		dbch->flags &= ~FWOHCI_DBCH_FULL;
		FW_GLOCK(fc);
		fwohci_start(sc, dbch);
		FW_GUNLOCK(fc);
	}
	splx(s);
}

static void
fwohci_db_free(struct fwohci_dbch *dbch)
{
	struct fwohcidb_tr *db_tr;
	int idb;

	if ((dbch->flags & FWOHCI_DBCH_INIT) == 0)
		return;

	for(db_tr = STAILQ_FIRST(&dbch->db_trq), idb = 0; idb < dbch->ndb;
			db_tr = STAILQ_NEXT(db_tr, link), idb++){
		if ((dbch->xferq.flag & FWXFERQ_EXTBUF) == 0 &&
				db_tr->buf != NULL) {
			fwdma_free_size(dbch->dmat, db_tr->dma_map,
					db_tr->buf, dbch->xferq.psize);
			db_tr->buf = NULL;
		} else if (db_tr->dma_map != NULL)
			bus_dmamap_destroy(dbch->dmat, db_tr->dma_map);
	}
	dbch->ndb = 0;
	db_tr = STAILQ_FIRST(&dbch->db_trq);
	fwdma_free_multiseg(dbch->am);
	free(db_tr, M_FW);
	STAILQ_INIT(&dbch->db_trq);
	dbch->flags &= ~FWOHCI_DBCH_INIT;
}

static void
fwohci_db_init(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
{
	int idb;
	struct fwohcidb_tr *db_tr;

	if ((dbch->flags & FWOHCI_DBCH_INIT) != 0)
		goto out;

	/* create dma_tag for buffers */
#define MAX_REQCOUNT 0xffff
	if (bus_dma_tag_create(/*parent*/ sc->fc.dmat,
			/*alignment*/ 1, /*boundary*/ 0,
			/*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
			/*highaddr*/ BUS_SPACE_MAXADDR,
			/*filter*/NULL, /*filterarg*/NULL,
			/*maxsize*/ dbch->xferq.psize,
			/*nsegments*/ dbch->ndesc > 3 ? dbch->ndesc - 2 : 1,
			/*maxsegsz*/ MAX_REQCOUNT,
			/*flags*/ 0,
#if defined(__FreeBSD__) && __FreeBSD_version >= 501102
			/*lockfunc*/busdma_lock_mutex,
			/*lockarg*/FW_GMTX(&sc->fc),
#endif
			&dbch->dmat))
		return;

	/* allocate DB entries and attach one to each DMA channel */
	/* DB entries must start on a 16-byte boundary. */
	STAILQ_INIT(&dbch->db_trq);
	db_tr = (struct fwohcidb_tr *)
		malloc(sizeof(struct fwohcidb_tr) * dbch->ndb,
		M_FW, M_WAITOK | M_ZERO);
	if(db_tr == NULL){
		printf("fwohci_db_init: malloc(1) failed\n");
		return;
	}

#define DB_SIZE(x) (sizeof(struct fwohcidb) * (x)->ndesc)
	dbch->am = fwdma_malloc_multiseg(&sc->fc, DB_SIZE(dbch),
		DB_SIZE(dbch), dbch->ndb, BUS_DMA_WAITOK);
	if (dbch->am == NULL) {
		printf("fwohci_db_init: fwdma_malloc_multiseg failed\n");
		free(db_tr, M_FW);
		return;
	}
	/* Attach DB to DMA ch. */
	for(idb = 0 ; idb < dbch->ndb ; idb++){
		db_tr->dbcnt = 0;
		db_tr->db = (struct fwohcidb *)fwdma_v_addr(dbch->am, idb);
		db_tr->bus_addr = fwdma_bus_addr(dbch->am, idb);
		/* create dmamap for buffers */
		/* XXX do we need a 4-byte alignment tag?
*/ /* XXX don't alloc dma_map for AR */ if (bus_dmamap_create(dbch->dmat, 0, &db_tr->dma_map) != 0) { printf("bus_dmamap_create failed\n"); dbch->flags = FWOHCI_DBCH_INIT; /* XXX fake */ fwohci_db_free(dbch); return; } STAILQ_INSERT_TAIL(&dbch->db_trq, db_tr, link); if (dbch->xferq.flag & FWXFERQ_EXTBUF) { if (idb % dbch->xferq.bnpacket == 0) dbch->xferq.bulkxfer[idb / dbch->xferq.bnpacket ].start = (caddr_t)db_tr; if ((idb + 1) % dbch->xferq.bnpacket == 0) dbch->xferq.bulkxfer[idb / dbch->xferq.bnpacket ].end = (caddr_t)db_tr; } db_tr++; } STAILQ_LAST(&dbch->db_trq, fwohcidb_tr,link)->link.stqe_next = STAILQ_FIRST(&dbch->db_trq); out: dbch->xferq.queued = 0; dbch->pdb_tr = NULL; dbch->top = STAILQ_FIRST(&dbch->db_trq); dbch->bottom = dbch->top; dbch->flags = FWOHCI_DBCH_INIT; } static int fwohci_itx_disable(struct firewire_comm *fc, int dmach) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; OWRITE(sc, OHCI_ITCTLCLR(dmach), OHCI_CNTL_DMA_RUN | OHCI_CNTL_CYCMATCH_S); OWRITE(sc, OHCI_IT_MASKCLR, 1 << dmach); OWRITE(sc, OHCI_IT_STATCLR, 1 << dmach); /* XXX we cannot free buffers until the DMA really stops */ pause("fwitxd", hz); fwohci_db_free(&sc->it[dmach]); sc->it[dmach].xferq.flag &= ~FWXFERQ_RUNNING; return 0; } static int fwohci_irx_disable(struct firewire_comm *fc, int dmach) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; OWRITE(sc, OHCI_IRCTLCLR(dmach), OHCI_CNTL_DMA_RUN); OWRITE(sc, OHCI_IR_MASKCLR, 1 << dmach); OWRITE(sc, OHCI_IR_STATCLR, 1 << dmach); /* XXX we cannot free buffers until the DMA really stops */ pause("fwirxd", hz); fwohci_db_free(&sc->ir[dmach]); sc->ir[dmach].xferq.flag &= ~FWXFERQ_RUNNING; return 0; } #if BYTE_ORDER == BIG_ENDIAN static void fwohci_irx_post (struct firewire_comm *fc , uint32_t *qld) { qld[0] = FWOHCI_DMA_READ(qld[0]); return; } #endif static int fwohci_tx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch) { int err = 0; int idb, z, i, dmach = 0, ldesc; uint32_t off = 0; struct fwohcidb_tr *db_tr; struct fwohcidb *db; if(!(dbch->xferq.flag & FWXFERQ_EXTBUF)){ err = EINVAL; return err; } z = dbch->ndesc; for(dmach = 0 ; dmach < sc->fc.nisodma ; dmach++){ if( &sc->it[dmach] == dbch){ off = OHCI_ITOFF(dmach); break; } } if(off == 0){ err = EINVAL; return err; } if(dbch->xferq.flag & FWXFERQ_RUNNING) return err; dbch->xferq.flag |= FWXFERQ_RUNNING; for( i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++){ dbch->bottom = STAILQ_NEXT(dbch->bottom, link); } db_tr = dbch->top; for (idb = 0; idb < dbch->ndb; idb ++) { fwohci_add_tx_buf(dbch, db_tr, idb); if(STAILQ_NEXT(db_tr, link) == NULL){ break; } db = db_tr->db; ldesc = db_tr->dbcnt - 1; FWOHCI_DMA_WRITE(db[0].db.desc.depend, STAILQ_NEXT(db_tr, link)->bus_addr | z); db[ldesc].db.desc.depend = db[0].db.desc.depend; if(dbch->xferq.flag & FWXFERQ_EXTBUF){ if(((idb + 1 ) % dbch->xferq.bnpacket) == 0){ FWOHCI_DMA_SET( db[ldesc].db.desc.cmd, OHCI_INTERRUPT_ALWAYS); /* OHCI 1.1 and above */ FWOHCI_DMA_SET( db[0].db.desc.cmd, OHCI_INTERRUPT_ALWAYS); } } db_tr = STAILQ_NEXT(db_tr, link); } FWOHCI_DMA_CLEAR( dbch->bottom->db[dbch->bottom->dbcnt - 1].db.desc.depend, 0xf); return err; } static int fwohci_rx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch) { int err = 0; int idb, z, i, dmach = 0, ldesc; uint32_t off = 0; struct fwohcidb_tr *db_tr; struct fwohcidb *db; z = dbch->ndesc; if(&sc->arrq == dbch){ off = OHCI_ARQOFF; }else if(&sc->arrs == dbch){ off = OHCI_ARSOFF; }else{ for(dmach = 0 ; dmach < sc->fc.nisodma ; dmach++){ if( &sc->ir[dmach] == dbch){ off = 
OHCI_IROFF(dmach); break; } } } if(off == 0){ err = EINVAL; return err; } if(dbch->xferq.flag & FWXFERQ_STREAM){ if(dbch->xferq.flag & FWXFERQ_RUNNING) return err; }else{ if(dbch->xferq.flag & FWXFERQ_RUNNING){ err = EBUSY; return err; } } dbch->xferq.flag |= FWXFERQ_RUNNING; dbch->top = STAILQ_FIRST(&dbch->db_trq); for( i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++){ dbch->bottom = STAILQ_NEXT(dbch->bottom, link); } db_tr = dbch->top; for (idb = 0; idb < dbch->ndb; idb ++) { fwohci_add_rx_buf(dbch, db_tr, idb, &sc->dummy_dma); if (STAILQ_NEXT(db_tr, link) == NULL) break; db = db_tr->db; ldesc = db_tr->dbcnt - 1; FWOHCI_DMA_WRITE(db[ldesc].db.desc.depend, STAILQ_NEXT(db_tr, link)->bus_addr | z); if(dbch->xferq.flag & FWXFERQ_EXTBUF){ if(((idb + 1 ) % dbch->xferq.bnpacket) == 0){ FWOHCI_DMA_SET( db[ldesc].db.desc.cmd, OHCI_INTERRUPT_ALWAYS); FWOHCI_DMA_CLEAR( db[ldesc].db.desc.depend, 0xf); } } db_tr = STAILQ_NEXT(db_tr, link); } FWOHCI_DMA_CLEAR( dbch->bottom->db[db_tr->dbcnt - 1].db.desc.depend, 0xf); dbch->buf_offset = 0; fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD); fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE); if(dbch->xferq.flag & FWXFERQ_STREAM){ return err; }else{ OWRITE(sc, OHCI_DMACMD(off), dbch->top->bus_addr | z); } OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_RUN); return err; } static int fwohci_next_cycle(struct firewire_comm *fc, int cycle_now) { int sec, cycle, cycle_match; cycle = cycle_now & 0x1fff; sec = cycle_now >> 13; #define CYCLE_MOD 0x10 #if 1 #define CYCLE_DELAY 8 /* min delay to start DMA */ #else #define CYCLE_DELAY 7000 /* min delay to start DMA */ #endif cycle = cycle + CYCLE_DELAY; if (cycle >= 8000) { sec ++; cycle -= 8000; } cycle = roundup2(cycle, CYCLE_MOD); if (cycle >= 8000) { sec ++; if (cycle == 8000) cycle = 0; else cycle = CYCLE_MOD; } cycle_match = ((sec << 13) | cycle) & 0x7ffff; return(cycle_match); } static int fwohci_itxbuf_enable(struct firewire_comm *fc, int dmach) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; int err = 0; unsigned short tag, ich; struct fwohci_dbch *dbch; int cycle_match, cycle_now, s, ldesc; uint32_t stat; struct fw_bulkxfer *first, *chunk, *prev; struct fw_xferq *it; dbch = &sc->it[dmach]; it = &dbch->xferq; tag = (it->flag >> 6) & 3; ich = it->flag & 0x3f; if ((dbch->flags & FWOHCI_DBCH_INIT) == 0) { dbch->ndb = it->bnpacket * it->bnchunk; dbch->ndesc = 3; fwohci_db_init(sc, dbch); if ((dbch->flags & FWOHCI_DBCH_INIT) == 0) return ENOMEM; err = fwohci_tx_enable(sc, dbch); } if(err) return err; ldesc = dbch->ndesc - 1; s = splfw(); FW_GLOCK(fc); prev = STAILQ_LAST(&it->stdma, fw_bulkxfer, link); while ((chunk = STAILQ_FIRST(&it->stvalid)) != NULL) { struct fwohcidb *db; fwdma_sync_multiseg(it->buf, chunk->poffset, it->bnpacket, BUS_DMASYNC_PREWRITE); fwohci_txbufdb(sc, dmach, chunk); if (prev != NULL) { db = ((struct fwohcidb_tr *)(prev->end))->db; #if 0 /* XXX necessary? 
*/ FWOHCI_DMA_SET(db[ldesc].db.desc.cmd, OHCI_BRANCH_ALWAYS); #endif #if 0 /* if bulkxfer->npacket changes */ db[ldesc].db.desc.depend = db[0].db.desc.depend = ((struct fwohcidb_tr *) (chunk->start))->bus_addr | dbch->ndesc; #else FWOHCI_DMA_SET(db[0].db.desc.depend, dbch->ndesc); FWOHCI_DMA_SET(db[ldesc].db.desc.depend, dbch->ndesc); #endif } STAILQ_REMOVE_HEAD(&it->stvalid, link); STAILQ_INSERT_TAIL(&it->stdma, chunk, link); prev = chunk; } FW_GUNLOCK(fc); fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE); fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD); splx(s); stat = OREAD(sc, OHCI_ITCTL(dmach)); if (firewire_debug && (stat & OHCI_CNTL_CYCMATCH_S)) printf("stat 0x%x\n", stat); if (stat & (OHCI_CNTL_DMA_ACTIVE | OHCI_CNTL_CYCMATCH_S)) return 0; #if 0 OWRITE(sc, OHCI_ITCTLCLR(dmach), OHCI_CNTL_DMA_RUN); #endif OWRITE(sc, OHCI_IT_MASKCLR, 1 << dmach); OWRITE(sc, OHCI_IT_STATCLR, 1 << dmach); OWRITE(sc, OHCI_IT_MASK, 1 << dmach); OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_DMA_IT); first = STAILQ_FIRST(&it->stdma); OWRITE(sc, OHCI_ITCMD(dmach), ((struct fwohcidb_tr *)(first->start))->bus_addr | dbch->ndesc); if (firewire_debug > 1) { printf("fwohci_itxbuf_enable: kick 0x%08x\n", stat); #if 1 dump_dma(sc, ITX_CH + dmach); #endif } if ((stat & OHCI_CNTL_DMA_RUN) == 0) { #if 1 /* Don't start until all chunks are buffered */ if (STAILQ_FIRST(&it->stfree) != NULL) goto out; #endif #if 1 /* Clear cycle match counter bits */ OWRITE(sc, OHCI_ITCTLCLR(dmach), 0xffff0000); /* 2bit second + 13bit cycle */ cycle_now = (fc->cyctimer(fc) >> 12) & 0x7fff; cycle_match = fwohci_next_cycle(fc, cycle_now); OWRITE(sc, OHCI_ITCTL(dmach), OHCI_CNTL_CYCMATCH_S | (cycle_match << 16) | OHCI_CNTL_DMA_RUN); #else OWRITE(sc, OHCI_ITCTL(dmach), OHCI_CNTL_DMA_RUN); #endif if (firewire_debug > 1) { printf("cycle_match: 0x%04x->0x%04x\n", cycle_now, cycle_match); dump_dma(sc, ITX_CH + dmach); dump_db(sc, ITX_CH + dmach); } } else if ((stat & OHCI_CNTL_CYCMATCH_S) == 0) { device_printf(sc->fc.dev, "IT DMA underrun (0x%08x)\n", stat); OWRITE(sc, OHCI_ITCTL(dmach), OHCI_CNTL_DMA_WAKE); } out: return err; } static int fwohci_irx_enable(struct firewire_comm *fc, int dmach) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; int err = 0, s, ldesc; unsigned short tag, ich; uint32_t stat; struct fwohci_dbch *dbch; struct fwohcidb_tr *db_tr; struct fw_bulkxfer *first, *prev, *chunk; struct fw_xferq *ir; dbch = &sc->ir[dmach]; ir = &dbch->xferq; if ((ir->flag & FWXFERQ_RUNNING) == 0) { tag = (ir->flag >> 6) & 3; ich = ir->flag & 0x3f; OWRITE(sc, OHCI_IRMATCH(dmach), tagbit[tag] | ich); ir->queued = 0; dbch->ndb = ir->bnpacket * ir->bnchunk; dbch->ndesc = 2; fwohci_db_init(sc, dbch); if ((dbch->flags & FWOHCI_DBCH_INIT) == 0) return ENOMEM; err = fwohci_rx_enable(sc, dbch); } if(err) return err; first = STAILQ_FIRST(&ir->stfree); if (first == NULL) { device_printf(fc->dev, "IR DMA no free chunk\n"); return 0; } ldesc = dbch->ndesc - 1; s = splfw(); if ((ir->flag & FWXFERQ_HANDLER) == 0) FW_GLOCK(fc); prev = STAILQ_LAST(&ir->stdma, fw_bulkxfer, link); while ((chunk = STAILQ_FIRST(&ir->stfree)) != NULL) { struct fwohcidb *db; #if 1 /* XXX for if_fwe */ if (chunk->mbuf != NULL) { db_tr = (struct fwohcidb_tr *)(chunk->start); db_tr->dbcnt = 1; err = bus_dmamap_load_mbuf(dbch->dmat, db_tr->dma_map, chunk->mbuf, fwohci_execute_db2, db_tr, /* flags */0); FWOHCI_DMA_SET(db_tr->db[1].db.desc.cmd, OHCI_UPDATE | OHCI_INPUT_LAST | OHCI_INTERRUPT_ALWAYS | OHCI_BRANCH_ALWAYS); } #endif db = ((struct fwohcidb_tr *)(chunk->end))->db; 
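		/*
		 * Chunk chaining works through the low nibble ("Z") of the
		 * branch address in a block's last descriptor: Z = ndesc
		 * tells the controller how many descriptors the next block
		 * holds, and Z = 0 makes DMA stall there.  In pseudo-C
		 * (last() is shorthand, not a real helper), the stores
		 * below do
		 *
		 *	last(chunk)->res = 0;		  clear status word
		 *	last(chunk)->depend &= ~0xf;	  end chain here
		 *	if (prev != NULL)
		 *		last(prev)->depend |= ndesc;  splice chunk in
		 */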
FWOHCI_DMA_WRITE(db[ldesc].db.desc.res, 0); FWOHCI_DMA_CLEAR(db[ldesc].db.desc.depend, 0xf); if (prev != NULL) { db = ((struct fwohcidb_tr *)(prev->end))->db; FWOHCI_DMA_SET(db[ldesc].db.desc.depend, dbch->ndesc); } STAILQ_REMOVE_HEAD(&ir->stfree, link); STAILQ_INSERT_TAIL(&ir->stdma, chunk, link); prev = chunk; } if ((ir->flag & FWXFERQ_HANDLER) == 0) FW_GUNLOCK(fc); fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE); fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD); splx(s); stat = OREAD(sc, OHCI_IRCTL(dmach)); if (stat & OHCI_CNTL_DMA_ACTIVE) return 0; if (stat & OHCI_CNTL_DMA_RUN) { OWRITE(sc, OHCI_IRCTLCLR(dmach), OHCI_CNTL_DMA_RUN); device_printf(sc->fc.dev, "IR DMA overrun (0x%08x)\n", stat); } if (firewire_debug) printf("start IR DMA 0x%x\n", stat); OWRITE(sc, OHCI_IR_MASKCLR, 1 << dmach); OWRITE(sc, OHCI_IR_STATCLR, 1 << dmach); OWRITE(sc, OHCI_IR_MASK, 1 << dmach); OWRITE(sc, OHCI_IRCTLCLR(dmach), 0xf0000000); OWRITE(sc, OHCI_IRCTL(dmach), OHCI_CNTL_ISOHDR); OWRITE(sc, OHCI_IRCMD(dmach), ((struct fwohcidb_tr *)(first->start))->bus_addr | dbch->ndesc); OWRITE(sc, OHCI_IRCTL(dmach), OHCI_CNTL_DMA_RUN); OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_DMA_IR); #if 0 dump_db(sc, IRX_CH + dmach); #endif return err; } int fwohci_stop(struct fwohci_softc *sc, device_t dev) { u_int i; fwohci_set_intr(&sc->fc, 0); /* Now stopping all DMA channel */ OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN); OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN); OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN); OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN); for( i = 0 ; i < sc->fc.nisodma ; i ++ ){ OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN); OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN); } #if 0 /* Let dcons(4) be accessed */ /* Stop interrupt */ OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_EN | OHCI_INT_ERR | OHCI_INT_PHY_SID | OHCI_INT_PHY_INT | OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS | OHCI_INT_DMA_PRRQ | OHCI_INT_DMA_PRRS | OHCI_INT_DMA_ARRQ | OHCI_INT_DMA_ARRS | OHCI_INT_PHY_BUS_R); /* FLUSH FIFO and reset Transmitter/Reciever */ OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_RESET); #endif /* XXX Link down? Bus reset? */ return 0; } int fwohci_resume(struct fwohci_softc *sc, device_t dev) { int i; struct fw_xferq *ir; struct fw_bulkxfer *chunk; fwohci_reset(sc, dev); /* XXX resume isochronous receive automatically. (how about TX?) */ for(i = 0; i < sc->fc.nisodma; i ++) { ir = &sc->ir[i].xferq; if((ir->flag & FWXFERQ_RUNNING) != 0) { device_printf(sc->fc.dev, "resume iso receive ch: %d\n", i); ir->flag &= ~FWXFERQ_RUNNING; /* requeue stdma to stfree */ while((chunk = STAILQ_FIRST(&ir->stdma)) != NULL) { STAILQ_REMOVE_HEAD(&ir->stdma, link); STAILQ_INSERT_TAIL(&ir->stfree, chunk, link); } sc->fc.irx_enable(&sc->fc, i); } } bus_generic_resume(dev); sc->fc.ibr(&sc->fc); return 0; } #ifdef OHCI_DEBUG static void fwohci_dump_intr(struct fwohci_softc *sc, uint32_t stat) { if(stat & OREAD(sc, FWOHCI_INTMASK)) device_printf(fc->dev, "INTERRUPT < %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s> 0x%08x, 0x%08x\n", stat & OHCI_INT_EN ? "DMA_EN ":"", stat & OHCI_INT_PHY_REG ? "PHY_REG ":"", stat & OHCI_INT_CYC_LONG ? "CYC_LONG ":"", stat & OHCI_INT_ERR ? "INT_ERR ":"", stat & OHCI_INT_CYC_ERR ? "CYC_ERR ":"", stat & OHCI_INT_CYC_LOST ? "CYC_LOST ":"", stat & OHCI_INT_CYC_64SECOND ? "CYC_64SECOND ":"", stat & OHCI_INT_CYC_START ? "CYC_START ":"", stat & OHCI_INT_PHY_INT ? "PHY_INT ":"", stat & OHCI_INT_PHY_BUS_R ? "BUS_RESET ":"", stat & OHCI_INT_PHY_SID ? "SID ":"", stat & OHCI_INT_LR_ERR ? "DMA_LR_ERR ":"", stat & OHCI_INT_PW_ERR ? 
"DMA_PW_ERR ":"", stat & OHCI_INT_DMA_IR ? "DMA_IR ":"", stat & OHCI_INT_DMA_IT ? "DMA_IT " :"", stat & OHCI_INT_DMA_PRRS ? "DMA_PRRS " :"", stat & OHCI_INT_DMA_PRRQ ? "DMA_PRRQ " :"", stat & OHCI_INT_DMA_ARRS ? "DMA_ARRS " :"", stat & OHCI_INT_DMA_ARRQ ? "DMA_ARRQ " :"", stat & OHCI_INT_DMA_ATRS ? "DMA_ATRS " :"", stat & OHCI_INT_DMA_ATRQ ? "DMA_ATRQ " :"", stat, OREAD(sc, FWOHCI_INTMASK) ); } #endif static void fwohci_intr_core(struct fwohci_softc *sc, uint32_t stat, int count) { struct firewire_comm *fc = (struct firewire_comm *)sc; uint32_t node_id, plen; FW_GLOCK_ASSERT(fc); if ((stat & OHCI_INT_PHY_BUS_R) && (fc->status != FWBUSRESET)) { fc->status = FWBUSRESET; /* Disable bus reset interrupt until sid recv. */ OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_PHY_BUS_R); device_printf(fc->dev, "%s: BUS reset\n", __func__); OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_CYC_LOST); OWRITE(sc, OHCI_LNKCTLCLR, OHCI_CNTL_CYCSRC); OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN); sc->atrq.xferq.flag &= ~FWXFERQ_RUNNING; OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN); sc->atrs.xferq.flag &= ~FWXFERQ_RUNNING; if (!kdb_active) taskqueue_enqueue(sc->fc.taskqueue, &sc->fwohci_task_busreset); } if (stat & OHCI_INT_PHY_SID) { /* Enable bus reset interrupt */ OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_PHY_BUS_R); OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_PHY_BUS_R); /* Allow async. request to us */ OWRITE(sc, OHCI_AREQHI, 1 << 31); if (firewire_phydma_enable) { /* allow from all nodes */ OWRITE(sc, OHCI_PREQHI, 0x7fffffff); OWRITE(sc, OHCI_PREQLO, 0xffffffff); /* 0 to 4GB region */ OWRITE(sc, OHCI_PREQUPPER, 0x10000); } /* Set ATRetries register */ OWRITE(sc, OHCI_ATRETRY, 1<<(13+16) | 0xfff); /* * Checking whether the node is root or not. If root, turn on * cycle master. */ node_id = OREAD(sc, FWOHCI_NODEID); plen = OREAD(sc, OHCI_SID_CNT); fc->nodeid = node_id & 0x3f; device_printf(fc->dev, "%s: node_id=0x%08x, SelfID Count=%d, ", __func__, fc->nodeid, (plen >> 16) & 0xff); if (!(node_id & OHCI_NODE_VALID)) { device_printf(fc->dev, "%s: Bus reset failure\n", __func__); goto sidout; } /* cycle timer */ sc->cycle_lost = 0; OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_CYC_LOST); if ((node_id & OHCI_NODE_ROOT) && !nocyclemaster) { printf("CYCLEMASTER mode\n"); OWRITE(sc, OHCI_LNKCTL, OHCI_CNTL_CYCMTR | OHCI_CNTL_CYCTIMER); } else { printf("non CYCLEMASTER mode\n"); OWRITE(sc, OHCI_LNKCTLCLR, OHCI_CNTL_CYCMTR); OWRITE(sc, OHCI_LNKCTL, OHCI_CNTL_CYCTIMER); } fc->status = FWBUSINIT; if (!kdb_active) taskqueue_enqueue(sc->fc.taskqueue, &sc->fwohci_task_sid); } sidout: if ((stat & ~(OHCI_INT_PHY_BUS_R | OHCI_INT_PHY_SID)) && (!kdb_active)) taskqueue_enqueue(sc->fc.taskqueue, &sc->fwohci_task_dma); } static void fwohci_intr_dma(struct fwohci_softc *sc, uint32_t stat, int count) { uint32_t irstat, itstat; u_int i; struct firewire_comm *fc = (struct firewire_comm *)sc; if (stat & OHCI_INT_DMA_IR) { irstat = atomic_readandclear_int(&sc->irstat); for(i = 0; i < fc->nisodma ; i++){ struct fwohci_dbch *dbch; if((irstat & (1 << i)) != 0){ dbch = &sc->ir[i]; if ((dbch->xferq.flag & FWXFERQ_OPEN) == 0) { device_printf(sc->fc.dev, "dma(%d) not active\n", i); continue; } fwohci_rbuf_update(sc, i); } } } if (stat & OHCI_INT_DMA_IT) { itstat = atomic_readandclear_int(&sc->itstat); for(i = 0; i < fc->nisodma ; i++){ if((itstat & (1 << i)) != 0){ fwohci_tbuf_update(sc, i); } } } if (stat & OHCI_INT_DMA_PRRS) { #if 0 dump_dma(sc, ARRS_CH); dump_db(sc, ARRS_CH); #endif fwohci_arcv(sc, &sc->arrs, count); } if (stat & OHCI_INT_DMA_PRRQ) { #if 0 dump_dma(sc, 
ARRQ_CH); dump_db(sc, ARRQ_CH); #endif fwohci_arcv(sc, &sc->arrq, count); } if (stat & OHCI_INT_CYC_LOST) { if (sc->cycle_lost >= 0) sc->cycle_lost ++; if (sc->cycle_lost > 10) { sc->cycle_lost = -1; #if 0 OWRITE(sc, OHCI_LNKCTLCLR, OHCI_CNTL_CYCTIMER); #endif OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_CYC_LOST); device_printf(fc->dev, "too many cycles lost, " "no cycle master present?\n"); } } if (stat & OHCI_INT_DMA_ATRQ) { fwohci_txd(sc, &(sc->atrq)); } if (stat & OHCI_INT_DMA_ATRS) { fwohci_txd(sc, &(sc->atrs)); } if (stat & OHCI_INT_PW_ERR) { device_printf(fc->dev, "posted write error\n"); } if (stat & OHCI_INT_ERR) { device_printf(fc->dev, "unrecoverable error\n"); } if (stat & OHCI_INT_PHY_INT) { device_printf(fc->dev, "phy int\n"); } return; } static void fwohci_task_busreset(void *arg, int pending) { struct fwohci_softc *sc = (struct fwohci_softc *)arg; FW_GLOCK(&sc->fc); fw_busreset(&sc->fc, FWBUSRESET); OWRITE(sc, OHCI_CROMHDR, ntohl(sc->fc.config_rom[0])); OWRITE(sc, OHCI_BUS_OPT, ntohl(sc->fc.config_rom[2])); FW_GUNLOCK(&sc->fc); } static void fwohci_task_sid(void *arg, int pending) { struct fwohci_softc *sc = (struct fwohci_softc *)arg; struct firewire_comm *fc = &sc->fc; uint32_t *buf; int i, plen; /* * We really should have locking * here. Not sure why it's not */ plen = OREAD(sc, OHCI_SID_CNT); if (plen & OHCI_SID_ERR) { device_printf(fc->dev, "SID Error\n"); return; } plen &= OHCI_SID_CNT_MASK; if (plen < 4 || plen > OHCI_SIDSIZE) { device_printf(fc->dev, "invalid SID len = %d\n", plen); return; } plen -= 4; /* chop control info */ buf = (uint32_t *)malloc(OHCI_SIDSIZE, M_FW, M_NOWAIT); if (buf == NULL) { device_printf(fc->dev, "malloc failed\n"); return; } for (i = 0; i < plen / 4; i ++) buf[i] = FWOHCI_DMA_READ(sc->sid_buf[i+1]); /* pending all pre-bus_reset packets */ fwohci_txd(sc, &sc->atrq); fwohci_txd(sc, &sc->atrs); fwohci_arcv(sc, &sc->arrs, -1); fwohci_arcv(sc, &sc->arrq, -1); fw_drain_txq(fc); fw_sidrcv(fc, buf, plen); free(buf, M_FW); } static void fwohci_task_dma(void *arg, int pending) { struct fwohci_softc *sc = (struct fwohci_softc *)arg; uint32_t stat; again: stat = atomic_readandclear_int(&sc->intstat); if (stat) fwohci_intr_dma(sc, stat, -1); else return; goto again; } static int fwohci_check_stat(struct fwohci_softc *sc) { uint32_t stat, irstat, itstat; FW_GLOCK_ASSERT(&sc->fc); stat = OREAD(sc, FWOHCI_INTSTAT); if (stat == 0xffffffff) { if (!bus_child_present(sc->fc.dev)) return (FILTER_HANDLED); device_printf(sc->fc.dev, "device physically ejected?\n"); return (FILTER_STRAY); } if (stat) OWRITE(sc, FWOHCI_INTSTATCLR, stat & ~OHCI_INT_PHY_BUS_R); stat &= sc->intmask; if (stat == 0) return (FILTER_STRAY); atomic_set_int(&sc->intstat, stat); if (stat & OHCI_INT_DMA_IR) { irstat = OREAD(sc, OHCI_IR_STAT); OWRITE(sc, OHCI_IR_STATCLR, irstat); atomic_set_int(&sc->irstat, irstat); } if (stat & OHCI_INT_DMA_IT) { itstat = OREAD(sc, OHCI_IT_STAT); OWRITE(sc, OHCI_IT_STATCLR, itstat); atomic_set_int(&sc->itstat, itstat); } fwohci_intr_core(sc, stat, -1); return (FILTER_HANDLED); } void fwohci_intr(void *arg) { struct fwohci_softc *sc = (struct fwohci_softc *)arg; FW_GLOCK(&sc->fc); fwohci_check_stat(sc); FW_GUNLOCK(&sc->fc); } void fwohci_poll(struct firewire_comm *fc, int quick, int count) { struct fwohci_softc *sc = (struct fwohci_softc *)fc; FW_GLOCK(fc); fwohci_check_stat(sc); FW_GUNLOCK(fc); } static void fwohci_set_intr(struct firewire_comm *fc, int enable) { struct fwohci_softc *sc; sc = (struct fwohci_softc *)fc; if (firewire_debug) 
device_printf(sc->fc.dev, "fwohci_set_intr: %d\n", enable); if (enable) { sc->intmask |= OHCI_INT_EN; OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_EN); } else { sc->intmask &= ~OHCI_INT_EN; OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_EN); } } static void fwohci_tbuf_update(struct fwohci_softc *sc, int dmach) { struct firewire_comm *fc = &sc->fc; struct fwohcidb *db; struct fw_bulkxfer *chunk; struct fw_xferq *it; uint32_t stat, count; int s, w=0, ldesc; it = fc->it[dmach]; ldesc = sc->it[dmach].ndesc - 1; s = splfw(); /* unnecessary ? */ FW_GLOCK(fc); fwdma_sync_multiseg_all(sc->it[dmach].am, BUS_DMASYNC_POSTREAD); if (firewire_debug) dump_db(sc, ITX_CH + dmach); while ((chunk = STAILQ_FIRST(&it->stdma)) != NULL) { db = ((struct fwohcidb_tr *)(chunk->end))->db; stat = FWOHCI_DMA_READ(db[ldesc].db.desc.res) >> OHCI_STATUS_SHIFT; db = ((struct fwohcidb_tr *)(chunk->start))->db; /* timestamp */ count = FWOHCI_DMA_READ(db[ldesc].db.desc.res) & OHCI_COUNT_MASK; if (stat == 0) break; STAILQ_REMOVE_HEAD(&it->stdma, link); switch (stat & FWOHCIEV_MASK){ case FWOHCIEV_ACKCOMPL: #if 0 device_printf(fc->dev, "0x%08x\n", count); #endif break; default: device_printf(fc->dev, "Isochronous transmit err %02x(%s)\n", stat, fwohcicode[stat & 0x1f]); } STAILQ_INSERT_TAIL(&it->stfree, chunk, link); w++; } FW_GUNLOCK(fc); splx(s); if (w) wakeup(it); } static void fwohci_rbuf_update(struct fwohci_softc *sc, int dmach) { struct firewire_comm *fc = &sc->fc; struct fwohcidb_tr *db_tr; struct fw_bulkxfer *chunk; struct fw_xferq *ir; uint32_t stat; int s, w = 0, ldesc; ir = fc->ir[dmach]; ldesc = sc->ir[dmach].ndesc - 1; #if 0 dump_db(sc, dmach); #endif s = splfw(); if ((ir->flag & FWXFERQ_HANDLER) == 0) FW_GLOCK(fc); fwdma_sync_multiseg_all(sc->ir[dmach].am, BUS_DMASYNC_POSTREAD); while ((chunk = STAILQ_FIRST(&ir->stdma)) != NULL) { db_tr = (struct fwohcidb_tr *)chunk->end; stat = FWOHCI_DMA_READ(db_tr->db[ldesc].db.desc.res) >> OHCI_STATUS_SHIFT; if (stat == 0) break; if (chunk->mbuf != NULL) { bus_dmamap_sync(sc->ir[dmach].dmat, db_tr->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->ir[dmach].dmat, db_tr->dma_map); } else if (ir->buf != NULL) { fwdma_sync_multiseg(ir->buf, chunk->poffset, ir->bnpacket, BUS_DMASYNC_POSTREAD); } else { /* XXX */ printf("fwohci_rbuf_update: this shouldn't happend\n"); } STAILQ_REMOVE_HEAD(&ir->stdma, link); STAILQ_INSERT_TAIL(&ir->stvalid, chunk, link); switch (stat & FWOHCIEV_MASK) { case FWOHCIEV_ACKCOMPL: chunk->resp = 0; break; default: chunk->resp = EINVAL; device_printf(fc->dev, "Isochronous receive err %02x(%s)\n", stat, fwohcicode[stat & 0x1f]); } w++; } if ((ir->flag & FWXFERQ_HANDLER) == 0) FW_GUNLOCK(fc); splx(s); if (w == 0) return; if (ir->flag & FWXFERQ_HANDLER) ir->hand(ir); else wakeup(ir); } void dump_dma(struct fwohci_softc *sc, uint32_t ch) { uint32_t off, cntl, stat, cmd, match; if(ch == 0){ off = OHCI_ATQOFF; }else if(ch == 1){ off = OHCI_ATSOFF; }else if(ch == 2){ off = OHCI_ARQOFF; }else if(ch == 3){ off = OHCI_ARSOFF; }else if(ch < IRX_CH){ off = OHCI_ITCTL(ch - ITX_CH); }else{ off = OHCI_IRCTL(ch - IRX_CH); } cntl = stat = OREAD(sc, off); cmd = OREAD(sc, off + 0xc); match = OREAD(sc, off + 0x10); device_printf(sc->fc.dev, "ch %1x cntl:0x%08x cmd:0x%08x match:0x%08x\n", ch, cntl, cmd, match); stat &= 0xffff ; if (stat) { device_printf(sc->fc.dev, "dma %d ch:%s%s%s%s%s%s %s(%x)\n", ch, stat & OHCI_CNTL_DMA_RUN ? "RUN," : "", stat & OHCI_CNTL_DMA_WAKE ? "WAKE," : "", stat & OHCI_CNTL_DMA_DEAD ? "DEAD," : "", stat & OHCI_CNTL_DMA_ACTIVE ? 
"ACTIVE," : "", stat & OHCI_CNTL_DMA_BT ? "BRANCH," : "", stat & OHCI_CNTL_DMA_BAD ? "BADDMA," : "", fwohcicode[stat & 0x1f], stat & 0x1f ); }else{ device_printf(sc->fc.dev, "dma %d ch: Nostat\n", ch); } } void dump_db(struct fwohci_softc *sc, uint32_t ch) { struct fwohci_dbch *dbch; struct fwohcidb_tr *cp = NULL, *pp, *np = NULL; struct fwohcidb *curr = NULL, *prev, *next = NULL; int idb, jdb; uint32_t cmd, off; if(ch == 0){ off = OHCI_ATQOFF; dbch = &sc->atrq; }else if(ch == 1){ off = OHCI_ATSOFF; dbch = &sc->atrs; }else if(ch == 2){ off = OHCI_ARQOFF; dbch = &sc->arrq; }else if(ch == 3){ off = OHCI_ARSOFF; dbch = &sc->arrs; }else if(ch < IRX_CH){ off = OHCI_ITCTL(ch - ITX_CH); dbch = &sc->it[ch - ITX_CH]; }else { off = OHCI_IRCTL(ch - IRX_CH); dbch = &sc->ir[ch - IRX_CH]; } cmd = OREAD(sc, off + 0xc); if( dbch->ndb == 0 ){ device_printf(sc->fc.dev, "No DB is attached ch=%d\n", ch); return; } pp = dbch->top; prev = pp->db; for(idb = 0 ; idb < dbch->ndb ; idb ++ ){ cp = STAILQ_NEXT(pp, link); if(cp == NULL){ curr = NULL; goto outdb; } np = STAILQ_NEXT(cp, link); for(jdb = 0 ; jdb < dbch->ndesc ; jdb ++ ){ if ((cmd & 0xfffffff0) == cp->bus_addr) { curr = cp->db; if(np != NULL){ next = np->db; }else{ next = NULL; } goto outdb; } } pp = STAILQ_NEXT(pp, link); if(pp == NULL){ curr = NULL; goto outdb; } prev = pp->db; } outdb: if( curr != NULL){ #if 0 printf("Prev DB %d\n", ch); print_db(pp, prev, ch, dbch->ndesc); #endif printf("Current DB %d\n", ch); print_db(cp, curr, ch, dbch->ndesc); #if 0 printf("Next DB %d\n", ch); print_db(np, next, ch, dbch->ndesc); #endif }else{ printf("dbdump err ch = %d cmd = 0x%08x\n", ch, cmd); } return; } void print_db(struct fwohcidb_tr *db_tr, struct fwohcidb *db, uint32_t ch, uint32_t max) { fwohcireg_t stat; int i, key; uint32_t cmd, res; if(db == NULL){ printf("No Descriptor is found\n"); return; } printf("ch = %d\n%8s %s %s %s %s %4s %8s %8s %4s:%4s\n", ch, "Current", "OP ", "KEY", "INT", "BR ", "len", "Addr", "Depend", "Stat", "Cnt"); for( i = 0 ; i <= max ; i ++){ cmd = FWOHCI_DMA_READ(db[i].db.desc.cmd); res = FWOHCI_DMA_READ(db[i].db.desc.res); key = cmd & OHCI_KEY_MASK; stat = res >> OHCI_STATUS_SHIFT; #if defined(__DragonFly__) || __FreeBSD_version < 500000 printf("%08x %s %s %s %s %5d %08x %08x %04x:%04x", db_tr->bus_addr, #else printf("%08jx %s %s %s %s %5d %08x %08x %04x:%04x", (uintmax_t)db_tr->bus_addr, #endif dbcode[(cmd >> 28) & 0xf], dbkey[(cmd >> 24) & 0x7], dbcond[(cmd >> 20) & 0x3], dbcond[(cmd >> 18) & 0x3], cmd & OHCI_COUNT_MASK, FWOHCI_DMA_READ(db[i].db.desc.addr), FWOHCI_DMA_READ(db[i].db.desc.depend), stat, res & OHCI_COUNT_MASK); if(stat & 0xff00){ printf(" %s%s%s%s%s%s %s(%x)\n", stat & OHCI_CNTL_DMA_RUN ? "RUN," : "", stat & OHCI_CNTL_DMA_WAKE ? "WAKE," : "", stat & OHCI_CNTL_DMA_DEAD ? "DEAD," : "", stat & OHCI_CNTL_DMA_ACTIVE ? "ACTIVE," : "", stat & OHCI_CNTL_DMA_BT ? "BRANCH," : "", stat & OHCI_CNTL_DMA_BAD ? 
"BADDMA," : "", fwohcicode[stat & 0x1f], stat & 0x1f ); }else{ printf(" Nostat\n"); } if(key == OHCI_KEY_ST2 ){ printf("0x%08x 0x%08x 0x%08x 0x%08x\n", FWOHCI_DMA_READ(db[i+1].db.immed[0]), FWOHCI_DMA_READ(db[i+1].db.immed[1]), FWOHCI_DMA_READ(db[i+1].db.immed[2]), FWOHCI_DMA_READ(db[i+1].db.immed[3])); } if(key == OHCI_KEY_DEVICE){ return; } if((cmd & OHCI_BRANCH_MASK) == OHCI_BRANCH_ALWAYS){ return; } if((cmd & OHCI_CMD_MASK) == OHCI_OUTPUT_LAST){ return; } if((cmd & OHCI_CMD_MASK) == OHCI_INPUT_LAST){ return; } if(key == OHCI_KEY_ST2 ){ i++; } } return; } void fwohci_ibr(struct firewire_comm *fc) { struct fwohci_softc *sc; uint32_t fun; device_printf(fc->dev, "Initiate bus reset\n"); sc = (struct fwohci_softc *)fc; FW_GLOCK(fc); /* * Make sure our cached values from the config rom are * initialised. */ OWRITE(sc, OHCI_CROMHDR, ntohl(sc->fc.config_rom[0])); OWRITE(sc, OHCI_BUS_OPT, ntohl(sc->fc.config_rom[2])); /* * Set root hold-off bit so that non cyclemaster capable node * shouldn't became the root node. */ #if 1 fun = fwphy_rddata(sc, FW_PHY_IBR_REG); fun |= FW_PHY_IBR | FW_PHY_RHB; fun = fwphy_wrdata(sc, FW_PHY_IBR_REG, fun); #else /* Short bus reset */ fun = fwphy_rddata(sc, FW_PHY_ISBR_REG); fun |= FW_PHY_ISBR | FW_PHY_RHB; fun = fwphy_wrdata(sc, FW_PHY_ISBR_REG, fun); #endif FW_GUNLOCK(fc); } void fwohci_txbufdb(struct fwohci_softc *sc, int dmach, struct fw_bulkxfer *bulkxfer) { struct fwohcidb_tr *db_tr, *fdb_tr; struct fwohci_dbch *dbch; struct fwohcidb *db; struct fw_pkt *fp; struct fwohci_txpkthdr *ohcifp; unsigned short chtag; int idb; FW_GLOCK_ASSERT(&sc->fc); dbch = &sc->it[dmach]; chtag = sc->it[dmach].xferq.flag & 0xff; db_tr = (struct fwohcidb_tr *)(bulkxfer->start); fdb_tr = (struct fwohcidb_tr *)(bulkxfer->end); /* device_printf(sc->fc.dev, "DB %08x %08x %08x\n", bulkxfer, db_tr->bus_addr, fdb_tr->bus_addr); */ for (idb = 0; idb < dbch->xferq.bnpacket; idb ++) { db = db_tr->db; fp = (struct fw_pkt *)db_tr->buf; ohcifp = (struct fwohci_txpkthdr *) db[1].db.immed; ohcifp->mode.ld[0] = fp->mode.ld[0]; ohcifp->mode.common.spd = 0 & 0x7; ohcifp->mode.stream.len = fp->mode.stream.len; ohcifp->mode.stream.chtag = chtag; ohcifp->mode.stream.tcode = 0xa; #if BYTE_ORDER == BIG_ENDIAN FWOHCI_DMA_WRITE(db[1].db.immed[0], db[1].db.immed[0]); FWOHCI_DMA_WRITE(db[1].db.immed[1], db[1].db.immed[1]); #endif FWOHCI_DMA_CLEAR(db[2].db.desc.cmd, OHCI_COUNT_MASK); FWOHCI_DMA_SET(db[2].db.desc.cmd, fp->mode.stream.len); FWOHCI_DMA_WRITE(db[2].db.desc.res, 0); #if 0 /* if bulkxfer->npackets changes */ db[2].db.desc.cmd = OHCI_OUTPUT_LAST | OHCI_UPDATE | OHCI_BRANCH_ALWAYS; db[0].db.desc.depend = = db[dbch->ndesc - 1].db.desc.depend = STAILQ_NEXT(db_tr, link)->bus_addr | dbch->ndesc; #else FWOHCI_DMA_SET(db[0].db.desc.depend, dbch->ndesc); FWOHCI_DMA_SET(db[dbch->ndesc - 1].db.desc.depend, dbch->ndesc); #endif bulkxfer->end = (caddr_t)db_tr; db_tr = STAILQ_NEXT(db_tr, link); } db = ((struct fwohcidb_tr *)bulkxfer->end)->db; FWOHCI_DMA_CLEAR(db[0].db.desc.depend, 0xf); FWOHCI_DMA_CLEAR(db[dbch->ndesc - 1].db.desc.depend, 0xf); #if 0 /* if bulkxfer->npackets changes */ db[dbch->ndesc - 1].db.desc.control |= OHCI_INTERRUPT_ALWAYS; /* OHCI 1.1 and above */ db[0].db.desc.control |= OHCI_INTERRUPT_ALWAYS; #endif /* db_tr = (struct fwohcidb_tr *)bulkxfer->start; fdb_tr = (struct fwohcidb_tr *)bulkxfer->end; device_printf(sc->fc.dev, "DB %08x %3d %08x %08x\n", bulkxfer, bulkxfer->npacket, db_tr->bus_addr, fdb_tr->bus_addr); */ return; } static int fwohci_add_tx_buf(struct fwohci_dbch *dbch, struct 
fwohcidb_tr *db_tr, int poffset) { struct fwohcidb *db = db_tr->db; struct fw_xferq *it; int err = 0; it = &dbch->xferq; if(it->buf == 0){ err = EINVAL; return err; } db_tr->buf = fwdma_v_addr(it->buf, poffset); db_tr->dbcnt = 3; FWOHCI_DMA_WRITE(db[0].db.desc.cmd, OHCI_OUTPUT_MORE | OHCI_KEY_ST2 | 8); FWOHCI_DMA_WRITE(db[0].db.desc.addr, 0); bzero((void *)&db[1].db.immed[0], sizeof(db[1].db.immed)); FWOHCI_DMA_WRITE(db[2].db.desc.addr, fwdma_bus_addr(it->buf, poffset) + sizeof(uint32_t)); FWOHCI_DMA_WRITE(db[2].db.desc.cmd, OHCI_OUTPUT_LAST | OHCI_UPDATE | OHCI_BRANCH_ALWAYS); #if 1 FWOHCI_DMA_WRITE(db[0].db.desc.res, 0); FWOHCI_DMA_WRITE(db[2].db.desc.res, 0); #endif return 0; } int fwohci_add_rx_buf(struct fwohci_dbch *dbch, struct fwohcidb_tr *db_tr, int poffset, struct fwdma_alloc *dummy_dma) { struct fwohcidb *db = db_tr->db; struct fw_xferq *ir; int i, ldesc; bus_addr_t dbuf[2]; int dsiz[2]; ir = &dbch->xferq; if (ir->buf == NULL && (dbch->xferq.flag & FWXFERQ_EXTBUF) == 0) { if (db_tr->buf == NULL) { db_tr->buf = fwdma_malloc_size(dbch->dmat, &db_tr->dma_map, ir->psize, &dbuf[0], BUS_DMA_NOWAIT); if (db_tr->buf == NULL) return(ENOMEM); } db_tr->dbcnt = 1; dsiz[0] = ir->psize; bus_dmamap_sync(dbch->dmat, db_tr->dma_map, BUS_DMASYNC_PREREAD); } else { db_tr->dbcnt = 0; if (dummy_dma != NULL) { dsiz[db_tr->dbcnt] = sizeof(uint32_t); dbuf[db_tr->dbcnt++] = dummy_dma->bus_addr; } dsiz[db_tr->dbcnt] = ir->psize; if (ir->buf != NULL) { db_tr->buf = fwdma_v_addr(ir->buf, poffset); dbuf[db_tr->dbcnt] = fwdma_bus_addr( ir->buf, poffset); } db_tr->dbcnt++; } for(i = 0 ; i < db_tr->dbcnt ; i++){ FWOHCI_DMA_WRITE(db[i].db.desc.addr, dbuf[i]); FWOHCI_DMA_WRITE(db[i].db.desc.cmd, OHCI_INPUT_MORE | dsiz[i]); if (ir->flag & FWXFERQ_STREAM) { FWOHCI_DMA_SET(db[i].db.desc.cmd, OHCI_UPDATE); } FWOHCI_DMA_WRITE(db[i].db.desc.res, dsiz[i]); } ldesc = db_tr->dbcnt - 1; if (ir->flag & FWXFERQ_STREAM) { FWOHCI_DMA_SET(db[ldesc].db.desc.cmd, OHCI_INPUT_LAST); } FWOHCI_DMA_SET(db[ldesc].db.desc.cmd, OHCI_BRANCH_ALWAYS); return 0; } static int fwohci_arcv_swap(struct fw_pkt *fp, int len) { struct fw_pkt *fp0; uint32_t ld0; int slen, hlen; #if BYTE_ORDER == BIG_ENDIAN int i; #endif ld0 = FWOHCI_DMA_READ(fp->mode.ld[0]); #if 0 printf("ld0: x%08x\n", ld0); #endif fp0 = (struct fw_pkt *)&ld0; /* determine length to swap */ switch (fp0->mode.common.tcode) { case FWTCODE_RREQQ: case FWTCODE_WRES: case FWTCODE_WREQQ: case FWTCODE_RRESQ: case FWOHCITCODE_PHY: slen = 12; break; case FWTCODE_RREQB: case FWTCODE_WREQB: case FWTCODE_LREQ: case FWTCODE_RRESB: case FWTCODE_LRES: slen = 16; break; default: printf("Unknown tcode %d\n", fp0->mode.common.tcode); return(0); } hlen = tinfo[fp0->mode.common.tcode].hdr_len; if (hlen > len) { if (firewire_debug) printf("splitted header\n"); return(-hlen); } #if BYTE_ORDER == BIG_ENDIAN for(i = 0; i < slen/4; i ++) fp->mode.ld[i] = FWOHCI_DMA_READ(fp->mode.ld[i]); #endif return(hlen); } static int fwohci_get_plen(struct fwohci_softc *sc, struct fwohci_dbch *dbch, struct fw_pkt *fp) { struct tcode_info *info; int r; info = &tinfo[fp->mode.common.tcode]; r = info->hdr_len + sizeof(uint32_t); if ((info->flag & FWTI_BLOCK_ASY) != 0) r += roundup2(fp->mode.wreqb.len, sizeof(uint32_t)); if (r == sizeof(uint32_t)) { /* XXX */ device_printf(sc->fc.dev, "Unknown tcode %d\n", fp->mode.common.tcode); return (-1); } if (r > dbch->xferq.psize) { device_printf(sc->fc.dev, "Invalid packet length %d\n", r); return (-1); /* panic ? 
		 */
	}
	return r;
}

static void
fwohci_arcv_free_buf(struct fwohci_softc *sc, struct fwohci_dbch *dbch,
    struct fwohcidb_tr *db_tr, uint32_t off, int wake)
{
	struct fwohcidb *db = &db_tr->db[0];

	FWOHCI_DMA_CLEAR(db->db.desc.depend, 0xf);
	FWOHCI_DMA_WRITE(db->db.desc.res, dbch->xferq.psize);
	FWOHCI_DMA_SET(dbch->bottom->db[0].db.desc.depend, 1);
	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
	dbch->bottom = db_tr;

	if (wake)
		OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_WAKE);
}

static void
fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
{
	struct fwohcidb_tr *db_tr;
	struct iovec vec[2];
	struct fw_pkt pktbuf;
	int nvec;
	struct fw_pkt *fp;
	uint8_t *ld;
	uint32_t stat, off, status, event;
	u_int spd;
	int len, plen, hlen, pcnt, offset;
	int s;
	caddr_t buf;
	int resCount;

	if(&sc->arrq == dbch){
		off = OHCI_ARQOFF;
	}else if(&sc->arrs == dbch){
		off = OHCI_ARSOFF;
	}else{
		return;
	}

	s = splfw();
	db_tr = dbch->top;
	pcnt = 0;
	/* XXX we cannot handle a packet which lies in more than two buf */
	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTREAD);
	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTWRITE);
	status = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res) >> OHCI_STATUS_SHIFT;
	resCount = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res) & OHCI_COUNT_MASK;
	while (status & OHCI_CNTL_DMA_ACTIVE) {
#if 0
		if (off == OHCI_ARQOFF)
			printf("buf 0x%08x, status 0x%04x, resCount 0x%04x\n",
			    db_tr->bus_addr, status, resCount);
#endif
		len = dbch->xferq.psize - resCount;
		ld = (uint8_t *)db_tr->buf;
		if (dbch->pdb_tr == NULL) {
			len -= dbch->buf_offset;
			ld += dbch->buf_offset;
		}
		if (len > 0)
			bus_dmamap_sync(dbch->dmat, db_tr->dma_map,
					BUS_DMASYNC_POSTREAD);
		while (len > 0 ) {
			if (count >= 0 && count-- == 0)
				goto out;
			if(dbch->pdb_tr != NULL){
				/* we have a fragment in the previous buffer */
				int rlen;

				offset = dbch->buf_offset;
				if (offset < 0)
					offset = - offset;
				buf = dbch->pdb_tr->buf + offset;
				rlen = dbch->xferq.psize - offset;
				if (firewire_debug)
					printf("rlen=%d, offset=%d\n",
						rlen, dbch->buf_offset);
				if (dbch->buf_offset < 0) {
					/* split in the header; pull it up */
					char *p;

					p = (char *)&pktbuf;
					bcopy(buf, p, rlen);
					p += rlen;
					/* this must be too long but harmless */
					rlen = sizeof(pktbuf) - rlen;
					if (rlen < 0)
						printf("why rlen < 0\n");
					bcopy(db_tr->buf, p, rlen);
					ld += rlen;
					len -= rlen;
					hlen = fwohci_arcv_swap(&pktbuf,
							sizeof(pktbuf));
					if (hlen <= 0) {
						printf("hlen should be positive.");
						goto err;
					}
					offset = sizeof(pktbuf);
					vec[0].iov_base = (char *)&pktbuf;
					vec[0].iov_len = offset;
				} else {
					/* split in the payload */
					offset = rlen;
					vec[0].iov_base = buf;
					vec[0].iov_len = rlen;
				}
				fp=(struct fw_pkt *)vec[0].iov_base;
				nvec = 1;
			} else {
				/* no fragment in the previous buffer */
				fp=(struct fw_pkt *)ld;
				hlen = fwohci_arcv_swap(fp, len);
				if (hlen == 0)
					goto err;
				if (hlen < 0) {
					dbch->pdb_tr = db_tr;
					dbch->buf_offset = - dbch->buf_offset;
					/* sanity check */
					if (resCount != 0) {
						printf("resCount=%d hlen=%d\n",
						    resCount, hlen);
						goto err;
					}
					goto out;
				}
				offset = 0;
				nvec = 0;
			}
			plen = fwohci_get_plen(sc, dbch, fp) - offset;
			if (plen < 0) {
				/* minimum header size + trailer =
				 * sizeof(fw_pkt), so this shouldn't happen */
				printf("plen(%d) is negative!
offset=%d\n", plen, offset); goto err; } if (plen > 0) { len -= plen; if (len < 0) { dbch->pdb_tr = db_tr; if (firewire_debug) printf("splitted payload\n"); /* sanity check */ if (resCount != 0) { printf("resCount=%d plen=%d" " len=%d\n", resCount, plen, len); goto err; } goto out; } vec[nvec].iov_base = ld; vec[nvec].iov_len = plen; nvec ++; ld += plen; } dbch->buf_offset = ld - (uint8_t *)db_tr->buf; if (nvec == 0) printf("nvec == 0\n"); /* DMA result-code will be written at the tail of packet */ stat = FWOHCI_DMA_READ(*(uint32_t *)(ld - sizeof(struct fwohci_trailer))); #if 0 printf("plen: %d, stat %x\n", plen ,stat); #endif spd = (stat >> 21) & 0x3; event = (stat >> 16) & 0x1f; switch (event) { case FWOHCIEV_ACKPEND: #if 0 printf("fwohci_arcv: ack pending tcode=0x%x..\n", fp->mode.common.tcode); #endif /* fall through */ case FWOHCIEV_ACKCOMPL: { struct fw_rcv_buf rb; if ((vec[nvec-1].iov_len -= sizeof(struct fwohci_trailer)) == 0) nvec--; rb.fc = &sc->fc; rb.vec = vec; rb.nvec = nvec; rb.spd = spd; fw_rcv(&rb); break; } case FWOHCIEV_BUSRST: if ((sc->fc.status != FWBUSRESET) && (sc->fc.status != FWBUSINIT)) printf("got BUSRST packet!?\n"); break; default: device_printf(sc->fc.dev, "Async DMA Receive error err=%02x %s" " plen=%d offset=%d len=%d status=0x%08x" " tcode=0x%x, stat=0x%08x\n", event, fwohcicode[event], plen, dbch->buf_offset, len, OREAD(sc, OHCI_DMACTL(off)), fp->mode.common.tcode, stat); #if 1 /* XXX */ goto err; #endif break; } pcnt ++; if (dbch->pdb_tr != NULL) { fwohci_arcv_free_buf(sc, dbch, dbch->pdb_tr, off, 1); dbch->pdb_tr = NULL; } } out: if (resCount == 0) { /* done on this buffer */ if (dbch->pdb_tr == NULL) { fwohci_arcv_free_buf(sc, dbch, db_tr, off, 1); dbch->buf_offset = 0; } else if (dbch->pdb_tr != db_tr) printf("pdb_tr != db_tr\n"); db_tr = STAILQ_NEXT(db_tr, link); status = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res) >> OHCI_STATUS_SHIFT; resCount = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res) & OHCI_COUNT_MASK; /* XXX check buffer overrun */ dbch->top = db_tr; } else { dbch->buf_offset = dbch->xferq.psize - resCount; break; } /* XXX make sure DMA is not dead */ } #if 0 if (pcnt < 1) printf("fwohci_arcv: no packets\n"); #endif splx(s); return; err: device_printf(sc->fc.dev, "AR DMA status=%x, ", OREAD(sc, OHCI_DMACTL(off))); dbch->pdb_tr = NULL; /* skip until resCount != 0 */ printf(" skip buffer"); while (resCount == 0) { printf(" #"); fwohci_arcv_free_buf(sc, dbch, db_tr, off, 0); db_tr = STAILQ_NEXT(db_tr, link); resCount = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res) & OHCI_COUNT_MASK; } printf(" done\n"); dbch->top = db_tr; dbch->buf_offset = dbch->xferq.psize - resCount; OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_WAKE); splx(s); } Index: head/sys/dev/firewire/fwohcireg.h =================================================================== --- head/sys/dev/firewire/fwohcireg.h (revision 258779) +++ head/sys/dev/firewire/fwohcireg.h (revision 258780) @@ -1,447 +1,447 @@ /*- * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #define PCI_CBMEM PCIR_BAR(0) #define FW_VENDORID_NATSEMI 0x100B #define FW_VENDORID_NEC 0x1033 #define FW_VENDORID_SIS 0x1039 #define FW_VENDORID_TI 0x104c #define FW_VENDORID_SONY 0x104d #define FW_VENDORID_VIA 0x1106 #define FW_VENDORID_RICOH 0x1180 #define FW_VENDORID_APPLE 0x106b #define FW_VENDORID_LUCENT 0x11c1 #define FW_VENDORID_INTEL 0x8086 #define FW_VENDORID_ADAPTEC 0x9004 #define FW_VENDORID_SUN 0x108e #define FW_DEVICE_CS4210 (0x000f << 16) #define FW_DEVICE_UPD861 (0x0063 << 16) #define FW_DEVICE_UPD871 (0x00ce << 16) #define FW_DEVICE_UPD72870 (0x00cd << 16) #define FW_DEVICE_UPD72873 (0x00e7 << 16) #define FW_DEVICE_UPD72874 (0x00f2 << 16) #define FW_DEVICE_TITSB22 (0x8009 << 16) #define FW_DEVICE_TITSB23 (0x8019 << 16) #define FW_DEVICE_TITSB26 (0x8020 << 16) #define FW_DEVICE_TITSB43 (0x8021 << 16) #define FW_DEVICE_TITSB43A (0x8023 << 16) #define FW_DEVICE_TITSB43AB23 (0x8024 << 16) #define FW_DEVICE_TITSB82AA2 (0x8025 << 16) #define FW_DEVICE_TITSB43AB21 (0x8026 << 16) #define FW_DEVICE_TIPCI4410A (0x8017 << 16) #define FW_DEVICE_TIPCI4450 (0x8011 << 16) #define FW_DEVICE_TIPCI4451 (0x8027 << 16) #define FW_DEVICE_CXD1947 (0x8009 << 16) #define FW_DEVICE_CXD3222 (0x8039 << 16) #define FW_DEVICE_VT6306 (0x3044 << 16) #define FW_DEVICE_R5C551 (0x0551 << 16) #define FW_DEVICE_R5C552 (0x0552 << 16) #define FW_DEVICE_PANGEA (0x0030 << 16) #define FW_DEVICE_UNINORTH (0x0031 << 16) #define FW_DEVICE_AIC5800 (0x5800 << 16) #define FW_DEVICE_FW322 (0x5811 << 16) #define FW_DEVICE_7007 (0x7007 << 16) #define FW_DEVICE_82372FB (0x7605 << 16) #define FW_DEVICE_PCIO2FW (0x1102 << 16) #define PCI_INTERFACE_OHCI 0x10 #define FW_OHCI_BASE_REG 0x10 #define OHCI_DMA_ITCH 0x20 #define OHCI_DMA_IRCH 0x20 #define OHCI_MAX_DMA_CH (0x4 + OHCI_DMA_ITCH + OHCI_DMA_IRCH) typedef uint32_t fwohcireg_t; /* for PCI */ #if BYTE_ORDER == BIG_ENDIAN #define FWOHCI_DMA_WRITE(x, y) ((x) = htole32(y)) #define FWOHCI_DMA_READ(x) le32toh(x) #define FWOHCI_DMA_SET(x, y) ((x) |= htole32(y)) #define FWOHCI_DMA_CLEAR(x, y) ((x) &= htole32(~(y))) #else #define FWOHCI_DMA_WRITE(x, y) ((x) = (y)) #define FWOHCI_DMA_READ(x) (x) #define FWOHCI_DMA_SET(x, y) ((x) |= (y)) #define FWOHCI_DMA_CLEAR(x, y) ((x) &= ~(y)) #endif struct fwohcidb { 
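	/*
	 * Editor's note (not part of the original header): each fwohcidb
	 * maps one 16-byte OHCI DMA descriptor.  desc.cmd packs the command,
	 * key, interrupt/branch/wait controls and the request count;
	 * desc.addr is the data buffer address; desc.depend holds the branch
	 * address with the Z (descriptor block size) value in its low four
	 * bits (hence the FWOHCI_DMA_CLEAR(..., 0xf) in fwohci.c); desc.res
	 * is written back by the controller with xferStatus in the upper 16
	 * bits and resCount in the lower 16 (see OHCI_STATUS_SHIFT and
	 * OHCI_COUNT_MASK below).  Descriptors are little-endian in DMA
	 * memory, so all accesses go through the FWOHCI_DMA_* macros above.
	 */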
	union {
		struct {
			uint32_t cmd;
			uint32_t addr;
			uint32_t depend;
			uint32_t res;
		} desc;
		uint32_t immed[4];
	} db;
#define OHCI_STATUS_SHIFT	16
#define OHCI_COUNT_MASK		0xffff
#define OHCI_OUTPUT_MORE	(0 << 28)
#define OHCI_OUTPUT_LAST	(1 << 28)
#define OHCI_INPUT_MORE		(2 << 28)
#define OHCI_INPUT_LAST		(3 << 28)
#define OHCI_STORE_QUAD		(4 << 28)
#define OHCI_LOAD_QUAD		(5 << 28)
#define OHCI_NOP		(6 << 28)
#define OHCI_STOP		(7 << 28)
#define OHCI_STORE		(8 << 28)
#define OHCI_CMD_MASK		(0xf << 28)
#define OHCI_UPDATE		(1 << 27)
#define OHCI_KEY_ST0		(0 << 24)
#define OHCI_KEY_ST1		(1 << 24)
#define OHCI_KEY_ST2		(2 << 24)
#define OHCI_KEY_ST3		(3 << 24)
#define OHCI_KEY_REGS		(5 << 24)
#define OHCI_KEY_SYS		(6 << 24)
#define OHCI_KEY_DEVICE		(7 << 24)
#define OHCI_KEY_MASK		(7 << 24)
#define OHCI_INTERRUPT_NEVER	(0 << 20)
#define OHCI_INTERRUPT_TRUE	(1 << 20)
#define OHCI_INTERRUPT_FALSE	(2 << 20)
#define OHCI_INTERRUPT_ALWAYS	(3 << 20)
#define OHCI_BRANCH_NEVER	(0 << 18)
#define OHCI_BRANCH_TRUE	(1 << 18)
#define OHCI_BRANCH_FALSE	(2 << 18)
#define OHCI_BRANCH_ALWAYS	(3 << 18)
#define OHCI_BRANCH_MASK	(3 << 18)
#define OHCI_WAIT_NEVER		(0 << 16)
#define OHCI_WAIT_TRUE		(1 << 16)
#define OHCI_WAIT_FALSE		(2 << 16)
#define OHCI_WAIT_ALWAYS	(3 << 16)
};

#define OHCI_SPD_S100	0x4
#define OHCI_SPD_S200	0x1
#define OHCI_SPD_S400	0x2

#define FWOHCIEV_NOSTAT		0
#define FWOHCIEV_LONGP		2
#define FWOHCIEV_MISSACK	3
#define FWOHCIEV_UNDRRUN	4
#define FWOHCIEV_OVRRUN		5
#define FWOHCIEV_DESCERR	6
#define FWOHCIEV_DTRDERR	7
#define FWOHCIEV_DTWRERR	8
#define FWOHCIEV_BUSRST		9
#define FWOHCIEV_TIMEOUT	0xa
#define FWOHCIEV_TCODERR	0xb
#define FWOHCIEV_UNKNOWN	0xe
#define FWOHCIEV_FLUSHED	0xf
#define FWOHCIEV_ACKCOMPL	0x11
#define FWOHCIEV_ACKPEND	0x12
#define FWOHCIEV_ACKBSX		0x14
#define FWOHCIEV_ACKBSA		0x15
#define FWOHCIEV_ACKBSB		0x16
#define FWOHCIEV_ACKTARD	0x1b
#define FWOHCIEV_ACKDERR	0x1d
#define FWOHCIEV_ACKTERR	0x1e
#define FWOHCIEV_MASK		0x1f

struct ohci_dma {
	fwohcireg_t cntl;
#define OHCI_CNTL_CYCMATCH_S	(0x1 << 31)
#define OHCI_CNTL_BUFFIL	(0x1 << 31)
#define OHCI_CNTL_ISOHDR	(0x1 << 30)
#define OHCI_CNTL_CYCMATCH_R	(0x1 << 29)
#define OHCI_CNTL_MULTICH	(0x1 << 28)
#define OHCI_CNTL_DMA_RUN	(0x1 << 15)
#define OHCI_CNTL_DMA_WAKE	(0x1 << 12)
#define OHCI_CNTL_DMA_DEAD	(0x1 << 11)
#define OHCI_CNTL_DMA_ACTIVE	(0x1 << 10)
#define OHCI_CNTL_DMA_BT	(0x1 << 8)
#define OHCI_CNTL_DMA_BAD	(0x1 << 7)
#define OHCI_CNTL_DMA_STAT	(0xff)
	fwohcireg_t cntl_clr;
	fwohcireg_t dummy0;
	fwohcireg_t cmd;
	fwohcireg_t match;
	fwohcireg_t dummy1;
	fwohcireg_t dummy2;
	fwohcireg_t dummy3;
};

struct ohci_itdma {
	fwohcireg_t cntl;
	fwohcireg_t cntl_clr;
	fwohcireg_t dummy0;
	fwohcireg_t cmd;
};

struct ohci_registers {
	fwohcireg_t ver;	/* Version No. 0x0 */
	fwohcireg_t guid;	/* GUID_ROM No. 0x4 */
	fwohcireg_t retry;	/* AT retries 0x8 */
#define FWOHCI_RETRY	0x8
	fwohcireg_t csr_data;	/* CSR data 0xc */
	fwohcireg_t csr_cmp;	/* CSR compare 0x10 */
	fwohcireg_t csr_cntl;	/* CSR control 0x14 */
	fwohcireg_t rom_hdr;	/* config ROM ptr.
0x18 */ fwohcireg_t bus_id; /* BUS_ID 0x1c */ fwohcireg_t bus_opt; /* BUS option 0x20 */ #define FWOHCIGUID_H 0x24 #define FWOHCIGUID_L 0x28 fwohcireg_t guid_hi; /* GUID hi 0x24 */ fwohcireg_t guid_lo; /* GUID lo 0x28 */ fwohcireg_t dummy0[2]; /* dummy 0x2c-0x30 */ fwohcireg_t config_rom; /* config ROM map 0x34 */ fwohcireg_t post_wr_lo; /* post write addr lo 0x38 */ fwohcireg_t post_wr_hi; /* post write addr hi 0x3c */ fwohcireg_t vender; /* vender ID 0x40 */ fwohcireg_t dummy1[3]; /* dummy 0x44-0x4c */ fwohcireg_t hcc_cntl_set; /* HCC control set 0x50 */ fwohcireg_t hcc_cntl_clr; /* HCC control clr 0x54 */ -#define OHCI_HCC_BIBIV (1 << 31) /* BIBimage Valid */ +#define OHCI_HCC_BIBIV (1U << 31) /* BIBimage Valid */ #define OHCI_HCC_BIGEND (1 << 30) /* noByteSwapData */ #define OHCI_HCC_PRPHY (1 << 23) /* programPhyEnable */ #define OHCI_HCC_PHYEN (1 << 22) /* aPhyEnhanceEnable */ #define OHCI_HCC_LPS (1 << 19) /* LPS */ #define OHCI_HCC_POSTWR (1 << 18) /* postedWriteEnable */ #define OHCI_HCC_LINKEN (1 << 17) /* linkEnable */ #define OHCI_HCC_RESET (1 << 16) /* softReset */ fwohcireg_t dummy2[2]; /* dummy 0x58-0x5c */ fwohcireg_t dummy3[1]; /* dummy 0x60 */ fwohcireg_t sid_buf; /* self id buffer 0x64 */ fwohcireg_t sid_cnt; /* self id count 0x68 */ fwohcireg_t dummy4[1]; /* dummy 0x6c */ fwohcireg_t ir_mask_hi_set; /* ir mask hi set 0x70 */ fwohcireg_t ir_mask_hi_clr; /* ir mask hi set 0x74 */ fwohcireg_t ir_mask_lo_set; /* ir mask hi set 0x78 */ fwohcireg_t ir_mask_lo_clr; /* ir mask hi set 0x7c */ #define FWOHCI_INTSTAT 0x80 #define FWOHCI_INTSTATCLR 0x84 #define FWOHCI_INTMASK 0x88 #define FWOHCI_INTMASKCLR 0x8c fwohcireg_t int_stat; /* 0x80 */ fwohcireg_t int_clear; /* 0x84 */ fwohcireg_t int_mask; /* 0x88 */ fwohcireg_t int_mask_clear; /* 0x8c */ fwohcireg_t it_int_stat; /* 0x90 */ fwohcireg_t it_int_clear; /* 0x94 */ fwohcireg_t it_int_mask; /* 0x98 */ fwohcireg_t it_mask_clear; /* 0x9c */ fwohcireg_t ir_int_stat; /* 0xa0 */ fwohcireg_t ir_int_clear; /* 0xa4 */ fwohcireg_t ir_int_mask; /* 0xa8 */ fwohcireg_t ir_mask_clear; /* 0xac */ fwohcireg_t dummy5[11]; /* dummy 0xb0-d8 */ fwohcireg_t fairness; /* fairness control 0xdc */ fwohcireg_t link_cntl; /* Chip control 0xe0*/ fwohcireg_t link_cntl_clr; /* Chip control clear 0xe4*/ #define FWOHCI_NODEID 0xe8 fwohcireg_t node; /* Node ID 0xe8 */ -#define OHCI_NODE_VALID (1 << 31) +#define OHCI_NODE_VALID (1U << 31) #define OHCI_NODE_ROOT (1 << 30) #define OHCI_ASYSRCBUS 1 fwohcireg_t phy_access; /* PHY cntl 0xec */ #define PHYDEV_RDDONE (1<<31) #define PHYDEV_RDCMD (1<<15) #define PHYDEV_WRCMD (1<<14) #define PHYDEV_REGADDR 8 #define PHYDEV_WRDATA 0 #define PHYDEV_RDADDR 24 #define PHYDEV_RDDATA 16 fwohcireg_t cycle_timer; /* Cycle Timer 0xf0 */ fwohcireg_t dummy6[3]; /* dummy 0xf4-fc */ fwohcireg_t areq_hi; /* Async req. filter hi 0x100 */ fwohcireg_t areq_hi_clr; /* Async req. filter hi 0x104 */ fwohcireg_t areq_lo; /* Async req. filter lo 0x108 */ fwohcireg_t areq_lo_clr; /* Async req. filter lo 0x10c */ fwohcireg_t preq_hi; /* Async req. filter hi 0x110 */ fwohcireg_t preq_hi_clr; /* Async req. filter hi 0x114 */ fwohcireg_t preq_lo; /* Async req. filter lo 0x118 */ fwohcireg_t preq_lo_clr; /* Async req. 
filter lo 0x11c */ fwohcireg_t pys_upper; /* Physical Upper bound 0x120 */ fwohcireg_t dummy7[23]; /* dummy 0x124-0x17c */ /* 0x180, 0x184, 0x188, 0x18c */ /* 0x190, 0x194, 0x198, 0x19c */ /* 0x1a0, 0x1a4, 0x1a8, 0x1ac */ /* 0x1b0, 0x1b4, 0x1b8, 0x1bc */ /* 0x1c0, 0x1c4, 0x1c8, 0x1cc */ /* 0x1d0, 0x1d4, 0x1d8, 0x1dc */ /* 0x1e0, 0x1e4, 0x1e8, 0x1ec */ /* 0x1f0, 0x1f4, 0x1f8, 0x1fc */ struct ohci_dma dma_ch[0x4]; /* 0x200, 0x204, 0x208, 0x20c */ /* 0x210, 0x204, 0x208, 0x20c */ struct ohci_itdma dma_itch[0x20]; /* 0x400, 0x404, 0x408, 0x40c */ /* 0x410, 0x404, 0x408, 0x40c */ struct ohci_dma dma_irch[0x20]; }; struct fwohcidb_tr{ STAILQ_ENTRY(fwohcidb_tr) link; struct fw_xfer *xfer; struct fwohcidb *db; bus_dmamap_t dma_map; caddr_t buf; bus_addr_t bus_addr; int dbcnt; }; /* * OHCI info structure. */ struct fwohci_txpkthdr{ union{ uint32_t ld[4]; struct { #if BYTE_ORDER == BIG_ENDIAN uint32_t spd:16, /* XXX include reserved field */ :8, tcode:4, :4; #else uint32_t :4, tcode:4, :8, spd:16; /* XXX include reserved fields */ #endif }common; struct { #if BYTE_ORDER == BIG_ENDIAN uint32_t :8, srcbus:1, :4, spd:3, tlrt:8, tcode:4, :4; #else uint32_t :4, tcode:4, tlrt:8, spd:3, :4, srcbus:1, :8; #endif BIT16x2(dst, ); }asycomm; struct { #if BYTE_ORDER == BIG_ENDIAN uint32_t :13, spd:3, chtag:8, tcode:4, sy:4; #else uint32_t sy:4, tcode:4, chtag:8, spd:3, :13; #endif BIT16x2(len, ); }stream; }mode; }; struct fwohci_trailer{ #if BYTE_ORDER == BIG_ENDIAN uint32_t stat:16, time:16; #else uint32_t time:16, stat:16; #endif }; #define OHCI_CNTL_CYCSRC (0x1 << 22) #define OHCI_CNTL_CYCMTR (0x1 << 21) #define OHCI_CNTL_CYCTIMER (0x1 << 20) #define OHCI_CNTL_PHYPKT (0x1 << 10) #define OHCI_CNTL_SID (0x1 << 9) /* * defined in OHCI 1.1 * chapter 6.1 */ #define OHCI_INT_DMA_ATRQ (0x1 << 0) #define OHCI_INT_DMA_ATRS (0x1 << 1) #define OHCI_INT_DMA_ARRQ (0x1 << 2) #define OHCI_INT_DMA_ARRS (0x1 << 3) #define OHCI_INT_DMA_PRRQ (0x1 << 4) #define OHCI_INT_DMA_PRRS (0x1 << 5) #define OHCI_INT_DMA_IT (0x1 << 6) #define OHCI_INT_DMA_IR (0x1 << 7) #define OHCI_INT_PW_ERR (0x1 << 8) #define OHCI_INT_LR_ERR (0x1 << 9) #define OHCI_INT_PHY_SID (0x1 << 16) #define OHCI_INT_PHY_BUS_R (0x1 << 17) #define OHCI_INT_REG_FAIL (0x1 << 18) #define OHCI_INT_PHY_INT (0x1 << 19) #define OHCI_INT_CYC_START (0x1 << 20) #define OHCI_INT_CYC_64SECOND (0x1 << 21) #define OHCI_INT_CYC_LOST (0x1 << 22) #define OHCI_INT_CYC_ERR (0x1 << 23) #define OHCI_INT_ERR (0x1 << 24) #define OHCI_INT_CYC_LONG (0x1 << 25) #define OHCI_INT_PHY_REG (0x1 << 26) #define OHCI_INT_EN (0x1 << 31) #define IP_CHANNELS 0x0234 #define FWOHCI_MAXREC 2048 #define OHCI_ISORA 0x02 #define OHCI_ISORB 0x04 #define FWOHCITCODE_PHY 0xe Index: head/sys/dev/firewire/sbp.c =================================================================== --- head/sys/dev/firewire/sbp.c (revision 258779) +++ head/sys/dev/firewire/sbp.c (revision 258780) @@ -1,2998 +1,2998 @@ /*- * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) && __FreeBSD_version >= 501102 #include #include #endif #if defined(__DragonFly__) || __FreeBSD_version < 500106 #include /* for struct devstat */ #endif #ifdef __DragonFly__ #include #include #include #include #include #include #include #include #include #include #include #include "sbp.h" #else #include #include #include #include #include #include #include #include #include #include #include #include #endif #define ccb_sdev_ptr spriv_ptr0 #define ccb_sbp_ptr spriv_ptr1 #define SBP_NUM_TARGETS 8 /* MAX 64 */ /* * Scan_bus doesn't work for more than 8 LUNs * because of CAM_SCSI2_MAXLUN in cam_xpt.c */ #define SBP_NUM_LUNS 64 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */) #define SBP_DMA_SIZE PAGE_SIZE #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res) #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb)) #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS) /* * STATUS FIFO addressing * bit * ----------------------- * 0- 1( 2): 0 (alignment) * 2- 7( 6): target * 8-15( 8): lun * 16-31( 8): reserved * 32-47(16): SBP_BIND_HI * 48-64(16): bus_id, node_id */ #define SBP_BIND_HI 0x1 #define SBP_DEV2ADDR(t, l) \ (((u_int64_t)SBP_BIND_HI << 32) \ | (((l) & 0xff) << 8) \ | (((t) & 0x3f) << 2)) #define SBP_ADDR2TRG(a) (((a) >> 2) & 0x3f) #define SBP_ADDR2LUN(a) (((a) >> 8) & 0xff) #define SBP_INITIATOR 7 static char *orb_fun_name[] = { ORB_FUN_NAMES }; static int debug = 0; static int auto_login = 1; static int max_speed = -1; static int sbp_cold = 1; static int ex_login = 1; static int login_delay = 1000; /* msec */ static int scan_delay = 500; /* msec */ static int use_doorbell = 0; static int sbp_tags = 0; SYSCTL_DECL(_hw_firewire); static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0, "SBP-II Subsystem"); SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RW, &debug, 0, "SBP debug flag"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RW, &auto_login, 0, "SBP perform login automatically"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RW, &max_speed, 0, "SBP transfer max speed"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RW, &ex_login, 
0, "SBP enable exclusive login"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RW, &login_delay, 0, "SBP login delay in msec"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RW, &scan_delay, 0, "SBP scan delay in msec"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RW, &use_doorbell, 0, "SBP use doorbell request"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RW, &sbp_tags, 0, "SBP tagged queuing support"); TUNABLE_INT("hw.firewire.sbp.auto_login", &auto_login); TUNABLE_INT("hw.firewire.sbp.max_speed", &max_speed); TUNABLE_INT("hw.firewire.sbp.exclusive_login", &ex_login); TUNABLE_INT("hw.firewire.sbp.login_delay", &login_delay); TUNABLE_INT("hw.firewire.sbp.scan_delay", &scan_delay); TUNABLE_INT("hw.firewire.sbp.use_doorbell", &use_doorbell); TUNABLE_INT("hw.firewire.sbp.tags", &sbp_tags); #define NEED_RESPONSE 0 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE) #ifdef __sparc64__ /* iommu */ #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX) #else #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE) #endif struct sbp_ocb { STAILQ_ENTRY(sbp_ocb) ocb; union ccb *ccb; bus_addr_t bus_addr; uint32_t orb[8]; #define IND_PTR_OFFSET (8*sizeof(uint32_t)) struct ind_ptr ind_ptr[SBP_IND_MAX]; struct sbp_dev *sdev; int flags; /* XXX should be removed */ bus_dmamap_t dmamap; struct callout_handle timeout_ch; }; #define OCB_ACT_MGM 0 #define OCB_ACT_CMD 1 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo)) struct sbp_dev{ #define SBP_DEV_RESET 0 /* accept login */ #define SBP_DEV_LOGIN 1 /* to login */ #if 0 #define SBP_DEV_RECONN 2 /* to reconnect */ #endif #define SBP_DEV_TOATTACH 3 /* to attach */ #define SBP_DEV_PROBE 4 /* scan lun */ #define SBP_DEV_ATTACHED 5 /* in operation */ #define SBP_DEV_DEAD 6 /* unavailable unit */ #define SBP_DEV_RETRY 7 /* unavailable unit */ uint8_t status:4, timeout:4; uint8_t type; uint16_t lun_id; uint16_t freeze; #define ORB_LINK_DEAD (1 << 0) #define VALID_LUN (1 << 1) #define ORB_POINTER_ACTIVE (1 << 2) #define ORB_POINTER_NEED (1 << 3) #define ORB_DOORBELL_ACTIVE (1 << 4) #define ORB_DOORBELL_NEED (1 << 5) #define ORB_SHORTAGE (1 << 6) uint16_t flags; struct cam_path *path; struct sbp_target *target; struct fwdma_alloc dma; struct sbp_login_res *login; struct callout login_callout; struct sbp_ocb *ocb; STAILQ_HEAD(, sbp_ocb) ocbs; STAILQ_HEAD(, sbp_ocb) free_ocbs; struct sbp_ocb *last_ocb; char vendor[32]; char product[32]; char revision[10]; char bustgtlun[32]; }; struct sbp_target { int target_id; int num_lun; struct sbp_dev **luns; struct sbp_softc *sbp; struct fw_device *fwdev; uint32_t mgm_hi, mgm_lo; struct sbp_ocb *mgm_ocb_cur; STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue; struct callout mgm_ocb_timeout; struct callout scan_callout; STAILQ_HEAD(, fw_xfer) xferlist; int n_xfer; }; struct sbp_softc { struct firewire_dev_comm fd; struct cam_sim *sim; struct cam_path *path; struct sbp_target targets[SBP_NUM_TARGETS]; struct fw_bind fwb; bus_dma_tag_t dmat; struct timeval last_busreset; #define SIMQ_FREEZED 1 int flags; struct mtx mtx; }; #define SBP_LOCK(sbp) mtx_lock(&(sbp)->mtx) #define SBP_UNLOCK(sbp) mtx_unlock(&(sbp)->mtx) static void sbp_post_explore (void *); static void sbp_recv (struct fw_xfer *); static void sbp_mgm_callback (struct fw_xfer *); #if 0 static void sbp_cmd_callback (struct fw_xfer *); #endif static void sbp_orb_pointer (struct sbp_dev *, struct sbp_ocb *); static void sbp_doorbell(struct sbp_dev *); static void sbp_execute_ocb (void *, bus_dma_segment_t *, int, int); static void 
sbp_free_ocb (struct sbp_dev *, struct sbp_ocb *); static void sbp_abort_ocb (struct sbp_ocb *, int); static void sbp_abort_all_ocbs (struct sbp_dev *, int); static struct fw_xfer * sbp_write_cmd_locked (struct sbp_dev *, int, int); static struct fw_xfer * sbp_write_cmd (struct sbp_dev *, int, int); static struct sbp_ocb * sbp_get_ocb (struct sbp_dev *); static struct sbp_ocb * sbp_enqueue_ocb (struct sbp_dev *, struct sbp_ocb *); static struct sbp_ocb * sbp_dequeue_ocb (struct sbp_dev *, struct sbp_status *); static void sbp_cam_detach_sdev(struct sbp_dev *); static void sbp_free_sdev(struct sbp_dev *); static void sbp_cam_detach_target (struct sbp_target *); static void sbp_free_target (struct sbp_target *); static void sbp_mgm_timeout (void *arg); static void sbp_timeout (void *arg); static void sbp_mgm_orb (struct sbp_dev *, int, struct sbp_ocb *); static MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/FireWire"); /* cam related functions */ static void sbp_action(struct cam_sim *sim, union ccb *ccb); static void sbp_poll(struct cam_sim *sim); static void sbp_cam_scan_lun(struct cam_periph *, union ccb *); static void sbp_cam_scan_target(void *arg); static char *orb_status0[] = { /* 0 */ "No additional information to report", /* 1 */ "Request type not supported", /* 2 */ "Speed not supported", /* 3 */ "Page size not supported", /* 4 */ "Access denied", /* 5 */ "Logical unit not supported", /* 6 */ "Maximum payload too small", /* 7 */ "Reserved for future standardization", /* 8 */ "Resources unavailable", /* 9 */ "Function rejected", /* A */ "Login ID not recognized", /* B */ "Dummy ORB completed", /* C */ "Request aborted", /* FF */ "Unspecified error" #define MAX_ORB_STATUS0 0xd }; static char *orb_status1_object[] = { /* 0 */ "Operation request block (ORB)", /* 1 */ "Data buffer", /* 2 */ "Page table", /* 3 */ "Unable to specify" }; static char *orb_status1_serial_bus_error[] = { /* 0 */ "Missing acknowledge", /* 1 */ "Reserved; not to be used", /* 2 */ "Time-out error", /* 3 */ "Reserved; not to be used", /* 4 */ "Busy retry limit exceeded(X)", /* 5 */ "Busy retry limit exceeded(A)", /* 6 */ "Busy retry limit exceeded(B)", /* 7 */ "Reserved for future standardization", /* 8 */ "Reserved for future standardization", /* 9 */ "Reserved for future standardization", /* A */ "Reserved for future standardization", /* B */ "Tardy retry limit exceeded", /* C */ "Conflict error", /* D */ "Data error", /* E */ "Type error", /* F */ "Address error" }; static void sbp_identify(driver_t *driver, device_t parent) { SBP_DEBUG(0) printf("sbp_identify\n"); END_DEBUG BUS_ADD_CHILD(parent, 0, "sbp", device_get_unit(parent)); } /* * sbp_probe() */ static int sbp_probe(device_t dev) { device_t pa; SBP_DEBUG(0) printf("sbp_probe\n"); END_DEBUG pa = device_get_parent(dev); if(device_get_unit(dev) != device_get_unit(pa)){ return(ENXIO); } device_set_desc(dev, "SBP-2/SCSI over FireWire"); #if 0 if (bootverbose) debug = bootverbose; #endif return (0); } /* * Display device characteristics on the console */ static void sbp_show_sdev_info(struct sbp_dev *sdev) { struct fw_device *fwdev; fwdev = sdev->target->fwdev; device_printf(sdev->target->sbp->fd.dev, "%s: %s: ordered:%d type:%d EUI:%08x%08x node:%d " "speed:%d maxrec:%d\n", __func__, sdev->bustgtlun, (sdev->type & 0x40) >> 6, (sdev->type & 0x1f), fwdev->eui.hi, fwdev->eui.lo, fwdev->dst, fwdev->speed, fwdev->maxrec); device_printf(sdev->target->sbp->fd.dev, "%s: %s '%s' '%s' '%s'\n", __func__, sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision); } static 
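/*
 * Editor's note (not in the original source): the wired[] table below
 * pins a device with a known EUI-64 to a fixed CAM target id on a given
 * bus; sbp_new_target() consults it before falling back to the first
 * free, non-wired slot.  A hypothetical entry for a disk with EUI-64
 * 0x0123456789abcdef on bus 0 wired to target 3 would read
 * {0, 3, {0x01234567, 0x89abcdef}}, placed before the {-1, -1, {0, 0}}
 * terminator.  As the XXX comment in sbp_new_target() says, this really
 * belongs in a tunable or device hint rather than a compile-time table.
 */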
struct {
	int bus;
	int target;
	struct fw_eui64 eui;
} wired[] = {
	/* Bus	Target	EUI64 */
#if 0
	{0, 2, {0x00018ea0, 0x01fd0154}},	/* Logitec HDD */
	{0, 0, {0x00018ea6, 0x00100682}},	/* Logitec DVD */
	{0, 1, {0x00d03200, 0xa412006a}},	/* Yano HDD */
#endif
	{-1, -1, {0, 0}}
};

static int
sbp_new_target(struct sbp_softc *sbp, struct fw_device *fwdev)
{
	int bus, i, target = -1;
	char w[SBP_NUM_TARGETS];

	bzero(w, sizeof(w));
	bus = device_get_unit(sbp->fd.dev);
	/* XXX wired-down configuration should be taken from a tunable or device hint */
	for (i = 0; wired[i].bus >= 0; i++) {
		if (wired[i].bus == bus) {
			w[wired[i].target] = 1;
			if (wired[i].eui.hi == fwdev->eui.hi &&
			    wired[i].eui.lo == fwdev->eui.lo)
				target = wired[i].target;
		}
	}
	if (target >= 0) {
		if (target < SBP_NUM_TARGETS &&
		    sbp->targets[target].fwdev == NULL)
			return (target);
		device_printf(sbp->fd.dev,
		    "target %d is not free for %08x:%08x\n",
		    target, fwdev->eui.hi, fwdev->eui.lo);
		target = -1;
	}
	/* non-wired target */
	for (i = 0; i < SBP_NUM_TARGETS; i++)
		if (sbp->targets[i].fwdev == NULL && w[i] == 0) {
			target = i;
			break;
		}

	return target;
}

static void
sbp_alloc_lun(struct sbp_target *target)
{
	struct crom_context cc;
	struct csrreg *reg;
	struct sbp_dev *sdev, **newluns;
	struct sbp_softc *sbp;
	int maxlun, lun, i;

	sbp = target->sbp;
	crom_init_context(&cc, target->fwdev->csrrom);
	/* XXX should parse appropriate unit directories only */
	maxlun = -1;
	while (cc.depth >= 0) {
		reg = crom_search_key(&cc, CROM_LUN);
		if (reg == NULL)
			break;
		lun = reg->val & 0xffff;
SBP_DEBUG(0)
		printf("target %d lun %d found\n", target->target_id, lun);
END_DEBUG
		if (maxlun < lun)
			maxlun = lun;
		crom_next(&cc);
	}
	if (maxlun < 0)
		printf("%s:%d no LUN found\n",
		    device_get_nameunit(target->sbp->fd.dev),
		    target->target_id);

	maxlun++;
	if (maxlun >= SBP_NUM_LUNS)
		maxlun = SBP_NUM_LUNS;

	/* Invalidate stale devices */
	for (lun = 0; lun < target->num_lun; lun++) {
		sdev = target->luns[lun];
		if (sdev == NULL)
			continue;
		sdev->flags &= ~VALID_LUN;
		if (lun >= maxlun) {
			/* lost device */
			sbp_cam_detach_sdev(sdev);
			sbp_free_sdev(sdev);
			target->luns[lun] = NULL;
		}
	}

	/* Reallocate */
	if (maxlun != target->num_lun) {
		newluns = (struct sbp_dev **) realloc(target->luns,
		    sizeof(struct sbp_dev *) * maxlun,
		    M_SBP, M_NOWAIT | M_ZERO);

		if (newluns == NULL) {
			printf("%s: realloc failed\n", __func__);
			newluns = target->luns;
			maxlun = target->num_lun;
		}

		/*
		 * We must zero the extended region for the case
		 * realloc() doesn't allocate a new buffer.
*/ if (maxlun > target->num_lun) bzero(&newluns[target->num_lun], sizeof(struct sbp_dev *) * (maxlun - target->num_lun)); target->luns = newluns; target->num_lun = maxlun; } crom_init_context(&cc, target->fwdev->csrrom); while (cc.depth >= 0) { int new = 0; reg = crom_search_key(&cc, CROM_LUN); if (reg == NULL) break; lun = reg->val & 0xffff; if (lun >= SBP_NUM_LUNS) { printf("too large lun %d\n", lun); goto next; } sdev = target->luns[lun]; if (sdev == NULL) { sdev = malloc(sizeof(struct sbp_dev), M_SBP, M_NOWAIT | M_ZERO); if (sdev == NULL) { printf("%s: malloc failed\n", __func__); goto next; } target->luns[lun] = sdev; sdev->lun_id = lun; sdev->target = target; STAILQ_INIT(&sdev->ocbs); CALLOUT_INIT(&sdev->login_callout); sdev->status = SBP_DEV_RESET; new = 1; snprintf(sdev->bustgtlun, 32, "%s:%d:%d", device_get_nameunit(sdev->target->sbp->fd.dev), sdev->target->target_id, sdev->lun_id); } sdev->flags |= VALID_LUN; sdev->type = (reg->val & 0xff0000) >> 16; if (new == 0) goto next; fwdma_malloc(sbp->fd.fc, /* alignment */ sizeof(uint32_t), SBP_DMA_SIZE, &sdev->dma, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); if (sdev->dma.v_addr == NULL) { printf("%s: dma space allocation failed\n", __func__); free(sdev, M_SBP); target->luns[lun] = NULL; goto next; } sdev->login = (struct sbp_login_res *) sdev->dma.v_addr; sdev->ocb = (struct sbp_ocb *) ((char *)sdev->dma.v_addr + SBP_LOGIN_SIZE); bzero((char *)sdev->ocb, sizeof (struct sbp_ocb) * SBP_QUEUE_LEN); STAILQ_INIT(&sdev->free_ocbs); for (i = 0; i < SBP_QUEUE_LEN; i++) { struct sbp_ocb *ocb; ocb = &sdev->ocb[i]; ocb->bus_addr = sdev->dma.bus_addr + SBP_LOGIN_SIZE + sizeof(struct sbp_ocb) * i + offsetof(struct sbp_ocb, orb[0]); if (bus_dmamap_create(sbp->dmat, 0, &ocb->dmamap)) { printf("sbp_attach: cannot create dmamap\n"); /* XXX */ goto next; } callout_handle_init(&ocb->timeout_ch); sbp_free_ocb(sdev, ocb); } next: crom_next(&cc); } for (lun = 0; lun < target->num_lun; lun ++) { sdev = target->luns[lun]; if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) { sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[lun] = NULL; } } } static struct sbp_target * sbp_alloc_target(struct sbp_softc *sbp, struct fw_device *fwdev) { int i; struct sbp_target *target; struct crom_context cc; struct csrreg *reg; SBP_DEBUG(1) printf("sbp_alloc_target\n"); END_DEBUG i = sbp_new_target(sbp, fwdev); if (i < 0) { device_printf(sbp->fd.dev, "increase SBP_NUM_TARGETS!\n"); return NULL; } /* new target */ target = &sbp->targets[i]; target->sbp = sbp; target->fwdev = fwdev; target->target_id = i; /* XXX we may want to reload mgm port after each bus reset */ /* XXX there might be multiple management agents */ crom_init_context(&cc, target->fwdev->csrrom); reg = crom_search_key(&cc, CROM_MGM); if (reg == NULL || reg->val == 0) { printf("NULL management address\n"); target->fwdev = NULL; return NULL; } target->mgm_hi = 0xffff; target->mgm_lo = 0xf0000000 | (reg->val << 2); target->mgm_ocb_cur = NULL; SBP_DEBUG(1) printf("target:%d mgm_port: %x\n", i, target->mgm_lo); END_DEBUG STAILQ_INIT(&target->xferlist); target->n_xfer = 0; STAILQ_INIT(&target->mgm_ocb_queue); CALLOUT_INIT(&target->mgm_ocb_timeout); CALLOUT_INIT(&target->scan_callout); target->luns = NULL; target->num_lun = 0; return target; } static void sbp_probe_lun(struct sbp_dev *sdev) { struct fw_device *fwdev; struct crom_context c, *cc = &c; struct csrreg *reg; bzero(sdev->vendor, sizeof(sdev->vendor)); bzero(sdev->product, sizeof(sdev->product)); fwdev = sdev->target->fwdev; crom_init_context(cc, 
fwdev->csrrom); /* get vendor string */ crom_search_key(cc, CSRKEY_VENDOR); crom_next(cc); crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor)); /* skip to the unit directory for SBP-2 */ while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) { if (reg->val == CSRVAL_T10SBP2) break; crom_next(cc); } /* get firmware revision */ reg = crom_search_key(cc, CSRKEY_FIRM_VER); if (reg != NULL) snprintf(sdev->revision, sizeof(sdev->revision), "%06x", reg->val); /* get product string */ crom_search_key(cc, CSRKEY_MODEL); crom_next(cc); crom_parse_text(cc, sdev->product, sizeof(sdev->product)); } static void sbp_login_callout(void *arg) { struct sbp_dev *sdev = (struct sbp_dev *)arg; sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL); } static void sbp_login(struct sbp_dev *sdev) { struct timeval delta; struct timeval t; int ticks = 0; microtime(&delta); timevalsub(&delta, &sdev->target->sbp->last_busreset); t.tv_sec = login_delay / 1000; t.tv_usec = (login_delay % 1000) * 1000; timevalsub(&t, &delta); if (t.tv_sec >= 0 && t.tv_usec > 0) ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000; SBP_DEBUG(0) printf("%s: sec = %jd usec = %ld ticks = %d\n", __func__, (intmax_t)t.tv_sec, t.tv_usec, ticks); END_DEBUG callout_reset(&sdev->login_callout, ticks, sbp_login_callout, (void *)(sdev)); } #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \ && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2)) static void sbp_probe_target(void *arg) { struct sbp_target *target = (struct sbp_target *)arg; struct sbp_softc *sbp = target->sbp; struct sbp_dev *sdev; int i, alive; alive = SBP_FWDEV_ALIVE(target->fwdev); SBP_DEBUG(1) device_printf(sbp->fd.dev, "%s %d%salive\n", __func__, target->target_id, (!alive) ? " not " : ""); END_DEBUG sbp = target->sbp; sbp_alloc_lun(target); /* XXX untimeout mgm_ocb and dequeue */ for (i=0; i < target->num_lun; i++) { sdev = target->luns[i]; if (sdev == NULL) continue; if (alive && (sdev->status != SBP_DEV_DEAD)) { if (sdev->path != NULL) { SBP_LOCK(sbp); xpt_freeze_devq(sdev->path, 1); sdev->freeze ++; SBP_UNLOCK(sbp); } sbp_probe_lun(sdev); sbp_show_sdev_info(sdev); sbp_abort_all_ocbs(sdev, CAM_SCSI_BUS_RESET); switch (sdev->status) { case SBP_DEV_RESET: /* new or revived target */ if (auto_login) sbp_login(sdev); break; case SBP_DEV_TOATTACH: case SBP_DEV_PROBE: case SBP_DEV_ATTACHED: case SBP_DEV_RETRY: default: sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL); break; } } else { switch (sdev->status) { case SBP_DEV_ATTACHED: SBP_DEBUG(0) /* the device has gone */ device_printf(sbp->fd.dev, "%s: lost target\n", __func__); END_DEBUG if (sdev->path) { SBP_LOCK(sbp); xpt_freeze_devq(sdev->path, 1); sdev->freeze ++; SBP_UNLOCK(sbp); } sdev->status = SBP_DEV_RETRY; sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[i] = NULL; break; case SBP_DEV_PROBE: case SBP_DEV_TOATTACH: sdev->status = SBP_DEV_RESET; break; case SBP_DEV_RETRY: case SBP_DEV_RESET: case SBP_DEV_DEAD: break; } } } } static void sbp_post_busreset(void *arg) { struct sbp_softc *sbp; sbp = (struct sbp_softc *)arg; SBP_DEBUG(0) printf("sbp_post_busreset\n"); END_DEBUG if ((sbp->sim->flags & SIMQ_FREEZED) == 0) { SBP_LOCK(sbp); xpt_freeze_simq(sbp->sim, /*count*/1); sbp->sim->flags |= SIMQ_FREEZED; SBP_UNLOCK(sbp); } microtime(&sbp->last_busreset); } static void sbp_post_explore(void *arg) { struct sbp_softc *sbp = (struct sbp_softc *)arg; struct sbp_target *target; struct fw_device *fwdev; int i, alive; SBP_DEBUG(0) printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold); END_DEBUG /* We need 
physical access */ if (!firewire_phydma_enable) return; if (sbp_cold > 0) sbp_cold --; #if 0 /* * XXX don't let CAM the bus rest. * CAM tries to do something with freezed (DEV_RETRY) devices. */ xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL); #endif /* Garbage Collection */ for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){ target = &sbp->targets[i]; STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link) if (target->fwdev == NULL || target->fwdev == fwdev) break; if (fwdev == NULL) { /* device has removed in lower driver */ sbp_cam_detach_target(target); sbp_free_target(target); } } /* traverse device list */ STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link) { SBP_DEBUG(0) device_printf(sbp->fd.dev,"%s:: EUI:%08x%08x %s attached, state=%d\n", __func__, fwdev->eui.hi, fwdev->eui.lo, (fwdev->status != FWDEVATTACHED) ? "not" : "", fwdev->status); END_DEBUG alive = SBP_FWDEV_ALIVE(fwdev); for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){ target = &sbp->targets[i]; if(target->fwdev == fwdev ) { /* known target */ break; } } if(i == SBP_NUM_TARGETS){ if (alive) { /* new target */ target = sbp_alloc_target(sbp, fwdev); if (target == NULL) continue; } else { continue; } } sbp_probe_target((void *)target); if (target->num_lun == 0) sbp_free_target(target); } SBP_LOCK(sbp); xpt_release_simq(sbp->sim, /*run queue*/TRUE); sbp->sim->flags &= ~SIMQ_FREEZED; SBP_UNLOCK(sbp); } #if NEED_RESPONSE static void sbp_loginres_callback(struct fw_xfer *xfer){ int s; struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev,"%s\n", __func__); END_DEBUG /* recycle */ s = splfw(); STAILQ_INSERT_TAIL(&sdev->target->sbp->fwb.xferlist, xfer, link); splx(s); return; } #endif static __inline void sbp_xfer_free(struct fw_xfer *xfer) { struct sbp_dev *sdev; int s; sdev = (struct sbp_dev *)xfer->sc; fw_xfer_unload(xfer); s = splfw(); SBP_LOCK(sdev->target->sbp); STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link); SBP_UNLOCK(sdev->target->sbp); splx(s); } static void sbp_reset_start_callback(struct fw_xfer *xfer) { struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc; struct sbp_target *target = sdev->target; int i; if (xfer->resp != 0) { device_printf(sdev->target->sbp->fd.dev, "%s: %s failed: resp=%d\n", __func__, sdev->bustgtlun, xfer->resp); } for (i = 0; i < target->num_lun; i++) { tsdev = target->luns[i]; if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN) sbp_login(tsdev); } } static void sbp_reset_start(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__,sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); xfer->hand = sbp_reset_start_callback; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = 0xffff; fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); } static void sbp_mgm_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; int resp; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG resp = xfer->resp; sbp_xfer_free(xfer); return; } static struct sbp_dev * sbp_next_dev(struct sbp_target *target, int lun) { struct sbp_dev **sdevp; int i; for (i = lun, sdevp = &target->luns[lun]; i < target->num_lun; i++, sdevp++) if (*sdevp != NULL && (*sdevp)->status == SBP_DEV_PROBE) return(*sdevp); return(NULL); } #define SCAN_PRI 1 static void sbp_cam_scan_lun(struct cam_periph *periph, union ccb *ccb) { struct sbp_target *target; 
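	/*
	 * Editor's note (not in the original source): this is the completion
	 * callback for the XPT_SCAN_LUN ccb issued by sbp_cam_scan_target().
	 * On completion it marks the lun attached (or logs the failure),
	 * then reuses the same ccb to start the scan of the next lun still
	 * in SBP_DEV_PROBE state, releasing the devq freeze each time; the
	 * ccb is freed only once sbp_next_dev() finds nothing left to probe.
	 */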
struct sbp_dev *sdev; sdev = (struct sbp_dev *) ccb->ccb_h.ccb_sdev_ptr; target = sdev->target; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { sdev->status = SBP_DEV_ATTACHED; } else { device_printf(sdev->target->sbp->fd.dev, "%s:%s failed\n", __func__, sdev->bustgtlun); } sdev = sbp_next_dev(target, sdev->lun_id + 1); if (sdev == NULL) { free(ccb, M_SBP); return; } /* reuse ccb */ xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI); ccb->ccb_h.ccb_sdev_ptr = sdev; xpt_action(ccb); xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 1; } static void sbp_cam_scan_target(void *arg) { struct sbp_target *target = (struct sbp_target *)arg; struct sbp_dev *sdev; union ccb *ccb; sdev = sbp_next_dev(target, 0); if (sdev == NULL) { printf("sbp_cam_scan_target: nothing to do for target%d\n", target->target_id); return; } SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG ccb = malloc(sizeof(union ccb), M_SBP, M_NOWAIT | M_ZERO); if (ccb == NULL) { printf("sbp_cam_scan_target: malloc failed\n"); return; } xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = sbp_cam_scan_lun; ccb->ccb_h.flags |= CAM_DEV_QFREEZE; ccb->crcn.flags = CAM_FLAG_NONE; ccb->ccb_h.ccb_sdev_ptr = sdev; /* The scan is in progress now. */ SBP_LOCK(target->sbp); xpt_action(ccb); xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 1; SBP_UNLOCK(target->sbp); } static __inline void sbp_scan_dev(struct sbp_dev *sdev) { sdev->status = SBP_DEV_PROBE; callout_reset(&sdev->target->scan_callout, scan_delay * hz / 1000, sbp_cam_scan_target, (void *)sdev->target); } static void sbp_do_attach(struct fw_xfer *xfer) { struct sbp_dev *sdev; struct sbp_target *target; struct sbp_softc *sbp; sdev = (struct sbp_dev *)xfer->sc; target = sdev->target; sbp = target->sbp; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG sbp_xfer_free(xfer); if (sdev->path == NULL) xpt_create_path(&sdev->path, NULL, cam_sim_path(target->sbp->sim), target->target_id, sdev->lun_id); /* * Let CAM scan the bus if we are in the boot process. * XXX xpt_scan_bus cannot detect LUN larger than 0 * if LUN 0 doesn't exists. 
*/ if (sbp_cold > 0) { sdev->status = SBP_DEV_ATTACHED; return; } sbp_scan_dev(sdev); return; } static void sbp_agent_reset_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { device_printf(sdev->target->sbp->fd.dev, "%s:%s resp=%d\n", __func__, sdev->bustgtlun, xfer->resp); } sbp_xfer_free(xfer); if (sdev->path) { SBP_LOCK(sdev->target->sbp); xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 0; SBP_UNLOCK(sdev->target->sbp); } } static void sbp_agent_reset(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04); if (xfer == NULL) return; if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE) xfer->hand = sbp_agent_reset_callback; else xfer->hand = sbp_do_attach; fp = &xfer->send.hdr; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); sbp_abort_all_ocbs(sdev, CAM_BDR_SENT); } static void sbp_busy_timeout_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG sbp_xfer_free(xfer); sbp_agent_reset(sdev); } static void sbp_busy_timeout(struct sbp_dev *sdev) { struct fw_pkt *fp; struct fw_xfer *xfer; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); xfer->hand = sbp_busy_timeout_callback; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = 0xffff; fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT; fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf); fw_asyreq(xfer->fc, -1, xfer); } static void sbp_orb_pointer_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(2) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { /* XXX */ printf("%s: xfer->resp = %d\n", __func__, xfer->resp); } sbp_xfer_free(xfer); SBP_LOCK(sdev->target->sbp); sdev->flags &= ~ORB_POINTER_ACTIVE; if ((sdev->flags & ORB_POINTER_NEED) != 0) { struct sbp_ocb *ocb; sdev->flags &= ~ORB_POINTER_NEED; ocb = STAILQ_FIRST(&sdev->ocbs); if (ocb != NULL) sbp_orb_pointer(sdev, ocb); } SBP_UNLOCK(sdev->target->sbp); return; } static void sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s 0x%08x\n", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); END_DEBUG mtx_assert(&sdev->target->sbp->mtx, MA_OWNED); if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) { SBP_DEBUG(0) printf("%s: orb pointer active\n", __func__); END_DEBUG sdev->flags |= ORB_POINTER_NEED; return; } sdev->flags |= ORB_POINTER_ACTIVE; xfer = sbp_write_cmd_locked(sdev, FWTCODE_WREQB, 0x08); if (xfer == NULL) return; xfer->hand = sbp_orb_pointer_callback; fp = &xfer->send.hdr; fp->mode.wreqb.len = 8; fp->mode.wreqb.extcode = 0; xfer->send.payload[0] = htonl(((sdev->target->sbp->fd.fc->nodeid | FWLOCALBUS )<< 16)); xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr); /* * sbp_xfer_free() will attempt to acquire * the SBP lock on entrance. 
Also, this removes * a LOR between the firewire layer and sbp */ SBP_UNLOCK(sdev->target->sbp); if(fw_asyreq(xfer->fc, -1, xfer) != 0){ sbp_xfer_free(xfer); ocb->ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ocb->ccb); } SBP_LOCK(sdev->target->sbp); } static void sbp_doorbell_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { /* XXX */ device_printf(sdev->target->sbp->fd.dev, "%s: xfer->resp = %d\n", __func__, xfer->resp); } sbp_xfer_free(xfer); sdev->flags &= ~ORB_DOORBELL_ACTIVE; if ((sdev->flags & ORB_DOORBELL_NEED) != 0) { sdev->flags &= ~ORB_DOORBELL_NEED; SBP_LOCK(sdev->target->sbp); sbp_doorbell(sdev); SBP_UNLOCK(sdev->target->sbp); } return; } static void sbp_doorbell(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) { sdev->flags |= ORB_DOORBELL_NEED; return; } sdev->flags |= ORB_DOORBELL_ACTIVE; xfer = sbp_write_cmd_locked(sdev, FWTCODE_WREQQ, 0x10); if (xfer == NULL) return; xfer->hand = sbp_doorbell_callback; fp = &xfer->send.hdr; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); } static struct fw_xfer * sbp_write_cmd_locked(struct sbp_dev *sdev, int tcode, int offset) { struct fw_xfer *xfer; struct fw_pkt *fp; struct sbp_target *target; int s, new = 0; mtx_assert(&sdev->target->sbp->mtx, MA_OWNED); target = sdev->target; s = splfw(); xfer = STAILQ_FIRST(&target->xferlist); if (xfer == NULL) { if (target->n_xfer > 5 /* XXX */) { printf("sbp: no more xfer for this target\n"); splx(s); return(NULL); } xfer = fw_xfer_alloc_buf(M_SBP, 8, 0); if(xfer == NULL){ printf("sbp: fw_xfer_alloc_buf failed\n"); splx(s); return NULL; } target->n_xfer ++; if (debug) printf("sbp: alloc %d xfer\n", target->n_xfer); new = 1; } else { STAILQ_REMOVE_HEAD(&target->xferlist, link); } splx(s); if (new) { xfer->recv.pay_len = 0; xfer->send.spd = min(sdev->target->fwdev->speed, max_speed); xfer->fc = sdev->target->sbp->fd.fc; } if (tcode == FWTCODE_WREQB) xfer->send.pay_len = 8; else xfer->send.pay_len = 0; xfer->sc = (caddr_t)sdev; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = sdev->login->cmd_hi; fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset; fp->mode.wreqq.tlrt = 0; fp->mode.wreqq.tcode = tcode; fp->mode.wreqq.pri = 0; fp->mode.wreqq.dst = FWLOCALBUS | sdev->target->fwdev->dst; return xfer; } static struct fw_xfer * sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset) { struct sbp_softc *sbp = sdev->target->sbp; struct fw_xfer *xfer; SBP_LOCK(sbp); xfer = sbp_write_cmd_locked(sdev, tcode, offset); SBP_UNLOCK(sbp); return (xfer); } static void sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb) { struct fw_xfer *xfer; struct fw_pkt *fp; struct sbp_ocb *ocb; struct sbp_target *target; int s, nid; target = sdev->target; nid = target->sbp->fd.fc->nodeid | FWLOCALBUS; s = splfw(); SBP_LOCK(target->sbp); if (func == ORB_FUN_RUNQUEUE) { ocb = STAILQ_FIRST(&target->mgm_ocb_queue); if (target->mgm_ocb_cur != NULL || ocb == NULL) { SBP_UNLOCK(target->sbp); splx(s); return; } STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb); SBP_UNLOCK(target->sbp); goto start; } if ((ocb = sbp_get_ocb(sdev)) == NULL) { SBP_UNLOCK(target->sbp); splx(s); /* XXX */ return; } SBP_UNLOCK(target->sbp); ocb->flags = OCB_ACT_MGM; ocb->sdev = sdev; bzero((void 
*)ocb->orb, sizeof(ocb->orb)); ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI); ocb->orb[7] = htonl(SBP_DEV2ADDR(target->target_id, sdev->lun_id)); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s %s\n", __func__,sdev->bustgtlun, orb_fun_name[(func>>16)&0xf]); END_DEBUG switch (func) { case ORB_FUN_LGI: ocb->orb[0] = ocb->orb[1] = 0; /* password */ ocb->orb[2] = htonl(nid << 16); ocb->orb[3] = htonl(sdev->dma.bus_addr); ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id); if (ex_login) ocb->orb[4] |= htonl(ORB_EXV); ocb->orb[5] = htonl(SBP_LOGIN_SIZE); fwdma_sync(&sdev->dma, BUS_DMASYNC_PREREAD); break; case ORB_FUN_ATA: ocb->orb[0] = htonl((0 << 16) | 0); ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff); /* fall through */ case ORB_FUN_RCN: case ORB_FUN_LGO: case ORB_FUN_LUR: case ORB_FUN_RST: case ORB_FUN_ATS: ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id); break; } if (target->mgm_ocb_cur != NULL) { /* there is a standing ORB */ SBP_LOCK(target->sbp); STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb); SBP_UNLOCK(target->sbp); splx(s); return; } start: target->mgm_ocb_cur = ocb; splx(s); callout_reset(&target->mgm_ocb_timeout, 5*hz, sbp_mgm_timeout, (caddr_t)ocb); xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0); if(xfer == NULL){ return; } xfer->hand = sbp_mgm_callback; fp = &xfer->send.hdr; fp->mode.wreqb.dest_hi = sdev->target->mgm_hi; fp->mode.wreqb.dest_lo = sdev->target->mgm_lo; fp->mode.wreqb.len = 8; fp->mode.wreqb.extcode = 0; xfer->send.payload[0] = htonl(nid << 16); xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff); fw_asyreq(xfer->fc, -1, xfer); } static void sbp_print_scsi_cmd(struct sbp_ocb *ocb) { struct ccb_scsiio *csio; csio = &ocb->ccb->csio; printf("%s:%d:%jx XPT_SCSI_IO: " "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x" ", flags: 0x%02x, " "%db cmd/%db data/%db sense\n", device_get_nameunit(ocb->sdev->target->sbp->fd.dev), ocb->ccb->ccb_h.target_id, (uintmax_t)ocb->ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], csio->cdb_io.cdb_bytes[1], csio->cdb_io.cdb_bytes[2], csio->cdb_io.cdb_bytes[3], csio->cdb_io.cdb_bytes[4], csio->cdb_io.cdb_bytes[5], csio->cdb_io.cdb_bytes[6], csio->cdb_io.cdb_bytes[7], csio->cdb_io.cdb_bytes[8], csio->cdb_io.cdb_bytes[9], ocb->ccb->ccb_h.flags & CAM_DIR_MASK, csio->cdb_len, csio->dxfer_len, csio->sense_len); } static void sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb) { struct sbp_cmd_status *sbp_cmd_status; struct scsi_sense_data_fixed *sense; sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data; sense = (struct scsi_sense_data_fixed *)&ocb->ccb->csio.sense_data; SBP_DEBUG(0) sbp_print_scsi_cmd(ocb); /* XXX need decode status */ printf("%s: SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n", ocb->sdev->bustgtlun, sbp_cmd_status->status, sbp_cmd_status->sfmt, sbp_cmd_status->valid, sbp_cmd_status->s_key, sbp_cmd_status->s_code, sbp_cmd_status->s_qlfr, sbp_status->len); END_DEBUG switch (sbp_cmd_status->status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_BUSY: case SCSI_STATUS_CMD_TERMINATED: if(sbp_cmd_status->sfmt == SBP_SFMT_CURR){ sense->error_code = SSD_CURRENT_ERROR; }else{ sense->error_code = SSD_DEFERRED_ERROR; } if(sbp_cmd_status->valid) sense->error_code |= SSD_ERRCODE_VALID; sense->flags = sbp_cmd_status->s_key; if(sbp_cmd_status->mark) sense->flags |= SSD_FILEMARK; if(sbp_cmd_status->eom) sense->flags |= SSD_EOM; if(sbp_cmd_status->ill_len) sense->flags |= SSD_ILI; bcopy(&sbp_cmd_status->info, &sense->info[0], 4); if (sbp_status->len <= 1) /* 
XXX not scsi status. shouldn't be happened */ sense->extra_len = 0; else if (sbp_status->len <= 4) /* add_sense_code(_qual), info, cmd_spec_info */ sense->extra_len = 6; else /* fru, sense_key_spec */ sense->extra_len = 10; bcopy(&sbp_cmd_status->cdb, &sense->cmd_spec_info[0], 4); sense->add_sense_code = sbp_cmd_status->s_code; sense->add_sense_code_qual = sbp_cmd_status->s_qlfr; sense->fru = sbp_cmd_status->fru; bcopy(&sbp_cmd_status->s_keydep[0], &sense->sense_key_spec[0], 3); ocb->ccb->csio.scsi_status = sbp_cmd_status->status; ocb->ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; /* { uint8_t j, *tmp; tmp = sense; for( j = 0 ; j < 32 ; j+=8){ printf("sense %02x%02x %02x%02x %02x%02x %02x%02x\n", tmp[j], tmp[j+1], tmp[j+2], tmp[j+3], tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]); } } */ break; default: device_printf(ocb->sdev->target->sbp->fd.dev, "%s:%s unknown scsi status 0x%x\n", __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status); } } static void sbp_fix_inq_data(struct sbp_ocb *ocb) { union ccb *ccb; struct sbp_dev *sdev; struct scsi_inquiry_data *inq; ccb = ocb->ccb; sdev = ocb->sdev; if (ccb->csio.cdb_io.cdb_bytes[1] & SI_EVPD) return; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG inq = (struct scsi_inquiry_data *) ccb->csio.data_ptr; switch (SID_TYPE(inq)) { case T_DIRECT: #if 0 /* * XXX Convert Direct Access device to RBC. * I've never seen FireWire DA devices which support READ_6. */ if (SID_TYPE(inq) == T_DIRECT) inq->device |= T_RBC; /* T_DIRECT == 0 */ #endif /* fall through */ case T_RBC: /* * Override vendor/product/revision information. * Some devices sometimes return strange strings. */ #if 1 bcopy(sdev->vendor, inq->vendor, sizeof(inq->vendor)); bcopy(sdev->product, inq->product, sizeof(inq->product)); bcopy(sdev->revision+2, inq->revision, sizeof(inq->revision)); #endif break; } /* * Force to enable/disable tagged queuing. * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page. 
*/ if (sbp_tags > 0) inq->flags |= SID_CmdQue; else if (sbp_tags < 0) inq->flags &= ~SID_CmdQue; } static void sbp_recv1(struct fw_xfer *xfer) { struct fw_pkt *rfp; #if NEED_RESPONSE struct fw_pkt *sfp; #endif struct sbp_softc *sbp; struct sbp_dev *sdev; struct sbp_ocb *ocb; struct sbp_login_res *login_res = NULL; struct sbp_status *sbp_status; struct sbp_target *target; int orb_fun, status_valid0, status_valid, t, l, reset_agent = 0; uint32_t addr; /* uint32_t *ld; ld = xfer->recv.buf; printf("sbp %x %d %d %08x %08x %08x %08x\n", xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3])); printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7])); printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11])); */ sbp = (struct sbp_softc *)xfer->sc; if (xfer->resp != 0){ printf("sbp_recv: xfer->resp = %d\n", xfer->resp); goto done0; } if (xfer->recv.payload == NULL){ printf("sbp_recv: xfer->recv.payload == NULL\n"); goto done0; } rfp = &xfer->recv.hdr; if(rfp->mode.wreqb.tcode != FWTCODE_WREQB){ printf("sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode); goto done0; } sbp_status = (struct sbp_status *)xfer->recv.payload; addr = rfp->mode.wreqb.dest_lo; SBP_DEBUG(2) printf("received address 0x%x\n", addr); END_DEBUG t = SBP_ADDR2TRG(addr); if (t >= SBP_NUM_TARGETS) { device_printf(sbp->fd.dev, "sbp_recv1: invalid target %d\n", t); goto done0; } target = &sbp->targets[t]; l = SBP_ADDR2LUN(addr); if (l >= target->num_lun || target->luns[l] == NULL) { device_printf(sbp->fd.dev, "sbp_recv1: invalid lun %d (target=%d)\n", l, t); goto done0; } sdev = target->luns[l]; ocb = NULL; switch (sbp_status->src) { case 0: case 1: /* check mgm_ocb_cur first */ ocb = target->mgm_ocb_cur; if (ocb != NULL) { if (OCB_MATCH(ocb, sbp_status)) { callout_stop(&target->mgm_ocb_timeout); target->mgm_ocb_cur = NULL; break; } } ocb = sbp_dequeue_ocb(sdev, sbp_status); if (ocb == NULL) { device_printf(sdev->target->sbp->fd.dev, #if defined(__DragonFly__) || __FreeBSD_version < 500000 "%s:%s No ocb(%lx) on the queue\n", #else "%s:%s No ocb(%x) on the queue\n", #endif __func__,sdev->bustgtlun, ntohl(sbp_status->orb_lo)); } break; case 2: /* unsolicit */ device_printf(sdev->target->sbp->fd.dev, "%s:%s unsolicit status received\n", __func__, sdev->bustgtlun); break; default: device_printf(sdev->target->sbp->fd.dev, "%s:%s unknown sbp_status->src\n", __func__, sdev->bustgtlun); } status_valid0 = (sbp_status->src < 2 && sbp_status->resp == ORB_RES_CMPL && sbp_status->dead == 0); status_valid = (status_valid0 && sbp_status->status == 0); if (!status_valid0 || debug > 2){ int status; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s ORB status src:%x resp:%x dead:%x" #if defined(__DragonFly__) || __FreeBSD_version < 500000 " len:%x stat:%x orb:%x%08lx\n", #else " len:%x stat:%x orb:%x%08x\n", #endif __func__, sdev->bustgtlun, sbp_status->src, sbp_status->resp, sbp_status->dead, sbp_status->len, sbp_status->status, ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo)); END_DEBUG device_printf(sdev->target->sbp->fd.dev, "%s\n", sdev->bustgtlun); status = sbp_status->status; switch(sbp_status->resp) { case 0: if (status > MAX_ORB_STATUS0) printf("%s\n", orb_status0[MAX_ORB_STATUS0]); else printf("%s\n", orb_status0[status]); break; case 1: printf("Obj: %s, Error: %s\n", orb_status1_object[(status>>6) & 3], orb_status1_serial_bus_error[status & 0xf]); break; case 2: printf("Illegal request\n"); break; case 3: 
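		/*
		 * Editor's note (not in the original source): the resp codes
		 * decoded in this switch follow the SBP-2 status block
		 * definition: 0 REQUEST COMPLETE (status then indexes
		 * orb_status0[]), 1 TRANSPORT FAILURE (object and
		 * serial-bus-error subfields), 2 ILLEGAL REQUEST and
		 * 3 VENDOR DEPENDENT.
		 */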
printf("Vendor dependent\n"); break; default: printf("unknown respose code %d\n", sbp_status->resp); } } /* we have to reset the fetch agent if it's dead */ if (sbp_status->dead) { if (sdev->path) { SBP_LOCK(sbp); xpt_freeze_devq(sdev->path, 1); sdev->freeze ++; SBP_UNLOCK(sbp); } reset_agent = 1; } if (ocb == NULL) goto done; switch(ntohl(ocb->orb[4]) & ORB_FMT_MSK){ case ORB_FMT_NOP: break; case ORB_FMT_VED: break; case ORB_FMT_STD: switch(ocb->flags) { case OCB_ACT_MGM: orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK; reset_agent = 0; switch(orb_fun) { case ORB_FUN_LGI: fwdma_sync(&sdev->dma, BUS_DMASYNC_POSTREAD); login_res = sdev->login; login_res->len = ntohs(login_res->len); login_res->id = ntohs(login_res->id); login_res->cmd_hi = ntohs(login_res->cmd_hi); login_res->cmd_lo = ntohl(login_res->cmd_lo); if (status_valid) { SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s login: len %d, ID %d, cmd %08x%08x, recon_hold %d\n", __func__, sdev->bustgtlun, login_res->len, login_res->id, login_res->cmd_hi, login_res->cmd_lo, ntohs(login_res->recon_hold)); END_DEBUG sbp_busy_timeout(sdev); } else { /* forgot logout? */ device_printf(sdev->target->sbp->fd.dev, "%s:%s login failed\n", __func__, sdev->bustgtlun); sdev->status = SBP_DEV_RESET; } break; case ORB_FUN_RCN: login_res = sdev->login; if (status_valid) { SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s reconnect: len %d, ID %d, cmd %08x%08x\n", __func__, sdev->bustgtlun, login_res->len, login_res->id, login_res->cmd_hi, login_res->cmd_lo); END_DEBUG if (sdev->status == SBP_DEV_ATTACHED) sbp_scan_dev(sdev); else sbp_agent_reset(sdev); } else { /* reconnection hold time exceed? */ SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s reconnect failed\n", __func__, sdev->bustgtlun); END_DEBUG sbp_login(sdev); } break; case ORB_FUN_LGO: sdev->status = SBP_DEV_RESET; break; case ORB_FUN_RST: sbp_busy_timeout(sdev); break; case ORB_FUN_LUR: case ORB_FUN_ATA: case ORB_FUN_ATS: sbp_agent_reset(sdev); break; default: device_printf(sdev->target->sbp->fd.dev, "%s:%s unknown function %d\n", __func__, sdev->bustgtlun, orb_fun); break; } sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); break; case OCB_ACT_CMD: sdev->timeout = 0; if(ocb->ccb != NULL){ union ccb *ccb; ccb = ocb->ccb; if(sbp_status->len > 1){ sbp_scsi_status(sbp_status, ocb); }else{ if(sbp_status->resp != ORB_RES_CMPL){ ccb->ccb_h.status = CAM_REQ_CMP_ERR; }else{ ccb->ccb_h.status = CAM_REQ_CMP; } } /* fix up inq data */ if (ccb->csio.cdb_io.cdb_bytes[0] == INQUIRY) sbp_fix_inq_data(ocb); SBP_LOCK(sbp); xpt_done(ccb); SBP_UNLOCK(sbp); } break; default: break; } } if (!use_doorbell) sbp_free_ocb(sdev, ocb); done: if (reset_agent) sbp_agent_reset(sdev); done0: xfer->recv.pay_len = SBP_RECV_LEN; /* The received packet is usually small enough to be stored within * the buffer. In that case, the controller return ack_complete and * no respose is necessary. * * XXX fwohci.c and firewire.c should inform event_code such as * ack_complete or ack_pending to upper driver. 
*/ #if NEED_RESPONSE xfer->send.off = 0; sfp = (struct fw_pkt *)xfer->send.buf; sfp->mode.wres.dst = rfp->mode.wreqb.src; xfer->dst = sfp->mode.wres.dst; xfer->spd = min(sdev->target->fwdev->speed, max_speed); xfer->hand = sbp_loginres_callback; sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt; sfp->mode.wres.tcode = FWTCODE_WRES; sfp->mode.wres.rtcode = 0; sfp->mode.wres.pri = 0; fw_asyreq(xfer->fc, -1, xfer); #else /* recycle */ /* we don't need a lock here because bottom half is serialized */ STAILQ_INSERT_TAIL(&sbp->fwb.xferlist, xfer, link); #endif return; } static void sbp_recv(struct fw_xfer *xfer) { int s; s = splcam(); sbp_recv1(xfer); splx(s); } /* * sbp_attach() */ static int sbp_attach(device_t dev) { struct sbp_softc *sbp; struct cam_devq *devq; struct firewire_comm *fc; int i, s, error; if (DFLTPHYS > SBP_MAXPHYS) device_printf(dev, "Warning, DFLTPHYS(%dKB) is larger than " "SBP_MAXPHYS(%dKB).\n", DFLTPHYS / 1024, SBP_MAXPHYS / 1024); if (!firewire_phydma_enable) device_printf(dev, "Warning, hw.firewire.phydma_enable must be 1 " "for SBP over FireWire.\n"); SBP_DEBUG(0) printf("sbp_attach (cold=%d)\n", cold); END_DEBUG if (cold) sbp_cold ++; sbp = ((struct sbp_softc *)device_get_softc(dev)); bzero(sbp, sizeof(struct sbp_softc)); sbp->fd.dev = dev; sbp->fd.fc = fc = device_get_ivars(dev); mtx_init(&sbp->mtx, "sbp", NULL, MTX_DEF); if (max_speed < 0) max_speed = fc->speed; error = bus_dma_tag_create(/*parent*/fc->dmat, /* XXX should be 4 for sane backend? */ /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/0x100000, /*nsegments*/SBP_IND_MAX, /*maxsegsz*/SBP_SEG_MAX, /*flags*/BUS_DMA_ALLOCNOW, #if defined(__FreeBSD__) && __FreeBSD_version >= 501102 /*lockfunc*/busdma_lock_mutex, /*lockarg*/&sbp->mtx, #endif &sbp->dmat); if (error != 0) { printf("sbp_attach: Could not allocate DMA tag " "- error %d\n", error); return (ENOMEM); } devq = cam_simq_alloc(/*maxopenings*/SBP_NUM_OCB); if (devq == NULL) return (ENXIO); for( i = 0 ; i < SBP_NUM_TARGETS ; i++){ sbp->targets[i].fwdev = NULL; sbp->targets[i].luns = NULL; } sbp->sim = cam_sim_alloc(sbp_action, sbp_poll, "sbp", sbp, device_get_unit(dev), &sbp->mtx, /*untagged*/ 1, /*tagged*/ SBP_QUEUE_LEN - 1, devq); if (sbp->sim == NULL) { cam_simq_free(devq); return (ENXIO); } SBP_LOCK(sbp); if (xpt_bus_register(sbp->sim, dev, /*bus*/0) != CAM_SUCCESS) goto fail; if (xpt_create_path(&sbp->path, NULL, cam_sim_path(sbp->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sbp->sim)); goto fail; } SBP_UNLOCK(sbp); /* We reserve 16 bit space (4 bytes X 64 targets X 256 luns) */ sbp->fwb.start = ((u_int64_t)SBP_BIND_HI << 32) | SBP_DEV2ADDR(0, 0); sbp->fwb.end = sbp->fwb.start + 0xffff; /* pre-allocate xfer */ STAILQ_INIT(&sbp->fwb.xferlist); fw_xferlist_add(&sbp->fwb.xferlist, M_SBP, /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB/2, fc, (void *)sbp, sbp_recv); fw_bindadd(fc, &sbp->fwb); sbp->fd.post_busreset = sbp_post_busreset; sbp->fd.post_explore = sbp_post_explore; if (fc->status != -1) { s = splfw(); sbp_post_busreset((void *)sbp); sbp_post_explore((void *)sbp); splx(s); } SBP_LOCK(sbp); xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL); SBP_UNLOCK(sbp); return (0); fail: SBP_UNLOCK(sbp); cam_sim_free(sbp->sim, /*free_devq*/TRUE); return (ENXIO); } static int sbp_logout_all(struct sbp_softc *sbp) { struct sbp_target *target; struct sbp_dev *sdev; int i, j; SBP_DEBUG(0) printf("sbp_logout_all\n"); END_DEBUG for (i
= 0 ; i < SBP_NUM_TARGETS ; i ++) { target = &sbp->targets[i]; if (target->luns == NULL) continue; for (j = 0; j < target->num_lun; j++) { sdev = target->luns[j]; if (sdev == NULL) continue; callout_stop(&sdev->login_callout); if (sdev->status >= SBP_DEV_TOATTACH && sdev->status <= SBP_DEV_ATTACHED) sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL); } } return 0; } static int sbp_shutdown(device_t dev) { struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev)); sbp_logout_all(sbp); return (0); } static void sbp_free_sdev(struct sbp_dev *sdev) { int i; if (sdev == NULL) return; for (i = 0; i < SBP_QUEUE_LEN; i++) bus_dmamap_destroy(sdev->target->sbp->dmat, sdev->ocb[i].dmamap); fwdma_free(sdev->target->sbp->fd.fc, &sdev->dma); free(sdev, M_SBP); sdev = NULL; } static void sbp_free_target(struct sbp_target *target) { struct sbp_softc *sbp; struct fw_xfer *xfer, *next; int i; if (target->luns == NULL) return; callout_stop(&target->mgm_ocb_timeout); sbp = target->sbp; for (i = 0; i < target->num_lun; i++) sbp_free_sdev(target->luns[i]); for (xfer = STAILQ_FIRST(&target->xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free_buf(xfer); } STAILQ_INIT(&target->xferlist); free(target->luns, M_SBP); target->num_lun = 0; target->luns = NULL; target->fwdev = NULL; } static int sbp_detach(device_t dev) { struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev)); struct firewire_comm *fc = sbp->fd.fc; int i; SBP_DEBUG(0) printf("sbp_detach\n"); END_DEBUG for (i = 0; i < SBP_NUM_TARGETS; i ++) sbp_cam_detach_target(&sbp->targets[i]); SBP_LOCK(sbp); xpt_async(AC_LOST_DEVICE, sbp->path, NULL); xpt_free_path(sbp->path); xpt_bus_deregister(cam_sim_path(sbp->sim)); cam_sim_free(sbp->sim, /*free_devq*/ TRUE); SBP_UNLOCK(sbp); sbp_logout_all(sbp); /* XXX wait for logout completion */ pause("sbpdtc", hz/2); for (i = 0 ; i < SBP_NUM_TARGETS ; i ++) sbp_free_target(&sbp->targets[i]); fw_bindremove(fc, &sbp->fwb); fw_xferlist_remove(&sbp->fwb.xferlist); bus_dma_tag_destroy(sbp->dmat); mtx_destroy(&sbp->mtx); return (0); } static void sbp_cam_detach_sdev(struct sbp_dev *sdev) { if (sdev == NULL) return; if (sdev->status == SBP_DEV_DEAD) return; if (sdev->status == SBP_DEV_RESET) return; sbp_abort_all_ocbs(sdev, CAM_DEV_NOT_THERE); if (sdev->path) { SBP_LOCK(sdev->target->sbp); xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 0; xpt_async(AC_LOST_DEVICE, sdev->path, NULL); xpt_free_path(sdev->path); sdev->path = NULL; SBP_UNLOCK(sdev->target->sbp); } } static void sbp_cam_detach_target(struct sbp_target *target) { int i; if (target->luns != NULL) { SBP_DEBUG(0) printf("sbp_detach_target %d\n", target->target_id); END_DEBUG callout_stop(&target->scan_callout); for (i = 0; i < target->num_lun; i++) sbp_cam_detach_sdev(target->luns[i]); } } static void sbp_target_reset(struct sbp_dev *sdev, int method) { int i; struct sbp_target *target = sdev->target; struct sbp_dev *tsdev; for (i = 0; i < target->num_lun; i++) { tsdev = target->luns[i]; if (tsdev == NULL) continue; if (tsdev->status == SBP_DEV_DEAD) continue; if (tsdev->status == SBP_DEV_RESET) continue; SBP_LOCK(target->sbp); xpt_freeze_devq(tsdev->path, 1); tsdev->freeze ++; SBP_UNLOCK(target->sbp); sbp_abort_all_ocbs(tsdev, CAM_CMD_TIMEOUT); if (method == 2) tsdev->status = SBP_DEV_LOGIN; } switch(method) { case 1: printf("target reset\n"); sbp_mgm_orb(sdev, ORB_FUN_RST, NULL); break; case 2: printf("reset start\n"); sbp_reset_start(sdev); break; } } static void sbp_mgm_timeout(void *arg) { struct sbp_ocb *ocb 
= (struct sbp_ocb *)arg; struct sbp_dev *sdev = ocb->sdev; struct sbp_target *target = sdev->target; device_printf(sdev->target->sbp->fd.dev, "%s:%s request timeout(mgm orb:0x%08x)\n", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); target->mgm_ocb_cur = NULL; sbp_free_ocb(sdev, ocb); #if 0 /* XXX */ printf("run next request\n"); sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); #endif device_printf(sdev->target->sbp->fd.dev, "%s:%s reset start\n", __func__, sdev->bustgtlun); sbp_reset_start(sdev); } static void sbp_timeout(void *arg) { struct sbp_ocb *ocb = (struct sbp_ocb *)arg; struct sbp_dev *sdev = ocb->sdev; device_printf(sdev->target->sbp->fd.dev, "%s:%s request timeout(cmd orb:0x%08x) ... ", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); sdev->timeout ++; switch(sdev->timeout) { case 1: printf("agent reset\n"); SBP_LOCK(sdev->target->sbp); xpt_freeze_devq(sdev->path, 1); sdev->freeze ++; SBP_UNLOCK(sdev->target->sbp); sbp_abort_all_ocbs(sdev, CAM_CMD_TIMEOUT); sbp_agent_reset(sdev); break; case 2: case 3: sbp_target_reset(sdev, sdev->timeout - 1); break; #if 0 default: /* XXX give up */ sbp_cam_detach_target(target); if (target->luns != NULL) free(target->luns, M_SBP); target->num_lun = 0; target->luns = NULL; target->fwdev = NULL; #endif } } static void sbp_action1(struct cam_sim *sim, union ccb *ccb) { struct sbp_softc *sbp = (struct sbp_softc *)sim->softc; struct sbp_target *target = NULL; struct sbp_dev *sdev = NULL; /* target:lun -> sdev mapping */ if (sbp != NULL && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.target_id < SBP_NUM_TARGETS) { target = &sbp->targets[ccb->ccb_h.target_id]; if (target->fwdev != NULL && ccb->ccb_h.target_lun != CAM_LUN_WILDCARD && ccb->ccb_h.target_lun < target->num_lun) { sdev = target->luns[ccb->ccb_h.target_lun]; if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED && sdev->status != SBP_DEV_PROBE) sdev = NULL; } } SBP_DEBUG(1) if (sdev == NULL) printf("invalid target %d lun %jx\n", ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: case XPT_RESET_DEV: case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: case XPT_CALC_GEOMETRY: if (sdev == NULL) { SBP_DEBUG(1) printf("%s:%d:%jx:func_code 0x%04x: " "Invalid target (target needed)\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code); END_DEBUG ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } break; case XPT_PATH_INQ: case XPT_NOOP: /* These opcodes are sometimes aimed at a target (sc is valid) and * sometimes at the SIM (sc is invalid and target is * CAM_TARGET_WILDCARD) */ if (sbp == NULL && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { SBP_DEBUG(0) printf("%s:%d:%jx func_code 0x%04x: " "Invalid target (no wildcard)\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code); END_DEBUG ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } break; default: /* XXX Hm, we should check the input parameters */ break; } switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio; struct sbp_ocb *ocb; int speed; void *cdb; csio = &ccb->csio; mtx_assert(sim->mtx, MA_OWNED); SBP_DEBUG(2) printf("%s:%d:%jx XPT_SCSI_IO: " "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x" ", flags: 0x%02x, " "%db cmd/%db data/%db sense\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
csio->cdb_io.cdb_bytes[1], csio->cdb_io.cdb_bytes[2], csio->cdb_io.cdb_bytes[3], csio->cdb_io.cdb_bytes[4], csio->cdb_io.cdb_bytes[5], csio->cdb_io.cdb_bytes[6], csio->cdb_io.cdb_bytes[7], csio->cdb_io.cdb_bytes[8], csio->cdb_io.cdb_bytes[9], ccb->ccb_h.flags & CAM_DIR_MASK, csio->cdb_len, csio->dxfer_len, csio->sense_len); END_DEBUG if(sdev == NULL){ ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } #if 0 /* if we are in probe stage, pass only probe commands */ if (sdev->status == SBP_DEV_PROBE) { char *name; name = xpt_path_periph(ccb->ccb_h.path)->periph_name; printf("probe stage, periph name: %s\n", name); if (strcmp(name, "probe") != 0) { ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } } #endif if ((ocb = sbp_get_ocb(sdev)) == NULL) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; if (sdev->freeze == 0) { SBP_LOCK(sdev->target->sbp); xpt_freeze_devq(sdev->path, 1); sdev->freeze ++; SBP_UNLOCK(sdev->target->sbp); } xpt_done(ccb); return; } ocb->flags = OCB_ACT_CMD; ocb->sdev = sdev; ocb->ccb = ccb; ccb->ccb_h.ccb_sdev_ptr = sdev; - ocb->orb[0] = htonl(1 << 31); + ocb->orb[0] = htonl(1U << 31); ocb->orb[1] = 0; ocb->orb[2] = htonl(((sbp->fd.fc->nodeid | FWLOCALBUS )<< 16) ); ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET); speed = min(target->fwdev->speed, max_speed); ocb->orb[4] = htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7)); if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN){ ocb->orb[4] |= htonl(ORB_CMD_IN); } if (csio->ccb_h.flags & CAM_CDB_POINTER) cdb = (void *)csio->cdb_io.cdb_ptr; else cdb = (void *)&csio->cdb_io.cdb_bytes; bcopy(cdb, (void *)&ocb->orb[5], csio->cdb_len); /* printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3])); printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7])); */ if (ccb->csio.dxfer_len > 0) { int error; error = bus_dmamap_load_ccb(/*dma tag*/sbp->dmat, /*dma map*/ocb->dmamap, ccb, sbp_execute_ocb, ocb, /*flags*/0); if (error) printf("sbp: bus_dmamap_load error %d\n", error); } else sbp_execute_ocb(ocb, NULL, 0, 0); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; #if defined(__DragonFly__) || __FreeBSD_version < 501100 uint32_t size_mb; uint32_t secs_per_cylinder; int extended = 1; #endif ccg = &ccb->ccg; if (ccg->block_size == 0) { printf("sbp_action1: block_size is 0.\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } SBP_DEBUG(1) printf("%s:%d:%d:%jx:XPT_CALC_GEOMETRY: " #if defined(__DragonFly__) || __FreeBSD_version < 500000 "Volume size = %d\n", #else "Volume size = %jd\n", #endif device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 (uintmax_t) #endif ccg->volume_size); END_DEBUG #if defined(__DragonFly__) || __FreeBSD_version < 501100 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb > 1024 && extended) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; #else cam_calc_geometry(ccg, /*extended*/1); #endif xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { SBP_DEBUG(1) printf("%s:%d:XPT_RESET_BUS: \n", device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim)); END_DEBUG ccb->ccb_h.status = 
CAM_REQ_INVALID; xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; SBP_DEBUG(1) printf("%s:%d:%jx XPT_PATH_INQ:.\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE; cpi->hba_eng_cnt = 0; cpi->max_target = SBP_NUM_TARGETS - 1; cpi->max_lun = SBP_NUM_LUNS - 1; cpi->initiator_id = SBP_INITIATOR; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 400 * 1000 / 8; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "SBP", HBA_IDLEN); strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->transport = XPORT_SPI; /* XXX should be a FireWire transport */ cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; /* XXX should be a FireWire transport */ cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; SBP_DEBUG(1) printf("%s:%d:%jx XPT_GET_TRAN_SETTINGS:.\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG cts->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_ABORT: ccb->ccb_h.status = CAM_UA_ABORT; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: /* XXX */ default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } return; } static void sbp_action(struct cam_sim *sim, union ccb *ccb) { int s; s = splfw(); sbp_action1(sim, ccb); splx(s); } static void sbp_execute_ocb(void *arg, bus_dma_segment_t *segments, int seg, int error) { int i; struct sbp_ocb *ocb; struct sbp_ocb *prev; bus_dma_segment_t *s; if (error) printf("sbp_execute_ocb: error=%d\n", error); ocb = (struct sbp_ocb *)arg; SBP_DEBUG(2) printf("sbp_execute_ocb: seg %d", seg); for (i = 0; i < seg; i++) #if defined(__DragonFly__) || __FreeBSD_version < 500000 printf(", %x:%d", segments[i].ds_addr, segments[i].ds_len); #else printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr, (uintmax_t)segments[i].ds_len); #endif printf("\n"); END_DEBUG if (seg == 1) { /* direct pointer */ s = &segments[0]; if (s->ds_len > SBP_SEG_MAX) panic("ds_len > SBP_SEG_MAX, fix busdma code"); ocb->orb[3] = htonl(s->ds_addr); ocb->orb[4] |= htonl(s->ds_len); } else if(seg > 1) { /* page table */ for (i = 0; i < seg; i++) { s = &segments[i]; SBP_DEBUG(0) /* XXX LSI Logic "< 16 byte" bug might be hit */ if (s->ds_len < 16) printf("sbp_execute_ocb: warning, " #if defined(__DragonFly__) || __FreeBSD_version < 500000 "segment length(%d) is less than 16." #else "segment length(%zd) is less than 16." #endif "(seg=%d/%d)\n", (size_t)s->ds_len, i+1, seg); END_DEBUG if (s->ds_len > SBP_SEG_MAX) panic("ds_len > SBP_SEG_MAX, fix busdma code"); ocb->ind_ptr[i].hi = htonl(s->ds_len << 16); ocb->ind_ptr[i].lo = htonl(s->ds_addr); } ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg); } if (seg > 0) bus_dmamap_sync(ocb->sdev->target->sbp->dmat, ocb->dmamap, (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); prev = sbp_enqueue_ocb(ocb->sdev, ocb); fwdma_sync(&ocb->sdev->dma, BUS_DMASYNC_PREWRITE); if (use_doorbell) { if (prev == NULL) { if (ocb->sdev->last_ocb != NULL) sbp_doorbell(ocb->sdev); else sbp_orb_pointer(ocb->sdev, ocb); } } else { if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) { ocb->sdev->flags &= ~ORB_LINK_DEAD; sbp_orb_pointer(ocb->sdev, ocb); } } } static void sbp_poll(struct cam_sim *sim) { struct sbp_softc *sbp; struct firewire_comm *fc; sbp = (struct sbp_softc *)sim->softc; fc = sbp->fd.fc; fc->poll(fc, 0, -1); return; } static struct sbp_ocb * sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status) { struct sbp_ocb *ocb; struct sbp_ocb *next; int s = splfw(), order = 0; int flags; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, #if defined(__DragonFly__) || __FreeBSD_version < 500000 "%s:%s 0x%08lx src %d\n", #else "%s:%s 0x%08x src %d\n", #endif __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo), sbp_status->src); END_DEBUG SBP_LOCK(sdev->target->sbp); for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) { next = STAILQ_NEXT(ocb, ocb); flags = ocb->flags; if (OCB_MATCH(ocb, sbp_status)) { /* found */ STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb); if (ocb->ccb != NULL) untimeout(sbp_timeout, (caddr_t)ocb, ocb->timeout_ch); if (ntohl(ocb->orb[4]) & 0xffff) { bus_dmamap_sync(sdev->target->sbp->dmat, ocb->dmamap, (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sdev->target->sbp->dmat, ocb->dmamap); } if (!use_doorbell) { if (sbp_status->src == SRC_NO_NEXT) { if (next != NULL) sbp_orb_pointer(sdev, next); else if (order > 0) { /* * Unordered execution * We need to send pointer for * next ORB */ sdev->flags |= ORB_LINK_DEAD; } } } else { /* * XXX this is not correct for unordered * execution. */ if (sdev->last_ocb != NULL) { SBP_UNLOCK(sdev->target->sbp); sbp_free_ocb(sdev, sdev->last_ocb); SBP_LOCK(sdev->target->sbp); } sdev->last_ocb = ocb; if (next != NULL && sbp_status->src == SRC_NO_NEXT) sbp_doorbell(sdev); } break; } else order ++; } SBP_UNLOCK(sdev->target->sbp); splx(s); SBP_DEBUG(0) if (ocb && order > 0) { device_printf(sdev->target->sbp->fd.dev, "%s:%s unordered execution order:%d\n", __func__, sdev->bustgtlun, order); } END_DEBUG return (ocb); } static struct sbp_ocb * sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb) { int s = splfw(); struct sbp_ocb *prev, *prev2; mtx_assert(&sdev->target->sbp->mtx, MA_OWNED); SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, #if defined(__DragonFly__) || __FreeBSD_version < 500000 "%s:%s 0x%08x\n", __func__, sdev->bustgtlun, ocb->bus_addr); #else "%s:%s 0x%08jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr); #endif END_DEBUG prev2 = prev = STAILQ_LAST(&sdev->ocbs, sbp_ocb, ocb); STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb); if (ocb->ccb != NULL) ocb->timeout_ch = timeout(sbp_timeout, (caddr_t)ocb, (ocb->ccb->ccb_h.timeout * hz) / 1000); if (use_doorbell && prev == NULL) prev2 = sdev->last_ocb; if (prev2 != NULL && (ocb->sdev->flags & ORB_LINK_DEAD) == 0) { SBP_DEBUG(1) #if defined(__DragonFly__) || __FreeBSD_version < 500000 printf("linking chain 0x%x -> 0x%x\n", prev2->bus_addr, ocb->bus_addr); #else printf("linking chain 0x%jx -> 0x%jx\n", (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr); #endif END_DEBUG /* * Suppress compiler optimization so that orb[1] must be written first. 
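* The low half of the next_ORB pointer (orb[1]) has to be valid before the null bit in orb[0] is cleared; otherwise the target's fetch agent could chase a half-written link.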
* XXX We may need an explicit memory barrier on architectures * other than i386/amd64. */ *(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr); *(volatile uint32_t *)&prev2->orb[0] = 0; } splx(s); return prev; } static struct sbp_ocb * sbp_get_ocb(struct sbp_dev *sdev) { struct sbp_ocb *ocb; int s = splfw(); mtx_assert(&sdev->target->sbp->mtx, MA_OWNED); ocb = STAILQ_FIRST(&sdev->free_ocbs); if (ocb == NULL) { sdev->flags |= ORB_SHORTAGE; printf("ocb shortage!!!\n"); splx(s); return NULL; } STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb); splx(s); ocb->ccb = NULL; return (ocb); } static void sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb) { ocb->flags = 0; ocb->ccb = NULL; SBP_LOCK(sdev->target->sbp); STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb); if ((sdev->flags & ORB_SHORTAGE) != 0) { int count; sdev->flags &= ~ORB_SHORTAGE; count = sdev->freeze; sdev->freeze = 0; xpt_release_devq(sdev->path, count, TRUE); } SBP_UNLOCK(sdev->target->sbp); } static void sbp_abort_ocb(struct sbp_ocb *ocb, int status) { struct sbp_dev *sdev; sdev = ocb->sdev; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, #if defined(__DragonFly__) || __FreeBSD_version < 500000 "%s:%s 0x%x\n", __func__, sdev->bustgtlun, ocb->bus_addr); #else "%s:%s 0x%jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr); #endif END_DEBUG SBP_DEBUG(1) if (ocb->ccb != NULL) sbp_print_scsi_cmd(ocb); END_DEBUG if (ntohl(ocb->orb[4]) & 0xffff) { bus_dmamap_sync(sdev->target->sbp->dmat, ocb->dmamap, (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sdev->target->sbp->dmat, ocb->dmamap); } if (ocb->ccb != NULL) { untimeout(sbp_timeout, (caddr_t)ocb, ocb->timeout_ch); ocb->ccb->ccb_h.status = status; SBP_LOCK(sdev->target->sbp); xpt_done(ocb->ccb); SBP_UNLOCK(sdev->target->sbp); } sbp_free_ocb(sdev, ocb); } static void sbp_abort_all_ocbs(struct sbp_dev *sdev, int status) { int s; struct sbp_ocb *ocb, *next; STAILQ_HEAD(, sbp_ocb) temp; s = splfw(); STAILQ_INIT(&temp); SBP_LOCK(sdev->target->sbp); STAILQ_CONCAT(&temp, &sdev->ocbs); STAILQ_INIT(&sdev->ocbs); SBP_UNLOCK(sdev->target->sbp); for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) { next = STAILQ_NEXT(ocb, ocb); sbp_abort_ocb(ocb, status); } if (sdev->last_ocb != NULL) { sbp_free_ocb(sdev, sdev->last_ocb); sdev->last_ocb = NULL; } splx(s); } static devclass_t sbp_devclass; static device_method_t sbp_methods[] = { /* device interface */ DEVMETHOD(device_identify, sbp_identify), DEVMETHOD(device_probe, sbp_probe), DEVMETHOD(device_attach, sbp_attach), DEVMETHOD(device_detach, sbp_detach), DEVMETHOD(device_shutdown, sbp_shutdown), { 0, 0 } }; static driver_t sbp_driver = { "sbp", sbp_methods, sizeof(struct sbp_softc), }; #ifdef __DragonFly__ DECLARE_DUMMY_MODULE(sbp); #endif DRIVER_MODULE(sbp, firewire, sbp_driver, sbp_devclass, 0, 0); MODULE_VERSION(sbp, 1); MODULE_DEPEND(sbp, firewire, 1, 1, 1); MODULE_DEPEND(sbp, cam, 1, 1, 1); Index: head/sys/dev/firewire/sbp.h =================================================================== --- head/sys/dev/firewire/sbp.h (revision 258779) +++ head/sys/dev/firewire/sbp.h (revision 258780) @@ -1,203 +1,203 @@ /*- * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as below: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ -#define ORB_NOTIFY (1 << 31) +#define ORB_NOTIFY (1U << 31) #define ORB_FMT_STD (0 << 29) #define ORB_FMT_VED (2 << 29) #define ORB_FMT_NOP (3 << 29) #define ORB_FMT_MSK (3 << 29) #define ORB_EXV (1 << 28) /* */ #define ORB_CMD_IN (1 << 27) /* */ #define ORB_CMD_SPD(x) ((x) << 24) #define ORB_CMD_MAXP(x) ((x) << 20) #define ORB_RCN_TMO(x) ((x) << 20) #define ORB_CMD_PTBL (1 << 19) #define ORB_CMD_PSZ(x) ((x) << 16) #define ORB_FUN_LGI (0 << 16) #define ORB_FUN_QLG (1 << 16) #define ORB_FUN_RCN (3 << 16) #define ORB_FUN_LGO (7 << 16) #define ORB_FUN_ATA (0xb << 16) #define ORB_FUN_ATS (0xc << 16) #define ORB_FUN_LUR (0xe << 16) #define ORB_FUN_RST (0xf << 16) #define ORB_FUN_MSK (0xf << 16) #define ORB_FUN_RUNQUEUE 0xffff #define ORB_RES_CMPL 0 #define ORB_RES_FAIL 1 #define ORB_RES_ILLE 2 #define ORB_RES_VEND 3 #define SBP_DEBUG(x) if (debug > x) { #define END_DEBUG } struct ind_ptr { uint32_t hi,lo; }; #define SBP_RECV_LEN 32 struct sbp_login_res{ uint16_t len; uint16_t id; uint16_t res0; uint16_t cmd_hi; uint32_t cmd_lo; uint16_t res1; uint16_t recon_hold; }; struct sbp_status{ #if BYTE_ORDER == BIG_ENDIAN uint8_t src:2, resp:2, dead:1, len:3; #else uint8_t len:3, dead:1, resp:2, src:2; #endif uint8_t status; uint16_t orb_hi; uint32_t orb_lo; uint32_t data[6]; }; /* src */ #define SRC_NEXT_EXISTS 0 #define SRC_NO_NEXT 1 #define SRC_UNSOL 2 /* resp */ #define SBP_REQ_CMP 0 /* request complete */ #define SBP_TRANS_FAIL 1 /* transport failure */ #define SBP_ILLE_REQ 2 /* illegal request */ #define SBP_VEND_DEP 3 /* vendor dependent */ /* status (resp == 0) */ /* 0: No additional Information to report */ /* 1: Request Type not supported */ /* 2: Speed not supported */ /* 3: Page size not supported */ /* 4: Access denied */ #define STATUS_ACCESS_DENY 4 #define STATUS_LUR 5 /* 6: Maximum payload too small */ /* 7: Reserved for future standardization */ /* 8: Resource unavailable */ #define STATUS_RES_UNAVAIL 8 /* 9: Function Rejected */ /* 10: Login ID not recognized */ /* 11: Dummy ORB completed */ /* 12: Request aborted */ /* 255: Unspecified error */ /* status (resp == 1) */ /*
Referenced object */ #define OBJ_ORB (0 << 6) /* 0: ORB */ #define OBJ_DATA (1 << 6) /* 1: Data buffer */ #define OBJ_PT (2 << 6) /* 2: Page table */ #define OBJ_UNSPEC (3 << 6) /* 3: Unable to specify */ /* Serial bus error */ /* 0: Missing acknowledge */ /* 1: Reserved; not to be used */ /* 2: Time-out error */ #define SBE_TIMEOUT 2 /* 3: Reserved; not to be used */ /* 4: Busy retry limit exceeded: ack_busy_X */ /* 5: Busy retry limit exceeded: ack_busy_A */ /* 6: Busy retry limit exceeded: ack_busy_B */ /* 7-A: Reserved for future standardization */ /* B: Tardy retry limit exceeded */ /* C: Conflict error */ /* D: Data error */ /* E: Type error */ /* F: Address error */ struct sbp_cmd_status{ #define SBP_SFMT_CURR 0 #define SBP_SFMT_DEFER 1 #if BYTE_ORDER == BIG_ENDIAN uint8_t sfmt:2, status:6; uint8_t valid:1, mark:1, eom:1, ill_len:1, s_key:4; #else uint8_t status:6, sfmt:2; uint8_t s_key:4, ill_len:1, eom:1, mark:1, valid:1; #endif uint8_t s_code; uint8_t s_qlfr; uint32_t info; uint32_t cdb; uint8_t fru; uint8_t s_keydep[3]; uint32_t vend[2]; }; #define ORB_FUN_NAMES \ /* 0 */ "LOGIN", \ /* 1 */ "QUERY LOGINS", \ /* 2 */ "Reserved", \ /* 3 */ "RECONNECT", \ /* 4 */ "SET PASSWORD", \ /* 5 */ "Reserved", \ /* 6 */ "Reserved", \ /* 7 */ "LOGOUT", \ /* 8 */ "Reserved", \ /* 9 */ "Reserved", \ /* A */ "Reserved", \ /* B */ "ABORT TASK", \ /* C */ "ABORT TASK SET", \ /* D */ "Reserved", \ /* E */ "LOGICAL UNIT RESET", \ /* F */ "TARGET RESET" Index: head/sys/dev/firewire/sbp_targ.c =================================================================== --- head/sys/dev/firewire/sbp_targ.c (revision 258779) +++ head/sys/dev/firewire/sbp_targ.c (revision 258780) @@ -1,2071 +1,2071 @@ /*- * Copyright (C) 2003 * Hidetoshi Shimokawa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * * This product includes software developed by Hidetoshi Shimokawa. * * 4. Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SBP_TARG_RECV_LEN 8 #define MAX_INITIATORS 8 #define MAX_LUN 63 #define MAX_LOGINS 63 #define MAX_NODES 63 /* * management/command block agent registers * * BASE 0xffff f001 0000 management port * BASE 0xffff f001 0020 command port for login id 0 * BASE 0xffff f001 0040 command port for login id 1 * */ #define SBP_TARG_MGM 0x10000 /* offset from 0xffff f000 000 */ #define SBP_TARG_BIND_HI 0xffff #define SBP_TARG_BIND_LO(l) (0xf0000000 + SBP_TARG_MGM + 0x20 * ((l) + 1)) #define SBP_TARG_BIND_START (((u_int64_t)SBP_TARG_BIND_HI << 32) | \ SBP_TARG_BIND_LO(-1)) #define SBP_TARG_BIND_END (((u_int64_t)SBP_TARG_BIND_HI << 32) | \ SBP_TARG_BIND_LO(MAX_LOGINS)) #define SBP_TARG_LOGIN_ID(lo) (((lo) - SBP_TARG_BIND_LO(0))/0x20) #define FETCH_MGM 0 #define FETCH_CMD 1 #define FETCH_POINTER 2 #define F_LINK_ACTIVE (1 << 0) #define F_ATIO_STARVED (1 << 1) #define F_LOGIN (1 << 2) #define F_HOLD (1 << 3) #define F_FREEZED (1 << 4) static MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode"); static int debug = 0; SYSCTL_INT(_debug, OID_AUTO, sbp_targ_debug, CTLFLAG_RW, &debug, 0, "SBP target mode debug flag"); struct sbp_targ_login { struct sbp_targ_lstate *lstate; struct fw_device *fwdev; struct sbp_login_res loginres; uint16_t fifo_hi; uint16_t last_hi; uint32_t fifo_lo; uint32_t last_lo; STAILQ_HEAD(, orb_info) orbs; STAILQ_ENTRY(sbp_targ_login) link; uint16_t hold_sec; uint16_t id; uint8_t flags; uint8_t spd; struct callout hold_callout; }; struct sbp_targ_lstate { uint16_t lun; struct sbp_targ_softc *sc; struct cam_path *path; struct ccb_hdr_slist accept_tios; struct ccb_hdr_slist immed_notifies; struct crom_chunk model; uint32_t flags; STAILQ_HEAD(, sbp_targ_login) logins; }; struct sbp_targ_softc { struct firewire_dev_comm fd; struct cam_sim *sim; struct cam_path *path; struct fw_bind fwb; int ndevs; int flags; struct crom_chunk unit; struct sbp_targ_lstate *lstate[MAX_LUN]; struct sbp_targ_lstate *black_hole; struct sbp_targ_login *logins[MAX_LOGINS]; struct mtx mtx; }; #define SBP_LOCK(sc) mtx_lock(&(sc)->mtx) #define SBP_UNLOCK(sc) mtx_unlock(&(sc)->mtx) struct corb4 { #if BYTE_ORDER == BIG_ENDIAN uint32_t n:1, rq_fmt:2, :1, dir:1, spd:3, max_payload:4, page_table_present:1, page_size:3, data_size:16; #else uint32_t data_size:16, page_size:3, page_table_present:1, max_payload:4, spd:3, dir:1, :1, rq_fmt:2, n:1; #endif }; struct morb4 { #if BYTE_ORDER == BIG_ENDIAN uint32_t n:1, rq_fmt:2, :9, fun:4, id:16; #else uint32_t id:16, fun:4, :9, rq_fmt:2, n:1; #endif }; /* * The unrestricted page table format * states that the segment length * and high base addr are in the first * 32 bits and the base low is in * the second */ struct unrestricted_page_table_fmt { uint16_t segment_len; uint16_t segment_base_high; uint32_t segment_base_low; }; struct orb_info { struct sbp_targ_softc *sc; struct fw_device *fwdev; struct sbp_targ_login *login; union ccb *ccb; struct ccb_accept_tio *atio; uint8_t state; #define ORBI_STATUS_NONE 0 #define ORBI_STATUS_FETCH 1 #define ORBI_STATUS_ATIO 2 #define ORBI_STATUS_CTIO 3 #define ORBI_STATUS_STATUS 4 #define ORBI_STATUS_POINTER 5 #define ORBI_STATUS_ABORTED 7 uint8_t refcount; uint16_t orb_hi; uint32_t orb_lo; uint32_t data_hi; uint32_t data_lo; struct corb4 orb4;
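/* host-order copy of command ORB dword 4; the bitfields mirror the ORB_CMD_* layout in sbp.h above */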
STAILQ_ENTRY(orb_info) link; uint32_t orb[8]; struct unrestricted_page_table_fmt *page_table; struct unrestricted_page_table_fmt *cur_pte; struct unrestricted_page_table_fmt *last_pte; uint32_t last_block_read; struct sbp_status status; }; static char *orb_fun_name[] = { ORB_FUN_NAMES }; static void sbp_targ_recv(struct fw_xfer *); static void sbp_targ_fetch_orb(struct sbp_targ_softc *, struct fw_device *, uint16_t, uint32_t, struct sbp_targ_login *, int); static void sbp_targ_xfer_pt(struct orb_info *); static void sbp_targ_abort(struct sbp_targ_softc *, struct orb_info *); static void sbp_targ_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "sbp_targ", device_get_unit(parent)); } static int sbp_targ_probe(device_t dev) { device_t pa; pa = device_get_parent(dev); if(device_get_unit(dev) != device_get_unit(pa)){ return(ENXIO); } device_set_desc(dev, "SBP-2/SCSI over FireWire target mode"); return (0); } static void sbp_targ_dealloc_login(struct sbp_targ_login *login) { struct orb_info *orbi, *next; if (login == NULL) { printf("%s: login = NULL\n", __func__); return; } for (orbi = STAILQ_FIRST(&login->orbs); orbi != NULL; orbi = next) { next = STAILQ_NEXT(orbi, link); if (debug) printf("%s: free orbi %p\n", __func__, orbi); free(orbi, M_SBP_TARG); orbi = NULL; } callout_stop(&login->hold_callout); STAILQ_REMOVE(&login->lstate->logins, login, sbp_targ_login, link); login->lstate->sc->logins[login->id] = NULL; if (debug) printf("%s: free login %p\n", __func__, login); free((void *)login, M_SBP_TARG); login = NULL; } static void sbp_targ_hold_expire(void *arg) { struct sbp_targ_login *login; login = (struct sbp_targ_login *)arg; if (login->flags & F_HOLD) { printf("%s: login_id=%d expired\n", __func__, login->id); sbp_targ_dealloc_login(login); } else { printf("%s: login_id=%d not held\n", __func__, login->id); } } static void sbp_targ_post_busreset(void *arg) { struct sbp_targ_softc *sc; struct crom_src *src; struct crom_chunk *root; struct crom_chunk *unit; struct sbp_targ_lstate *lstate; struct sbp_targ_login *login; int i; sc = (struct sbp_targ_softc *)arg; src = sc->fd.fc->crom_src; root = sc->fd.fc->crom_root; unit = &sc->unit; if ((sc->flags & F_FREEZED) == 0) { SBP_LOCK(sc); sc->flags |= F_FREEZED; xpt_freeze_simq(sc->sim, /*count*/1); SBP_UNLOCK(sc); } else { printf("%s: already frozen\n", __func__); } bzero(unit, sizeof(struct crom_chunk)); crom_add_chunk(src, root, unit, CROM_UDIR); crom_add_entry(unit, CSRKEY_SPEC, CSRVAL_ANSIT10); crom_add_entry(unit, CSRKEY_VER, CSRVAL_T10SBP2); crom_add_entry(unit, CSRKEY_COM_SPEC, CSRVAL_ANSIT10); crom_add_entry(unit, CSRKEY_COM_SET, CSRVAL_SCSI); crom_add_entry(unit, CROM_MGM, SBP_TARG_MGM >> 2); crom_add_entry(unit, CSRKEY_UNIT_CH, (10<<8) | 8); for (i = 0; i < MAX_LUN; i ++) { lstate = sc->lstate[i]; if (lstate == NULL) continue; crom_add_entry(unit, CSRKEY_FIRM_VER, 1); crom_add_entry(unit, CROM_LUN, i); crom_add_entry(unit, CSRKEY_MODEL, 1); crom_add_simple_text(src, unit, &lstate->model, "TargetMode"); } /* Process for reconnection hold time */ for (i = 0; i < MAX_LOGINS; i ++) { login = sc->logins[i]; if (login == NULL) continue; sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs)); if (login->flags & F_LOGIN) { login->flags |= F_HOLD; callout_reset(&login->hold_callout, hz * login->hold_sec, sbp_targ_hold_expire, (void *)login); } } } static void sbp_targ_post_explore(void *arg) { struct sbp_targ_softc *sc; sc = (struct sbp_targ_softc *)arg; SBP_LOCK(sc); sc->flags &= ~F_FREEZED; xpt_release_simq(sc->sim, /*run
queue*/TRUE); SBP_UNLOCK(sc); return; } static cam_status sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb, struct sbp_targ_lstate **lstate, int notfound_failure) { u_int lun; /* XXX 0 is the only valid target_id */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *lstate = sc->black_hole; if (debug) printf("setting black hole for this target id(%d)\n", ccb->ccb_h.target_id); return (CAM_REQ_CMP); } lun = ccb->ccb_h.target_lun; if (lun >= MAX_LUN) return (CAM_LUN_INVALID); *lstate = sc->lstate[lun]; if (notfound_failure != 0 && *lstate == NULL) { if (debug) printf("%s: lstate for lun is invalid, target(%d), lun(%d)\n", __func__, ccb->ccb_h.target_id, lun); return (CAM_PATH_INVALID); } else if (debug) printf("%s: setting lstate for tgt(%d) lun(%d)\n", __func__,ccb->ccb_h.target_id, lun); return (CAM_REQ_CMP); } static void sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb) { struct ccb_en_lun *cel = &ccb->cel; struct sbp_targ_lstate *lstate; cam_status status; status = sbp_targ_find_devs(sc, ccb, &lstate, 0); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if (cel->enable != 0) { if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printf("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; printf("Non-zero Group Codes\n"); return; } lstate = (struct sbp_targ_lstate *) malloc(sizeof(*lstate), M_SBP_TARG, M_NOWAIT | M_ZERO); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } else { if (debug) printf("%s: malloc'd lstate %p\n",__func__, lstate); } if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) { sc->black_hole = lstate; if (debug) printf("Blackhole set due to target id == %d\n", ccb->ccb_h.target_id); } else sc->lstate[ccb->ccb_h.target_lun] = lstate; memset(lstate, 0, sizeof(*lstate)); lstate->sc = sc; status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { free(lstate, M_SBP_TARG); lstate = NULL; xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); STAILQ_INIT(&lstate->logins); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printf("Lun now enabled for target mode\n"); /* bus reset */ sc->fd.fc->ibr(sc->fd.fc); } else { struct sbp_targ_login *login, *next; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; printf("Invalid lstate for this target\n"); return; } ccb->ccb_h.status = CAM_REQ_CMP; if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printf("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printf("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { printf("status != CAM_REQ_CMP\n"); return; } xpt_print_path(ccb->ccb_h.path); printf("Target mode disabled\n"); xpt_free_path(lstate->path); for (login = STAILQ_FIRST(&lstate->logins); login != NULL; login = next) { next = STAILQ_NEXT(login, link); sbp_targ_dealloc_login(login); } if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) sc->black_hole = NULL; else sc->lstate[ccb->ccb_h.target_lun] = NULL; if (debug) printf("%s: free lstate %p\n", __func__, lstate);
free(lstate, M_SBP_TARG); lstate = NULL; /* bus reset */ sc->fd.fc->ibr(sc->fd.fc); } } static void sbp_targ_send_lstate_events(struct sbp_targ_softc *sc, struct sbp_targ_lstate *lstate) { #if 0 struct ccb_hdr *ccbh; struct ccb_immediate_notify *inot; printf("%s: not implemented yet\n", __func__); #endif } static __inline void sbp_targ_remove_orb_info_locked(struct sbp_targ_login *login, struct orb_info *orbi) { STAILQ_REMOVE(&login->orbs, orbi, orb_info, link); } static __inline void sbp_targ_remove_orb_info(struct sbp_targ_login *login, struct orb_info *orbi) { SBP_LOCK(orbi->sc); STAILQ_REMOVE(&login->orbs, orbi, orb_info, link); SBP_UNLOCK(orbi->sc); } /* * tag_id/init_id encoding * * tag_id and init_id have only 32 bits each. * scsi_target can handle only a very limited number (up to 15) of init_ids. * We have to encode the 48-bit ORB address and the 64-bit EUI64 into these * variables. * * tag_id represents the lower 32 bits of the ORB address. * init_id represents the login_id. * */ static struct orb_info * sbp_targ_get_orb_info(struct sbp_targ_lstate *lstate, u_int tag_id, u_int init_id) { struct sbp_targ_login *login; struct orb_info *orbi; login = lstate->sc->logins[init_id]; if (login == NULL) { printf("%s: no such login\n", __func__); return (NULL); } STAILQ_FOREACH(orbi, &login->orbs, link) if (orbi->orb_lo == tag_id) goto found; printf("%s: orb not found tag_id=0x%08x init_id=%d\n", __func__, tag_id, init_id); return (NULL); found: return (orbi); } static void sbp_targ_abort(struct sbp_targ_softc *sc, struct orb_info *orbi) { struct orb_info *norbi; SBP_LOCK(sc); for (; orbi != NULL; orbi = norbi) { printf("%s: status=%d ccb=%p\n", __func__, orbi->state, orbi->ccb); norbi = STAILQ_NEXT(orbi, link); if (orbi->state != ORBI_STATUS_ABORTED) { if (orbi->ccb != NULL) { orbi->ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(orbi->ccb); orbi->ccb = NULL; } if (orbi->state <= ORBI_STATUS_ATIO) { sbp_targ_remove_orb_info_locked(orbi->login, orbi); if (debug) printf("%s: free orbi %p\n", __func__, orbi); free(orbi, M_SBP_TARG); orbi = NULL; } else orbi->state = ORBI_STATUS_ABORTED; } } SBP_UNLOCK(sc); } static void sbp_targ_free_orbi(struct fw_xfer *xfer) { struct orb_info *orbi; if (xfer->resp != 0) { /* XXX */ printf("%s: xfer->resp = %d\n", __func__, xfer->resp); } orbi = (struct orb_info *)xfer->sc; if ( orbi->page_table != NULL ) { if (debug) printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table); free(orbi->page_table, M_SBP_TARG); orbi->page_table = NULL; } if (debug) printf("%s: free orbi %p\n", __func__, orbi); free(orbi, M_SBP_TARG); orbi = NULL; fw_xfer_free(xfer); } static void sbp_targ_status_FIFO(struct orb_info *orbi, uint32_t fifo_hi, uint32_t fifo_lo, int dequeue) { struct fw_xfer *xfer; if (dequeue) sbp_targ_remove_orb_info(orbi->login, orbi); xfer = fwmem_write_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400, fifo_hi, fifo_lo, sizeof(uint32_t) * (orbi->status.len + 1), (char *)&orbi->status, sbp_targ_free_orbi); if (xfer == NULL) { /* XXX */ printf("%s: xfer == NULL\n", __func__); } } /* * Generate the appropriate CAM status for the * target.
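* The SCSI status and any sense data in the CCB are repacked into an SBP-2 status block (struct sbp_status plus struct sbp_cmd_status) and written to the initiator's status FIFO below.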
*/ static void sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb) { struct sbp_status *sbp_status; #if 0 struct orb_info *norbi; #endif sbp_status = &orbi->status; orbi->state = ORBI_STATUS_STATUS; sbp_status->resp = 0; /* XXX */ sbp_status->status = 0; /* XXX */ sbp_status->dead = 0; /* XXX */ ccb->ccb_h.status = CAM_REQ_CMP; switch (ccb->csio.scsi_status) { case SCSI_STATUS_OK: if (debug) printf("%s: STATUS_OK\n", __func__); sbp_status->len = 1; break; case SCSI_STATUS_CHECK_COND: if (debug) printf("%s: STATUS SCSI_STATUS_CHECK_COND\n", __func__); goto process_scsi_status; case SCSI_STATUS_BUSY: if (debug) printf("%s: STATUS SCSI_STATUS_BUSY\n", __func__); goto process_scsi_status; case SCSI_STATUS_CMD_TERMINATED: process_scsi_status: { struct sbp_cmd_status *sbp_cmd_status; struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; uint8_t stream_bits; uint8_t sks[3]; uint64_t info; int64_t sinfo; int sense_len; sbp_cmd_status = (struct sbp_cmd_status *)&sbp_status->data[0]; sbp_cmd_status->status = ccb->csio.scsi_status; sense = &ccb->csio.sense_data; #if 0 /* XXX What should we do? */ #if 0 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link)); #else norbi = STAILQ_NEXT(orbi, link); while (norbi) { printf("%s: status=%d\n", __func__, norbi->state); if (norbi->ccb != NULL) { norbi->ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(norbi->ccb); norbi->ccb = NULL; } sbp_targ_remove_orb_info_locked(orbi->login, norbi); norbi = STAILQ_NEXT(norbi, link); free(norbi, M_SBP_TARG); } #endif #endif sense_len = ccb->csio.sense_len - ccb->csio.sense_resid; scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 0); switch (error_code) { case SSD_CURRENT_ERROR: case SSD_DESC_CURRENT_ERROR: sbp_cmd_status->sfmt = SBP_SFMT_CURR; break; default: sbp_cmd_status->sfmt = SBP_SFMT_DEFER; break; } if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info, &sinfo) == 0) { uint32_t info_trunc; sbp_cmd_status->valid = 1; info_trunc = info; sbp_cmd_status->info = htobe32(info_trunc); } else { sbp_cmd_status->valid = 0; } sbp_cmd_status->s_key = sense_key; if (scsi_get_stream_info(sense, sense_len, NULL, &stream_bits) == 0) { sbp_cmd_status->mark = (stream_bits & SSD_FILEMARK) ? 1 : 0; sbp_cmd_status->eom = (stream_bits & SSD_EOM) ? 1 : 0; sbp_cmd_status->ill_len = (stream_bits & SSD_ILI) ? 1 : 0; } else { sbp_cmd_status->mark = 0; sbp_cmd_status->eom = 0; sbp_cmd_status->ill_len = 0; } /* add_sense_code(_qual), info, cmd_spec_info */ sbp_status->len = 4; if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND, &info, &sinfo) == 0) { uint32_t cmdspec_trunc; cmdspec_trunc = info; sbp_cmd_status->cdb = htobe32(cmdspec_trunc); } sbp_cmd_status->s_code = asc; sbp_cmd_status->s_qlfr = ascq; if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &info, &sinfo) == 0) { sbp_cmd_status->fru = (uint8_t)info; sbp_status->len = 5; } else { sbp_cmd_status->fru = 0; } if (scsi_get_sks(sense, sense_len, sks) == 0) { bcopy(sks, &sbp_cmd_status->s_keydep[0], sizeof(sks)); sbp_status->len = 5; ccb->ccb_h.status |= CAM_SENT_SENSE; } break; } default: printf("%s: unknown scsi status 0x%x\n", __func__, sbp_status->status); } sbp_targ_status_FIFO(orbi, orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1); } /* * Invoked as a callback handler from fwmem_read/write_block * * Process read/write of initiator address space * completion and pass status onto the backend target.
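* A failed xfer marks the status block dead (SBP_TRANS_FAIL) and aborts the remaining ORBs queued on the login.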
* If this is a partial read/write for a CCB then * we decrement the orbi's refcount to indicate * the status of the read/write is complete */ static void sbp_targ_cam_done(struct fw_xfer *xfer) { struct orb_info *orbi; union ccb *ccb; orbi = (struct orb_info *)xfer->sc; if (debug) printf("%s: resp=%d refcount=%d\n", __func__, xfer->resp, orbi->refcount); if (xfer->resp != 0) { printf("%s: xfer->resp = %d\n", __func__, xfer->resp); orbi->status.resp = SBP_TRANS_FAIL; orbi->status.status = OBJ_DATA | SBE_TIMEOUT/*XXX*/; orbi->status.dead = 1; sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link)); } orbi->refcount --; ccb = orbi->ccb; if (orbi->refcount == 0) { orbi->ccb = NULL; if (orbi->state == ORBI_STATUS_ABORTED) { if (debug) printf("%s: orbi aborted\n", __func__); sbp_targ_remove_orb_info(orbi->login, orbi); if (orbi->page_table != NULL) { if (debug) printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table); free(orbi->page_table, M_SBP_TARG); } if (debug) printf("%s: free orbi %p\n", __func__, orbi); free(orbi, M_SBP_TARG); orbi = NULL; } else if (orbi->status.resp == ORBI_STATUS_NONE) { if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { if (debug) printf("%s: CAM_SEND_STATUS set %0x\n", __func__, ccb->ccb_h.flags); sbp_targ_send_status(orbi, ccb); } else { if (debug) printf("%s: CAM_SEND_STATUS not set %0x\n", __func__, ccb->ccb_h.flags); ccb->ccb_h.status = CAM_REQ_CMP; } SBP_LOCK(orbi->sc); xpt_done(ccb); SBP_UNLOCK(orbi->sc); } else { orbi->status.len = 1; sbp_targ_status_FIFO(orbi, orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1); ccb->ccb_h.status = CAM_REQ_ABORTED; SBP_LOCK(orbi->sc); xpt_done(ccb); SBP_UNLOCK(orbi->sc); } } fw_xfer_free(xfer); } static cam_status sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb) { union ccb *accb; struct sbp_targ_lstate *lstate; struct ccb_hdr_slist *list; struct ccb_hdr *curelm; int found; cam_status status; status = sbp_targ_find_devs(sc, ccb, &lstate, 0); if (status != CAM_REQ_CMP) return (status); accb = ccb->cab.abort_ccb; if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) list = &lstate->accept_tios; else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) list = &lstate->immed_notifies; else return (CAM_UA_ABORT); curelm = SLIST_FIRST(list); found = 0; if (curelm == &accb->ccb_h) { found = 1; SLIST_REMOVE_HEAD(list, sim_links.sle); } else { while(curelm != NULL) { struct ccb_hdr *nextelm; nextelm = SLIST_NEXT(curelm, sim_links.sle); if (nextelm == &accb->ccb_h) { found = 1; SLIST_NEXT(curelm, sim_links.sle) = SLIST_NEXT(nextelm, sim_links.sle); break; } curelm = nextelm; } } if (found) { accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); return (CAM_REQ_CMP); } printf("%s: not found\n", __func__); return (CAM_PATH_INVALID); } /* * directly execute a read or write to the initiator * address space and set hand(sbp_targ_cam_done) to * process the completion from the SIM to the target. * set orbi->refcount to indicate that a read/write * is in flight to/from the initiator.
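* Transfers are issued in chunks of at most 2048 bytes, with one refcount reference held per outstanding chunk.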
*/ static void sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset, uint16_t dst_hi, uint32_t dst_lo, u_int size, void (*hand)(struct fw_xfer *)) { struct fw_xfer *xfer; u_int len, ccb_dir, off = 0; char *ptr; if (debug > 1) printf("%s: offset=%d size=%d\n", __func__, offset, size); ccb_dir = orbi->ccb->ccb_h.flags & CAM_DIR_MASK; ptr = (char *)orbi->ccb->csio.data_ptr + offset; while (size > 0) { /* XXX assume dst_lo + off doesn't overflow */ len = MIN(size, 2048 /* XXX */); size -= len; orbi->refcount ++; if (ccb_dir == CAM_DIR_OUT) { if (debug) printf("%s: CAM_DIR_OUT --> read block in?\n",__func__); xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400, dst_hi, dst_lo + off, len, ptr + off, hand); } else { if (debug) printf("%s: CAM_DIR_IN --> write block out?\n",__func__); xfer = fwmem_write_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400, dst_hi, dst_lo + off, len, ptr + off, hand); } if (xfer == NULL) { printf("%s: xfer == NULL\n", __func__); /* XXX what should we do?? */ orbi->refcount --; } off += len; } } static void sbp_targ_pt_done(struct fw_xfer *xfer) { struct orb_info *orbi; struct unrestricted_page_table_fmt *pt; uint32_t i; orbi = (struct orb_info *)xfer->sc; if (orbi->state == ORBI_STATUS_ABORTED) { if (debug) printf("%s: orbi aborted\n", __func__); sbp_targ_remove_orb_info(orbi->login, orbi); if (debug) { printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table); printf("%s: free orbi %p\n", __func__, orbi); } free(orbi->page_table, M_SBP_TARG); free(orbi, M_SBP_TARG); orbi = NULL; fw_xfer_free(xfer); return; } if (xfer->resp != 0) { printf("%s: xfer->resp = %d\n", __func__, xfer->resp); orbi->status.resp = SBP_TRANS_FAIL; orbi->status.status = OBJ_PT | SBE_TIMEOUT/*XXX*/; orbi->status.dead = 1; orbi->status.len = 1; sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link)); if (debug) printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table); sbp_targ_status_FIFO(orbi, orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1); free(orbi->page_table, M_SBP_TARG); orbi->page_table = NULL; fw_xfer_free(xfer); return; } orbi->refcount++; /* * Set endianness here so we don't have * to deal with it later */ for (i = 0, pt = orbi->page_table; i < orbi->orb4.data_size; i++, pt++) { pt->segment_len = ntohs(pt->segment_len); if (debug) printf("%s:segment_len = %u\n", __func__,pt->segment_len); pt->segment_base_high = ntohs(pt->segment_base_high); pt->segment_base_low = ntohl(pt->segment_base_low); } sbp_targ_xfer_pt(orbi); orbi->refcount--; if (orbi->refcount == 0) printf("%s: refcount == 0\n", __func__); fw_xfer_free(xfer); return; } static void sbp_targ_xfer_pt(struct orb_info *orbi) { union ccb *ccb; uint32_t res, offset, len; ccb = orbi->ccb; if (debug) printf("%s: dxfer_len=%d\n", __func__, ccb->csio.dxfer_len); res = ccb->csio.dxfer_len; /* * If the page table required multiple CTIO's to * complete, then cur_pte is non-NULL * and we need to start from the last position. * If this is the first pass over a page table * then we just start at the beginning of the page * table. * * Parse the unrestricted page table and figure out where we need * to shove the data from this read request.
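* A partially consumed entry is adjusted in place (segment_base_low and segment_len, with a carry into segment_base_high on a 4GB wrap) so the next CTIO resumes exactly where this one stopped.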
*/ for (offset = 0, len = 0; (res != 0) && (orbi->cur_pte < orbi->last_pte); offset += len) { len = MIN(orbi->cur_pte->segment_len, res); res -= len; if (debug) printf("%s:page_table: %04x:%08x segment_len(%u) res(%u) len(%u)\n", __func__, orbi->cur_pte->segment_base_high, orbi->cur_pte->segment_base_low, orbi->cur_pte->segment_len, res, len); sbp_targ_xfer_buf(orbi, offset, orbi->cur_pte->segment_base_high, orbi->cur_pte->segment_base_low, len, sbp_targ_cam_done); /* * If we have only written partially to * this page table, then we need to save * our position for the next CTIO. If we * have completed the page table, then we * are safe to move on to the next entry. */ if (len == orbi->cur_pte->segment_len) { orbi->cur_pte++; } else { uint32_t saved_base_low; /* Handle transfers that cross a 4GB boundary. */ saved_base_low = orbi->cur_pte->segment_base_low; orbi->cur_pte->segment_base_low += len; if (orbi->cur_pte->segment_base_low < saved_base_low) orbi->cur_pte->segment_base_high++; orbi->cur_pte->segment_len -= len; } } if (debug) { printf("%s: base_low(%08x) page_table_off(%p) last_block(%u)\n", __func__, orbi->cur_pte->segment_base_low, orbi->cur_pte, orbi->last_block_read); } if (res != 0) printf("Warning - short pt encountered. " "Could not transfer all data.\n"); return; } /* * Create page table in local memory * and transfer it from the initiator * in order to know where we are supposed * to put the data. */ static void sbp_targ_fetch_pt(struct orb_info *orbi) { struct fw_xfer *xfer; /* * Pull in page table from initiator * and setup for data from our * backend device. */ if (orbi->page_table == NULL) { orbi->page_table = malloc(orbi->orb4.data_size* sizeof(struct unrestricted_page_table_fmt), M_SBP_TARG, M_NOWAIT|M_ZERO); if (orbi->page_table == NULL) goto error; orbi->cur_pte = orbi->page_table; orbi->last_pte = orbi->page_table + orbi->orb4.data_size; orbi->last_block_read = orbi->orb4.data_size; if (debug && orbi->page_table != NULL) printf("%s: malloc'd orbi->page_table(%p), orb4.data_size(%u)\n", __func__, orbi->page_table, orbi->orb4.data_size); xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400, orbi->data_hi, orbi->data_lo, orbi->orb4.data_size* sizeof(struct unrestricted_page_table_fmt), (void *)orbi->page_table, sbp_targ_pt_done); if (xfer != NULL) return; } else { /* * This is a CTIO for a page table we have * already malloc'd, so just directly invoke * the xfer function on the orbi. 
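 *
 * Each unrestricted page table entry is 8 bytes on the wire (a 16-bit
 * segment_len, a 16-bit segment_base_high and a 32-bit
 * segment_base_low, per SBP-2), so the fetch above pulls in
 * orb4.data_size * 8 bytes with a single block read and
 * sbp_targ_pt_done() byte-swaps every entry once.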
*/ sbp_targ_xfer_pt(orbi); return; } error: orbi->ccb->ccb_h.status = CAM_RESRC_UNAVAIL; if (debug) printf("%s: free orbi->page_table %p due to xfer == NULL\n", __func__, orbi->page_table); if (orbi->page_table != NULL) { free(orbi->page_table, M_SBP_TARG); orbi->page_table = NULL; } xpt_done(orbi->ccb); return; } static void sbp_targ_action1(struct cam_sim *sim, union ccb *ccb) { struct sbp_targ_softc *sc; struct sbp_targ_lstate *lstate; cam_status status; u_int ccb_dir; sc = (struct sbp_targ_softc *)cam_sim_softc(sim); status = sbp_targ_find_devs(sc, ccb, &lstate, TRUE); switch (ccb->ccb_h.func_code) { case XPT_CONT_TARGET_IO: { struct orb_info *orbi; if (debug) printf("%s: XPT_CONT_TARGET_IO (0x%08x)\n", __func__, ccb->csio.tag_id); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; xpt_done(ccb); break; } /* XXX transfer from/to initiator */ orbi = sbp_targ_get_orb_info(lstate, ccb->csio.tag_id, ccb->csio.init_id); if (orbi == NULL) { ccb->ccb_h.status = CAM_REQ_ABORTED; /* XXX */ xpt_done(ccb); break; } if (orbi->state == ORBI_STATUS_ABORTED) { if (debug) printf("%s: ctio aborted\n", __func__); sbp_targ_remove_orb_info_locked(orbi->login, orbi); if (debug) printf("%s: free orbi %p\n", __func__, orbi); free(orbi, M_SBP_TARG); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); break; } orbi->state = ORBI_STATUS_CTIO; orbi->ccb = ccb; ccb_dir = ccb->ccb_h.flags & CAM_DIR_MASK; /* XXX */ if (ccb->csio.dxfer_len == 0) ccb_dir = CAM_DIR_NONE; /* Sanity check */ if (ccb_dir == CAM_DIR_IN && orbi->orb4.dir == 0) printf("%s: direction mismatch\n", __func__); /* check page table */ if (ccb_dir != CAM_DIR_NONE && orbi->orb4.page_table_present) { if (debug) printf("%s: page_table_present\n", __func__); if (orbi->orb4.page_size != 0) { printf("%s: unsupported pagesize %d != 0\n", __func__, orbi->orb4.page_size); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } sbp_targ_fetch_pt(orbi); break; } /* No page table; transfer directly using the ORB data descriptor. */ if (ccb_dir != CAM_DIR_NONE) { sbp_targ_xfer_buf(orbi, 0, orbi->data_hi, orbi->data_lo, MIN(orbi->orb4.data_size, ccb->csio.dxfer_len), sbp_targ_cam_done); if (orbi->orb4.data_size > ccb->csio.dxfer_len) { orbi->data_lo += ccb->csio.dxfer_len; orbi->orb4.data_size -= ccb->csio.dxfer_len; } } if (ccb_dir == CAM_DIR_NONE) { if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { /* XXX */ SBP_UNLOCK(sc); sbp_targ_send_status(orbi, ccb); SBP_LOCK(sc); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } break; } case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; xpt_done(ccb); break; } SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, sim_links.sle); ccb->ccb_h.status = CAM_REQ_INPROG; if ((lstate->flags & F_ATIO_STARVED) != 0) { struct sbp_targ_login *login; if (debug) printf("%s: new atio arrived\n", __func__); lstate->flags &= ~F_ATIO_STARVED; STAILQ_FOREACH(login, &lstate->logins, link) if ((login->flags & F_ATIO_STARVED) != 0) { login->flags &= ~F_ATIO_STARVED; sbp_targ_fetch_orb(lstate->sc, login->fwdev, login->last_hi, login->last_lo, login, FETCH_CMD); } } break; case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */ case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; xpt_done(ccb); break; } SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, sim_links.sle); ccb->ccb_h.status = CAM_REQ_INPROG; sbp_targ_send_lstate_events(sc, lstate); break; case XPT_EN_LUN: sbp_targ_en_lun(sc, ccb); xpt_done(ccb); break; case XPT_PATH_INQ: { struct
ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; cpi->transport = XPORT_SPI; /* FIXME add XPORT_FW type to cam */ cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = 7; /* XXX */ cpi->max_lun = MAX_LUN - 1; cpi->initiator_id = 7; /* XXX */ cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 400 * 1000 / 8; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "SBP_TARG", HBA_IDLEN); strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMEDIATE_NOTIFY: ccb->ccb_h.status = sbp_targ_abort_ccb(sc, ccb); break; case XPT_CONT_TARGET_IO: /* XXX */ ccb->ccb_h.status = CAM_UA_ABORT; break; default: printf("%s: aborting unknown function %d\n", __func__, accb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); break; } #ifdef CAM_NEW_TRAN_CODE case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_FW; /* should have a FireWire */ cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; #if 0 printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:\n", device_get_nameunit(sc->fd.dev), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); #endif cts->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } #endif default: printf("%s: unknown function 0x%x\n", __func__, ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; } return; } static void sbp_targ_action(struct cam_sim *sim, union ccb *ccb) { int s; s = splfw(); sbp_targ_action1(sim, ccb); splx(s); } static void sbp_targ_poll(struct cam_sim *sim) { /* XXX */ return; } static void sbp_targ_cmd_handler(struct fw_xfer *xfer) { struct fw_pkt *fp; uint32_t *orb; struct corb4 *orb4; struct orb_info *orbi; struct ccb_accept_tio *atio; u_char *bytes; int i; orbi = (struct orb_info *)xfer->sc; if (xfer->resp != 0) { printf("%s: xfer->resp = %d\n", __func__, xfer->resp); orbi->status.resp = SBP_TRANS_FAIL; orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/; orbi->status.dead = 1; orbi->status.len = 1; sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link)); sbp_targ_status_FIFO(orbi, orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1); fw_xfer_free(xfer); return; } fp = &xfer->recv.hdr; atio = orbi->atio; if (orbi->state == ORBI_STATUS_ABORTED) { printf("%s: aborted\n", __func__); sbp_targ_remove_orb_info(orbi->login, orbi); free(orbi, M_SBP_TARG); atio->ccb_h.status = CAM_REQ_ABORTED; SBP_LOCK(orbi->sc); xpt_done((union ccb*)atio); SBP_UNLOCK(orbi->sc); goto done0; } orbi->state = ORBI_STATUS_ATIO; orb = orbi->orb; /* swap payload except SCSI command */ for (i = 0; i < 5; i++) orb[i] = ntohl(orb[i]); orb4 = (struct corb4 *)&orb[4]; if (orb4->rq_fmt != 0) { /* XXX */ printf("%s: rq_fmt(%d) != 0\n", __func__, orb4->rq_fmt); } atio->ccb_h.target_id = 0; /* XXX */ atio->ccb_h.target_lun = orbi->login->lstate->lun; atio->sense_len
= 0; atio->tag_action = MSG_SIMPLE_TASK; atio->tag_id = orbi->orb_lo; atio->init_id = orbi->login->id; atio->ccb_h.flags |= CAM_TAG_ACTION_VALID; bytes = (u_char *)&orb[5]; if (debug) printf("%s: %p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", __func__, (void *)atio, bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]); switch (bytes[0] >> 5) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printf("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, bytes, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; /* next ORB */ if ((orb[0] & (1<<31)) == 0) { if (debug) printf("%s: fetch next orb\n", __func__); orbi->status.src = SRC_NEXT_EXISTS; sbp_targ_fetch_orb(orbi->sc, orbi->fwdev, orb[0], orb[1], orbi->login, FETCH_CMD); } else { orbi->status.src = SRC_NO_NEXT; orbi->login->flags &= ~F_LINK_ACTIVE; } orbi->data_hi = orb[2]; orbi->data_lo = orb[3]; orbi->orb4 = *orb4; SBP_LOCK(orbi->sc); xpt_done((union ccb*)atio); SBP_UNLOCK(orbi->sc); done0: fw_xfer_free(xfer); return; } static struct sbp_targ_login * sbp_targ_get_login(struct sbp_targ_softc *sc, struct fw_device *fwdev, int lun) { struct sbp_targ_lstate *lstate; struct sbp_targ_login *login; int i; lstate = sc->lstate[lun]; STAILQ_FOREACH(login, &lstate->logins, link) if (login->fwdev == fwdev) return (login); for (i = 0; i < MAX_LOGINS; i ++) if (sc->logins[i] == NULL) goto found; printf("%s: increase MAX_LOGIN\n", __func__); return (NULL); found: login = (struct sbp_targ_login *)malloc( sizeof(struct sbp_targ_login), M_SBP_TARG, M_NOWAIT | M_ZERO); if (login == NULL) { printf("%s: malloc failed\n", __func__); return (NULL); } login->id = i; login->fwdev = fwdev; login->lstate = lstate; login->last_hi = 0xffff; login->last_lo = 0xffffffff; login->hold_sec = 1; STAILQ_INIT(&login->orbs); CALLOUT_INIT(&login->hold_callout); sc->logins[i] = login; return (login); } static void sbp_targ_mgm_handler(struct fw_xfer *xfer) { struct sbp_targ_lstate *lstate; struct sbp_targ_login *login; struct fw_pkt *fp; uint32_t *orb; struct morb4 *orb4; struct orb_info *orbi; int i; orbi = (struct orb_info *)xfer->sc; if (xfer->resp != 0) { printf("%s: xfer->resp = %d\n", __func__, xfer->resp); orbi->status.resp = SBP_TRANS_FAIL; orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/; orbi->status.dead = 1; orbi->status.len = 1; sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link)); sbp_targ_status_FIFO(orbi, orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/0); fw_xfer_free(xfer); return; } fp = &xfer->recv.hdr; orb = orbi->orb; /* swap payload */ for (i = 0; i < 8; i ++) { orb[i] = ntohl(orb[i]); } orb4 = (struct morb4 *)&orb[4]; if (debug) printf("%s: %s\n", __func__, orb_fun_name[orb4->fun]); orbi->status.src = SRC_NO_NEXT; switch (orb4->fun << 16) { case ORB_FUN_LGI: { int exclusive = 0, lun; if (orb[4] & ORB_EXV) exclusive = 1; lun = orb4->id; lstate = orbi->sc->lstate[lun]; if (lun >= MAX_LUN || lstate == NULL || (exclusive && STAILQ_FIRST(&lstate->logins) != NULL && STAILQ_FIRST(&lstate->logins)->fwdev != orbi->fwdev) ) { /* error */ orbi->status.dead = 1; orbi->status.status = STATUS_ACCESS_DENY; orbi->status.len = 1; break; } /* allocate login */ login = sbp_targ_get_login(orbi->sc, orbi->fwdev, lun); if (login == NULL) { printf("%s: sbp_targ_get_login failed\n", __func__); orbi->status.dead = 1; 
orbi->status.status = STATUS_RES_UNAVAIL; orbi->status.len = 1; break; } printf("%s: login id=%d\n", __func__, login->id); login->fifo_hi = orb[6]; login->fifo_lo = orb[7]; login->loginres.len = htons(sizeof(uint32_t) * 4); login->loginres.id = htons(login->id); login->loginres.cmd_hi = htons(SBP_TARG_BIND_HI); login->loginres.cmd_lo = htonl(SBP_TARG_BIND_LO(login->id)); login->loginres.recon_hold = htons(login->hold_sec); STAILQ_INSERT_TAIL(&lstate->logins, login, link); fwmem_write_block(orbi->fwdev, NULL, /*spd*/FWSPD_S400, orb[2], orb[3], sizeof(struct sbp_login_res), (void *)&login->loginres, fw_asy_callback_free); /* XXX return status after loginres is successfully written */ break; } case ORB_FUN_RCN: login = orbi->sc->logins[orb4->id]; if (login != NULL && login->fwdev == orbi->fwdev) { login->flags &= ~F_HOLD; callout_stop(&login->hold_callout); printf("%s: reconnected id=%d\n", __func__, login->id); } else { orbi->status.dead = 1; orbi->status.status = STATUS_ACCESS_DENY; printf("%s: reconnection failed id=%d\n", __func__, orb4->id); } break; case ORB_FUN_LGO: login = orbi->sc->logins[orb4->id]; if (login->fwdev != orbi->fwdev) { printf("%s: wrong initiator\n", __func__); break; } sbp_targ_dealloc_login(login); break; default: printf("%s: %s not implemented yet\n", __func__, orb_fun_name[orb4->fun]); break; } orbi->status.len = 1; sbp_targ_status_FIFO(orbi, orb[6], orb[7], /*dequeue*/0); fw_xfer_free(xfer); return; } static void sbp_targ_pointer_handler(struct fw_xfer *xfer) { struct orb_info *orbi; uint32_t orb0, orb1; orbi = (struct orb_info *)xfer->sc; if (xfer->resp != 0) { printf("%s: xfer->resp = %d\n", __func__, xfer->resp); goto done; } orb0 = ntohl(orbi->orb[0]); orb1 = ntohl(orbi->orb[1]); - if ((orb0 & (1 << 31)) != 0) { + if ((orb0 & (1U << 31)) != 0) { printf("%s: invalid pointer\n", __func__); goto done; } sbp_targ_fetch_orb(orbi->login->lstate->sc, orbi->fwdev, (uint16_t)orb0, orb1, orbi->login, FETCH_CMD); done: free(orbi, M_SBP_TARG); fw_xfer_free(xfer); return; } static void sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev, uint16_t orb_hi, uint32_t orb_lo, struct sbp_targ_login *login, int mode) { struct orb_info *orbi; if (debug) printf("%s: fetch orb %04x:%08x\n", __func__, orb_hi, orb_lo); orbi = malloc(sizeof(struct orb_info), M_SBP_TARG, M_NOWAIT | M_ZERO); if (orbi == NULL) { printf("%s: malloc failed\n", __func__); return; } orbi->sc = sc; orbi->fwdev = fwdev; orbi->login = login; orbi->orb_hi = orb_hi; orbi->orb_lo = orb_lo; orbi->status.orb_hi = htons(orb_hi); orbi->status.orb_lo = htonl(orb_lo); orbi->page_table = NULL; switch (mode) { case FETCH_MGM: fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo, sizeof(uint32_t) * 8, &orbi->orb[0], sbp_targ_mgm_handler); break; case FETCH_CMD: orbi->state = ORBI_STATUS_FETCH; login->last_hi = orb_hi; login->last_lo = orb_lo; login->flags |= F_LINK_ACTIVE; /* dequeue */ SBP_LOCK(sc); orbi->atio = (struct ccb_accept_tio *) SLIST_FIRST(&login->lstate->accept_tios); if (orbi->atio == NULL) { SBP_UNLOCK(sc); printf("%s: no free atio\n", __func__); login->lstate->flags |= F_ATIO_STARVED; login->flags |= F_ATIO_STARVED; #if 0 /* XXX ??
*/ login->fwdev = fwdev; #endif break; } SLIST_REMOVE_HEAD(&login->lstate->accept_tios, sim_links.sle); STAILQ_INSERT_TAIL(&login->orbs, orbi, link); SBP_UNLOCK(sc); fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo, sizeof(uint32_t) * 8, &orbi->orb[0], sbp_targ_cmd_handler); break; case FETCH_POINTER: orbi->state = ORBI_STATUS_POINTER; login->flags |= F_LINK_ACTIVE; fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo, sizeof(uint32_t) * 2, &orbi->orb[0], sbp_targ_pointer_handler); break; default: printf("%s: invalid mode %d\n", __func__, mode); } } static void sbp_targ_resp_callback(struct fw_xfer *xfer) { struct sbp_targ_softc *sc; int s; if (debug) printf("%s: xfer=%p\n", __func__, xfer); sc = (struct sbp_targ_softc *)xfer->sc; fw_xfer_unload(xfer); xfer->recv.pay_len = SBP_TARG_RECV_LEN; xfer->hand = sbp_targ_recv; s = splfw(); STAILQ_INSERT_TAIL(&sc->fwb.xferlist, xfer, link); splx(s); } static int sbp_targ_cmd(struct fw_xfer *xfer, struct fw_device *fwdev, int login_id, int reg) { struct sbp_targ_login *login; struct sbp_targ_softc *sc; int rtcode = 0; if (login_id < 0 || login_id >= MAX_LOGINS) return(RESP_ADDRESS_ERROR); sc = (struct sbp_targ_softc *)xfer->sc; login = sc->logins[login_id]; if (login == NULL) return(RESP_ADDRESS_ERROR); if (login->fwdev != fwdev) { /* XXX */ return(RESP_ADDRESS_ERROR); } switch (reg) { case 0x08: /* ORB_POINTER */ if (debug) printf("%s: ORB_POINTER(%d)\n", __func__, login_id); if ((login->flags & F_LINK_ACTIVE) != 0) { if (debug) printf("link active (ORB_POINTER)\n"); break; } sbp_targ_fetch_orb(sc, fwdev, ntohl(xfer->recv.payload[0]), ntohl(xfer->recv.payload[1]), login, FETCH_CMD); break; case 0x04: /* AGENT_RESET */ if (debug) printf("%s: AGENT RESET(%d)\n", __func__, login_id); login->last_hi = 0xffff; login->last_lo = 0xffffffff; sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs)); break; case 0x10: /* DOORBELL */ if (debug) printf("%s: DOORBELL(%d)\n", __func__, login_id); if (login->last_hi == 0xffff && login->last_lo == 0xffffffff) { printf("%s: no previous pointer(DOORBELL)\n", __func__); break; } if ((login->flags & F_LINK_ACTIVE) != 0) { if (debug) printf("link active (DOORBELL)\n"); break; } sbp_targ_fetch_orb(sc, fwdev, login->last_hi, login->last_lo, login, FETCH_POINTER); break; case 0x00: /* AGENT_STATE */ printf("%s: AGENT_STATE (%d:ignore)\n", __func__, login_id); break; case 0x14: /* UNSOLICITED_STATE_ENABLE */ printf("%s: UNSOLICITED_STATE_ENABLE (%d:ignore)\n", __func__, login_id); break; default: printf("%s: invalid register %d(%d)\n", __func__, reg, login_id); rtcode = RESP_ADDRESS_ERROR; } return (rtcode); } static int sbp_targ_mgm(struct fw_xfer *xfer, struct fw_device *fwdev) { struct sbp_targ_softc *sc; struct fw_pkt *fp; sc = (struct sbp_targ_softc *)xfer->sc; fp = &xfer->recv.hdr; if (fp->mode.wreqb.tcode != FWTCODE_WREQB){ printf("%s: tcode = %d\n", __func__, fp->mode.wreqb.tcode); return(RESP_TYPE_ERROR); } sbp_targ_fetch_orb(sc, fwdev, ntohl(xfer->recv.payload[0]), ntohl(xfer->recv.payload[1]), NULL, FETCH_MGM); return(0); } static void sbp_targ_recv(struct fw_xfer *xfer) { struct fw_pkt *fp, *sfp; struct fw_device *fwdev; uint32_t lo; int s, rtcode; struct sbp_targ_softc *sc; s = splfw(); sc = (struct sbp_targ_softc *)xfer->sc; fp = &xfer->recv.hdr; fwdev = fw_noderesolve_nodeid(sc->fd.fc, fp->mode.wreqb.src & 0x3f); if (fwdev == NULL) { printf("%s: cannot resolve nodeid=%d\n", __func__, fp->mode.wreqb.src & 0x3f); rtcode = RESP_TYPE_ERROR; /* XXX */ goto done; } lo = 
fp->mode.wreqb.dest_lo; if (lo == SBP_TARG_BIND_LO(-1)) rtcode = sbp_targ_mgm(xfer, fwdev); else if (lo >= SBP_TARG_BIND_LO(0)) rtcode = sbp_targ_cmd(xfer, fwdev, SBP_TARG_LOGIN_ID(lo), lo % 0x20); else rtcode = RESP_ADDRESS_ERROR; done: if (rtcode != 0) printf("%s: rtcode = %d\n", __func__, rtcode); sfp = &xfer->send.hdr; xfer->send.spd = FWSPD_S400; xfer->hand = sbp_targ_resp_callback; sfp->mode.wres.dst = fp->mode.wreqb.src; sfp->mode.wres.tlrt = fp->mode.wreqb.tlrt; sfp->mode.wres.tcode = FWTCODE_WRES; sfp->mode.wres.rtcode = rtcode; sfp->mode.wres.pri = 0; fw_asyreq(xfer->fc, -1, xfer); splx(s); } static int sbp_targ_attach(device_t dev) { struct sbp_targ_softc *sc; struct cam_devq *devq; struct firewire_comm *fc; sc = (struct sbp_targ_softc *) device_get_softc(dev); bzero((void *)sc, sizeof(struct sbp_targ_softc)); mtx_init(&sc->mtx, "sbp_targ", NULL, MTX_DEF); sc->fd.fc = fc = device_get_ivars(dev); sc->fd.dev = dev; sc->fd.post_explore = (void *) sbp_targ_post_explore; sc->fd.post_busreset = (void *) sbp_targ_post_busreset; devq = cam_simq_alloc(/*maxopenings*/MAX_LUN*MAX_INITIATORS); if (devq == NULL) return (ENXIO); sc->sim = cam_sim_alloc(sbp_targ_action, sbp_targ_poll, "sbp_targ", sc, device_get_unit(dev), &sc->mtx, /*untagged*/ 1, /*tagged*/ 1, devq); if (sc->sim == NULL) { cam_simq_free(devq); return (ENXIO); } SBP_LOCK(sc); if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS) goto fail; if (xpt_create_path(&sc->path, /*periph*/ NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->sim)); goto fail; } SBP_UNLOCK(sc); sc->fwb.start = SBP_TARG_BIND_START; sc->fwb.end = SBP_TARG_BIND_END; /* pre-allocate xfer */ STAILQ_INIT(&sc->fwb.xferlist); fw_xferlist_add(&sc->fwb.xferlist, M_SBP_TARG, /*send*/ 0, /*recv*/ SBP_TARG_RECV_LEN, MAX_LUN /* XXX */, fc, (void *)sc, sbp_targ_recv); fw_bindadd(fc, &sc->fwb); return 0; fail: SBP_UNLOCK(sc); cam_sim_free(sc->sim, /*free_devq*/TRUE); return (ENXIO); } static int sbp_targ_detach(device_t dev) { struct sbp_targ_softc *sc; struct sbp_targ_lstate *lstate; int i; sc = (struct sbp_targ_softc *)device_get_softc(dev); sc->fd.post_busreset = NULL; SBP_LOCK(sc); xpt_free_path(sc->path); xpt_bus_deregister(cam_sim_path(sc->sim)); SBP_UNLOCK(sc); cam_sim_free(sc->sim, /*free_devq*/TRUE); for (i = 0; i < MAX_LUN; i ++) { lstate = sc->lstate[i]; if (lstate != NULL) { xpt_free_path(lstate->path); free(lstate, M_SBP_TARG); } } if (sc->black_hole != NULL) { xpt_free_path(sc->black_hole->path); free(sc->black_hole, M_SBP_TARG); } fw_bindremove(sc->fd.fc, &sc->fwb); fw_xferlist_remove(&sc->fwb.xferlist); mtx_destroy(&sc->mtx); return 0; } static devclass_t sbp_targ_devclass; static device_method_t sbp_targ_methods[] = { /* device interface */ DEVMETHOD(device_identify, sbp_targ_identify), DEVMETHOD(device_probe, sbp_targ_probe), DEVMETHOD(device_attach, sbp_targ_attach), DEVMETHOD(device_detach, sbp_targ_detach), { 0, 0 } }; static driver_t sbp_targ_driver = { "sbp_targ", sbp_targ_methods, sizeof(struct sbp_targ_softc), }; DRIVER_MODULE(sbp_targ, firewire, sbp_targ_driver, sbp_targ_devclass, 0, 0); MODULE_VERSION(sbp_targ, 1); MODULE_DEPEND(sbp_targ, firewire, 1, 1, 1); MODULE_DEPEND(sbp_targ, cam, 1, 1, 1); Index: head/sys/dev/hatm/if_hatmreg.h =================================================================== --- head/sys/dev/hatm/if_hatmreg.h (revision 258779) +++ head/sys/dev/hatm/if_hatmreg.h (revision 258780) @@ -1,641 +1,641 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer 
Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * $FreeBSD$ * * Fore HE driver for NATM */ /* check configuration */ #if HE_CONFIG_VPI_BITS + HE_CONFIG_VCI_BITS > 12 #error "hatm: too many bits configured for VPI/VCI" #endif #define HE_MAX_VCCS (1 << (HE_CONFIG_VPI_BITS + HE_CONFIG_VCI_BITS)) #define HE_VPI_MASK ((1 << (HE_CONFIG_VPI_BITS))-1) #define HE_VCI_MASK ((1 << (HE_CONFIG_VCI_BITS))-1) #define HE_VPI(CID) (((CID) >> HE_CONFIG_VCI_BITS) & HE_VPI_MASK) #define HE_VCI(CID) ((CID) & HE_VCI_MASK) #define HE_CID(VPI,VCI) ((((VPI) & HE_VPI_MASK) << HE_CONFIG_VCI_BITS) | \ ((VCI) & HE_VCI_MASK)) /* GEN_CNTL_0 register */ #define HE_PCIR_GEN_CNTL_0 0x40 #define HE_PCIM_CTL0_64BIT (1 << 0) #define HE_PCIM_CTL0_IGNORE_TIMEOUT (1 << 1) #define HE_PCIM_CTL0_INIT_ENB (1 << 2) #define HE_PCIM_CTL0_MRM (1 << 4) #define HE_PCIM_CTL0_MRL (1 << 5) #define HE_PCIM_CTL0_BIGENDIAN (1 << 16) #define HE_PCIM_CTL0_INT_PROC_ENB (1 << 25) /* * Memory registers */ #define HE_REGO_FLASH 0x00000 #define HE_REGO_RESET_CNTL 0x80000 #define HE_REGM_RESET_STATE (1 << 6) #define HE_REGO_HOST_CNTL 0x80004 #define HE_REGM_HOST_BUS64 (1 << 27) #define HE_REGM_HOST_DESC_RD64 (1 << 26) #define HE_REGM_HOST_DATA_RD64 (1 << 25) #define HE_REGM_HOST_DATA_WR64 (1 << 24) #define HE_REGM_HOST_PROM_SEL (1 << 12) #define HE_REGM_HOST_PROM_WREN (1 << 11) #define HE_REGM_HOST_PROM_DATA_OUT (1 << 10) #define HE_REGS_HOST_PROM_DATA_OUT 10 #define HE_REGM_HOST_PROM_DATA_IN (1 << 9) #define HE_REGS_HOST_PROM_DATA_IN 9 #define HE_REGM_HOST_PROM_CLOCK (1 << 8) #define HE_REGM_HOST_PROM_BITS (0x00001f00) #define HE_REGM_HOST_QUICK_RD (1 << 7) #define HE_REGM_HOST_QUICK_WR (1 << 6) #define HE_REGM_HOST_OUTFF_ENB (1 << 5) #define HE_REGM_HOST_CMDFF_ENB (1 << 4) #define HE_REGO_LB_SWAP 0x80008 #define HE_REGM_LBSWAP_RNUM (0xf << 27) #define HE_REGS_LBSWAP_RNUM 27 #define HE_REGM_LBSWAP_DATA_WR_SWAP (1 << 20) #define HE_REGM_LBSWAP_DESC_RD_SWAP (1 << 19) #define HE_REGM_LBSWAP_DATA_RD_SWAP (1 << 18) #define HE_REGM_LBSWAP_INTR_SWAP (1 << 17) #define HE_REGM_LBSWAP_DESC_WR_SWAP (1 << 16) #define HE_REGM_LBSWAP_BIG_ENDIAN (1 << 14) #define HE_REGM_LBSWAP_XFER_SIZE (1 << 7) #define HE_REGO_LB_MEM_ADDR 0x8000C #define HE_REGO_LB_MEM_DATA 0x80010 #define 
HE_REGO_LB_MEM_ACCESS 0x80014 #define HE_REGM_LB_MEM_HNDSHK (1 << 30) #define HE_REGM_LB_MEM_READ 0x3 #define HE_REGM_LB_MEM_WRITE 0x7 #define HE_REGO_SDRAM_CNTL 0x80018 #define HE_REGM_SDRAM_64BIT (1 << 3) #define HE_REGO_INT_FIFO 0x8001C #define HE_REGM_INT_FIFO_CLRA (1 << 8) #define HE_REGM_INT_FIFO_CLRB (1 << 9) #define HE_REGM_INT_FIFO_CLRC (1 << 10) #define HE_REGM_INT_FIFO_CLRD (1 << 11) #define HE_REGO_ABORT_ADDR 0x80020 #define HE_REGO_IRQ0_BASE 0x80080 #define HE_REGO_IRQ_BASE(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x00) #define HE_REGM_IRQ_BASE_TAIL 0x3ff #define HE_REGO_IRQ_HEAD(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x04) #define HE_REGS_IRQ_HEAD_SIZE 22 #define HE_REGS_IRQ_HEAD_THRESH 12 #define HE_REGS_IRQ_HEAD_HEAD 2 #define HE_REGO_IRQ_CNTL(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x08) #define HE_REGM_IRQ_A (0 << 2) #define HE_REGM_IRQ_B (1 << 2) #define HE_REGM_IRQ_C (2 << 2) #define HE_REGM_IRQ_D (3 << 2) #define HE_REGO_IRQ_DATA(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x0C) #define HE_REGO_GRP_1_0_MAP 0x800C0 #define HE_REGO_GRP_3_2_MAP 0x800C4 #define HE_REGO_GRP_5_4_MAP 0x800C8 #define HE_REGO_GRP_7_6_MAP 0x800CC /* * Receive buffer pools */ #define HE_REGO_G0_RBPS_S 0x80400 #define HE_REGO_G0_RBPS_T 0x80404 #define HE_REGO_G0_RBPS_QI 0x80408 #define HE_REGO_G0_RBPS_BL 0x8040C #define HE_REGO_RBP_S(K,G) (HE_REGO_G0_RBPS_S + (K) * 0x10 + (G) * 0x20) #define HE_REGO_RBP_T(K,G) (HE_REGO_G0_RBPS_T + (K) * 0x10 + (G) * 0x20) #define HE_REGO_RBP_QI(K,G) (HE_REGO_G0_RBPS_QI + (K) * 0x10 + (G) * 0x20) #define HE_REGO_RBP_BL(K,G) (HE_REGO_G0_RBPS_BL + (K) * 0x10 + (G) * 0x20) #define HE_REGS_RBP_HEAD 3 #define HE_REGS_RBP_TAIL 3 #define HE_REGS_RBP_SIZE 14 #define HE_REGM_RBP_INTR_ENB (1 << 13) #define HE_REGS_RBP_THRESH 0 /* * Receive buffer return queues */ #define HE_REGO_G0_RBRQ_ST 0x80500 #define HE_REGO_G0_RBRQ_H 0x80504 #define HE_REGO_G0_RBRQ_Q 0x80508 #define HE_REGO_G0_RBRQ_I 0x8050C #define HE_REGO_RBRQ_ST(G) (HE_REGO_G0_RBRQ_ST + (G) * 0x10) #define HE_REGO_RBRQ_H(G) (HE_REGO_G0_RBRQ_H + (G) * 0x10) #define HE_REGO_RBRQ_Q(G) (HE_REGO_G0_RBRQ_Q + (G) * 0x10) #define HE_REGO_RBRQ_I(G) (HE_REGO_G0_RBRQ_I + (G) * 0x10) #define HE_REGS_RBRQ_HEAD 3 #define HE_REGS_RBRQ_THRESH 13 #define HE_REGS_RBRQ_SIZE 0 #define HE_REGS_RBRQ_TIME 8 #define HE_REGS_RBRQ_COUNT 0 /* * Intermediate queues */ #define HE_REGO_G0_INMQ_S 0x80580 #define HE_REGO_G0_INMQ_L 0x80584 #define HE_REGO_INMQ_S(G) (HE_REGO_G0_INMQ_S + (G) * 8) #define HE_REGO_INMQ_L(G) (HE_REGO_G0_INMQ_L + (G) * 8) #define HE_REGO_RHCONFIG 0x805C0 #define HE_REGM_RHCONFIG_PHYENB (1 << 10) #define HE_REGS_RHCONFIG_OAM_GID 7 #define HE_REGS_RHCONFIG_PTMR_PRE 0 /* * Transmit buffer return queues */ #define HE_REGO_TBRQ0_B_T 0x80600 #define HE_REGO_TBRQ0_H 0x80604 #define HE_REGO_TBRQ0_S 0x80608 #define HE_REGO_TBRQ0_THRESH 0x8060C #define HE_REGO_TBRQ_B_T(G) (HE_REGO_TBRQ0_B_T + (G) * 0x10) #define HE_REGO_TBRQ_H(G) (HE_REGO_TBRQ0_H + (G) * 0x10) #define HE_REGO_TBRQ_S(G) (HE_REGO_TBRQ0_S + (G) * 0x10) #define HE_REGO_TBRQ_THRESH(G) (HE_REGO_TBRQ0_THRESH + (G) * 0x10) #define HE_REGS_TBRQ_HEAD 2 /* * Transmit packet descriptor ready queue */ #define HE_REGO_TPDRQ_H 0x80680 #define HE_REGS_TPDRQ_H_H 3 /* #define HE_REGM_TPDRQ_H_H ((HE_CONFIG_TPDRQ_SIZE - 1) << 3) */ #define HE_REGO_TPDRQ_T 0x80684 #define HE_REGS_TPDRQ_T_T 3 /* #define HE_REGM_TPDRQ_T_T ((HE_CONFIG_TPDRQ_SIZE - 1) << 3) */ #define HE_REGO_TPDRQ_S 0x80688 #define HE_REGO_UBUFF_BA 0x8068C #define HE_REGO_RLBF0_H 0x806C0 #define HE_REGO_RLBF0_T 0x806C4 #define 
HE_REGO_RLBF1_H 0x806C8 #define HE_REGO_RLBF1_T 0x806CC #define HE_REGO_RLBF_H(N) (HE_REGO_RLBF0_H + (N) * 8) #define HE_REGO_RLBF_T(N) (HE_REGO_RLBF0_T + (N) * 8) #define HE_REGO_RLBC_H 0x806D0 #define HE_REGO_RLBC_T 0x806D4 #define HE_REGO_RLBC_H2 0x806D8 #define HE_REGO_TLBF_H 0x806E0 #define HE_REGO_TLBF_T 0x806E4 #define HE_REGO_RLBF0_C 0x806E8 #define HE_REGO_RLBF1_C 0x806EC #define HE_REGO_RLBF_C(N) (HE_REGO_RLBF0_C + (N) * 4) #define HE_REGO_RXTHRSH 0x806F0 #define HE_REGO_LITHRSH 0x806F4 #define HE_REGO_LBARB 0x80700 #define HE_REGS_LBARB_SLICE 28 #define HE_REGS_LBARB_RNUM 23 #define HE_REGS_LBARB_THPRI 21 #define HE_REGS_LBARB_RHPRI 19 #define HE_REGS_LBARB_TLPRI 17 #define HE_REGS_LBARB_RLPRI 15 #define HE_REGS_LBARB_BUS_MULT 8 #define HE_REGS_LBARB_NET_PREF 0 #define HE_REGO_SDRAMCON 0x80704 #define HE_REGM_SDRAMCON_BANK (1 << 14) #define HE_REGM_SDRAMCON_WIDE (1 << 13) #define HE_REGM_SDRAMCON_TWRWAIT (1 << 12) #define HE_REGM_SDRAMCON_TRPWAIT (1 << 11) #define HE_REGM_SDRAMCON_TRASWAIT (1 << 10) #define HE_REGS_SDRAMCON_REF 0 #define HE_REGO_RCCSTAT 0x8070C #define HE_REGM_RCCSTAT_PROG (1 << 0) #define HE_REGO_TCMCONFIG 0x80740 #define HE_REGS_TCMCONFIG_BANK_WAIT 6 #define HE_REGS_TCMCONFIG_RW_WAIT 2 #define HE_REGS_TCMCONFIG_TYPE 0 #define HE_REGO_TSRB_BA 0x80744 #define HE_REGO_TSRC_BA 0x80748 #define HE_REGO_TMABR_BA 0x8074C #define HE_REGO_TPD_BA 0x80750 #define HE_REGO_TSRD_BA 0x80758 #define HE_REGO_TXCONFIG 0x80760 #define HE_REGS_TXCONFIG_THRESH 22 #define HE_REGM_TXCONFIG_UTMODE (1 << 21) #define HE_REGS_TXCONFIG_VCI_MASK 17 #define HE_REGS_TXCONFIG_LBFREE 0 #define HE_REGO_TXAAL5_PROTO 0x80764 #define HE_REGO_RCMCONFIG 0x80780 #define HE_REGS_RCMCONFIG_BANK_WAIT 6 #define HE_REGS_RCMCONFIG_RW_WAIT 2 #define HE_REGS_RCMCONFIG_TYPE 0 #define HE_REGO_RCMRSRB_BA 0x80784 #define HE_REGO_RCMLBM_BA 0x80788 #define HE_REGO_RCMABR_BA 0x8078C #define HE_REGO_RCCONFIG 0x807C0 #define HE_REGS_RCCONFIG_UTDELAY 11 #define HE_REGM_RCCONFIG_WRAP_MODE (1 << 10) #define HE_REGM_RCCONFIG_UT_MODE (1 << 9) #define HE_REGM_RCCONFIG_RXENB (1 << 8) #define HE_REGS_RCCONFIG_VP 4 #define HE_REGS_RCCONFIG_VC 0 #define HE_REGO_MCC 0x807C4 #define HE_REGO_OEC 0x807C8 #define HE_REGO_DCC 0x807CC #define HE_REGO_CEC 0x807D0 #define HE_REGO_HSP_BA 0x807F0 #define HE_REGO_LBCONFIG 0x807F4 #define HE_REGO_CON_DAT 0x807F8 #define HE_REGO_CON_CTL 0x807FC #define HE_REGM_CON_MBOX (2U << 30) #define HE_REGM_CON_TCM (1 << 30) #define HE_REGM_CON_RCM (0 << 30) #define HE_REGM_CON_WE (1 << 29) #define HE_REGM_CON_STATUS (1 << 28) #define HE_REGM_CON_DIS3 (1 << 22) #define HE_REGM_CON_DIS2 (1 << 21) #define HE_REGM_CON_DIS1 (1 << 20) #define HE_REGM_CON_DIS0 (1 << 19) #define HE_REGS_CON_DIS 19 #define HE_REGS_CON_ADDR 0 #define HE_REGO_SUNI 0x80800 #define HE_REGO_SUNI_END 0x80C00 #define HE_REGO_END 0x100000 /* * MBOX registers */ #define HE_REGO_CS_STPER0 0x000 #define HE_REGO_CS_STPER(G) (HE_REGO_CS_STPER0 + (G)) #define HE_REGN_CS_STPER 32 #define HE_REGO_CS_STTIM0 0x020 #define HE_REGO_CS_STTIM(G) (HE_REGO_CS_STTIM0 + (G)) #define HE_REGO_CS_TGRLD0 0x040 #define HE_REGO_CS_TGRLD(G) (HE_REGO_CS_TGRLD0 + (G)) #define HE_REGO_CS_ERTHR0 0x50 #define HE_REGO_CS_ERTHR1 0x51 #define HE_REGO_CS_ERTHR2 0x52 #define HE_REGO_CS_ERTHR3 0x53 #define HE_REGO_CS_ERTHR4 0x54 #define HE_REGO_CS_ERCTL0 0x55 #define HE_REGO_CS_ERCTL1 0x56 #define HE_REGO_CS_ERCTL2 0x57 #define HE_REGO_CS_ERSTAT0 0x58 #define HE_REGO_CS_ERSTAT1 0x59 #define HE_REGO_CS_RTCCT 0x60 #define HE_REGO_CS_RTFWC 0x61 #define HE_REGO_CS_RTFWR 
0x62 #define HE_REGO_CS_RTFTC 0x63 #define HE_REGO_CS_RTATR 0x64 #define HE_REGO_CS_TFBSET 0x70 #define HE_REGO_CS_TFBADD 0x71 #define HE_REGO_CS_TFBSUB 0x72 #define HE_REGO_CS_WCRMAX 0x73 #define HE_REGO_CS_WCRMIN 0x74 #define HE_REGO_CS_WCRINC 0x75 #define HE_REGO_CS_WCRDEC 0x76 #define HE_REGO_CS_WCRCEIL 0x77 #define HE_REGO_CS_BWDCNT 0x78 #define HE_REGO_CS_OTPPER 0x80 #define HE_REGO_CS_OTWPER 0x81 #define HE_REGO_CS_OTTLIM 0x82 #define HE_REGO_CS_OTTCNT 0x83 #define HE_REGO_CS_HGRRT0 0x90 #define HE_REGO_CS_HGRRT(G) (HE_REGO_CS_HGRRT0 + (G)) #define HE_REGO_CS_ORPTRS 0xA0 #define HE_REGO_RCON_CLOSE 0x100 #define HE_REGO_CS_END 0x101 #define HE_REGT_CS_ERTHR { \ { /* 155 */ \ { 0x000800ea, 0x000400ea, 0x000200ea }, /* ERTHR0 */ \ { 0x000C3388, 0x00063388, 0x00033388 }, /* ERTHR1 */ \ { 0x00101018, 0x00081018, 0x00041018 }, /* ERTHR2 */ \ { 0x00181dac, 0x000c1dac, 0x00061dac }, /* ERTHR3 */ \ { 0x0028051a, 0x0014051a, 0x000a051a }, /* ERTHR4 */ \ }, { /* 622 */ \ { 0x000800fa, 0x000400fa, 0x000200fa }, /* ERTHR0 */ \ { 0x000c33cb, 0x000633cb, 0x000333cb }, /* ERTHR1 */ \ { 0x0010101b, 0x0008101b, 0x0004101b }, /* ERTHR2 */ \ { 0x00181dac, 0x000c1dac, 0x00061dac }, /* ERTHR3 */ \ { 0x00280600, 0x00140600, 0x000a0600 }, /* ERTHR4 */ \ } \ } #define HE_REGT_CS_ERCTL { \ { 0x0235e4b1, 0x4701, 0x64b1 }, /* 155 */ \ { 0x023de8b3, 0x1801, 0x68b3 } /* 622 */ \ } #define HE_REGT_CS_ERSTAT { \ { 0x1280, 0x64b1 }, /* 155 */ \ { 0x1280, 0x68b3 }, /* 622 */ \ } #define HE_REGT_CS_RTFWR { \ 0xf424, /* 155 */ \ 0x14585 /* 622 */ \ } #define HE_REGT_CS_RTATR { \ 0x4680, /* 155 */ \ 0x4680 /* 622 */ \ } #define HE_REGT_CS_BWALLOC { \ { 0x000563b7, 0x64b1, 0x5ab1, 0xe4b1, 0xdab1, 0x64b1 }, /* 155 */\ { 0x00159ece, 0x68b3, 0x5eb3, 0xe8b3, 0xdeb3, 0x68b3 }, /* 622 */\ } #define HE_REGT_CS_ORCF { \ { 0x6, 0x1e }, /* 155 */ \ { 0x5, 0x14 } /* 622 */ \ } /* * TSRs - NR is relative to the starting number of the block */ #define HE_REGO_TSRA(BASE,CID,NR) ((BASE) + ((CID) << 3) + (NR)) #define HE_REGO_TSRB(BASE,CID,NR) ((BASE) + ((CID) << 2) + (NR)) #define HE_REGO_TSRC(BASE,CID,NR) ((BASE) + ((CID) << 1) + (NR)) #define HE_REGO_TSRD(BASE,CID) ((BASE) + (CID)) #define HE_REGM_TSR0_CONN_STATE (7 << 28) #define HE_REGS_TSR0_CONN_STATE 28 #define HE_REGM_TSR0_USE_WMIN (1 << 23) #define HE_REGM_TSR0_GROUP (7 << 18) #define HE_REGS_TSR0_GROUP 18 #define HE_REGM_TSR0_TRAFFIC (3 << 16) #define HE_REGS_TSR0_TRAFFIC 16 #define HE_REGM_TSR0_TRAFFIC_CBR 0 #define HE_REGM_TSR0_TRAFFIC_UBR 1 #define HE_REGM_TSR0_TRAFFIC_ABR 2 #define HE_REGM_TSR0_PROT (1 << 15) #define HE_REGM_TSR0_AAL (3 << 12) #define HE_REGS_TSR0_AAL 12 #define HE_REGM_TSR0_AAL_5 0 #define HE_REGM_TSR0_AAL_0 1 #define HE_REGM_TSR0_AAL_0T 2 #define HE_REGM_TSR0_HALT_ER (1 << 11) #define HE_REGM_TSR0_MARK_CI (1 << 10) #define HE_REGM_TSR0_MARK_ER (1 << 9) #define HE_REGM_TSR0_UPDATE_GER (1 << 8) #define HE_REGM_TSR0_RC 0xff #define HE_REGM_TSR1_PCR (0x7fff << 16) #define HE_REGS_TSR1_PCR 16 #define HE_REGM_TSR1_MCR (0x7fff << 0) #define HE_REGS_TSR1_MCR 0 #define HE_REGM_TSR2_ACR (0x7fff << 16) #define HE_REGS_TSR2_ACR 16 #define HE_REGM_TSR3_NRM (0xff << 24) #define HE_REGS_TSR3_NRM 24 #define HE_REGM_TSR3_CRM (0xff << 0) #define HE_REGS_TSR3_CRM 0 -#define HE_REGM_TSR4_FLUSH (1 << 31) +#define HE_REGM_TSR4_FLUSH (1U << 31) #define HE_REGM_TSR4_SESS_END (1 << 30) #define HE_REGM_TSR4_OAM_CRC10 (1 << 28) #define HE_REGM_TSR4_NULL_CRC10 (1 << 27) #define HE_REGM_TSR4_PROT (1 << 26) #define HE_REGM_TSR4_AAL (3 << 24) #define HE_REGS_TSR4_AAL 24 #define 
HE_REGM_TSR4_AAL_5 0 #define HE_REGM_TSR4_AAL_0 1 #define HE_REGM_TSR4_AAL_0T 2 #define HE_REGM_TSR9_INIT 0x00100000 #define HE_REGM_TSR11_ICR (0x7fff << 16) #define HE_REGS_TSR11_ICR 16 #define HE_REGM_TSR11_TRM (0x7 << 13) #define HE_REGS_TSR11_TRM 13 #define HE_REGM_TSR11_NRM (0x7 << 10) #define HE_REGS_TSR11_NRM 10 #define HE_REGM_TSR11_ADTF 0x3ff #define HE_REGS_TSR11_ADTF 0 #define HE_REGM_TSR13_RDF (0xf << 23) #define HE_REGS_TSR13_RDF 23 #define HE_REGM_TSR13_RIF (0xf << 19) #define HE_REGS_TSR13_RIF 19 #define HE_REGM_TSR13_CDF (0x7 << 16) #define HE_REGS_TSR13_CDF 16 #define HE_REGM_TSR13_CRM 0xffff #define HE_REGS_TSR13_CRM 0 -#define HE_REGM_TSR14_CBR_DELETE (1 << 31) +#define HE_REGM_TSR14_CBR_DELETE (1U << 31) #define HE_REGM_TSR14_ABR_CLOSE (1 << 16) /* * RSRs */ #define HE_REGO_RSRA(BASE,CID,NR) ((BASE) + ((CID) << 3) + (NR)) #define HE_REGO_RSRB(BASE,CID,NR) ((BASE) + ((CID) << 1) + (NR)) #define HE_REGM_RSR0_PTI7 (1 << 15) #define HE_REGM_RSR0_RM (1 << 14) #define HE_REGM_RSR0_F5OAM (1 << 13) #define HE_REGM_RSR0_STARTPDU (1 << 10) #define HE_REGM_RSR0_OPEN (1 << 6) #define HE_REGM_RSR0_PPD (1 << 5) #define HE_REGM_RSR0_EPD (1 << 4) #define HE_REGM_RSR0_TCPCS (1 << 3) #define HE_REGM_RSR0_AAL 0x7 #define HE_REGM_RSR0_AAL_5 0x0 #define HE_REGM_RSR0_AAL_0 0x1 #define HE_REGM_RSR0_AAL_0T 0x2 #define HE_REGM_RSR0_AAL_RAW 0x3 #define HE_REGM_RSR0_AAL_RAWCRC10 0x4 #define HE_REGM_RSR1_AQI (1 << 20) #define HE_REGM_RSR1_RBPL_ONLY (1 << 19) #define HE_REGM_RSR1_GROUP (7 << 16) #define HE_REGS_RSR1_GROUP 16 #define HE_REGM_RSR4_AQI (1 << 30) #define HE_REGM_RSR4_GROUP (7 << 27) #define HE_REGS_RSR4_GROUP 27 #define HE_REGM_RSR4_RBPL_ONLY (1 << 26) /* * Relative to RCMABR_BA */ #define HE_REGO_CM_GQTBL 0x000 #define HE_REGL_CM_GQTBL 0x100 #define HE_REGO_CM_RGTBL 0x100 #define HE_REGL_CM_RGTBL 0x100 #define HE_REGO_CM_TNRMTBL 0x200 #define HE_REGL_CM_TNRMTBL 0x100 #define HE_REGO_CM_ORCF 0x300 #define HE_REGL_CM_ORCF 0x100 #define HE_REGO_CM_RTGTBL 0x400 #define HE_REGL_CM_RTGTBL 0x200 #define HE_REGO_CM_IRCF 0x600 #define HE_REGL_CM_IRCF 0x200 /* * Interrupt Status */ #define HE_REGM_ITYPE 0xf8 #define HE_REGM_IGROUP 0x07 #define HE_REGM_ITYPE_TBRQ (0x0 << 3) #define HE_REGM_ITYPE_TPD (0x1 << 3) #define HE_REGM_ITYPE_RBPS (0x2 << 3) #define HE_REGM_ITYPE_RBPL (0x3 << 3) #define HE_REGM_ITYPE_RBRQ (0x4 << 3) #define HE_REGM_ITYPE_RBRQT (0x5 << 3) #define HE_REGM_ITYPE_PHYS (0x6 << 3) #define HE_REGM_ITYPE_UNKNOWN 0xf8 #define HE_REGM_ITYPE_ERR 0x80 #define HE_REGM_ITYPE_PERR 0x81 #define HE_REGM_ITYPE_ABORT 0x82 #define HE_REGM_ITYPE_INVALID 0xf8 /* * Serial EEPROM */ #define HE_EEPROM_PROD_ID 0x08 #define HE_EEPROM_PROD_ID_LEN 30 #define HE_EEPROM_REV 0x26 #define HE_EEPROM_REV_LEN 4 #define HE_EEPROM_M_SN 0x3A #define HE_EEPROM_MEDIA 0x3E #define HE_EEPROM_MAC 0x42 #define HE_MEDIA_UTP155 0x06 #define HE_MEDIA_MMF155 0x26 #define HE_MEDIA_MMF622 0x27 #define HE_MEDIA_SMF155 0x46 #define HE_MEDIA_SMF622 0x47 #define HE_622_CLOCK 66667000 #define HE_155_CLOCK 50000000 /* * Statistics */ struct fatm_statshe { }; /* * Queue entries */ /* Receive Buffer Pool Queue entry */ struct he_rbpen { uint32_t phys; /* physical address */ uint32_t handle; /* handle or virtual address */ }; /* Receive Buffer Return Queue entry */ struct he_rbrqen { uint32_t addr; /* handle and flags */ uint32_t len; /* length and CID */ }; #define HE_REGM_RBRQ_ADDR 0xFFFFFFC0 #define HE_REGS_RBRQ_ADDR 6 #define HE_REGM_RBRQ_FLAGS 0x0000003F #define HE_REGM_RBRQ_HBUF_ERROR (1 << 0) #define HE_REGM_RBRQ_CON_CLOSED 
(1 << 1) #define HE_REGM_RBRQ_AAL5_PROT (1 << 2) #define HE_REGM_RBRQ_END_PDU (1 << 3) #define HE_REGM_RBRQ_LEN_ERROR (1 << 4) #define HE_REGM_RBRQ_CRC_ERROR (1 << 5) #define HE_REGM_RBRQ_CID (0x1fff << 16) #define HE_REGS_RBRQ_CID 16 #define HE_REGM_RBRQ_LEN 0xffff /* Transmit Packet Descriptor Ready Queue entry */ struct he_tpdrqen { uint32_t tpd; /* physical address */ uint32_t cid; /* connection id */ }; /* Transmit buffer return queue */ struct he_tbrqen { uint32_t addr; /* handle and flags */ }; #define HE_REGM_TBRQ_ADDR 0xffffffc0 #define HE_REGM_TBRQ_FLAGS 0x0000000a #define HE_REGM_TBRQ_EOS 0x00000008 #define HE_REGM_TBRQ_MULT 0x00000002 struct he_tpd { uint32_t addr; /* handle or virtual address and flags */ uint32_t res; /* reserved */ struct { uint32_t addr; /* buffer address */ uint32_t len; /* buffer length and flags */ } bufs[3]; }; #define HE_REGM_TPD_ADDR 0xffffffC0 #define HE_REGS_TPD_ADDR 6 #define HE_REGM_TPD_INTR 0x0001 #define HE_REGM_TPD_CLP 0x0002 #define HE_REGM_TPD_EOS 0x0004 #define HE_REGM_TPD_PTI 0x0038 #define HE_REGS_TPD_PTI 3 #define HE_REGM_TPD_LST 0x80000000 /* * The HOST STATUS PAGE */ struct he_hsp { struct { uint32_t tbrq_tail; uint32_t res1[15]; uint32_t rbrq_tail; uint32_t res2[15]; } group[8]; }; #define HE_MAX_PDU (65535) Index: head/sys/dev/hwpmc/hwpmc_piv.h =================================================================== --- head/sys/dev/hwpmc/hwpmc_piv.h (revision 258779) +++ head/sys/dev/hwpmc/hwpmc_piv.h (revision 258780) @@ -1,125 +1,125 @@ /*- * Copyright (c) 2005, Joseph Koshy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* Machine dependent interfaces */ #ifndef _DEV_HWPMC_PIV_H_ #define _DEV_HWPMC_PIV_H_ 1 /* Intel P4 PMCs */ #define P4_NPMCS 18 #define P4_NESCR 45 #define P4_INVALID_PMC_INDEX -1 #define P4_MAX_ESCR_PER_EVENT 2 #define P4_MAX_PMC_PER_ESCR 3 -#define P4_CCCR_OVF (1 << 31) +#define P4_CCCR_OVF (1U << 31) #define P4_CCCR_CASCADE (1 << 30) #define P4_CCCR_OVF_PMI_T1 (1 << 27) #define P4_CCCR_OVF_PMI_T0 (1 << 26) #define P4_CCCR_FORCE_OVF (1 << 25) #define P4_CCCR_EDGE (1 << 24) #define P4_CCCR_THRESHOLD_SHIFT 20 #define P4_CCCR_THRESHOLD_MASK 0x00F00000 #define P4_CCCR_TO_THRESHOLD(C) (((C) << P4_CCCR_THRESHOLD_SHIFT) & \ P4_CCCR_THRESHOLD_MASK) #define P4_CCCR_COMPLEMENT (1 << 19) #define P4_CCCR_COMPARE (1 << 18) #define P4_CCCR_ACTIVE_THREAD_SHIFT 16 #define P4_CCCR_ACTIVE_THREAD_MASK 0x00030000 #define P4_CCCR_TO_ACTIVE_THREAD(T) (((T) << P4_CCCR_ACTIVE_THREAD_SHIFT) & \ P4_CCCR_ACTIVE_THREAD_MASK) #define P4_CCCR_ESCR_SELECT_SHIFT 13 #define P4_CCCR_ESCR_SELECT_MASK 0x0000E000 #define P4_CCCR_TO_ESCR_SELECT(E) (((E) << P4_CCCR_ESCR_SELECT_SHIFT) & \ P4_CCCR_ESCR_SELECT_MASK) #define P4_CCCR_ENABLE (1 << 12) #define P4_CCCR_VALID_BITS (P4_CCCR_OVF | P4_CCCR_CASCADE | \ P4_CCCR_OVF_PMI_T1 | P4_CCCR_OVF_PMI_T0 | P4_CCCR_FORCE_OVF | \ P4_CCCR_EDGE | P4_CCCR_THRESHOLD_MASK | P4_CCCR_COMPLEMENT | \ P4_CCCR_COMPARE | P4_CCCR_ESCR_SELECT_MASK | P4_CCCR_ENABLE) #define P4_ESCR_EVENT_SELECT_SHIFT 25 #define P4_ESCR_EVENT_SELECT_MASK 0x7E000000 #define P4_ESCR_TO_EVENT_SELECT(E) (((E) << P4_ESCR_EVENT_SELECT_SHIFT) & \ P4_ESCR_EVENT_SELECT_MASK) #define P4_ESCR_EVENT_MASK_SHIFT 9 #define P4_ESCR_EVENT_MASK_MASK 0x01FFFE00 #define P4_ESCR_TO_EVENT_MASK(M) (((M) << P4_ESCR_EVENT_MASK_SHIFT) & \ P4_ESCR_EVENT_MASK_MASK) #define P4_ESCR_TAG_VALUE_SHIFT 5 #define P4_ESCR_TAG_VALUE_MASK 0x000001E0 #define P4_ESCR_TO_TAG_VALUE(T) (((T) << P4_ESCR_TAG_VALUE_SHIFT) & \ P4_ESCR_TAG_VALUE_MASK) #define P4_ESCR_TAG_ENABLE 0x00000010 #define P4_ESCR_T0_OS 0x00000008 #define P4_ESCR_T0_USR 0x00000004 #define P4_ESCR_T1_OS 0x00000002 #define P4_ESCR_T1_USR 0x00000001 #define P4_ESCR_OS P4_ESCR_T0_OS #define P4_ESCR_USR P4_ESCR_T0_USR #define P4_ESCR_VALID_BITS (P4_ESCR_EVENT_SELECT_MASK | \ P4_ESCR_EVENT_MASK_MASK | P4_ESCR_TAG_VALUE_MASK | \ P4_ESCR_TAG_ENABLE | P4_ESCR_T0_OS | P4_ESCR_T0_USR | P4_ESCR_T1_OS | \ P4_ESCR_T1_USR) #define P4_PERFCTR_MASK 0xFFFFFFFFFFLL /* 40 bits */ #define P4_PERFCTR_OVERFLOWED(PMC) ((rdpmc(PMC) & (1LL << 39)) == 0) #define P4_CCCR_MSR_FIRST 0x360 /* MSR_BPU_CCCR0 */ #define P4_PERFCTR_MSR_FIRST 0x300 /* MSR_BPU_COUNTER0 */ #define P4_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (1 - (V)) #define P4_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (1 - (P)) struct pmc_md_p4_op_pmcallocate { uint32_t pm_p4_cccrconfig; uint32_t pm_p4_escrconfig; }; #ifdef _KERNEL /* MD extension for 'struct pmc' */ struct pmc_md_p4_pmc { uint32_t pm_p4_cccrvalue; uint32_t pm_p4_escrvalue; uint32_t pm_p4_escr; uint32_t pm_p4_escrmsr; }; /* * Prototypes */ int pmc_p4_initialize(struct pmc_mdep *_md, int _ncpus); void pmc_p4_finalize(struct pmc_mdep *md); #endif /* _KERNEL */ #endif /* _DEV_HWPMC_PIV_H_ */ Index: head/sys/dev/iwn/if_iwnreg.h =================================================================== --- head/sys/dev/iwn/if_iwnreg.h (revision 258779) +++ head/sys/dev/iwn/if_iwnreg.h (revision 258780) @@ -1,2130 +1,2130 @@ /* $FreeBSD$ */ /* $OpenBSD: if_iwnreg.h,v 1.40 2010/05/05 19:41:57 damien Exp $ */ /*- * Copyright (c) 2007, 2008 * Damien Bergamini * * Permission to use, copy, modify, and distribute
this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __IF_IWNREG_H__ #define __IF_IWNREG_H__ #define IWN_CT_KILL_THRESHOLD 114 /* in Celsius */ #define IWN_CT_KILL_EXIT_THRESHOLD 95 /* in Celsius */ #define IWN_TX_RING_COUNT 256 #define IWN_TX_RING_LOMARK 192 #define IWN_TX_RING_HIMARK 224 #define IWN_RX_RING_COUNT_LOG 6 #define IWN_RX_RING_COUNT (1 << IWN_RX_RING_COUNT_LOG) #define IWN4965_NTXQUEUES 16 #define IWN5000_NTXQUEUES 20 #define IWN4965_FIRSTAGGQUEUE 7 #define IWN5000_FIRSTAGGQUEUE 10 #define IWN4965_NDMACHNLS 7 #define IWN5000_NDMACHNLS 8 #define IWN_SRVC_DMACHNL 9 #define IWN_ICT_SIZE 4096 #define IWN_ICT_COUNT (IWN_ICT_SIZE / sizeof (uint32_t)) /* For cards with PAN command, default is IWN_CMD_QUEUE_NUM */ #define IWN_CMD_QUEUE_NUM 4 #define IWN_PAN_CMD_QUEUE 9 /* Maximum number of DMA segments for TX. */ #define IWN_MAX_SCATTER 20 /* RX buffers must be large enough to hold a full 4K A-MPDU. */ #define IWN_RBUF_SIZE (4 * 1024) #if defined(__LP64__) /* HW supports 36-bit DMA addresses. */ #define IWN_LOADDR(paddr) ((uint32_t)(paddr)) #define IWN_HIADDR(paddr) (((paddr) >> 32) & 0xf) #else #define IWN_LOADDR(paddr) (paddr) #define IWN_HIADDR(paddr) (0) #endif /* * Control and status registers. */ #define IWN_HW_IF_CONFIG 0x000 #define IWN_INT_COALESCING 0x004 #define IWN_INT_PERIODIC 0x005 /* use IWN_WRITE_1 */ #define IWN_INT 0x008 #define IWN_INT_MASK 0x00c #define IWN_FH_INT 0x010 #define IWN_GPIO_IN 0x018 /* read external chip pins */ #define IWN_RESET 0x020 #define IWN_GP_CNTRL 0x024 #define IWN_HW_REV 0x028 #define IWN_EEPROM 0x02c #define IWN_EEPROM_GP 0x030 #define IWN_OTP_GP 0x034 #define IWN_GIO 0x03c #define IWN_GP_UCODE 0x048 #define IWN_GP_DRIVER 0x050 #define IWN_UCODE_GP1 0x054 #define IWN_UCODE_GP1_SET 0x058 #define IWN_UCODE_GP1_CLR 0x05c #define IWN_UCODE_GP2 0x060 #define IWN_LED 0x094 #define IWN_DRAM_INT_TBL 0x0a0 #define IWN_SHADOW_REG_CTRL 0x0a8 #define IWN_GIO_CHICKEN 0x100 #define IWN_ANA_PLL 0x20c #define IWN_HW_REV_WA 0x22c #define IWN_DBG_HPET_MEM 0x240 #define IWN_DBG_LINK_PWR_MGMT 0x250 /* Need nic_lock for use above */ #define IWN_MEM_RADDR 0x40c #define IWN_MEM_WADDR 0x410 #define IWN_MEM_WDATA 0x418 #define IWN_MEM_RDATA 0x41c #define IWN_TARG_MBX_C 0x430 #define IWN_PRPH_WADDR 0x444 #define IWN_PRPH_RADDR 0x448 #define IWN_PRPH_WDATA 0x44c #define IWN_PRPH_RDATA 0x450 #define IWN_HBUS_TARG_WRPTR 0x460 /* * Flow-Handler registers. 
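 *
 * Most of these are per-queue register files with a fixed stride; as an
 * illustration, IWN_FH_TFBD_CTRL0(qid) below advances 8 bytes per
 * queue, so IWN_FH_TFBD_CTRL0(2) resolves to 0x1900 + 2 * 8 = 0x1910.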
*/ #define IWN_FH_TFBD_CTRL0(qid) (0x1900 + (qid) * 8) #define IWN_FH_TFBD_CTRL1(qid) (0x1904 + (qid) * 8) #define IWN_FH_KW_ADDR 0x197c #define IWN_FH_SRAM_ADDR(qid) (0x19a4 + (qid) * 4) #define IWN_FH_CBBC_QUEUE(qid) (0x19d0 + (qid) * 4) #define IWN_FH_STATUS_WPTR 0x1bc0 #define IWN_FH_RX_BASE 0x1bc4 #define IWN_FH_RX_WPTR 0x1bc8 #define IWN_FH_RX_CONFIG 0x1c00 #define IWN_FH_RX_STATUS 0x1c44 #define IWN_FH_TX_CONFIG(qid) (0x1d00 + (qid) * 32) #define IWN_FH_TXBUF_STATUS(qid) (0x1d08 + (qid) * 32) #define IWN_FH_TX_CHICKEN 0x1e98 #define IWN_FH_TX_STATUS 0x1eb0 /* * TX scheduler registers. */ #define IWN_SCHED_BASE 0xa02c00 #define IWN_SCHED_SRAM_ADDR (IWN_SCHED_BASE + 0x000) #define IWN5000_SCHED_DRAM_ADDR (IWN_SCHED_BASE + 0x008) #define IWN4965_SCHED_DRAM_ADDR (IWN_SCHED_BASE + 0x010) #define IWN5000_SCHED_TXFACT (IWN_SCHED_BASE + 0x010) #define IWN4965_SCHED_TXFACT (IWN_SCHED_BASE + 0x01c) #define IWN4965_SCHED_QUEUE_RDPTR(qid) (IWN_SCHED_BASE + 0x064 + (qid) * 4) #define IWN5000_SCHED_QUEUE_RDPTR(qid) (IWN_SCHED_BASE + 0x068 + (qid) * 4) #define IWN4965_SCHED_QCHAIN_SEL (IWN_SCHED_BASE + 0x0d0) #define IWN4965_SCHED_INTR_MASK (IWN_SCHED_BASE + 0x0e4) #define IWN5000_SCHED_QCHAIN_SEL (IWN_SCHED_BASE + 0x0e8) #define IWN4965_SCHED_QUEUE_STATUS(qid) (IWN_SCHED_BASE + 0x104 + (qid) * 4) #define IWN5000_SCHED_INTR_MASK (IWN_SCHED_BASE + 0x108) #define IWN5000_SCHED_QUEUE_STATUS(qid) (IWN_SCHED_BASE + 0x10c + (qid) * 4) #define IWN5000_SCHED_AGGR_SEL (IWN_SCHED_BASE + 0x248) /* * Offsets in TX scheduler's SRAM. */ #define IWN4965_SCHED_CTX_OFF 0x380 #define IWN4965_SCHED_CTX_LEN 416 #define IWN4965_SCHED_QUEUE_OFFSET(qid) (0x380 + (qid) * 8) #define IWN4965_SCHED_TRANS_TBL(qid) (0x500 + (qid) * 2) #define IWN5000_SCHED_CTX_OFF 0x600 #define IWN5000_SCHED_CTX_LEN 520 #define IWN5000_SCHED_QUEUE_OFFSET(qid) (0x600 + (qid) * 8) #define IWN5000_SCHED_TRANS_TBL(qid) (0x7e0 + (qid) * 2) /* * NIC internal memory offsets. */ #define IWN_APMG_CLK_CTRL 0x3000 #define IWN_APMG_CLK_EN 0x3004 #define IWN_APMG_CLK_DIS 0x3008 #define IWN_APMG_PS 0x300c #define IWN_APMG_DIGITAL_SVR 0x3058 #define IWN_APMG_ANALOG_SVR 0x306c #define IWN_APMG_PCI_STT 0x3010 #define IWN_BSM_WR_CTRL 0x3400 #define IWN_BSM_WR_MEM_SRC 0x3404 #define IWN_BSM_WR_MEM_DST 0x3408 #define IWN_BSM_WR_DWCOUNT 0x340c #define IWN_BSM_DRAM_TEXT_ADDR 0x3490 #define IWN_BSM_DRAM_TEXT_SIZE 0x3494 #define IWN_BSM_DRAM_DATA_ADDR 0x3498 #define IWN_BSM_DRAM_DATA_SIZE 0x349c #define IWN_BSM_SRAM_BASE 0x3800 /* Possible flags for register IWN_HW_IF_CONFIG. */ #define IWN_HW_IF_CONFIG_4965_R (1 << 4) #define IWN_HW_IF_CONFIG_MAC_SI (1 << 8) #define IWN_HW_IF_CONFIG_RADIO_SI (1 << 9) #define IWN_HW_IF_CONFIG_EEPROM_LOCKED (1 << 21) #define IWN_HW_IF_CONFIG_NIC_READY (1 << 22) #define IWN_HW_IF_CONFIG_HAP_WAKE_L1A (1 << 23) #define IWN_HW_IF_CONFIG_PREPARE_DONE (1 << 25) #define IWN_HW_IF_CONFIG_PREPARE (1 << 27) /* Possible values for register IWN_INT_PERIODIC. */ #define IWN_INT_PERIODIC_DIS 0x00 #define IWN_INT_PERIODIC_ENA 0xff /* Possible flags for registers IWN_PRPH_RADDR/IWN_PRPH_WADDR. */ #define IWN_PRPH_DWORD ((sizeof (uint32_t) - 1) << 24) /* Possible values for IWN_BSM_WR_MEM_DST. */ #define IWN_FW_TEXT_BASE 0x00000000 #define IWN_FW_DATA_BASE 0x00800000 /* Possible flags for register IWN_RESET. 
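 *
 * Note on the (1U << 31) conversions in this revision: when int is 32
 * bits wide, (1 << 31) shifts a set bit into the sign bit of a signed
 * int, which is undefined behavior in C (C99 6.5.7).  A minimal
 * illustration, not part of this change:
 *
 *	uint32_t ok  = 1U << 31;	well defined, 0x80000000
 *	uint32_t bad = 1 << 31;		undefined behavior on ILP32/LP64
 *
 * Masks for bits 0..30 are unaffected, which is why only the bit-31
 * definitions were converted.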
*/ #define IWN_RESET_NEVO (1 << 0) #define IWN_RESET_SW (1 << 7) #define IWN_RESET_MASTER_DISABLED (1 << 8) #define IWN_RESET_STOP_MASTER (1 << 9) -#define IWN_RESET_LINK_PWR_MGMT_DIS (1 << 31) +#define IWN_RESET_LINK_PWR_MGMT_DIS (1U << 31) /* Possible flags for register IWN_GP_CNTRL. */ #define IWN_GP_CNTRL_MAC_ACCESS_ENA (1 << 0) #define IWN_GP_CNTRL_MAC_CLOCK_READY (1 << 0) #define IWN_GP_CNTRL_INIT_DONE (1 << 2) #define IWN_GP_CNTRL_MAC_ACCESS_REQ (1 << 3) #define IWN_GP_CNTRL_SLEEP (1 << 4) #define IWN_GP_CNTRL_RFKILL (1 << 27) /* Possible flags for register IWN_GIO_CHICKEN. */ #define IWN_GIO_CHICKEN_L1A_NO_L0S_RX (1 << 23) #define IWN_GIO_CHICKEN_DIS_L0S_TIMER (1 << 29) /* Possible flags for register IWN_GIO. */ #define IWN_GIO_L0S_ENA (1 << 1) /* Possible flags for register IWN_GP_DRIVER. */ #define IWN_GP_DRIVER_RADIO_3X3_HYB (0 << 0) #define IWN_GP_DRIVER_RADIO_2X2_HYB (1 << 0) #define IWN_GP_DRIVER_RADIO_2X2_IPA (2 << 0) #define IWN_GP_DRIVER_CALIB_VER6 (1 << 2) #define IWN_GP_DRIVER_6050_1X2 (1 << 3) #define IWN_GP_DRIVER_REG_BIT_RADIO_IQ_INVERT (1 << 7) #define IWN_GP_DRIVER_NONE 0 /* Possible flags for register IWN_UCODE_GP1_CLR. */ #define IWN_UCODE_GP1_RFKILL (1 << 1) #define IWN_UCODE_GP1_CMD_BLOCKED (1 << 2) #define IWN_UCODE_GP1_CTEMP_STOP_RF (1 << 3) #define IWN_UCODE_GP1_CFG_COMPLETE (1 << 5) /* Possible flags/values for register IWN_LED. */ #define IWN_LED_BSM_CTRL (1 << 5) #define IWN_LED_OFF 0x00000038 #define IWN_LED_ON 0x00000078 #define IWN_MAX_BLINK_TBL 10 #define IWN_LED_STATIC_ON 0 #define IWN_LED_STATIC_OFF 1 #define IWN_LED_SLOW_BLINK 2 #define IWN_LED_INT_BLINK 3 #define IWN_LED_UNIT 0x1388 /* 5 ms */ static const struct { uint16_t tpt; /* Mb/s */ uint8_t on_time; uint8_t off_time; } blink_tbl[] = { {300, 5, 5}, {200, 8, 8}, {100, 11, 11}, {70, 13, 13}, {50, 15, 15}, {20, 17, 17}, {10, 19, 19}, {5, 22, 22}, {1, 26, 26}, {0, 33, 33}, /* SOLID_ON */ }; /* Possible flags for register IWN_DRAM_INT_TBL. */ #define IWN_DRAM_INT_TBL_WRAP_CHECK (1 << 27) -#define IWN_DRAM_INT_TBL_ENABLE (1 << 31) +#define IWN_DRAM_INT_TBL_ENABLE (1U << 31) /* Possible values for register IWN_ANA_PLL. */ #define IWN_ANA_PLL_INIT 0x00880300 /* Possible flags for register IWN_FH_RX_STATUS. */ #define IWN_FH_RX_STATUS_IDLE (1 << 24) /* Possible flags for register IWN_BSM_WR_CTRL. */ #define IWN_BSM_WR_CTRL_START_EN (1 << 30) -#define IWN_BSM_WR_CTRL_START (1 << 31) +#define IWN_BSM_WR_CTRL_START (1U << 31) /* Possible flags for register IWN_INT. */ #define IWN_INT_ALIVE (1 << 0) #define IWN_INT_WAKEUP (1 << 1) #define IWN_INT_SW_RX (1 << 3) #define IWN_INT_CT_REACHED (1 << 6) #define IWN_INT_RF_TOGGLED (1 << 7) #define IWN_INT_SW_ERR (1 << 25) #define IWN_INT_SCHED (1 << 26) #define IWN_INT_FH_TX (1 << 27) #define IWN_INT_RX_PERIODIC (1 << 28) #define IWN_INT_HW_ERR (1 << 29) -#define IWN_INT_FH_RX (1 << 31) +#define IWN_INT_FH_RX (1U << 31) /* Shortcut. */ #define IWN_INT_MASK_DEF \ (IWN_INT_SW_ERR | IWN_INT_HW_ERR | IWN_INT_FH_TX | \ IWN_INT_FH_RX | IWN_INT_ALIVE | IWN_INT_WAKEUP | \ IWN_INT_SW_RX | IWN_INT_CT_REACHED | IWN_INT_RF_TOGGLED) /* Possible flags for register IWN_FH_INT. */ #define IWN_FH_INT_TX_CHNL(x) (1 << (x)) #define IWN_FH_INT_RX_CHNL(x) (1 << ((x) + 16)) #define IWN_FH_INT_HI_PRIOR (1 << 30) /* Shortcuts for the above. */ #define IWN_FH_INT_TX \ (IWN_FH_INT_TX_CHNL(0) | IWN_FH_INT_TX_CHNL(1)) #define IWN_FH_INT_RX \ (IWN_FH_INT_RX_CHNL(0) | IWN_FH_INT_RX_CHNL(1) | IWN_FH_INT_HI_PRIOR) /* Possible flags/values for register IWN_FH_TX_CONFIG. 
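 *
 * A sketch of the intended usage (an assumption, not a line quoted from
 * if_iwn.c): a driver would start a DMA channel with
 *
 *	IWN_WRITE(sc, IWN_FH_TX_CONFIG(ch),
 *	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
 *
 * and quiesce it by writing IWN_FH_TX_CONFIG_DMA_PAUSE instead.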
*/ #define IWN_FH_TX_CONFIG_DMA_PAUSE 0 -#define IWN_FH_TX_CONFIG_DMA_ENA (1 << 31) +#define IWN_FH_TX_CONFIG_DMA_ENA (1U << 31) #define IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD (1 << 20) /* Possible flags/values for register IWN_FH_TXBUF_STATUS. */ #define IWN_FH_TXBUF_STATUS_TBNUM(x) ((x) << 20) #define IWN_FH_TXBUF_STATUS_TBIDX(x) ((x) << 12) #define IWN_FH_TXBUF_STATUS_TFBD_VALID 3 /* Possible flags for register IWN_FH_TX_CHICKEN. */ #define IWN_FH_TX_CHICKEN_SCHED_RETRY (1 << 1) /* Possible flags for register IWN_FH_TX_STATUS. */ #define IWN_FH_TX_STATUS_IDLE(chnl) (1 << ((chnl) + 16)) /* Possible flags for register IWN_FH_RX_CONFIG. */ -#define IWN_FH_RX_CONFIG_ENA (1 << 31) +#define IWN_FH_RX_CONFIG_ENA (1U << 31) #define IWN_FH_RX_CONFIG_NRBD(x) ((x) << 20) #define IWN_FH_RX_CONFIG_RB_SIZE_8K (1 << 16) #define IWN_FH_RX_CONFIG_SINGLE_FRAME (1 << 15) #define IWN_FH_RX_CONFIG_IRQ_DST_HOST (1 << 12) #define IWN_FH_RX_CONFIG_RB_TIMEOUT(x) ((x) << 4) #define IWN_FH_RX_CONFIG_IGN_RXF_EMPTY (1 << 2) /* Possible flags for register IWN_FH_TX_CONFIG. */ -#define IWN_FH_TX_CONFIG_DMA_ENA (1 << 31) +#define IWN_FH_TX_CONFIG_DMA_ENA (1U << 31) #define IWN_FH_TX_CONFIG_DMA_CREDIT_ENA (1 << 3) /* Possible flags for register IWN_EEPROM. */ #define IWN_EEPROM_READ_VALID (1 << 0) #define IWN_EEPROM_CMD (1 << 1) /* Possible flags for register IWN_EEPROM_GP. */ #define IWN_EEPROM_GP_IF_OWNER 0x00000180 /* Possible flags for register IWN_OTP_GP. */ #define IWN_OTP_GP_DEV_SEL_OTP (1 << 16) #define IWN_OTP_GP_RELATIVE_ACCESS (1 << 17) #define IWN_OTP_GP_ECC_CORR_STTS (1 << 20) #define IWN_OTP_GP_ECC_UNCORR_STTS (1 << 21) /* Possible flags for register IWN_SCHED_QUEUE_STATUS. */ #define IWN4965_TXQ_STATUS_ACTIVE 0x0007fc01 #define IWN4965_TXQ_STATUS_INACTIVE 0x0007fc00 #define IWN4965_TXQ_STATUS_AGGR_ENA (1 << 5 | 1 << 8) #define IWN4965_TXQ_STATUS_CHGACT (1 << 10) #define IWN5000_TXQ_STATUS_ACTIVE 0x00ff0018 #define IWN5000_TXQ_STATUS_INACTIVE 0x00ff0010 #define IWN5000_TXQ_STATUS_CHGACT (1 << 19) /* Possible flags for registers IWN_APMG_CLK_*. */ #define IWN_APMG_CLK_CTRL_DMA_CLK_RQT (1 << 9) #define IWN_APMG_CLK_CTRL_BSM_CLK_RQT (1 << 11) /* Possible flags for register IWN_APMG_PS. */ #define IWN_APMG_PS_EARLY_PWROFF_DIS (1 << 22) #define IWN_APMG_PS_PWR_SRC(x) ((x) << 24) #define IWN_APMG_PS_PWR_SRC_VMAIN 0 #define IWN_APMG_PS_PWR_SRC_VAUX 2 #define IWN_APMG_PS_PWR_SRC_MASK IWN_APMG_PS_PWR_SRC(3) #define IWN_APMG_PS_RESET_REQ (1 << 26) /* Possible flags for register IWN_APMG_DIGITAL_SVR. */ #define IWN_APMG_DIGITAL_SVR_VOLTAGE(x) (((x) & 0xf) << 5) #define IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK \ IWN_APMG_DIGITAL_SVR_VOLTAGE(0xf) #define IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32 \ IWN_APMG_DIGITAL_SVR_VOLTAGE(3) /* Possible flags for IWN_APMG_PCI_STT. */ #define IWN_APMG_PCI_STT_L1A_DIS (1 << 11) /* Possible flags for register IWN_BSM_DRAM_TEXT_SIZE. */ -#define IWN_FW_UPDATED (1 << 31) +#define IWN_FW_UPDATED (1U << 31) #define IWN_SCHED_WINSZ 64 #define IWN_SCHED_LIMIT 64 #define IWN4965_SCHED_COUNT 512 #define IWN5000_SCHED_COUNT (IWN_TX_RING_COUNT + IWN_SCHED_WINSZ) #define IWN4965_SCHEDSZ (IWN4965_NTXQUEUES * IWN4965_SCHED_COUNT * 2) #define IWN5000_SCHEDSZ (IWN5000_NTXQUEUES * IWN5000_SCHED_COUNT * 2) struct iwn_tx_desc { uint8_t reserved1[3]; uint8_t nsegs; struct { uint32_t addr; uint16_t len; } __packed segs[IWN_MAX_SCATTER]; /* Pad to 128 bytes. 
*/ uint32_t reserved2; } __packed; struct iwn_rx_status { uint16_t closed_count; uint16_t closed_rx_count; uint16_t finished_count; uint16_t finished_rx_count; uint32_t reserved[2]; } __packed; struct iwn_rx_desc { /* * The first 4 bytes of the RX frame header contain both the RX frame * size and some flags. * Bit fields: * 31: flag flush RB request * 30: flag ignore TC (terminal counter) request * 29: flag fast IRQ request * 28-14: Reserved * 13-00: RX frame size */ uint32_t len; uint8_t type; #define IWN_UC_READY 1 #define IWN_ADD_NODE_DONE 24 #define IWN_TX_DONE 28 #define IWN_REPLY_LED_CMD 72 #define IWN5000_CALIBRATION_RESULT 102 #define IWN5000_CALIBRATION_DONE 103 #define IWN_START_SCAN 130 #define IWN_NOTIF_SCAN_RESULT 131 #define IWN_STOP_SCAN 132 #define IWN_RX_STATISTICS 156 #define IWN_BEACON_STATISTICS 157 #define IWN_STATE_CHANGED 161 #define IWN_BEACON_MISSED 162 #define IWN_RX_PHY 192 #define IWN_MPDU_RX_DONE 193 #define IWN_RX_DONE 195 #define IWN_RX_COMPRESSED_BA 197 uint8_t flags; /* 0:5 reserved, 6 abort, 7 internal */ uint8_t idx; /* position within TX queue */ uint8_t qid; /* 0:4 TX queue id - 5:6 reserved - 7 unsolicited RX * or uCode-originated notification */ } __packed; #define IWN_RX_DESC_QID_MSK 0x1F #define IWN_UNSOLICITED_RX_NOTIF 0x80 /* CARD_STATE_NOTIFICATION */ #define IWN_STATE_CHANGE_HW_CARD_DISABLED 0x01 #define IWN_STATE_CHANGE_SW_CARD_DISABLED 0x02 #define IWN_STATE_CHANGE_CT_CARD_DISABLED 0x04 #define IWN_STATE_CHANGE_RXON_CARD_DISABLED 0x10 /* Possible RX status flags. */ #define IWN_RX_NO_CRC_ERR (1 << 0) #define IWN_RX_NO_OVFL_ERR (1 << 1) /* Shortcut for the above. */ #define IWN_RX_NOERROR (IWN_RX_NO_CRC_ERR | IWN_RX_NO_OVFL_ERR) #define IWN_RX_MPDU_MIC_OK (1 << 6) #define IWN_RX_CIPHER_MASK (7 << 8) #define IWN_RX_CIPHER_CCMP (2 << 8) #define IWN_RX_MPDU_DEC (1 << 11) #define IWN_RX_DECRYPT_MASK (3 << 11) #define IWN_RX_DECRYPT_OK (3 << 11) struct iwn_tx_cmd { uint8_t code; #define IWN_CMD_RXON 16 #define IWN_CMD_RXON_ASSOC 17 #define IWN_CMD_EDCA_PARAMS 19 #define IWN_CMD_TIMING 20 #define IWN_CMD_ADD_NODE 24 #define IWN_CMD_TX_DATA 28 #define IWN_CMD_LINK_QUALITY 78 #define IWN_CMD_SET_LED 72 #define IWN5000_CMD_WIMAX_COEX 90 #define IWN_TEMP_NOTIFICATION 98 #define IWN5000_CMD_CALIB_CONFIG 101 #define IWN5000_CMD_CALIB_RESULT 102 #define IWN5000_CMD_CALIB_COMPLETE 103 #define IWN_CMD_SET_POWER_MODE 119 #define IWN_CMD_SCAN 128 #define IWN_CMD_SCAN_RESULTS 131 #define IWN_CMD_TXPOWER_DBM 149 #define IWN_CMD_TXPOWER 151 #define IWN5000_CMD_TX_ANT_CONFIG 152 #define IWN_CMD_BT_COEX 155 #define IWN_CMD_GET_STATISTICS 156 #define IWN_CMD_SET_CRITICAL_TEMP 164 #define IWN_CMD_SET_SENSITIVITY 168 #define IWN_CMD_PHY_CALIB 176 #define IWN_CMD_BT_COEX_PRIOTABLE 204 #define IWN_CMD_BT_COEX_PROT 205 #define IWN_CMD_BT_COEX_NOTIF 206 /* PAN commands */ #define IWN_CMD_WIPAN_PARAMS 0xb2 #define IWN_CMD_WIPAN_RXON 0xb3 #define IWN_CMD_WIPAN_RXON_TIMING 0xb4 #define IWN_CMD_WIPAN_RXON_ASSOC 0xb6 #define IWN_CMD_WIPAN_QOS_PARAM 0xb7 #define IWN_CMD_WIPAN_WEPKEY 0xb8 #define IWN_CMD_WIPAN_P2P_CHANNEL_SWITCH 0xb9 #define IWN_CMD_WIPAN_NOA_NOTIFICATION 0xbc #define IWN_CMD_WIPAN_DEACTIVATION_COMPLETE 0xbd uint8_t flags; uint8_t idx; uint8_t qid; uint8_t data[136]; } __packed; /* * Structure for IWN_CMD_GET_STATISTICS = (0x9c) 156 * all devices identical. * * This command triggers an immediate response containing uCode statistics. * The response is in the same format as IWN_BEACON_STATISTICS (0x9d) 157. 
 *
 * If the CLEAR_STATS configuration flag is set, uCode will clear its
 * internal copy of the statistics (counters) after issuing the response.
 * This flag does not affect IWN_BEACON_STATISTICS after beacons (see below).
 *
 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
 * IWN_BEACON_STATISTICS after received beacons. This flag
 * does not affect the response to the IWN_CMD_GET_STATISTICS 0x9c itself.
 */
struct iwn_statistics_cmd {
	uint32_t configuration_flags;
#define IWN_STATS_CONF_CLEAR_STATS htole32(0x1)
#define IWN_STATS_CONF_DISABLE_NOTIF htole32(0x2)
} __packed;

/* Antenna flags, used in various commands. */
#define IWN_ANT_A (1 << 0)
#define IWN_ANT_B (1 << 1)
#define IWN_ANT_C (1 << 2)
/* Shortcuts. */
#define IWN_ANT_AB (IWN_ANT_A | IWN_ANT_B)
#define IWN_ANT_BC (IWN_ANT_B | IWN_ANT_C)
#define IWN_ANT_AC (IWN_ANT_A | IWN_ANT_C)
#define IWN_ANT_ABC (IWN_ANT_A | IWN_ANT_B | IWN_ANT_C)

/* Structure for command IWN_CMD_RXON. */
struct iwn_rxon {
	uint8_t myaddr[IEEE80211_ADDR_LEN];
	uint16_t reserved1;
	uint8_t bssid[IEEE80211_ADDR_LEN];
	uint16_t reserved2;
	uint8_t wlap[IEEE80211_ADDR_LEN];
	uint16_t reserved3;
	uint8_t mode;
#define IWN_MODE_HOSTAP 1
#define IWN_MODE_STA 3
#define IWN_MODE_IBSS 4
#define IWN_MODE_MONITOR 6
#define IWN_MODE_2STA 8
#define IWN_MODE_P2P 9
	uint8_t air;
	uint16_t rxchain;
#define IWN_RXCHAIN_DRIVER_FORCE (1 << 0)
#define IWN_RXCHAIN_VALID(x) (((x) & IWN_ANT_ABC) << 1)
#define IWN_RXCHAIN_FORCE_SEL(x) (((x) & IWN_ANT_ABC) << 4)
#define IWN_RXCHAIN_FORCE_MIMO_SEL(x) (((x) & IWN_ANT_ABC) << 7)
#define IWN_RXCHAIN_IDLE_COUNT(x) ((x) << 10)
#define IWN_RXCHAIN_MIMO_COUNT(x) ((x) << 12)
#define IWN_RXCHAIN_MIMO_FORCE (1 << 14)
	uint8_t ofdm_mask;
	uint8_t cck_mask;
	uint16_t associd;
	uint32_t flags;
#define IWN_RXON_24GHZ (1 << 0)
#define IWN_RXON_CCK (1 << 1)
#define IWN_RXON_AUTO (1 << 2)
#define IWN_RXON_SHSLOT (1 << 4)
#define IWN_RXON_SHPREAMBLE (1 << 5)
#define IWN_RXON_NODIVERSITY (1 << 7)
#define IWN_RXON_ANTENNA_A (1 << 8)
#define IWN_RXON_ANTENNA_B (1 << 9)
#define IWN_RXON_TSF (1 << 15)
#define IWN_RXON_HT_HT40MINUS (1 << 22)
#define IWN_RXON_HT_PROTMODE(x) ((x) << 23)
#define IWN_RXON_HT_MODEPURE40 (1 << 25)
#define IWN_RXON_HT_MODEMIXED (2 << 25)
#define IWN_RXON_CTS_TO_SELF (1 << 30)
	uint32_t filter;
#define IWN_FILTER_PROMISC (1 << 0)
#define IWN_FILTER_CTL (1 << 1)
#define IWN_FILTER_MULTICAST (1 << 2)
#define IWN_FILTER_NODECRYPT (1 << 3)
#define IWN_FILTER_BSS (1 << 5)
#define IWN_FILTER_BEACON (1 << 6)
	uint8_t chan;
	uint8_t reserved4;
	uint8_t ht_single_mask;
	uint8_t ht_dual_mask;
	/* The following fields are for >=5000 Series only. */
	uint8_t ht_triple_mask;
	uint8_t reserved5;
	uint16_t acquisition;
	uint16_t reserved6;
} __packed;

#define IWN4965_RXONSZ (sizeof (struct iwn_rxon) - 6)
#define IWN5000_RXONSZ (sizeof (struct iwn_rxon))

/* Structure for command IWN_CMD_ASSOCIATE. */
struct iwn_assoc {
	uint32_t flags;
	uint32_t filter;
	uint8_t ofdm_mask;
	uint8_t cck_mask;
	uint16_t reserved;
} __packed;

/* Structure for command IWN_CMD_EDCA_PARAMS. */
struct iwn_edca_params {
	uint32_t flags;
#define IWN_EDCA_UPDATE (1 << 0)
#define IWN_EDCA_TXOP (1 << 4)
	struct {
		uint16_t cwmin;
		uint16_t cwmax;
		uint8_t aifsn;
		uint8_t reserved;
		uint16_t txoplimit;
	} __packed ac[WME_NUM_AC];
} __packed;

/* Structure for command IWN_CMD_TIMING. */
struct iwn_cmd_timing {
	uint64_t tstamp;
	uint16_t bintval;
	uint16_t atim;
	uint32_t binitval;
	uint16_t lintval;
	uint8_t dtim_period;
	uint8_t delta_cp_bss_tbtts;
} __packed;
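
/*
 * Sketch (editorial, not driver code): composing a typical rxchain word
 * for a 2x2 device with the IWN_RXCHAIN_* macros above; the antenna
 * selection is a hypothetical example.
 */
#if 0	/* illustration only */
static const uint16_t iwn_example_rxchain =
    IWN_RXCHAIN_VALID(IWN_ANT_AB) | IWN_RXCHAIN_FORCE_SEL(IWN_ANT_AB) |
    IWN_RXCHAIN_IDLE_COUNT(2) | IWN_RXCHAIN_MIMO_COUNT(2);
#endif

/* Structure for command IWN_CMD_ADD_NODE.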
*/ struct iwn_node_info { uint8_t control; #define IWN_NODE_UPDATE (1 << 0) uint8_t reserved1[3]; uint8_t macaddr[IEEE80211_ADDR_LEN]; uint16_t reserved2; uint8_t id; #define IWN_ID_BSS 0 #define IWN_STA_ID 1 #define IWN_PAN_ID_BCAST 14 #define IWN5000_ID_BROADCAST 15 #define IWN4965_ID_BROADCAST 31 uint8_t flags; #define IWN_FLAG_SET_KEY (1 << 0) #define IWN_FLAG_SET_DISABLE_TID (1 << 1) #define IWN_FLAG_SET_TXRATE (1 << 2) #define IWN_FLAG_SET_ADDBA (1 << 3) #define IWN_FLAG_SET_DELBA (1 << 4) uint16_t reserved3; uint16_t kflags; #define IWN_KFLAG_CCMP (1 << 1) #define IWN_KFLAG_MAP (1 << 3) #define IWN_KFLAG_KID(kid) ((kid) << 8) #define IWN_KFLAG_INVALID (1 << 11) #define IWN_KFLAG_GROUP (1 << 14) uint8_t tsc2; /* TKIP TSC2 */ uint8_t reserved4; uint16_t ttak[5]; uint8_t kid; uint8_t reserved5; uint8_t key[16]; /* The following 3 fields are for 5000 Series only. */ uint64_t tsc; uint8_t rxmic[8]; uint8_t txmic[8]; uint32_t htflags; #define IWN_SMPS_MIMO_PROT (1 << 17) #define IWN_AMDPU_SIZE_FACTOR(x) ((x) << 19) #define IWN_NODE_HT40 (1 << 21) #define IWN_SMPS_MIMO_DIS (1 << 22) #define IWN_AMDPU_DENSITY(x) ((x) << 23) uint32_t mask; uint16_t disable_tid; uint16_t reserved6; uint8_t addba_tid; uint8_t delba_tid; uint16_t addba_ssn; uint32_t reserved7; } __packed; struct iwn4965_node_info { uint8_t control; uint8_t reserved1[3]; uint8_t macaddr[IEEE80211_ADDR_LEN]; uint16_t reserved2; uint8_t id; uint8_t flags; uint16_t reserved3; uint16_t kflags; uint8_t tsc2; /* TKIP TSC2 */ uint8_t reserved4; uint16_t ttak[5]; uint8_t kid; uint8_t reserved5; uint8_t key[16]; uint32_t htflags; uint32_t mask; uint16_t disable_tid; uint16_t reserved6; uint8_t addba_tid; uint8_t delba_tid; uint16_t addba_ssn; uint32_t reserved7; } __packed; #define IWN_RFLAG_MCS (1 << 8) #define IWN_RFLAG_CCK (1 << 9) #define IWN_RFLAG_GREENFIELD (1 << 10) #define IWN_RFLAG_HT40 (1 << 11) #define IWN_RFLAG_DUPLICATE (1 << 12) #define IWN_RFLAG_SGI (1 << 13) #define IWN_RFLAG_ANT(x) ((x) << 14) /* Structure for command IWN_CMD_TX_DATA. */ struct iwn_cmd_data { uint16_t len; uint16_t lnext; uint32_t flags; #define IWN_TX_NEED_PROTECTION (1 << 0) /* 5000 only */ #define IWN_TX_NEED_RTS (1 << 1) #define IWN_TX_NEED_CTS (1 << 2) #define IWN_TX_NEED_ACK (1 << 3) #define IWN_TX_LINKQ (1 << 4) #define IWN_TX_IMM_BA (1 << 6) #define IWN_TX_FULL_TXOP (1 << 7) #define IWN_TX_BT_DISABLE (1 << 12) /* bluetooth coexistence */ #define IWN_TX_AUTO_SEQ (1 << 13) #define IWN_TX_MORE_FRAG (1 << 14) #define IWN_TX_INSERT_TSTAMP (1 << 16) #define IWN_TX_NEED_PADDING (1 << 20) uint32_t scratch; uint32_t rate; uint8_t id; uint8_t security; #define IWN_CIPHER_WEP40 1 #define IWN_CIPHER_CCMP 2 #define IWN_CIPHER_TKIP 3 #define IWN_CIPHER_WEP104 9 uint8_t linkq; uint8_t reserved2; uint8_t key[16]; uint16_t fnext; uint16_t reserved3; uint32_t lifetime; #define IWN_LIFETIME_INFINITE 0xffffffff uint32_t loaddr; uint8_t hiaddr; uint8_t rts_ntries; uint8_t data_ntries; uint8_t tid; uint16_t timeout; uint16_t txop; } __packed; /* Structure for command IWN_CMD_LINK_QUALITY. */ #define IWN_MAX_TX_RETRIES 16 struct iwn_cmd_link_quality { uint8_t id; uint8_t reserved1; uint16_t ctl; uint8_t flags; uint8_t mimo; uint8_t antmsk_1stream; uint8_t antmsk_2stream; uint8_t ridx[WME_NUM_AC]; uint16_t ampdu_limit; uint8_t ampdu_threshold; uint8_t ampdu_max; uint32_t reserved2; uint32_t retry[IWN_MAX_TX_RETRIES]; uint32_t reserved3; } __packed; /* Structure for command IWN_CMD_SET_LED. 
*/ struct iwn_cmd_led { uint32_t unit; /* multiplier (in usecs) */ uint8_t which; #define IWN_LED_ACTIVITY 1 #define IWN_LED_LINK 2 uint8_t off; uint8_t on; uint8_t reserved; } __packed; /* Structure for command IWN5000_CMD_WIMAX_COEX. */ struct iwn5000_wimax_coex { uint32_t flags; #define IWN_WIMAX_COEX_STA_TABLE_VALID (1 << 0) #define IWN_WIMAX_COEX_UNASSOC_WA_UNMASK (1 << 2) #define IWN_WIMAX_COEX_ASSOC_WA_UNMASK (1 << 3) #define IWN_WIMAX_COEX_ENABLE (1 << 7) struct iwn5000_wimax_event { uint8_t request; uint8_t window; uint8_t reserved; uint8_t flags; } __packed events[16]; } __packed; /* Structures for command IWN5000_CMD_CALIB_CONFIG. */ struct iwn5000_calib_elem { uint32_t enable; uint32_t start; #define IWN5000_CALIB_DC (1 << 1) uint32_t send; uint32_t apply; uint32_t reserved; } __packed; struct iwn5000_calib_status { struct iwn5000_calib_elem once; struct iwn5000_calib_elem perd; uint32_t flags; } __packed; struct iwn5000_calib_config { struct iwn5000_calib_status ucode; struct iwn5000_calib_status driver; uint32_t reserved; } __packed; /* Structure for command IWN_CMD_SET_POWER_MODE. */ struct iwn_pmgt_cmd { uint16_t flags; #define IWN_PS_ALLOW_SLEEP (1 << 0) #define IWN_PS_NOTIFY (1 << 1) #define IWN_PS_SLEEP_OVER_DTIM (1 << 2) #define IWN_PS_PCI_PMGT (1 << 3) #define IWN_PS_FAST_PD (1 << 4) #define IWN_PS_BEACON_FILTERING (1 << 5) #define IWN_PS_SHADOW_REG (1 << 6) #define IWN_PS_CT_KILL (1 << 7) #define IWN_PS_BT_SCD (1 << 8) #define IWN_PS_ADVANCED_PM (1 << 9) uint8_t keepalive; uint8_t debug; uint32_t rxtimeout; uint32_t txtimeout; uint32_t intval[5]; uint32_t beacons; } __packed; /* Structures for command IWN_CMD_SCAN. */ struct iwn_scan_essid { uint8_t id; uint8_t len; uint8_t data[IEEE80211_NWID_LEN]; } __packed; struct iwn_scan_hdr { uint16_t len; uint8_t scan_flags; uint8_t nchan; uint16_t quiet_time; uint16_t quiet_threshold; uint16_t crc_threshold; uint16_t rxchain; uint32_t max_svc; /* background scans */ uint32_t pause_svc; /* background scans */ uint32_t flags; uint32_t filter; /* Followed by a struct iwn_cmd_data. */ /* Followed by an array of 20 structs iwn_scan_essid. */ /* Followed by probe request body. */ /* Followed by an array of ``nchan'' structs iwn_scan_chan. */ } __packed; struct iwn_scan_chan { uint32_t flags; #define IWN_CHAN_PASSIVE (0 << 0) #define IWN_CHAN_ACTIVE (1 << 0) #define IWN_CHAN_NPBREQS(x) (((1 << (x)) - 1) << 1) uint16_t chan; uint8_t rf_gain; uint8_t dsp_gain; uint16_t active; /* msecs */ uint16_t passive; /* msecs */ } __packed; #define IWN_SCAN_CRC_TH_DISABLED 0 #define IWN_SCAN_CRC_TH_DEFAULT htole16(1) #define IWN_SCAN_CRC_TH_NEVER htole16(0xffff) /* Maximum size of a scan command. */ #define IWN_SCAN_MAXSZ (MCLBYTES - 4) #define IWN_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ #define IWN_ACTIVE_DWELL_TIME_52 (20) #define IWN_ACTIVE_DWELL_FACTOR_24 (3) #define IWN_ACTIVE_DWELL_FACTOR_52 (2) #define IWN_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ #define IWN_PASSIVE_DWELL_TIME_52 (10) #define IWN_PASSIVE_DWELL_BASE (100) #define IWN_CHANNEL_TUNE_TIME (5) #define IWN_SCAN_CHAN_TIMEOUT 2 /* Structure for command IWN_CMD_TXPOWER (4965AGN only.) */ #define IWN_RIDX_MAX 32 struct iwn4965_cmd_txpower { uint8_t band; uint8_t reserved1; uint8_t chan; uint8_t reserved2; struct { uint8_t rf_gain[2]; uint8_t dsp_gain[2]; } __packed power[IWN_RIDX_MAX + 1]; } __packed; /* Structure for command IWN_CMD_TXPOWER_DBM (5000 Series only.) 
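*/

/*
 * Sketch (editorial, not driver code): filling the iwn_cmd_led structure
 * above from blink_tbl[]; `i' is a hypothetical index chosen from the
 * current throughput.
 */
#if 0	/* illustration only */
static __inline void
iwn_example_led(struct iwn_cmd_led *led, int i)
{
	led->unit  = htole32(IWN_LED_UNIT);	/* 5 ms tick */
	led->which = IWN_LED_LINK;
	led->on    = blink_tbl[i].on_time;
	led->off   = blink_tbl[i].off_time;
	led->reserved = 0;
}
#endif

/* Structure for command IWN_CMD_TXPOWER_DBM (5000 Series only.)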
*/ struct iwn5000_cmd_txpower { int8_t global_limit; /* in half-dBm */ #define IWN5000_TXPOWER_AUTO 0x7f #define IWN5000_TXPOWER_MAX_DBM 16 uint8_t flags; #define IWN5000_TXPOWER_NO_CLOSED (1 << 6) int8_t srv_limit; /* in half-dBm */ uint8_t reserved; } __packed; /* Structures for command IWN_CMD_BLUETOOTH. */ struct iwn_bluetooth { uint8_t flags; #define IWN_BT_COEX_CHAN_ANN (1 << 0) #define IWN_BT_COEX_BT_PRIO (1 << 1) #define IWN_BT_COEX_2_WIRE (1 << 2) uint8_t lead_time; #define IWN_BT_LEAD_TIME_DEF 30 uint8_t max_kill; #define IWN_BT_MAX_KILL_DEF 5 uint8_t reserved; uint32_t kill_ack; uint32_t kill_cts; } __packed; struct iwn6000_btcoex_config { uint8_t flags; #define IWN_BT_FLAG_COEX6000_CHAN_INHIBITION 1 #define IWN_BT_FLAG_COEX6000_MODE_MASK ((1 << 3) | (1 << 4) | (1 << 5 )) #define IWN_BT_FLAG_COEX6000_MODE_SHIFT 3 #define IWN_BT_FLAG_COEX6000_MODE_DISABLED 0 #define IWN_BT_FLAG_COEX6000_MODE_LEGACY_2W 1 #define IWN_BT_FLAG_COEX6000_MODE_3W 2 #define IWN_BT_FLAG_COEX6000_MODE_4W 3 #define IWN_BT_FLAG_UCODE_DEFAULT (1 << 6) #define IWN_BT_FLAG_SYNC_2_BT_DISABLE (1 << 7) uint8_t lead_time; uint8_t max_kill; uint8_t bt3_t7_timer; uint32_t kill_ack; uint32_t kill_cts; uint8_t sample_time; uint8_t bt3_t2_timer; uint16_t bt4_reaction; uint32_t lookup_table[12]; uint16_t bt4_decision; uint16_t valid; uint8_t prio_boost; uint8_t tx_prio_boost; uint16_t rx_prio_boost; } __packed; /* Structure for enhanced command IWN_CMD_BLUETOOTH for 2000 Series. */ struct iwn2000_btcoex_config { uint8_t flags; /* Cf Flags in iwn6000_btcoex_config */ uint8_t lead_time; uint8_t max_kill; uint8_t bt3_t7_timer; uint32_t kill_ack; uint32_t kill_cts; uint8_t sample_time; uint8_t bt3_t2_timer; uint16_t bt4_reaction; uint32_t lookup_table[12]; uint16_t bt4_decision; uint16_t valid; uint32_t prio_boost; /* size change prior to iwn6000_btcoex_config */ uint8_t reserved; /* added prior to iwn6000_btcoex_config */ uint8_t tx_prio_boost; uint16_t rx_prio_boost; } __packed; struct iwn_btcoex_priotable { uint8_t calib_init1; uint8_t calib_init2; uint8_t calib_periodic_low1; uint8_t calib_periodic_low2; uint8_t calib_periodic_high1; uint8_t calib_periodic_high2; uint8_t dtim; uint8_t scan52; uint8_t scan24; uint8_t reserved[7]; } __packed; struct iwn_btcoex_prot { uint8_t open; uint8_t type; uint8_t reserved[2]; } __packed; /* Structure for command IWN_CMD_SET_CRITICAL_TEMP. */ struct iwn_critical_temp { uint32_t reserved; uint32_t tempM; uint32_t tempR; /* degK <-> degC conversion macros. */ #define IWN_CTOK(c) ((c) + 273) #define IWN_KTOC(k) ((k) - 273) #define IWN_CTOMUK(c) (((c) * 1000000) + 273150000) } __packed; /* Structures for command IWN_CMD_SET_SENSITIVITY. */ struct iwn_sensitivity_cmd { uint16_t which; #define IWN_SENSITIVITY_DEFAULTTBL 0 #define IWN_SENSITIVITY_WORKTBL 1 uint16_t energy_cck; uint16_t energy_ofdm; uint16_t corr_ofdm_x1; uint16_t corr_ofdm_mrc_x1; uint16_t corr_cck_mrc_x4; uint16_t corr_ofdm_x4; uint16_t corr_ofdm_mrc_x4; uint16_t corr_barker; uint16_t corr_barker_mrc; uint16_t corr_cck_x4; uint16_t energy_ofdm_th; } __packed; struct iwn_enhanced_sensitivity_cmd { uint16_t which; uint16_t energy_cck; uint16_t energy_ofdm; uint16_t corr_ofdm_x1; uint16_t corr_ofdm_mrc_x1; uint16_t corr_cck_mrc_x4; uint16_t corr_ofdm_x4; uint16_t corr_ofdm_mrc_x4; uint16_t corr_barker; uint16_t corr_barker_mrc; uint16_t corr_cck_x4; uint16_t energy_ofdm_th; /* "Enhanced" part. 
*/
	uint16_t ina_det_ofdm;
	uint16_t ina_det_cck;
	uint16_t corr_11_9_en;
	uint16_t ofdm_det_slope_mrc;
	uint16_t ofdm_det_icept_mrc;
	uint16_t ofdm_det_slope;
	uint16_t ofdm_det_icept;
	uint16_t cck_det_slope_mrc;
	uint16_t cck_det_icept_mrc;
	uint16_t cck_det_slope;
	uint16_t cck_det_icept;
	uint16_t reserved;
} __packed;

/*
 * Maximum number of calibration results sent to the runtime firmware.
 * Note: TEMP_OFFSET counts for two of them (standard and v2).
 */
#define IWN5000_PHY_CALIB_MAX_RESULT 8

/* Structures for command IWN_CMD_PHY_CALIB. */
struct iwn_phy_calib {
	uint8_t code;
#define IWN4965_PHY_CALIB_DIFF_GAIN 7
#define IWN5000_PHY_CALIB_DC 8
#define IWN5000_PHY_CALIB_LO 9
#define IWN5000_PHY_CALIB_TX_IQ 11
#define IWN5000_PHY_CALIB_CRYSTAL 15
#define IWN5000_PHY_CALIB_BASE_BAND 16
#define IWN5000_PHY_CALIB_TX_IQ_PERIODIC 17
#define IWN5000_PHY_CALIB_TEMP_OFFSET 18
#define IWN5000_PHY_CALIB_RESET_NOISE_GAIN 18
#define IWN5000_PHY_CALIB_NOISE_GAIN 19
	uint8_t group;
	uint8_t ngroups;
	uint8_t isvalid;
} __packed;

struct iwn5000_phy_calib_crystal {
	uint8_t code;
	uint8_t group;
	uint8_t ngroups;
	uint8_t isvalid;
	uint8_t cap_pin[2];
	uint8_t reserved[2];
} __packed;

struct iwn5000_phy_calib_temp_offset {
	uint8_t code;
	uint8_t group;
	uint8_t ngroups;
	uint8_t isvalid;
	int16_t offset;
#define IWN_DEFAULT_TEMP_OFFSET 2700
	uint16_t reserved;
} __packed;

struct iwn5000_phy_calib_temp_offsetv2 {
	uint8_t code;
	uint8_t group;
	uint8_t ngroups;
	uint8_t isvalid;
	int16_t offset_high;
	int16_t offset_low;
	int16_t burnt_voltage_ref;
	int16_t reserved;
} __packed;

struct iwn_phy_calib_gain {
	uint8_t code;
	uint8_t group;
	uint8_t ngroups;
	uint8_t isvalid;
	int8_t gain[3];
	uint8_t reserved;
} __packed;

/* Structure for command IWN_CMD_SPECTRUM_MEASUREMENT. */
struct iwn_spectrum_cmd {
	uint16_t len;
	uint8_t token;
	uint8_t id;
	uint8_t origin;
	uint8_t periodic;
	uint16_t timeout;
	uint32_t start;
	uint32_t reserved1;
	uint32_t flags;
	uint32_t filter;
	uint16_t nchan;
	uint16_t reserved2;
	struct {
		uint32_t duration;
		uint8_t chan;
		uint8_t type;
#define IWN_MEASUREMENT_BASIC (1 << 0)
#define IWN_MEASUREMENT_CCA (1 << 1)
#define IWN_MEASUREMENT_RPI_HISTOGRAM (1 << 2)
#define IWN_MEASUREMENT_NOISE_HISTOGRAM (1 << 3)
#define IWN_MEASUREMENT_FRAME (1 << 4)
#define IWN_MEASUREMENT_IDLE (1 << 7)
		uint16_t reserved;
	} __packed chan[10];
} __packed;

/* Structure for IWN_UC_READY notification. */
#define IWN_NATTEN_GROUPS 5
struct iwn_ucode_info {
	uint8_t minor;
	uint8_t major;
	uint16_t reserved1;
	uint8_t revision[8];
	uint8_t type;
	uint8_t subtype;
#define IWN_UCODE_RUNTIME 0
#define IWN_UCODE_INIT 9
	uint16_t reserved2;
	uint32_t logptr;
	uint32_t errptr;
	uint32_t tstamp;
	uint32_t valid;
	/* The following fields are for UCODE_INIT only. */
	int32_t volt;
	struct {
		int32_t chan20MHz;
		int32_t chan40MHz;
	} __packed temp[4];
	int32_t atten[IWN_NATTEN_GROUPS][2];
} __packed;

/* Structures for IWN_TX_DONE notification. */
#define IWN_TX_STATUS_MSK 0xff
#define TX_STATUS_SUCCESS 0x01
#define TX_STATUS_DIRECT_DONE 0x02
#define IWN_TX_SUCCESS 0x00
#define IWN_TX_FAIL 0x80 /* all failures have 0x80 set */
#define IWN_TX_FAIL_SHORT_LIMIT 0x82 /* too many RTS retries */
#define IWN_TX_FAIL_LONG_LIMIT 0x83 /* too many retries */
#define IWN_TX_FAIL_FIFO_UNDERRRUN 0x84 /* tx fifo not kept running */
#define IWN_TX_FAIL_DEST_IN_PS 0x88 /* sta found in power save */
#define IWN_TX_FAIL_TX_LOCKED 0x90 /* waiting to see traffic */
#define IWN_TX_FAIL_STA_INVALID 0x8b /* XXX STA invalid (???) */
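
/*
 * Sketch (editorial, not driver code): interpreting a TX completion
 * status with the codes above; every failure code has 0x80 set.
 */
#if 0	/* illustration only */
static __inline int
iwn_example_tx_failed(uint32_t status)
{
	return (((status & IWN_TX_STATUS_MSK) & IWN_TX_FAIL) != 0);
}
#endif

/* Per-frame TX status, 4965 layout.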
*/ struct iwn4965_tx_stat { uint8_t nframes; uint8_t btkillcnt; uint8_t rtsfailcnt; uint8_t ackfailcnt; uint32_t rate; uint16_t duration; uint16_t reserved; uint32_t power[2]; uint32_t status; } __packed; struct iwn5000_tx_stat { uint8_t nframes; /* 1 no aggregation, >1 aggregation */ uint8_t btkillcnt; uint8_t rtsfailcnt; uint8_t ackfailcnt; uint32_t rate; uint16_t duration; uint16_t reserved; uint32_t power[2]; uint32_t info; uint16_t seq; uint16_t len; uint8_t tlc; uint8_t ratid; /* tid (0:3), sta_id (4:7) */ uint8_t fc[2]; uint16_t status; uint16_t sequence; } __packed; /* Structure for IWN_BEACON_MISSED notification. */ struct iwn_beacon_missed { uint32_t consecutive; uint32_t total; uint32_t expected; uint32_t received; } __packed; /* Structure for IWN_MPDU_RX_DONE notification. */ struct iwn_rx_mpdu { uint16_t len; uint16_t reserved; } __packed; /* Structures for IWN_RX_DONE and IWN_MPDU_RX_DONE notifications. */ struct iwn4965_rx_phystat { uint16_t antenna; uint16_t agc; uint8_t rssi[6]; } __packed; struct iwn5000_rx_phystat { uint32_t reserved1; uint32_t agc; uint16_t rssi[3]; } __packed; struct iwn_rx_stat { uint8_t phy_len; uint8_t cfg_phy_len; #define IWN_STAT_MAXLEN 20 uint8_t id; uint8_t reserved1; uint64_t tstamp; uint32_t beacon; uint16_t flags; #define IWN_STAT_FLAG_SHPREAMBLE (1 << 2) uint16_t chan; uint8_t phybuf[32]; uint32_t rate; /* * rate bit fields * * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): * 2-0: 0) 6 Mbps * 1) 12 Mbps * 2) 18 Mbps * 3) 24 Mbps * 4) 36 Mbps * 5) 48 Mbps * 6) 54 Mbps * 7) 60 Mbps * * 4-3: 0) Single stream (SISO) * 1) Dual stream (MIMO) * 2) Triple stream (MIMO) * * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data * * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"): * 3-0: 0xD) 6 Mbps * 0xF) 9 Mbps * 0x5) 12 Mbps * 0x7) 18 Mbps * 0x9) 24 Mbps * 0xB) 36 Mbps * 0x1) 48 Mbps * 0x3) 54 Mbps * * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"): * 6-0: 10) 1 Mbps * 20) 2 Mbps * 55) 5.5 Mbps * 110) 11 Mbps * */ uint16_t len; uint16_t reserve3; } __packed; #define IWN_RSSI_TO_DBM 44 /* Structure for IWN_RX_COMPRESSED_BA notification. */ struct iwn_compressed_ba { uint8_t macaddr[IEEE80211_ADDR_LEN]; uint16_t reserved; uint8_t id; uint8_t tid; uint16_t seq; uint64_t bitmap; uint16_t qid; uint16_t ssn; } __packed; /* Structure for IWN_START_SCAN notification. */ struct iwn_start_scan { uint64_t tstamp; uint32_t tbeacon; uint8_t chan; uint8_t band; uint16_t reserved; uint32_t status; } __packed; /* Structure for IWN_STOP_SCAN notification. */ struct iwn_stop_scan { uint8_t nchan; uint8_t status; uint8_t reserved; uint8_t chan; uint64_t tsf; } __packed; /* Structure for IWN_SPECTRUM_MEASUREMENT notification. */ struct iwn_spectrum_notif { uint8_t id; uint8_t token; uint8_t idx; uint8_t state; #define IWN_MEASUREMENT_START 0 #define IWN_MEASUREMENT_STOP 1 uint32_t start; uint8_t band; uint8_t chan; uint8_t type; uint8_t reserved1; uint32_t cca_ofdm; uint32_t cca_cck; uint32_t cca_time; uint8_t basic; uint8_t reserved2[3]; uint32_t ofdm[8]; uint32_t cck[8]; uint32_t stop; uint32_t status; #define IWN_MEASUREMENT_OK 0 #define IWN_MEASUREMENT_CONCURRENT 1 #define IWN_MEASUREMENT_CSA_CONFLICT 2 #define IWN_MEASUREMENT_TGH_CONFLICT 3 #define IWN_MEASUREMENT_STOPPED 6 #define IWN_MEASUREMENT_TIMEOUT 7 #define IWN_MEASUREMENT_FAILED 8 } __packed; /* Structures for IWN_{RX,BEACON}_STATISTICS notification. 
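(a rate-word sketch precedes them below) */

/*
 * Sketch (editorial, not driver code): classifying the iwn_rx_stat rate
 * word documented above.  Bit 8 selects the HT format and, for legacy
 * rates, bit 9 selects CCK; these positions match the IWN_RFLAG_MCS and
 * IWN_RFLAG_CCK flags.
 */
#if 0	/* illustration only */
static __inline int
iwn_example_rate_is_cck(uint32_t rate)
{
	return ((rate & IWN_RFLAG_MCS) == 0 && (rate & IWN_RFLAG_CCK) != 0);
}
#endif

/* Structures for IWN_{RX,BEACON}_STATISTICS notification.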
*/ struct iwn_rx_phy_stats { uint32_t ina; uint32_t fina; uint32_t bad_plcp; uint32_t bad_crc32; uint32_t overrun; uint32_t eoverrun; uint32_t good_crc32; uint32_t fa; uint32_t bad_fina_sync; uint32_t sfd_timeout; uint32_t fina_timeout; uint32_t no_rts_ack; uint32_t rxe_limit; uint32_t ack; uint32_t cts; uint32_t ba_resp; uint32_t dsp_kill; uint32_t bad_mh; uint32_t rssi_sum; uint32_t reserved; } __packed; struct iwn_rx_general_stats { uint32_t bad_cts; uint32_t bad_ack; uint32_t not_bss; uint32_t filtered; uint32_t bad_chan; uint32_t beacons; uint32_t missed_beacons; uint32_t adc_saturated; /* time in 0.8us */ uint32_t ina_searched; /* time in 0.8us */ uint32_t noise[3]; uint32_t flags; uint32_t load; uint32_t fa; uint32_t rssi[3]; uint32_t energy[3]; } __packed; struct iwn_rx_ht_phy_stats { uint32_t bad_plcp; uint32_t overrun; uint32_t eoverrun; uint32_t good_crc32; uint32_t bad_crc32; uint32_t bad_mh; uint32_t good_ampdu_crc32; uint32_t ampdu; uint32_t fragment; uint32_t reserved; } __packed; struct iwn_rx_stats { struct iwn_rx_phy_stats ofdm; struct iwn_rx_phy_stats cck; struct iwn_rx_general_stats general; struct iwn_rx_ht_phy_stats ht; } __packed; struct iwn_tx_stats { uint32_t preamble; uint32_t rx_detected; uint32_t bt_defer; uint32_t bt_kill; uint32_t short_len; uint32_t cts_timeout; uint32_t ack_timeout; uint32_t exp_ack; uint32_t ack; uint32_t msdu; uint32_t busrt_err1; uint32_t burst_err2; uint32_t cts_collision; uint32_t ack_collision; uint32_t ba_timeout; uint32_t ba_resched; uint32_t query_ampdu; uint32_t query; uint32_t query_ampdu_frag; uint32_t query_mismatch; uint32_t not_ready; uint32_t underrun; uint32_t bt_ht_kill; uint32_t rx_ba_resp; uint32_t reserved[2]; } __packed; struct iwn_general_stats { uint32_t temp; uint32_t temp_m; uint32_t burst_check; uint32_t burst; uint32_t reserved1[4]; uint32_t sleep; uint32_t slot_out; uint32_t slot_idle; uint32_t ttl_tstamp; uint32_t tx_ant_a; uint32_t tx_ant_b; uint32_t exec; uint32_t probe; uint32_t reserved2[2]; uint32_t rx_enabled; uint32_t reserved3[3]; } __packed; struct iwn_stats { uint32_t flags; struct iwn_rx_stats rx; struct iwn_tx_stats tx; struct iwn_general_stats general; } __packed; /* Firmware error dump. */ struct iwn_fw_dump { uint32_t valid; uint32_t id; uint32_t pc; uint32_t branch_link[2]; uint32_t interrupt_link[2]; uint32_t error_data[2]; uint32_t src_line; uint32_t tsf; uint32_t time[2]; } __packed; /* TLV firmware header. */ struct iwn_fw_tlv_hdr { uint32_t zero; /* Always 0, to differentiate from legacy. */ uint32_t signature; #define IWN_FW_SIGNATURE 0x0a4c5749 /* "IWL\n" */ uint8_t descr[64]; uint32_t rev; #define IWN_FW_API(x) (((x) >> 8) & 0xff) uint32_t build; uint64_t altmask; } __packed; /* TLV header. 
*/ struct iwn_fw_tlv { uint16_t type; #define IWN_FW_TLV_MAIN_TEXT 1 #define IWN_FW_TLV_MAIN_DATA 2 #define IWN_FW_TLV_INIT_TEXT 3 #define IWN_FW_TLV_INIT_DATA 4 #define IWN_FW_TLV_BOOT_TEXT 5 #define IWN_FW_TLV_PBREQ_MAXLEN 6 #define IWN_FW_TLV_PAN 7 #define IWN_FW_TLV_RUNT_EVTLOG_PTR 8 #define IWN_FW_TLV_RUNT_EVTLOG_SIZE 9 #define IWN_FW_TLV_RUNT_ERRLOG_PTR 10 #define IWN_FW_TLV_INIT_EVTLOG_PTR 11 #define IWN_FW_TLV_INIT_EVTLOG_SIZE 12 #define IWN_FW_TLV_INIT_ERRLOG_PTR 13 #define IWN_FW_TLV_ENH_SENS 14 #define IWN_FW_TLV_PHY_CALIB 15 #define IWN_FW_TLV_WOWLAN_INST 16 #define IWN_FW_TLV_WOWLAN_DATA 17 #define IWN_FW_TLV_FLAGS 18 uint16_t alt; uint32_t len; } __packed; #define IWN4965_FW_TEXT_MAXSZ ( 96 * 1024) #define IWN4965_FW_DATA_MAXSZ ( 40 * 1024) #define IWN5000_FW_TEXT_MAXSZ (256 * 1024) #define IWN5000_FW_DATA_MAXSZ ( 80 * 1024) #define IWN_FW_BOOT_TEXT_MAXSZ 1024 #define IWN4965_FWSZ (IWN4965_FW_TEXT_MAXSZ + IWN4965_FW_DATA_MAXSZ) #define IWN5000_FWSZ IWN5000_FW_TEXT_MAXSZ /* * Microcode flags TLV (18.) */ /** * enum iwn_ucode_tlv_flag - ucode API flags * @IWN_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously * was a separate TLV but moved here to save space. * @IWN_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID, * treats good CRC threshold as a boolean * @IWN_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). * @IWN_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. * @IWN_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS * @IWN_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD * @IWN_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan * offload profile config command. * @IWN_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api * @IWN_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API. * @IWN_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six * (rather than two) IPv6 addresses * @IWN_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API * @IWN_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element * from the probe request template. * @IWN_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping * connection when going back to D0 * @IWN_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) * @IWN_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) * @IWN_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan. * @IWN_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API * @IWN_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command * containing CAM (Continuous Active Mode) indication. */ enum iwn_ucode_tlv_flag { IWN_UCODE_TLV_FLAGS_PAN = (1 << 0), IWN_UCODE_TLV_FLAGS_NEWSCAN = (1 << 1), IWN_UCODE_TLV_FLAGS_MFP = (1 << 2), IWN_UCODE_TLV_FLAGS_P2P = (1 << 3), IWN_UCODE_TLV_FLAGS_DW_BC_TABLE = (1 << 4), IWN_UCODE_TLV_FLAGS_NEWBT_COEX = (1 << 5), IWN_UCODE_TLV_FLAGS_UAPSD = (1 << 6), IWN_UCODE_TLV_FLAGS_SHORT_BL = (1 << 7), IWN_UCODE_TLV_FLAGS_RX_ENERGY_API = (1 << 8), IWN_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = (1 << 9), IWN_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = (1 << 10), IWN_UCODE_TLV_FLAGS_BF_UPDATED = (1 << 11), IWN_UCODE_TLV_FLAGS_NO_BASIC_SSID = (1 << 12), IWN_UCODE_TLV_FLAGS_D3_CONTINUITY_API = (1 << 14), IWN_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = (1 << 15), IWN_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = (1 << 16), IWN_UCODE_TLV_FLAGS_SCHED_SCAN = (1 << 17), IWN_UCODE_TLV_FLAGS_STA_KEY_CMD = (1 << 19), IWN_UCODE_TLV_FLAGS_DEVICE_PS_CMD = (1 << 20), }; /* * Offsets into EEPROM. 
*/ #define IWN_EEPROM_MAC 0x015 #define IWN_EEPROM_SKU_CAP 0x045 #define IWN_EEPROM_RFCFG 0x048 #define IWN4965_EEPROM_DOMAIN 0x060 #define IWN4965_EEPROM_BAND1 0x063 #define IWN5000_EEPROM_REG 0x066 #define IWN5000_EEPROM_CAL 0x067 #define IWN4965_EEPROM_BAND2 0x072 #define IWN4965_EEPROM_BAND3 0x080 #define IWN4965_EEPROM_BAND4 0x08d #define IWN4965_EEPROM_BAND5 0x099 #define IWN4965_EEPROM_BAND6 0x0a0 #define IWN4965_EEPROM_BAND7 0x0a8 #define IWN4965_EEPROM_MAXPOW 0x0e8 #define IWN4965_EEPROM_VOLTAGE 0x0e9 #define IWN4965_EEPROM_BANDS 0x0ea /* Indirect offsets. */ #define IWN5000_EEPROM_NO_HT40 0x000 #define IWN5000_EEPROM_DOMAIN 0x001 #define IWN5000_EEPROM_BAND1 0x004 #define IWN5000_EEPROM_BAND2 0x013 #define IWN5000_EEPROM_BAND3 0x021 #define IWN5000_EEPROM_BAND4 0x02e #define IWN5000_EEPROM_BAND5 0x03a #define IWN5000_EEPROM_BAND6 0x041 #define IWN6000_EEPROM_BAND6 0x040 #define IWN5000_EEPROM_BAND7 0x049 #define IWN6000_EEPROM_ENHINFO 0x054 #define IWN5000_EEPROM_CRYSTAL 0x128 #define IWN5000_EEPROM_TEMP 0x12a #define IWN5000_EEPROM_VOLT 0x12b /* Possible flags for IWN_EEPROM_SKU_CAP. */ #define IWN_EEPROM_SKU_CAP_11N (1 << 6) #define IWN_EEPROM_SKU_CAP_AMT (1 << 7) #define IWN_EEPROM_SKU_CAP_IPAN (1 << 8) /* Possible flags for IWN_EEPROM_RFCFG. */ #define IWN_RFCFG_TYPE(x) (((x) >> 0) & 0x3) #define IWN_RFCFG_STEP(x) (((x) >> 2) & 0x3) #define IWN_RFCFG_DASH(x) (((x) >> 4) & 0x3) #define IWN_RFCFG_TXANTMSK(x) (((x) >> 8) & 0xf) #define IWN_RFCFG_RXANTMSK(x) (((x) >> 12) & 0xf) struct iwn_eeprom_chan { uint8_t flags; #define IWN_EEPROM_CHAN_VALID (1 << 0) #define IWN_EEPROM_CHAN_IBSS (1 << 1) #define IWN_EEPROM_CHAN_ACTIVE (1 << 3) #define IWN_EEPROM_CHAN_RADAR (1 << 4) int8_t maxpwr; } __packed; struct iwn_eeprom_enhinfo { uint8_t flags; #define IWN_ENHINFO_VALID 0x01 #define IWN_ENHINFO_5GHZ 0x02 #define IWN_ENHINFO_OFDM 0x04 #define IWN_ENHINFO_HT40 0x08 #define IWN_ENHINFO_HTAP 0x10 #define IWN_ENHINFO_RES1 0x20 #define IWN_ENHINFO_RES2 0x40 #define IWN_ENHINFO_COMMON 0x80 uint8_t chan; int8_t chain[3]; /* max power in half-dBm */ uint8_t reserved; int8_t mimo2; /* max power in half-dBm */ int8_t mimo3; /* max power in half-dBm */ } __packed; struct iwn5000_eeprom_calib_hdr { uint8_t version; uint8_t pa_type; uint16_t volt; } __packed; #define IWN_NSAMPLES 3 struct iwn4965_eeprom_chan_samples { uint8_t num; struct { uint8_t temp; uint8_t gain; uint8_t power; int8_t pa_det; } samples[2][IWN_NSAMPLES]; } __packed; #define IWN_NBANDS 8 struct iwn4965_eeprom_band { uint8_t lo; /* low channel number */ uint8_t hi; /* high channel number */ struct iwn4965_eeprom_chan_samples chans[2]; } __packed; /* * Offsets of channels descriptions in EEPROM. 
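 * (an RF-config sketch precedes these tables) */

/*
 * Sketch (editorial, not driver code): unpacking the antenna masks from
 * an IWN_EEPROM_RFCFG word with the accessors above.
 */
#if 0	/* illustration only */
static __inline void
iwn_example_rfcfg(uint16_t rfcfg, int *txant, int *rxant)
{
	*txant = IWN_RFCFG_TXANTMSK(rfcfg);	/* TX antenna mask */
	*rxant = IWN_RFCFG_RXANTMSK(rfcfg);	/* RX antenna mask */
}
#endif

/*
 * Offsets of channel descriptions in EEPROM.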
*/ static const uint32_t iwn4965_regulatory_bands[IWN_NBANDS] = { IWN4965_EEPROM_BAND1, IWN4965_EEPROM_BAND2, IWN4965_EEPROM_BAND3, IWN4965_EEPROM_BAND4, IWN4965_EEPROM_BAND5, IWN4965_EEPROM_BAND6, IWN4965_EEPROM_BAND7 }; static const uint32_t iwn5000_regulatory_bands[IWN_NBANDS] = { IWN5000_EEPROM_BAND1, IWN5000_EEPROM_BAND2, IWN5000_EEPROM_BAND3, IWN5000_EEPROM_BAND4, IWN5000_EEPROM_BAND5, IWN5000_EEPROM_BAND6, IWN5000_EEPROM_BAND7 }; static const uint32_t iwn6000_regulatory_bands[IWN_NBANDS] = { IWN5000_EEPROM_BAND1, IWN5000_EEPROM_BAND2, IWN5000_EEPROM_BAND3, IWN5000_EEPROM_BAND4, IWN5000_EEPROM_BAND5, IWN6000_EEPROM_BAND6, IWN5000_EEPROM_BAND7 }; static const uint32_t iwn1000_regulatory_bands[IWN_NBANDS] = { IWN5000_EEPROM_BAND1, IWN5000_EEPROM_BAND2, IWN5000_EEPROM_BAND3, IWN5000_EEPROM_BAND4, IWN5000_EEPROM_BAND5, IWN5000_EEPROM_BAND6, IWN5000_EEPROM_NO_HT40, }; static const uint32_t iwn2030_regulatory_bands[IWN_NBANDS] = { IWN5000_EEPROM_BAND1, IWN5000_EEPROM_BAND2, IWN5000_EEPROM_BAND3, IWN5000_EEPROM_BAND4, IWN5000_EEPROM_BAND5, IWN6000_EEPROM_BAND6, IWN5000_EEPROM_BAND7 }; #define IWN_CHAN_BANDS_COUNT 7 #define IWN_MAX_CHAN_PER_BAND 14 static const struct iwn_chan_band { uint8_t nchan; uint8_t chan[IWN_MAX_CHAN_PER_BAND]; } iwn_bands[] = { /* 20MHz channels, 2GHz band. */ { 14, { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 } }, /* 20MHz channels, 5GHz band. */ { 13, { 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 } }, { 12, { 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 } }, { 11, { 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 } }, { 6, { 145, 149, 153, 157, 161, 165 } }, /* 40MHz channels (primary channels), 2GHz band. */ { 7, { 1, 2, 3, 4, 5, 6, 7 } }, /* 40MHz channels (primary channels), 5GHz band. */ { 11, { 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 } } }; static const uint8_t iwn_bss_ac_to_queue[] = { 2, 3, 1, 0, }; static const uint8_t iwn_pan_ac_to_queue[] = { 5, 4, 6, 7, }; #define IWN1000_OTP_NBLOCKS 3 #define IWN6000_OTP_NBLOCKS 4 #define IWN6050_OTP_NBLOCKS 7 /* HW rate indices. */ #define IWN_RIDX_CCK1 0 #define IWN_RIDX_OFDM6 4 #define IWN4965_MAX_PWR_INDEX 107 #define IWN_POWERSAVE_LVL_NONE 0 #define IWN_POWERSAVE_LVL_VOIP_COMPATIBLE 1 #define IWN_POWERSAVE_LVL_MAX 5 #define IWN_POWERSAVE_LVL_DEFAULT IWN_POWERSAVE_LVL_NONE /* DTIM value to pass in for IWN_POWERSAVE_LVL_VOIP_COMPATIBLE */ #define IWN_POWERSAVE_DTIM_VOIP_COMPATIBLE 2 /* * RF Tx gain values from highest to lowest power (values obtained from * the reference driver.) 
*/ static const uint8_t iwn4965_rf_gain_2ghz[IWN4965_MAX_PWR_INDEX + 1] = { 0x3f, 0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d, 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39, 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35, 0x34, 0x34, 0x34, 0x33, 0x33, 0x33, 0x32, 0x32, 0x32, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x06, 0x06, 0x06, 0x05, 0x05, 0x05, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const uint8_t iwn4965_rf_gain_5ghz[IWN4965_MAX_PWR_INDEX + 1] = { 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d, 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39, 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35, 0x34, 0x34, 0x34, 0x33, 0x33, 0x33, 0x32, 0x32, 0x32, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x25, 0x25, 0x25, 0x24, 0x24, 0x24, 0x23, 0x23, 0x23, 0x22, 0x18, 0x18, 0x17, 0x17, 0x17, 0x16, 0x16, 0x16, 0x15, 0x15, 0x15, 0x14, 0x14, 0x14, 0x13, 0x13, 0x13, 0x12, 0x08, 0x08, 0x07, 0x07, 0x07, 0x06, 0x06, 0x06, 0x05, 0x05, 0x05, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* * DSP pre-DAC gain values from highest to lowest power (values obtained * from the reference driver.) */ static const uint8_t iwn4965_dsp_gain_2ghz[IWN4965_MAX_PWR_INDEX + 1] = { 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x61, 0x60, 0x5f, 0x5e, 0x5d, 0x5c, 0x5b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a, 0x49, 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b }; static const uint8_t iwn4965_dsp_gain_5ghz[IWN4965_MAX_PWR_INDEX + 1] = { 0x7b, 0x75, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x6e, 0x68, 0x62, 0x5d, 0x58, 0x53, 0x4e }; /* * Power saving settings (values obtained from the reference driver.) 
*/
#define IWN_NDTIMRANGES 3
#define IWN_NPOWERLEVELS 6

static const struct iwn_pmgt {
	uint32_t rxtimeout;
	uint32_t txtimeout;
	uint32_t intval[5];
	int skip_dtim;
} iwn_pmgt[IWN_NDTIMRANGES][IWN_NPOWERLEVELS] = {
	/* DTIM <= 2 */
	{
	{ 0, 0, { 0, 0, 0, 0, 0 }, 0 },		/* CAM */
	{ 200, 500, { 1, 2, 2, 2, -1 }, 0 },	/* PS level 1 */
	{ 200, 300, { 1, 2, 2, 2, -1 }, 0 },	/* PS level 2 */
	{ 50, 100, { 2, 2, 2, 2, -1 }, 0 },	/* PS level 3 */
	{ 50, 25, { 2, 2, 4, 4, -1 }, 1 },	/* PS level 4 */
	{ 25, 25, { 2, 2, 4, 6, -1 }, 2 }	/* PS level 5 */
	},
	/* 3 <= DTIM <= 10 */
	{
	{ 0, 0, { 0, 0, 0, 0, 0 }, 0 },		/* CAM */
	{ 200, 500, { 1, 2, 3, 4, 4 }, 0 },	/* PS level 1 */
	{ 200, 300, { 1, 2, 3, 4, 7 }, 0 },	/* PS level 2 */
	{ 50, 100, { 2, 4, 6, 7, 9 }, 0 },	/* PS level 3 */
	{ 50, 25, { 2, 4, 6, 9, 10 }, 1 },	/* PS level 4 */
	{ 25, 25, { 2, 4, 7, 10, 10 }, 2 }	/* PS level 5 */
	},
	/* DTIM >= 11 */
	{
	{ 0, 0, { 0, 0, 0, 0, 0 }, 0 },		/* CAM */
	{ 200, 500, { 1, 2, 3, 4, -1 }, 0 },	/* PS level 1 */
	{ 200, 300, { 2, 4, 6, 7, -1 }, 0 },	/* PS level 2 */
	{ 50, 100, { 2, 7, 9, 9, -1 }, 0 },	/* PS level 3 */
	{ 50, 25, { 2, 7, 9, 9, -1 }, 0 },	/* PS level 4 */
	{ 25, 25, { 4, 7, 10, 10, -1 }, 0 }	/* PS level 5 */
	}
};

struct iwn_sensitivity_limits {
	uint32_t min_ofdm_x1;
	uint32_t max_ofdm_x1;
	uint32_t min_ofdm_mrc_x1;
	uint32_t max_ofdm_mrc_x1;
	uint32_t min_ofdm_x4;
	uint32_t max_ofdm_x4;
	uint32_t min_ofdm_mrc_x4;
	uint32_t max_ofdm_mrc_x4;
	uint32_t min_cck_x4;
	uint32_t max_cck_x4;
	uint32_t min_cck_mrc_x4;
	uint32_t max_cck_mrc_x4;
	uint32_t min_energy_cck;
	uint32_t energy_cck;
	uint32_t energy_ofdm;
};

/*
 * RX sensitivity limits (values obtained from the reference driver.)
 */
static const struct iwn_sensitivity_limits iwn4965_sensitivity_limits = {
	105, 140,
	220, 270,
	85, 120,
	170, 210,
	125, 200,
	200, 400,
	97,
	100, 100
};

static const struct iwn_sensitivity_limits iwn5000_sensitivity_limits = {
	120, 120,	/* min = max for performance bug in DSP. */
	240, 240,	/* min = max for performance bug in DSP. */
	90, 120,
	170, 210,
	125, 200,
	170, 400,
	95,
	95, 95
};

static const struct iwn_sensitivity_limits iwn5150_sensitivity_limits = {
	105, 105,	/* min = max for performance bug in DSP. */
	220, 220,	/* min = max for performance bug in DSP. */
	90, 120,
	170, 210,
	125, 200,
	170, 400,
	95,
	95, 95
};

static const struct iwn_sensitivity_limits iwn1000_sensitivity_limits = {
	120, 155,
	240, 290,
	90, 120,
	170, 210,
	125, 200,
	170, 400,
	95,
	95, 95
};

static const struct iwn_sensitivity_limits iwn6000_sensitivity_limits = {
	105, 110,
	192, 232,
	80, 145,
	128, 232,
	125, 175,
	160, 310,
	97,
	97, 100
};

/* Values taken from Linux kernel 3.2+, drivers/net/wireless/iwlwifi/iwl-2000.c. */
static const struct iwn_sensitivity_limits iwn2030_sensitivity_limits = {
	105, 110,
	128, 232,
	80, 145,
	128, 232,
	125, 175,
	160, 310,
	97,
	97, 110
};

/* Map TID to TX scheduler's FIFO. */
static const uint8_t iwn_tid2fifo[] = {
	1, 0, 0, 1, 2, 2, 3, 3, 7, 7, 7, 7, 7, 7, 7, 7, 3
};

/* WiFi/WiMAX coexist event priority table for 6050. */
static const struct iwn5000_wimax_event iwn6050_wimax_events[] = {
	{ 0x04, 0x03, 0x00, 0x00 },
	{ 0x04, 0x03, 0x00, 0x03 },
	{ 0x04, 0x03, 0x00, 0x03 },
	{ 0x04, 0x03, 0x00, 0x03 },
	{ 0x04, 0x03, 0x00, 0x00 },
	{ 0x04, 0x03, 0x00, 0x07 },
	{ 0x04, 0x03, 0x00, 0x00 },
	{ 0x04, 0x03, 0x00, 0x03 },
	{ 0x04, 0x03, 0x00, 0x03 },
	{ 0x04, 0x03, 0x00, 0x00 },
	{ 0x06, 0x03, 0x00, 0x07 },
	{ 0x04, 0x03, 0x00, 0x00 },
	{ 0x06, 0x06, 0x00, 0x03 },
	{ 0x04, 0x03, 0x00, 0x07 },
	{ 0x04, 0x03, 0x00, 0x00 },
	{ 0x04, 0x03, 0x00, 0x00 }
};
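
/*
 * Sketch (editorial, not driver code): selecting a power-management
 * profile from the iwn_pmgt table above; `dtim' and `level' are
 * hypothetical inputs.
 */
#if 0	/* illustration only */
static __inline const struct iwn_pmgt *
iwn_example_pmgt(int dtim, int level)
{
	int range = (dtim <= 2) ? 0 : (dtim <= 10) ? 1 : 2;

	return (&iwn_pmgt[range][level]);
}
#endif

/* Firmware errors. */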
static const char * const iwn_fw_errmsg[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAKPOINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
	"ADVANCED_SYSASSERT"
};

/* Find least significant bit that is set, e.g. IWN_LSB(0x18) == 0x08. */
#define IWN_LSB(x) ((((x) - 1) & (x)) ^ (x))

#define IWN_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))

#define IWN_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define IWN_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define IWN_SETBITS(sc, reg, mask) \
	IWN_WRITE(sc, reg, IWN_READ(sc, reg) | (mask))

#define IWN_CLRBITS(sc, reg, mask) \
	IWN_WRITE(sc, reg, IWN_READ(sc, reg) & ~(mask))

#define IWN_BARRIER_WRITE(sc) \
	bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
	    BUS_SPACE_BARRIER_WRITE)

#define IWN_BARRIER_READ_WRITE(sc) \
	bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)

#endif /* __IF_IWNREG_H__ */
Index: head/sys/dev/mge/if_mgevar.h
===================================================================
--- head/sys/dev/mge/if_mgevar.h	(revision 258779)
+++ head/sys/dev/mge/if_mgevar.h	(revision 258780)
@@ -1,354 +1,354 @@
/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* * $FreeBSD$ */ #ifndef __IF_MGE_H__ #define __IF_MGE_H__ #define MGE_INTR_COUNT 5 /* ETH controller occupies 5 IRQ lines */ #define MGE_TX_DESC_NUM 256 #define MGE_RX_DESC_NUM 256 #define MGE_RX_QUEUE_NUM 8 #define MGE_RX_DEFAULT_QUEUE 0 #define MGE_CHECKSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* Interrupt Coalescing types */ #define MGE_IC_RX 0 #define MGE_IC_TX 1 struct mge_desc { uint32_t cmd_status; uint16_t buff_size; uint16_t byte_count; bus_addr_t buffer; bus_addr_t next_desc; }; struct mge_desc_wrapper { bus_dmamap_t desc_dmap; struct mge_desc* mge_desc; bus_addr_t mge_desc_paddr; bus_dmamap_t buffer_dmap; struct mbuf* buffer; }; struct mge_softc { struct ifnet *ifp; /* per-interface network data */ phandle_t node; device_t dev; device_t miibus; struct mii_data *mii; struct resource *res[1 + MGE_INTR_COUNT]; /* resources */ void *ih_cookie[MGE_INTR_COUNT]; /* interrupt handlers cookies */ struct mtx transmit_lock; /* transmitter lock */ struct mtx receive_lock; /* receiver lock */ uint32_t mge_if_flags; uint32_t mge_media_status; struct callout wd_callout; int wd_timer; bus_dma_tag_t mge_desc_dtag; bus_dma_tag_t mge_tx_dtag; bus_dma_tag_t mge_rx_dtag; bus_addr_t tx_desc_start; bus_addr_t rx_desc_start; uint32_t tx_desc_curr; uint32_t rx_desc_curr; uint32_t tx_desc_used_idx; uint32_t tx_desc_used_count; uint32_t rx_ic_time; uint32_t tx_ic_time; struct mge_desc_wrapper mge_tx_desc[MGE_TX_DESC_NUM]; struct mge_desc_wrapper mge_rx_desc[MGE_RX_DESC_NUM]; uint32_t mge_tfut_ipg_max; /* TX FIFO Urgent Threshold */ uint32_t mge_rx_ipg_max; uint32_t mge_tx_arb_cfg; uint32_t mge_tx_tok_cfg; uint32_t mge_tx_tok_cnt; uint16_t mge_mtu; int mge_ver; int mge_intr_cnt; uint8_t mge_hw_csum; struct mge_softc *phy_sc; }; /* bus access macros */ #define MGE_READ(sc,reg) bus_read_4((sc)->res[0], (reg)) #define MGE_WRITE(sc,reg,val) bus_write_4((sc)->res[0], (reg), (val)) /* Locking macros */ #define MGE_TRANSMIT_LOCK(sc) do { \ mtx_assert(&(sc)->receive_lock, MA_NOTOWNED); \ mtx_lock(&(sc)->transmit_lock); \ } while (0) #define MGE_TRANSMIT_UNLOCK(sc) mtx_unlock(&(sc)->transmit_lock) #define MGE_TRANSMIT_LOCK_ASSERT(sc) mtx_assert(&(sc)->transmit_lock, MA_OWNED) #define MGE_RECEIVE_LOCK(sc) do { \ mtx_assert(&(sc)->transmit_lock, MA_NOTOWNED); \ mtx_lock(&(sc)->receive_lock); \ } while (0) #define MGE_RECEIVE_UNLOCK(sc) mtx_unlock(&(sc)->receive_lock) #define MGE_RECEIVE_LOCK_ASSERT(sc) mtx_assert(&(sc)->receive_lock, MA_OWNED) #define MGE_GLOBAL_LOCK(sc) do { \ if ((mtx_owned(&(sc)->transmit_lock) ? 1 : 0) != \ (mtx_owned(&(sc)->receive_lock) ? 
1 : 0)) { \ panic("mge deadlock possibility detection!"); \ } \ mtx_lock(&(sc)->transmit_lock); \ mtx_lock(&(sc)->receive_lock); \ } while (0) #define MGE_GLOBAL_UNLOCK(sc) do { \ MGE_RECEIVE_UNLOCK(sc); \ MGE_TRANSMIT_UNLOCK(sc); \ } while (0) #define MGE_GLOBAL_LOCK_ASSERT(sc) do { \ MGE_TRANSMIT_LOCK_ASSERT(sc); \ MGE_RECEIVE_LOCK_ASSERT(sc); \ } while (0) /* SMI-related macros */ #define MGE_REG_PHYDEV 0x000 #define MGE_REG_SMI 0x004 #define MGE_SMI_READ (1 << 26) #define MGE_SMI_WRITE (0 << 26) #define MGE_SMI_READVALID (1 << 27) #define MGE_SMI_BUSY (1 << 28) /* TODO verify the timings and retries count w/specs */ #define MGE_SMI_READ_RETRIES 1000 #define MGE_SMI_READ_DELAY 100 #define MGE_SMI_WRITE_RETRIES 1000 #define MGE_SMI_WRITE_DELAY 100 /* MGE registers */ #define MGE_INT_CAUSE 0x080 #define MGE_INT_MASK 0x084 #define MGE_PORT_CONFIG 0x400 #define PORT_CONFIG_UPM (1 << 0) /* promiscuous */ #define PORT_CONFIG_DFLT_RXQ(val) (((val) & 7) << 1) /* default RX queue */ #define PORT_CONFIG_ARO_RXQ(val) (((val) & 7) << 4) /* ARP RX queue */ #define PORT_CONFIG_REJECT_BCAST (1 << 7) /* reject non-ip and non-arp bcast */ #define PORT_CONFIG_REJECT_IP_BCAST (1 << 8) /* reject ip bcast */ #define PORT_CONFIG_REJECT_ARP__BCAST (1 << 9) /* reject arp bcast */ #define PORT_CONFIG_AMNoTxES (1 << 12) /* Automatic mode not updating Error Summary in Tx descriptor */ #define PORT_CONFIG_TCP_CAP (1 << 14) /* capture tcp to a different queue */ #define PORT_CONFIG_UDP_CAP (1 << 15) /* capture udp to a different queue */ #define PORT_CONFIG_TCPQ (7 << 16) /* queue to capture tcp */ #define PORT_CONFIG_UDPQ (7 << 19) /* queue to capture udp */ #define PORT_CONFIG_BPDUQ (7 << 22) /* queue to capture bpdu */ #define PORT_CONFIG_RXCS (1 << 25) /* calculation Rx TCP checksum include pseudo header */ #define MGE_PORT_EXT_CONFIG 0x404 #define MGE_MAC_ADDR_L 0x414 #define MGE_MAC_ADDR_H 0x418 #define MGE_SDMA_CONFIG 0x41c #define MGE_SDMA_INT_ON_FRAME_BOUND (1 << 0) #define MGE_SDMA_RX_BURST_SIZE(val) (((val) & 7) << 1) #define MGE_SDMA_TX_BURST_SIZE(val) (((val) & 7) << 22) #define MGE_SDMA_BURST_1_WORD 0x0 #define MGE_SDMA_BURST_2_WORD 0x1 #define MGE_SDMA_BURST_4_WORD 0x2 #define MGE_SDMA_BURST_8_WORD 0x3 #define MGE_SDMA_BURST_16_WORD 0x4 #define MGE_SDMA_RX_BYTE_SWAP (1 << 4) #define MGE_SDMA_TX_BYTE_SWAP (1 << 5) #define MGE_SDMA_DESC_SWAP_MODE (1 << 6) #define MGE_PORT_SERIAL_CTRL 0x43c #define PORT_SERIAL_ENABLE (1 << 0) /* serial port enable */ #define PORT_SERIAL_FORCE_LINKUP (1 << 1) /* force link status to up */ #define PORT_SERIAL_AUTONEG (1 << 2) /* enable autoneg for duplex mode */ #define PORT_SERIAL_AUTONEG_FC (1 << 3) /* enable autoneg for FC */ #define PORT_SERIAL_PAUSE_ADV (1 << 4) /* advertise symmetric FC in autoneg */ #define PORT_SERIAL_FORCE_FC(val) (((val) & 3) << 5) /* pause enable & disable frames conf */ #define PORT_SERIAL_NO_PAUSE_DIS 0x00 #define PORT_SERIAL_PAUSE_DIS 0x01 #define PORT_SERIAL_FORCE_BP(val) (((val) & 3) << 7) /* transmitting JAM configuration */ #define PORT_SERIAL_NO_JAM 0x00 #define PORT_SERIAL_JAM 0x01 #define PORT_SERIAL_RES_BIT9 (1 << 9) #define PORT_SERIAL_FORCE_LINK_FAIL (1 << 10) #define PORT_SERIAL_SPEED_AUTONEG (1 << 13) #define PORT_SERIAL_FORCE_DTE_ADV (1 << 14) #define PORT_SERIAL_MRU(val) (((val) & 7) << 17) #define PORT_SERIAL_MRU_1518 0x0 #define PORT_SERIAL_MRU_1522 0x1 #define PORT_SERIAL_MRU_1552 0x2 #define PORT_SERIAL_MRU_9022 0x3 #define PORT_SERIAL_MRU_9192 0x4 #define PORT_SERIAL_MRU_9700 0x5 #define PORT_SERIAL_FULL_DUPLEX (1 << 21) 
#define PORT_SERIAL_FULL_DUPLEX_FC (1 << 22) #define PORT_SERIAL_GMII_SPEED_1000 (1 << 23) #define PORT_SERIAL_MII_SPEED_100 (1 << 24) #define MGE_PORT_STATUS 0x444 #define MGE_STATUS_LINKUP (1 << 1) #define MGE_STATUS_FULL_DUPLEX (1 << 2) #define MGE_STATUS_FLOW_CONTROL (1 << 3) #define MGE_STATUS_1000MB (1 << 4) #define MGE_STATUS_100MB (1 << 5) #define MGE_STATUS_TX_IN_PROG (1 << 7) #define MGE_STATUS_TX_FIFO_EMPTY (1 << 10) #define MGE_TX_QUEUE_CMD 0x448 #define MGE_ENABLE_TXQ (1 << 0) #define MGE_DISABLE_TXQ (1 << 8) /* 88F6281 only */ #define MGE_PORT_SERIAL_CTRL1 0x44c #define MGE_PCS_LOOPBACK (1 << 1) #define MGE_RGMII_EN (1 << 3) #define MGE_PORT_RESET (1 << 4) #define MGE_CLK125_BYPASS (1 << 5) #define MGE_INBAND_AUTONEG (1 << 6) #define MGE_INBAND_AUTONEG_BYPASS (1 << 6) #define MGE_INBAND_AUTONEG_RESTART (1 << 7) #define MGE_1000BASEX (1 << 11) #define MGE_BP_COLLISION_COUNT (1 << 15) #define MGE_COLLISION_LIMIT(val) (((val) & 0x3f) << 16) #define MGE_DROP_ODD_PREAMBLE (1 << 22) #define MGE_PORT_INT_CAUSE 0x460 #define MGE_PORT_INT_MASK 0x468 #define MGE_PORT_INT_RX (1 << 0) #define MGE_PORT_INT_EXTEND (1 << 1) #define MGE_PORT_INT_RXQ0 (1 << 2) #define MGE_PORT_INT_RXERR (1 << 10) #define MGE_PORT_INT_RXERRQ0 (1 << 11) -#define MGE_PORT_INT_SUM (1 << 31) +#define MGE_PORT_INT_SUM (1U << 31) #define MGE_PORT_INT_CAUSE_EXT 0x464 #define MGE_PORT_INT_MASK_EXT 0x46C #define MGE_PORT_INT_EXT_TXBUF0 (1 << 0) #define MGE_PORT_INT_EXT_TXERR0 (1 << 8) #define MGE_PORT_INT_EXT_PHYSC (1 << 16) #define MGE_PORT_INT_EXT_RXOR (1 << 18) #define MGE_PORT_INT_EXT_TXUR (1 << 19) #define MGE_PORT_INT_EXT_LC (1 << 20) #define MGE_PORT_INT_EXT_IAR (1 << 23) -#define MGE_PORT_INT_EXT_SUM (1 << 31) +#define MGE_PORT_INT_EXT_SUM (1U << 31) #define MGE_RX_FIFO_URGENT_TRSH 0x470 #define MGE_TX_FIFO_URGENT_TRSH 0x474 #define MGE_FIXED_PRIO_CONF 0x4dc #define MGE_FIXED_PRIO_EN(q) (1 << (q)) #define MGE_RX_CUR_DESC_PTR(q) (0x60c + ((q)<<4)) #define MGE_RX_QUEUE_CMD 0x680 #define MGE_ENABLE_RXQ(q) (1 << ((q) & 0x7)) #define MGE_ENABLE_RXQ_ALL (0xff) #define MGE_DISABLE_RXQ(q) (1 << (((q) & 0x7) + 8)) #define MGE_DISABLE_RXQ_ALL (0xff00) #define MGE_TX_CUR_DESC_PTR 0x6c0 #define MGE_TX_TOKEN_COUNT(q) (0x700 + ((q)<<4)) #define MGE_TX_TOKEN_CONF(q) (0x704 + ((q)<<4)) #define MGE_TX_ARBITER_CONF(q) (0x704 + ((q)<<4)) #define MGE_MCAST_REG_NUMBER 64 #define MGE_DA_FILTER_SPEC_MCAST(i) (0x1400 + ((i) << 2)) #define MGE_DA_FILTER_OTH_MCAST(i) (0x1500 + ((i) << 2)) #define MGE_UCAST_REG_NUMBER 4 #define MGE_DA_FILTER_UCAST(i) (0x1600 + ((i) << 2)) /* TX descriptor bits */ #define MGE_TX_LLC_SNAP (1 << 9) #define MGE_TX_NOT_FRAGMENT (1 << 10) #define MGE_TX_VLAN_TAGGED (1 << 15) #define MGE_TX_UDP (1 << 16) #define MGE_TX_GEN_L4_CSUM (1 << 17) #define MGE_TX_GEN_IP_CSUM (1 << 18) #define MGE_TX_PADDING (1 << 19) #define MGE_TX_LAST (1 << 20) #define MGE_TX_FIRST (1 << 21) #define MGE_TX_ETH_CRC (1 << 22) #define MGE_TX_EN_INT (1 << 23) #define MGE_TX_IP_HDR_SIZE(size) ((size << 11) & 0xFFFF) /* RX descriptor bits */ #define MGE_ERR_SUMMARY (1 << 0) #define MGE_ERR_MASK (3 << 1) #define MGE_RX_L4_PROTO_MASK (3 << 21) #define MGE_RX_L4_PROTO_TCP (0 << 21) #define MGE_RX_L4_PROTO_UDP (1 << 21) #define MGE_RX_L3_IS_IP (1 << 24) #define MGE_RX_IP_OK (1 << 25) #define MGE_RX_DESC_LAST (1 << 26) #define MGE_RX_DESC_FIRST (1 << 27) #define MGE_RX_ENABLE_INT (1 << 29) #define MGE_RX_L4_CSUM_OK (1 << 30) -#define MGE_DMA_OWNED (1 << 31) +#define MGE_DMA_OWNED (1U << 31) #define MGE_RX_IP_FRAGMENT (1 << 2) #define 
MGE_RX_L4_IS_TCP(status) ((status & MGE_RX_L4_PROTO_MASK) \ == MGE_RX_L4_PROTO_TCP) #define MGE_RX_L4_IS_UDP(status) ((status & MGE_RX_L4_PROTO_MASK) \ == MGE_RX_L4_PROTO_UDP) /* TX error codes */ #define MGE_TX_ERROR_LC (0 << 1) /* Late collision */ #define MGE_TX_ERROR_UR (1 << 1) /* Underrun error */ #define MGE_TX_ERROR_RL (2 << 1) /* Excessive collision */ /* RX error codes */ #define MGE_RX_ERROR_CE (0 << 1) /* CRC error */ #define MGE_RX_ERROR_OR (1 << 1) /* Overrun error */ #define MGE_RX_ERROR_MF (2 << 1) /* Max frame length error */ #define MGE_RX_ERROR_RE (3 << 1) /* Resource error */ #endif /* __IF_MGE_H__ */ Index: head/sys/dev/mpt/mpt_cam.c =================================================================== --- head/sys/dev/mpt/mpt_cam.c (revision 258779) +++ head/sys/dev/mpt/mpt_cam.c (revision 258780) @@ -1,5449 +1,5449 @@ /*- * FreeBSD/CAM specific routines for LSI '909 FC adapters. * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /*- * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
*/ #include "dev/mpt/mpilib/mpi_init.h" #include "dev/mpt/mpilib/mpi_targ.h" #include "dev/mpt/mpilib/mpi_fc.h" #include "dev/mpt/mpilib/mpi_sas.h" #include #include #include #if __FreeBSD_version >= 700025 #ifndef CAM_NEW_TRAN_CODE #define CAM_NEW_TRAN_CODE 1 #endif #endif static void mpt_poll(struct cam_sim *); static timeout_t mpt_timeout; static void mpt_action(struct cam_sim *, union ccb *); static int mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); static void mpt_setwidth(struct mpt_softc *, int, int); static void mpt_setsync(struct mpt_softc *, int, int, int); static int mpt_update_spi_config(struct mpt_softc *, int); static mpt_reply_handler_t mpt_scsi_reply_handler; static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; static mpt_reply_handler_t mpt_fc_els_reply_handler; static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, MSG_DEFAULT_REPLY *); static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); static int mpt_fc_reset_link(struct mpt_softc *, int); static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); static void mpt_recovery_thread(void *arg); static void mpt_recover_commands(struct mpt_softc *mpt); static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, u_int, u_int, u_int, int); static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); static void mpt_post_target_command(struct mpt_softc *, request_t *, int); static int mpt_add_els_buffers(struct mpt_softc *mpt); static int mpt_add_target_commands(struct mpt_softc *mpt); static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); static void mpt_target_start_io(struct mpt_softc *, union ccb *); static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, uint8_t, uint8_t const *); static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, tgt_resource_t *, int); static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; static mpt_reply_handler_t mpt_sata_pass_reply_handler; static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; static mpt_probe_handler_t mpt_cam_probe; static mpt_attach_handler_t mpt_cam_attach; static mpt_enable_handler_t mpt_cam_enable; static mpt_ready_handler_t mpt_cam_ready; static mpt_event_handler_t mpt_cam_event; static mpt_reset_handler_t mpt_cam_ioc_reset; static mpt_detach_handler_t mpt_cam_detach; static struct mpt_personality mpt_cam_personality = { .name = "mpt_cam", .probe = mpt_cam_probe, .attach = mpt_cam_attach, .enable = mpt_cam_enable, .ready = mpt_cam_ready, .event = mpt_cam_event, .reset = mpt_cam_ioc_reset, .detach = mpt_cam_detach, }; DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); int mpt_enable_sata_wc = -1; TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); static int mpt_cam_probe(struct mpt_softc *mpt) { int role; /* * Only attach to nodes that support the initiator or target role * (or want to) or have 
RAID physical devices that need CAM pass-thru * support. */ if (mpt->do_cfg_role) { role = mpt->cfg_role; } else { role = mpt->role; } if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { return (0); } return (ENODEV); } static int mpt_cam_attach(struct mpt_softc *mpt) { struct cam_devq *devq; mpt_handler_t handler; int maxq; int error; MPT_LOCK(mpt); TAILQ_INIT(&mpt->request_timeout_list); maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); handler.reply_handler = mpt_scsi_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_io_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } handler.reply_handler = mpt_scsi_tmf_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_tmf_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } /* * If we're fibre channel and could support target mode, we register * an ELS reply handler and give it resources. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_fc_els_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &fc_els_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } if (mpt_add_els_buffers(mpt) == FALSE) { error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } maxq -= mpt->els_cmds_allocated; } /* * If we support target mode, we register a reply handler for it, * but don't add command resources until we actually enable target * mode. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_scsi_tgt_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &mpt->scsi_tgt_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } if (mpt->is_sas) { handler.reply_handler = mpt_sata_pass_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &sata_pass_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } /* * We keep one request reserved for timeout TMF requests. */ mpt->tmf_req = mpt_get_request(mpt, FALSE); if (mpt->tmf_req == NULL) { mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } /* * Mark the request as free even though not on the free list. * There is only one TMF request allowed to be outstanding at * a time and the TMF routines perform their own allocation * tracking using the standard state flags. */ mpt->tmf_req->state = REQ_STATE_FREE; maxq--; /* * The rest of this is CAM foo, for which we need to drop our lock */ MPT_UNLOCK(mpt); if (mpt_spawn_recovery_thread(mpt) != 0) { mpt_prt(mpt, "Unable to spawn recovery thread!\n"); error = ENOMEM; goto cleanup; } /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(maxq); if (devq == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); error = ENOMEM; goto cleanup; } /* * Construct our SIM entry. */ mpt->sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->sim == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); cam_simq_free(devq); error = ENOMEM; goto cleanup; } /* * Register exactly this bus. 
*/ MPT_LOCK(mpt); if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { mpt_prt(mpt, "Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); /* * Only register a second bus for RAID physical * devices if the controller supports RAID. */ if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { return (0); } /* * Create a "bus" to export all hidden disks to CAM. */ mpt->phydisk_sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->phydisk_sim == NULL) { mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); error = ENOMEM; goto cleanup; } /* * Register this bus. */ MPT_LOCK(mpt); if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != CAM_SUCCESS) { mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->phydisk_path, NULL, cam_sim_path(mpt->phydisk_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); return (0); cleanup: mpt_cam_detach(mpt); return (error); } /* * Read FC configuration information */ static int mpt_read_config_info_fc(struct mpt_softc *mpt) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; char *topology = NULL; int rv; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", mpt->mpt_fcport_page0.Header.PageVersion, mpt->mpt_fcport_page0.Header.PageLength, mpt->mpt_fcport_page0.Header.PageNumber, mpt->mpt_fcport_page0.Header.PageType); rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, sizeof(mpt->mpt_fcport_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read FC Port Page 0\n"); return (-1); } mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; switch (mpt->mpt_fcport_page0.Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: mpt->mpt_fcport_speed = 0; topology = ""; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: topology = "N-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: topology = "NL-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: topology = "F-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: topology = "FL-Port"; break; default: mpt->mpt_fcport_speed = 0; topology = "?"; break; } mpt_lprt(mpt, MPT_PRT_INFO, "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " "Speed %u-Gbit\n", topology, mpt->mpt_fcport_page0.WWNN.High, mpt->mpt_fcport_page0.WWNN.Low, mpt->mpt_fcport_page0.WWPN.High, mpt->mpt_fcport_page0.WWPN.Low, mpt->mpt_fcport_speed); MPT_UNLOCK(mpt); ctx = device_get_sysctl_ctx(mpt->dev); tree = device_get_sysctl_tree(mpt->dev); snprintf(mpt->scinfo.fc.wwnn, sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x", mpt->mpt_fcport_page0.WWNN.High, mpt->mpt_fcport_page0.WWNN.Low); snprintf(mpt->scinfo.fc.wwpn, sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x", mpt->mpt_fcport_page0.WWPN.High, mpt->mpt_fcport_page0.WWPN.Low); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", 
CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, "World Wide Node Name"); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, "World Wide Port Name"); MPT_LOCK(mpt); return (0); } /* * Set FC configuration information. */ static int mpt_set_initial_config_fc(struct mpt_softc *mpt) { CONFIG_PAGE_FC_PORT_1 fc; U32 fl; int r, doit = 0; int role; r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, &fc.Header, FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1 header\n"); return (mpt_fc_reset_link(mpt, 1)); } r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, &fc.Header, sizeof (fc), FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1\n"); return (mpt_fc_reset_link(mpt, 1)); } mpt2host_config_page_fc_port_1(&fc); /* * Check our flags to make sure we support the role we want. */ doit = 0; role = 0; fl = fc.Flags; if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { role |= MPT_ROLE_INITIATOR; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { role |= MPT_ROLE_TARGET; } fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; if (mpt->do_cfg_role == 0) { role = mpt->cfg_role; } else { mpt->do_cfg_role = 0; } if (role != mpt->cfg_role) { if (mpt->cfg_role & MPT_ROLE_INITIATOR) { if ((role & MPT_ROLE_INITIATOR) == 0) { mpt_prt(mpt, "adding initiator role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; doit++; } else { mpt_prt(mpt, "keeping initiator role\n"); } } else if (role & MPT_ROLE_INITIATOR) { mpt_prt(mpt, "removing initiator role\n"); doit++; } if (mpt->cfg_role & MPT_ROLE_TARGET) { if ((role & MPT_ROLE_TARGET) == 0) { mpt_prt(mpt, "adding target role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; doit++; } else { mpt_prt(mpt, "keeping target role\n"); } } else if (role & MPT_ROLE_TARGET) { mpt_prt(mpt, "removing target role\n"); doit++; } mpt->role = mpt->cfg_role; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { mpt_prt(mpt, "adding OXID option\n"); fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; doit++; } } if (doit) { fc.Flags = fl; host2mpt_config_page_fc_port_1(&fc); r = mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, sizeof(fc), FALSE, 5000); if (r != 0) { mpt_prt(mpt, "failed to update NVRAM with changes\n"); return (0); } mpt_prt(mpt, "NOTE: NVRAM changes will not take " "effect until next reboot or IOC reset\n"); } return (0); } static int mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) { ConfigExtendedPageHeader_t hdr; struct mptsas_phyinfo *phyinfo; SasIOUnitPage0_t *buffer; int error, len, i; error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } len = hdr.ExtPageLength * 4; buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0, &hdr, buffer, len, 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } portinfo->num_phys = buffer->NumPhys; portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo->phy_info == NULL) { free(buffer, M_DEVBUF); error = ENOMEM; goto out; } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; phyinfo->phy_num = i; phyinfo->port_id = buffer->PhyData[i].Port; phyinfo->negotiated_link_rate = buffer->PhyData[i].NegotiatedLinkRate; 
phyinfo->handle = le16toh(buffer->PhyData[i].ControllerDevHandle); } free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasPhyPage0_t *buffer; int error; error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasPhyPage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } phy_info->hw_link_rate = buffer->HwLinkRate; phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasDevicePage0_t *buffer; uint64_t sas_address; int error = 0; bzero(device_info, sizeof(*device_info)); error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasDevicePage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } device_info->dev_handle = le16toh(buffer->DevHandle); device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); device_info->slot = le16toh(buffer->Slot); device_info->phy_num = buffer->PhyNum; device_info->physical_port = buffer->PhysicalPort; device_info->target_id = buffer->TargetID; device_info->bus = buffer->Bus; bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); device_info->sas_address = le64toh(sas_address); device_info->device_info = le32toh(buffer->DeviceInfo); free(buffer, M_DEVBUF); out: return (error); } /* * Read SAS configuration information. Nothing to do yet. 
*/ static int mpt_read_config_info_sas(struct mpt_softc *mpt) { struct mptsas_portinfo *portinfo; struct mptsas_phyinfo *phyinfo; int error, i; portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo == NULL) return (ENOMEM); error = mptsas_sas_io_unit_pg0(mpt, portinfo); if (error) { free(portinfo, M_DEVBUF); return (0); } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; error = mptsas_sas_phy_pg0(mpt, phyinfo, (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << MPI_SAS_PHY_PGAD_FORM_SHIFT), i); if (error) break; error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->handle); if (error) break; phyinfo->identify.phy_num = phyinfo->phy_num = i; if (phyinfo->attached.dev_handle) error = mptsas_sas_device_pg0(mpt, &phyinfo->attached, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->attached.dev_handle); if (error) break; } mpt->sas_portinfo = portinfo; return (0); } static void mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, int enabled) { SataPassthroughRequest_t *pass; request_t *req; int error, status; req = mpt_get_request(mpt, 0); if (req == NULL) return; pass = req->req_vbuf; bzero(pass, sizeof(SataPassthroughRequest_t)); pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; pass->TargetID = devinfo->target_id; pass->Bus = devinfo->bus; pass->PassthroughFlags = 0; pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; pass->DataLength = 0; pass->MsgContext = htole32(req->index | sata_pass_handler_id); pass->CommandFIS[0] = 0x27; pass->CommandFIS[1] = 0x80; pass->CommandFIS[2] = 0xef; pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; pass->CommandFIS[7] = 0x40; pass->CommandFIS[15] = 0x08; mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 10 * 1000); if (error) { mpt_free_request(mpt, req); printf("error %d sending passthrough\n", error); return; } status = le16toh(req->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_free_request(mpt, req); printf("IOCSTATUS %d\n", status); return; } mpt_free_request(mpt, req); } /* * Set SAS configuration information. Nothing to do yet. */ static int mpt_set_initial_config_sas(struct mpt_softc *mpt) { struct mptsas_phyinfo *phyinfo; int i; if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { phyinfo = &mpt->sas_portinfo->phy_info[i]; if (phyinfo->attached.dev_handle == 0) continue; if ((phyinfo->attached.device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) continue; if (bootverbose) device_printf(mpt->dev, "%sabling SATA WC on phy %d\n", (mpt_enable_sata_wc) ? 
"En" : "Dis", i); mptsas_set_sata_wc(mpt, &phyinfo->attached, mpt_enable_sata_wc); } } return (0); } static int mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { if (req != NULL) { if (reply_frame != NULL) { req->IOCStatus = le16toh(reply_frame->IOCStatus); } req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { wakeup(req); } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { /* * Whew- we can free this request (late completion) */ mpt_free_request(mpt, req); } } return (TRUE); } /* * Read SCSI configuration information */ static int mpt_read_config_info_spi(struct mpt_softc *mpt) { int rv, i; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, &mpt->mpt_port_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", mpt->mpt_port_page0.Header.PageVersion, mpt->mpt_port_page0.Header.PageLength, mpt->mpt_port_page0.Header.PageNumber, mpt->mpt_port_page0.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, &mpt->mpt_port_page1.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", mpt->mpt_port_page1.Header.PageVersion, mpt->mpt_port_page1.Header.PageLength, mpt->mpt_port_page1.Header.PageNumber, mpt->mpt_port_page1.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, &mpt->mpt_port_page2.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", mpt->mpt_port_page2.Header.PageVersion, mpt->mpt_port_page2.Header.PageLength, mpt->mpt_port_page2.Header.PageNumber, mpt->mpt_port_page2.Header.PageType); for (i = 0; i < 16; i++) { rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, mpt->mpt_dev_page0[i].Header.PageVersion, mpt->mpt_dev_page0[i].Header.PageLength, mpt->mpt_dev_page0[i].Header.PageNumber, mpt->mpt_dev_page0[i].Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, mpt->mpt_dev_page1[i].Header.PageVersion, mpt->mpt_dev_page1[i].Header.PageLength, mpt->mpt_dev_page1[i].Header.PageNumber, mpt->mpt_dev_page1[i].Header.PageType); } /* * At this point, we don't *have* to fail. As long as we have * valid config header information, we can (barely) lurch * along. 
*/ rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, sizeof(mpt->mpt_port_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 0\n"); } else { mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", mpt->mpt_port_page0.Capabilities, mpt->mpt_port_page0.PhysicalInterface); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, sizeof(mpt->mpt_port_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 1\n"); } else { mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", mpt->mpt_port_page1.Configuration, mpt->mpt_port_page1.OnBusTimerValue); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, sizeof(mpt->mpt_port_page2), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 2\n"); } else { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "Port Page 2: Flags %x Settings %x\n", mpt->mpt_port_page2.PortFlags, mpt->mpt_port_page2.PortSettings); mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); for (i = 0; i < 16; i++) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); } } for (i = 0; i < 16; i++) { rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 0\n", i); continue; } mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 0: Negotiated Params %x Information %x\n", i, mpt->mpt_dev_page0[i].NegotiatedParameters, mpt->mpt_dev_page0[i].Information); rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 1\n", i); continue; } mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 1: Requested Params %x Configuration %x\n", i, mpt->mpt_dev_page1[i].RequestedParameters, mpt->mpt_dev_page1[i].Configuration); } return (0); } /* * Validate SPI configuration information. * * In particular, validate SPI Port Page 1. */ static int mpt_set_initial_config_spi(struct mpt_softc *mpt) { int error, i, pp1val; mpt->mpt_disc_enable = 0xff; mpt->mpt_tag_enable = 0; pp1val = ((1 << mpt->mpt_ini_id) << MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; if (mpt->mpt_port_page1.Configuration != pp1val) { CONFIG_PAGE_SCSI_PORT_1 tmp; mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); tmp = mpt->mpt_port_page1; tmp.Configuration = pp1val; host2mpt_config_page_scsi_port_1(&tmp); error = mpt_write_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } error = mpt_read_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } mpt2host_config_page_scsi_port_1(&tmp); if (tmp.Configuration != pp1val) { mpt_prt(mpt, "failed to reset SPI Port Page 1 Config value\n"); return (-1); } mpt->mpt_port_page1 = tmp; } /* * The purpose of this exercise is to get * all targets back to async/narrow. * * We skip this step if the BIOS has already negotiated * speeds with the targets. 
*/ i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "honoring BIOS transfer negotiations\n"); } else { for (i = 0; i < 16; i++) { mpt->mpt_dev_page1[i].RequestedParameters = 0; mpt->mpt_dev_page1[i].Configuration = 0; (void) mpt_update_spi_config(mpt, i); } } return (0); } static int mpt_cam_enable(struct mpt_softc *mpt) { int error; MPT_LOCK(mpt); error = EIO; if (mpt->is_fc) { if (mpt_read_config_info_fc(mpt)) { goto out; } if (mpt_set_initial_config_fc(mpt)) { goto out; } } else if (mpt->is_sas) { if (mpt_read_config_info_sas(mpt)) { goto out; } if (mpt_set_initial_config_sas(mpt)) { goto out; } } else if (mpt->is_spi) { if (mpt_read_config_info_spi(mpt)) { goto out; } if (mpt_set_initial_config_spi(mpt)) { goto out; } } error = 0; out: MPT_UNLOCK(mpt); return (error); } static void mpt_cam_ready(struct mpt_softc *mpt) { /* * If we're in target mode, hang out resources now * so we don't cause the world to hang talking to us. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { /* * Try to add some target command resources */ MPT_LOCK(mpt); if (mpt_add_target_commands(mpt) == FALSE) { mpt_prt(mpt, "failed to add target commands\n"); } MPT_UNLOCK(mpt); } mpt->ready = 1; } static void mpt_cam_detach(struct mpt_softc *mpt) { mpt_handler_t handler; MPT_LOCK(mpt); mpt->ready = 0; mpt_terminate_recovery_thread(mpt); handler.reply_handler = mpt_scsi_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_io_handler_id); handler.reply_handler = mpt_scsi_tmf_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_tmf_handler_id); handler.reply_handler = mpt_fc_els_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, fc_els_handler_id); handler.reply_handler = mpt_scsi_tgt_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, mpt->scsi_tgt_handler_id); handler.reply_handler = mpt_sata_pass_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, sata_pass_handler_id); if (mpt->tmf_req != NULL) { mpt->tmf_req->state = REQ_STATE_ALLOCATED; mpt_free_request(mpt, mpt->tmf_req); mpt->tmf_req = NULL; } if (mpt->sas_portinfo != NULL) { free(mpt->sas_portinfo, M_DEVBUF); mpt->sas_portinfo = NULL; } if (mpt->sim != NULL) { xpt_free_path(mpt->path); xpt_bus_deregister(cam_sim_path(mpt->sim)); cam_sim_free(mpt->sim, TRUE); mpt->sim = NULL; } if (mpt->phydisk_sim != NULL) { xpt_free_path(mpt->phydisk_path); xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); cam_sim_free(mpt->phydisk_sim, TRUE); mpt->phydisk_sim = NULL; } MPT_UNLOCK(mpt); } /* This routine is used after a system crash to dump core onto the swap device. */ static void mpt_poll(struct cam_sim *sim) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)cam_sim_softc(sim); mpt_intr(mpt); } /* * Watchdog timeout routine for SCSI requests. */ static void mpt_timeout(void *arg) { union ccb *ccb; struct mpt_softc *mpt; request_t *req; ccb = (union ccb *)arg; mpt = ccb->ccb_h.ccb_mpt_ptr; MPT_LOCK_ASSERT(mpt); req = ccb->ccb_h.ccb_req_ptr; mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, req->serno, ccb, req->ccb); /* XXX: WHAT ARE WE TRYING TO DO HERE? 
*/ if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); req->state |= REQ_STATE_TIMEDOUT; mpt_wakeup_recovery_thread(mpt); } } /* * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called * directly. * * Takes a list of physical segments and builds the SGL for SCSI IO command * and forwards the command to the IOC after one last check that CAM has not * aborted the transaction. */ static void mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; bus_addr_t chain_list_addr; int first_lim, seg, this_seg_lim; uint32_t addr, cur_off, flags, nxt_off, tf; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE64 *se; SGE_CHAIN64 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: istgt = 0; sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? * Just make a single simple SGL with zero length.
*/ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE64 pointers and start doing CHAIN64 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE64 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof(bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { - addr |= (1 << 31); + addr |= (1U << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE64 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN64 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. 
*/ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; if (sizeof (bus_addr_t) > 4) { ce->Address.High = htole32(((uint64_t)chain_list_addr) >> 32); } ce->Address.Low = htole32(chain_list_addr & 0xffffffff); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN64); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof (bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { - addr |= (1 << 31); + addr |= (1U << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; int seg, first_lim; uint32_t flags, nxt_off; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE32 *se; SGE_CHAIN32 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. 
*/ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE32 pointers and start doing CHAIN32 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE32 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { uint32_t tf; memset(se, 0,sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE32 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { int this_seg_lim; uint32_t tf, cur_off; bus_addr_t chain_list_addr; /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN32 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. */ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; ce->Address = htole32(chain_list_addr); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. 
*/ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN32); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { memset(se, 0, sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_start(struct cam_sim *sim, union ccb *ccb) { request_t *req; struct mpt_softc *mpt; MSG_SCSI_IO_REQUEST *mpt_req; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; bus_dmamap_callback_t *cb; target_id_t tgt; int raid_passthru; int error; /* Get the pointer for the physical adapter */ mpt = ccb->ccb_h.ccb_mpt_ptr; raid_passthru = (sim == mpt->phydisk_sim); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); #endif if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } /* * Link the ccb and the request structure so we can find * the other knowing either the request or the ccb */ req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* Now we build the command for the IOC */ mpt_req = req->req_vbuf; memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; if (raid_passthru) { mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } mpt_req->Bus = 0; /* we never set bus here */ } else { tgt = ccb->ccb_h.target_id; mpt_req->Bus = 0; /* XXX */ } mpt_req->SenseBufferLength = (csio->sense_len < MPT_SENSE_SIZE) ? csio->sense_len : MPT_SENSE_SIZE; /* * We use the message context to find the request structure when we * get the command completion interrupt from the IOC.
*/ mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); /* Which physical device to do the I/O on */ mpt_req->TargetID = tgt; /* We assume a single level LUN type */ if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; } else { mpt_req->LUN[1] = ccb->ccb_h.target_lun; } /* Set the direction of the transfer */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { mpt_req->Control = MPI_SCSIIO_CONTROL_READ; } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; } else { mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; } if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ACA_TASK: mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; break; case MSG_ORDERED_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_SIMPLE_Q_TAG: default: mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; break; } } else { if (mpt->is_fc || mpt->is_sas) { mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; } else { /* XXX No such thing for a target doing packetized. */ mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; } } if (mpt->is_spi) { if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; } } mpt_req->Control = htole32(mpt_req->Control); /* Copy the scsi command block into place */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); } mpt_req->CDBLength = csio->cdb_len; mpt_req->DataLength = htole32(csio->dxfer_len); mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); /* * Do a *short* print here if we're set to MPT_PRT_DEBUG */ if (mpt->verbose == MPT_PRT_DEBUG) { U32 df; mpt_prt(mpt, "mpt_start: %s op 0x%x ", (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { mpt_prtc(mpt, "(%s %u byte%s ", (df == MPI_SCSIIO_CONTROL_READ)? "read" : "write", csio->dxfer_len, (csio->dxfer_len == 1)? ")" : "s)"); } mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt, (uintmax_t)ccb->ccb_h.target_lun, req, req->serno); } error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller queue * until our mapping is returned. */ xpt_freeze_simq(mpt->sim, 1); ccbh->status |= CAM_RELEASE_SIMQ; } } static int mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, int sleep_ok) { int error; uint16_t status; uint8_t response; error = mpt_scsi_send_tmf(mpt, (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 0, /* XXX How do I get the channel ID? */ tgt != CAM_TARGET_WILDCARD ? tgt : 0, lun != CAM_LUN_WILDCARD ? lun : 0, 0, sleep_ok); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. */ mpt_prt(mpt, "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); return (EIO); } /* Wait for bus reset to be processed by the IOC. 
*/ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, sleep_ok, 5000); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error) { mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " "Resetting controller.\n"); mpt_reset(mpt, TRUE); return (ETIMEDOUT); } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); return (EIO); } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); return (EIO); } return (0); } static int mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) { int r = 0; request_t *req; PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (ENOMEM); } fc = req->req_vbuf; memset(fc, 0, sizeof(*fc)); fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; fc->MsgContext = htole32(req->index | fc_els_handler_id); mpt_send_cmd(mpt, req); if (dowait) { r = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, FALSE, 60 * 1000); if (r == 0) { mpt_free_request(mpt, req); } } return (r); } static int mpt_cam_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { uint32_t data0, data1; data0 = le32toh(msg->Data[0]); data1 = le32toh(msg->Data[1]); switch(msg->Event & 0xFF) { case MPI_EVENT_UNIT_ATTENTION: mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", (data0 >> 8) & 0xff, data0 & 0xff); break; case MPI_EVENT_IOC_BUS_RESET: /* We generated a bus reset */ mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", (data0 >> 8) & 0xff); xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_EXT_BUS_RESET: /* Someone else generated a bus reset */ mpt_prt(mpt, "External Bus Reset Detected\n"); /* * These replies don't return EventData like the MPI * spec says they do */ xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_RESCAN: #if __FreeBSD_version >= 600000 { union ccb *ccb; uint32_t pathid; /* * In general this means a device has been added to the loop. */ mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); if (mpt->ready == 0) { break; } if (mpt->phydisk_sim) { pathid = cam_sim_path(mpt->phydisk_sim); } else { pathid = cam_sim_path(mpt->sim); } /* * Allocate a CCB, create a wildcard path for this bus, * and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; } #else mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); break; #endif case MPI_EVENT_LINK_STATUS_CHANGE: mpt_prt(mpt, "Port %d: LinkState: %s\n", (data1 >> 8) & 0xff, ((data0 & 0xff) == 0)? 
"Failed" : "Active"); break; case MPI_EVENT_LOOP_STATE_CHANGE: switch ((data0 >> 16) & 0xff) { case 0x01: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " "(Loop Initialization)\n", (data1 >> 8) & 0xff, (data0 >> 8) & 0xff, (data0 ) & 0xff); switch ((data0 >> 8) & 0xff) { case 0xF7: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device needs AL_PA\n"); } else { mpt_prt(mpt, "Device %02x doesn't like " "FC performance\n", data0 & 0xFF); } break; case 0xF8: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device had loop failure " "at its receiver prior to acquiring" " AL_PA\n"); } else { mpt_prt(mpt, "Device %02x detected loop" " failure at its receiver\n", data0 & 0xFF); } break; default: mpt_prt(mpt, "Device %02x requests that device " "%02x reset itself\n", data0 & 0xFF, (data0 >> 8) & 0xFF); break; } break; case 0x02: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPE(%02x,%02x) (Loop Port Enable)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; case 0x03: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPB(%02x,%02x) (Loop Port Bypass)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; default: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " "FC event (%02x %02x %02x)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 16) & 0xff, /* Event */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); } break; case MPI_EVENT_LOGOUT: mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", (data1 >> 8) & 0xff, data0); break; case MPI_EVENT_QUEUE_FULL: { struct cam_sim *sim; struct cam_path *tmppath; struct ccb_relsim crs; PTR_EVENT_DATA_QUEUE_FULL pqf; lun_id_t lun_id; pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; pqf->CurrentDepth = le16toh(pqf->CurrentDepth); mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); if (mpt->phydisk_sim && mpt_is_raid_member(mpt, pqf->TargetID) != 0) { sim = mpt->phydisk_sim; } else { sim = mpt->sim; } for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), pqf->TargetID, lun_id) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create a path to send " "XPT_REL_SIMQ"); break; } xpt_setup_ccb(&crs.ccb_h, tmppath, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = pqf->CurrentDepth - 1; xpt_action((union ccb *)&crs); if (crs.ccb_h.status != CAM_REQ_CMP) { mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); } xpt_free_path(tmppath); } break; } case MPI_EVENT_IR_RESYNC_UPDATE: mpt_prt(mpt, "IR resync update %d completed\n", (data0 >> 16) & 0xff); break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: { union ccb *ccb; struct cam_sim *sim; struct cam_path *tmppath; PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; if (mpt->phydisk_sim && mpt_is_raid_member(mpt, psdsc->TargetID) != 0) sim = mpt->phydisk_sim; else sim = mpt->sim; switch(psdsc->ReasonCode) { case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: if (xpt_create_path(&tmppath, NULL, 
cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for async event"); break; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); break; case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "SAS device status change: Bus: 0x%02x TargetID: " "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, psdsc->TargetID, psdsc->ReasonCode); break; } break; } case MPI_EVENT_SAS_DISCOVERY_ERROR: { PTR_EVENT_DATA_DISCOVERY_ERROR pde; pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); mpt_lprt(mpt, MPT_PRT_WARN, "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", pde->Port, pde->DiscoveryStatus); break; } case MPI_EVENT_EVENT_CHANGE: case MPI_EVENT_INTEGRATED_RAID: case MPI_EVENT_IR2: case MPI_EVENT_LOG_ENTRY_ADDED: case MPI_EVENT_SAS_DISCOVERY: case MPI_EVENT_SAS_PHY_LINK_STATUS: case MPI_EVENT_SAS_SES: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", msg->Event & 0xFF); return (0); } return (1); } /* * Reply path for all SCSI I/O requests, called from our * interrupt handler by extracting our handler index from * the MsgContext field of the reply from the IOC. * * This routine is optimized for the common case of a * completion without error. All exception handling is * offloaded to non-inlined helper routines to minimize * cache footprint. */ static int mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_IO_REQUEST *scsi_req; union ccb *ccb; if (req->state == REQ_STATE_FREE) { mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); return (TRUE); } scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", req, req->serno); return (TRUE); } mpt_req_untimeout(req, mpt_timeout, ccb); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } if (reply_frame == NULL) { /* * Context only reply, completion without error status. */ ccb->csio.resid = 0; mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->csio.scsi_status = SCSI_STATUS_OK; } else { mpt_scsi_reply_frame_handler(mpt, req, reply_frame); } if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { struct scsi_inquiry_data *iq = (struct scsi_inquiry_data *)ccb->csio.data_ptr; if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { /* * Fake out the device type so that only the * pass-thru device will attach. 
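 * (T_NODEVICE in the returned inquiry data presumably keeps the normal peripheral drivers from attaching to RAID member disks.)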
*/ iq->device &= ~0x1F; iq->device |= T_NODEVICE; } } if (mpt->verbose == MPT_PRT_DEBUG) { mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", req, req->serno); } KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); if ((req->state & REQ_STATE_TIMEDOUT) == 0) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); } else { mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", req, req->serno); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); } KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, ("CCB req needed wakeup")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); #endif mpt_free_request(mpt, req); return (TRUE); } static int mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); #endif tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; /* Record IOC Status and Response Code of TMF for any waiters. */ req->IOCStatus = le16toh(tmf_reply->IOCStatus); req->ResponseCode = tmf_reply->ResponseCode; mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", req, req->serno, le16toh(tmf_reply->IOCStatus)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { req->state |= REQ_STATE_DONE; wakeup(req); } else { mpt->tmf_req->state = REQ_STATE_FREE; } return (TRUE); } /* * XXX: Move to definitions file */ #define ELS 0x22 #define FC4LS 0x32 #define ABTS 0x81 #define BA_ACC 0x84 #define LS_RJT 0x01 #define LS_ACC 0x02 #define PLOGI 0x03 #define LOGO 0x05 #define SRR 0x14 #define PRLI 0x20 #define PRLO 0x21 #define ADISC 0x52 #define RSCN 0x61 static void mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) { uint32_t fl; MSG_LINK_SERVICE_RSP_REQUEST tmp; PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; /* * We are going to reuse the ELS request to send this response back. */ rsp = &tmp; memset(rsp, 0, sizeof(*rsp)); #ifdef USE_IMMEDIATE_LINK_DATA /* * Apparently the IMMEDIATE stuff doesn't work. */ rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; #endif rsp->RspLength = length; rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; rsp->MsgContext = htole32(req->index | fc_els_handler_id); /* * Copy over information from the original reply frame to * its correct place in the response. */ memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); /* * And now copy back the temporary area to the original frame. */ memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); rsp = req->req_vbuf; #ifdef USE_IMMEDIATE_LINK_DATA memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); #else { PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; bus_addr_t paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (length); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); } #endif /* * Send it on...
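 * (no new request is allocated; as noted above, the original ELS request itself carries the response)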
*/ mpt_send_cmd(mpt, req); } static int mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; U8 rctl; U8 type; U8 cmd; U16 status = le16toh(reply_frame->IOCStatus); U32 *elsbuf; int ioindex; int do_refresh = TRUE; #ifdef INVARIANTS KASSERT(mpt_req_on_free_list(mpt, req) == 0, ("fc_els_reply_handler: req %p:%u for function %x on freelist!", req, req->serno, rp->Function)); if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } else { mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } #endif mpt_lprt(mpt, MPT_PRT_DEBUG, "FC_ELS Complete: req %p:%u, reply %p function %x\n", req, req->serno, reply_frame, reply_frame->Function); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", status, reply_frame->Function); if (status == MPI_IOCSTATUS_INVALID_STATE) { /* * XXX: to get around shutdown issue */ mpt->disabled = 1; return (TRUE); } return (TRUE); } /* * If the function is a link service response, we recycle the * response to be a refresh for a new link service request. * * The request pointer is bogus in this case and we have to fetch * it based upon the TransactionContext. */ if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { /* Freddie Uncle Charlie Katie */ /* We don't get the IOINDEX as part of the Link Svc Rsp */ for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) if (mpt->els_cmd_ptrs[ioindex] == req) { break; } KASSERT(ioindex < mpt->els_cmds_allocated, ("can't find my mommie!")); /* remove from active list as we're going to re-post it */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); return (TRUE); } if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; if (req->state & REQ_STATE_TIMEDOUT) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Completed After Timeout\n"); mpt_free_request(mpt, req); } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Async Primitive Send Complete\n"); mpt_free_request(mpt, req); } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Complete- Waking Waiter\n"); wakeup(req); } return (TRUE); } if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " "Length %d Message Flags %x\n", rp->Function, rp->Flags, rp->MsgLength, rp->MsgFlags); return (TRUE); } if (rp->MsgLength <= 5) { /* * This is just an ack of an original ELS buffer post */ mpt_lprt(mpt, MPT_PRT_DEBUG, "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); return (TRUE); } rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; cmd = be32toh(elsbuf[0]) >> 24; if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); return (TRUE); } ioindex = le32toh(rp->TransactionContext); req = mpt->els_cmd_ptrs[ioindex]; if (rctl == ELS && type == 1) { switch (cmd) { case PRLI: /* * Send back a PRLI ACC */ mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); elsbuf[0] = htobe32(0x02100014); elsbuf[1] |= htobe32(0x00000100); elsbuf[4] = htobe32(0x00000002); if (mpt->role & MPT_ROLE_TARGET) elsbuf[4] |= htobe32(0x00000010); if (mpt->role & MPT_ROLE_INITIATOR) elsbuf[4] |= htobe32(0x00000020); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; case PRLO: memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0x02100014); elsbuf[1] = htobe32(0x08000100); mpt_prt(mpt, "PRLO from 0x%08x%08x\n", le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; default: mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); break; } } else if (rctl == ABTS && type == 0) { uint16_t rx_id = le16toh(rp->Rxid); uint16_t ox_id = le16toh(rp->Oxid); request_t *tgt_req = NULL; mpt_prt(mpt, "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); if (rx_id >= mpt->mpt_max_tgtcmds) { mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); } else if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "No TGT CMD PTRS\n"); } else { tgt_req = mpt->tgt_cmd_ptrs[rx_id]; } if (tgt_req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); union ccb *ccb; uint32_t ct_id; /* * Check to make sure we have the correct command. * The reply descriptor in the target state should * contain an IoIndex that should match the RX_ID. * * It'd be nice to have OX_ID to crosscheck with * as well. */ ct_id = GET_IO_INDEX(tgt->reply_desc); if (ct_id != rx_id) { mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", rx_id, ct_id); goto skip; } ccb = tgt->ccb; if (ccb) { mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n", ccb, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.flags, ccb->ccb_h.status); } mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " "%x nxfers %x\n", tgt->state, tgt->resid, tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers); skip: if (mpt_abort_target_cmd(mpt, tgt_req)) { mpt_prt(mpt, "unable to start TargetAbort\n"); } } else { mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); } memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0); elsbuf[1] = htobe32((ox_id << 16) | rx_id); elsbuf[2] = htobe32(0x000ffff); /* * Dork with the reply frame so that the response to it * will be correct. */ rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 12); do_refresh = FALSE; } else { mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); } if (do_refresh == TRUE) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); } return (TRUE); } /* * Clean up all SCSI Initiator personality state in response * to a controller reset. */ static void mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) { /* * The pending list is already run down by * the generic handler.
Perform the same * operation on the timed out request list. */ mpt_complete_request_chain(mpt, &mpt->request_timeout_list, MPI_IOCSTATUS_INVALID_STATE); /* * XXX: We need to repost ELS and Target Command Buffers? */ /* * Inform the XPT that a bus reset has occurred. */ xpt_async(AC_BUS_RESET, mpt->path, NULL); } /* * Parse additional completion information in the reply * frame for SCSI I/O requests. */ static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame) { union ccb *ccb; MSG_SCSI_IO_REPLY *scsi_io_reply; u_int ioc_status; u_int sstate; MPT_DUMP_REPLY_FRAME(mpt, reply_frame); KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, ("MPT SCSI I/O Handler called with incorrect reply type")); KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, ("MPT SCSI I/O Handler called with continuation reply")); scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; ioc_status = le16toh(scsi_io_reply->IOCStatus); ioc_status &= MPI_IOCSTATUS_MASK; sstate = scsi_io_reply->SCSIState; ccb = req->ccb; ccb->csio.resid = ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { uint32_t sense_returned; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; sense_returned = le32toh(scsi_io_reply->SenseCount); if (sense_returned < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - sense_returned; else ccb->csio.sense_resid = 0; bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); bcopy(req->sense_vbuf, &ccb->csio.sense_data, min(ccb->csio.sense_len, sense_returned)); } if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { /* * Tag messages rejected, but non-tagged retry * was successful. XXXX mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); */ } switch(ioc_status) { case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* * XXX * Linux driver indicates that a zero * transfer length with this error code * indicates a CRC error. * * No need to swap the bytes for checking * against zero. */ if (scsi_io_reply->TransferCount == 0) { mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; } /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: case MPI_IOCSTATUS_SUCCESS: case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { /* * Status was never returned for this transaction. */ mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { /* XXX Handle SPI-Packet and FCP-2 response info. */ mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); break; case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * Since selection timeouts and "device really not * there" are grouped into this error code, report * selection timeout. Selection timeouts are * typically retried before giving up on the device * whereas "device not there" errors are considered * unretryable. 
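 * Reporting CAM_SEL_TIMEOUT therefore gives transient errors a chance to be retried by the upper layers.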
*/ mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); break; case MPI_IOCSTATUS_SCSI_INVALID_BUS: mpt_set_ccb_status(ccb, CAM_PATH_INVALID); break; case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: mpt_set_ccb_status(ccb, CAM_TID_INVALID); break; case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: ccb->ccb_h.status = CAM_UA_TERMIO; break; case MPI_IOCSTATUS_INVALID_STATE: /* * The IOC has been reset. Emulate a bus reset. */ /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* * Don't clobber any timeout status that has * already been set for this transaction. We * want the SCSI layer to be able to differentiate * between the command we aborted due to timeout * and any innocent bystanders. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); break; case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); break; case MPI_IOCSTATUS_BUSY: mpt_set_ccb_status(ccb, CAM_BUSY); break; case MPI_IOCSTATUS_INVALID_FUNCTION: case MPI_IOCSTATUS_INVALID_SGL: case MPI_IOCSTATUS_INTERNAL_ERROR: case MPI_IOCSTATUS_INVALID_FIELD: default: /* XXX * Some of the above may need to kick * off a recovery action!!!! */ ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; break; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { mpt_freeze_ccb(ccb); } return (TRUE); } static void mpt_action(struct cam_sim *sim, union ccb *ccb) { struct mpt_softc *mpt; struct ccb_trans_settings *cts; target_id_t tgt; lun_id_t lun; int raid_passthru; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); mpt = (struct mpt_softc *)cam_sim_softc(sim); raid_passthru = (sim == mpt->phydisk_sim); MPT_LOCK_ASSERT(mpt); tgt = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ && ccb->ccb_h.func_code != XPT_RESET_BUS && ccb->ccb_h.func_code != XPT_RESET_DEV) { if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } } ccb->ccb_h.ccb_mpt_ptr = mpt; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks...
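 * (reject CDB pointers that are physical addresses, and CDBs longer than the 16 bytes a SCSI_IO request can carry)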
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } } /* Max supported CDB length is 16 bytes */ /* XXX Unless we implement the new 32byte message type */ if (ccb->csio.cdb_len > sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } #ifdef MPT_TEST_MULTIPATH if (mpt->failure_id == ccb->ccb_h.target_id) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; mpt_start(sim, ccb); return; case XPT_RESET_BUS: if (raid_passthru) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_RESET_DEV: if (ccb->ccb_h.func_code == XPT_RESET_BUS) { if (bootverbose) { xpt_print(ccb->ccb_h.path, "reset bus\n"); } } else { xpt_print(ccb->ccb_h.path, "reset device\n"); } (void) mpt_bus_reset(mpt, tgt, lun, FALSE); /* * mpt_bus_reset is always successful in that it * will fall back to a hard reset should a bus * reset attempt fail. */ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMEDIATE_NOTIFY: ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); break; case XPT_CONT_TARGET_IO: mpt_prt(mpt, "cannot abort active CTIOs yet\n"); ccb->ccb_h.status = CAM_UA_ABORT; break; case XPT_SCSI_IO: ccb->ccb_h.status = CAM_UA_ABORT; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } #ifdef CAM_NEW_TRAN_CODE #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) #else #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) #endif #define DP_DISC_ENABLE 0x1 #define DP_DISC_DISABL 0x2 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) #define DP_TQING_ENABLE 0x4 #define DP_TQING_DISABL 0x8 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) #define DP_WIDE 0x10 #define DP_NARROW 0x20 #define DP_WIDTH (DP_WIDE|DP_NARROW) #define DP_SYNC 0x40 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; #endif uint8_t dval; u_int period; u_int offset; int i, j; cts = &ccb->cts; if (mpt->is_fc || mpt->is_sas) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } #ifdef CAM_NEW_TRAN_CODE scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; /* * We can be called just to validate transport and proto versions */ if (scsi->valid == 0 && spi->valid == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } #endif /* * Skip attempting settings on RAID volume disks. * Other devices on the bus get the normal treatment.
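 * (mpt_is_raid_volume() identifies the volumes; we just report success without touching the hardware settings)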
*/ if (mpt->phydisk_sim && raid_passthru == 0 && mpt_is_raid_volume(mpt, tgt) != 0) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "no transfer settings for RAID vols\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; j = mpt->mpt_port_page2.PortFlags & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "honoring BIOS transfer negotiations\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } dval = 0; period = 0; offset = 0; #ifndef CAM_NEW_TRAN_CODE if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? DP_TQING_ENABLE : DP_TQING_DISABL; } if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { dval |= cts->bus_width ? DP_WIDE : DP_NARROW; } if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { dval |= DP_SYNC; period = cts->sync_period; offset = cts->sync_offset; } #else if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? DP_TQING_ENABLE : DP_TQING_DISABL; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? DP_WIDE : DP_NARROW; } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { dval |= DP_SYNC; offset = spi->sync_offset; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; offset = ptr->RequestedParameters; offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; } if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { dval |= DP_SYNC; period = spi->sync_period; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; period = ptr->RequestedParameters; period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; } #endif if (dval & DP_DISC_ENABLE) { mpt->mpt_disc_enable |= (1 << tgt); } else if (dval & DP_DISC_DISABL) { mpt->mpt_disc_enable &= ~(1 << tgt); } if (dval & DP_TQING_ENABLE) { mpt->mpt_tag_enable |= (1 << tgt); } else if (dval & DP_TQING_DISABL) { mpt->mpt_tag_enable &= ~(1 << tgt); } if (dval & DP_WIDTH) { mpt_setwidth(mpt, tgt, 1); } if (dval & DP_SYNC) { mpt_setsync(mpt, tgt, period, offset); } if (dval == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "set [%d]: 0x%x period 0x%x offset %d\n", tgt, dval, period, offset); if (mpt_update_spi_config(mpt, tgt)) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } break; } case XPT_GET_TRAN_SETTINGS: { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; cts->protocol = PROTO_SCSI; if (mpt->is_fc) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol_version = SCSI_REV_SPC; cts->transport = XPORT_FC; cts->transport_version = 0; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000; } else if (mpt->is_sas) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; 
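/* Parallel SCSI: the real numbers come from the chip via mpt_get_spi_settings(). */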
cts->transport = XPORT_SPI; cts->transport_version = 2; if (mpt_get_spi_settings(mpt, cts) != 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); break; } } scsi = &cts->proto_specific.scsi; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; #else cts = &ccb->cts; if (mpt->is_fc) { cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } else if (mpt->is_sas) { cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } else if (mpt_get_spi_settings(mpt, cts) != 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); break; } #endif mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } cam_calc_geometry(ccg, /* extended */ 1); KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->target_sprt = 0; cpi->hba_eng_cnt = 0; cpi->max_target = mpt->port_facts[0].MaxDevices - 1; cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; /* * FC cards report MAX_DEVICES of 512, but * the MSG_SCSI_IO_REQUEST target id field * is only 8 bits. Until we fix the driver * to support 'channels' for bus overflow, * just limit it. */ if (cpi->max_target > 255) { cpi->max_target = 255; } /* * VMware ESX reports > 16 devices and then dies when we probe. */ if (mpt->is_spi && cpi->max_target > 15) { cpi->max_target = 15; } if (mpt->is_spi) cpi->max_lun = 7; else cpi->max_lun = MPT_MAX_LUNS; cpi->initiator_id = mpt->mpt_ini_id; cpi->bus_id = cam_sim_bus(sim); /* * The base speed is the speed of the underlying connection. */ #ifdef CAM_NEW_TRAN_CODE cpi->protocol = PROTO_SCSI; if (mpt->is_fc) { cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->base_transfer_speed = 100000; cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC; } else if (mpt->is_sas) { cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->base_transfer_speed = 300000; cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; cpi->base_transfer_speed = 3300; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } #else if (mpt->is_fc) { cpi->hba_misc = PIM_NOBUSRESET; cpi->base_transfer_speed = 100000; cpi->hba_inquiry = PI_TAG_ABLE; } else if (mpt->is_sas) { cpi->hba_misc = PIM_NOBUSRESET; cpi->base_transfer_speed = 300000; cpi->hba_inquiry = PI_TAG_ABLE; } else { cpi->hba_misc = PIM_SEQSCAN; cpi->base_transfer_speed = 3300; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; } #endif /* * We give our fake RAID passthru bus a width that is MaxVolumes * wide and restrict it to one lun.
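 * The initiator id is set one past the last target so it can never collide with a physical disk id.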
*/ if (raid_passthru) { cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; cpi->initiator_id = cpi->max_target + 1; cpi->max_lun = 0; } if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { cpi->hba_misc |= PIM_NOINITIATOR; } if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } else { cpi->target_sprt = 0; } strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_EN_LUN: /* Enable LUN as a target */ { int result; if (ccb->cel.enable) result = mpt_enable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); else result = mpt_disable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (result == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } break; } case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */ case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tgt_resource_t *trtp; lun_id_t lun = ccb->ccb_h.target_lun; ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = mpt; if (lun == CAM_LUN_WILDCARD) { if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } trtp = &mpt->trt_wildcard; } else if (lun >= MPT_MAX_LUNS) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } else { trtp = &mpt->trt[lun]; } if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun); STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, sim_links.stqe); } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE INOT lun %jx\n", (uintmax_t)lun); STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, sim_links.stqe); } else { mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); } mpt_set_ccb_status(ccb, CAM_REQ_INPROG); return; } case XPT_CONT_TARGET_IO: mpt_target_start_io(mpt, ccb); return; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static int mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; #endif target_id_t tgt; uint32_t dval, pval, oval; int rv; if (IS_CURRENT_SETTINGS(cts) == 0) { tgt = cts->ccb_h.target_id; } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { return (-1); } } else { tgt = cts->ccb_h.target_id; } /* * We aren't looking at Port Page 2 BIOS settings here- * sometimes these have been known to be bogus XXX. * * For user settings, we pick the max from port page 0 * * For current settings we read the current settings out from * device page 0 for that target. */ if (IS_CURRENT_SETTINGS(cts)) { CONFIG_PAGE_SCSI_DEVICE_0 tmp; dval = 0; tmp = mpt->mpt_dev_page0[tgt]; rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); return (rv); } mpt2host_config_page_scsi_device_0(&tmp); mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, tmp.NegotiatedParameters, tmp.Information); dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? DP_WIDE : DP_NARROW; dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 
DP_DISC_ENABLE : DP_DISC_DISABL; dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? DP_TQING_ENABLE : DP_TQING_DISABL; oval = tmp.NegotiatedParameters; oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; pval = tmp.NegotiatedParameters; pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; mpt->mpt_dev_page0[tgt] = tmp; } else { dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; oval = mpt->mpt_port_page0.Capabilities; oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); pval = mpt->mpt_port_page0.Capabilities; pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); } #ifndef CAM_NEW_TRAN_CODE cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); cts->valid = 0; cts->sync_period = pval; cts->sync_offset = oval; cts->valid |= CCB_TRANS_SYNC_RATE_VALID; cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; if (dval & DP_WIDE) { cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; if (dval & DP_DISC_ENABLE) { cts->flags |= CCB_TRANS_DISC_ENB; } if (dval & DP_TQING_ENABLE) { cts->flags |= CCB_TRANS_TAG_ENB; } } #else spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; spi->sync_offset = oval; spi->sync_period = pval; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DP_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DP_TQING_ENABLE) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; if (dval & DP_DISC_ENABLE) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } } #endif mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); return (0); } static void mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; if (onoff) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; } else { ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; } } static void mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; if (period == 0) { return; } ptr->RequestedParameters |= period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; ptr->RequestedParameters |= offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; if (period < 0xa) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; } if (period < 0x9) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; } } static int mpt_update_spi_config(struct mpt_softc *mpt, int tgt) { CONFIG_PAGE_SCSI_DEVICE_1 tmp; int rv; mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); tmp = mpt->mpt_dev_page1[tgt]; host2mpt_config_page_scsi_device_1(&tmp); rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); return (-1); } return (0); } /****************************** Timeout Recovery ******************************/ static int mpt_spawn_recovery_thread(struct mpt_softc *mpt) { int error; error = mpt_kthread_create(mpt_recovery_thread, mpt, &mpt->recovery_thread, /*flags*/0, /*altstack*/0, "mpt_recovery%d", mpt->unit); return (error); } static void mpt_terminate_recovery_thread(struct mpt_softc *mpt) { if (mpt->recovery_thread == NULL) { return; } mpt->shutdwn_recovery = 1; wakeup(mpt); /* * Sleep on a slightly different location * for this interlock just for added safety. */ mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); } static void mpt_recovery_thread(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK(mpt); for (;;) { if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { if (mpt->shutdwn_recovery == 0) { mpt_sleep(mpt, mpt, PUSER, "idle", 0); } } if (mpt->shutdwn_recovery != 0) { break; } mpt_recover_commands(mpt); } mpt->recovery_thread = NULL; wakeup(&mpt->recovery_thread); MPT_UNLOCK(mpt); mpt_kthread_exit(0); } static int mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) { MSG_SCSI_TASK_MGMT *tmf_req; int error; /* * Wait for any current TMF request to complete. * We're only allowed to issue one TMF at a time. 
*/ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, sleep_ok, MPT_TMF_MAX_TIMEOUT); if (error != 0) { mpt_reset(mpt, TRUE); return (ETIMEDOUT); } mpt_assign_serno(mpt, mpt->tmf_req); mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; memset(tmf_req, 0, sizeof(*tmf_req)); tmf_req->TargetID = target; tmf_req->Bus = channel; tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; tmf_req->TaskType = type; tmf_req->MsgFlags = flags; tmf_req->MsgContext = htole32(mpt->tmf_req->index | scsi_tmf_handler_id); if (lun > MPT_MAX_LUNS) { tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); tmf_req->LUN[1] = lun & 0xff; } else { tmf_req->LUN[1] = lun; } tmf_req->TaskMsgContext = abort_ctx; mpt_lprt(mpt, MPT_PRT_DEBUG, "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, mpt->tmf_req->serno, tmf_req->MsgContext); if (mpt->verbose > MPT_PRT_DEBUG) { mpt_print_request(tmf_req); } KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, ("mpt_scsi_send_tmf: tmf_req already on pending list")); TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); if (error != MPT_OK) { TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); mpt->tmf_req->state = REQ_STATE_FREE; mpt_reset(mpt, TRUE); } return (error); } /* * When a command times out, it is placed on the request_timeout_list * and we wake our recovery thread. The MPT-Fusion architecture supports * only a single TMF operation at a time, so we serially abort/bdr, etc, * the timedout transactions. The next TMF is issued either by the * completion handler of the current TMF waking our recovery thread, * or the TMF timeout handler causing a hard reset sequence. */ static void mpt_recover_commands(struct mpt_softc *mpt) { request_t *req; union ccb *ccb; int error; if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * No work to do- leave. */ mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); return; } /* * Flush any commands whose completion coincides with their timeout. */ mpt_intr(mpt); if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * The timedout commands have already * completed. This typically means * that either the timeout value was on * the hairy edge of what the device * requires or - more likely - interrupts * are not happening. */ mpt_prt(mpt, "Timedout requests already complete. " "Interrupts may not be functioning.\n"); mpt_enable_ints(mpt); return; } /* * We have no visibility into the current state of the * controller, so attempt to abort the commands in the * order they timed-out. For initiator commands, we * depend on the reply handler pulling requests off * the timeout list. */ while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { uint16_t status; uint8_t response; MSG_REQUEST_HEADER *hdrp = req->req_vbuf; mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", req, req->serno, hdrp->Function); ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "null ccb in timed out request. " "Resetting Controller.\n"); mpt_reset(mpt, TRUE); continue; } mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); /* * Check to see whether this is an initiator command and * deal with it differently if it is not. */ switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: break; default: /* * XXX: FIX ME: need to abort target assists...
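 * For now such requests are simply moved back to the pending queue below.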
*/ mpt_prt(mpt, "just putting it back on the pend q\n"); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); continue; } error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, htole32(req->index | scsi_io_handler_id), TRUE); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. Our queue should be emptied * by the hard reset. */ continue; } error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE, 500); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error != 0) { /* * If we've errored out,, reset the controller. */ mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " "Resetting controller\n"); mpt_reset(mpt, TRUE); continue; } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); continue; } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); continue; } mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); } } /************************ Target Mode Support ****************************/ static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) { MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; PTR_SGE_TRANSACTION32 tep; PTR_SGE_SIMPLE32 se; bus_addr_t paddr; uint32_t fl; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fc = req->req_vbuf; memset(fc, 0, MPT_REQUEST_AREA); fc->BufferCount = 1; fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; fc->MsgContext = htole32(req->index | fc_els_handler_id); /* * Okay, set up ELS buffer pointers. ELS buffer pointers * consist of a TE SGL element (with details length of zero) * followed by a SIMPLE SGL element which holds the address * of the buffer. 
*/ tep = (PTR_SGE_TRANSACTION32) &fc->SGL; tep->ContextSize = 4; tep->Flags = 0; tep->TransactionContext[0] = htole32(ioindex); se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); mpt_lprt(mpt, MPT_PRT_DEBUG, "add ELS index %d ioindex %d for %p:%u\n", req->index, ioindex, req, req->serno); KASSERT(((req->state & REQ_STATE_LOCKED) != 0), ("mpt_fc_post_els: request not locked")); mpt_send_cmd(mpt, req); } static void mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) { PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; PTR_CMD_BUFFER_DESCRIPTOR cb; bus_addr_t paddr; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(req->req_vbuf, 0, MPT_REQUEST_AREA); MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; fc = req->req_vbuf; fc->BufferCount = 1; fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); cb = &fc->Buffer[0]; cb->IoIndex = htole16(ioindex); cb->u.PhysicalAddress32 = htole32((U32) paddr); mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); } static int mpt_add_els_buffers(struct mpt_softc *mpt) { int i; if (mpt->is_fc == 0) { return (TRUE); } if (mpt->els_cmds_allocated) { return (TRUE); } mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->els_cmd_ptrs == NULL) { return (FALSE); } /* * Feed the chip some ELS buffer resources */ for (i = 0; i < MPT_MAX_ELS; i++) { request_t *req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->els_cmd_ptrs[i] = req; mpt_fc_post_els(mpt, req, i); } if (i == 0) { mpt_prt(mpt, "unable to add ELS buffer resources\n"); free(mpt->els_cmd_ptrs, M_DEVBUF); mpt->els_cmd_ptrs = NULL; return (FALSE); } if (i != MPT_MAX_ELS) { mpt_lprt(mpt, MPT_PRT_INFO, "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); } mpt->els_cmds_allocated = i; return(TRUE); } static int mpt_add_target_commands(struct mpt_softc *mpt) { int i, max; if (mpt->tgt_cmd_ptrs) { return (TRUE); } max = MPT_MAX_REQUESTS(mpt) >> 1; if (max > mpt->mpt_max_tgtcmds) { max = mpt->mpt_max_tgtcmds; } mpt->tgt_cmd_ptrs = malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "mpt_add_target_commands: could not allocate cmd ptrs\n"); return (FALSE); } for (i = 0; i < max; i++) { request_t *req; req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->tgt_cmd_ptrs[i] = req; mpt_post_target_command(mpt, req, i); } if (i == 0) { mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); free(mpt->tgt_cmd_ptrs, M_DEVBUF); mpt->tgt_cmd_ptrs = NULL; return (FALSE); } mpt->tgt_cmds_allocated = i; if (i < max) { mpt_lprt(mpt, MPT_PRT_INFO, "added %d of %d target bufs\n", i, max); } return (i); } static int mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 1; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (mpt->tenabled == 0) { if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 1; } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 1; } else { 
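/* Enable just this lun's target resource block. */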
mpt->trt[lun].enabled = 1; } return (0); } static int mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { int i; if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 0; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 0; } else { mpt->trt[lun].enabled = 0; } for (i = 0; i < MPT_MAX_LUNS; i++) { if (mpt->trt[i].enabled) { break; } } if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 0; } return (0); } /* * Called with MPT lock held */ static void mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); switch (tgt->state) { case TGT_STATE_IN_CAM: break; case TGT_STATE_MOVING_DATA: mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; xpt_done(ccb); return; default: mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); mpt_tgt_dump_req_state(mpt, cmd_req); mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); return; } if (csio->dxfer_len) { bus_dmamap_callback_t *cb; PTR_MSG_TARGET_ASSIST_REQUEST ta; request_t *req; int error; KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, ("dxfer_len %u but direction is NONE", csio->dxfer_len)); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* * Record the currently active ccb and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { ta->LUN[0] = 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); ta->LUN[1] = csio->ccb_h.target_lun & 0xff; } else { ta->LUN[1] = csio->ccb_h.target_lun; } ta->RelativeOffset = tgt->bytes_xfered; ta->DataLength = ccb->csio.dxfer_len; if (ta->DataLength > tgt->resid) { ta->DataLength = tgt->resid; } /* * XXX Should be done after data transfer completes?
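 * For now resid and bytes_xfered are adjusted here, before the DMA map is even loaded.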
*/ tgt->resid -= csio->dxfer_len; tgt->bytes_xfered += csio->dxfer_len; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; } #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_AUTO_STATUS; } #endif tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; mpt_lprt(mpt, MPT_PRT_DEBUG, "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; /* * XXX: I don't know why this seems to happen, but * XXX: completing the CCB seems to make things happy. * XXX: This seems to happen if the initiator requests * XXX: enough data that we have to do multiple CTIOs. */ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Meaningless STATUS CCB (%p): flags %x status %x " "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return; } if (ccb->ccb_h.flags & CAM_SEND_SENSE) { sp = sense; memcpy(sp, &csio->sense_data, min(csio->sense_len, MPT_SENSE_SIZE)); } mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); } } static void mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, uint32_t lun, int send, uint8_t *data, size_t length) { mpt_tgt_state_t *tgt; PTR_MSG_TARGET_ASSIST_REQUEST ta; SGE_SIMPLE32 *se; uint32_t flags; uint8_t *dptr; bus_addr_t pptr; request_t *req; /* * We enter with resid set to the data load for the command. 
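 * A zero length or an already-zero resid means there is no data to move, so we go straight to sending status.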
*/ tgt = MPT_TGT_STATE(mpt, cmd_req); if (length == 0 || tgt->resid == 0) { tgt->resid = 0; mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); return; } if ((req = mpt_get_request(mpt, FALSE)) == NULL) { mpt_prt(mpt, "out of resources- dropping local response\n"); return; } tgt->is_local = 1; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); if (lun > MPT_MAX_LUNS) { ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); ta->LUN[1] = lun & 0xff; } else { ta->LUN[1] = lun; } ta->RelativeOffset = 0; ta->DataLength = length; dptr = req->req_vbuf; dptr += MPT_RQSL(mpt); pptr = req->req_pbuf; pptr += MPT_RQSL(mpt); memcpy(dptr, data, min(length, MPT_RQSL(mpt))); se = (SGE_SIMPLE32 *) &ta->SGL[0]; memset(se, 0,sizeof (*se)); flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (send) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } se->Address = pptr; MPI_pSGE_SET_LENGTH(se, length); flags |= MPI_SGE_FLAGS_LAST_ELEMENT; flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; MPI_pSGE_SET_FLAGS(se, flags); tgt->ccb = NULL; tgt->req = req; tgt->resid -= length; tgt->bytes_xfered = length; #ifdef WE_TRUST_AUTO_GOOD_STATUS tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; #else tgt->state = TGT_STATE_MOVING_DATA; #endif mpt_send_cmd(mpt, req); } /* * Abort queued up CCBs */ static cam_status mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) { struct mpt_hdr_stailq *lp; struct ccb_hdr *srch; int found = 0; union ccb *accb = ccb->cab.abort_ccb; tgt_resource_t *trtp; mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { trtp = &mpt->trt_wildcard; } else { trtp = &mpt->trt[ccb->ccb_h.target_lun]; } if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { lp = &trtp->atios; } else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { lp = &trtp->inots; } else { return (CAM_REQ_INVALID); } STAILQ_FOREACH(srch, lp, sim_links.stqe) { if (srch == &accb->ccb_h) { found = 1; STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); break; } } if (found) { accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); return (CAM_REQ_CMP); } mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); return (CAM_PATH_INVALID); } /* * Ask the MPT to abort the current target command */ static int mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) { int error; request_t *req; PTR_MSG_TARGET_MODE_ABORT abtp; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (-1); } abtp = req->req_vbuf; memset(abtp, 0, sizeof (*abtp)); abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); error = 0; if (mpt->is_fc || mpt->is_sas) { mpt_send_cmd(mpt, req); } else { error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); } return (error); } /* * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the * FC929 to set bogus FC_RSP fields (nonzero residuals * but w/o RESID fields set). 
This causes QLogic initiators * to think maybe that a frame was lost. * * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because * we use allocated requests to do TARGET_ASSIST and we * need to know when to release them. */ static void mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, uint8_t status, uint8_t const *sense_data) { uint8_t *cmd_vbuf; mpt_tgt_state_t *tgt; PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; request_t *req; bus_addr_t paddr; int resplen = 0; uint32_t fl; cmd_vbuf = cmd_req->req_vbuf; cmd_vbuf += MPT_RQSL(mpt); tgt = MPT_TGT_STATE(mpt, cmd_req); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } if (ccb) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); } else { mpt_prt(mpt, "could not allocate status request- dropping\n"); } return; } req->ccb = ccb; if (ccb) { ccb->ccb_h.ccb_mpt_ptr = mpt; ccb->ccb_h.ccb_req_ptr = req; } /* * Record the currently active ccb, if any, and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; tgt->state = TGT_STATE_SENDING_STATUS; tp = req->req_vbuf; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(tp, 0, sizeof (*tp)); tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; uint8_t *sts_vbuf; uint32_t *rsp; sts_vbuf = req->req_vbuf; sts_vbuf += MPT_RQSL(mpt); rsp = (uint32_t *) sts_vbuf; memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); /* * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. * It has to be big-endian in memory and is organized * in 32 bit words, which are much easier to deal with * as words which are swizzled as needed. * * All we're filling here is the FC_RSP payload. * We may just have the chip synthesize it if * we have no residual and an OK status. * */ memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); rsp[2] = status; if (tgt->resid) { rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ rsp[3] = htobe32(tgt->resid); #ifdef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif } if (status == SCSI_STATUS_CHECK_COND) { int i; rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
*/ rsp[4] = htobe32(MPT_SENSE_SIZE); if (sense_data) { memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); } else { mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" "TION but no sense data?\n"); memset(&rsp[8], 0, MPT_SENSE_SIZE); } for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { rsp[i] = htobe32(rsp[i]); } #ifdef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif } #ifndef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif rsp[2] = htobe32(rsp[2]); } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; tp->StatusCode = status; tp->QueueTag = htole16(sp->Tag); memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); } tp->ReplyWord = htole32(tgt->reply_desc); tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); #ifdef WE_CAN_USE_AUTO_REPOST tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; #endif if (status == SCSI_STATUS_OK && resplen == 0) { tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; } else { tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= resplen; tp->StatusDataSGE.FlagsLength = htole32(fl); } mpt_lprt(mpt, MPT_PRT_DEBUG, "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, req->serno, tgt->resid); if (ccb) { ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); } mpt_send_cmd(mpt, req); } static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, tgt_resource_t *trtp, int init_id) { struct ccb_immediate_notify *inot; mpt_tgt_state_t *tgt; tgt = MPT_TGT_STATE(mpt, req); inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots); if (inot == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n"); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); return; } STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE INOT %p lun %jx\n", inot, (uintmax_t)inot->ccb_h.target_lun); inot->initiator_id = init_id; /* XXX */ /* * This is a somewhat grotesque attempt to map from task management * to old style SCSI messages. God help us all. */ switch (fc) { case MPT_ABORT_TASK_SET: inot->arg = MSG_ABORT_TAG; break; case MPT_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case MPT_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; case MPT_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; case MPT_TERMINATE_TASK: inot->arg = MSG_ABORT_TAG; break; default: inot->arg = MSG_NOOP; break; } /* * XXX KDM we need the sequence/tag number for the target of the * task management operation, especially if it is an abort.
*/ tgt->ccb = (union ccb *) inot; inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; xpt_done((union ccb *)inot); } static void mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) { static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', '0', '0', '0', '1' }; struct ccb_accept_tio *atiop; lun_id_t lun; int tag_action = 0; mpt_tgt_state_t *tgt; tgt_resource_t *trtp = NULL; U8 *lunptr; U8 *vbuf; U16 itag; U16 ioindex; mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; uint8_t *cdbp; /* * Stash info for the current command where we can get at it later. */ vbuf = req->req_vbuf; vbuf += MPT_RQSL(mpt); /* * Get our state pointer set up. */ tgt = MPT_TGT_STATE(mpt, req); if (tgt->state != TGT_STATE_LOADED) { mpt_tgt_dump_req_state(mpt, req); panic("bad target state in mpt_scsi_tgt_atio"); } memset(tgt, 0, sizeof (mpt_tgt_state_t)); tgt->state = TGT_STATE_IN_CAM; tgt->reply_desc = reply_desc; ioindex = GET_IO_INDEX(reply_desc); if (mpt->verbose >= MPT_PRT_DEBUG) { mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); } if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc; fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; if (fc->FcpCntl[2]) { /* * Task Management Request */ switch (fc->FcpCntl[2]) { case 0x2: fct = MPT_ABORT_TASK_SET; break; case 0x4: fct = MPT_CLEAR_TASK_SET; break; case 0x20: fct = MPT_TARGET_RESET; break; case 0x40: fct = MPT_CLEAR_ACA; break; case 0x80: fct = MPT_TERMINATE_TASK; break; default: mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", fc->FcpCntl[2]); mpt_scsi_tgt_status(mpt, 0, req, SCSI_STATUS_OK, 0); return; } } else { switch (fc->FcpCntl[1]) { case 0: tag_action = MSG_SIMPLE_Q_TAG; break; case 1: tag_action = MSG_HEAD_OF_Q_TAG; break; case 2: tag_action = MSG_ORDERED_Q_TAG; break; default: /* * Bah. Ignore Untagged Queing and ACA */ tag_action = MSG_SIMPLE_Q_TAG; break; } } tgt->resid = be32toh(fc->FcpDl); cdbp = fc->FcpCdb; lunptr = fc->FcpLun; itag = be16toh(fc->OptionalOxid); } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; cdbp = ssp->CDB; lunptr = ssp->LogicalUnitNumber; itag = ssp->InitiatorTag; } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; cdbp = sp->CDB; lunptr = sp->LogicalUnitNumber; itag = sp->Tag; } /* * Generate a simple lun */ switch (lunptr[0] & 0xc0) { case 0x40: lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; break; case 0: lun = lunptr[1]; break; default: mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); lun = 0xffff; break; } /* * Deal with non-enabled or bad luns here. */ if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || mpt->trt[lun].enabled == 0) { if (mpt->twildcard) { trtp = &mpt->trt_wildcard; } else if (fct == MPT_NIL_TMT_VALUE) { /* * In this case, we haven't got an upstream listener * for either a specific lun or wildcard luns. We * have to make some sensible response. For regular * inquiry, just return some NOT HERE inquiry data. * For VPD inquiry, report illegal field in cdb. * For REQUEST SENSE, just return NO SENSE data. * REPORT LUNS gets illegal command. * All other commands get 'no such device'. 
*/ uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; size_t len; memset(buf, 0, MPT_SENSE_SIZE); cond = SCSI_STATUS_CHECK_COND; buf[0] = 0xf0; buf[2] = 0x5; buf[7] = 0x8; sp = buf; tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); switch (cdbp[0]) { case INQUIRY: { if (cdbp[1] != 0) { buf[12] = 0x26; buf[13] = 0x01; break; } len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (null_iqd)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local inquiry %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, null_iqd, len); return; } case REQUEST_SENSE: { buf[2] = 0x0; len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (buf)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local reqsense %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, buf, len); return; } case REPORT_LUNS: mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); buf[12] = 0x26; return; default: mpt_lprt(mpt, MPT_PRT_DEBUG, "CMD 0x%x to unmanaged lun %jx\n", cdbp[0], (uintmax_t)lun); buf[12] = 0x25; break; } mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); return; } /* otherwise, leave trtp NULL */ } else { trtp = &mpt->trt[lun]; } /* * Deal with any task management */ if (fct != MPT_NIL_TMT_VALUE) { if (trtp == NULL) { mpt_prt(mpt, "task mgmt function %x but no listener\n", fct); mpt_scsi_tgt_status(mpt, 0, req, SCSI_STATUS_OK, 0); } else { mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, GET_INITIATOR_INDEX(reply_desc)); } return; } atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); if (atiop == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun, mpt->tenabled? "QUEUE FULL" : "BUSY"); mpt_scsi_tgt_status(mpt, NULL, req, mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, NULL); return; } STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE ATIO %p lun %jx\n", atiop, (uintmax_t)atiop->ccb_h.target_lun); atiop->ccb_h.ccb_mpt_ptr = mpt; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; atiop->init_id = GET_INITIATOR_INDEX(reply_desc); atiop->cdb_len = mpt_cdblen(cdbp[0], 16); memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); /* * The tag we construct here allows us to find the * original request that the command came in with. * * This way we don't have to depend on anything but the * tag to find things when CCBs show back up from CAM. */ atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); tgt->tag_id = atiop->tag_id; if (tag_action) { atiop->tag_action = tag_action; atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; } if (mpt->verbose >= MPT_PRT_DEBUG) { int i; mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop, (uintmax_t)atiop->ccb_h.target_lun); for (i = 0; i < atiop->cdb_len; i++) { mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, (i == (atiop->cdb_len - 1))? 
'>' : ' '); } mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", itag, atiop->tag_id, tgt->reply_desc, tgt->resid); } xpt_done((union ccb *)atiop); } static void mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, tgt->tag_id, tgt->state); } static void mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) { mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, req->index, req->index, req->state); mpt_tgt_dump_tgt_state(mpt, req); } static int mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { int dbg; union ccb *ccb; U16 status; if (reply_frame == NULL) { /* * Figure out what the state of the command is. */ mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); #ifdef INVARIANTS mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); if (tgt->req) { mpt_req_not_spcl(mpt, tgt->req, "turbo scsi_tgt_reply associated req", __LINE__); } #endif switch(tgt->state) { case TGT_STATE_LOADED: /* * This is a new command starting. */ mpt_scsi_tgt_atio(mpt, req, reply_desc); break; case TGT_STATE_MOVING_DATA: { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request moving data"); /* NOTREACHED */ } if (ccb == NULL) { if (tgt->is_local == 0) { panic("mpt: turbo target reply with " "null associated ccb moving data"); /* NOTREACHED */ } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST local done\n"); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL); return (TRUE); } tgt->ccb = NULL; tgt->nxfers++; mpt_req_untimeout(req, mpt_timeout, ccb); mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); /* * Free the Target Assist Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * Do we need to send status now? That is, are * we done with all our data transfers? 
*/ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); tgt->state = TGT_STATE_IN_CAM; if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); break; } /* * Otherwise, send status (and sense) */ if (ccb->ccb_h.flags & CAM_SEND_SENSE) { sp = sense; memcpy(sp, &ccb->csio.sense_data, min(ccb->csio.sense_len, MPT_SENSE_SIZE)); } mpt_scsi_tgt_status(mpt, ccb, req, ccb->csio.scsi_status, sp); break; } case TGT_STATE_SENDING_STATUS: case TGT_STATE_MOVING_DATA_AND_STATUS: { int ioindex; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request sending status"); /* NOTREACHED */ } if (ccb) { tgt->ccb = NULL; if (tgt->state == TGT_STATE_MOVING_DATA_AND_STATUS) { tgt->nxfers++; } mpt_req_untimeout(req, mpt_timeout, ccb); if (ccb->ccb_h.flags & CAM_SEND_SENSE) { ccb->ccb_h.status |= CAM_SENT_SENSE; } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS tag %x sts %x flgs %x req " "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, ccb->ccb_h.flags, tgt->req); /* * Free the Target Send Status Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); /* * Notify CAM that we're done */ mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("ZERO ccb sts at %d", __LINE__)); tgt->ccb = NULL; } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS non-CAM for req %p:%u\n", tgt->req, tgt->req->serno); } TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * And re-post the Command Buffer. * This will reset the state. */ ioindex = GET_IO_INDEX(reply_desc); TAILQ_REMOVE(&mpt->request_pending_list, req, links); tgt->is_local = 0; mpt_post_target_command(mpt, req, ioindex); /* * And post a done for anyone who cares */ if (ccb) { if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); } break; } case TGT_STATE_NIL: /* XXX This Never Happens XXX */ tgt->state = TGT_STATE_LOADED; break; default: mpt_prt(mpt, "Unknown Target State 0x%x in Context " "Reply Function\n", tgt->state); } return (TRUE); } status = le16toh(reply_frame->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { dbg = MPT_PRT_ERROR; } else { dbg = MPT_PRT_DEBUG1; } mpt_lprt(mpt, dbg, "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", req, req->serno, reply_frame, reply_frame->Function, status); switch (reply_frame->Function) { case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: { mpt_tgt_state_t *tgt; #ifdef INVARIANTS mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); #endif if (status != MPI_IOCSTATUS_SUCCESS) { /* * XXX What to do? 
*/ break; } tgt = MPT_TGT_STATE(mpt, req); KASSERT(tgt->state == TGT_STATE_LOADING, ("bad state 0x%x on reply to buffer post", tgt->state)); mpt_assign_serno(mpt, req); tgt->state = TGT_STATE_LOADED; break; } case MPI_FUNCTION_TARGET_ASSIST: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); #endif mpt_prt(mpt, "target assist completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_STATUS_SEND: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); #endif mpt_prt(mpt, "status send completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_MODE_ABORT: { PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; PTR_MSG_TARGET_MODE_ABORT abtp = (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); #endif mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; } default: mpt_prt(mpt, "Unknown Target Address Reply Function code: " "0x%x\n", reply_frame->Function); break; } return (TRUE); } Index: head/sys/dev/msk/if_mskreg.h =================================================================== --- head/sys/dev/msk/if_mskreg.h (revision 258779) +++ head/sys/dev/msk/if_mskreg.h (revision 258780) @@ -1,2595 +1,2595 @@ /****************************************************************************** * * Name: skgehw.h * Project: Gigabit Ethernet Adapters, Common Modules * Version: $Revision: 2.49 $ * Date: $Date: 2005/01/20 13:01:35 $ * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family * ******************************************************************************/ /****************************************************************************** * * LICENSE: * Copyright (C) Marvell International Ltd. and/or its affiliates * * The computer program files contained in this folder ("Files") * are provided to you under the BSD-type license terms provided * below, and any use of such Files and any derivative works * thereof created by you shall be governed by the following terms * and conditions: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * - Neither the name of Marvell nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * /LICENSE * ******************************************************************************/ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2003 Nathan L. Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ /*$FreeBSD$*/ /* * SysKonnect PCI vendor ID */ #define VENDORID_SK 0x1148 /* * Marvell PCI vendor ID */ #define VENDORID_MARVELL 0x11AB /* * D-Link PCI vendor ID */ #define VENDORID_DLINK 0x1186 /* * SysKonnect ethernet device IDs */ #define DEVICEID_SK_YUKON2 0x9000 #define DEVICEID_SK_YUKON2_EXPR 0x9e00 /* * Marvell gigabit ethernet device IDs */ #define DEVICEID_MRVL_8021CU 0x4340 #define DEVICEID_MRVL_8022CU 0x4341 #define DEVICEID_MRVL_8061CU 0x4342 #define DEVICEID_MRVL_8062CU 0x4343 #define DEVICEID_MRVL_8021X 0x4344 #define DEVICEID_MRVL_8022X 0x4345 #define DEVICEID_MRVL_8061X 0x4346 #define DEVICEID_MRVL_8062X 0x4347 #define DEVICEID_MRVL_8035 0x4350 #define DEVICEID_MRVL_8036 0x4351 #define DEVICEID_MRVL_8038 0x4352 #define DEVICEID_MRVL_8039 0x4353 #define DEVICEID_MRVL_8040 0x4354 #define DEVICEID_MRVL_8040T 0x4355 #define DEVICEID_MRVL_8042 0x4357 #define DEVICEID_MRVL_8048 0x435A #define DEVICEID_MRVL_4360 0x4360 #define DEVICEID_MRVL_4361 0x4361 #define DEVICEID_MRVL_4362 0x4362 #define DEVICEID_MRVL_4363 0x4363 #define DEVICEID_MRVL_4364 0x4364 #define DEVICEID_MRVL_4365 0x4365 #define DEVICEID_MRVL_436A 0x436A #define DEVICEID_MRVL_436B 0x436B #define DEVICEID_MRVL_436C 0x436C #define DEVICEID_MRVL_436D 0x436D #define DEVICEID_MRVL_4370 0x4370 #define DEVICEID_MRVL_4380 0x4380 #define DEVICEID_MRVL_4381 0x4381 /* * D-Link gigabit ethernet device ID */ #define DEVICEID_DLINK_DGE550SX 0x4001 #define DEVICEID_DLINK_DGE560SX 0x4002 #define DEVICEID_DLINK_DGE560T 0x4b00 -#define BIT_31 (1 << 31) +#define BIT_31 (1U << 31) #define BIT_30 (1 << 30) #define BIT_29 (1 << 29) #define BIT_28 (1 << 28) #define BIT_27 (1 << 27) #define BIT_26 (1 << 26) #define BIT_25 (1 << 25) #define BIT_24 (1 << 24) #define BIT_23 (1 << 23) #define BIT_22 (1 << 22) #define BIT_21 (1 << 21) #define BIT_20 (1 << 20) #define BIT_19 (1 << 19) #define BIT_18 (1 << 18) #define BIT_17 (1 << 17) #define BIT_16 (1 << 16) #define BIT_15 (1 << 15) #define BIT_14 (1 << 14) #define BIT_13 (1 << 13) #define BIT_12 (1 << 12) #define BIT_11 (1 << 11) #define BIT_10 (1 << 10) #define BIT_9 (1 << 9) #define BIT_8 (1 << 8) #define BIT_7 (1 << 7) #define BIT_6 (1 << 6) #define BIT_5 (1 << 5) #define BIT_4 (1 << 4) #define BIT_3 (1 << 3) #define BIT_2 (1 << 2) #define BIT_1 (1 << 1) #define BIT_0 (1 << 0) #define SHIFT31(x) ((x) << 31) #define SHIFT30(x) ((x) << 30) #define SHIFT29(x) ((x) << 29) #define SHIFT28(x) ((x) << 28) #define SHIFT27(x) ((x) << 27) #define SHIFT26(x) ((x) << 26) #define SHIFT25(x) ((x) << 25) #define SHIFT24(x) ((x) << 24) #define SHIFT23(x) ((x) << 23) #define SHIFT22(x) ((x) << 22) #define SHIFT21(x) ((x) << 21) #define SHIFT20(x) ((x) << 20) #define SHIFT19(x) ((x) << 19) #define SHIFT18(x) ((x) << 18) #define SHIFT17(x) ((x) << 17) #define SHIFT16(x) ((x) << 16) #define SHIFT15(x) ((x) << 15) #define SHIFT14(x) ((x) << 14) #define SHIFT13(x) ((x) << 13) #define SHIFT12(x) ((x) << 12) #define SHIFT11(x) ((x) << 11) #define SHIFT10(x) ((x) << 10) #define SHIFT9(x) ((x) << 9) #define SHIFT8(x) ((x) << 8) #define SHIFT7(x) ((x) << 7) #define SHIFT6(x) ((x) << 6) #define SHIFT5(x) ((x) << 5) #define SHIFT4(x) ((x) << 4) #define SHIFT3(x) ((x) << 3) #define SHIFT2(x) ((x) << 2) #define SHIFT1(x) ((x) << 1) #define SHIFT0(x) ((x) << 0) /* * PCI Configuration Space header */ #define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */ #define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */ #define PCI_OUR_REG_1 0x40 /* 32 bit Our Register 1 */ #define PCI_OUR_REG_2 0x44 /* 32 bit Our Register 2 */ 
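The one-line BIT_31 change above is the substance of this revision: with a 32-bit int, (1 << 31) shifts a one into the sign bit, which is undefined behavior in C, and the negative value it typically produces sign-extends when widened to a 64-bit type. BIT_30 through BIT_0 never reach the sign bit and so are left signed. A minimal standalone sketch of the difference (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * Undefined behavior: the shift overflows a signed 32-bit int.
	 * Compilers commonly yield INT_MIN here, which sign-extends to
	 * 0xffffffff80000000 when converted to a 64-bit type.
	 */
	uint64_t from_signed = (uint64_t)(1 << 31);

	/* Well-defined: an unsigned shift, always 0x0000000080000000. */
	uint64_t from_unsigned = (uint64_t)(1U << 31);

	printf("signed:   %#018jx\n", (uintmax_t)from_signed);
	printf("unsigned: %#018jx\n", (uintmax_t)from_unsigned);
	return (0);
}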
#define PCI_OUR_STATUS 0x7c /* 32 bit Adapter Status Register */ #define PCI_OUR_REG_3 0x80 /* 32 bit Our Register 3 */ #define PCI_OUR_REG_4 0x84 /* 32 bit Our Register 4 */ #define PCI_OUR_REG_5 0x88 /* 32 bit Our Register 5 */ #define PCI_CFG_REG_0 0x90 /* 32 bit Config Register 0 */ #define PCI_CFG_REG_1 0x94 /* 32 bit Config Register 1 */ /* PCI Express Capability */ #define PEX_CAP_ID 0xe0 /* 8 bit PEX Capability ID */ #define PEX_NITEM 0xe1 /* 8 bit PEX Next Item Pointer */ #define PEX_CAP_REG 0xe2 /* 16 bit PEX Capability Register */ #define PEX_DEV_CAP 0xe4 /* 32 bit PEX Device Capabilities */ #define PEX_DEV_CTRL 0xe8 /* 16 bit PEX Device Control */ #define PEX_DEV_STAT 0xea /* 16 bit PEX Device Status */ #define PEX_LNK_CAP 0xec /* 32 bit PEX Link Capabilities */ #define PEX_LNK_CTRL 0xf0 /* 16 bit PEX Link Control */ #define PEX_LNK_STAT 0xf2 /* 16 bit PEX Link Status */ /* PCI Express Extended Capabilities */ #define PEX_ADV_ERR_REP 0x100 /* 32 bit PEX Advanced Error Reporting */ #define PEX_UNC_ERR_STAT 0x104 /* 32 bit PEX Uncorr. Errors Status */ #define PEX_UNC_ERR_MASK 0x108 /* 32 bit PEX Uncorr. Errors Mask */ #define PEX_UNC_ERR_SEV 0x10c /* 32 bit PEX Uncorr. Errors Severity */ #define PEX_COR_ERR_STAT 0x110 /* 32 bit PEX Correc. Errors Status */ #define PEX_COR_ERR_MASK 0x114 /* 32 bit PEX Correc. Errors Mask */ #define PEX_ADV_ERR_CAP_C 0x118 /* 32 bit PEX Advanced Error Cap./Ctrl */ #define PEX_HEADER_LOG 0x11c /* 4x32 bit PEX Header Log Register */ /* PCI_OUR_REG_1 32 bit Our Register 1 */ #define PCI_Y2_PIG_ENA BIT_31 /* Enable Plug-in-Go (YUKON-2) */ #define PCI_Y2_DLL_DIS BIT_30 /* Disable PCI DLL (YUKON-2) */ #define PCI_Y2_PHY2_COMA BIT_29 /* Set PHY 2 to Coma Mode (YUKON-2) */ #define PCI_Y2_PHY1_COMA BIT_28 /* Set PHY 1 to Coma Mode (YUKON-2) */ #define PCI_Y2_PHY2_POWD BIT_27 /* Set PHY 2 to Power Down (YUKON-2) */ #define PCI_Y2_PHY1_POWD BIT_26 /* Set PHY 1 to Power Down (YUKON-2) */ #define PCI_DIS_BOOT BIT_24 /* Disable BOOT via ROM */ #define PCI_EN_IO BIT_23 /* Mapping to I/O space */ #define PCI_EN_FPROM BIT_22 /* Enable FLASH mapping to memory */ /* 1 = Map Flash to memory */ /* 0 = Disable addr. dec */ #define PCI_PAGESIZE (3L<<20)/* Bit 21..20: FLASH Page Size */ #define PCI_PAGE_16 (0L<<20)/* 16 k pages */ #define PCI_PAGE_32K (1L<<20)/* 32 k pages */ #define PCI_PAGE_64K (2L<<20)/* 64 k pages */ #define PCI_PAGE_128K (3L<<20)/* 128 k pages */ #define PCI_PAGEREG (7L<<16)/* Bit 18..16: Page Register */ #define PCI_PEX_LEGNAT BIT_15 /* PEX PM legacy/native mode (YUKON-2) */ #define PCI_FORCE_BE BIT_14 /* Assert all BEs on MR */ #define PCI_DIS_MRL BIT_13 /* Disable Mem Read Line */ #define PCI_DIS_MRM BIT_12 /* Disable Mem Read Multiple */ #define PCI_DIS_MWI BIT_11 /* Disable Mem Write & Invalidate */ #define PCI_DISC_CLS BIT_10 /* Disc: cacheLsz bound */ #define PCI_BURST_DIS BIT_9 /* Burst Disable */ #define PCI_DIS_PCI_CLK BIT_8 /* Disable PCI clock driving */ #define PCI_SKEW_DAS (0xfL<<4)/* Bit 7.. 4: Skew Ctrl, DAS Ext */ #define PCI_SKEW_BASE 0xfL /* Bit 3.. 0: Skew Ctrl, Base */ #define PCI_CLS_OPT BIT_3 /* Cache Line Size opt. PCI-X (YUKON-2) */ /* PCI_OUR_REG_2 32 bit Our Register 2 */ #define PCI_VPD_WR_THR (0xff<<24) /* Bit 31..24: VPD Write Threshold */ #define PCI_DEV_SEL (0x7f<<17) /* Bit 23..17: EEPROM Device Select */ #define PCI_VPD_ROM_SZ (0x07<<14) /* Bit 16..14: VPD ROM Size */ /* Bit 13..12: reserved */ #define PCI_PATCH_DIR (0x0f<<8) /* Bit 11.. 
8: Ext Patches dir 3..0 */ #define PCI_PATCH_DIR_3 BIT_11 #define PCI_PATCH_DIR_2 BIT_10 #define PCI_PATCH_DIR_1 BIT_9 #define PCI_PATCH_DIR_0 BIT_8 #define PCI_EXT_PATCHS (0x0f<<4) /* Bit 7.. 4: Extended Patches 3..0 */ #define PCI_EXT_PATCH_3 BIT_7 #define PCI_EXT_PATCH_2 BIT_6 #define PCI_EXT_PATCH_1 BIT_5 #define PCI_EXT_PATCH_0 BIT_4 #define PCI_EN_DUMMY_RD BIT_3 /* Enable Dummy Read */ #define PCI_REV_DESC BIT_2 /* Reverse Desc. Bytes */ #define PCI_USEDATA64 BIT_0 /* Use 64Bit Data bus ext */ /* PCI_OUR_STATUS 32 bit Adapter Status Register (Yukon-2) */ #define PCI_OS_PCI64B BIT_31 /* Conventional PCI 64 bits Bus */ #define PCI_OS_PCIX BIT_30 /* PCI-X Bus */ #define PCI_OS_MODE_MSK (3<<28) /* Bit 29..28: PCI-X Bus Mode Mask */ #define PCI_OS_PCI66M BIT_27 /* PCI 66 MHz Bus */ #define PCI_OS_PCI_X BIT_26 /* PCI/PCI-X Bus (0 = PEX) */ #define PCI_OS_DLLE_MSK (3<<24) /* Bit 25..24: DLL Status Indication */ #define PCI_OS_DLLR_MSK (0x0f<<20) /* Bit 23..20: DLL Row Counters Values */ #define PCI_OS_DLLC_MSK (0x0f<<16) /* Bit 19..16: DLL Col. Counters Values */ #define PCI_OS_SPEED(val) ((val & PCI_OS_MODE_MSK) >> 28) /* PCI-X Speed */ /* possible values for the speed field of the register */ #define PCI_OS_SPD_PCI 0 /* PCI Conventional Bus */ #define PCI_OS_SPD_X66 1 /* PCI-X 66MHz Bus */ #define PCI_OS_SPD_X100 2 /* PCI-X 100MHz Bus */ #define PCI_OS_SPD_X133 3 /* PCI-X 133MHz Bus */ /* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */ #define PCI_CLK_MACSEC_DIS BIT_17 /* Disable Clock MACSec. */ /* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ #define PCI_TIMER_VALUE_MSK (0xff<<16) /* Bit 23..16: Timer Value Mask */ #define PCI_FORCE_ASPM_REQUEST BIT_15 /* Force ASPM Request (A1 only) */ #define PCI_ASPM_GPHY_LINK_DOWN BIT_14 /* GPHY Link Down (A1 only) */ #define PCI_ASPM_INT_FIFO_EMPTY BIT_13 /* Internal FIFO Empty (A1 only) */ #define PCI_ASPM_CLKRUN_REQUEST BIT_12 /* CLKRUN Request (A1 only) */ #define PCI_ASPM_FORCE_CLKREQ_ENA BIT_4 /* Force CLKREQ Enable (A1b only) */ #define PCI_ASPM_CLKREQ_PAD_CTL BIT_3 /* CLKREQ PAD Control (A1 only) */ #define PCI_ASPM_A1_MODE_SELECT BIT_2 /* A1 Mode Select (A1 only) */ #define PCI_CLK_GATE_PEX_UNIT_ENA BIT_1 /* Enable Gate PEX Unit Clock */ #define PCI_CLK_GATE_ROOT_COR_ENA BIT_0 /* Enable Gate Root Core Clock */ /* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */ /* Bit 31..27: for A3 & later */ #define PCI_CTL_DIV_CORE_CLK_ENA BIT_31 /* Divide Core Clock Enable */ #define PCI_CTL_SRESET_VMAIN_AV BIT_30 /* Soft Reset for Vmain_av De-Glitch */ #define PCI_CTL_BYPASS_VMAIN_AV BIT_29 /* Bypass En. 
for Vmain_av De-Glitch */ #define PCI_CTL_TIM_VMAIN_AV1 BIT_28 /* Bit 28..27: Timer Vmain_av Mask */ #define PCI_CTL_TIM_VMAIN_AV0 BIT_27 /* Bit 28..27: Timer Vmain_av Mask */ #define PCI_CTL_TIM_VMAIN_AV_MSK (BIT_28 | BIT_27) /* Bit 26..16: Release Clock on Event */ #define PCI_REL_PCIE_RST_DE_ASS BIT_26 /* PCIe Reset De-Asserted */ #define PCI_REL_GPHY_REC_PACKET BIT_25 /* GPHY Received Packet */ #define PCI_REL_INT_FIFO_N_EMPTY BIT_24 /* Internal FIFO Not Empty */ #define PCI_REL_MAIN_PWR_AVAIL BIT_23 /* Main Power Available */ #define PCI_REL_CLKRUN_REQ_REL BIT_22 /* CLKRUN Request Release */ #define PCI_REL_PCIE_RESET_ASS BIT_21 /* PCIe Reset Asserted */ #define PCI_REL_PME_ASSERTED BIT_20 /* PME Asserted */ #define PCI_REL_PCIE_EXIT_L1_ST BIT_19 /* PCIe Exit L1 State */ #define PCI_REL_LOADER_NOT_FIN BIT_18 /* EPROM Loader Not Finished */ #define PCI_REL_PCIE_RX_EX_IDLE BIT_17 /* PCIe Rx Exit Electrical Idle State */ #define PCI_REL_GPHY_LINK_UP BIT_16 /* GPHY Link Up */ /* Bit 10.. 0: Mask for Gate Clock */ #define PCI_GAT_PCIE_RST_ASSERTED BIT_10 /* PCIe Reset Asserted */ #define PCI_GAT_GPHY_N_REC_PACKET BIT_9 /* GPHY Not Received Packet */ #define PCI_GAT_INT_FIFO_EMPTY BIT_8 /* Internal FIFO Empty */ #define PCI_GAT_MAIN_PWR_N_AVAIL BIT_7 /* Main Power Not Available */ #define PCI_GAT_CLKRUN_REQ_REL BIT_6 /* CLKRUN Not Requested */ #define PCI_GAT_PCIE_RESET_ASS BIT_5 /* PCIe Reset Asserted */ #define PCI_GAT_PME_DE_ASSERTED BIT_4 /* PME De-Asserted */ #define PCI_GAT_PCIE_ENTER_L1_ST BIT_3 /* PCIe Enter L1 State */ #define PCI_GAT_LOADER_FINISHED BIT_2 /* EPROM Loader Finished */ #define PCI_GAT_PCIE_RX_EL_IDLE BIT_1 /* PCIe Rx Electrical Idle State */ #define PCI_GAT_GPHY_LINK_DOWN BIT_0 /* GPHY Link Down */ /* PCI_CFG_REG_1 32 bit Config Register 1 */ #define PCI_CF1_DIS_REL_EVT_RST BIT_24 /* Dis. Rel. Event during PCIE reset */ /* Bit 23..21: Release Clock on Event */ #define PCI_CF1_REL_LDR_NOT_FIN BIT_23 /* EEPROM Loader Not Finished */ #define PCI_CF1_REL_VMAIN_AVLBL BIT_22 /* Vmain available */ #define PCI_CF1_REL_PCIE_RESET BIT_21 /* PCI-E reset */ /* Bit 20..18: Gate Clock on Event */ #define PCI_CF1_GAT_LDR_NOT_FIN BIT_20 /* EEPROM Loader Finished */ #define PCI_CF1_GAT_PCIE_RX_IDLE BIT_19 /* PCI-E Rx Electrical idle */ #define PCI_CF1_GAT_PCIE_RESET BIT_18 /* PCI-E Reset */ #define PCI_CF1_PRST_PHY_CLKREQ BIT_17 /* Enable PCI-E rst & PM2PHY gen. CLKREQ */ #define PCI_CF1_PCIE_RST_CLKREQ BIT_16 /* Enable PCI-E rst generate CLKREQ */ #define PCI_CF1_ENA_CFG_LDR_DONE BIT_8 /* Enable core level Config loader done */ #define PCI_CF1_ENA_TXBMU_RD_IDLE BIT_1 /* Enable TX BMU Read IDLE for ASPM */ #define PCI_CF1_ENA_TXBMU_WR_IDLE BIT_0 /* Enable TX BMU Write IDLE for ASPM */ /* PEX_DEV_CTRL 16 bit PEX Device Control (Yukon-2) */ #define PEX_DC_MAX_RRS_MSK (7<<12) /* Bit 14..12: Max. Read Request Size */ #define PEX_DC_EN_NO_SNOOP BIT_11 /* Enable No Snoop */ #define PEX_DC_EN_AUX_POW BIT_10 /* Enable AUX Power */ #define PEX_DC_EN_PHANTOM BIT_9 /* Enable Phantom Functions */ #define PEX_DC_EN_EXT_TAG BIT_8 /* Enable Extended Tag Field */ #define PEX_DC_MAX_PLS_MSK (7<<5) /* Bit 7.. 5: Max. 
Payload Size Mask */ #define PEX_DC_EN_REL_ORD BIT_4 /* Enable Relaxed Ordering */ #define PEX_DC_EN_UNS_RQ_RP BIT_3 /* Enable Unsupported Request Reporting */ #define PEX_DC_EN_FAT_ER_RP BIT_2 /* Enable Fatal Error Reporting */ #define PEX_DC_EN_NFA_ER_RP BIT_1 /* Enable Non-Fatal Error Reporting */ #define PEX_DC_EN_COR_ER_RP BIT_0 /* Enable Correctable Error Reporting */ #define PEX_DC_MAX_RD_RQ_SIZE(x) (SHIFT12(x) & PEX_DC_MAX_RRS_MSK) /* PEX_LNK_STAT 16 bit PEX Link Status (Yukon-2) */ #define PEX_LS_SLOT_CLK_CFG BIT_12 /* Slot Clock Config */ #define PEX_LS_LINK_TRAIN BIT_11 /* Link Training */ #define PEX_LS_TRAIN_ERROR BIT_10 /* Training Error */ #define PEX_LS_LINK_WI_MSK (0x3f<<4) /* Bit 9.. 4: Neg. Link Width Mask */ #define PEX_LS_LINK_SP_MSK 0x0f /* Bit 3.. 0: Link Speed Mask */ /* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */ #define PEX_UNSUP_REQ BIT_20 /* Unsupported Request Error */ #define PEX_MALFOR_TLP BIT_18 /* Malformed TLP */ #define PEX_RX_OV BIT_17 /* Receiver Overflow (not supported) */ #define PEX_UNEXP_COMP BIT_16 /* Unexpected Completion */ #define PEX_COMP_TO BIT_14 /* Completion Timeout */ #define PEX_FLOW_CTRL_P BIT_13 /* Flow Control Protocol Error */ #define PEX_POIS_TLP BIT_12 /* Poisoned TLP */ #define PEX_DATA_LINK_P BIT_4 /* Data Link Protocol Error */ #define PEX_FATAL_ERRORS (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P) /* Control Register File (Address Map) */ /* * Bank 0 */ #define B0_RAP 0x0000 /* 8 bit Register Address Port */ #define B0_CTST 0x0004 /* 16 bit Control/Status register */ #define B0_LED 0x0006 /* 8 Bit LED register */ #define B0_POWER_CTRL 0x0007 /* 8 Bit Power Control reg (YUKON only) */ #define B0_ISRC 0x0008 /* 32 bit Interrupt Source Register */ #define B0_IMSK 0x000c /* 32 bit Interrupt Mask Register */ #define B0_HWE_ISRC 0x0010 /* 32 bit HW Error Interrupt Src Reg */ #define B0_HWE_IMSK 0x0014 /* 32 bit HW Error Interrupt Mask Reg */ #define B0_SP_ISRC 0x0018 /* 32 bit Special Interrupt Source Reg 1 */ /* Special ISR registers (Yukon-2 only) */ #define B0_Y2_SP_ISRC2 0x001c /* 32 bit Special Interrupt Source Reg 2 */ #define B0_Y2_SP_ISRC3 0x0020 /* 32 bit Special Interrupt Source Reg 3 */ #define B0_Y2_SP_EISR 0x0024 /* 32 bit Enter ISR Reg */ #define B0_Y2_SP_LISR 0x0028 /* 32 bit Leave ISR Reg */ #define B0_Y2_SP_ICR 0x002c /* 32 bit Interrupt Control Reg */ /* * Bank 1 * - completely empty (this is the RAP Block window) * Note: if RAP = 1 this page is reserved */ /* * Bank 2 */ /* NA reg = 48 bit Network Address Register, 3x16 or 8x8 bit readable */ #define B2_MAC_1 0x0100 /* NA reg MAC Address 1 */ #define B2_MAC_2 0x0108 /* NA reg MAC Address 2 */ #define B2_MAC_3 0x0110 /* NA reg MAC Address 3 */ #define B2_CONN_TYP 0x0118 /* 8 bit Connector type */ #define B2_PMD_TYP 0x0119 /* 8 bit PMD type */ #define B2_MAC_CFG 0x011a /* 8 bit MAC Configuration / Chip Revision */ #define B2_CHIP_ID 0x011b /* 8 bit Chip Identification Number */ #define B2_E_0 0x011c /* 8 bit EPROM Byte 0 (ext. 
SRAM size */ #define B2_Y2_CLK_GATE 0x011d /* 8 bit Clock Gating (Yukon-2) */ #define B2_Y2_HW_RES 0x011e /* 8 bit HW Resources (Yukon-2) */ #define B2_E_3 0x011f /* 8 bit EPROM Byte 3 */ #define B2_Y2_CLK_CTRL 0x0120 /* 32 bit Core Clock Frequency Control */ #define B2_TI_INI 0x0130 /* 32 bit Timer Init Value */ #define B2_TI_VAL 0x0134 /* 32 bit Timer Value */ #define B2_TI_CTRL 0x0138 /* 8 bit Timer Control */ #define B2_TI_TEST 0x0139 /* 8 Bit Timer Test */ #define B2_IRQM_INI 0x0140 /* 32 bit IRQ Moderation Timer Init Reg.*/ #define B2_IRQM_VAL 0x0144 /* 32 bit IRQ Moderation Timer Value */ #define B2_IRQM_CTRL 0x0148 /* 8 bit IRQ Moderation Timer Control */ #define B2_IRQM_TEST 0x0149 /* 8 bit IRQ Moderation Timer Test */ #define B2_IRQM_MSK 0x014c /* 32 bit IRQ Moderation Mask */ #define B2_IRQM_HWE_MSK 0x0150 /* 32 bit IRQ Moderation HW Error Mask */ #define B2_TST_CTRL1 0x0158 /* 8 bit Test Control Register 1 */ #define B2_TST_CTRL2 0x0159 /* 8 bit Test Control Register 2 */ #define B2_GP_IO 0x015c /* 32 bit General Purpose I/O Register */ #define B2_I2C_CTRL 0x0160 /* 32 bit I2C HW Control Register */ #define B2_I2C_DATA 0x0164 /* 32 bit I2C HW Data Register */ #define B2_I2C_IRQ 0x0168 /* 32 bit I2C HW IRQ Register */ #define B2_I2C_SW 0x016c /* 32 bit I2C SW Port Register */ #define Y2_PEX_PHY_DATA 0x0170 /* 16 bit PEX PHY Data Register */ #define Y2_PEX_PHY_ADDR 0x0172 /* 16 bit PEX PHY Address Register */ /* * Bank 3 */ /* RAM Random Registers */ #define B3_RAM_ADDR 0x0180 /* 32 bit RAM Address, to read or write */ #define B3_RAM_DATA_LO 0x0184 /* 32 bit RAM Data Word (low dWord) */ #define B3_RAM_DATA_HI 0x0188 /* 32 bit RAM Data Word (high dWord) */ #define SELECT_RAM_BUFFER(rb, addr) (addr | (rb << 6)) /* Yukon-2 only */ /* RAM Interface Registers */ /* Yukon-2: use SELECT_RAM_BUFFER() to access the RAM buffer */ /* * The HW-Spec. calls this registers Timeout Value 0..11. But this names are * not usable in SW. Please notice these are NOT real timeouts, these are * the number of qWords transferred continuously. 
*/ #define B3_RI_WTO_R1 0x0190 /* 8 bit WR Timeout Queue R1 (TO0) */ #define B3_RI_WTO_XA1 0x0191 /* 8 bit WR Timeout Queue XA1 (TO1) */ #define B3_RI_WTO_XS1 0x0192 /* 8 bit WR Timeout Queue XS1 (TO2) */ #define B3_RI_RTO_R1 0x0193 /* 8 bit RD Timeout Queue R1 (TO3) */ #define B3_RI_RTO_XA1 0x0194 /* 8 bit RD Timeout Queue XA1 (TO4) */ #define B3_RI_RTO_XS1 0x0195 /* 8 bit RD Timeout Queue XS1 (TO5) */ #define B3_RI_WTO_R2 0x0196 /* 8 bit WR Timeout Queue R2 (TO6) */ #define B3_RI_WTO_XA2 0x0197 /* 8 bit WR Timeout Queue XA2 (TO7) */ #define B3_RI_WTO_XS2 0x0198 /* 8 bit WR Timeout Queue XS2 (TO8) */ #define B3_RI_RTO_R2 0x0199 /* 8 bit RD Timeout Queue R2 (TO9) */ #define B3_RI_RTO_XA2 0x019a /* 8 bit RD Timeout Queue XA2 (TO10)*/ #define B3_RI_RTO_XS2 0x019b /* 8 bit RD Timeout Queue XS2 (TO11)*/ #define B3_RI_TO_VAL 0x019c /* 8 bit Current Timeout Count Val */ #define B3_RI_CTRL 0x01a0 /* 16 bit RAM Interface Control Register */ #define B3_RI_TEST 0x01a2 /* 8 bit RAM Interface Test Register */ /* * Bank 4 - 5 */ /* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ #define TXA_ITI_INI 0x0200 /* 32 bit Tx Arb Interval Timer Init Val*/ #define TXA_ITI_VAL 0x0204 /* 32 bit Tx Arb Interval Timer Value */ #define TXA_LIM_INI 0x0208 /* 32 bit Tx Arb Limit Counter Init Val */ #define TXA_LIM_VAL 0x020c /* 32 bit Tx Arb Limit Counter Value */ #define TXA_CTRL 0x0210 /* 8 bit Tx Arbiter Control Register */ #define TXA_TEST 0x0211 /* 8 bit Tx Arbiter Test Register */ #define TXA_STAT 0x0212 /* 8 bit Tx Arbiter Status Register */ #define MR_ADDR(Mac, Offs) (((Mac) << 7) + (Offs)) /* RSS key registers for Yukon-2 Family */ #define B4_RSS_KEY 0x0220 /* 4x32 bit RSS Key register (Yukon-2) */ /* RSS key register offsets */ #define KEY_IDX_0 0 /* offset for location of KEY 0 */ #define KEY_IDX_1 4 /* offset for location of KEY 1 */ #define KEY_IDX_2 8 /* offset for location of KEY 2 */ #define KEY_IDX_3 12 /* offset for location of KEY 3 */ /* 0x0280 - 0x0292: MAC 2 */ #define RSS_KEY_ADDR(Port, KeyIndex) \ ((B4_RSS_KEY | ( ((Port) == 0) ? 
0 : 0x80)) + (KeyIndex)) /* * Bank 8 - 15 */ /* Receive and Transmit Queue Registers, use Q_ADDR() to access */ #define B8_Q_REGS 0x0400 /* Queue Register Offsets, use Q_ADDR() to access */ #define Q_D 0x00 /* 8*32 bit Current Descriptor */ #define Q_DA_L 0x20 /* 32 bit Current Descriptor Address Low dWord */ #define Q_DONE 0x24 /* 16 bit Done Index */ #define Q_AC_L 0x28 /* 32 bit Current Address Counter Low dWord */ #define Q_AC_H 0x2c /* 32 bit Current Address Counter High dWord */ #define Q_BC 0x30 /* 32 bit Current Byte Counter */ #define Q_CSR 0x34 /* 32 bit BMU Control/Status Register */ #define Q_F 0x38 /* 32 bit Flag Register */ #define Q_T1 0x3c /* 32 bit Test Register 1 */ #define Q_T1_TR 0x3c /* 8 bit Test Register 1 Transfer SM */ #define Q_T1_WR 0x3d /* 8 bit Test Register 1 Write Descriptor SM */ #define Q_T1_RD 0x3e /* 8 bit Test Register 1 Read Descriptor SM */ #define Q_T1_SV 0x3f /* 8 bit Test Register 1 Supervisor SM */ #define Q_WM 0x40 /* 16 bit FIFO Watermark */ #define Q_AL 0x42 /* 8 bit FIFO Alignment */ #define Q_RSP 0x44 /* 16 bit FIFO Read Shadow Pointer */ #define Q_RSL 0x46 /* 8 bit FIFO Read Shadow Level */ #define Q_RP 0x48 /* 8 bit FIFO Read Pointer */ #define Q_RL 0x4a /* 8 bit FIFO Read Level */ #define Q_WP 0x4c /* 8 bit FIFO Write Pointer */ #define Q_WSP 0x4d /* 8 bit FIFO Write Shadow Pointer */ #define Q_WL 0x4e /* 8 bit FIFO Write Level */ #define Q_WSL 0x4f /* 8 bit FIFO Write Shadow Level */ #define Q_ADDR(Queue, Offs) (B8_Q_REGS + (Queue) + (Offs)) /* Queue Prefetch Unit Offsets, use Y2_PREF_Q_ADDR() to address */ #define Y2_B8_PREF_REGS 0x0450 #define PREF_UNIT_CTRL_REG 0x00 /* 32 bit Prefetch Control register */ #define PREF_UNIT_LAST_IDX_REG 0x04 /* 16 bit Last Index */ #define PREF_UNIT_ADDR_LOW_REG 0x08 /* 32 bit List start addr, low part */ #define PREF_UNIT_ADDR_HI_REG 0x0c /* 32 bit List start addr, high part*/ #define PREF_UNIT_GET_IDX_REG 0x10 /* 16 bit Get Index */ #define PREF_UNIT_PUT_IDX_REG 0x14 /* 16 bit Put Index */ #define PREF_UNIT_FIFO_WP_REG 0x20 /* 8 bit FIFO write pointer */ #define PREF_UNIT_FIFO_RP_REG 0x24 /* 8 bit FIFO read pointer */ #define PREF_UNIT_FIFO_WM_REG 0x28 /* 8 bit FIFO watermark */ #define PREF_UNIT_FIFO_LEV_REG 0x2c /* 8 bit FIFO level */ #define PREF_UNIT_MASK_IDX 0x0fff #define Y2_PREF_Q_ADDR(Queue, Offs) (Y2_B8_PREF_REGS + (Queue) + (Offs)) /* * Bank 16 - 23 */ /* RAM Buffer Registers */ #define B16_RAM_REGS 0x0800 /* RAM Buffer Register Offsets, use RB_ADDR() to access */ #define RB_START 0x00 /* 32 bit RAM Buffer Start Address */ #define RB_END 0x04 /* 32 bit RAM Buffer End Address */ #define RB_WP 0x08 /* 32 bit RAM Buffer Write Pointer */ #define RB_RP 0x0c /* 32 bit RAM Buffer Read Pointer */ #define RB_RX_UTPP 0x10 /* 32 bit Rx Upper Threshold, Pause Packet */ #define RB_RX_LTPP 0x14 /* 32 bit Rx Lower Threshold, Pause Packet */ #define RB_RX_UTHP 0x18 /* 32 bit Rx Upper Threshold, High Prio */ #define RB_RX_LTHP 0x1c /* 32 bit Rx Lower Threshold, High Prio */ #define RB_PC 0x20 /* 32 bit RAM Buffer Packet Counter */ #define RB_LEV 0x24 /* 32 bit RAM Buffer Level Register */ #define RB_CTRL 0x28 /* 8 bit RAM Buffer Control Register */ #define RB_TST1 0x29 /* 8 bit RAM Buffer Test Register 1 */ #define RB_TST2 0x2a /* 8 bit RAM Buffer Test Register 2 */ /* * Bank 24 */ /* Receive GMAC FIFO (YUKON and Yukon-2), use MR_ADDR() to access */ #define RX_GMF_EA 0x0c40 /* 32 bit Rx GMAC FIFO End Address */ #define RX_GMF_AF_THR 0x0c44 /* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ #define RX_GMF_CTRL_T 0x0c48 /* 32 bit Rx GMAC FIFO Control/Test */ #define RX_GMF_FL_MSK 0x0c4c /* 32 bit Rx GMAC FIFO Flush Mask */ #define RX_GMF_FL_THR 0x0c50 /* 32 bit Rx GMAC FIFO Flush Threshold */ #define RX_GMF_TR_THR 0x0c54 /* 32 bit Rx Truncation Threshold (Yukon-2) */ #define RX_GMF_UP_THR 0x0c58 /* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */ #define RX_GMF_LP_THR 0x0c5a /* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */ #define RX_GMF_VLAN 0x0c5c /* 32 bit Rx VLAN Type Register (Yukon-2) */ #define RX_GMF_WP 0x0c60 /* 32 bit Rx GMAC FIFO Write Pointer */ #define RX_GMF_WLEV 0x0c68 /* 32 bit Rx GMAC FIFO Write Level */ #define RX_GMF_RP 0x0c70 /* 32 bit Rx GMAC FIFO Read Pointer */ #define RX_GMF_RLEV 0x0c78 /* 32 bit Rx GMAC FIFO Read Level */ /* * Bank 25 */ /* 0x0c80 - 0x0cbf: MAC 2 */ /* 0x0cc0 - 0x0cff: reserved */ /* * Bank 26 */ /* Transmit GMAC FIFO (YUKON and Yukon-2), use MR_ADDR() to access */ #define TX_GMF_EA 0x0d40 /* 32 bit Tx GMAC FIFO End Address */ #define TX_GMF_AE_THR 0x0d44 /* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ #define TX_GMF_CTRL_T 0x0d48 /* 32 bit Tx GMAC FIFO Control/Test */ #define TX_GMF_VLAN 0x0d5c /* 32 bit Tx VLAN Type Register (Yukon-2) */ #define TX_GMF_WP 0x0d60 /* 32 bit Tx GMAC FIFO Write Pointer */ #define TX_GMF_WSP 0x0d64 /* 32 bit Tx GMAC FIFO Write Shadow Pointer */ #define TX_GMF_WLEV 0x0d68 /* 32 bit Tx GMAC FIFO Write Level */ #define TX_GMF_RP 0x0d70 /* 32 bit Tx GMAC FIFO Read Pointer */ #define TX_GMF_RSTP 0x0d74 /* 32 bit Tx GMAC FIFO Restart Pointer */ #define TX_GMF_RLEV 0x0d78 /* 32 bit Tx GMAC FIFO Read Level */ /* * Bank 27 */ /* 0x0d80 - 0x0dbf: MAC 2 */ /* 0x0daa - 0x0dff: reserved */ /* * Bank 28 */ /* Descriptor Poll Timer Registers */ #define B28_DPT_INI 0x0e00 /* 24 bit Descriptor Poll Timer Init Val */ #define B28_DPT_VAL 0x0e04 /* 24 bit Descriptor Poll Timer Curr Val */ #define B28_DPT_CTRL 0x0e08 /* 8 bit Descriptor Poll Timer Ctrl Reg */ #define B28_DPT_TST 0x0e0a /* 8 bit Descriptor Poll Timer Test Reg */ /* Time Stamp Timer Registers (YUKON only) */ #define GMAC_TI_ST_VAL 0x0e14 /* 32 bit Time Stamp Timer Curr Val */ #define GMAC_TI_ST_CTRL 0x0e18 /* 8 bit Time Stamp Timer Ctrl Reg */ #define GMAC_TI_ST_TST 0x0e1a /* 8 bit Time Stamp Timer Test Reg */ /* Polling Unit Registers (Yukon-2 only) */ #define POLL_CTRL 0x0e20 /* 32 bit Polling Unit Control Reg */ #define POLL_LAST_IDX 0x0e24 /* 16 bit Polling Unit List Last Index */ #define POLL_LIST_ADDR_LO 0x0e28 /* 32 bit Poll. List Start Addr (low) */ #define POLL_LIST_ADDR_HI 0x0e2c /* 32 bit Poll. 
List Start Addr (high) */ /* ASF Subsystem Registers (Yukon-2 only) */ #define B28_Y2_SMB_CONFIG 0x0e40 /* 32 bit ASF SMBus Config Register */ #define B28_Y2_SMB_CSD_REG 0x0e44 /* 32 bit ASF SMB Control/Status/Data */ #define B28_Y2_CPU_WDOG 0x0e48 /* 32 bit Watchdog Register */ #define B28_Y2_ASF_IRQ_V_BASE 0x0e60 /* 32 bit ASF IRQ Vector Base */ #define B28_Y2_ASF_STAT_CMD 0x0e68 /* 32 bit ASF Status and Command Reg */ #define B28_Y2_ASF_HCU_CCSR 0x0e68 /* 32 bit ASF HCU CCSR (Yukon EX) */ #define B28_Y2_ASF_HOST_COM 0x0e6c /* 32 bit ASF Host Communication Reg */ #define B28_Y2_DATA_REG_1 0x0e70 /* 32 bit ASF/Host Data Register 1 */ #define B28_Y2_DATA_REG_2 0x0e74 /* 32 bit ASF/Host Data Register 2 */ #define B28_Y2_DATA_REG_3 0x0e78 /* 32 bit ASF/Host Data Register 3 */ #define B28_Y2_DATA_REG_4 0x0e7c /* 32 bit ASF/Host Data Register 4 */ /* * Bank 29 */ /* Status BMU Registers (Yukon-2 only)*/ #define STAT_CTRL 0x0e80 /* 32 bit Status BMU Control Reg */ #define STAT_LAST_IDX 0x0e84 /* 16 bit Status BMU Last Index */ #define STAT_LIST_ADDR_LO 0x0e88 /* 32 bit Status List Start Addr (low) */ #define STAT_LIST_ADDR_HI 0x0e8c /* 32 bit Status List Start Addr (high) */ #define STAT_TXA1_RIDX 0x0e90 /* 16 bit Status TxA1 Report Index Reg */ #define STAT_TXS1_RIDX 0x0e92 /* 16 bit Status TxS1 Report Index Reg */ #define STAT_TXA2_RIDX 0x0e94 /* 16 bit Status TxA2 Report Index Reg */ #define STAT_TXS2_RIDX 0x0e96 /* 16 bit Status TxS2 Report Index Reg */ #define STAT_TX_IDX_TH 0x0e98 /* 16 bit Status Tx Index Threshold Reg */ #define STAT_PUT_IDX 0x0e9c /* 16 bit Status Put Index Reg */ /* FIFO Control/Status Registers (Yukon-2 only)*/ #define STAT_FIFO_WP 0x0ea0 /* 8 bit Status FIFO Write Pointer Reg */ #define STAT_FIFO_RP 0x0ea4 /* 8 bit Status FIFO Read Pointer Reg */ #define STAT_FIFO_RSP 0x0ea6 /* 8 bit Status FIFO Read Shadow Ptr */ #define STAT_FIFO_LEVEL 0x0ea8 /* 8 bit Status FIFO Level Reg */ #define STAT_FIFO_SHLVL 0x0eaa /* 8 bit Status FIFO Shadow Level Reg */ #define STAT_FIFO_WM 0x0eac /* 8 bit Status FIFO Watermark Reg */ #define STAT_FIFO_ISR_WM 0x0ead /* 8 bit Status FIFO ISR Watermark Reg */ /* Level and ISR Timer Registers (Yukon-2 only)*/ #define STAT_LEV_TIMER_INI 0x0eb0 /* 32 bit Level Timer Init. Value Reg */ #define STAT_LEV_TIMER_CNT 0x0eb4 /* 32 bit Level Timer Counter Reg */ #define STAT_LEV_TIMER_CTRL 0x0eb8 /* 8 bit Level Timer Control Reg */ #define STAT_LEV_TIMER_TEST 0x0eb9 /* 8 bit Level Timer Test Reg */ #define STAT_TX_TIMER_INI 0x0ec0 /* 32 bit Tx Timer Init. Value Reg */ #define STAT_TX_TIMER_CNT 0x0ec4 /* 32 bit Tx Timer Counter Reg */ #define STAT_TX_TIMER_CTRL 0x0ec8 /* 8 bit Tx Timer Control Reg */ #define STAT_TX_TIMER_TEST 0x0ec9 /* 8 bit Tx Timer Test Reg */ #define STAT_ISR_TIMER_INI 0x0ed0 /* 32 bit ISR Timer Init. 
Value Reg */ #define STAT_ISR_TIMER_CNT 0x0ed4 /* 32 bit ISR Timer Counter Reg */ #define STAT_ISR_TIMER_CTRL 0x0ed8 /* 8 bit ISR Timer Control Reg */ #define STAT_ISR_TIMER_TEST 0x0ed9 /* 8 bit ISR Timer Test Reg */ #define ST_LAST_IDX_MASK 0x007f /* Last Index Mask */ #define ST_TXRP_IDX_MASK 0x0fff /* Tx Report Index Mask */ #define ST_TXTH_IDX_MASK 0x0fff /* Tx Threshold Index Mask */ #define ST_WM_IDX_MASK 0x3f /* FIFO Watermark Index Mask */ /* * Bank 30 */ /* GMAC and GPHY Control Registers (YUKON only) */ #define GMAC_CTRL 0x0f00 /* 32 bit GMAC Control Reg */ #define GPHY_CTRL 0x0f04 /* 32 bit GPHY Control Reg */ #define GMAC_IRQ_SRC 0x0f08 /* 8 bit GMAC Interrupt Source Reg */ #define GMAC_IRQ_MSK 0x0f0c /* 8 bit GMAC Interrupt Mask Reg */ #define GMAC_LINK_CTRL 0x0f10 /* 16 bit Link Control Reg */ /* Wake-up Frame Pattern Match Control Registers (YUKON only) */ #define WOL_REG_OFFS 0x20 /* HW-Bug: Address is + 0x20 against spec. */ #define WOL_CTRL_STAT 0x0f20 /* 16 bit WOL Control/Status Reg */ #define WOL_MATCH_CTL 0x0f22 /* 8 bit WOL Match Control Reg */ #define WOL_MATCH_RES 0x0f23 /* 8 bit WOL Match Result Reg */ #define WOL_MAC_ADDR_LO 0x0f24 /* 32 bit WOL MAC Address Low */ #define WOL_MAC_ADDR_HI 0x0f28 /* 16 bit WOL MAC Address High */ #define WOL_PATT_PME 0x0f2a /* 8 bit WOL PME Match Enable (Yukon-2) */ #define WOL_PATT_ASFM 0x0f2b /* 8 bit WOL ASF Match Enable (Yukon-2) */ #define WOL_PATT_RPTR 0x0f2c /* 8 bit WOL Pattern Read Pointer */ /* WOL Pattern Length Registers (YUKON only) */ #define WOL_PATT_LEN_LO 0x0f30 /* 32 bit WOL Pattern Length 3..0 */ #define WOL_PATT_LEN_HI 0x0f34 /* 24 bit WOL Pattern Length 6..4 */ /* WOL Pattern Counter Registers (YUKON only) */ #define WOL_PATT_CNT_0 0x0f38 /* 32 bit WOL Pattern Counter 3..0 */ #define WOL_PATT_CNT_4 0x0f3c /* 24 bit WOL Pattern Counter 6..4 */ /* * Bank 32 - 33 */ #define WOL_PATT_RAM_1 0x1000 /* WOL Pattern RAM Link 1 */ #define WOL_PATT_RAM_2 0x1400 /* WOL Pattern RAM Link 2 */ /* offset to configuration space on Yukon-2 */ #define Y2_CFG_SPC 0x1c00 #define BASE_GMAC_1 0x2800 /* GMAC 1 registers */ #define BASE_GMAC_2 0x3800 /* GMAC 2 registers */ /* * Control Register Bit Definitions: */ /* B0_CTST 24 bit Control/Status register */ #define Y2_VMAIN_AVAIL BIT_17 /* VMAIN available (YUKON-2 only) */ #define Y2_VAUX_AVAIL BIT_16 /* VAUX available (YUKON-2 only) */ #define Y2_HW_WOL_ON BIT_15 /* HW WOL On (Yukon-EC Ultra A1 only) */ #define Y2_HW_WOL_OFF BIT_14 /* HW WOL Off (Yukon-EC Ultra A1 only) */ #define Y2_ASF_ENABLE BIT_13 /* ASF Unit Enable (YUKON-2 only) */ #define Y2_ASF_DISABLE BIT_12 /* ASF Unit Disable (YUKON-2 only) */ #define Y2_CLK_RUN_ENA BIT_11 /* CLK_RUN Enable (YUKON-2 only) */ #define Y2_CLK_RUN_DIS BIT_10 /* CLK_RUN Disable (YUKON-2 only) */ #define Y2_LED_STAT_ON BIT_9 /* Status LED On (YUKON-2 only) */ #define Y2_LED_STAT_OFF BIT_8 /* Status LED Off (YUKON-2 only) */ #define CS_ST_SW_IRQ BIT_7 /* Set IRQ SW Request */ #define CS_CL_SW_IRQ BIT_6 /* Clear IRQ SW Request */ #define CS_STOP_DONE BIT_5 /* Stop Master is finished */ #define CS_STOP_MAST BIT_4 /* Command Bit to stop the master */ #define CS_MRST_CLR BIT_3 /* Clear Master Reset */ #define CS_MRST_SET BIT_2 /* Set Master Reset */ #define CS_RST_CLR BIT_1 /* Clear Software Reset */ #define CS_RST_SET BIT_0 /* Set Software Reset */ #define LED_STAT_ON BIT_1 /* Status LED On */ #define LED_STAT_OFF BIT_0 /* Status LED Off */ /* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ #define PC_VAUX_ENA BIT_7 /* Switch VAUX Enable */ 
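Note that the B0_CTST and B0_POWER_CTRL bits above come in set/clear pairs that are written as one-shot commands rather than kept as read-modify-write state; asserting and then releasing software reset, for instance, is two 16-bit writes. A minimal sketch, assuming the CSR_WRITE_2() accessor and struct msk_softc from the msk(4) driver (the helper name is illustrative):

static void
msk_pulse_soft_reset(struct msk_softc *sc)
{

	/* Assert the chip-wide software reset... */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	/* ...and release it again; the set/clear pair acts as a pulse. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
}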
#define PC_VAUX_DIS	BIT_6	/* Switch VAUX Disable */
#define PC_VCC_ENA	BIT_5	/* Switch VCC Enable */
#define PC_VCC_DIS	BIT_4	/* Switch VCC Disable */
#define PC_VAUX_ON	BIT_3	/* Switch VAUX On */
#define PC_VAUX_OFF	BIT_2	/* Switch VAUX Off */
#define PC_VCC_ON	BIT_1	/* Switch VCC On */
#define PC_VCC_OFF	BIT_0	/* Switch VCC Off */

/* B0_ISRC 32 bit Interrupt Source Register */
/* B0_IMSK 32 bit Interrupt Mask Register */
/* B0_SP_ISRC 32 bit Special Interrupt Source Reg */
/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
#define Y2_IS_PORT_MASK(Port, Mask)	((Mask) << ((Port) * 8))
#define Y2_IS_HW_ERR	BIT_31	/* Interrupt HW Error */
#define Y2_IS_STAT_BMU	BIT_30	/* Status BMU Interrupt */
#define Y2_IS_ASF	BIT_29	/* ASF subsystem Interrupt */
#define Y2_IS_POLL_CHK	BIT_27	/* Check IRQ from polling unit */
#define Y2_IS_TWSI_RDY	BIT_26	/* IRQ on end of TWSI Tx */
#define Y2_IS_IRQ_SW	BIT_25	/* SW forced IRQ */
#define Y2_IS_TIMINT	BIT_24	/* IRQ from Timer */
#define Y2_IS_IRQ_PHY2	BIT_12	/* Interrupt from PHY 2 */
#define Y2_IS_IRQ_MAC2	BIT_11	/* Interrupt from MAC 2 */
#define Y2_IS_CHK_RX2	BIT_10	/* Descriptor error Rx 2 */
#define Y2_IS_CHK_TXS2	BIT_9	/* Descriptor error TXS 2 */
#define Y2_IS_CHK_TXA2	BIT_8	/* Descriptor error TXA 2 */
#define Y2_IS_PSM_ACK	BIT_7	/* PSM Ack (Yukon Optima) */
#define Y2_IS_PTP_TIST	BIT_6	/* PTP Time Stamp (Yukon Optima) */
#define Y2_IS_PHY_QLNK	BIT_5	/* PHY Quick Link (Yukon Optima) */
#define Y2_IS_IRQ_PHY1	BIT_4	/* Interrupt from PHY 1 */
#define Y2_IS_IRQ_MAC1	BIT_3	/* Interrupt from MAC 1 */
#define Y2_IS_CHK_RX1	BIT_2	/* Descriptor error Rx 1 */
#define Y2_IS_CHK_TXS1	BIT_1	/* Descriptor error TXS 1 */
#define Y2_IS_CHK_TXA1	BIT_0	/* Descriptor error TXA 1 */
#define Y2_IS_L1_MASK	0x0000001f	/* IRQ Mask for port 1 */
#define Y2_IS_L2_MASK	0x00001f00	/* IRQ Mask for port 2 */
#define Y2_IS_ALL_MSK	0xef001f1f	/* All Interrupt bits */
#define Y2_IS_PORT_A \
	(Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1)
#define Y2_IS_PORT_B \
	(Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2)

/* B0_HWE_ISRC 32 bit HW Error Interrupt Src Reg */
/* B0_HWE_IMSK 32 bit HW Error Interrupt Mask Reg */
/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
#define Y2_IS_TIST_OV	BIT_29	/* Time Stamp Timer overflow interrupt */
#define Y2_IS_SENSOR	BIT_28	/* Sensor interrupt */
#define Y2_IS_MST_ERR	BIT_27	/* Master error interrupt */
#define Y2_IS_IRQ_STAT	BIT_26	/* Status exception interrupt */
#define Y2_IS_PCI_EXP	BIT_25	/* PCI-Express interrupt */
#define Y2_IS_PCI_NEXP	BIT_24	/* PCI-Express error similar to PCI error */
#define Y2_IS_PAR_RD2	BIT_13	/* Read RAM parity error interrupt */
#define Y2_IS_PAR_WR2	BIT_12	/* Write RAM parity error interrupt */
#define Y2_IS_PAR_MAC2	BIT_11	/* MAC hardware fault interrupt */
#define Y2_IS_PAR_RX2	BIT_10	/* Parity Error Rx Queue 2 */
#define Y2_IS_TCP_TXS2	BIT_9	/* TCP length mismatch sync Tx queue IRQ */
#define Y2_IS_TCP_TXA2	BIT_8	/* TCP length mismatch async Tx queue IRQ */
#define Y2_IS_PAR_RD1	BIT_5	/* Read RAM parity error interrupt */
#define Y2_IS_PAR_WR1	BIT_4	/* Write RAM parity error interrupt */
#define Y2_IS_PAR_MAC1	BIT_3	/* MAC hardware fault interrupt */
#define Y2_IS_PAR_RX1	BIT_2	/* Parity Error Rx Queue 1 */
#define Y2_IS_TCP_TXS1	BIT_1	/* TCP length mismatch sync Tx queue IRQ */
#define Y2_IS_TCP_TXA1	BIT_0	/* TCP length mismatch async Tx queue IRQ */
#define Y2_HWE_L1_MASK	(Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |\
			Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)
#define Y2_HWE_L2_MASK	(Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |\
			Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2 | Y2_IS_TCP_TXA2)
#define Y2_HWE_ALL_MSK	(Y2_IS_TIST_OV | /* Y2_IS_SENSOR | */ Y2_IS_MST_ERR |\
			Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP |\
			Y2_HWE_L1_MASK | Y2_HWE_L2_MASK)
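/*
 * Example: how the summary interrupt bits above are typically consumed.
 * This is an illustrative sketch, not part of the header: it assumes the
 * CSR_READ_4() macro defined at the end of this file, and the per-cause
 * handlers are hypothetical. Note that
 * Y2_IS_PORT_MASK(MSK_PORT_B, Y2_IS_L1_MASK) yields Y2_IS_L2_MASK, i.e.
 * the macro shifts a port 1 mask into the port 2 bit positions.
 */
#if 0
static void
msk_intr_sketch(struct msk_softc *sc)
{
	uint32_t status;

	/* B0_Y2_SP_EISR: reading it enters ISR mode and returns causes. */
	status = CSR_READ_4(sc, B0_Y2_SP_EISR);
	if ((status & Y2_IS_HW_ERR) != 0)
		msk_handle_hwerr(sc);			/* hypothetical */
	if ((status & Y2_IS_PORT_A) != 0)
		msk_handle_port(sc, MSK_PORT_A);	/* hypothetical */
	if ((status & Y2_IS_PORT_B) != 0)
		msk_handle_port(sc, MSK_PORT_B);
	/* B0_Y2_SP_LISR: read to leave ISR mode and re-arm delivery. */
	(void)CSR_READ_4(sc, B0_Y2_SP_LISR);
}
#endif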
/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
#define CFG_CHIP_R_MSK	(0x0f<<4)	/* Bit 7.. 4: Chip Revision */
#define CFG_DIS_M2_CLK	BIT_1	/* Disable Clock for 2nd MAC */
#define CFG_SNG_MAC	BIT_0	/* MAC Config: 0 = 2 MACs; 1 = 1 MAC */

/* B2_CHIP_ID 8 bit Chip Identification Number */
#define CHIP_ID_GENESIS		0x0a	/* Chip ID for GENESIS */
#define CHIP_ID_YUKON		0xb0	/* Chip ID for YUKON */
#define CHIP_ID_YUKON_LITE	0xb1	/* Chip ID for YUKON-Lite (Rev. A1-A3) */
#define CHIP_ID_YUKON_LP	0xb2	/* Chip ID for YUKON-LP */
#define CHIP_ID_YUKON_XL	0xb3	/* Chip ID for YUKON-2 XL */
#define CHIP_ID_YUKON_EC_U	0xb4	/* Chip ID for YUKON-2 EC Ultra */
#define CHIP_ID_YUKON_EX	0xb5	/* Chip ID for YUKON-2 Extreme */
#define CHIP_ID_YUKON_EC	0xb6	/* Chip ID for YUKON-2 EC */
#define CHIP_ID_YUKON_FE	0xb7	/* Chip ID for YUKON-2 FE */
#define CHIP_ID_YUKON_FE_P	0xb8	/* Chip ID for YUKON-2 FE+ */
#define CHIP_ID_YUKON_SUPR	0xb9	/* Chip ID for YUKON-2 Supreme */
#define CHIP_ID_YUKON_UL_2	0xba	/* Chip ID for YUKON-2 Ultra 2 */
#define CHIP_ID_YUKON_UNKNOWN	0xbb
#define CHIP_ID_YUKON_OPT	0xbc	/* Chip ID for YUKON-2 Optima */
#define CHIP_REV_YU_XL_A0	0	/* Chip Rev. for Yukon-2 A0 */
#define CHIP_REV_YU_XL_A1	1	/* Chip Rev. for Yukon-2 A1 */
#define CHIP_REV_YU_XL_A2	2	/* Chip Rev. for Yukon-2 A2 */
#define CHIP_REV_YU_XL_A3	3	/* Chip Rev. for Yukon-2 A3 */
#define CHIP_REV_YU_EC_A1	0	/* Chip Rev. for Yukon-EC A1/A0 */
#define CHIP_REV_YU_EC_A2	1	/* Chip Rev. for Yukon-EC A2 */
#define CHIP_REV_YU_EC_A3	2	/* Chip Rev. for Yukon-EC A3 */
#define CHIP_REV_YU_EC_U_A0	1
#define CHIP_REV_YU_EC_U_A1	2
#define CHIP_REV_YU_FE_P_A0	0	/* Chip Rev. for Yukon-2 FE+ A0 */
#define CHIP_REV_YU_EX_A0	1	/* Chip Rev. for Yukon-2 EX A0 */
#define CHIP_REV_YU_EX_B0	2	/* Chip Rev. for Yukon-2 EX B0 */
#define CHIP_REV_YU_SU_A0	0	/* Chip Rev. for Yukon-2 SUPR A0 */
#define CHIP_REV_YU_SU_B0	1	/* Chip Rev. for Yukon-2 SUPR B0 */
#define CHIP_REV_YU_SU_B1	3	/* Chip Rev. for Yukon-2 SUPR B1 */

/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
#define Y2_STATUS_LNK2_INAC	BIT_7	/* Status Link 2 inactive (0 = active) */
#define Y2_CLK_GAT_LNK2_DIS	BIT_6	/* Disable clock gating Link 2 */
#define Y2_COR_CLK_LNK2_DIS	BIT_5	/* Disable Core clock Link 2 */
#define Y2_PCI_CLK_LNK2_DIS	BIT_4	/* Disable PCI clock Link 2 */
#define Y2_STATUS_LNK1_INAC	BIT_3	/* Status Link 1 inactive (0 = active) */
#define Y2_CLK_GAT_LNK1_DIS	BIT_2	/* Disable clock gating Link 1 */
#define Y2_COR_CLK_LNK1_DIS	BIT_1	/* Disable Core clock Link 1 */
#define Y2_PCI_CLK_LNK1_DIS	BIT_0	/* Disable PCI clock Link 1 */

/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */
#define CFG_LED_MODE_MSK	(0x07<<2)	/* Bit 4..
2: LED Mode Mask */ #define CFG_LINK_2_AVAIL BIT_1 /* Link 2 available */ #define CFG_LINK_1_AVAIL BIT_0 /* Link 1 available */ #define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2) #define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL) /* B2_E_3 8 bit lower 4 bits used for HW self test result */ #define B2_E3_RES_MASK 0x0f /* B2_Y2_CLK_CTRL 32 bit Core Clock Frequency Control Register (Yukon-2/EC) */ /* Yukon-EC/FE */ #define Y2_CLK_DIV_VAL_MSK (0xff<<16) /* Bit 23..16: Clock Divisor Value */ #define Y2_CLK_DIV_VAL(x) (SHIFT16(x) & Y2_CLK_DIV_VAL_MSK) /* Yukon-2 */ #define Y2_CLK_DIV_VAL2_MSK (0x07<<21) /* Bit 23..21: Clock Divisor Value */ #define Y2_CLK_SELECT2_MSK (0x1f<<16) /* Bit 20..16: Clock Select */ #define Y2_CLK_DIV_VAL_2(x) (SHIFT21(x) & Y2_CLK_DIV_VAL2_MSK) #define Y2_CLK_SEL_VAL_2(x) (SHIFT16(x) & Y2_CLK_SELECT2_MSK) #define Y2_CLK_DIV_ENA BIT_1 /* Enable Core Clock Division */ #define Y2_CLK_DIV_DIS BIT_0 /* Disable Core Clock Division */ /* B2_TI_CTRL 8 bit Timer control */ /* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ #define TIM_START BIT_2 /* Start Timer */ #define TIM_STOP BIT_1 /* Stop Timer */ #define TIM_CLR_IRQ BIT_0 /* Clear Timer IRQ (!IRQM) */ /* B2_TI_TEST 8 Bit Timer Test */ /* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ /* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ #define TIM_T_ON BIT_2 /* Test mode on */ #define TIM_T_OFF BIT_1 /* Test mode off */ #define TIM_T_STEP BIT_0 /* Test step */ /* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */ /* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */ #define DPT_MSK 0x00ffffff /* Bit 23.. 0: Desc Poll Timer Bits */ /* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ #define DPT_START BIT_1 /* Start Descriptor Poll Timer */ #define DPT_STOP BIT_0 /* Stop Descriptor Poll Timer */ /* B2_TST_CTRL1 8 bit Test Control Register 1 */ #define TST_FRC_DPERR_MR BIT_7 /* force DATAPERR on MST RD */ #define TST_FRC_DPERR_MW BIT_6 /* force DATAPERR on MST WR */ #define TST_FRC_DPERR_TR BIT_5 /* force DATAPERR on TRG RD */ #define TST_FRC_DPERR_TW BIT_4 /* force DATAPERR on TRG WR */ #define TST_FRC_APERR_M BIT_3 /* force ADDRPERR on MST */ #define TST_FRC_APERR_T BIT_2 /* force ADDRPERR on TRG */ #define TST_CFG_WRITE_ON BIT_1 /* Enable Config Reg WR */ #define TST_CFG_WRITE_OFF BIT_0 /* Disable Config Reg WR */ /* B2_GP_IO */ #define GLB_GPIO_CLK_DEB_ENA BIT_31 /* Clock Debug Enable */ #define GLB_GPIO_CLK_DBG_MSK 0x3c000000 /* Clock Debug */ #define GLB_GPIO_INT_RST_D3_DIS BIT_15 /* Disable Internal Reset After D3 to D0 */ #define GLB_GPIO_LED_PAD_SPEED_UP BIT_14 /* LED PAD Speed Up */ #define GLB_GPIO_STAT_RACE_DIS BIT_13 /* Status Race Disable */ #define GLB_GPIO_TEST_SEL_MSK 0x00001800 /* Testmode Select */ #define GLB_GPIO_TEST_SEL_BASE BIT_11 #define GLB_GPIO_RAND_ENA BIT_10 /* Random Enable */ #define GLB_GPIO_RAND_BIT_1 BIT_9 /* Random Bit 1 */ /* B2_I2C_CTRL 32 bit I2C HW Control Register */ #define I2C_FLAG BIT_31 /* Start read/write if WR */ #define I2C_ADDR (0x7fff<<16) /* Bit 30..16: Addr to be RD/WR */ #define I2C_DEV_SEL (0x7f<<9) /* Bit 15.. 9: I2C Device Select */ #define I2C_BURST_LEN BIT_4 /* Burst Len, 1/4 bytes */ #define I2C_DEV_SIZE (7<<1) /* Bit 3.. 1: I2C Device Size */ #define I2C_025K_DEV (0<<1) /* 0: 256 Bytes or smal. 
*/ #define I2C_05K_DEV (1<<1) /* 1: 512 Bytes */ #define I2C_1K_DEV (2<<1) /* 2: 1024 Bytes */ #define I2C_2K_DEV (3<<1) /* 3: 2048 Bytes */ #define I2C_4K_DEV (4<<1) /* 4: 4096 Bytes */ #define I2C_8K_DEV (5<<1) /* 5: 8192 Bytes */ #define I2C_16K_DEV (6<<1) /* 6: 16384 Bytes */ #define I2C_32K_DEV (7<<1) /* 7: 32768 Bytes */ #define I2C_STOP BIT_0 /* Interrupt I2C transfer */ /* B2_I2C_IRQ 32 bit I2C HW IRQ Register */ #define I2C_CLR_IRQ BIT_0 /* Clear I2C IRQ */ /* B2_I2C_SW 32 bit (8 bit access) I2C HW SW Port Register */ #define I2C_DATA_DIR BIT_2 /* direction of I2C_DATA */ #define I2C_DATA BIT_1 /* I2C Data Port */ #define I2C_CLK BIT_0 /* I2C Clock Port */ /* I2C Address */ #define I2C_SENS_ADDR LM80_ADDR /* I2C Sensor Address (Volt and Temp) */ /* B2_BSC_CTRL 8 bit Blink Source Counter Control */ #define BSC_START BIT_1 /* Start Blink Source Counter */ #define BSC_STOP BIT_0 /* Stop Blink Source Counter */ /* B2_BSC_STAT 8 bit Blink Source Counter Status */ #define BSC_SRC BIT_0 /* Blink Source, 0=Off / 1=On */ /* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ #define BSC_T_ON BIT_2 /* Test mode on */ #define BSC_T_OFF BIT_1 /* Test mode off */ #define BSC_T_STEP BIT_0 /* Test step */ /* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */ #define PEX_RD_ACCESS BIT_31 /* Access Mode Read = 1, Write = 0 */ #define PEX_DB_ACCESS BIT_30 /* Access to debug register */ /* B3_RAM_ADDR 32 bit RAM Address, to read or write */ #define RAM_ADR_RAN 0x0007ffff /* Bit 18.. 0: RAM Address Range */ /* RAM Interface Registers */ /* B3_RI_CTRL 16 bit RAM Interface Control Register */ #define RI_CLR_RD_PERR BIT_9 /* Clear IRQ RAM Read Parity Err */ #define RI_CLR_WR_PERR BIT_8 /* Clear IRQ RAM Write Parity Err */ #define RI_RST_CLR BIT_1 /* Clear RAM Interface Reset */ #define RI_RST_SET BIT_0 /* Set RAM Interface Reset */ #define MSK_RI_TO_53 36 /* RAM interface timeout */ /* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ /* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ /* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ /* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ /* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ #define TXA_MAX_VAL 0x00ffffff/* Bit 23.. 0: Max TXA Timer/Cnt Val */ /* TXA_CTRL 8 bit Tx Arbiter Control Register */ #define TXA_ENA_FSYNC BIT_7 /* Enable force of sync Tx queue */ #define TXA_DIS_FSYNC BIT_6 /* Disable force of sync Tx queue */ #define TXA_ENA_ALLOC BIT_5 /* Enable alloc of free bandwidth */ #define TXA_DIS_ALLOC BIT_4 /* Disable alloc of free bandwidth */ #define TXA_START_RC BIT_3 /* Start sync Rate Control */ #define TXA_STOP_RC BIT_2 /* Stop sync Rate Control */ #define TXA_ENA_ARB BIT_1 /* Enable Tx Arbiter */ #define TXA_DIS_ARB BIT_0 /* Disable Tx Arbiter */ /* TXA_TEST 8 bit Tx Arbiter Test Register */ #define TXA_INT_T_ON BIT_5 /* Tx Arb Interval Timer Test On */ #define TXA_INT_T_OFF BIT_4 /* Tx Arb Interval Timer Test Off */ #define TXA_INT_T_STEP BIT_3 /* Tx Arb Interval Timer Step */ #define TXA_LIM_T_ON BIT_2 /* Tx Arb Limit Timer Test On */ #define TXA_LIM_T_OFF BIT_1 /* Tx Arb Limit Timer Test Off */ #define TXA_LIM_T_STEP BIT_0 /* Tx Arb Limit Timer Step */ /* TXA_STAT 8 bit Tx Arbiter Status Register */ #define TXA_PRIO_XS BIT_0 /* sync queue has prio to send */ /* Q_BC 32 bit Current Byte Counter */ #define BC_MAX 0xffff /* Bit 15.. 
0: Byte counter */ /* Rx BMU Control / Status Registers (Yukon-2) */ #define BMU_IDLE BIT_31 /* BMU Idle State */ #define BMU_RX_TCP_PKT BIT_30 /* Rx TCP Packet (when RSS Hash enabled) */ #define BMU_RX_IP_PKT BIT_29 /* Rx IP Packet (when RSS Hash enabled) */ #define BMU_ENA_RX_RSS_HASH BIT_15 /* Enable Rx RSS Hash */ #define BMU_DIS_RX_RSS_HASH BIT_14 /* Disable Rx RSS Hash */ #define BMU_ENA_RX_CHKSUM BIT_13 /* Enable Rx TCP/IP Checksum Check */ #define BMU_DIS_RX_CHKSUM BIT_12 /* Disable Rx TCP/IP Checksum Check */ #define BMU_CLR_IRQ_PAR BIT_11 /* Clear IRQ on Parity errors (Rx) */ #define BMU_CLR_IRQ_TCP BIT_11 /* Clear IRQ on TCP segmen. error (Tx) */ #define BMU_CLR_IRQ_CHK BIT_10 /* Clear IRQ Check */ #define BMU_STOP BIT_9 /* Stop Rx/Tx Queue */ #define BMU_START BIT_8 /* Start Rx/Tx Queue */ #define BMU_FIFO_OP_ON BIT_7 /* FIFO Operational On */ #define BMU_FIFO_OP_OFF BIT_6 /* FIFO Operational Off */ #define BMU_FIFO_ENA BIT_5 /* Enable FIFO */ #define BMU_FIFO_RST BIT_4 /* Reset FIFO */ #define BMU_OP_ON BIT_3 /* BMU Operational On */ #define BMU_OP_OFF BIT_2 /* BMU Operational Off */ #define BMU_RST_CLR BIT_1 /* Clear BMU Reset (Enable) */ #define BMU_RST_SET BIT_0 /* Set BMU Reset */ #define BMU_CLR_RESET (BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR) #define BMU_OPER_INIT (BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | \ BMU_START | BMU_FIFO_ENA | BMU_OP_ON) /* Tx BMU Control / Status Registers (Yukon-2) */ /* Bit 31: same as for Rx */ #define BMU_TX_IPIDINCR_ON BIT_13 /* Enable IP ID Increment */ #define BMU_TX_IPIDINCR_OFF BIT_12 /* Disable IP ID Increment */ #define BMU_TX_CLR_IRQ_TCP BIT_11 /* Clear IRQ on TCP segm. length mism. */ /* Bit 10..0: same as for Rx */ /* Q_F 32 bit Flag Register */ #define F_TX_CHK_AUTO_OFF BIT_31 /* Tx checksum auto-calc Off(Yukon EX)*/ #define F_TX_CHK_AUTO_ON BIT_30 /* Tx checksum auto-calc On(Yukon EX)*/ #define F_ALM_FULL BIT_28 /* Rx FIFO: almost full */ #define F_EMPTY BIT_27 /* Tx FIFO: empty flag */ #define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */ #define F_WM_REACHED BIT_25 /* Watermark reached */ #define F_M_RX_RAM_DIS BIT_24 /* MAC Rx RAM Read Port disable */ #define F_FIFO_LEVEL (0x1f<<16) /* Bit 23..16: # of Qwords in FIFO */ #define F_WATER_MARK 0x0007ff/* Bit 10.. 0: Watermark */ /* Queue Prefetch Unit Offsets, use Y2_PREF_Q_ADDR() to address (Yukon-2 only)*/ /* PREF_UNIT_CTRL_REG 32 bit Prefetch Control register */ #define PREF_UNIT_OP_ON BIT_3 /* prefetch unit operational */ #define PREF_UNIT_OP_OFF BIT_2 /* prefetch unit not operational */ #define PREF_UNIT_RST_CLR BIT_1 /* Clear Prefetch Unit Reset */ #define PREF_UNIT_RST_SET BIT_0 /* Set Prefetch Unit Reset */ /* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ /* RB_START 32 bit RAM Buffer Start Address */ /* RB_END 32 bit RAM Buffer End Address */ /* RB_WP 32 bit RAM Buffer Write Pointer */ /* RB_RP 32 bit RAM Buffer Read Pointer */ /* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ /* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ /* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ /* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ /* RB_PC 32 bit RAM Buffer Packet Counter */ /* RB_LEV 32 bit RAM Buffer Level Register */ #define RB_MSK 0x0007ffff /* Bit 18.. 
0: RAM Buffer Pointer Bits */ /* RB_TST2 8 bit RAM Buffer Test Register 2 */ #define RB_PC_DEC BIT_3 /* Packet Counter Decrement */ #define RB_PC_T_ON BIT_2 /* Packet Counter Test On */ #define RB_PC_T_OFF BIT_1 /* Packet Counter Test Off */ #define RB_PC_INC BIT_0 /* Packet Counter Increment */ /* RB_TST1 8 bit RAM Buffer Test Register 1 */ #define RB_WP_T_ON BIT_6 /* Write Pointer Test On */ #define RB_WP_T_OFF BIT_5 /* Write Pointer Test Off */ #define RB_WP_INC BIT_4 /* Write Pointer Increment */ #define RB_RP_T_ON BIT_2 /* Read Pointer Test On */ #define RB_RP_T_OFF BIT_1 /* Read Pointer Test Off */ #define RB_RP_INC BIT_0 /* Read Pointer Increment */ /* RB_CTRL 8 bit RAM Buffer Control Register */ #define RB_ENA_STFWD BIT_5 /* Enable Store & Forward */ #define RB_DIS_STFWD BIT_4 /* Disable Store & Forward */ #define RB_ENA_OP_MD BIT_3 /* Enable Operation Mode */ #define RB_DIS_OP_MD BIT_2 /* Disable Operation Mode */ #define RB_RST_CLR BIT_1 /* Clear RAM Buf STM Reset */ #define RB_RST_SET BIT_0 /* Set RAM Buf STM Reset */ /* RAM Buffer High Pause Threshold values */ #define MSK_RB_ULPP (8 * 1024) /* Upper Level in kB/8 */ #define MSK_RB_LLPP_S (10 * 1024) /* Lower Level for small Queues */ #define MSK_RB_LLPP_B (16 * 1024) /* Lower Level for big Queues */ /* Threshold values for Yukon-EC Ultra */ #define MSK_ECU_ULPP 0x0080 /* Upper Pause Threshold (multiples of 8) */ #define MSK_ECU_LLPP 0x0060 /* Lower Pause Threshold (multiples of 8) */ #define MSK_ECU_AE_THR 0x0070 /* Almost Empty Threshold */ #define MSK_ECU_TXFF_LEV 0x01a0 /* Tx BMU FIFO Level */ #define MSK_ECU_JUMBO_WM 0x01 #define MSK_BMU_RX_WM 0x600 /* BMU Rx Watermark */ #define MSK_BMU_TX_WM 0x600 /* BMU Tx Watermark */ /* performance sensitive drivers should set this define to 0x80 */ #define MSK_BMU_RX_WM_PEX 0x600 /* BMU Rx Watermark for PEX */ /* Receive and Transmit Queues */ #define Q_R1 0x0000 /* Receive Queue 1 */ #define Q_R2 0x0080 /* Receive Queue 2 */ #define Q_XS1 0x0200 /* Synchronous Transmit Queue 1 */ #define Q_XA1 0x0280 /* Asynchronous Transmit Queue 1 */ #define Q_XS2 0x0300 /* Synchronous Transmit Queue 2 */ #define Q_XA2 0x0380 /* Asynchronous Transmit Queue 2 */ #define Q_ASF_R1 0x100 /* ASF Rx Queue 1 */ #define Q_ASF_R2 0x180 /* ASF Rx Queue 2 */ #define Q_ASF_T1 0x140 /* ASF Tx Queue 1 */ #define Q_ASF_T2 0x1c0 /* ASF Tx Queue 2 */ #define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs)) /* Minimum RAM Buffer Rx Queue Size */ #define MSK_MIN_RXQ_SIZE 10 /* Minimum RAM Buffer Tx Queue Size */ #define MSK_MIN_TXQ_SIZE 10 /* Percentage of queue size from whole memory. 
80 % for receive */
#define MSK_RAM_QUOTA_RX	80

/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
#define WOL_CTL_LINK_CHG_OCC		BIT_15
#define WOL_CTL_MAGIC_PKT_OCC		BIT_14
#define WOL_CTL_PATTERN_OCC		BIT_13
#define WOL_CTL_CLEAR_RESULT		BIT_12
#define WOL_CTL_ENA_PME_ON_LINK_CHG	BIT_11
#define WOL_CTL_DIS_PME_ON_LINK_CHG	BIT_10
#define WOL_CTL_ENA_PME_ON_MAGIC_PKT	BIT_9
#define WOL_CTL_DIS_PME_ON_MAGIC_PKT	BIT_8
#define WOL_CTL_ENA_PME_ON_PATTERN	BIT_7
#define WOL_CTL_DIS_PME_ON_PATTERN	BIT_6
#define WOL_CTL_ENA_LINK_CHG_UNIT	BIT_5
#define WOL_CTL_DIS_LINK_CHG_UNIT	BIT_4
#define WOL_CTL_ENA_MAGIC_PKT_UNIT	BIT_3
#define WOL_CTL_DIS_MAGIC_PKT_UNIT	BIT_2
#define WOL_CTL_ENA_PATTERN_UNIT	BIT_1
#define WOL_CTL_DIS_PATTERN_UNIT	BIT_0

#define WOL_CTL_DEFAULT \
	(WOL_CTL_DIS_PME_ON_LINK_CHG | \
	WOL_CTL_DIS_PME_ON_PATTERN | \
	WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
	WOL_CTL_DIS_LINK_CHG_UNIT | \
	WOL_CTL_DIS_PATTERN_UNIT | \
	WOL_CTL_DIS_MAGIC_PKT_UNIT)

/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
#define WOL_CTL_PATT_ENA(x)	(BIT_0 << (x))

/* WOL_PATT_PME 8 bit WOL PME Match Enable (Yukon-2) */
#define WOL_PATT_FORCE_PME	BIT_7	/* Generates a PME */
#define WOL_PATT_MATCH_PME_ALL	0x7f

/*
 * Marvell PHY Registers, indirect addressed over GMAC
 */
#define PHY_MARV_CTRL		0x00	/* 16 bit r/w PHY Control Register */
#define PHY_MARV_STAT		0x01	/* 16 bit r/o PHY Status Register */
#define PHY_MARV_ID0		0x02	/* 16 bit r/o PHY ID0 Register */
#define PHY_MARV_ID1		0x03	/* 16 bit r/o PHY ID1 Register */
#define PHY_MARV_AUNE_ADV	0x04	/* 16 bit r/w Auto-Neg. Advertisement */
#define PHY_MARV_AUNE_LP	0x05	/* 16 bit r/o Link Part Ability Reg */
#define PHY_MARV_AUNE_EXP	0x06	/* 16 bit r/o Auto-Neg. Expansion Reg */
#define PHY_MARV_NEPG		0x07	/* 16 bit r/w Next Page Register */
#define PHY_MARV_NEPG_LP	0x08	/* 16 bit r/o Next Page Link Partner */
/* Marvell-specific registers */
#define PHY_MARV_1000T_CTRL	0x09	/* 16 bit r/w 1000Base-T Control Reg */
#define PHY_MARV_1000T_STAT	0x0a	/* 16 bit r/o 1000Base-T Status Reg */
/* 0x0b - 0x0e: reserved */
#define PHY_MARV_EXT_STAT	0x0f	/* 16 bit r/o Extended Status Reg */
#define PHY_MARV_PHY_CTRL	0x10	/* 16 bit r/w PHY Specific Control Reg */
#define PHY_MARV_PHY_STAT	0x11	/* 16 bit r/o PHY Specific Status Reg */
#define PHY_MARV_INT_MASK	0x12	/* 16 bit r/w Interrupt Mask Reg */
#define PHY_MARV_INT_STAT	0x13	/* 16 bit r/o Interrupt Status Reg */
#define PHY_MARV_EXT_CTRL	0x14	/* 16 bit r/w Ext. PHY Specific Ctrl */
#define PHY_MARV_RXE_CNT	0x15	/* 16 bit r/w Receive Error Counter */
#define PHY_MARV_EXT_ADR	0x16	/* 16 bit r/w Ext. Ad. for Cable Diag. */
#define PHY_MARV_PORT_IRQ	0x17	/* 16 bit r/o Port 0 IRQ (88E1111 only) */
#define PHY_MARV_LED_CTRL	0x18	/* 16 bit r/w LED Control Reg */
#define PHY_MARV_LED_OVER	0x19	/* 16 bit r/w Manual LED Override Reg */
#define PHY_MARV_EXT_CTRL_2	0x1a	/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
#define PHY_MARV_EXT_P_STAT	0x1b	/* 16 bit r/w Ext. PHY Spec. Stat Reg */
#define PHY_MARV_CABLE_DIAG	0x1c	/* 16 bit r/o Cable Diagnostic Reg */
#define PHY_MARV_PAGE_ADDR	0x1d	/* 16 bit r/w Extended Page Address Reg */
#define PHY_MARV_PAGE_DATA	0x1e	/* 16 bit r/w Extended Page Data Reg */
/* for 10/100 Fast Ethernet PHY (88E3082 only) */
#define PHY_MARV_FE_LED_PAR	0x16	/* 16 bit r/w LED Parallel Select Reg. */
#define PHY_MARV_FE_LED_SER	0x17	/* 16 bit r/w LED Stream Select S. LED */
#define PHY_MARV_FE_VCT_TX	0x1a	/* 16 bit r/w VCT Reg. for TXP/N Pins */
#define PHY_MARV_FE_VCT_RX	0x1b	/* 16 bit r/o VCT Reg. for RXP/N Pins */
#define PHY_MARV_FE_SPEC_2	0x1c	/* 16 bit r/w Specific Control Reg. 2 */
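/*
 * Example: how these PHY registers are reached. They are not memory
 * mapped; the driver goes through the GMAC SMI registers (GM_SMI_CTRL /
 * GM_SMI_DATA, defined further below). A minimal, illustrative read
 * sketch -- gmac_read_2()/gmac_write_2() are hypothetical stand-ins for
 * whatever 16 bit GMAC accessors the driver provides.
 */
#if 0
static uint16_t
marv_phy_read_sketch(struct msk_if_softc *sc_if, int phy, int reg)
{
	int i;

	/* Post the read: PHY address, register address, read opcode. */
	gmac_write_2(sc_if, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
	for (i = 0; i < 100; i++) {
		/* Wait until the read completes (GM_SMI_CT_RD_VAL set). */
		if ((gmac_read_2(sc_if, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) != 0)
			return (gmac_read_2(sc_if, GM_SMI_DATA));
		DELAY(10);
	}
	return (0xffff);	/* timed out */
}
#endif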
#define PHY_CT_RESET	(1<<15)	/* Bit 15: (sc) clear all PHY related regs */
#define PHY_CT_LOOP	(1<<14)	/* Bit 14: enable Loopback over PHY */
#define PHY_CT_SPS_LSB	(1<<13)	/* Bit 13: Speed select, lower bit */
#define PHY_CT_ANE	(1<<12)	/* Bit 12: Auto-Negotiation Enabled */
#define PHY_CT_PDOWN	(1<<11)	/* Bit 11: Power Down Mode */
#define PHY_CT_ISOL	(1<<10)	/* Bit 10: Isolate Mode */
#define PHY_CT_RE_CFG	(1<<9)	/* Bit 9: (sc) Restart Auto-Negotiation */
#define PHY_CT_DUP_MD	(1<<8)	/* Bit 8: Duplex Mode */
#define PHY_CT_COL_TST	(1<<7)	/* Bit 7: Collision Test enabled */
#define PHY_CT_SPS_MSB	(1<<6)	/* Bit 6: Speed select, upper bit */
#define PHY_CT_SP1000	PHY_CT_SPS_MSB	/* enable speed of 1000 Mbps */
#define PHY_CT_SP100	PHY_CT_SPS_LSB	/* enable speed of 100 Mbps */
#define PHY_CT_SP10	(0)		/* enable speed of 10 Mbps */

#define PHY_ST_EXT_ST	(1<<8)	/* Bit 8: Extended Status Present */
#define PHY_ST_PRE_SUP	(1<<6)	/* Bit 6: Preamble Suppression */
#define PHY_ST_AN_OVER	(1<<5)	/* Bit 5: Auto-Negotiation Over */
#define PHY_ST_REM_FLT	(1<<4)	/* Bit 4: Remote Fault Condition Occurred */
#define PHY_ST_AN_CAP	(1<<3)	/* Bit 3: Auto-Negotiation Capability */
#define PHY_ST_LSYNC	(1<<2)	/* Bit 2: Link Synchronized */
#define PHY_ST_JAB_DET	(1<<1)	/* Bit 1: Jabber Detected */
#define PHY_ST_EXT_REG	(1<<0)	/* Bit 0: Extended Register available */

#define PHY_I1_OUI_MSK	(0x3f<<10)	/* Bit 15..10: Organization Unique ID */
#define PHY_I1_MOD_NUM	(0x3f<<4)	/* Bit 9.. 4: Model Number */
#define PHY_I1_REV_MSK	0xf		/* Bit 3.. 0: Revision Number */

/* different Marvell PHY IDs */
#define PHY_MARV_ID0_VAL	0x0141	/* Marvell Unique Identifier */
#define PHY_MARV_ID1_B0		0x0C23	/* Yukon (PHY 88E1011) */
#define PHY_MARV_ID1_B2		0x0C25	/* Yukon-Plus (PHY 88E1011) */
#define PHY_MARV_ID1_C2		0x0CC2	/* Yukon-EC (PHY 88E1111) */
#define PHY_MARV_ID1_Y2		0x0C91	/* Yukon-2 (PHY 88E1112) */
#define PHY_MARV_ID1_FE		0x0C83	/* Yukon-FE (PHY 88E3082 Rev.A1) */
#define PHY_MARV_ID1_ECU	0x0CB0	/* Yukon-2 (PHY 88E1149 Rev.B2?) */

/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
#define PHY_B_1000S_MSF		(1<<15)	/* Bit 15: Master/Slave Fault */
#define PHY_B_1000S_MSR		(1<<14)	/* Bit 14: Master/Slave Result */
#define PHY_B_1000S_LRS		(1<<13)	/* Bit 13: Local Receiver Status */
#define PHY_B_1000S_RRS		(1<<12)	/* Bit 12: Remote Receiver Status */
#define PHY_B_1000S_LP_FD	(1<<11)	/* Bit 11: Link Partner can FD */
#define PHY_B_1000S_LP_HD	(1<<10)	/* Bit 10: Link Partner can HD */
#define PHY_B_1000S_IEC		0xff	/* Bit 7..0: Idle Error Count */

/***** PHY_MARV_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
/***** PHY_MARV_AUNE_LP 16 bit r/w Link Part Ability Reg *****/
#define PHY_M_AN_NXT_PG	BIT_15	/* Request Next Page */
#define PHY_M_AN_ACK	BIT_14	/* (ro) Acknowledge Received */
#define PHY_M_AN_RF	BIT_13	/* Remote Fault */
#define PHY_M_AN_ASP	BIT_11	/* Asymmetric Pause */
#define PHY_M_AN_PC	BIT_10	/* MAC Pause implemented */
#define PHY_M_AN_100_T4	BIT_9	/* Not cap. 100Base-T4 (always 0) */
#define PHY_M_AN_100_FD	BIT_8	/* Advertise 100Base-TX Full Duplex */
#define PHY_M_AN_100_HD	BIT_7	/* Advertise 100Base-TX Half Duplex */
#define PHY_M_AN_10_FD	BIT_6	/* Advertise 10Base-T Full Duplex */
#define PHY_M_AN_10_HD	BIT_5	/* Advertise 10Base-T Half Duplex */
#define PHY_M_AN_SEL_MSK	(0x1f<<4)	/* Bit 4.. 0: Selector Field Mask */
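/*
 * Example: building an auto-negotiation advertisement word from the
 * PHY_M_AN_* bits above and restarting negotiation. Illustrative only;
 * marv_phy_write_sketch() is the hypothetical write counterpart of the
 * SMI read sketch shown earlier.
 */
#if 0
static void
marv_an_adv_sketch(struct msk_if_softc *sc_if, int phy)
{
	uint16_t adv;

	/* Advertise 10/100 full and half duplex plus MAC pause. */
	adv = PHY_M_AN_100_FD | PHY_M_AN_100_HD |
	    PHY_M_AN_10_FD | PHY_M_AN_10_HD | PHY_M_AN_PC;
	marv_phy_write_sketch(sc_if, phy, PHY_MARV_AUNE_ADV, adv);
	/* Enable and restart auto-negotiation via the control register. */
	marv_phy_write_sketch(sc_if, phy, PHY_MARV_CTRL,
	    PHY_CT_ANE | PHY_CT_RE_CFG);
}
#endif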
/* special defines for FIBER (88E1011S only) */
#define PHY_M_AN_ASP_X		BIT_8	/* Asymmetric Pause */
#define PHY_M_AN_PC_X		BIT_7	/* MAC Pause implemented */
#define PHY_M_AN_1000X_AHD	BIT_6	/* Advertise 1000Base-X Half Duplex */
#define PHY_M_AN_1000X_AFD	BIT_5	/* Advertise 1000Base-X Full Duplex */

/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
#define PHY_M_P_NO_PAUSE_X	(0<<7)	/* Bit 8.. 7: no Pause Mode */
#define PHY_M_P_SYM_MD_X	(1<<7)	/* Bit 8.. 7: symmetric Pause Mode */
#define PHY_M_P_ASYM_MD_X	(2<<7)	/* Bit 8.. 7: asymmetric Pause Mode */
#define PHY_M_P_BOTH_MD_X	(3<<7)	/* Bit 8.. 7: both Pause Mode */

/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
#define PHY_M_1000C_TEST	(7<<13)	/* Bit 15..13: Test Modes */
#define PHY_M_1000C_MSE	BIT_12	/* Manual Master/Slave Enable */
#define PHY_M_1000C_MSC	BIT_11	/* M/S Configuration (1=Master) */
#define PHY_M_1000C_MPD	BIT_10	/* Multi-Port Device */
#define PHY_M_1000C_AFD	BIT_9	/* Advertise Full Duplex */
#define PHY_M_1000C_AHD	BIT_8	/* Advertise Half Duplex */

/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
#define PHY_M_PC_TX_FFD_MSK	(3<<14)	/* Bit 15..14: Tx FIFO Depth Mask */
#define PHY_M_PC_RX_FFD_MSK	(3<<12)	/* Bit 13..12: Rx FIFO Depth Mask */
#define PHY_M_PC_ASS_CRS_TX	BIT_11	/* Assert CRS on Transmit */
#define PHY_M_PC_FL_GOOD	BIT_10	/* Force Link Good */
#define PHY_M_PC_EN_DET_MSK	(3<<8)	/* Bit 9.. 8: Energy Detect Mask */
#define PHY_M_PC_ENA_EXT_D	BIT_7	/* Enable Ext. Distance (10BT) */
#define PHY_M_PC_MDIX_MSK	(3<<5)	/* Bit 6.. 5: MDI/MDIX Config. Mask */
#define PHY_M_PC_DIS_125CLK	BIT_4	/* Disable 125 CLK */
#define PHY_M_PC_MAC_POW_UP	BIT_3	/* MAC Power up */
#define PHY_M_PC_SQE_T_ENA	BIT_2	/* SQE Test Enabled */
#define PHY_M_PC_POL_R_DIS	BIT_1	/* Polarity Reversal Disabled */
#define PHY_M_PC_DIS_JABBER	BIT_0	/* Disable Jabber */

#define PHY_M_PC_EN_DET		SHIFT8(2)	/* Energy Detect (Mode 1) */
#define PHY_M_PC_EN_DET_PLUS	SHIFT8(3)	/* Energy Detect Plus (Mode 2) */

#define PHY_M_PC_MDI_XMODE(x)	(SHIFT5(x) & PHY_M_PC_MDIX_MSK)
#define PHY_M_PC_MAN_MDI	0	/* 00 = Manual MDI configuration */
#define PHY_M_PC_MAN_MDIX	1	/* 01 = Manual MDIX configuration */
#define PHY_M_PC_ENA_AUTO	3	/* 11 = Enable Automatic Crossover */

/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
#define PHY_M_PC_DIS_LINK_P	BIT_15	/* Disable Link Pulses */
#define PHY_M_PC_DSC_MSK	(7<<12)	/* Bit 14..12: Downshift Counter */
#define PHY_M_PC_DOWN_S_ENA	BIT_11	/* Downshift Enable */
				/* !!! Errata in spec. (1 = disable) */
#define PHY_M_PC_DSC(x)	(SHIFT12(x) & PHY_M_PC_DSC_MSK)
					/* 000=1x; 001=2x; 010=3x; 011=4x */
					/* 100=5x; 101=6x; 110=7x; 111=8x */

/* for 10/100 Fast Ethernet PHY (88E3082 only) */
#define PHY_M_PC_ENA_DTE_DT	BIT_15	/* Enable Data Terminal Equ. (DTE) Detect */
#define PHY_M_PC_ENA_ENE_DT	BIT_14	/* Enable Energy Detect (sense & pulse) */
#define PHY_M_PC_DIS_NLP_CK	BIT_13	/* Disable Normal Link Pulse (NLP) Check */
#define PHY_M_PC_ENA_LIP_NP	BIT_12	/* Enable Link Partner Next Page Reg. */
#define PHY_M_PC_DIS_NLP_GN	BIT_11	/* Disable Normal Link Pulse Generation */
#define PHY_M_PC_DIS_SCRAMB	BIT_9	/* Disable Scrambler */
#define PHY_M_PC_DIS_FEFI	BIT_8	/* Disable Far End Fault Indic. (FEFI) */
#define PHY_M_PC_SH_TP_SEL	BIT_6	/* Shielded Twisted Pair Select */
#define PHY_M_PC_RX_FD_MSK	(3<<2)	/* Bit 3.. 2: Rx FIFO Depth Mask */
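/*
 * Example: selecting automatic MDI/MDIX crossover with the helpers above.
 * Illustrative read-modify-write of PHY_MARV_PHY_CTRL; the accessors are
 * the hypothetical ones from the earlier SMI sketch.
 */
#if 0
static void
marv_mdix_auto_sketch(struct msk_if_softc *sc_if, int phy)
{
	uint16_t pc;

	pc = marv_phy_read_sketch(sc_if, phy, PHY_MARV_PHY_CTRL);
	pc &= ~PHY_M_PC_MDIX_MSK;
	pc |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
	marv_phy_write_sketch(sc_if, phy, PHY_MARV_PHY_CTRL, pc);
}
#endif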
/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
#define PHY_M_PS_SPEED_MSK	(3<<14)	/* Bit 15..14: Speed Mask */
#define PHY_M_PS_SPEED_1000	BIT_15	/* 10 = 1000 Mbps */
#define PHY_M_PS_SPEED_100	BIT_14	/* 01 = 100 Mbps */
#define PHY_M_PS_SPEED_10	0	/* 00 = 10 Mbps */
#define PHY_M_PS_FULL_DUP	BIT_13	/* Full Duplex */
#define PHY_M_PS_PAGE_REC	BIT_12	/* Page Received */
#define PHY_M_PS_SPDUP_RES	BIT_11	/* Speed & Duplex Resolved */
#define PHY_M_PS_LINK_UP	BIT_10	/* Link Up */
#define PHY_M_PS_CABLE_MSK	(7<<7)	/* Bit 9.. 7: Cable Length Mask */
#define PHY_M_PS_MDI_X_STAT	BIT_6	/* MDI Crossover Stat (1=MDIX) */
#define PHY_M_PS_DOWNS_STAT	BIT_5	/* Downshift Status (1=downsh.) */
#define PHY_M_PS_ENDET_STAT	BIT_4	/* Energy Detect Status (1=act) */
#define PHY_M_PS_TX_P_EN	BIT_3	/* Tx Pause Enabled */
#define PHY_M_PS_RX_P_EN	BIT_2	/* Rx Pause Enabled */
#define PHY_M_PS_POL_REV	BIT_1	/* Polarity Reversed */
#define PHY_M_PS_JABBER		BIT_0	/* Jabber */

#define PHY_M_PS_PAUSE_MSK	(PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)

/* for 10/100 Fast Ethernet PHY (88E3082 only) */
#define PHY_M_PS_DTE_DETECT	BIT_15	/* Data Terminal Equipment (DTE) Detected */
#define PHY_M_PS_RES_SPEED	BIT_14	/* Resolved Speed (1=100 Mbps, 0=10 Mbps) */

/***** PHY_MARV_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
/***** PHY_MARV_INT_STAT 16 bit r/o Interrupt Status Reg *****/
#define PHY_M_IS_AN_ERROR	BIT_15	/* Auto-Negotiation Error */
#define PHY_M_IS_LSP_CHANGE	BIT_14	/* Link Speed Changed */
#define PHY_M_IS_DUP_CHANGE	BIT_13	/* Duplex Mode Changed */
#define PHY_M_IS_AN_PR		BIT_12	/* Page Received */
#define PHY_M_IS_AN_COMPL	BIT_11	/* Auto-Negotiation Completed */
#define PHY_M_IS_LST_CHANGE	BIT_10	/* Link Status Changed */
#define PHY_M_IS_SYMB_ERROR	BIT_9	/* Symbol Error */
#define PHY_M_IS_FALSE_CARR	BIT_8	/* False Carrier */
#define PHY_M_IS_FIFO_ERROR	BIT_7	/* FIFO Overflow/Underrun Error */
#define PHY_M_IS_MDI_CHANGE	BIT_6	/* MDI Crossover Changed */
#define PHY_M_IS_DOWNSH_DET	BIT_5	/* Downshift Detected */
#define PHY_M_IS_END_CHANGE	BIT_4	/* Energy Detect Changed */
#define PHY_M_IS_DTE_CHANGE	BIT_2	/* DTE Power Det. Status Changed */
#define PHY_M_IS_POL_CHANGE	BIT_1	/* Polarity Changed */
#define PHY_M_IS_JABBER		BIT_0	/* Jabber */

#define PHY_M_DEF_MSK		(PHY_M_IS_AN_ERROR | PHY_M_IS_AN_PR | \
				PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR)

/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
#define PHY_M_EC_ENA_BC_EXT	BIT_15	/* Enable Block Carr. Ext. (88E1111 only) */
#define PHY_M_EC_ENA_LIN_LB	BIT_14	/* Enable Line Loopback (88E1111 only) */
#define PHY_M_EC_DIS_LINK_P	BIT_12	/* Disable Link Pulses (88E1111 only) */
#define PHY_M_EC_M_DSC_MSK	(3<<10)	/* Bit 11..10: Master Downshift Counter */
					/* (88E1011 only) */
#define PHY_M_EC_S_DSC_MSK	(3<<8)	/* Bit 9.. 8: Slave Downshift Counter */
					/* (88E1011 only) */
#define PHY_M_EC_DSC_MSK_2	(7<<9)	/* Bit 11.. 9: Downshift Counter */
					/* (88E1111 only) */
#define PHY_M_EC_DOWN_S_ENA	BIT_8	/* Downshift Enable (88E1111 only) */
					/* !!! Errata in spec. (1 = disable) */
#define PHY_M_EC_RX_TIM_CT	BIT_7	/* RGMII Rx Timing Control */
#define PHY_M_EC_MAC_S_MSK	(7<<4)	/* Bit 6.. 4: Def. MAC interface speed */
#define PHY_M_EC_FIB_AN_ENA	BIT_3	/* Fiber Auto-Neg.
Enable (88E1011S only) */ #define PHY_M_EC_DTE_D_ENA BIT_2 /* DTE Detect Enable (88E1111 only) */ #define PHY_M_EC_TX_TIM_CT BIT_1 /* RGMII Tx Timing Control */ #define PHY_M_EC_TRANS_DIS BIT_0 /* Transmitter Disable (88E1111 only) */ #define PHY_M_EC_M_DSC(x) (SHIFT10(x) & PHY_M_EC_M_DSC_MSK) /* 00=1x; 01=2x; 10=3x; 11=4x */ #define PHY_M_EC_S_DSC(x) (SHIFT8(x) & PHY_M_EC_S_DSC_MSK) /* 00=dis; 01=1x; 10=2x; 11=3x */ #define PHY_M_EC_MAC_S(x) (SHIFT4(x) & PHY_M_EC_MAC_S_MSK) /* 01X=0; 110=2.5; 111=25 (MHz) */ #define PHY_M_EC_DSC_2(x) (SHIFT9(x) & PHY_M_EC_DSC_MSK_2) /* 000=1x; 001=2x; 010=3x; 011=4x */ /* 100=5x; 101=6x; 110=7x; 111=8x */ #define MAC_TX_CLK_0_MHZ 2 #define MAC_TX_CLK_2_5_MHZ 6 #define MAC_TX_CLK_25_MHZ 7 /***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ #define PHY_M_LEDC_DIS_LED BIT_15 /* Disable LED */ #define PHY_M_LEDC_PULS_MSK (7<<12) /* Bit 14..12: Pulse Stretch Mask */ #define PHY_M_LEDC_F_INT BIT_11 /* Force Interrupt */ #define PHY_M_LEDC_BL_R_MSK (7<<8) /* Bit 10.. 8: Blink Rate Mask */ #define PHY_M_LEDC_DP_C_LSB BIT_7 /* Duplex Control (LSB, 88E1111 only) */ #define PHY_M_LEDC_TX_C_LSB BIT_6 /* Tx Control (LSB, 88E1111 only) */ #define PHY_M_LEDC_LK_C_MSK (7<<3) /* Bit 5.. 3: Link Control Mask */ /* (88E1111 only) */ #define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4.. 3: Link Control Mask */ /* (88E1011 only) */ #define PHY_M_LEDC_DP_CTRL BIT_2 /* Duplex Control */ #define PHY_M_LEDC_DP_C_MSB BIT_2 /* Duplex Control (MSB, 88E1111 only) */ #define PHY_M_LEDC_RX_CTRL BIT_1 /* Rx Activity / Link */ #define PHY_M_LEDC_TX_CTRL BIT_0 /* Tx Activity / Link */ #define PHY_M_LEDC_TX_C_MSB BIT_0 /* Tx Control (MSB, 88E1111 only) */ #define PHY_M_LED_PULS_DUR(x) (SHIFT12(x) & PHY_M_LEDC_PULS_MSK) #define PULS_NO_STR 0 /* no pulse stretching */ #define PULS_21MS 1 /* 21 ms to 42 ms */ #define PULS_42MS 2 /* 42 ms to 84 ms */ #define PULS_84MS 3 /* 84 ms to 170 ms */ #define PULS_170MS 4 /* 170 ms to 340 ms */ #define PULS_340MS 5 /* 340 ms to 670 ms */ #define PULS_670MS 6 /* 670 ms to 1.3 s */ #define PULS_1300MS 7 /* 1.3 s to 2.7 s */ #define PHY_M_LED_BLINK_RT(x) (SHIFT8(x) & PHY_M_LEDC_BL_R_MSK) #define BLINK_42MS 0 /* 42 ms */ #define BLINK_84MS 1 /* 84 ms */ #define BLINK_170MS 2 /* 170 ms */ #define BLINK_340MS 3 /* 340 ms */ #define BLINK_670MS 4 /* 670 ms */ /***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ #define PHY_M_LED_MO_SGMII(x) SHIFT14(x) /* Bit 15..14: SGMII AN Timer */ #define PHY_M_LED_MO_DUP(x) SHIFT10(x) /* Bit 11..10: Duplex */ #define PHY_M_LED_MO_10(x) SHIFT8(x) /* Bit 9.. 8: Link 10 */ #define PHY_M_LED_MO_100(x) SHIFT6(x) /* Bit 7.. 6: Link 100 */ #define PHY_M_LED_MO_1000(x) SHIFT4(x) /* Bit 5.. 4: Link 1000 */ #define PHY_M_LED_MO_RX(x) SHIFT2(x) /* Bit 3.. 2: Rx */ #define PHY_M_LED_MO_TX(x) SHIFT0(x) /* Bit 1.. 0: Tx */ #define MO_LED_NORM 0 #define MO_LED_BLINK 1 #define MO_LED_OFF 2 #define MO_LED_ON 3 /***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ #define PHY_M_EC2_FI_IMPED BIT_6 /* Fiber Input Impedance */ #define PHY_M_EC2_FO_IMPED BIT_5 /* Fiber Output Impedance */ #define PHY_M_EC2_FO_M_CLK BIT_4 /* Fiber Mode Clock Enable */ #define PHY_M_EC2_FO_BOOST BIT_3 /* Fiber Output Boost */ #define PHY_M_EC2_FO_AM_MSK 7 /* Bit 2.. 0: Fiber Output Amplitude */ /***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ #define PHY_M_FC_AUTO_SEL BIT_15 /* Fiber/Copper Auto Sel. Dis. */ #define PHY_M_FC_AN_REG_ACC BIT_14 /* Fiber/Copper AN Reg. 
Access */ #define PHY_M_FC_RESOLUTION BIT_13 /* Fiber/Copper Resolution */ #define PHY_M_SER_IF_AN_BP BIT_12 /* Ser. IF AN Bypass Enable */ #define PHY_M_SER_IF_BP_ST BIT_11 /* Ser. IF AN Bypass Status */ #define PHY_M_IRQ_POLARITY BIT_10 /* IRQ polarity */ #define PHY_M_DIS_AUT_MED BIT_9 /* Disable Aut. Medium Reg. Selection */ /* (88E1111 only) */ #define PHY_M_UNDOC1 BIT_7 /* undocumented bit !! */ #define PHY_M_DTE_POW_STAT BIT_4 /* DTE Power Status (88E1111 only) */ #define PHY_M_MODE_MASK 0xf /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ /***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ #define PHY_M_CABD_ENA_TEST BIT_15 /* Enable Test (Page 0) */ #define PHY_M_CABD_DIS_WAIT BIT_15 /* Disable Waiting Period (Page 1) */ /* (88E1111 only) */ #define PHY_M_CABD_STAT_MSK (3<<13) /* Bit 14..13: Status Mask */ #define PHY_M_CABD_AMPL_MSK (0x1f<<8) /* Bit 12.. 8: Amplitude Mask */ /* (88E1111 only) */ #define PHY_M_CABD_DIST_MSK 0xff /* Bit 7.. 0: Distance Mask */ /* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ #define CABD_STAT_NORMAL 0 #define CABD_STAT_SHORT 1 #define CABD_STAT_OPEN 2 #define CABD_STAT_FAIL 3 /* for 10/100 Fast Ethernet PHY (88E3082 only) */ /***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ #define PHY_M_FELP_LED2_MSK (0xf<<8) /* Bit 11.. 8: LED2 Mask (LINK) */ #define PHY_M_FELP_LED1_MSK (0xf<<4) /* Bit 7.. 4: LED1 Mask (ACT) */ #define PHY_M_FELP_LED0_MSK 0xf /* Bit 3.. 0: LED0 Mask (SPEED) */ #define PHY_M_FELP_LED2_CTRL(x) (SHIFT8(x) & PHY_M_FELP_LED2_MSK) #define PHY_M_FELP_LED1_CTRL(x) (SHIFT4(x) & PHY_M_FELP_LED1_MSK) #define PHY_M_FELP_LED0_CTRL(x) (SHIFT0(x) & PHY_M_FELP_LED0_MSK) #define LED_PAR_CTRL_COLX 0x00 #define LED_PAR_CTRL_ERROR 0x01 #define LED_PAR_CTRL_DUPLEX 0x02 #define LED_PAR_CTRL_DP_COL 0x03 #define LED_PAR_CTRL_SPEED 0x04 #define LED_PAR_CTRL_LINK 0x05 #define LED_PAR_CTRL_TX 0x06 #define LED_PAR_CTRL_RX 0x07 #define LED_PAR_CTRL_ACT 0x08 #define LED_PAR_CTRL_LNK_RX 0x09 #define LED_PAR_CTRL_LNK_AC 0x0a #define LED_PAR_CTRL_ACT_BL 0x0b #define LED_PAR_CTRL_TX_BL 0x0c #define LED_PAR_CTRL_RX_BL 0x0d #define LED_PAR_CTRL_COL_BL 0x0e #define LED_PAR_CTRL_INACT 0x0f /***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ #define PHY_M_FESC_DIS_WAIT BIT_2 /* Disable TDR Waiting Period */ #define PHY_M_FESC_ENA_MCLK BIT_1 /* Enable MAC Rx Clock in sleep mode */ #define PHY_M_FESC_SEL_CL_A BIT_0 /* Select Class A driver (100B-TX) */ /* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ /***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/ #define PHY_M_FIB_FORCE_LNK BIT_10 /* Force Link Good */ #define PHY_M_FIB_SIGD_POL BIT_9 /* SIGDET Polarity */ #define PHY_M_FIB_TX_DIS BIT_3 /* Transmitter Disable */ /***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/ #define PHY_M_MAC_MD_MSK (7<<7) /* Bit 9.. 7: Mode Select Mask */ #define PHY_M_MAC_MD_AUTO 3 /* Auto Copper/1000Base-X */ #define PHY_M_MAC_MD_COPPER 5 /* Copper only */ #define PHY_M_MAC_MD_1000BX 7 /* 1000Base-X only */ #define PHY_M_MAC_MODE_SEL(x) (SHIFT7(x) & PHY_M_MAC_MD_MSK) /***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ #define PHY_M_LEDC_LOS_MSK (0xf<<12) /* Bit 15..12: LOS LED Ctrl. Mask */ #define PHY_M_LEDC_INIT_MSK (0xf<<8) /* Bit 11.. 8: INIT LED Ctrl. Mask */ #define PHY_M_LEDC_STA1_MSK (0xf<<4) /* Bit 7.. 4: STAT1 LED Ctrl. Mask */ #define PHY_M_LEDC_STA0_MSK 0xf /* Bit 3.. 0: STAT0 LED Ctrl. 
Mask */

#define PHY_M_LEDC_LOS_CTRL(x)	(SHIFT12(x) & PHY_M_LEDC_LOS_MSK)
#define PHY_M_LEDC_INIT_CTRL(x)	(SHIFT8(x) & PHY_M_LEDC_INIT_MSK)
#define PHY_M_LEDC_STA1_CTRL(x)	(SHIFT4(x) & PHY_M_LEDC_STA1_MSK)
#define PHY_M_LEDC_STA0_CTRL(x)	(SHIFT0(x) & PHY_M_LEDC_STA0_MSK)

/***** PHY_MARV_PHY_STAT (page 3) 16 bit r/w Polarity Control Reg. *****/
#define PHY_M_POLC_LS1M_MSK	(0xf<<12)	/* Bit 15..12: LOS,STAT1 Mix % Mask */
#define PHY_M_POLC_IS0M_MSK	(0xf<<8)	/* Bit 11.. 8: INIT,STAT0 Mix % Mask */
#define PHY_M_POLC_LOS_MSK	(0x3<<6)	/* Bit 7.. 6: LOS Pol. Ctrl. Mask */
#define PHY_M_POLC_INIT_MSK	(0x3<<4)	/* Bit 5.. 4: INIT Pol. Ctrl. Mask */
#define PHY_M_POLC_STA1_MSK	(0x3<<2)	/* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */
#define PHY_M_POLC_STA0_MSK	0x3		/* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */

#define PHY_M_POLC_LS1_P_MIX(x)	(SHIFT12(x) & PHY_M_POLC_LS1M_MSK)
#define PHY_M_POLC_IS0_P_MIX(x)	(SHIFT8(x) & PHY_M_POLC_IS0M_MSK)
#define PHY_M_POLC_LOS_CTRL(x)	(SHIFT6(x) & PHY_M_POLC_LOS_MSK)
#define PHY_M_POLC_INIT_CTRL(x)	(SHIFT4(x) & PHY_M_POLC_INIT_MSK)
#define PHY_M_POLC_STA1_CTRL(x)	(SHIFT2(x) & PHY_M_POLC_STA1_MSK)
#define PHY_M_POLC_STA0_CTRL(x)	(SHIFT0(x) & PHY_M_POLC_STA0_MSK)

/*
 * GMAC registers
 *
 * The GMAC registers are 16 or 32 bits wide.
 * The GMAC's host processor interface is 16 bits wide,
 * therefore ALL registers will be addressed with 16 bit accesses.
 *
 * Note: NA reg = Network Address, e.g. DA, SA, etc.
 */

/* Port Registers */
#define GM_GP_STAT	0x0000	/* 16 bit r/o General Purpose Status */
#define GM_GP_CTRL	0x0004	/* 16 bit r/w General Purpose Control */
#define GM_TX_CTRL	0x0008	/* 16 bit r/w Transmit Control Reg. */
#define GM_RX_CTRL	0x000c	/* 16 bit r/w Receive Control Reg. */
#define GM_TX_FLOW_CTRL	0x0010	/* 16 bit r/w Transmit Flow-Control */
#define GM_TX_PARAM	0x0014	/* 16 bit r/w Transmit Parameter Reg. */
#define GM_SERIAL_MODE	0x0018	/* 16 bit r/w Serial Mode Register */

/* Source Address Registers */
#define GM_SRC_ADDR_1L	0x001c	/* 16 bit r/w Source Address 1 (low) */
#define GM_SRC_ADDR_1M	0x0020	/* 16 bit r/w Source Address 1 (middle) */
#define GM_SRC_ADDR_1H	0x0024	/* 16 bit r/w Source Address 1 (high) */
#define GM_SRC_ADDR_2L	0x0028	/* 16 bit r/w Source Address 2 (low) */
#define GM_SRC_ADDR_2M	0x002c	/* 16 bit r/w Source Address 2 (middle) */
#define GM_SRC_ADDR_2H	0x0030	/* 16 bit r/w Source Address 2 (high) */

/* Multicast Address Hash Registers */
#define GM_MC_ADDR_H1	0x0034	/* 16 bit r/w Multicast Address Hash 1 */
#define GM_MC_ADDR_H2	0x0038	/* 16 bit r/w Multicast Address Hash 2 */
#define GM_MC_ADDR_H3	0x003c	/* 16 bit r/w Multicast Address Hash 3 */
#define GM_MC_ADDR_H4	0x0040	/* 16 bit r/w Multicast Address Hash 4 */

/* Interrupt Source Registers */
#define GM_TX_IRQ_SRC	0x0044	/* 16 bit r/o Tx Overflow IRQ Source */
#define GM_RX_IRQ_SRC	0x0048	/* 16 bit r/o Rx Overflow IRQ Source */
#define GM_TR_IRQ_SRC	0x004c	/* 16 bit r/o Tx/Rx Over. IRQ Source */

/* Interrupt Mask Registers */
#define GM_TX_IRQ_MSK	0x0050	/* 16 bit r/w Tx Overflow IRQ Mask */
#define GM_RX_IRQ_MSK	0x0054	/* 16 bit r/w Rx Overflow IRQ Mask */
#define GM_TR_IRQ_MSK	0x0058	/* 16 bit r/w Tx/Rx Over.
IRQ Mask */ /* Serial Management Interface (SMI) Registers */ #define GM_SMI_CTRL 0x0080 /* 16 bit r/w SMI Control Register */ #define GM_SMI_DATA 0x0084 /* 16 bit r/w SMI Data Register */ #define GM_PHY_ADDR 0x0088 /* 16 bit r/w GPHY Address Register */ /* MIB Counters */ #define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ #define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ /* * MIB Counters base address definitions (low word) - * use offset 4 for access to high word (32 bit r/o) */ #define GM_RXF_UC_OK \ (GM_MIB_CNT_BASE + 0) /* Unicast Frames Received OK */ #define GM_RXF_BC_OK \ (GM_MIB_CNT_BASE + 8) /* Broadcast Frames Received OK */ #define GM_RXF_MPAUSE \ (GM_MIB_CNT_BASE + 16) /* Pause MAC Ctrl Frames Received */ #define GM_RXF_MC_OK \ (GM_MIB_CNT_BASE + 24) /* Multicast Frames Received OK */ #define GM_RXF_FCS_ERR \ (GM_MIB_CNT_BASE + 32) /* Rx Frame Check Seq. Error */ #define GM_RXF_SPARE1 \ (GM_MIB_CNT_BASE + 40) /* Rx spare 1 */ #define GM_RXO_OK_LO \ (GM_MIB_CNT_BASE + 48) /* Octets Received OK Low */ #define GM_RXO_OK_HI \ (GM_MIB_CNT_BASE + 56) /* Octets Received OK High */ #define GM_RXO_ERR_LO \ (GM_MIB_CNT_BASE + 64) /* Octets Received Invalid Low */ #define GM_RXO_ERR_HI \ (GM_MIB_CNT_BASE + 72) /* Octets Received Invalid High */ #define GM_RXF_SHT \ (GM_MIB_CNT_BASE + 80) /* Frames <64 Byte Received OK */ #define GM_RXE_FRAG \ (GM_MIB_CNT_BASE + 88) /* Frames <64 Byte Received with FCS Err */ #define GM_RXF_64B \ (GM_MIB_CNT_BASE + 96) /* 64 Byte Rx Frame */ #define GM_RXF_127B \ (GM_MIB_CNT_BASE + 104) /* 65-127 Byte Rx Frame */ #define GM_RXF_255B \ (GM_MIB_CNT_BASE + 112) /* 128-255 Byte Rx Frame */ #define GM_RXF_511B \ (GM_MIB_CNT_BASE + 120) /* 256-511 Byte Rx Frame */ #define GM_RXF_1023B \ (GM_MIB_CNT_BASE + 128) /* 512-1023 Byte Rx Frame */ #define GM_RXF_1518B \ (GM_MIB_CNT_BASE + 136) /* 1024-1518 Byte Rx Frame */ #define GM_RXF_MAX_SZ \ (GM_MIB_CNT_BASE + 144) /* 1519-MaxSize Byte Rx Frame */ #define GM_RXF_LNG_ERR \ (GM_MIB_CNT_BASE + 152) /* Rx Frame too Long Error */ #define GM_RXF_JAB_PKT \ (GM_MIB_CNT_BASE + 160) /* Rx Jabber Packet Frame */ #define GM_RXF_SPARE2 \ (GM_MIB_CNT_BASE + 168) /* Rx spare 2 */ #define GM_RXE_FIFO_OV \ (GM_MIB_CNT_BASE + 176) /* Rx FIFO overflow Event */ #define GM_RXF_SPARE3 \ (GM_MIB_CNT_BASE + 184) /* Rx spare 3 */ #define GM_TXF_UC_OK \ (GM_MIB_CNT_BASE + 192) /* Unicast Frames Xmitted OK */ #define GM_TXF_BC_OK \ (GM_MIB_CNT_BASE + 200) /* Broadcast Frames Xmitted OK */ #define GM_TXF_MPAUSE \ (GM_MIB_CNT_BASE + 208) /* Pause MAC Ctrl Frames Xmitted */ #define GM_TXF_MC_OK \ (GM_MIB_CNT_BASE + 216) /* Multicast Frames Xmitted OK */ #define GM_TXO_OK_LO \ (GM_MIB_CNT_BASE + 224) /* Octets Transmitted OK Low */ #define GM_TXO_OK_HI \ (GM_MIB_CNT_BASE + 232) /* Octets Transmitted OK High */ #define GM_TXF_64B \ (GM_MIB_CNT_BASE + 240) /* 64 Byte Tx Frame */ #define GM_TXF_127B \ (GM_MIB_CNT_BASE + 248) /* 65-127 Byte Tx Frame */ #define GM_TXF_255B \ (GM_MIB_CNT_BASE + 256) /* 128-255 Byte Tx Frame */ #define GM_TXF_511B \ (GM_MIB_CNT_BASE + 264) /* 256-511 Byte Tx Frame */ #define GM_TXF_1023B \ (GM_MIB_CNT_BASE + 272) /* 512-1023 Byte Tx Frame */ #define GM_TXF_1518B \ (GM_MIB_CNT_BASE + 280) /* 1024-1518 Byte Tx Frame */ #define GM_TXF_MAX_SZ \ (GM_MIB_CNT_BASE + 288) /* 1519-MaxSize Byte Tx Frame */ #define GM_TXF_SPARE1 \ (GM_MIB_CNT_BASE + 296) /* Tx spare 1 */ #define GM_TXF_COL \ (GM_MIB_CNT_BASE + 304) /* Tx Collision */ #define GM_TXF_LAT_COL \ (GM_MIB_CNT_BASE + 312) /* Tx Late Collision */ 
#define GM_TXF_ABO_COL \
	(GM_MIB_CNT_BASE + 320)	/* Tx aborted due to Exces. Col. */
#define GM_TXF_MUL_COL \
	(GM_MIB_CNT_BASE + 328)	/* Tx Multiple Collision */
#define GM_TXF_SNG_COL \
	(GM_MIB_CNT_BASE + 336)	/* Tx Single Collision */
#define GM_TXE_FIFO_UR \
	(GM_MIB_CNT_BASE + 344)	/* Tx FIFO Underrun Event */

/*----------------------------------------------------------------------------*/

/*
 * GMAC Bit Definitions
 *
 * If the bit access behaviour differs from the register access behaviour
 * (r/w, r/o) this is documented after the bit number.
 * The following bit access behaviours are used:
 * (sc) self clearing
 * (r/o) read only
 */

/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
#define GM_GPSR_SPEED		BIT_15	/* Port Speed (1 = 100 Mbps) */
#define GM_GPSR_DUPLEX		BIT_14	/* Duplex Mode (1 = Full) */
#define GM_GPSR_FC_TX_DIS	BIT_13	/* Tx Flow-Control Mode Disabled */
#define GM_GPSR_LINK_UP		BIT_12	/* Link Up Status */
#define GM_GPSR_PAUSE		BIT_11	/* Pause State */
#define GM_GPSR_TX_ACTIVE	BIT_10	/* Tx in Progress */
#define GM_GPSR_EXC_COL		BIT_9	/* Excessive Collisions Occurred */
#define GM_GPSR_LAT_COL		BIT_8	/* Late Collisions Occurred */
#define GM_GPSR_PHY_ST_CH	BIT_5	/* PHY Status Change */
#define GM_GPSR_GIG_SPEED	BIT_4	/* Gigabit Speed (1 = 1000 Mbps) */
#define GM_GPSR_PART_MODE	BIT_3	/* Partition mode */
#define GM_GPSR_FC_RX_DIS	BIT_2	/* Rx Flow-Control Mode Disabled */

/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
#define GM_GPCR_RMII_PH_ENA	BIT_15	/* Enable RMII for PHY (Yukon-FE only) */
#define GM_GPCR_RMII_LB_ENA	BIT_14	/* Enable RMII Loopback (Yukon-FE only) */
#define GM_GPCR_FC_TX_DIS	BIT_13	/* Disable Tx Flow-Control Mode */
#define GM_GPCR_TX_ENA		BIT_12	/* Enable Transmit */
#define GM_GPCR_RX_ENA		BIT_11	/* Enable Receive */
#define GM_GPCR_LOOP_ENA	BIT_9	/* Enable MAC Loopback Mode */
#define GM_GPCR_PART_ENA	BIT_8	/* Enable Partition Mode */
#define GM_GPCR_GIGS_ENA	BIT_7	/* Gigabit Speed (1000 Mbps) */
#define GM_GPCR_FL_PASS		BIT_6	/* Force Link Pass */
#define GM_GPCR_DUP_FULL	BIT_5	/* Full Duplex Mode */
#define GM_GPCR_FC_RX_DIS	BIT_4	/* Disable Rx Flow-Control Mode */
#define GM_GPCR_SPEED_100	BIT_3	/* Port Speed 100 Mbps */
#define GM_GPCR_AU_DUP_DIS	BIT_2	/* Disable Auto-Update Duplex */
#define GM_GPCR_AU_FCT_DIS	BIT_1	/* Disable Auto-Update Flow-C. */
#define GM_GPCR_AU_SPD_DIS	BIT_0	/* Disable Auto-Update Speed */

#define GM_GPCR_SPEED_1000	(GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
#define GM_GPCR_AU_ALL_DIS	(GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |\
				GM_GPCR_AU_SPD_DIS)

/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
#define GM_TXCR_FORCE_JAM	BIT_15	/* Force Jam / Flow-Control */
#define GM_TXCR_CRC_DIS		BIT_14	/* Disable insertion of CRC */
#define GM_TXCR_PAD_DIS		BIT_13	/* Disable padding of packets */
#define GM_TXCR_COL_THR_MSK	(7<<10)	/* Bit 12..10: Collision Threshold Mask */
#define GM_TXCR_PAD_PAT_MSK	0xff	/* Bit 7.. 0: Padding Pattern Mask */
					/* (Yukon-2 only) */

#define TX_COL_THR(x)		(SHIFT10(x) & GM_TXCR_COL_THR_MSK)
#define TX_COL_DEF		0x04

/* GM_RX_CTRL 16 bit r/w Receive Control Register */
#define GM_RXCR_UCF_ENA	BIT_15	/* Enable Unicast filtering */
#define GM_RXCR_MCF_ENA	BIT_14	/* Enable Multicast filtering */
#define GM_RXCR_CRC_DIS	BIT_13	/* Remove 4-byte CRC */
#define GM_RXCR_PASS_FC	BIT_12	/* Pass FC packets to FIFO (Yukon-1 only) */

/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
#define GM_TXPA_JAMLEN_MSK	(3<<14)		/* Bit 15..14: Jam Length Mask */
#define GM_TXPA_JAMIPG_MSK	(0x1f<<9)	/* Bit 13..
9: Jam IPG Mask */ #define GM_TXPA_JAMDAT_MSK (0x1f<<4) /* Bit 8.. 4: IPG Jam to Data Mask */ #define GM_TXPA_BO_LIM_MSK 0x0f /* Bit 3.. 0: Backoff Limit Mask */ /* (Yukon-2 only) */ #define TX_JAM_LEN_VAL(x) (SHIFT14(x) & GM_TXPA_JAMLEN_MSK) #define TX_JAM_IPG_VAL(x) (SHIFT9(x) & GM_TXPA_JAMIPG_MSK) #define TX_IPG_JAM_DATA(x) (SHIFT4(x) & GM_TXPA_JAMDAT_MSK) #define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK) #define TX_JAM_LEN_DEF 0x03 #define TX_JAM_IPG_DEF 0x0b #define TX_IPG_JAM_DEF 0x1c #define TX_BOF_LIM_DEF 0x04 /* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ #define GM_SMOD_DATABL_MSK (0x1f<<11) /* Bit 15..11: Data Blinder */ /* r/o on Yukon, r/w on Yukon-EC */ #define GM_SMOD_LIMIT_4 BIT_10 /* 4 consecutive Tx trials */ #define GM_SMOD_VLAN_ENA BIT_9 /* Enable VLAN (Max. Frame Len) */ #define GM_SMOD_JUMBO_ENA BIT_8 /* Enable Jumbo (Max. Frame Len) */ #define GM_SMOD_IPG_MSK 0x1f /* Bit 4.. 0: Inter-Packet Gap (IPG) */ #define DATA_BLIND_VAL(x) (SHIFT11(x) & GM_SMOD_DATABL_MSK) #define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK) #define DATA_BLIND_DEF 0x04 #define IPG_DATA_DEF 0x1e /* GM_SMI_CTRL 16 bit r/w SMI Control Register */ #define GM_SMI_CT_PHY_A_MSK (0x1f<<11) /* Bit 15..11: PHY Device Address */ #define GM_SMI_CT_REG_A_MSK (0x1f<<6) /* Bit 10.. 6: PHY Register Address */ #define GM_SMI_CT_OP_RD BIT_5 /* OpCode Read (0=Write)*/ #define GM_SMI_CT_RD_VAL BIT_4 /* Read Valid (Read completed) */ #define GM_SMI_CT_BUSY BIT_3 /* Busy (Operation in progress) */ #define GM_SMI_CT_PHY_AD(x) (SHIFT11(x) & GM_SMI_CT_PHY_A_MSK) #define GM_SMI_CT_REG_AD(x) (SHIFT6(x) & GM_SMI_CT_REG_A_MSK) /* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ #define GM_PAR_MIB_CLR BIT_5 /* Set MIB Clear Counter Mode */ #define GM_PAR_MIB_TST BIT_4 /* MIB Load Counter (Test Mode) */ /* Receive Frame Status Encoding */ #define GMR_FS_LEN_MSK (0xffff<<16) /* Bit 31..16: Rx Frame Length */ #define GMR_FS_VLAN BIT_13 /* VLAN Packet */ #define GMR_FS_JABBER BIT_12 /* Jabber Packet */ #define GMR_FS_UN_SIZE BIT_11 /* Undersize Packet */ #define GMR_FS_MC BIT_10 /* Multicast Packet */ #define GMR_FS_BC BIT_9 /* Broadcast Packet */ #define GMR_FS_RX_OK BIT_8 /* Receive OK (Good Packet) */ #define GMR_FS_GOOD_FC BIT_7 /* Good Flow-Control Packet */ #define GMR_FS_BAD_FC BIT_6 /* Bad Flow-Control Packet */ #define GMR_FS_MII_ERR BIT_5 /* MII Error */ #define GMR_FS_LONG_ERR BIT_4 /* Too Long Packet */ #define GMR_FS_FRAGMENT BIT_3 /* Fragment */ #define GMR_FS_CRC_ERR BIT_1 /* CRC Error */ #define GMR_FS_RX_FF_OV BIT_0 /* Rx FIFO Overflow */ #define GMR_FS_LEN_SHIFT 16 #define GMR_FS_ANY_ERR ( \ GMR_FS_RX_FF_OV | \ GMR_FS_CRC_ERR | \ GMR_FS_FRAGMENT | \ GMR_FS_LONG_ERR | \ GMR_FS_MII_ERR | \ GMR_FS_BAD_FC | \ GMR_FS_GOOD_FC | \ GMR_FS_UN_SIZE | \ GMR_FS_JABBER) /* Rx GMAC FIFO Flush Mask (default) */ #define RX_FF_FL_DEF_MSK GMR_FS_ANY_ERR /* Receive and Transmit GMAC FIFO Registers (YUKON only) */ /* RX_GMF_EA 32 bit Rx GMAC FIFO End Address */ /* RX_GMF_AF_THR 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ /* RX_GMF_WP 32 bit Rx GMAC FIFO Write Pointer */ /* RX_GMF_WLEV 32 bit Rx GMAC FIFO Write Level */ /* RX_GMF_RP 32 bit Rx GMAC FIFO Read Pointer */ /* RX_GMF_RLEV 32 bit Rx GMAC FIFO Read Level */ /* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ /* TX_GMF_AE_THR 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ /* TX_GMF_WP 32 bit Tx GMAC FIFO Write Pointer */ /* TX_GMF_WSP 32 bit Tx GMAC FIFO Write Shadow Pointer */ /* TX_GMF_WLEV 32 bit Tx GMAC FIFO Write Level */ /* TX_GMF_RP 32 bit Tx GMAC FIFO Read Pointer */ /* TX_GMF_RSTP 32 bit Tx GMAC FIFO Restart Pointer */ /* TX_GMF_RLEV 32 bit Tx GMAC FIFO Read Level */ /* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ #define RX_TRUNC_ON BIT_27 /* enable packet truncation */ #define RX_TRUNC_OFF BIT_26 /* disable packet truncation */ #define RX_VLAN_STRIP_ON BIT_25 /* enable VLAN stripping */ #define RX_VLAN_STRIP_OFF BIT_24 /* disable VLAN stripping */ #define GMF_RX_MACSEC_FLUSH_ON BIT_23 #define GMF_RX_MACSEC_FLUSH_OFF BIT_22 #define GMF_RX_OVER_ON BIT_19 /* enable flushing on receive overrun */ #define GMF_RX_OVER_OFF BIT_18 /* disable flushing on receive overrun */ #define GMF_ASF_RX_OVER_ON BIT_17 /* enable flushing of ASF when overrun */ #define GMF_ASF_RX_OVER_OFF BIT_16 /* disable flushing of ASF when overrun */ #define GMF_WP_TST_ON BIT_14 /* Write Pointer Test On */ #define GMF_WP_TST_OFF BIT_13 /* Write Pointer Test Off */ #define GMF_WP_STEP BIT_12 /* Write Pointer Step/Increment */ #define GMF_RP_TST_ON BIT_10 /* Read Pointer Test On */ #define GMF_RP_TST_OFF BIT_9 /* Read Pointer Test Off */ #define GMF_RP_STEP BIT_8 /* Read Pointer Step/Increment */ #define GMF_RX_F_FL_ON BIT_7 /* Rx FIFO Flush Mode On */ #define GMF_RX_F_FL_OFF BIT_6 /* Rx FIFO Flush Mode Off */ #define GMF_CLI_RX_FO BIT_5 /* Clear IRQ Rx FIFO Overrun */ #define GMF_CLI_RX_FC BIT_4 /* Clear IRQ Rx Frame Complete */ #define GMF_OPER_ON BIT_3 /* Operational Mode On */ #define GMF_OPER_OFF BIT_2 /* Operational Mode Off */ #define GMF_RST_CLR BIT_1 /* Clear GMAC FIFO Reset */ #define GMF_RST_SET BIT_0 /* Set GMAC FIFO Reset */ /* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test (YUKON and Yukon-2) */ #define TX_STFW_DIS BIT_31 /* Disable Store & Forward (Yukon-EC Ultra) */ #define TX_STFW_ENA BIT_30 /* Enable Store & Forward (Yukon-EC Ultra) */ #define TX_VLAN_TAG_ON BIT_25 /* enable VLAN tagging */ #define TX_VLAN_TAG_OFF BIT_24 /* disable VLAN tagging */ #define TX_JUMBO_ENA BIT_23 /* Enable Jumbo Mode (Yukon-EC Ultra) */ #define TX_JUMBO_DIS BIT_22 /* Disable Jumbo Mode (Yukon-EC Ultra) */ #define GMF_WSP_TST_ON BIT_18 /* Write Shadow Pointer Test On */ #define GMF_WSP_TST_OFF BIT_17 /* Write Shadow Pointer Test Off */ #define GMF_WSP_STEP BIT_16 /* Write Shadow Pointer Step/Increment */ /* Bits 15..8: same as for RX_GMF_CTRL_T */ #define GMF_CLI_TX_FU BIT_6 /* Clear IRQ Tx FIFO Underrun */ #define GMF_CLI_TX_FC BIT_5 /* Clear IRQ Tx Frame Complete */ #define GMF_CLI_TX_PE BIT_4 /* Clear IRQ Tx Parity Error */ /* Bits 3..0: same as for RX_GMF_CTRL_T */ #define GMF_RX_CTRL_DEF (GMF_OPER_ON | GMF_RX_F_FL_ON) #define GMF_TX_CTRL_DEF GMF_OPER_ON #define RX_GMF_AF_THR_MIN 0x0c /* Rx GMAC FIFO Almost Full Thresh. min. 
*/
#define RX_GMF_FL_THR_DEF	0x0a	/* Rx GMAC FIFO Flush Threshold default */

/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
#define GMT_ST_START	BIT_2	/* Start Time Stamp Timer */
#define GMT_ST_STOP	BIT_1	/* Stop Time Stamp Timer */
#define GMT_ST_CLR_IRQ	BIT_0	/* Clear Time Stamp Timer IRQ */

/* POLL_CTRL 32 bit Polling Unit control register (Yukon-2 only) */
#define PC_CLR_IRQ_CHK	BIT_5	/* Clear IRQ Check */
#define PC_POLL_RQ	BIT_4	/* Poll Request Start */
#define PC_POLL_OP_ON	BIT_3	/* Operational Mode On */
#define PC_POLL_OP_OFF	BIT_2	/* Operational Mode Off */
#define PC_POLL_RST_CLR	BIT_1	/* Clear Polling Unit Reset (Enable) */
#define PC_POLL_RST_SET	BIT_0	/* Set Polling Unit Reset */

/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
/* This register is used by the host driver software */
#define Y2_ASF_OS_PRES	BIT_4	/* ASF operating system present */
#define Y2_ASF_RESET	BIT_3	/* ASF system in reset state */
#define Y2_ASF_RUNNING	BIT_2	/* ASF system operational */
#define Y2_ASF_CLR_HSTI	BIT_1	/* Clear ASF IRQ */
#define Y2_ASF_IRQ	BIT_0	/* Issue an IRQ to ASF system */

#define Y2_ASF_UC_STATE	(3<<2)	/* ASF uC State */
#define Y2_ASF_CLK_HALT	0	/* ASF system clock stopped */

/* B28_Y2_ASF_HCU_CCSR 32 bit CPU Control and Status Register (Yukon EX) */
#define Y2_ASF_HCU_CCSR_SMBALERT_MONITOR	BIT_27	/* SMBALERT pin monitor */
#define Y2_ASF_HCU_CCSR_CPU_SLEEP		BIT_26	/* CPU sleep status */
#define Y2_ASF_HCU_CCSR_CS_TO			BIT_25	/* Clock Stretching Timeout */
#define Y2_ASF_HCU_CCSR_WDOG			BIT_24	/* Watchdog Reset */
#define Y2_ASF_HCU_CCSR_CLR_IRQ_HOST		BIT_17	/* Clear IRQ_HOST */
#define Y2_ASF_HCU_CCSR_SET_IRQ_HCU		BIT_16	/* Set IRQ_HCU */
#define Y2_ASF_HCU_CCSR_AHB_RST			BIT_9	/* Reset AHB bridge */
#define Y2_ASF_HCU_CCSR_CPU_RST_MODE		BIT_8	/* CPU Reset Mode */
#define Y2_ASF_HCU_CCSR_SET_SYNC_CPU		BIT_5
#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE1		BIT_4
#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE0		BIT_3
#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK	(BIT_4 | BIT_3)	/* CPU Clock Divide */
#define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_BASE	BIT_3
#define Y2_ASF_HCU_CCSR_OS_PRSNT		BIT_2	/* ASF OS Present */
/* Microcontroller State */
#define Y2_ASF_HCU_CCSR_UC_STATE_MSK	3
#define Y2_ASF_HCU_CCSR_UC_STATE_BASE	BIT_0
#define Y2_ASF_HCU_CCSR_ASF_RESET	0
#define Y2_ASF_HCU_CCSR_ASF_HALTED	BIT_1
#define Y2_ASF_HCU_CCSR_ASF_RUNNING	BIT_0

/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
/* This register is used by the ASF firmware */
#define Y2_ASF_CLR_ASFI	BIT_1	/* Clear host IRQ */
#define Y2_ASF_HOST_IRQ	BIT_0	/* Issue an IRQ to HOST system */

/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
#define SC_STAT_CLR_IRQ	BIT_4	/* Status Burst IRQ clear */
#define SC_STAT_OP_ON	BIT_3	/* Operational Mode On */
#define SC_STAT_OP_OFF	BIT_2	/* Operational Mode Off */
#define SC_STAT_RST_CLR	BIT_1	/* Clear Status Unit Reset (Enable) */
#define SC_STAT_RST_SET	BIT_0	/* Set Status Unit Reset */

/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
#define GMC_SEC_RST		BIT_15	/* MAC SEC RST */
#define GMC_SEC_RST_OFF		BIT_14	/* MAC SEC RST Off */
#define GMC_BYP_MACSECRX_ON	BIT_13	/* Bypass MAC SEC RX */
#define GMC_BYP_MACSECRX_OFF	BIT_12	/* Bypass MAC SEC RX Off */
#define GMC_BYP_MACSECTX_ON	BIT_11	/* Bypass MAC SEC TX */
#define GMC_BYP_MACSECTX_OFF	BIT_10	/* Bypass MAC SEC TX Off */
#define GMC_BYP_RETR_ON		BIT_9	/* Bypass MAC retransmit FIFO On */
#define GMC_BYP_RETR_OFF	BIT_8	/* Bypass MAC retransmit FIFO Off */
#define GMC_H_BURST_ON		BIT_7	/* Half Duplex Burst Mode
On */ #define GMC_H_BURST_OFF BIT_6 /* Half Duplex Burst Mode Off */ #define GMC_F_LOOPB_ON BIT_5 /* FIFO Loopback On */ #define GMC_F_LOOPB_OFF BIT_4 /* FIFO Loopback Off */ #define GMC_PAUSE_ON BIT_3 /* Pause On */ #define GMC_PAUSE_OFF BIT_2 /* Pause Off */ #define GMC_RST_CLR BIT_1 /* Clear GMAC Reset */ #define GMC_RST_SET BIT_0 /* Set GMAC Reset */ /* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ #define GPC_SEL_BDT BIT_28 /* Select Bi-Dir. Transfer for MDC/MDIO */ #define GPC_INT_POL BIT_27 /* IRQ Polarity is Active Low */ #define GPC_75_OHM BIT_26 /* Use 75 Ohm Termination instead of 50 */ #define GPC_DIS_FC BIT_25 /* Disable Automatic Fiber/Copper Detection */ #define GPC_DIS_SLEEP BIT_24 /* Disable Energy Detect */ #define GPC_HWCFG_M_3 BIT_23 /* HWCFG_MODE[3] */ #define GPC_HWCFG_M_2 BIT_22 /* HWCFG_MODE[2] */ #define GPC_HWCFG_M_1 BIT_21 /* HWCFG_MODE[1] */ #define GPC_HWCFG_M_0 BIT_20 /* HWCFG_MODE[0] */ #define GPC_ANEG_0 BIT_19 /* ANEG[0] */ #define GPC_ENA_XC BIT_18 /* Enable MDI crossover */ #define GPC_DIS_125 BIT_17 /* Disable 125 MHz clock */ #define GPC_ANEG_3 BIT_16 /* ANEG[3] */ #define GPC_ANEG_2 BIT_15 /* ANEG[2] */ #define GPC_ANEG_1 BIT_14 /* ANEG[1] */ #define GPC_ENA_PAUSE BIT_13 /* Enable Pause (SYM_OR_REM) */ #define GPC_PHYADDR_4 BIT_12 /* Bit 4 of Phy Addr */ #define GPC_PHYADDR_3 BIT_11 /* Bit 3 of Phy Addr */ #define GPC_PHYADDR_2 BIT_10 /* Bit 2 of Phy Addr */ #define GPC_PHYADDR_1 BIT_9 /* Bit 1 of Phy Addr */ #define GPC_PHYADDR_0 BIT_8 /* Bit 0 of Phy Addr */ #define GPC_RST_CLR BIT_1 /* Clear GPHY Reset */ #define GPC_RST_SET BIT_0 /* Set GPHY Reset */ /* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ /* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ #define GM_IS_RX_CO_OV BIT_5 /* Receive Counter Overflow IRQ */ #define GM_IS_TX_CO_OV BIT_4 /* Transmit Counter Overflow IRQ */ #define GM_IS_TX_FF_UR BIT_3 /* Transmit FIFO Underrun */ #define GM_IS_TX_COMPL BIT_2 /* Frame Transmission Complete */ #define GM_IS_RX_FF_OR BIT_1 /* Receive FIFO Overrun */ #define GM_IS_RX_COMPL BIT_0 /* Frame Reception Complete */ #define GMAC_DEF_MSK (GM_IS_RX_CO_OV | GM_IS_TX_CO_OV | GM_IS_TX_FF_UR) /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ #define GMLC_RST_CLR BIT_1 /* Clear GMAC Link Reset */ #define GMLC_RST_SET BIT_0 /* Set GMAC Link Reset */ #define MSK_PORT_A 0 #define MSK_PORT_B 1 /* Register access macros */ #define CSR_WRITE_4(sc, reg, val) \ bus_write_4((sc)->msk_res[0], (reg), (val)) #define CSR_WRITE_2(sc, reg, val) \ bus_write_2((sc)->msk_res[0], (reg), (val)) #define CSR_WRITE_1(sc, reg, val) \ bus_write_1((sc)->msk_res[0], (reg), (val)) #define CSR_READ_4(sc, reg) \ bus_read_4((sc)->msk_res[0], (reg)) #define CSR_READ_2(sc, reg) \ bus_read_2((sc)->msk_res[0], (reg)) #define CSR_READ_1(sc, reg) \ bus_read_1((sc)->msk_res[0], (reg)) #define CSR_PCI_WRITE_4(sc, reg, val) \ bus_write_4((sc)->msk_res[0], Y2_CFG_SPC + (reg), (val)) #define CSR_PCI_WRITE_2(sc, reg, val) \ bus_write_2((sc)->msk_res[0], Y2_CFG_SPC + (reg), (val)) #define CSR_PCI_WRITE_1(sc, reg, val) \ bus_write_1((sc)->msk_res[0], Y2_CFG_SPC + (reg), (val)) #define CSR_PCI_READ_4(sc, reg) \ bus_read_4((sc)->msk_res[0], Y2_CFG_SPC + (reg)) #define CSR_PCI_READ_2(sc, reg) \ bus_read_2((sc)->msk_res[0], Y2_CFG_SPC + (reg)) #define CSR_PCI_READ_1(sc, reg) \ bus_read_1((sc)->msk_res[0], Y2_CFG_SPC + (reg)) #define MSK_IF_READ_4(sc_if, reg) \ CSR_READ_4((sc_if)->msk_softc, (reg)) #define MSK_IF_READ_2(sc_if, reg) \ CSR_READ_2((sc_if)->msk_softc, (reg)) 
#define MSK_IF_READ_1(sc_if, reg) \ CSR_READ_1((sc_if)->msk_softc, (reg)) #define MSK_IF_WRITE_4(sc_if, reg, val) \ CSR_WRITE_4((sc_if)->msk_softc, (reg), (val)) #define MSK_IF_WRITE_2(sc_if, reg, val) \ CSR_WRITE_2((sc_if)->msk_softc, (reg), (val)) #define MSK_IF_WRITE_1(sc_if, reg, val) \ CSR_WRITE_1((sc_if)->msk_softc, (reg), (val)) #define GMAC_REG(port, reg) \ ((BASE_GMAC_1 + (port) * (BASE_GMAC_2 - BASE_GMAC_1)) | (reg)) #define GMAC_WRITE_2(sc, port, reg, val) \ CSR_WRITE_2((sc), GMAC_REG((port), (reg)), (val)) #define GMAC_READ_2(sc, port, reg) \ CSR_READ_2((sc), GMAC_REG((port), (reg))) /* GPHY address (bits 15..11 of SMI control reg) */ #define PHY_ADDR_MARV 0 #define MSK_ADDR_LO(x) ((uint64_t) (x) & 0xffffffffUL) #define MSK_ADDR_HI(x) ((uint64_t) (x) >> 32) /* * At first I guessed that 8 bytes, the size of a single descriptor, would be * the required alignment. But it seems that the Yukon II has a 4096-byte * boundary alignment constraint. */ #define MSK_RING_ALIGN 4096 #define MSK_STAT_ALIGN 4096 /* Rx descriptor data structure */ struct msk_rx_desc { uint32_t msk_addr; uint32_t msk_control; }; /* Tx descriptor data structure */ struct msk_tx_desc { uint32_t msk_addr; uint32_t msk_control; }; /* Status descriptor data structure */ struct msk_stat_desc { uint32_t msk_status; uint32_t msk_control; }; /* mask and shift value to get Tx async queue status for port 1 */ #define STLE_TXA1_MSKL 0x00000fff #define STLE_TXA1_SHIFTL 0 /* mask and shift value to get Tx sync queue status for port 1 */ #define STLE_TXS1_MSKL 0x00fff000 #define STLE_TXS1_SHIFTL 12 /* mask and shift value to get Tx async queue status for port 2 */ #define STLE_TXA2_MSKL 0xff000000 #define STLE_TXA2_SHIFTL 24 #define STLE_TXA2_MSKH 0x000f /* this one shifts up */ #define STLE_TXA2_SHIFTH 8 /* mask and shift value to get Tx sync queue status for port 2 */ #define STLE_TXS2_MSKL 0x00000000 #define STLE_TXS2_SHIFTL 0 #define STLE_TXS2_MSKH 0xfff0 #define STLE_TXS2_SHIFTH 4 /* YUKON-2 bit values */ #define HW_OWNER 0x80000000 #define SW_OWNER 0x00000000 #define PU_PUTIDX_VALID 0x10000000 /* YUKON-2 Control flags */ #define UDPTCP 0x00010000 #define CALSUM 0x00020000 #define WR_SUM 0x00040000 #define INIT_SUM 0x00080000 #define LOCK_SUM 0x00100000 #define INS_VLAN 0x00200000 #define FRC_STAT 0x00400000 #define EOP 0x00800000 #define TX_LOCK 0x01000000 #define BUF_SEND 0x02000000 #define PACKET_SEND 0x04000000 #define NO_WARNING 0x40000000 #define NO_UPDATE 0x80000000 /* YUKON-2 Rx/Tx opcode defines */ #define OP_TCPWRITE 0x11000000 #define OP_TCPSTART 0x12000000 #define OP_TCPINIT 0x14000000 #define OP_TCPLCK 0x18000000 #define OP_TCPCHKSUM OP_TCPSTART #define OP_TCPIS (OP_TCPINIT | OP_TCPSTART) #define OP_TCPLW (OP_TCPLCK | OP_TCPWRITE) #define OP_TCPLSW (OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE) #define OP_TCPLISW (OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE) #define OP_ADDR64 0x21000000 #define OP_VLAN 0x22000000 #define OP_ADDR64VLAN (OP_ADDR64 | OP_VLAN) #define OP_LRGLEN 0x24000000 #define OP_LRGLENVLAN (OP_LRGLEN | OP_VLAN) #define OP_MSS 0x28000000 #define OP_MSSVLAN (OP_MSS | OP_VLAN) #define OP_BUFFER 0x40000000 #define OP_PACKET 0x41000000 #define OP_LARGESEND 0x43000000 /* YUKON-2 STATUS opcode defines */ #define OP_RXSTAT 0x60000000 #define OP_RXTIMESTAMP 0x61000000 #define OP_RXVLAN 0x62000000 #define OP_RXCHKS 0x64000000 #define OP_RXCHKSVLAN (OP_RXCHKS | OP_RXVLAN) #define OP_RXTIMEVLAN (OP_RXTIMESTAMP | OP_RXVLAN) #define OP_RSS_HASH 0x65000000 #define OP_TXINDEXLE 0x68000000 /* YUKON-2 SPECIAL opcode defines */ #define OP_PUTIDX 0x70000000 #define STLE_OP_MASK 0xff000000 #define STLE_CSS_MASK 0x00ff0000 #define STLE_LEN_MASK 0x0000ffff
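A brief illustrative sketch (the helper name is hypothetical, not driver code): the convention implied by the defines above is that hardware sets HW_OWNER when a status list element completes, and the high byte then carries one of the OP_* status opcodes, recovered with STLE_OP_MASK.

/* Illustrative only: classify a completed status LE by its op code. */
static int
msk_stle_is_rxstat(uint32_t control)
{

	if ((control & HW_OWNER) == 0)
		return (0);		/* still owned by software */
	control &= ~HW_OWNER;
	return ((control & STLE_OP_MASK) == OP_RXSTAT);
}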
/* CSS bits defined in the status LE (valid for the descriptor V2 format). */ #define CSS_TCPUDP_CSUM_OK 0x00800000 #define CSS_UDP 0x00400000 #define CSS_TCP 0x00200000 #define CSS_IPFRAG 0x00100000 #define CSS_IPV6 0x00080000 #define CSS_IPV4_CSUM_OK 0x00040000 #define CSS_IPV4 0x00020000 #define CSS_PORT 0x00010000 /* Descriptor Bit Definition */ /* TxCtrl Transmit Buffer Control Field */ /* RxCtrl Receive Buffer Control Field */ #define BMU_OWN BIT_31 /* OWN bit: 0=host/1=BMU */ #define BMU_STF BIT_30 /* Start of Frame */ #define BMU_EOF BIT_29 /* End of Frame */ #define BMU_IRQ_EOB BIT_28 /* Req "End of Buffer" IRQ */ #define BMU_IRQ_EOF BIT_27 /* Req "End of Frame" IRQ */ /* TxCtrl specific bits */ #define BMU_STFWD BIT_26 /* (Tx) Store & Forward Frame */ #define BMU_NO_FCS BIT_25 /* (Tx) Disable MAC FCS (CRC) generation */ #define BMU_SW BIT_24 /* (Tx) 1 bit res. for SW use */ /* RxCtrl specific bits */ #define BMU_DEV_0 BIT_26 /* (Rx) Transfer data to Dev0 */ #define BMU_STAT_VAL BIT_25 /* (Rx) Rx Status Valid */ #define BMU_TIST_VAL BIT_24 /* (Rx) Rx TimeStamp Valid */ /* Bit 23..16: BMU Check Opcodes */ #define BMU_CHECK (0x55<<16) /* Default BMU check */ #define BMU_TCP_CHECK (0x56<<16) /* Descr with TCP ext */ #define BMU_UDP_CHECK (0x57<<16) /* Descr with UDP ext (YUKON only) */ #define BMU_BBC 0xffff /* Bit 15.. 0: Buffer Byte Counter */ /* * The controller requires an additional LE op code for 64-bit DMA operation. * The driver uses a fixed number of RX buffers, so this limitation reduces * the number of RX buffers available with 64-bit DMA; therefore, double the * number of RX buffers on platforms that support 64-bit DMA. On the TX side, * the controller requires an additional OP_ADDR64 op code if a TX buffer * uses a high address value different from the previously used one. The * driver monitors the high DMA address in TX and inserts an OP_ADDR64 op * code whenever it changes. The driver allocates 50% more total TX buffers * on platforms that support 64-bit DMA. */ #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF) #define MSK_64BIT_DMA #define MSK_TX_RING_CNT 384 #define MSK_RX_RING_CNT 512 #else #undef MSK_64BIT_DMA #define MSK_TX_RING_CNT 256 #define MSK_RX_RING_CNT 256 #endif #define MSK_RX_BUF_ALIGN 8 #define MSK_JUMBO_RX_RING_CNT MSK_RX_RING_CNT #define MSK_MAXTXSEGS 32 #define MSK_TSO_MAXSGSIZE 4096 #define MSK_TSO_MAXSIZE (65535 + sizeof(struct ether_vlan_header)) /* * It seems that the hardware requires extra descriptors (LEs) to offload * TCP/UDP checksum, VLAN hardware tag insertion and TSO: * * 1 descriptor for TCP/UDP checksum offload. * 1 descriptor for VLAN hardware tag insertion.
* 1 descriptor for TSO(TCP Segmentation Offload) * 1 descriptor for each 64bits DMA transfers */ #ifdef MSK_64BIT_DMA #define MSK_RESERVED_TX_DESC_CNT (MSK_MAXTXSEGS + 3) #else #define MSK_RESERVED_TX_DESC_CNT 3 #endif #define MSK_JUMBO_FRAMELEN 9022 #define MSK_JUMBO_MTU (MSK_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN) #define MSK_MAX_FRAMELEN \ (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN) #define MSK_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN) struct msk_txdesc { struct mbuf *tx_m; bus_dmamap_t tx_dmamap; struct msk_tx_desc *tx_le; }; struct msk_rxdesc { struct mbuf *rx_m; bus_dmamap_t rx_dmamap; struct msk_rx_desc *rx_le; }; struct msk_chain_data { bus_dma_tag_t msk_parent_tag; bus_dma_tag_t msk_tx_tag; struct msk_txdesc msk_txdesc[MSK_TX_RING_CNT]; bus_dma_tag_t msk_rx_tag; struct msk_rxdesc msk_rxdesc[MSK_RX_RING_CNT]; bus_dma_tag_t msk_tx_ring_tag; bus_dma_tag_t msk_rx_ring_tag; bus_dmamap_t msk_tx_ring_map; bus_dmamap_t msk_rx_ring_map; bus_dmamap_t msk_rx_sparemap; bus_dma_tag_t msk_jumbo_rx_tag; struct msk_rxdesc msk_jumbo_rxdesc[MSK_JUMBO_RX_RING_CNT]; bus_dma_tag_t msk_jumbo_rx_ring_tag; bus_dmamap_t msk_jumbo_rx_ring_map; bus_dmamap_t msk_jumbo_rx_sparemap; uint16_t msk_tso_mtu; uint32_t msk_last_csum; uint32_t msk_tx_high_addr; int msk_tx_prod; int msk_tx_cons; int msk_tx_cnt; int msk_tx_put; int msk_rx_cons; int msk_rx_prod; int msk_rx_putwm; }; struct msk_ring_data { struct msk_tx_desc *msk_tx_ring; bus_addr_t msk_tx_ring_paddr; struct msk_rx_desc *msk_rx_ring; bus_addr_t msk_rx_ring_paddr; struct msk_rx_desc *msk_jumbo_rx_ring; bus_addr_t msk_jumbo_rx_ring_paddr; }; #define MSK_TX_RING_ADDR(sc, i) \ ((sc)->msk_rdata.msk_tx_ring_paddr + sizeof(struct msk_tx_desc) * (i)) #define MSK_RX_RING_ADDR(sc, i) \ ((sc)->msk_rdata.msk_rx_ring_paddr + sizeof(struct msk_rx_desc) * (i)) #define MSK_JUMBO_RX_RING_ADDR(sc, i) \ ((sc)->msk_rdata.msk_jumbo_rx_ring_paddr + sizeof(struct msk_rx_desc) * (i)) #define MSK_TX_RING_SZ \ (sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT) #define MSK_RX_RING_SZ \ (sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT) #define MSK_JUMBO_RX_RING_SZ \ (sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT) #define MSK_INC(x, y) (x) = (x + 1) % y #ifdef MSK_64BIT_DMA #define MSK_RX_INC(x, y) (x) = (x + 2) % y #define MSK_RX_BUF_CNT (MSK_RX_RING_CNT / 2) #define MSK_JUMBO_RX_BUF_CNT (MSK_JUMBO_RX_RING_CNT / 2) #else #define MSK_RX_INC(x, y) (x) = (x + 1) % y #define MSK_RX_BUF_CNT MSK_RX_RING_CNT #define MSK_JUMBO_RX_BUF_CNT MSK_JUMBO_RX_RING_CNT #endif #define MSK_PCI_BUS 0 #define MSK_PCIX_BUS 1 #define MSK_PEX_BUS 2 #define MSK_PROC_DEFAULT (MSK_RX_RING_CNT / 2) #define MSK_PROC_MIN 30 #define MSK_PROC_MAX (MSK_RX_RING_CNT - 1) #define MSK_INT_HOLDOFF_DEFAULT 100 #define MSK_TX_TIMEOUT 5 #define MSK_PUT_WM 10 struct msk_mii_data { int port; uint32_t pmd; int mii_flags; }; /* Forward decl. */ struct msk_if_softc; struct msk_hw_stats { /* Rx stats. */ uint32_t rx_ucast_frames; uint32_t rx_bcast_frames; uint32_t rx_pause_frames; uint32_t rx_mcast_frames; uint32_t rx_crc_errs; uint32_t rx_spare1; uint64_t rx_good_octets; uint64_t rx_bad_octets; uint32_t rx_runts; uint32_t rx_runt_errs; uint32_t rx_pkts_64; uint32_t rx_pkts_65_127; uint32_t rx_pkts_128_255; uint32_t rx_pkts_256_511; uint32_t rx_pkts_512_1023; uint32_t rx_pkts_1024_1518; uint32_t rx_pkts_1519_max; uint32_t rx_pkts_too_long; uint32_t rx_pkts_jabbers; uint32_t rx_spare2; uint32_t rx_fifo_oflows; uint32_t rx_spare3; /* Tx stats. 
*/ uint32_t tx_ucast_frames; uint32_t tx_bcast_frames; uint32_t tx_pause_frames; uint32_t tx_mcast_frames; uint64_t tx_octets; uint32_t tx_pkts_64; uint32_t tx_pkts_65_127; uint32_t tx_pkts_128_255; uint32_t tx_pkts_256_511; uint32_t tx_pkts_512_1023; uint32_t tx_pkts_1024_1518; uint32_t tx_pkts_1519_max; uint32_t tx_spare1; uint32_t tx_colls; uint32_t tx_late_colls; uint32_t tx_excess_colls; uint32_t tx_multi_colls; uint32_t tx_single_colls; uint32_t tx_underflows; }; /* Softc for the Marvell Yukon II controller. */ struct msk_softc { struct resource *msk_res[1]; /* I/O resource */ struct resource_spec *msk_res_spec; struct resource *msk_irq[1]; /* IRQ resources */ struct resource_spec *msk_irq_spec; void *msk_intrhand; /* irq handler handle */ device_t msk_dev; uint8_t msk_hw_id; uint8_t msk_hw_rev; uint8_t msk_bustype; uint8_t msk_num_port; int msk_expcap; int msk_pcixcap; int msk_ramsize; /* amount of SRAM on NIC */ uint32_t msk_pmd; /* physical media type */ uint32_t msk_intrmask; uint32_t msk_intrhwemask; uint32_t msk_pflags; int msk_clock; struct msk_if_softc *msk_if[2]; device_t msk_devs[2]; int msk_txqsize; int msk_rxqsize; int msk_txqstart[2]; int msk_txqend[2]; int msk_rxqstart[2]; int msk_rxqend[2]; bus_dma_tag_t msk_stat_tag; bus_dmamap_t msk_stat_map; struct msk_stat_desc *msk_stat_ring; bus_addr_t msk_stat_ring_paddr; int msk_int_holdoff; int msk_process_limit; int msk_stat_cons; int msk_stat_count; struct mtx msk_mtx; }; #define MSK_LOCK(_sc) mtx_lock(&(_sc)->msk_mtx) #define MSK_UNLOCK(_sc) mtx_unlock(&(_sc)->msk_mtx) #define MSK_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->msk_mtx, MA_OWNED) #define MSK_IF_LOCK(_sc) MSK_LOCK((_sc)->msk_softc) #define MSK_IF_UNLOCK(_sc) MSK_UNLOCK((_sc)->msk_softc) #define MSK_IF_LOCK_ASSERT(_sc) MSK_LOCK_ASSERT((_sc)->msk_softc) #define MSK_USECS(sc, us) ((sc)->msk_clock * (us)) /* Softc for each logical interface. */ struct msk_if_softc { struct ifnet *msk_ifp; /* interface info */ device_t msk_miibus; device_t msk_if_dev; int32_t msk_port; /* port # on controller */ int msk_framesize; int msk_phytype; int msk_phyaddr; uint32_t msk_flags; #define MSK_FLAG_MSI 0x0001 #define MSK_FLAG_FASTETHER 0x0004 #define MSK_FLAG_JUMBO 0x0008 #define MSK_FLAG_JUMBO_NOCSUM 0x0010 #define MSK_FLAG_RAMBUF 0x0020 #define MSK_FLAG_DESCV2 0x0040 #define MSK_FLAG_AUTOTX_CSUM 0x0080 #define MSK_FLAG_NOHWVLAN 0x0100 #define MSK_FLAG_NORXCHK 0x0200 #define MSK_FLAG_NORX_CSUM 0x0400 #define MSK_FLAG_SUSPEND 0x2000 #define MSK_FLAG_DETACH 0x4000 #define MSK_FLAG_LINK 0x8000 struct callout msk_tick_ch; int msk_watchdog_timer; uint32_t msk_txq; /* Tx Async Queue offset */ uint32_t msk_txsq; /* Tx Sync Queue offset */ uint32_t msk_rxq; /* Rx Queue offset */ struct msk_chain_data msk_cdata; struct msk_ring_data msk_rdata; struct msk_softc *msk_softc; /* parent controller */ struct msk_hw_stats msk_stats; int msk_if_flags; uint16_t msk_vtag; /* VLAN tag id. */ uint32_t msk_csum; }; #define MSK_TIMEOUT 1000 #define MSK_PHY_POWERUP 1 #define MSK_PHY_POWERDOWN 0 Index: head/sys/dev/mvs/mvs.h =================================================================== --- head/sys/dev/mvs/mvs.h (revision 258779) +++ head/sys/dev/mvs/mvs.h (revision 258780) @@ -1,661 +1,661 @@ /*- * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include "mvs_if.h" /* Chip registers */ #define CHIP_PCIEIC 0x1900 /* PCIe Interrupt Cause */ #define CHIP_PCIEIM 0x1910 /* PCIe Interrupt Mask */ #define CHIP_PCIIC 0x1d58 /* PCI Interrupt Cause */ #define CHIP_PCIIM 0x1d5c /* PCI Interrupt Mask */ #define CHIP_MIC 0x1d60 /* Main Interrupt Cause */ #define CHIP_MIM 0x1d64 /* Main Interrupt Mask */ #define CHIP_SOC_MIC 0x20 /* SoC Main Interrupt Cause */ #define CHIP_SOC_MIM 0x24 /* SoC Main Interrupt Mask */ #define IC_ERR_IRQ (1 << 0) /* shift by (2 * port #) */ #define IC_DONE_IRQ (1 << 1) /* shift by (2 * port #) */ #define IC_HC0 0x000001ff /* bits 0-8 = HC0 */ #define IC_HC_SHIFT 9 /* HC1 shift */ #define IC_HC1 (IC_HC0 << IC_HC_SHIFT) /* 9-17 = HC1 */ #define IC_ERR_HC0 0x00000055 /* HC0 ERR_IRQ */ #define IC_DONE_HC0 0x000000aa /* HC0 DONE_IRQ */ #define IC_ERR_HC1 (IC_ERR_HC0 << IC_HC_SHIFT) /* HC1 ERR_IRQ */ #define IC_DONE_HC1 (IC_DONE_HC0 << IC_HC_SHIFT) /* HC1 DONE_IRQ */ #define IC_HC0_COAL_DONE (1 << 8) /* HC0 IRQ coalescing */ #define IC_HC1_COAL_DONE (1 << 17) /* HC1 IRQ coalescing */ #define IC_PCI_ERR (1 << 18) #define IC_TRAN_COAL_LO_DONE (1 << 19) /* transaction coalescing */ #define IC_TRAN_COAL_HI_DONE (1 << 20) /* transaction coalescing */ #define IC_ALL_PORTS_COAL_DONE (1 << 21) /* GEN_II(E) IRQ coalescing */ #define IC_GPIO_INT (1 << 22) #define IC_SELF_INT (1 << 23) #define IC_TWSI_INT (1 << 24) #define IC_MAIN_RSVD (0xfe000000) /* bits 31-25 */ #define IC_MAIN_RSVD_5 (0xfff10000) /* bits 31-19 */ #define IC_MAIN_RSVD_SOC (0xfffffec0) /* bits 31-9, 7-6 */ #define CHIP_SOC_LED 0x2C /* SoC LED Configuration */ /* Additional mask for SoC devices with less than 4 channels */ #define CHIP_SOC_HC0_MASK(num) (0xff >> ((4 - (num)) * 2)) /* Chip CCC registers */ #define CHIP_ICC 0x18008 #define CHIP_ICC_ALL_PORTS (1 << 4) /* all ports irq event */ #define CHIP_ICT 0x180cc #define CHIP_ITT 0x180d0 #define CHIP_TRAN_COAL_CAUSE_LO 0x18088 #define CHIP_TRAN_COAL_CAUSE_HI 0x1808c /* Host Controller registers */ #define HC_SIZE 0x10000 #define HC_OFFSET 0x20000 #define HC_BASE(hc) ((hc) * HC_SIZE + HC_OFFSET) #define HC_CFG 0x0 /* Configuration */ #define HC_CFG_TIMEOUT_MASK (0xff << 0) #define HC_CFG_NODMABS (1 << 8) #define HC_CFG_NOEDMABS (1 << 9) #define HC_CFG_NOPRDBS (1 << 10) #define HC_CFG_TIMEOUTEN (1 << 16) /* Timer Enable */ #define HC_CFG_COALDIS(p) (1 << ((p) + 24))/* Coalescing Disable*/ 
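The main interrupt cause layout above packs two bits per port ("shift by (2 * port #)"), so a handler can derive any port's bits from IC_ERR_IRQ and IC_DONE_IRQ. A minimal illustrative helper (the function name is hypothetical, not driver code):

/* Illustrative only: test one port's bits in the main cause register. */
static int
mvs_port_irq_pending(uint32_t mic, int port)
{

	return ((mic & ((IC_ERR_IRQ | IC_DONE_IRQ) << (2 * port))) != 0);
}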
#define HC_RQOP 0x4 /* Request Queue Out-Pointer */ #define HC_RQIP 0x8 /* Response Queue In-Pointer */ #define HC_ICT 0xc /* Interrupt Coalescing Threshold */ #define HC_ICT_SAICOALT_MASK 0x000000ff #define HC_ITT 0x10 /* Interrupt Time Threshold */ #define HC_ITT_SAITMTH_MASK 0x00ffffff #define HC_IC 0x14 /* Interrupt Cause */ #define HC_IC_DONE(p) (1 << (p)) /* SaCrpb/DMA Done */ #define HC_IC_COAL (1 << 4) /* Intr Coalescing */ #define HC_IC_DEV(p) (1 << ((p) + 8)) /* Device Intr */ /* Port registers */ #define PORT_SIZE 0x2000 #define PORT_OFFSET 0x2000 #define PORT_BASE(hc) ((hc) * PORT_SIZE + PORT_OFFSET) #define EDMA_CFG 0x0 /* Configuration */ #define EDMA_CFG_RESERVED (0x1f << 0) /* Queue len ? */ #define EDMA_CFG_ESATANATVCMDQUE (1 << 5) #define EDMA_CFG_ERDBSZ (1 << 8) #define EDMA_CFG_EQUE (1 << 9) #define EDMA_CFG_ERDBSZEXT (1 << 11) #define EDMA_CFG_RESERVED2 (1 << 12) #define EDMA_CFG_EWRBUFFERLEN (1 << 13) #define EDMA_CFG_EDEVERR (1 << 14) #define EDMA_CFG_EEDMAFBS (1 << 16) #define EDMA_CFG_ECUTTHROUGHEN (1 << 17) #define EDMA_CFG_EEARLYCOMPLETIONEN (1 << 18) #define EDMA_CFG_EEDMAQUELEN (1 << 19) #define EDMA_CFG_EHOSTQUEUECACHEEN (1 << 22) #define EDMA_CFG_EMASKRXPM (1 << 23) #define EDMA_CFG_RESUMEDIS (1 << 24) #define EDMA_CFG_EDMAFBS (1 << 26) #define EDMA_T 0x4 /* Timer */ #define EDMA_IEC 0x8 /* Interrupt Error Cause */ #define EDMA_IEM 0xc /* Interrupt Error Mask */ #define EDMA_IE_EDEVERR (1 << 2) /* EDMA Device Error */ #define EDMA_IE_EDEVDIS (1 << 3) /* EDMA Dev Disconn */ #define EDMA_IE_EDEVCON (1 << 4) /* EDMA Dev Conn */ #define EDMA_IE_SERRINT (1 << 5) #define EDMA_IE_ESELFDIS (1 << 7) /* EDMA Self Disable */ #define EDMA_IE_ETRANSINT (1 << 8) /* Transport Layer */ #define EDMA_IE_EIORDYERR (1 << 12) /* EDMA IORdy Error */ #define EDMA_IE_LINKXERR_SATACRC (1 << 0) /* SATA CRC error */ #define EDMA_IE_LINKXERR_INTERNALFIFO (1 << 1) /* internal FIFO err */ #define EDMA_IE_LINKXERR_LINKLAYERRESET (1 << 2) /* Link Layer is reset by the reception of SYNC primitive from device */ #define EDMA_IE_LINKXERR_OTHERERRORS (1 << 3) /* * Link state errors, coding errors, or running disparity errors occur * during FIS reception. 
*/ #define EDMA_IE_LINKTXERR_FISTXABORTED (1 << 4) /* FIS Tx is aborted */ #define EDMA_IE_LINKCTLRXERR(x) ((x) << 13) /* Link Ctrl Recv Err */ #define EDMA_IE_LINKDATARXERR(x) ((x) << 17) /* Link Data Recv Err */ #define EDMA_IE_LINKCTLTXERR(x) ((x) << 21) /* Link Ctrl Tx Error */ #define EDMA_IE_LINKDATATXERR(x) ((x) << 26) /* Link Data Tx Error */ -#define EDMA_IE_TRANSPROTERR (1 << 31) /* Transport Proto E */ +#define EDMA_IE_TRANSPROTERR (1U << 31) /* Transport Proto E */ #define EDMA_IE_TRANSIENT (EDMA_IE_LINKCTLRXERR(0x0b) | \ EDMA_IE_LINKCTLTXERR(0x1f)) /* Non-fatal Errors */ #define EDMA_REQQBAH 0x10 /* Request Queue Base Address High */ #define EDMA_REQQIP 0x14 /* Request Queue In-Pointer */ #define EDMA_REQQOP 0x18 /* Request Queue Out-Pointer */ #define EDMA_REQQP_ERQQP_SHIFT 5 #define EDMA_REQQP_ERQQP_MASK 0x000003e0 #define EDMA_REQQP_ERQQBAP_MASK 0x00000c00 #define EDMA_REQQP_ERQQBA_MASK 0xfffff000 #define EDMA_RESQBAH 0x1c /* Response Queue Base Address High */ #define EDMA_RESQIP 0x20 /* Response Queue In-Pointer */ #define EDMA_RESQOP 0x24 /* Response Queue Out-Pointer */ #define EDMA_RESQP_ERPQP_SHIFT 3 #define EDMA_RESQP_ERPQP_MASK 0x000000f8 #define EDMA_RESQP_ERPQBAP_MASK 0x00000300 #define EDMA_RESQP_ERPQBA_MASK 0xfffffc00 #define EDMA_CMD 0x28 /* Command */ #define EDMA_CMD_EENEDMA (1 << 0) /* Enable EDMA */ #define EDMA_CMD_EDSEDMA (1 << 1) /* Disable EDMA */ #define EDMA_CMD_EATARST (1 << 2) /* ATA Device Reset */ #define EDMA_CMD_EEDMAFRZ (1 << 4) /* EDMA Freeze */ #define EDMA_TC 0x2c /* Test Control */ #define EDMA_S 0x30 /* Status */ #define EDMA_S_EDEVQUETAG(s) ((s) & 0x0000001f) #define EDMA_S_EDEVDIR_WRITE (0 << 5) #define EDMA_S_EDEVDIR_READ (1 << 5) #define EDMA_S_ECACHEEMPTY (1 << 6) #define EDMA_S_EDMAIDLE (1 << 7) #define EDMA_S_ESTATE(s) (((s) & 0x0000ff00) >> 8) #define EDMA_S_EIOID(s) (((s) & 0x003f0000) >> 16) #define EDMA_IORT 0x34 /* IORdy Timeout */ #define EDMA_CDT 0x40 /* Command Delay Threshold */ #define EDMA_HC 0x60 /* Halt Condition */ #define EDMA_UNKN_RESD 0x6C /* Unknown register */ #define EDMA_CQDCQOS(x) (0x90 + ((x) << 2)) /* NCQ Done/TCQ Outstanding Status */ /* ATA register defines */ #define ATA_DATA 0x100 /* (RW) data */ #define ATA_FEATURE 0x104 /* (W) feature */ #define ATA_F_DMA 0x01 /* enable DMA */ #define ATA_F_OVL 0x02 /* enable overlap */ #define ATA_ERROR 0x104 /* (R) error */ #define ATA_E_ILI 0x01 /* illegal length */ #define ATA_E_NM 0x02 /* no media */ #define ATA_E_ABORT 0x04 /* command aborted */ #define ATA_E_MCR 0x08 /* media change request */ #define ATA_E_IDNF 0x10 /* ID not found */ #define ATA_E_MC 0x20 /* media changed */ #define ATA_E_UNC 0x40 /* uncorrectable data */ #define ATA_E_ICRC 0x80 /* UDMA crc error */ #define ATA_E_ATAPI_SENSE_MASK 0xf0 /* ATAPI sense key mask */ #define ATA_COUNT 0x108 /* (W) sector count */ #define ATA_IREASON 0x108 /* (R) interrupt reason */ #define ATA_I_CMD 0x01 /* cmd (1) | data (0) */ #define ATA_I_IN 0x02 /* read (1) | write (0) */ #define ATA_I_RELEASE 0x04 /* released bus (1) */ #define ATA_I_TAGMASK 0xf8 /* tag mask */ #define ATA_SECTOR 0x10c /* (RW) sector # */ #define ATA_CYL_LSB 0x110 /* (RW) cylinder# LSB */ #define ATA_CYL_MSB 0x114 /* (RW) cylinder# MSB */ #define ATA_DRIVE 0x118 /* (W) Sector/Drive/Head */ #define ATA_D_LBA 0x40 /* use LBA addressing */ #define ATA_D_IBM 0xa0 /* 512 byte sectors, ECC */ #define ATA_COMMAND 0x11c /* (W) command */ #define ATA_STATUS 0x11c /* (R) status */ #define ATA_S_ERROR 0x01 /* error */ #define ATA_S_INDEX 0x02 /* index */
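The (1 << 31) to (1U << 31) hunk above is the substance of this revision: shifting 1 into the sign bit of an int is undefined behavior in C, and on common two's-complement ABIs the resulting negative int sign-extends when the macro's value is widened to a 64-bit type. A minimal standalone sketch (not driver code) of the difference:

/*
 * Standalone illustration of why (1 << 31) is replaced by (1U << 31)
 * throughout this change; not part of any driver.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Formally UB; typically yields INT_MIN, which sign-extends. */
	uint64_t s = (uint64_t)(1 << 31);	/* 0xffffffff80000000 */
	/* Well defined; widening zero-extends. */
	uint64_t u = (uint64_t)(1U << 31);	/* 0x0000000080000000 */

	printf("signed: %#jx, unsigned: %#jx\n", (uintmax_t)s, (uintmax_t)u);
	return (0);
}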
#define ATA_S_CORR 0x04 /* data corrected */ #define ATA_S_DRQ 0x08 /* data request */ #define ATA_S_DSC 0x10 /* drive seek completed */ #define ATA_S_SERVICE 0x10 /* drive needs service */ #define ATA_S_DWF 0x20 /* drive write fault */ #define ATA_S_DMA 0x20 /* DMA ready */ #define ATA_S_READY 0x40 /* drive ready */ #define ATA_S_BUSY 0x80 /* busy */ #define ATA_CONTROL 0x120 /* (W) control */ #define ATA_A_IDS 0x02 /* disable interrupts */ #define ATA_A_RESET 0x04 /* RESET controller */ #define ATA_A_4BIT 0x08 /* 4 head bits */ #define ATA_A_HOB 0x80 /* High Order Byte enable */ #define ATA_ALTSTAT 0x120 /* (R) alternate status */ #define ATAPI_P_READ (ATA_S_DRQ | ATA_I_IN) #define ATAPI_P_WRITE (ATA_S_DRQ) #define ATAPI_P_CMDOUT (ATA_S_DRQ | ATA_I_CMD) #define ATAPI_P_DONEDRQ (ATA_S_DRQ | ATA_I_CMD | ATA_I_IN) #define ATAPI_P_DONE (ATA_I_CMD | ATA_I_IN) #define ATAPI_P_ABORT 0 /* Basic DMA Registers */ #define DMA_C 0x224 /* Basic DMA Command */ #define DMA_C_START (1 << 0) #define DMA_C_READ (1 << 3) #define DMA_C_DREGIONVALID (1 << 8) #define DMA_C_DREGIONLAST (1 << 9) #define DMA_C_CONTFROMPREV (1 << 10) #define DMA_C_DRBC(n) (((n) & 0xffff) << 16) #define DMA_S 0x228 /* Basic DMA Status */ #define DMA_S_ACT (1 << 0) /* Active */ #define DMA_S_ERR (1 << 1) /* Error */ #define DMA_S_PAUSED (1 << 2) /* Paused */ #define DMA_S_LAST (1 << 3) /* Last */ #define DMA_DTLBA 0x22c /* Descriptor Table Low Base Address */ #define DMA_DTLBA_MASK 0xfffffff0 #define DMA_DTHBA 0x230 /* Descriptor Table High Base Address */ #define DMA_DRLA 0x234 /* Data Region Low Address */ #define DMA_DRHA 0x238 /* Data Region High Address */ /* Serial-ATA Registers */ #define SATA_SS 0x300 /* SStatus */ #define SATA_SS_DET_MASK 0x0000000f #define SATA_SS_DET_NO_DEVICE 0x00000000 #define SATA_SS_DET_DEV_PRESENT 0x00000001 #define SATA_SS_DET_PHY_ONLINE 0x00000003 #define SATA_SS_DET_PHY_OFFLINE 0x00000004 #define SATA_SS_SPD_MASK 0x000000f0 #define SATA_SS_SPD_NO_SPEED 0x00000000 #define SATA_SS_SPD_GEN1 0x00000010 #define SATA_SS_SPD_GEN2 0x00000020 #define SATA_SS_SPD_GEN3 0x00000040 #define SATA_SS_IPM_MASK 0x00000f00 #define SATA_SS_IPM_NO_DEVICE 0x00000000 #define SATA_SS_IPM_ACTIVE 0x00000100 #define SATA_SS_IPM_PARTIAL 0x00000200 #define SATA_SS_IPM_SLUMBER 0x00000600 #define SATA_SE 0x304 /* SError */ #define SATA_SEIM 0x340 /* SError Interrupt Mask */ #define SATA_SE_DATA_CORRECTED 0x00000001 #define SATA_SE_COMM_CORRECTED 0x00000002 #define SATA_SE_DATA_ERR 0x00000100 #define SATA_SE_COMM_ERR 0x00000200 #define SATA_SE_PROT_ERR 0x00000400 #define SATA_SE_HOST_ERR 0x00000800 #define SATA_SE_PHY_CHANGED 0x00010000 #define SATA_SE_PHY_IERROR 0x00020000 #define SATA_SE_COMM_WAKE 0x00040000 #define SATA_SE_DECODE_ERR 0x00080000 #define SATA_SE_PARITY_ERR 0x00100000 #define SATA_SE_CRC_ERR 0x00200000 #define SATA_SE_HANDSHAKE_ERR 0x00400000 #define SATA_SE_LINKSEQ_ERR 0x00800000 #define SATA_SE_TRANSPORT_ERR 0x01000000 #define SATA_SE_UNKNOWN_FIS 0x02000000 #define SATA_SC 0x308 /* SControl */ #define SATA_SC_DET_MASK 0x0000000f #define SATA_SC_DET_IDLE 0x00000000 #define SATA_SC_DET_RESET 0x00000001 #define SATA_SC_DET_DISABLE 0x00000004 #define SATA_SC_SPD_MASK 0x000000f0 #define SATA_SC_SPD_NO_SPEED 0x00000000 #define SATA_SC_SPD_SPEED_GEN1 0x00000010 #define SATA_SC_SPD_SPEED_GEN2 0x00000020 #define SATA_SC_SPD_SPEED_GEN3 0x00000040 #define SATA_SC_IPM_MASK 0x00000f00 #define SATA_SC_IPM_NONE 0x00000000 #define SATA_SC_IPM_DIS_PARTIAL 0x00000100 #define SATA_SC_IPM_DIS_SLUMBER 0x00000200 #define 
SATA_SC_SPM_MASK 0x0000f000 #define SATA_SC_SPM_NONE 0x00000000 #define SATA_SC_SPM_PARTIAL 0x00001000 #define SATA_SC_SPM_SLUMBER 0x00002000 #define SATA_SC_SPM_ACTIVE 0x00004000 #define SATA_LTM 0x30c /* LTMode */ #define SATA_PHYM3 0x310 /* PHY Mode 3 */ #define SATA_PHYM4 0x314 /* PHY Mode 4 */ #define SATA_PHYM1 0x32c /* PHY Mode 1 */ #define SATA_PHYM2 0x330 /* PHY Mode 2 */ #define SATA_BISTC 0x334 /* BIST Control */ #define SATA_BISTDW1 0x338 /* BIST DW1 */ #define SATA_BISTDW2 0x33c /* BIST DW2 */ #define SATA_SATAICFG 0x050 /* Serial-ATA Interface Configuration */ #define SATA_SATAICFG_REFCLKCNF_20MHZ (0 << 0) #define SATA_SATAICFG_REFCLKCNF_25MHZ (1 << 0) #define SATA_SATAICFG_REFCLKCNF_30MHZ (2 << 0) #define SATA_SATAICFG_REFCLKCNF_40MHZ (3 << 0) #define SATA_SATAICFG_REFCLKCNF_MASK (3 << 0) #define SATA_SATAICFG_REFCLKDIV_1 (0 << 2) #define SATA_SATAICFG_REFCLKDIV_2 (1 << 2) /* Used 20 or 25MHz */ #define SATA_SATAICFG_REFCLKDIV_4 (2 << 2) /* Used 40MHz */ #define SATA_SATAICFG_REFCLKDIV_3 (3 << 2) /* Used 30MHz */ #define SATA_SATAICFG_REFCLKDIV_MASK (3 << 2) #define SATA_SATAICFG_REFCLKFEEDDIV_50 (0 << 4) /* or 100, when Gen2En is 1 */ #define SATA_SATAICFG_REFCLKFEEDDIV_60 (1 << 4) /* or 120. Used 25MHz */ #define SATA_SATAICFG_REFCLKFEEDDIV_75 (2 << 4) /* or 150. Used 20MHz */ #define SATA_SATAICFG_REFCLKFEEDDIV_90 (3 << 4) /* or 180 */ #define SATA_SATAICFG_REFCLKFEEDDIV_MASK (3 << 4) #define SATA_SATAICFG_PHYSSCEN (1 << 6) #define SATA_SATAICFG_GEN2EN (1 << 7) #define SATA_SATAICFG_COMMEN (1 << 8) #define SATA_SATAICFG_PHYSHUTDOWN (1 << 9) #define SATA_SATAICFG_TARGETMODE (1 << 10) /* 1 = Initiator */ #define SATA_SATAICFG_COMCHANNEL (1 << 11) #define SATA_SATAICFG_IGNOREBSY (1 << 24) #define SATA_SATAICFG_LINKRSTEN (1 << 25) #define SATA_SATAICFG_CMDRETXDS (1 << 26) #define SATA_SATAICTL 0x344 /* Serial-ATA Interface Control */ #define SATA_SATAICTL_PMPTX_MASK 0x0000000f #define SATA_SATAICTL_PMPTX_SHIFT 0 #define SATA_SATAICTL_VUM (1 << 8) #define SATA_SATAICTL_VUS (1 << 9) #define SATA_SATAICTL_EDMAACT (1 << 16) #define SATA_SATAICTL_CLEARSTAT (1 << 24) #define SATA_SATAICTL_SRST (1 << 25) #define SATA_SATAITC 0x348 /* Serial-ATA Interface Test Control */ #define SATA_SATAIS 0x34c /* Serial-ATA Interface Status */ #define SATA_VU 0x35c /* Vendor Unique */ #define SATA_FISC 0x360 /* FIS Configuration */ #define SATA_FISC_FISWAIT4RDYEN_B0 (1 << 0) /* Device to Host FIS */ #define SATA_FISC_FISWAIT4RDYEN_B1 (1 << 1) /* SDB FIS rcv with bit 0 */ #define SATA_FISC_FISWAIT4RDYEN_B2 (1 << 2) /* DMA Activate FIS */ #define SATA_FISC_FISWAIT4RDYEN_B3 (1 << 3) /* DMA Setup FIS */ #define SATA_FISC_FISWAIT4RDYEN_B4 (1 << 4) /* Data FIS first DW */ #define SATA_FISC_FISWAIT4RDYEN_B5 (1 << 5) /* Data FIS entire FIS */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B0 (1 << 8) /* Device to Host FIS with or */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B1 (1 << 9) /* SDB FIS rcv with bit */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B2 (1 << 10) /* SDB FIS rcv with */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B3 (1 << 11) /* BIST Acivate FIS */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B4 (1 << 12) /* PIO Setup FIS */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B5 (1 << 13) /* Data FIS with Link error */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B6 (1 << 14) /* Unrecognized FIS type */ #define SATA_FISC_FISWAIT4HOSTRDYEN_B7 (1 << 15) /* Any FIS */ #define SATA_FISC_FISDMAACTIVATESYNCRESP (1 << 16) #define SATA_FISC_FISUNRECTYPECONT (1 << 17) #define SATA_FISIC 0x364 /* FIS Interrupt Cause */ #define SATA_FISIM 0x368 /* FIS Interrupt 
Mask */ #define SATA_FISDW0 0x370 /* FIS DW0 */ #define SATA_FISDW1 0x374 /* FIS DW1 */ #define SATA_FISDW2 0x378 /* FIS DW2 */ #define SATA_FISDW3 0x37c /* FIS DW3 */ #define SATA_FISDW4 0x380 /* FIS DW4 */ #define SATA_FISDW5 0x384 /* FIS DW5 */ #define SATA_FISDW6 0x388 /* FIS DW6 */ #define SATA_PHYM9_GEN2 0x398 #define SATA_PHYM9_GEN1 0x39c #define SATA_PHYCFG_OFS 0x3a0 /* 65nm SoCs only */ #define MVS_MAX_PORTS 8 #define MVS_MAX_SLOTS 32 /* Pessimistic prognosis on number of required S/G entries */ #define MVS_SG_ENTRIES (btoc(MAXPHYS) + 1) /* EDMA Command Request Block (CRQB) Data */ struct mvs_crqb { uint32_t cprdbl; /* cPRD Descriptor Table Base Low Address */ uint32_t cprdbh; /* cPRD Descriptor Table Base High Address */ uint16_t ctrlflg; /* Control Flags */ #define MVS_CRQB_READ 0x0001 #define MVS_CRQB_TAG_MASK 0x003e #define MVS_CRQB_TAG_SHIFT 1 #define MVS_CRQB_PMP_MASK 0xf000 #define MVS_CRQB_PMP_SHIFT 12 uint8_t cmd[22]; } __packed; struct mvs_crqb_gen2e { uint32_t cprdbl; /* cPRD Descriptor Table Base Low Address */ uint32_t cprdbh; /* cPRD Descriptor Table Base High Address */ uint32_t ctrlflg; /* Control Flags */ #define MVS_CRQB2E_READ 0x00000001 #define MVS_CRQB2E_DTAG_MASK 0x0000003e #define MVS_CRQB2E_DTAG_SHIFT 1 #define MVS_CRQB2E_PMP_MASK 0x0000f000 #define MVS_CRQB2E_PMP_SHIFT 12 #define MVS_CRQB2E_CPRD 0x00010000 #define MVS_CRQB2E_HTAG_MASK 0x003e0000 #define MVS_CRQB2E_HTAG_SHIFT 17 uint32_t drbc; /* Data Region Byte Count */ uint8_t cmd[16]; } __packed; /* EDMA Physical Region Descriptors (ePRD) Table Data Structure */ struct mvs_eprd { uint32_t prdbal; /* Address bits[31:1] */ uint32_t bytecount; /* Byte Count */ #define MVS_EPRD_MASK 0x0000ffff /* max 64KB */ #define MVS_EPRD_MAX (MVS_EPRD_MASK + 1) #define MVS_EPRD_EOF 0x80000000 uint32_t prdbah; /* Address bits[63:32] */ uint32_t resv; } __packed; /* Command request blocks. 32 commands. First 1Kbyte aligned. */ #define MVS_CRQB_OFFSET 0 #define MVS_CRQB_SIZE 32 /* sizeof(struct mvs_crqb) */ #define MVS_CRQB_MASK 0x000003e0 #define MVS_CRQB_SHIFT 5 #define MVS_CRQB_TO_ADDR(slot) ((slot) << MVS_CRQB_SHIFT) #define MVS_ADDR_TO_CRQB(addr) (((addr) & MVS_CRQB_MASK) >> MVS_CRQB_SHIFT) /* ePRD blocks. Up to 32 commands, each 16-byte aligned. */ #define MVS_EPRD_OFFSET (MVS_CRQB_OFFSET + MVS_CRQB_SIZE * MVS_MAX_SLOTS) #define MVS_EPRD_SIZE (MVS_SG_ENTRIES * 16) /* sizeof(struct mvs_eprd) */ /* Request work area. */ #define MVS_WORKRQ_SIZE (MVS_EPRD_OFFSET + MVS_EPRD_SIZE * MVS_MAX_SLOTS) /* EDMA Command Response Block (CRPB) Data */ struct mvs_crpb { uint16_t id; /* CRPB ID */ #define MVS_CRPB_TAG_MASK 0x001F #define MVS_CRPB_TAG_SHIFT 0 uint16_t rspflg; /* CRPB Response Flags */ #define MVS_CRPB_EDMASTS_MASK 0x007F #define MVS_CRPB_EDMASTS_SHIFT 0 #define MVS_CRPB_ATASTS_MASK 0xFF00 #define MVS_CRPB_ATASTS_SHIFT 8 uint32_t ts; /* CRPB Time Stamp */ } __packed; /* Command response blocks. 32 commands. First 256-byte aligned. */ #define MVS_CRPB_OFFSET 0 #define MVS_CRPB_SIZE sizeof(struct mvs_crpb) #define MVS_CRPB_MASK 0x000000f8 #define MVS_CRPB_SHIFT 3 #define MVS_CRPB_TO_ADDR(slot) ((slot) << MVS_CRPB_SHIFT) #define MVS_ADDR_TO_CRPB(addr) (((addr) & MVS_CRPB_MASK) >> MVS_CRPB_SHIFT)
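An illustrative consistency sketch (the helper is hypothetical, not driver code) for the macro pair just defined: MVS_CRPB_TO_ADDR() and MVS_ADDR_TO_CRPB() are exact inverses for all 32 slots, since the 0xf8 mask covers slot << 3 for slots 0 through 31.

/* Illustrative only: verify the CRPB slot/address encoding inverts. */
static int
mvs_crpb_macros_consistent(void)
{
	int slot;

	for (slot = 0; slot < MVS_MAX_SLOTS; slot++)
		if (MVS_ADDR_TO_CRPB(MVS_CRPB_TO_ADDR(slot)) != slot)
			return (0);
	return (1);
}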
/* Reply work area. */ #define MVS_WORKRP_SIZE (MVS_CRPB_OFFSET + MVS_CRPB_SIZE * MVS_MAX_SLOTS) /* misc defines */ #define ATA_IRQ_RID 0 #define ATA_INTR_FLAGS (INTR_MPSAFE|INTR_TYPE_BIO|INTR_ENTROPY) struct ata_dmaslot { bus_dmamap_t data_map; /* Data DMA map */ bus_addr_t addr; /* Data address */ uint16_t len; /* Data size */ }; /* structure holding DMA related information */ struct mvs_dma { bus_dma_tag_t workrq_tag; /* Request workspace DMA tag */ bus_dmamap_t workrq_map; /* Request workspace DMA map */ uint8_t *workrq; /* Request workspace */ bus_addr_t workrq_bus; /* Request bus address */ bus_dma_tag_t workrp_tag; /* Reply workspace DMA tag */ bus_dmamap_t workrp_map; /* Reply workspace DMA map */ uint8_t *workrp; /* Reply workspace */ bus_addr_t workrp_bus; /* Reply bus address */ bus_dma_tag_t data_tag; /* Data DMA tag */ }; enum mvs_slot_states { MVS_SLOT_EMPTY, MVS_SLOT_LOADING, MVS_SLOT_RUNNING, MVS_SLOT_EXECUTING }; struct mvs_slot { device_t dev; /* Device handle */ int slot; /* Number of this slot */ int tag; /* Used command tag */ enum mvs_slot_states state; /* Slot state */ union ccb *ccb; /* CCB occupying slot */ struct ata_dmaslot dma; /* DMA data of this slot */ struct callout timeout; /* Execution timeout */ }; struct mvs_device { int revision; int mode; u_int bytecount; u_int atapi; u_int tags; u_int caps; }; enum mvs_edma_mode { MVS_EDMA_UNKNOWN, MVS_EDMA_OFF, MVS_EDMA_ON, MVS_EDMA_QUEUED, MVS_EDMA_NCQ, }; /* structure describing an ATA channel */ struct mvs_channel { device_t dev; /* Device handle */ int unit; /* Physical channel */ struct resource *r_mem; /* Memory of this channel */ struct resource *r_irq; /* Interrupt of this channel */ void *ih; /* Interrupt handle */ struct mvs_dma dma; /* DMA data */ struct cam_sim *sim; struct cam_path *path; int quirks; #define MVS_Q_GENI 1 #define MVS_Q_GENII 2 #define MVS_Q_GENIIE 4 #define MVS_Q_SOC 8 #define MVS_Q_CT 16 #define MVS_Q_SOC65 32 int pm_level; /* power management level */ struct mvs_slot slot[MVS_MAX_SLOTS]; union ccb *hold[MVS_MAX_SLOTS]; int holdtag[MVS_MAX_SLOTS]; /* Tags used for held commands. */ struct mtx mtx; /* state lock */ int devices; /* What is present */ int pm_present; /* PM presence reported */ enum mvs_edma_mode curr_mode; /* Current EDMA mode */ int fbs_enabled; /* FIS-based switching enabled */ uint32_t oslots; /* Occupied slots */ uint32_t otagspd[16]; /* Occupied device tags */ uint32_t rslots; /* Running slots */ uint32_t aslots; /* Slots with atomic commands */ uint32_t eslots; /* Slots in error */ uint32_t toslots; /* Slots in timeout */ int numrslots; /* Number of running slots */ int numrslotspd[16];/* Number of running slots per dev */ int numpslots; /* Number of PIO slots */ int numdslots; /* Number of DMA slots */ int numtslots; /* Number of NCQ slots */ int numtslotspd[16];/* Number of NCQ slots per dev */ int numhslots; /* Number of held slots */ int recoverycmd; /* Our READ LOG active */ int fatalerr; /* Fatal error happened */ int lastslot; /* Last used slot */ int taggedtarget; /* Last tagged target */ int resetting; /* Hard-reset in progress. */ int resetpolldiv; /* Hard-reset poll divider. */
int out_idx; /* Next written CRQB */ int in_idx; /* Next read CRPB */ u_int transfersize; /* PIO transfer size */ u_int donecount; /* PIO bytes sent/received */ u_int basic_dma; /* Basic DMA used for ATAPI */ u_int fake_busy; /* Fake busy bit after command submission */ union ccb *frozen; /* Frozen command */ struct callout pm_timer; /* Power management events */ struct callout reset_timer; /* Hard-reset timeout */ struct mvs_device user[16]; /* User-specified settings */ struct mvs_device curr[16]; /* Current settings */ }; /* structure describing a MVS controller */ struct mvs_controller { device_t dev; int r_rid; struct resource *r_mem; struct rman sc_iomem; struct mvs_controller_irq { struct resource *r_irq; void *handle; int r_irq_rid; } irq; int quirks; int channels; int ccc; /* CCC timeout */ int cccc; /* CCC commands */ struct mtx mtx; /* MIM access lock */ int gmim; /* Globally wanted MIM bits */ int pmim; /* Port wanted MIM bits */ int mim; /* Current MIM bits */ int msi; /* MSI enabled */ int msia; /* MSI active */ struct { void (*function)(void *); void *argument; } interrupt[MVS_MAX_PORTS]; }; enum mvs_err_type { MVS_ERR_NONE, /* No error */ MVS_ERR_INVALID, /* Error detected by us before submitting. */ MVS_ERR_INNOCENT, /* Innocent victim. */ MVS_ERR_TFE, /* Task File Error. */ MVS_ERR_SATA, /* SATA error. */ MVS_ERR_TIMEOUT, /* Command execution timeout. */ MVS_ERR_NCQ, /* NCQ command error. CCB should be put on hold * until READ LOG is executed to reveal the error. */ }; struct mvs_intr_arg { void *arg; u_int cause; }; extern devclass_t mvs_devclass; /* macros to hide bus space ugliness */ #define ATA_INB(res, offset) \ bus_read_1((res), (offset)) #define ATA_INW(res, offset) \ bus_read_2((res), (offset)) #define ATA_INL(res, offset) \ bus_read_4((res), (offset)) #define ATA_INSW(res, offset, addr, count) \ bus_read_multi_2((res), (offset), (addr), (count)) #define ATA_INSW_STRM(res, offset, addr, count) \ bus_read_multi_stream_2((res), (offset), (addr), (count)) #define ATA_INSL(res, offset, addr, count) \ bus_read_multi_4((res), (offset), (addr), (count)) #define ATA_INSL_STRM(res, offset, addr, count) \ bus_read_multi_stream_4((res), (offset), (addr), (count)) #define ATA_OUTB(res, offset, value) \ bus_write_1((res), (offset), (value)) #define ATA_OUTW(res, offset, value) \ bus_write_2((res), (offset), (value)) #define ATA_OUTL(res, offset, value) \ bus_write_4((res), (offset), (value)) #define ATA_OUTSW(res, offset, addr, count) \ bus_write_multi_2((res), (offset), (addr), (count)) #define ATA_OUTSW_STRM(res, offset, addr, count) \ bus_write_multi_stream_2((res), (offset), (addr), (count)) #define ATA_OUTSL(res, offset, addr, count) \ bus_write_multi_4((res), (offset), (addr), (count)) #define ATA_OUTSL_STRM(res, offset, addr, count) \ bus_write_multi_stream_4((res), (offset), (addr), (count)) Index: head/sys/dev/mxge/mxge_mcp.h =================================================================== --- head/sys/dev/mxge/mxge_mcp.h (revision 258779) +++ head/sys/dev/mxge/mxge_mcp.h (revision 258780) @@ -1,522 +1,522 @@ /******************************************************************************* Copyright (c) 2006-2009, Myricom Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2.
Neither the name of the Myricom Inc, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. $FreeBSD$ ***************************************************************************/ #ifndef _myri10ge_mcp_h #define _myri10ge_mcp_h #define MXGEFW_VERSION_MAJOR 1 #define MXGEFW_VERSION_MINOR 4 #if defined MXGEFW && !defined _stdint_h_ typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef signed long long int64_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; typedef unsigned long long uint64_t; #endif /* 8 Bytes */ struct mcp_dma_addr { uint32_t high; uint32_t low; }; typedef struct mcp_dma_addr mcp_dma_addr_t; /* 4 Bytes */ struct mcp_slot { uint16_t checksum; uint16_t length; }; typedef struct mcp_slot mcp_slot_t; #ifdef MXGEFW_NDIS /* 8-byte descriptor, exclusively used by NDIS drivers. */ struct mcp_slot_8 { /* Place hash value at the top so it gets written before length. * The driver polls length. */ uint32_t hash; uint16_t checksum; uint16_t length; }; typedef struct mcp_slot_8 mcp_slot_8_t; /* Two bits of length in mcp_slot are used to indicate hash type. */ #define MXGEFW_RSS_HASH_NULL (0 << 14) /* bit 15:14 = 00 */ #define MXGEFW_RSS_HASH_IPV4 (1 << 14) /* bit 15:14 = 01 */ #define MXGEFW_RSS_HASH_TCP_IPV4 (2 << 14) /* bit 15:14 = 10 */ #define MXGEFW_RSS_HASH_MASK (3 << 14) /* bit 15:14 = 11 */ #endif /* 64 Bytes */ struct mcp_cmd { uint32_t cmd; uint32_t data0; /* will be low portion if data > 32 bits */ /* 8 */ uint32_t data1; /* will be high portion if data > 32 bits */ uint32_t data2; /* currently unused */ /* 16 */ struct mcp_dma_addr response_addr; /* 24 */ uint8_t pad[40]; }; typedef struct mcp_cmd mcp_cmd_t; /* 8 Bytes */ struct mcp_cmd_response { uint32_t data; uint32_t result; }; typedef struct mcp_cmd_response mcp_cmd_response_t; /* Flags used in mcp_kreq_ether_send_t: The SMALL flag is only needed in the first segment. It is raised for packets whose total length is less than or equal to 512 bytes. The CKSUM flag must be set in all segments. The PADDED flag is set if the packet needs to be padded, and it must be set for all segments. MXGEFW_FLAGS_ALIGN_ODD must be set if the cumulative length of all previous segments was odd. */ #define MXGEFW_FLAGS_SMALL 0x1 #define MXGEFW_FLAGS_TSO_HDR 0x1 #define MXGEFW_FLAGS_FIRST 0x2 #define MXGEFW_FLAGS_ALIGN_ODD 0x4 #define MXGEFW_FLAGS_CKSUM 0x8 #define MXGEFW_FLAGS_TSO_LAST 0x8 #define MXGEFW_FLAGS_NO_TSO 0x10 #define MXGEFW_FLAGS_TSO_CHOP 0x10 #define MXGEFW_FLAGS_TSO_PLD 0x20
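A small illustrative sketch (not Myricom code; the helper and its arguments are hypothetical) of applying the per-segment flag rules described in the comment above:

/*
 * Illustrative only: per-segment flags for a checksummed packet.
 * SMALL is set on the first segment only, for packets <= 512 bytes
 * total; CKSUM goes in every segment; ALIGN_ODD when the cumulative
 * length of the preceding segments is odd.
 */
static uint8_t
mxge_seg_flags(int first_seg, int pkt_len, int prev_cum_len)
{
	uint8_t flags;

	flags = MXGEFW_FLAGS_CKSUM;		/* required in all segments */
	if (first_seg) {
		flags |= MXGEFW_FLAGS_FIRST;
		if (pkt_len <= 512)
			flags |= MXGEFW_FLAGS_SMALL;
	}
	if (prev_cum_len & 1)
		flags |= MXGEFW_FLAGS_ALIGN_ODD;
	return (flags);
}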
#define MXGEFW_SEND_SMALL_SIZE 1520 #define MXGEFW_MAX_MTU 9400 union mcp_pso_or_cumlen { uint16_t pseudo_hdr_offset; uint16_t cum_len; }; typedef union mcp_pso_or_cumlen mcp_pso_or_cumlen_t; #define MXGEFW_MAX_SEND_DESC 12 #define MXGEFW_PAD 2 /* 16 Bytes */ struct mcp_kreq_ether_send { uint32_t addr_high; uint32_t addr_low; uint16_t pseudo_hdr_offset; uint16_t length; uint8_t pad; uint8_t rdma_count; uint8_t cksum_offset; /* where to start computing cksum */ uint8_t flags; /* as defined above */ }; typedef struct mcp_kreq_ether_send mcp_kreq_ether_send_t; /* 8 Bytes */ struct mcp_kreq_ether_recv { uint32_t addr_high; uint32_t addr_low; }; typedef struct mcp_kreq_ether_recv mcp_kreq_ether_recv_t; /* Commands */ #define MXGEFW_BOOT_HANDOFF 0xfc0000 #define MXGEFW_BOOT_DUMMY_RDMA 0xfc01c0 #define MXGEFW_ETH_CMD 0xf80000 #define MXGEFW_ETH_SEND_4 0x200000 #define MXGEFW_ETH_SEND_1 0x240000 #define MXGEFW_ETH_SEND_2 0x280000 #define MXGEFW_ETH_SEND_3 0x2c0000 #define MXGEFW_ETH_RECV_SMALL 0x300000 #define MXGEFW_ETH_RECV_BIG 0x340000 #define MXGEFW_ETH_SEND_GO 0x380000 #define MXGEFW_ETH_SEND_STOP 0x3C0000 #define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) #define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) enum myri10ge_mcp_cmd_type { MXGEFW_CMD_NONE = 0, /* Reset the mcp, it is left in a safe state, waiting for the driver to set all its parameters */ MXGEFW_CMD_RESET = 1, /* get the version number of the current firmware (may be available in the eeprom strings?) */ MXGEFW_GET_MCP_VERSION = 2, /* Parameters which must be set by the driver before it can issue MXGEFW_CMD_ETHERNET_UP. They persist until the next MXGEFW_CMD_RESET is issued */ MXGEFW_CMD_SET_INTRQ_DMA = 3, /* data0 = LSW of the host address * data1 = MSW of the host address * data2 = slice number if multiple slices are used */ MXGEFW_CMD_SET_BIG_BUFFER_SIZE = 4, /* in bytes, power of 2 */ MXGEFW_CMD_SET_SMALL_BUFFER_SIZE = 5, /* in bytes */ /* Parameters which refer to lanai SRAM addresses where the driver must issue PIO writes for various things */ MXGEFW_CMD_GET_SEND_OFFSET = 6, MXGEFW_CMD_GET_SMALL_RX_OFFSET = 7, MXGEFW_CMD_GET_BIG_RX_OFFSET = 8, /* data0 = slice number if multiple slices are used */ MXGEFW_CMD_GET_IRQ_ACK_OFFSET = 9, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET = 10, /* Parameters which refer to rings stored on the MCP, and whose size is controlled by the mcp */ MXGEFW_CMD_GET_SEND_RING_SIZE = 11, /* in bytes */ MXGEFW_CMD_GET_RX_RING_SIZE = 12, /* in bytes */ /* Parameters which refer to rings stored in the host, and whose size is controlled by the host. Note that all must be physically contiguous and must contain a power of 2 number of entries. */ MXGEFW_CMD_SET_INTRQ_SIZE = 13, /* in bytes */ -#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) +#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1U << 31) /* command to bring ethernet interface up. Above parameters (plus mtu & mac address) must have been exchanged prior to issuing this command */ MXGEFW_CMD_ETHERNET_UP = 14, /* command to bring ethernet interface down.
No further sends or receives may be processed until an MXGEFW_CMD_ETHERNET_UP is issued, and all interrupt queues must be flushed prior to ack'ing this command */ MXGEFW_CMD_ETHERNET_DOWN = 15, /* commands the driver may issue live, without resetting the nic. Note that increasing the mtu "live" should only be done if the driver has already supplied buffers sufficiently large to handle the new mtu. Decreasing the mtu live is safe */ MXGEFW_CMD_SET_MTU = 16, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET = 17, /* in microseconds */ MXGEFW_CMD_SET_STATS_INTERVAL = 18, /* in microseconds */ MXGEFW_CMD_SET_STATS_DMA_OBSOLETE = 19, /* replaced by SET_STATS_DMA_V2 */ MXGEFW_ENABLE_PROMISC = 20, MXGEFW_DISABLE_PROMISC = 21, MXGEFW_SET_MAC_ADDRESS = 22, MXGEFW_ENABLE_FLOW_CONTROL = 23, MXGEFW_DISABLE_FLOW_CONTROL = 24, /* do a DMA test data0,data1 = DMA address data2 = RDMA length (MSH), WDMA length (LSH) command return data = repetitions (MSH), 0.5-ms ticks (LSH) */ MXGEFW_DMA_TEST = 25, MXGEFW_ENABLE_ALLMULTI = 26, MXGEFW_DISABLE_ALLMULTI = 27, /* returns MXGEFW_CMD_ERROR_MULTICAST if there is no room in the cache data0,MSH(data1) = multicast group address */ MXGEFW_JOIN_MULTICAST_GROUP = 28, /* returns MXGEFW_CMD_ERROR_MULTICAST if the address is not in the cache, or is equal to FF-FF-FF-FF-FF-FF data0,MSH(data1) = multicast group address */ MXGEFW_LEAVE_MULTICAST_GROUP = 29, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS = 30, MXGEFW_CMD_SET_STATS_DMA_V2 = 31, /* data0, data1 = bus addr, * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows * adding new stuff to mcp_irq_data without changing the ABI * * If multiple slices are used, data2 contains both the size of the * structure (in the lower 16 bits) and the slice number * (in the upper 16 bits). */ MXGEFW_CMD_UNALIGNED_TEST = 32, /* same as DMA_TEST (same args) but aborts with UNALIGNED on an unaligned chipset */ MXGEFW_CMD_UNALIGNED_STATUS = 33, /* return data = boolean, true if the chipset is known to be unaligned */ MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS = 34, /* data0 = number of big buffers to use. It must be 0 or a power of 2. * 0 indicates that the NIC consumes as many buffers as are required * for the packet. This is the default behavior. * A power of 2 number indicates that the NIC always uses the specified * number of buffers for each big receive packet. * It is up to the driver to ensure that this value is big enough for * the NIC to be able to receive maximum-sized packets. */ MXGEFW_CMD_GET_MAX_RSS_QUEUES = 35, MXGEFW_CMD_ENABLE_RSS_QUEUES = 36, /* data0 = number of slices n (0, 1, ..., n-1) to enable * data1 = interrupt mode | use of multiple transmit queues. * 0=share one INTx/MSI. * 1=use one MSI-X per queue. * If all queues share one interrupt, the driver must have set * RSS_SHARED_INTERRUPT_DMA before enabling queues. * 2=enable both receive and send queues. * Without this bit set, only one send queue (slice 0's send queue) * is enabled. The receive queues are always enabled.
*/ #define MXGEFW_SLICE_INTR_MODE_SHARED 0x0 #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1 #define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET = 37, MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA = 38, /* data0, data1 = bus address lsw, msw */ MXGEFW_CMD_GET_RSS_TABLE_OFFSET = 39, /* get the offset of the indirection table */ MXGEFW_CMD_SET_RSS_TABLE_SIZE = 40, /* set the size of the indirection table */ MXGEFW_CMD_GET_RSS_KEY_OFFSET = 41, /* get the offset of the secret key */ MXGEFW_CMD_RSS_KEY_UPDATED = 42, /* tell nic that the secret key's been updated */ MXGEFW_CMD_SET_RSS_ENABLE = 43, /* data0 = enable/disable rss * 0: disable rss. nic does not distribute receive packets. * 1: enable rss. nic distributes receive packets among queues. * data1 = hash type * 1: IPV4 (required by RSS) * 2: TCP_IPV4 (required by RSS) * 3: IPV4 | TCP_IPV4 (required by RSS) * 4: source port * 5: source port + destination port */ #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 #define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5 #define MXGEFW_RSS_HASH_TYPE_MAX 0x5 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE = 44, /* Return data = the max. size of the entire headers of a IPv6 TSO packet. * If the header size of a IPv6 TSO packet is larger than the specified * value, then the driver must not use TSO. * This size restriction only applies to IPv6 TSO. * For IPv4 TSO, the maximum size of the headers is fixed, and the NIC * always has enough header buffer to store maximum-sized headers. */ MXGEFW_CMD_SET_TSO_MODE = 45, /* data0 = TSO mode. * 0: Linux/FreeBSD style (NIC default) * 1: NDIS/NetBSD style */ #define MXGEFW_TSO_MODE_LINUX 0 #define MXGEFW_TSO_MODE_NDIS 1 MXGEFW_CMD_MDIO_READ = 46, /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ MXGEFW_CMD_MDIO_WRITE = 47, /* data0 = dev_addr, data1 = register/addr, data2 = value */ MXGEFW_CMD_I2C_READ = 48, /* Starts to get a fresh copy of one byte or of the module i2c table, the * obtained data is cached inside the xaui-xfi chip : * data0 : 0 => get one byte, 1=> get 256 bytes * data1 : If data0 == 0: location to refresh * bit 7:0 register location * bit 8:15 is the i2c slave addr (0 is interpreted as 0xA1) * bit 23:16 is the i2c bus number (for multi-port NICs) * If data0 == 1: unused * The operation might take ~1ms for a single byte or ~65ms when refreshing all 256 bytes * During the i2c operation, MXGEFW_CMD_I2C_READ or MXGEFW_CMD_I2C_BYTE attempts * will return MXGEFW_CMD_ERROR_BUSY */ MXGEFW_CMD_I2C_BYTE = 49, /* Return the last obtained copy of a given byte in the xfp i2c table * (copy cached during the last relevant MXGEFW_CMD_I2C_READ) * data0 : index of the desired table entry * Return data = the byte stored at the requested index in the table */ MXGEFW_CMD_GET_VPUMP_OFFSET = 50, /* Return data = NIC memory offset of mcp_vpump_public_global */ MXGEFW_CMD_RESET_VPUMP = 51, /* Resets the VPUMP state */ MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE = 52, /* data0 = mcp_slot type to use. 
* 0 = the default 4B mcp_slot * 1 = 8B mcp_slot_8 */ #define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 #define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 MXGEFW_CMD_SET_THROTTLE_FACTOR = 53, /* set the throttle factor for ethp_z8e data0 = throttle_factor throttle_factor = 256 * pcie-raw-speed / tx_speed tx_speed = 256 * pcie-raw-speed / throttle_factor For PCI-E x8: pcie-raw-speed == 16Gb/s For PCI-E x4: pcie-raw-speed == 8Gb/s ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s */ MXGEFW_CMD_VPUMP_UP = 54, /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ MXGEFW_CMD_GET_VPUMP_CLK = 55, /* Get the lanai clock */ MXGEFW_CMD_GET_DCA_OFFSET = 56, /* offset of dca control for WDMAs */ /* VMWare NetQueue commands */ MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57, MXGEFW_CMD_NETQ_ADD_FILTER = 58, /* data0 = filter_id << 16 | queue << 8 | type */ /* data1 = MS4 of MAC Addr */ /* data2 = LS2_MAC << 16 | VLAN_tag */ MXGEFW_CMD_NETQ_DEL_FILTER = 59, /* data0 = filter_id */ MXGEFW_CMD_NETQ_QUERY1 = 60, MXGEFW_CMD_NETQ_QUERY2 = 61, MXGEFW_CMD_NETQ_QUERY3 = 62, MXGEFW_CMD_NETQ_QUERY4 = 63, MXGEFW_CMD_RELAX_RXBUFFER_ALIGNMENT = 64, /* When set, small receive buffers can cross page boundaries. * Both small and big receive buffers may start at any address. * This option has performance implications, so use with caution. */ }; typedef enum myri10ge_mcp_cmd_type myri10ge_mcp_cmd_type_t; enum myri10ge_mcp_cmd_status { MXGEFW_CMD_OK = 0, MXGEFW_CMD_UNKNOWN = 1, MXGEFW_CMD_ERROR_RANGE = 2, MXGEFW_CMD_ERROR_BUSY = 3, MXGEFW_CMD_ERROR_EMPTY = 4, MXGEFW_CMD_ERROR_CLOSED = 5, MXGEFW_CMD_ERROR_HASH_ERROR = 6, MXGEFW_CMD_ERROR_BAD_PORT = 7, MXGEFW_CMD_ERROR_RESOURCES = 8, MXGEFW_CMD_ERROR_MULTICAST = 9, MXGEFW_CMD_ERROR_UNALIGNED = 10, MXGEFW_CMD_ERROR_NO_MDIO = 11, MXGEFW_CMD_ERROR_I2C_FAILURE = 12, MXGEFW_CMD_ERROR_I2C_ABSENT = 13, MXGEFW_CMD_ERROR_BAD_PCIE_LINK = 14 }; typedef enum myri10ge_mcp_cmd_status myri10ge_mcp_cmd_status_t; #define MXGEFW_OLD_IRQ_DATA_LEN 40 struct mcp_irq_data { /* add new counters at the beginning */ uint32_t future_use[1]; uint32_t dropped_pause; uint32_t dropped_unicast_filtered; uint32_t dropped_bad_crc32; uint32_t dropped_bad_phy; uint32_t dropped_multicast_filtered; /* 40 Bytes */ uint32_t send_done_count; #define MXGEFW_LINK_DOWN 0 #define MXGEFW_LINK_UP 1 #define MXGEFW_LINK_MYRINET 2 #define MXGEFW_LINK_UNKNOWN 3 uint32_t link_up; uint32_t dropped_link_overflow; uint32_t dropped_link_error_or_filtered; uint32_t dropped_runt; uint32_t dropped_overrun; uint32_t dropped_no_small_buffer; uint32_t dropped_no_big_buffer; uint32_t rdma_tags_available; uint8_t tx_stopped; uint8_t link_down; uint8_t stats_updated; uint8_t valid; }; typedef struct mcp_irq_data mcp_irq_data_t; #ifdef MXGEFW_NDIS /* Exclusively used by NDIS drivers */ struct mcp_rss_shared_interrupt { uint8_t pad[2]; uint8_t queue; uint8_t valid; }; #endif /* definitions for NETQ filter type */ #define MXGEFW_NETQ_FILTERTYPE_NONE 0 #define MXGEFW_NETQ_FILTERTYPE_MACADDR 1 #define MXGEFW_NETQ_FILTERTYPE_VLAN 2 #define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3 #endif /* _myri10ge_mcp_h */ Index: head/sys/dev/qlxge/qls_dump.c =================================================================== --- head/sys/dev/qlxge/qls_dump.c (revision 258779) +++ head/sys/dev/qlxge/qls_dump.c 
(revision 258780) @@ -1,1992 +1,1992 @@ /* * Copyright (c) 2013-2014 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: qls_dump.c */ #include __FBSDID("$FreeBSD$"); #include "qls_os.h" #include "qls_hw.h" #include "qls_def.h" #include "qls_glbl.h" #include "qls_dump.h" qls_mpi_coredump_t ql_mpi_coredump; #define Q81_CORE_SEG_NUM 1 #define Q81_TEST_LOGIC_SEG_NUM 2 #define Q81_RMII_SEG_NUM 3 #define Q81_FCMAC1_SEG_NUM 4 #define Q81_FCMAC2_SEG_NUM 5 #define Q81_FC1_MBOX_SEG_NUM 6 #define Q81_IDE_SEG_NUM 7 #define Q81_NIC1_MBOX_SEG_NUM 8 #define Q81_SMBUS_SEG_NUM 9 #define Q81_FC2_MBOX_SEG_NUM 10 #define Q81_NIC2_MBOX_SEG_NUM 11 #define Q81_I2C_SEG_NUM 12 #define Q81_MEMC_SEG_NUM 13 #define Q81_PBUS_SEG_NUM 14 #define Q81_MDE_SEG_NUM 15 #define Q81_NIC1_CONTROL_SEG_NUM 16 #define Q81_NIC2_CONTROL_SEG_NUM 17 #define Q81_NIC1_XGMAC_SEG_NUM 18 #define Q81_NIC2_XGMAC_SEG_NUM 19 #define Q81_WCS_RAM_SEG_NUM 20 #define Q81_MEMC_RAM_SEG_NUM 21 #define Q81_XAUI1_AN_SEG_NUM 22 #define Q81_XAUI1_HSS_PCS_SEG_NUM 23 #define Q81_XFI1_AN_SEG_NUM 24 #define Q81_XFI1_TRAIN_SEG_NUM 25 #define Q81_XFI1_HSS_PCS_SEG_NUM 26 #define Q81_XFI1_HSS_TX_SEG_NUM 27 #define Q81_XFI1_HSS_RX_SEG_NUM 28 #define Q81_XFI1_HSS_PLL_SEG_NUM 29 #define Q81_INTR_STATES_SEG_NUM 31 #define Q81_ETS_SEG_NUM 34 #define Q81_PROBE_DUMP_SEG_NUM 35 #define Q81_ROUTING_INDEX_SEG_NUM 36 #define Q81_MAC_PROTOCOL_SEG_NUM 37 #define Q81_XAUI2_AN_SEG_NUM 38 #define Q81_XAUI2_HSS_PCS_SEG_NUM 39 #define Q81_XFI2_AN_SEG_NUM 40 #define Q81_XFI2_TRAIN_SEG_NUM 41 #define Q81_XFI2_HSS_PCS_SEG_NUM 42 #define Q81_XFI2_HSS_TX_SEG_NUM 43 #define Q81_XFI2_HSS_RX_SEG_NUM 44 #define Q81_XFI2_HSS_PLL_SEG_NUM 45 #define Q81_WQC1_SEG_NUM 46 #define Q81_CQC1_SEG_NUM 47 #define Q81_WQC2_SEG_NUM 48 #define Q81_CQC2_SEG_NUM 49 #define Q81_SEM_REGS_SEG_NUM 50 enum { Q81_PAUSE_SRC_LO = 0x00000100, Q81_PAUSE_SRC_HI = 0x00000104, Q81_GLOBAL_CFG = 0x00000108, Q81_GLOBAL_CFG_RESET = (1 << 0), /*Control*/ Q81_GLOBAL_CFG_JUMBO = (1 << 6), /*Control*/ Q81_GLOBAL_CFG_TX_STAT_EN = (1 << 10), /*Control*/ Q81_GLOBAL_CFG_RX_STAT_EN = (1 << 11), /*Control*/ Q81_TX_CFG = 0x0000010c, Q81_TX_CFG_RESET = (1 << 0), /*Control*/ Q81_TX_CFG_EN = (1 << 1), /*Control*/ Q81_TX_CFG_PREAM = (1 << 2), /*Control*/ Q81_RX_CFG = 0x00000110, 
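/* Entries tagged "Control" below are bit masks within the register defined just above them, not XGMAC register offsets. Bit-31 masks must use an unsigned literal: left-shifting a signed 1 into the sign bit, as in (1 << 31), is undefined behavior in C, hence the (1U << 31) form of Q81_MAC_TX_PARAMS_JUMBO. */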
Q81_RX_CFG_RESET = (1 << 0), /*Control*/ Q81_RX_CFG_EN = (1 << 1), /*Control*/ Q81_RX_CFG_PREAM = (1 << 2), /*Control*/ Q81_FLOW_CTL = 0x0000011c, Q81_PAUSE_OPCODE = 0x00000120, Q81_PAUSE_TIMER = 0x00000124, Q81_PAUSE_FRM_DEST_LO = 0x00000128, Q81_PAUSE_FRM_DEST_HI = 0x0000012c, Q81_MAC_TX_PARAMS = 0x00000134, - Q81_MAC_TX_PARAMS_JUMBO = (1 << 31), /*Control*/ + Q81_MAC_TX_PARAMS_JUMBO = (1U << 31), /*Control*/ Q81_MAC_TX_PARAMS_SIZE_SHIFT = 16, /*Control*/ Q81_MAC_RX_PARAMS = 0x00000138, Q81_MAC_SYS_INT = 0x00000144, Q81_MAC_SYS_INT_MASK = 0x00000148, Q81_MAC_MGMT_INT = 0x0000014c, Q81_MAC_MGMT_IN_MASK = 0x00000150, Q81_EXT_ARB_MODE = 0x000001fc, Q81_TX_PKTS = 0x00000200, Q81_TX_PKTS_LO = 0x00000204, Q81_TX_BYTES = 0x00000208, Q81_TX_BYTES_LO = 0x0000020C, Q81_TX_MCAST_PKTS = 0x00000210, Q81_TX_MCAST_PKTS_LO = 0x00000214, Q81_TX_BCAST_PKTS = 0x00000218, Q81_TX_BCAST_PKTS_LO = 0x0000021C, Q81_TX_UCAST_PKTS = 0x00000220, Q81_TX_UCAST_PKTS_LO = 0x00000224, Q81_TX_CTL_PKTS = 0x00000228, Q81_TX_CTL_PKTS_LO = 0x0000022c, Q81_TX_PAUSE_PKTS = 0x00000230, Q81_TX_PAUSE_PKTS_LO = 0x00000234, Q81_TX_64_PKT = 0x00000238, Q81_TX_64_PKT_LO = 0x0000023c, Q81_TX_65_TO_127_PKT = 0x00000240, Q81_TX_65_TO_127_PKT_LO = 0x00000244, Q81_TX_128_TO_255_PKT = 0x00000248, Q81_TX_128_TO_255_PKT_LO = 0x0000024c, Q81_TX_256_511_PKT = 0x00000250, Q81_TX_256_511_PKT_LO = 0x00000254, Q81_TX_512_TO_1023_PKT = 0x00000258, Q81_TX_512_TO_1023_PKT_LO = 0x0000025c, Q81_TX_1024_TO_1518_PKT = 0x00000260, Q81_TX_1024_TO_1518_PKT_LO = 0x00000264, Q81_TX_1519_TO_MAX_PKT = 0x00000268, Q81_TX_1519_TO_MAX_PKT_LO = 0x0000026c, Q81_TX_UNDERSIZE_PKT = 0x00000270, Q81_TX_UNDERSIZE_PKT_LO = 0x00000274, Q81_TX_OVERSIZE_PKT = 0x00000278, Q81_TX_OVERSIZE_PKT_LO = 0x0000027c, Q81_RX_HALF_FULL_DET = 0x000002a0, Q81_TX_HALF_FULL_DET_LO = 0x000002a4, Q81_RX_OVERFLOW_DET = 0x000002a8, Q81_TX_OVERFLOW_DET_LO = 0x000002ac, Q81_RX_HALF_FULL_MASK = 0x000002b0, Q81_TX_HALF_FULL_MASK_LO = 0x000002b4, Q81_RX_OVERFLOW_MASK = 0x000002b8, Q81_TX_OVERFLOW_MASK_LO = 0x000002bc, Q81_STAT_CNT_CTL = 0x000002c0, Q81_STAT_CNT_CTL_CLEAR_TX = (1 << 0), /*Control*/ Q81_STAT_CNT_CTL_CLEAR_RX = (1 << 1), /*Control*/ Q81_AUX_RX_HALF_FULL_DET = 0x000002d0, Q81_AUX_TX_HALF_FULL_DET = 0x000002d4, Q81_AUX_RX_OVERFLOW_DET = 0x000002d8, Q81_AUX_TX_OVERFLOW_DET = 0x000002dc, Q81_AUX_RX_HALF_FULL_MASK = 0x000002f0, Q81_AUX_TX_HALF_FULL_MASK = 0x000002f4, Q81_AUX_RX_OVERFLOW_MASK = 0x000002f8, Q81_AUX_TX_OVERFLOW_MASK = 0x000002fc, Q81_RX_BYTES = 0x00000300, Q81_RX_BYTES_LO = 0x00000304, Q81_RX_BYTES_OK = 0x00000308, Q81_RX_BYTES_OK_LO = 0x0000030c, Q81_RX_PKTS = 0x00000310, Q81_RX_PKTS_LO = 0x00000314, Q81_RX_PKTS_OK = 0x00000318, Q81_RX_PKTS_OK_LO = 0x0000031c, Q81_RX_BCAST_PKTS = 0x00000320, Q81_RX_BCAST_PKTS_LO = 0x00000324, Q81_RX_MCAST_PKTS = 0x00000328, Q81_RX_MCAST_PKTS_LO = 0x0000032c, Q81_RX_UCAST_PKTS = 0x00000330, Q81_RX_UCAST_PKTS_LO = 0x00000334, Q81_RX_UNDERSIZE_PKTS = 0x00000338, Q81_RX_UNDERSIZE_PKTS_LO = 0x0000033c, Q81_RX_OVERSIZE_PKTS = 0x00000340, Q81_RX_OVERSIZE_PKTS_LO = 0x00000344, Q81_RX_JABBER_PKTS = 0x00000348, Q81_RX_JABBER_PKTS_LO = 0x0000034c, Q81_RX_UNDERSIZE_FCERR_PKTS = 0x00000350, Q81_RX_UNDERSIZE_FCERR_PKTS_LO = 0x00000354, Q81_RX_DROP_EVENTS = 0x00000358, Q81_RX_DROP_EVENTS_LO = 0x0000035c, Q81_RX_FCERR_PKTS = 0x00000360, Q81_RX_FCERR_PKTS_LO = 0x00000364, Q81_RX_ALIGN_ERR = 0x00000368, Q81_RX_ALIGN_ERR_LO = 0x0000036c, Q81_RX_SYMBOL_ERR = 0x00000370, Q81_RX_SYMBOL_ERR_LO = 0x00000374, Q81_RX_MAC_ERR = 0x00000378, Q81_RX_MAC_ERR_LO = 
0x0000037c, Q81_RX_CTL_PKTS = 0x00000380, Q81_RX_CTL_PKTS_LO = 0x00000384, Q81_RX_PAUSE_PKTS = 0x00000388, Q81_RX_PAUSE_PKTS_LO = 0x0000038c, Q81_RX_64_PKTS = 0x00000390, Q81_RX_64_PKTS_LO = 0x00000394, Q81_RX_65_TO_127_PKTS = 0x00000398, Q81_RX_65_TO_127_PKTS_LO = 0x0000039c, Q81_RX_128_255_PKTS = 0x000003a0, Q81_RX_128_255_PKTS_LO = 0x000003a4, Q81_RX_256_511_PKTS = 0x000003a8, Q81_RX_256_511_PKTS_LO = 0x000003ac, Q81_RX_512_TO_1023_PKTS = 0x000003b0, Q81_RX_512_TO_1023_PKTS_LO = 0x000003b4, Q81_RX_1024_TO_1518_PKTS = 0x000003b8, Q81_RX_1024_TO_1518_PKTS_LO = 0x000003bc, Q81_RX_1519_TO_MAX_PKTS = 0x000003c0, Q81_RX_1519_TO_MAX_PKTS_LO = 0x000003c4, Q81_RX_LEN_ERR_PKTS = 0x000003c8, Q81_RX_LEN_ERR_PKTS_LO = 0x000003cc, Q81_MDIO_TX_DATA = 0x00000400, Q81_MDIO_RX_DATA = 0x00000410, Q81_MDIO_CMD = 0x00000420, Q81_MDIO_PHY_ADDR = 0x00000430, Q81_MDIO_PORT = 0x00000440, Q81_MDIO_STATUS = 0x00000450, Q81_TX_CBFC_PAUSE_FRAMES0 = 0x00000500, Q81_TX_CBFC_PAUSE_FRAMES0_LO = 0x00000504, Q81_TX_CBFC_PAUSE_FRAMES1 = 0x00000508, Q81_TX_CBFC_PAUSE_FRAMES1_LO = 0x0000050C, Q81_TX_CBFC_PAUSE_FRAMES2 = 0x00000510, Q81_TX_CBFC_PAUSE_FRAMES2_LO = 0x00000514, Q81_TX_CBFC_PAUSE_FRAMES3 = 0x00000518, Q81_TX_CBFC_PAUSE_FRAMES3_LO = 0x0000051C, Q81_TX_CBFC_PAUSE_FRAMES4 = 0x00000520, Q81_TX_CBFC_PAUSE_FRAMES4_LO = 0x00000524, Q81_TX_CBFC_PAUSE_FRAMES5 = 0x00000528, Q81_TX_CBFC_PAUSE_FRAMES5_LO = 0x0000052C, Q81_TX_CBFC_PAUSE_FRAMES6 = 0x00000530, Q81_TX_CBFC_PAUSE_FRAMES6_LO = 0x00000534, Q81_TX_CBFC_PAUSE_FRAMES7 = 0x00000538, Q81_TX_CBFC_PAUSE_FRAMES7_LO = 0x0000053C, Q81_TX_FCOE_PKTS = 0x00000540, Q81_TX_FCOE_PKTS_LO = 0x00000544, Q81_TX_MGMT_PKTS = 0x00000548, Q81_TX_MGMT_PKTS_LO = 0x0000054C, Q81_RX_CBFC_PAUSE_FRAMES0 = 0x00000568, Q81_RX_CBFC_PAUSE_FRAMES0_LO = 0x0000056C, Q81_RX_CBFC_PAUSE_FRAMES1 = 0x00000570, Q81_RX_CBFC_PAUSE_FRAMES1_LO = 0x00000574, Q81_RX_CBFC_PAUSE_FRAMES2 = 0x00000578, Q81_RX_CBFC_PAUSE_FRAMES2_LO = 0x0000057C, Q81_RX_CBFC_PAUSE_FRAMES3 = 0x00000580, Q81_RX_CBFC_PAUSE_FRAMES3_LO = 0x00000584, Q81_RX_CBFC_PAUSE_FRAMES4 = 0x00000588, Q81_RX_CBFC_PAUSE_FRAMES4_LO = 0x0000058C, Q81_RX_CBFC_PAUSE_FRAMES5 = 0x00000590, Q81_RX_CBFC_PAUSE_FRAMES5_LO = 0x00000594, Q81_RX_CBFC_PAUSE_FRAMES6 = 0x00000598, Q81_RX_CBFC_PAUSE_FRAMES6_LO = 0x0000059C, Q81_RX_CBFC_PAUSE_FRAMES7 = 0x000005A0, Q81_RX_CBFC_PAUSE_FRAMES7_LO = 0x000005A4, Q81_RX_FCOE_PKTS = 0x000005A8, Q81_RX_FCOE_PKTS_LO = 0x000005AC, Q81_RX_MGMT_PKTS = 0x000005B0, Q81_RX_MGMT_PKTS_LO = 0x000005B4, Q81_RX_NIC_FIFO_DROP = 0x000005B8, Q81_RX_NIC_FIFO_DROP_LO = 0x000005BC, Q81_RX_FCOE_FIFO_DROP = 0x000005C0, Q81_RX_FCOE_FIFO_DROP_LO = 0x000005C4, Q81_RX_MGMT_FIFO_DROP = 0x000005C8, Q81_RX_MGMT_FIFO_DROP_LO = 0x000005CC, Q81_RX_PKTS_PRIORITY0 = 0x00000600, Q81_RX_PKTS_PRIORITY0_LO = 0x00000604, Q81_RX_PKTS_PRIORITY1 = 0x00000608, Q81_RX_PKTS_PRIORITY1_LO = 0x0000060C, Q81_RX_PKTS_PRIORITY2 = 0x00000610, Q81_RX_PKTS_PRIORITY2_LO = 0x00000614, Q81_RX_PKTS_PRIORITY3 = 0x00000618, Q81_RX_PKTS_PRIORITY3_LO = 0x0000061C, Q81_RX_PKTS_PRIORITY4 = 0x00000620, Q81_RX_PKTS_PRIORITY4_LO = 0x00000624, Q81_RX_PKTS_PRIORITY5 = 0x00000628, Q81_RX_PKTS_PRIORITY5_LO = 0x0000062C, Q81_RX_PKTS_PRIORITY6 = 0x00000630, Q81_RX_PKTS_PRIORITY6_LO = 0x00000634, Q81_RX_PKTS_PRIORITY7 = 0x00000638, Q81_RX_PKTS_PRIORITY7_LO = 0x0000063C, Q81_RX_OCTETS_PRIORITY0 = 0x00000640, Q81_RX_OCTETS_PRIORITY0_LO = 0x00000644, Q81_RX_OCTETS_PRIORITY1 = 0x00000648, Q81_RX_OCTETS_PRIORITY1_LO = 0x0000064C, Q81_RX_OCTETS_PRIORITY2 = 0x00000650, Q81_RX_OCTETS_PRIORITY2_LO = 
0x00000654, Q81_RX_OCTETS_PRIORITY3 = 0x00000658, Q81_RX_OCTETS_PRIORITY3_LO = 0x0000065C, Q81_RX_OCTETS_PRIORITY4 = 0x00000660, Q81_RX_OCTETS_PRIORITY4_LO = 0x00000664, Q81_RX_OCTETS_PRIORITY5 = 0x00000668, Q81_RX_OCTETS_PRIORITY5_LO = 0x0000066C, Q81_RX_OCTETS_PRIORITY6 = 0x00000670, Q81_RX_OCTETS_PRIORITY6_LO = 0x00000674, Q81_RX_OCTETS_PRIORITY7 = 0x00000678, Q81_RX_OCTETS_PRIORITY7_LO = 0x0000067C, Q81_TX_PKTS_PRIORITY0 = 0x00000680, Q81_TX_PKTS_PRIORITY0_LO = 0x00000684, Q81_TX_PKTS_PRIORITY1 = 0x00000688, Q81_TX_PKTS_PRIORITY1_LO = 0x0000068C, Q81_TX_PKTS_PRIORITY2 = 0x00000690, Q81_TX_PKTS_PRIORITY2_LO = 0x00000694, Q81_TX_PKTS_PRIORITY3 = 0x00000698, Q81_TX_PKTS_PRIORITY3_LO = 0x0000069C, Q81_TX_PKTS_PRIORITY4 = 0x000006A0, Q81_TX_PKTS_PRIORITY4_LO = 0x000006A4, Q81_TX_PKTS_PRIORITY5 = 0x000006A8, Q81_TX_PKTS_PRIORITY5_LO = 0x000006AC, Q81_TX_PKTS_PRIORITY6 = 0x000006B0, Q81_TX_PKTS_PRIORITY6_LO = 0x000006B4, Q81_TX_PKTS_PRIORITY7 = 0x000006B8, Q81_TX_PKTS_PRIORITY7_LO = 0x000006BC, Q81_TX_OCTETS_PRIORITY0 = 0x000006C0, Q81_TX_OCTETS_PRIORITY0_LO = 0x000006C4, Q81_TX_OCTETS_PRIORITY1 = 0x000006C8, Q81_TX_OCTETS_PRIORITY1_LO = 0x000006CC, Q81_TX_OCTETS_PRIORITY2 = 0x000006D0, Q81_TX_OCTETS_PRIORITY2_LO = 0x000006D4, Q81_TX_OCTETS_PRIORITY3 = 0x000006D8, Q81_TX_OCTETS_PRIORITY3_LO = 0x000006DC, Q81_TX_OCTETS_PRIORITY4 = 0x000006E0, Q81_TX_OCTETS_PRIORITY4_LO = 0x000006E4, Q81_TX_OCTETS_PRIORITY5 = 0x000006E8, Q81_TX_OCTETS_PRIORITY5_LO = 0x000006EC, Q81_TX_OCTETS_PRIORITY6 = 0x000006F0, Q81_TX_OCTETS_PRIORITY6_LO = 0x000006F4, Q81_TX_OCTETS_PRIORITY7 = 0x000006F8, Q81_TX_OCTETS_PRIORITY7_LO = 0x000006FC, Q81_RX_DISCARD_PRIORITY0 = 0x00000700, Q81_RX_DISCARD_PRIORITY0_LO = 0x00000704, Q81_RX_DISCARD_PRIORITY1 = 0x00000708, Q81_RX_DISCARD_PRIORITY1_LO = 0x0000070C, Q81_RX_DISCARD_PRIORITY2 = 0x00000710, Q81_RX_DISCARD_PRIORITY2_LO = 0x00000714, Q81_RX_DISCARD_PRIORITY3 = 0x00000718, Q81_RX_DISCARD_PRIORITY3_LO = 0x0000071C, Q81_RX_DISCARD_PRIORITY4 = 0x00000720, Q81_RX_DISCARD_PRIORITY4_LO = 0x00000724, Q81_RX_DISCARD_PRIORITY5 = 0x00000728, Q81_RX_DISCARD_PRIORITY5_LO = 0x0000072C, Q81_RX_DISCARD_PRIORITY6 = 0x00000730, Q81_RX_DISCARD_PRIORITY6_LO = 0x00000734, Q81_RX_DISCARD_PRIORITY7 = 0x00000738, Q81_RX_DISCARD_PRIORITY7_LO = 0x0000073C }; static void qls_mpid_seg_hdr(qls_mpid_seg_hdr_t *seg_hdr, uint32_t seg_num, uint32_t seg_size, unsigned char *desc) { memset(seg_hdr, 0, sizeof(qls_mpid_seg_hdr_t)); seg_hdr->cookie = Q81_MPID_COOKIE; seg_hdr->seg_num = seg_num; seg_hdr->seg_size = seg_size; memcpy(seg_hdr->desc, desc, (sizeof(seg_hdr->desc))-1); return; } static int qls_wait_reg_rdy(qla_host_t *ha , uint32_t reg, uint32_t bit, uint32_t err_bit) { uint32_t data; int count = 10; while (count) { data = READ_REG32(ha, reg); if (data & err_bit) return (-1); else if (data & bit) return (0); qls_mdelay(__func__, 10); count--; } return (-1); } static int qls_rd_mpi_reg(qla_host_t *ha, uint32_t reg, uint32_t *data) { int ret; ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY, Q81_CTL_PROC_ADDR_ERR); if (ret) goto exit_qls_rd_mpi_reg; WRITE_REG32(ha, Q81_CTL_PROC_ADDR, reg | Q81_CTL_PROC_ADDR_READ); ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY, Q81_CTL_PROC_ADDR_ERR); if (ret) goto exit_qls_rd_mpi_reg; *data = READ_REG32(ha, Q81_CTL_PROC_DATA); exit_qls_rd_mpi_reg: return (ret); } static int qls_wr_mpi_reg(qla_host_t *ha, uint32_t reg, uint32_t data) { int ret = 0; ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY, 
Q81_CTL_PROC_ADDR_ERR); if (ret) goto exit_qls_wr_mpi_reg; WRITE_REG32(ha, Q81_CTL_PROC_DATA, data); WRITE_REG32(ha, Q81_CTL_PROC_ADDR, reg); ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY, Q81_CTL_PROC_ADDR_ERR); exit_qls_wr_mpi_reg: return (ret); } #define Q81_TEST_LOGIC_FUNC_PORT_CONFIG 0x1002 #define Q81_INVALID_NUM 0xFFFFFFFF #define Q81_NIC1_FUNC_ENABLE 0x00000001 #define Q81_NIC1_FUNC_MASK 0x0000000e #define Q81_NIC1_FUNC_SHIFT 1 #define Q81_NIC2_FUNC_ENABLE 0x00000010 #define Q81_NIC2_FUNC_MASK 0x000000e0 #define Q81_NIC2_FUNC_SHIFT 5 #define Q81_FUNCTION_SHIFT 6 static uint32_t qls_get_other_fnum(qla_host_t *ha) { int ret; uint32_t o_func; uint32_t test_logic; uint32_t nic1_fnum = Q81_INVALID_NUM; uint32_t nic2_fnum = Q81_INVALID_NUM; ret = qls_rd_mpi_reg(ha, Q81_TEST_LOGIC_FUNC_PORT_CONFIG, &test_logic); if (ret) return(Q81_INVALID_NUM); if (test_logic & Q81_NIC1_FUNC_ENABLE) nic1_fnum = (test_logic & Q81_NIC1_FUNC_MASK) >> Q81_NIC1_FUNC_SHIFT; if (test_logic & Q81_NIC2_FUNC_ENABLE) nic2_fnum = (test_logic & Q81_NIC2_FUNC_MASK) >> Q81_NIC2_FUNC_SHIFT; if (ha->pci_func == 0) o_func = nic2_fnum; else o_func = nic1_fnum; return(o_func); } static uint32_t qls_rd_ofunc_reg(qla_host_t *ha, uint32_t reg) { uint32_t ofunc; uint32_t data; int ret = 0; ofunc = qls_get_other_fnum(ha); if (ofunc == Q81_INVALID_NUM) return(Q81_INVALID_NUM); reg = Q81_CTL_PROC_ADDR_REG_BLOCK | (ofunc << Q81_FUNCTION_SHIFT) | reg; ret = qls_rd_mpi_reg(ha, reg, &data); if (ret != 0) return(Q81_INVALID_NUM); return(data); } static void qls_wr_ofunc_reg(qla_host_t *ha, uint32_t reg, uint32_t value) { uint32_t ofunc; int ret = 0; ofunc = qls_get_other_fnum(ha); if (ofunc == Q81_INVALID_NUM) return; reg = Q81_CTL_PROC_ADDR_REG_BLOCK | (ofunc << Q81_FUNCTION_SHIFT) | reg; ret = qls_wr_mpi_reg(ha, reg, value); return; } static int qls_wait_ofunc_reg_rdy(qla_host_t *ha, uint32_t reg, uint32_t bit, uint32_t err_bit) { uint32_t data; int count = 10; while (count) { data = qls_rd_ofunc_reg(ha, reg); if (data & err_bit) return (-1); else if (data & bit) return (0); qls_mdelay(__func__, 10); count--; } return (-1); } #define Q81_XG_SERDES_ADDR_RDY BIT_31 #define Q81_XG_SERDES_ADDR_READ BIT_30 static int qls_rd_ofunc_serdes_reg(qla_host_t *ha, uint32_t reg, uint32_t *data) { int ret; /* wait for reg to come ready */ ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XG_SERDES_ADDR >> 2), Q81_XG_SERDES_ADDR_RDY, 0); if (ret) goto exit_qls_rd_ofunc_serdes_reg; /* set up for reg read */ qls_wr_ofunc_reg(ha, (Q81_CTL_XG_SERDES_ADDR >> 2), (reg | Q81_XG_SERDES_ADDR_READ)); /* wait for reg to come ready */ ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XG_SERDES_ADDR >> 2), Q81_XG_SERDES_ADDR_RDY, 0); if (ret) goto exit_qls_rd_ofunc_serdes_reg; /* get the data */ *data = qls_rd_ofunc_reg(ha, (Q81_CTL_XG_SERDES_DATA >> 2)); exit_qls_rd_ofunc_serdes_reg: return ret; } #define Q81_XGMAC_ADDR_RDY BIT_31 #define Q81_XGMAC_ADDR_R BIT_30 #define Q81_XGMAC_ADDR_XME BIT_29 static int qls_rd_ofunc_xgmac_reg(qla_host_t *ha, uint32_t reg, uint32_t *data) { int ret = 0; /* wait for reg to come ready */ ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XGMAC_ADDR >> 2), Q81_XGMAC_ADDR_RDY, Q81_XGMAC_ADDR_XME); if (ret) goto exit_qls_rd_ofunc_xgmac_reg; /* set up for reg read */ qls_wr_ofunc_reg(ha, (Q81_CTL_XGMAC_ADDR >> 2), (reg | Q81_XGMAC_ADDR_R)); /* wait for reg to come ready */ ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XGMAC_ADDR >> 2), Q81_XGMAC_ADDR_RDY, Q81_XGMAC_ADDR_XME); if (ret) goto exit_qls_rd_ofunc_xgmac_reg; /* get the data; ofunc registers are indexed in 32-bit words, hence the >> 2 */ *data = qls_rd_ofunc_reg(ha, (Q81_CTL_XGMAC_DATA >> 2)); exit_qls_rd_ofunc_xgmac_reg: return ret; } static int
qls_rd_serdes_reg(qla_host_t *ha, uint32_t reg, uint32_t *data) { int ret; ret = qls_wait_reg_rdy(ha, Q81_CTL_XG_SERDES_ADDR, Q81_XG_SERDES_ADDR_RDY, 0); if (ret) goto exit_qls_rd_serdes_reg; WRITE_REG32(ha, Q81_CTL_XG_SERDES_ADDR, \ (reg | Q81_XG_SERDES_ADDR_READ)); ret = qls_wait_reg_rdy(ha, Q81_CTL_XG_SERDES_ADDR, Q81_XG_SERDES_ADDR_RDY, 0); if (ret) goto exit_qls_rd_serdes_reg; *data = READ_REG32(ha, Q81_CTL_XG_SERDES_DATA); exit_qls_rd_serdes_reg: return ret; } static void qls_get_both_serdes(qla_host_t *ha, uint32_t addr, uint32_t *dptr, uint32_t *ind_ptr, uint32_t dvalid, uint32_t ind_valid) { int ret = -1; if (dvalid) ret = qls_rd_serdes_reg(ha, addr, dptr); if (ret) *dptr = Q81_BAD_DATA; ret = -1; if(ind_valid) ret = qls_rd_ofunc_serdes_reg(ha, addr, ind_ptr); if (ret) *ind_ptr = Q81_BAD_DATA; } #define Q81_XFI1_POWERED_UP 0x00000005 #define Q81_XFI2_POWERED_UP 0x0000000A #define Q81_XAUI_POWERED_UP 0x00000001 static int qls_rd_serdes_regs(qla_host_t *ha, qls_mpi_coredump_t *mpi_dump) { int ret; uint32_t xfi_d_valid, xfi_ind_valid, xaui_d_valid, xaui_ind_valid; uint32_t temp, xaui_reg, i; uint32_t *dptr, *indptr; xfi_d_valid = xfi_ind_valid = xaui_d_valid = xaui_ind_valid = 0; xaui_reg = 0x800; ret = qls_rd_ofunc_serdes_reg(ha, xaui_reg, &temp); if (ret) temp = 0; if ((temp & Q81_XAUI_POWERED_UP) == Q81_XAUI_POWERED_UP) xaui_ind_valid = 1; ret = qls_rd_serdes_reg(ha, xaui_reg, &temp); if (ret) temp = 0; if ((temp & Q81_XAUI_POWERED_UP) == Q81_XAUI_POWERED_UP) xaui_d_valid = 1; ret = qls_rd_serdes_reg(ha, 0x1E06, &temp); if (ret) temp = 0; if ((temp & Q81_XFI1_POWERED_UP) == Q81_XFI1_POWERED_UP) { if (ha->pci_func & 1) xfi_ind_valid = 1; /* NIC 2, so the indirect (NIC1) xfi is up*/ else xfi_d_valid = 1; } if((temp & Q81_XFI2_POWERED_UP) == Q81_XFI2_POWERED_UP) { if(ha->pci_func & 1) xfi_d_valid = 1; /* NIC 2, so the indirect (NIC1) xfi is up */ else xfi_ind_valid = 1; } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xaui_an); indptr = (uint32_t *)(&mpi_dump->serdes1_xaui_an); } else { dptr = (uint32_t *)(&mpi_dump->serdes1_xaui_an); indptr = (uint32_t *)(&mpi_dump->serdes2_xaui_an); } for (i = 0; i <= 0x000000034; i += 4, dptr ++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xaui_d_valid, xaui_ind_valid); } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xaui_hss_pcs); indptr = (uint32_t *)(&mpi_dump->serdes1_xaui_hss_pcs); } else { dptr = (uint32_t *)(&mpi_dump->serdes1_xaui_hss_pcs); indptr = (uint32_t *)(&mpi_dump->serdes2_xaui_hss_pcs); } for (i = 0x800; i <= 0x880; i += 4, dptr ++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xaui_d_valid, xaui_ind_valid); } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_an); indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_an); } else { dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_an); indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_an); } for (i = 0x1000; i <= 0x1034; i += 4, dptr ++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xfi_d_valid, xfi_ind_valid); } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_train); indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_train); } else { dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_train); indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_train); } for (i = 0x1050; i <= 0x107c; i += 4, dptr ++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xfi_d_valid, xfi_ind_valid); } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pcs); indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_pcs); } else { dptr = 
(uint32_t *)(&mpi_dump->serdes1_xfi_hss_pcs); indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pcs); } for (i = 0x1800; i <= 0x1838; i += 4, dptr++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xfi_d_valid, xfi_ind_valid); } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_tx); indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_tx); } else { dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_tx); indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_tx); } for (i = 0x1c00; i <= 0x1c1f; i++, dptr ++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xfi_d_valid, xfi_ind_valid); } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_rx); indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_rx); } else { dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_rx); indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_rx); } for (i = 0x1c40; i <= 0x1c5f; i++, dptr ++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xfi_d_valid, xfi_ind_valid); } if (ha->pci_func & 1) { dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pll); indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_pll); } else { dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_pll); indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pll); } for (i = 0x1e00; i <= 0x1e1f; i++, dptr ++, indptr ++) { qls_get_both_serdes(ha, i, dptr, indptr, xfi_d_valid, xfi_ind_valid); } return(0); } static int qls_unpause_mpi_risc(qla_host_t *ha) { uint32_t data; data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS); if (!(data & Q81_CTL_HCS_RISC_PAUSED)) return -1; WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, \ Q81_CTL_HCS_CMD_CLR_RISC_PAUSE); return 0; } static int qls_pause_mpi_risc(qla_host_t *ha) { uint32_t data; int count = 10; WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, \ Q81_CTL_HCS_CMD_SET_RISC_PAUSE); do { data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS); if (data & Q81_CTL_HCS_RISC_PAUSED) break; qls_mdelay(__func__, 10); count--; } while (count); return ((count == 0) ? 
-1 : 0); } static void qls_get_intr_states(qla_host_t *ha, uint32_t *buf) { int i; for (i = 0; i < MAX_RX_RINGS; i++, buf++) { WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, (0x037f0300 + i)); *buf = READ_REG32(ha, Q81_CTL_INTR_ENABLE); } } static int qls_rd_xgmac_reg(qla_host_t *ha, uint32_t reg, uint32_t*data) { int ret = 0; ret = qls_wait_reg_rdy(ha, Q81_CTL_XGMAC_ADDR, Q81_XGMAC_ADDR_RDY, Q81_XGMAC_ADDR_XME); if (ret) goto exit_qls_rd_xgmac_reg; WRITE_REG32(ha, Q81_CTL_XGMAC_ADDR, (reg | Q81_XGMAC_ADDR_R)); ret = qls_wait_reg_rdy(ha, Q81_CTL_XGMAC_ADDR, Q81_XGMAC_ADDR_RDY, Q81_XGMAC_ADDR_XME); if (ret) goto exit_qls_rd_xgmac_reg; *data = READ_REG32(ha, Q81_CTL_XGMAC_DATA); exit_qls_rd_xgmac_reg: return ret; } static int qls_rd_xgmac_regs(qla_host_t *ha, uint32_t *buf, uint32_t o_func) { int ret = 0; int i; for (i = 0; i < Q81_XGMAC_REGISTER_END; i += 4, buf ++) { switch (i) { case Q81_PAUSE_SRC_LO : case Q81_PAUSE_SRC_HI : case Q81_GLOBAL_CFG : case Q81_TX_CFG : case Q81_RX_CFG : case Q81_FLOW_CTL : case Q81_PAUSE_OPCODE : case Q81_PAUSE_TIMER : case Q81_PAUSE_FRM_DEST_LO : case Q81_PAUSE_FRM_DEST_HI : case Q81_MAC_TX_PARAMS : case Q81_MAC_RX_PARAMS : case Q81_MAC_SYS_INT : case Q81_MAC_SYS_INT_MASK : case Q81_MAC_MGMT_INT : case Q81_MAC_MGMT_IN_MASK : case Q81_EXT_ARB_MODE : case Q81_TX_PKTS : case Q81_TX_PKTS_LO : case Q81_TX_BYTES : case Q81_TX_BYTES_LO : case Q81_TX_MCAST_PKTS : case Q81_TX_MCAST_PKTS_LO : case Q81_TX_BCAST_PKTS : case Q81_TX_BCAST_PKTS_LO : case Q81_TX_UCAST_PKTS : case Q81_TX_UCAST_PKTS_LO : case Q81_TX_CTL_PKTS : case Q81_TX_CTL_PKTS_LO : case Q81_TX_PAUSE_PKTS : case Q81_TX_PAUSE_PKTS_LO : case Q81_TX_64_PKT : case Q81_TX_64_PKT_LO : case Q81_TX_65_TO_127_PKT : case Q81_TX_65_TO_127_PKT_LO : case Q81_TX_128_TO_255_PKT : case Q81_TX_128_TO_255_PKT_LO : case Q81_TX_256_511_PKT : case Q81_TX_256_511_PKT_LO : case Q81_TX_512_TO_1023_PKT : case Q81_TX_512_TO_1023_PKT_LO : case Q81_TX_1024_TO_1518_PKT : case Q81_TX_1024_TO_1518_PKT_LO : case Q81_TX_1519_TO_MAX_PKT : case Q81_TX_1519_TO_MAX_PKT_LO : case Q81_TX_UNDERSIZE_PKT : case Q81_TX_UNDERSIZE_PKT_LO : case Q81_TX_OVERSIZE_PKT : case Q81_TX_OVERSIZE_PKT_LO : case Q81_RX_HALF_FULL_DET : case Q81_TX_HALF_FULL_DET_LO : case Q81_RX_OVERFLOW_DET : case Q81_TX_OVERFLOW_DET_LO : case Q81_RX_HALF_FULL_MASK : case Q81_TX_HALF_FULL_MASK_LO : case Q81_RX_OVERFLOW_MASK : case Q81_TX_OVERFLOW_MASK_LO : case Q81_STAT_CNT_CTL : case Q81_AUX_RX_HALF_FULL_DET : case Q81_AUX_TX_HALF_FULL_DET : case Q81_AUX_RX_OVERFLOW_DET : case Q81_AUX_TX_OVERFLOW_DET : case Q81_AUX_RX_HALF_FULL_MASK : case Q81_AUX_TX_HALF_FULL_MASK : case Q81_AUX_RX_OVERFLOW_MASK : case Q81_AUX_TX_OVERFLOW_MASK : case Q81_RX_BYTES : case Q81_RX_BYTES_LO : case Q81_RX_BYTES_OK : case Q81_RX_BYTES_OK_LO : case Q81_RX_PKTS : case Q81_RX_PKTS_LO : case Q81_RX_PKTS_OK : case Q81_RX_PKTS_OK_LO : case Q81_RX_BCAST_PKTS : case Q81_RX_BCAST_PKTS_LO : case Q81_RX_MCAST_PKTS : case Q81_RX_MCAST_PKTS_LO : case Q81_RX_UCAST_PKTS : case Q81_RX_UCAST_PKTS_LO : case Q81_RX_UNDERSIZE_PKTS : case Q81_RX_UNDERSIZE_PKTS_LO : case Q81_RX_OVERSIZE_PKTS : case Q81_RX_OVERSIZE_PKTS_LO : case Q81_RX_JABBER_PKTS : case Q81_RX_JABBER_PKTS_LO : case Q81_RX_UNDERSIZE_FCERR_PKTS : case Q81_RX_UNDERSIZE_FCERR_PKTS_LO : case Q81_RX_DROP_EVENTS : case Q81_RX_DROP_EVENTS_LO : case Q81_RX_FCERR_PKTS : case Q81_RX_FCERR_PKTS_LO : case Q81_RX_ALIGN_ERR : case Q81_RX_ALIGN_ERR_LO : case Q81_RX_SYMBOL_ERR : case Q81_RX_SYMBOL_ERR_LO : case Q81_RX_MAC_ERR : case Q81_RX_MAC_ERR_LO : case Q81_RX_CTL_PKTS : case 
Q81_RX_CTL_PKTS_LO : case Q81_RX_PAUSE_PKTS : case Q81_RX_PAUSE_PKTS_LO : case Q81_RX_64_PKTS : case Q81_RX_64_PKTS_LO : case Q81_RX_65_TO_127_PKTS : case Q81_RX_65_TO_127_PKTS_LO : case Q81_RX_128_255_PKTS : case Q81_RX_128_255_PKTS_LO : case Q81_RX_256_511_PKTS : case Q81_RX_256_511_PKTS_LO : case Q81_RX_512_TO_1023_PKTS : case Q81_RX_512_TO_1023_PKTS_LO : case Q81_RX_1024_TO_1518_PKTS : case Q81_RX_1024_TO_1518_PKTS_LO : case Q81_RX_1519_TO_MAX_PKTS : case Q81_RX_1519_TO_MAX_PKTS_LO : case Q81_RX_LEN_ERR_PKTS : case Q81_RX_LEN_ERR_PKTS_LO : case Q81_MDIO_TX_DATA : case Q81_MDIO_RX_DATA : case Q81_MDIO_CMD : case Q81_MDIO_PHY_ADDR : case Q81_MDIO_PORT : case Q81_MDIO_STATUS : case Q81_TX_CBFC_PAUSE_FRAMES0 : case Q81_TX_CBFC_PAUSE_FRAMES0_LO : case Q81_TX_CBFC_PAUSE_FRAMES1 : case Q81_TX_CBFC_PAUSE_FRAMES1_LO : case Q81_TX_CBFC_PAUSE_FRAMES2 : case Q81_TX_CBFC_PAUSE_FRAMES2_LO : case Q81_TX_CBFC_PAUSE_FRAMES3 : case Q81_TX_CBFC_PAUSE_FRAMES3_LO : case Q81_TX_CBFC_PAUSE_FRAMES4 : case Q81_TX_CBFC_PAUSE_FRAMES4_LO : case Q81_TX_CBFC_PAUSE_FRAMES5 : case Q81_TX_CBFC_PAUSE_FRAMES5_LO : case Q81_TX_CBFC_PAUSE_FRAMES6 : case Q81_TX_CBFC_PAUSE_FRAMES6_LO : case Q81_TX_CBFC_PAUSE_FRAMES7 : case Q81_TX_CBFC_PAUSE_FRAMES7_LO : case Q81_TX_FCOE_PKTS : case Q81_TX_FCOE_PKTS_LO : case Q81_TX_MGMT_PKTS : case Q81_TX_MGMT_PKTS_LO : case Q81_RX_CBFC_PAUSE_FRAMES0 : case Q81_RX_CBFC_PAUSE_FRAMES0_LO : case Q81_RX_CBFC_PAUSE_FRAMES1 : case Q81_RX_CBFC_PAUSE_FRAMES1_LO : case Q81_RX_CBFC_PAUSE_FRAMES2 : case Q81_RX_CBFC_PAUSE_FRAMES2_LO : case Q81_RX_CBFC_PAUSE_FRAMES3 : case Q81_RX_CBFC_PAUSE_FRAMES3_LO : case Q81_RX_CBFC_PAUSE_FRAMES4 : case Q81_RX_CBFC_PAUSE_FRAMES4_LO : case Q81_RX_CBFC_PAUSE_FRAMES5 : case Q81_RX_CBFC_PAUSE_FRAMES5_LO : case Q81_RX_CBFC_PAUSE_FRAMES6 : case Q81_RX_CBFC_PAUSE_FRAMES6_LO : case Q81_RX_CBFC_PAUSE_FRAMES7 : case Q81_RX_CBFC_PAUSE_FRAMES7_LO : case Q81_RX_FCOE_PKTS : case Q81_RX_FCOE_PKTS_LO : case Q81_RX_MGMT_PKTS : case Q81_RX_MGMT_PKTS_LO : case Q81_RX_NIC_FIFO_DROP : case Q81_RX_NIC_FIFO_DROP_LO : case Q81_RX_FCOE_FIFO_DROP : case Q81_RX_FCOE_FIFO_DROP_LO : case Q81_RX_MGMT_FIFO_DROP : case Q81_RX_MGMT_FIFO_DROP_LO : case Q81_RX_PKTS_PRIORITY0 : case Q81_RX_PKTS_PRIORITY0_LO : case Q81_RX_PKTS_PRIORITY1 : case Q81_RX_PKTS_PRIORITY1_LO : case Q81_RX_PKTS_PRIORITY2 : case Q81_RX_PKTS_PRIORITY2_LO : case Q81_RX_PKTS_PRIORITY3 : case Q81_RX_PKTS_PRIORITY3_LO : case Q81_RX_PKTS_PRIORITY4 : case Q81_RX_PKTS_PRIORITY4_LO : case Q81_RX_PKTS_PRIORITY5 : case Q81_RX_PKTS_PRIORITY5_LO : case Q81_RX_PKTS_PRIORITY6 : case Q81_RX_PKTS_PRIORITY6_LO : case Q81_RX_PKTS_PRIORITY7 : case Q81_RX_PKTS_PRIORITY7_LO : case Q81_RX_OCTETS_PRIORITY0 : case Q81_RX_OCTETS_PRIORITY0_LO : case Q81_RX_OCTETS_PRIORITY1 : case Q81_RX_OCTETS_PRIORITY1_LO : case Q81_RX_OCTETS_PRIORITY2 : case Q81_RX_OCTETS_PRIORITY2_LO : case Q81_RX_OCTETS_PRIORITY3 : case Q81_RX_OCTETS_PRIORITY3_LO : case Q81_RX_OCTETS_PRIORITY4 : case Q81_RX_OCTETS_PRIORITY4_LO : case Q81_RX_OCTETS_PRIORITY5 : case Q81_RX_OCTETS_PRIORITY5_LO : case Q81_RX_OCTETS_PRIORITY6 : case Q81_RX_OCTETS_PRIORITY6_LO : case Q81_RX_OCTETS_PRIORITY7 : case Q81_RX_OCTETS_PRIORITY7_LO : case Q81_TX_PKTS_PRIORITY0 : case Q81_TX_PKTS_PRIORITY0_LO : case Q81_TX_PKTS_PRIORITY1 : case Q81_TX_PKTS_PRIORITY1_LO : case Q81_TX_PKTS_PRIORITY2 : case Q81_TX_PKTS_PRIORITY2_LO : case Q81_TX_PKTS_PRIORITY3 : case Q81_TX_PKTS_PRIORITY3_LO : case Q81_TX_PKTS_PRIORITY4 : case Q81_TX_PKTS_PRIORITY4_LO : case Q81_TX_PKTS_PRIORITY5 : case Q81_TX_PKTS_PRIORITY5_LO : case 
Q81_TX_PKTS_PRIORITY6 : case Q81_TX_PKTS_PRIORITY6_LO : case Q81_TX_PKTS_PRIORITY7 : case Q81_TX_PKTS_PRIORITY7_LO : case Q81_TX_OCTETS_PRIORITY0 : case Q81_TX_OCTETS_PRIORITY0_LO : case Q81_TX_OCTETS_PRIORITY1 : case Q81_TX_OCTETS_PRIORITY1_LO : case Q81_TX_OCTETS_PRIORITY2 : case Q81_TX_OCTETS_PRIORITY2_LO : case Q81_TX_OCTETS_PRIORITY3 : case Q81_TX_OCTETS_PRIORITY3_LO : case Q81_TX_OCTETS_PRIORITY4 : case Q81_TX_OCTETS_PRIORITY4_LO : case Q81_TX_OCTETS_PRIORITY5 : case Q81_TX_OCTETS_PRIORITY5_LO : case Q81_TX_OCTETS_PRIORITY6 : case Q81_TX_OCTETS_PRIORITY6_LO : case Q81_TX_OCTETS_PRIORITY7 : case Q81_TX_OCTETS_PRIORITY7_LO : case Q81_RX_DISCARD_PRIORITY0 : case Q81_RX_DISCARD_PRIORITY0_LO : case Q81_RX_DISCARD_PRIORITY1 : case Q81_RX_DISCARD_PRIORITY1_LO : case Q81_RX_DISCARD_PRIORITY2 : case Q81_RX_DISCARD_PRIORITY2_LO : case Q81_RX_DISCARD_PRIORITY3 : case Q81_RX_DISCARD_PRIORITY3_LO : case Q81_RX_DISCARD_PRIORITY4 : case Q81_RX_DISCARD_PRIORITY4_LO : case Q81_RX_DISCARD_PRIORITY5 : case Q81_RX_DISCARD_PRIORITY5_LO : case Q81_RX_DISCARD_PRIORITY6 : case Q81_RX_DISCARD_PRIORITY6_LO : case Q81_RX_DISCARD_PRIORITY7 : case Q81_RX_DISCARD_PRIORITY7_LO : if (o_func) ret = qls_rd_ofunc_xgmac_reg(ha, i, buf); else ret = qls_rd_xgmac_reg(ha, i, buf); if (ret) *buf = Q81_BAD_DATA; break; default: break; } } return 0; } static int qls_get_mpi_regs(qla_host_t *ha, uint32_t *buf, uint32_t offset, uint32_t count) { int i, ret = 0; for (i = 0; i < count; i++, buf++) { ret = qls_rd_mpi_reg(ha, (offset + i), buf); if (ret) return ret; } return (ret); } static int qls_get_mpi_shadow_regs(qla_host_t *ha, uint32_t *buf) { uint32_t i; int ret; #define Q81_RISC_124 0x0000007c #define Q81_RISC_127 0x0000007f #define Q81_SHADOW_OFFSET 0xb0000000 for (i = 0; i < Q81_MPI_CORE_SH_REGS_CNT; i++, buf++) { ret = qls_wr_mpi_reg(ha, (Q81_CTL_PROC_ADDR_RISC_INT_REG | Q81_RISC_124), (Q81_SHADOW_OFFSET | i << 20)); if (ret) goto exit_qls_get_mpi_shadow_regs; ret = qls_mpi_risc_rd_reg(ha, (Q81_CTL_PROC_ADDR_RISC_INT_REG | Q81_RISC_127), buf); if (ret) goto exit_qls_get_mpi_shadow_regs; } exit_qls_get_mpi_shadow_regs: return ret; } #define SYS_CLOCK (0x00) #define PCI_CLOCK (0x80) #define FC_CLOCK (0x140) #define XGM_CLOCK (0x180) #define Q81_ADDRESS_REGISTER_ENABLE 0x00010000 #define Q81_UP 0x00008000 #define Q81_MAX_MUX 0x40 #define Q81_MAX_MODULES 0x1F static uint32_t * qls_get_probe(qla_host_t *ha, uint32_t clock, uint8_t *valid, uint32_t *buf) { uint32_t module, mux_sel, probe, lo_val, hi_val; for (module = 0; module < Q81_MAX_MODULES; module ++) { if (valid[module]) { for (mux_sel = 0; mux_sel < Q81_MAX_MUX; mux_sel++) { probe = clock | Q81_ADDRESS_REGISTER_ENABLE | mux_sel | (module << 9); WRITE_REG32(ha, Q81_CTL_XG_PROBE_MUX_ADDR,\ probe); lo_val = READ_REG32(ha,\ Q81_CTL_XG_PROBE_MUX_DATA); if (mux_sel == 0) { *buf = probe; buf ++; } probe |= Q81_UP; WRITE_REG32(ha, Q81_CTL_XG_PROBE_MUX_ADDR,\ probe); hi_val = READ_REG32(ha,\ Q81_CTL_XG_PROBE_MUX_DATA); *buf = lo_val; buf++; *buf = hi_val; buf++; } } } return(buf); } static int qls_get_probe_dump(qla_host_t *ha, uint32_t *buf) { uint8_t sys_clock_valid_modules[0x20] = { 1, // 0x00 1, // 0x01 1, // 0x02 0, // 0x03 1, // 0x04 1, // 0x05 1, // 0x06 1, // 0x07 1, // 0x08 1, // 0x09 1, // 0x0A 1, // 0x0B 1, // 0x0C 1, // 0x0D 1, // 0x0E 0, // 0x0F 1, // 0x10 1, // 0x11 1, // 0x12 1, // 0x13 0, // 0x14 0, // 0x15 0, // 0x16 0, // 0x17 0, // 0x18 0, // 0x19 0, // 0x1A 0, // 0x1B 0, // 0x1C 0, // 0x1D 0, // 0x1E 0 // 0x1F }; uint8_t pci_clock_valid_modules[0x20] = { 1, 
// 0x00 0, // 0x01 0, // 0x02 0, // 0x03 0, // 0x04 0, // 0x05 1, // 0x06 1, // 0x07 0, // 0x08 0, // 0x09 0, // 0x0A 0, // 0x0B 0, // 0x0C 0, // 0x0D 1, // 0x0E 0, // 0x0F 0, // 0x10 0, // 0x11 0, // 0x12 0, // 0x13 0, // 0x14 0, // 0x15 0, // 0x16 0, // 0x17 0, // 0x18 0, // 0x19 0, // 0x1A 0, // 0x1B 0, // 0x1C 0, // 0x1D 0, // 0x1E 0 // 0x1F }; uint8_t xgm_clock_valid_modules[0x20] = { 1, // 0x00 0, // 0x01 0, // 0x02 1, // 0x03 0, // 0x04 0, // 0x05 0, // 0x06 0, // 0x07 1, // 0x08 1, // 0x09 0, // 0x0A 0, // 0x0B 1, // 0x0C 1, // 0x0D 1, // 0x0E 0, // 0x0F 1, // 0x10 1, // 0x11 0, // 0x12 0, // 0x13 0, // 0x14 0, // 0x15 0, // 0x16 0, // 0x17 0, // 0x18 0, // 0x19 0, // 0x1A 0, // 0x1B 0, // 0x1C 0, // 0x1D 0, // 0x1E 0 // 0x1F }; uint8_t fc_clock_valid_modules[0x20] = { 1, // 0x00 0, // 0x01 0, // 0x02 0, // 0x03 0, // 0x04 0, // 0x05 0, // 0x06 0, // 0x07 0, // 0x08 0, // 0x09 0, // 0x0A 0, // 0x0B 1, // 0x0C 1, // 0x0D 0, // 0x0E 0, // 0x0F 0, // 0x10 0, // 0x11 0, // 0x12 0, // 0x13 0, // 0x14 0, // 0x15 0, // 0x16 0, // 0x17 0, // 0x18 0, // 0x19 0, // 0x1A 0, // 0x1B 0, // 0x1C 0, // 0x1D 0, // 0x1E 0 // 0x1F }; qls_wr_mpi_reg(ha, 0x100e, 0x18a20000); buf = qls_get_probe(ha, SYS_CLOCK, sys_clock_valid_modules, buf); buf = qls_get_probe(ha, PCI_CLOCK, pci_clock_valid_modules, buf); buf = qls_get_probe(ha, XGM_CLOCK, xgm_clock_valid_modules, buf); buf = qls_get_probe(ha, FC_CLOCK, fc_clock_valid_modules, buf); return(0); } static void qls_get_ridx_registers(qla_host_t *ha, uint32_t *buf) { uint32_t type, idx, idx_max; uint32_t r_idx; uint32_t r_data; uint32_t val; for (type = 0; type < 4; type ++) { if (type < 2) idx_max = 8; else idx_max = 16; for (idx = 0; idx < idx_max; idx ++) { val = 0x04000000 | (type << 16) | (idx << 8); WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, val); r_idx = 0; while ((r_idx & 0x40000000) == 0) r_idx = READ_REG32(ha, Q81_CTL_ROUTING_INDEX); r_data = READ_REG32(ha, Q81_CTL_ROUTING_DATA); *buf = type; buf ++; *buf = idx; buf ++; *buf = r_idx; buf ++; *buf = r_data; buf ++; } } } static void qls_get_mac_proto_regs(qla_host_t *ha, uint32_t* buf) { #define Q81_RS_AND_ADR 0x06000000 #define Q81_RS_ONLY 0x04000000 #define Q81_NUM_TYPES 10 uint32_t result_index, result_data; uint32_t type; uint32_t index; uint32_t offset; uint32_t val; uint32_t initial_val; uint32_t max_index; uint32_t max_offset; for (type = 0; type < Q81_NUM_TYPES; type ++) { switch (type) { case 0: // CAM initial_val = Q81_RS_AND_ADR; max_index = 512; max_offset = 3; break; case 1: // Multicast MAC Address initial_val = Q81_RS_ONLY; max_index = 32; max_offset = 2; break; case 2: // VLAN filter mask case 3: // MC filter mask initial_val = Q81_RS_ONLY; max_index = 4096; max_offset = 1; break; case 4: // FC MAC addresses initial_val = Q81_RS_ONLY; max_index = 4; max_offset = 2; break; case 5: // Mgmt MAC addresses initial_val = Q81_RS_ONLY; max_index = 8; max_offset = 2; break; case 6: // Mgmt VLAN addresses initial_val = Q81_RS_ONLY; max_index = 16; max_offset = 1; break; case 7: // Mgmt IPv4 address initial_val = Q81_RS_ONLY; max_index = 4; max_offset = 1; break; case 8: // Mgmt IPv6 address initial_val = Q81_RS_ONLY; max_index = 4; max_offset = 4; break; case 9: // Mgmt TCP/UDP Dest port initial_val = Q81_RS_ONLY; max_index = 4; max_offset = 1; break; default: printf("Bad type!!! 
0x%08x\n", type); max_index = 0; max_offset = 0; break; } for (index = 0; index < max_index; index ++) { for (offset = 0; offset < max_offset; offset ++) { val = initial_val | (type << 16) | (index << 4) | (offset); WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX,\ val); result_index = 0; while ((result_index & 0x40000000) == 0) result_index = READ_REG32(ha, \ Q81_CTL_MAC_PROTO_ADDR_INDEX); result_data = READ_REG32(ha,\ Q81_CTL_MAC_PROTO_ADDR_DATA); *buf = result_index; buf ++; *buf = result_data; buf ++; } } } } static int qls_get_ets_regs(qla_host_t *ha, uint32_t *buf) { int ret = 0; int i; for(i = 0; i < 8; i ++, buf ++) { WRITE_REG32(ha, Q81_CTL_NIC_ENH_TX_SCHD, \ ((i << 29) | 0x08000000)); *buf = READ_REG32(ha, Q81_CTL_NIC_ENH_TX_SCHD); } for(i = 0; i < 2; i ++, buf ++) { WRITE_REG32(ha, Q81_CTL_CNA_ENH_TX_SCHD, \ ((i << 29) | 0x08000000)); *buf = READ_REG32(ha, Q81_CTL_CNA_ENH_TX_SCHD); } return ret; } int qls_mpi_core_dump(qla_host_t *ha) { int ret; int i; uint32_t reg, reg_val; qls_mpi_coredump_t *mpi_dump = &ql_mpi_coredump; ret = qls_pause_mpi_risc(ha); if (ret) { printf("Failed RISC pause. Status = 0x%.08x\n",ret); return(-1); } memset(&(mpi_dump->mpi_global_header), 0, sizeof(qls_mpid_glbl_hdr_t)); mpi_dump->mpi_global_header.cookie = Q81_MPID_COOKIE; mpi_dump->mpi_global_header.hdr_size = sizeof(qls_mpid_glbl_hdr_t); mpi_dump->mpi_global_header.img_size = sizeof(qls_mpi_coredump_t); memcpy(mpi_dump->mpi_global_header.id, "MPI Coredump", sizeof(mpi_dump->mpi_global_header.id)); qls_mpid_seg_hdr(&mpi_dump->nic1_regs_seg_hdr, Q81_NIC1_CONTROL_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic1_regs)), "NIC1 Registers"); qls_mpid_seg_hdr(&mpi_dump->nic2_regs_seg_hdr, Q81_NIC2_CONTROL_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic2_regs)), "NIC2 Registers"); qls_mpid_seg_hdr(&mpi_dump->xgmac1_seg_hdr, Q81_NIC1_XGMAC_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->xgmac1)), "NIC1 XGMac Registers"); qls_mpid_seg_hdr(&mpi_dump->xgmac2_seg_hdr, Q81_NIC2_XGMAC_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->xgmac2)), "NIC2 XGMac Registers"); if (ha->pci_func & 1) { for (i = 0; i < 64; i++) mpi_dump->nic2_regs[i] = READ_REG32(ha, i * sizeof(uint32_t)); for (i = 0; i < 64; i++) mpi_dump->nic1_regs[i] = qls_rd_ofunc_reg(ha, (i * sizeof(uint32_t)) / 4); qls_rd_xgmac_regs(ha, &mpi_dump->xgmac2[0], 0); qls_rd_xgmac_regs(ha, &mpi_dump->xgmac1[0], 1); } else { for (i = 0; i < 64; i++) mpi_dump->nic1_regs[i] = READ_REG32(ha, i * sizeof(uint32_t)); for (i = 0; i < 64; i++) mpi_dump->nic2_regs[i] = qls_rd_ofunc_reg(ha, (i * sizeof(uint32_t)) / 4); qls_rd_xgmac_regs(ha, &mpi_dump->xgmac1[0], 0); qls_rd_xgmac_regs(ha, &mpi_dump->xgmac2[0], 1); } qls_mpid_seg_hdr(&mpi_dump->xaui1_an_hdr, Q81_XAUI1_AN_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xaui_an)), "XAUI1 AN Registers"); qls_mpid_seg_hdr(&mpi_dump->xaui1_hss_pcs_hdr, Q81_XAUI1_HSS_PCS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xaui_hss_pcs)), "XAUI1 HSS PCS Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi1_an_hdr, Q81_XFI1_AN_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xfi_an)), "XFI1 AN Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi1_train_hdr, Q81_XFI1_TRAIN_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xfi_train)), "XFI1 TRAIN Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_pcs_hdr, Q81_XFI1_HSS_PCS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xfi_hss_pcs)), "XFI1 HSS PCS Registers"); 
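/* Each coredump segment is a qls_mpid_seg_hdr_t immediately followed by its payload; seg_size counts both, which is why every qls_mpid_seg_hdr() call here passes sizeof(qls_mpid_seg_hdr_t) plus the payload size. */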
qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_tx_hdr, Q81_XFI1_HSS_TX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xfi_hss_tx)), "XFI1 HSS TX Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_rx_hdr, Q81_XFI1_HSS_RX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xfi_hss_rx)), "XFI1 HSS RX Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_pll_hdr, Q81_XFI1_HSS_PLL_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xfi_hss_pll)), "XFI1 HSS PLL Registers"); qls_mpid_seg_hdr(&mpi_dump->xaui2_an_hdr, Q81_XAUI2_AN_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xaui_an)), "XAUI2 AN Registers"); qls_mpid_seg_hdr(&mpi_dump->xaui2_hss_pcs_hdr, Q81_XAUI2_HSS_PCS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xaui_hss_pcs)), "XAUI2 HSS PCS Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi2_an_hdr, Q81_XFI2_AN_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xfi_an)), "XFI2 AN Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi2_train_hdr, Q81_XFI2_TRAIN_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xfi_train)), "XFI2 TRAIN Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_pcs_hdr, Q81_XFI2_HSS_PCS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xfi_hss_pcs)), "XFI2 HSS PCS Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_tx_hdr, Q81_XFI2_HSS_TX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xfi_hss_tx)), "XFI2 HSS TX Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_rx_hdr, Q81_XFI2_HSS_RX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xfi_hss_rx)), "XFI2 HSS RX Registers"); qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_pll_hdr, Q81_XFI2_HSS_PLL_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xfi_hss_pll)), "XFI2 HSS PLL Registers"); qls_rd_serdes_regs(ha, mpi_dump); qls_mpid_seg_hdr(&mpi_dump->core_regs_seg_hdr, Q81_CORE_SEG_NUM, (sizeof(mpi_dump->core_regs_seg_hdr) + sizeof(mpi_dump->mpi_core_regs) + sizeof(mpi_dump->mpi_core_sh_regs)), "Core Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->mpi_core_regs[0], Q81_MPI_CORE_REGS_ADDR, Q81_MPI_CORE_REGS_CNT); ret = qls_get_mpi_shadow_regs(ha, &mpi_dump->mpi_core_sh_regs[0]); qls_mpid_seg_hdr(&mpi_dump->test_logic_regs_seg_hdr, Q81_TEST_LOGIC_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->test_logic_regs)), "Test Logic Regs"); ret = qls_get_mpi_regs(ha, &mpi_dump->test_logic_regs[0], Q81_TEST_REGS_ADDR, Q81_TEST_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->rmii_regs_seg_hdr, Q81_RMII_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->rmii_regs)), "RMII Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->rmii_regs[0], Q81_RMII_REGS_ADDR, Q81_RMII_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->fcmac1_regs_seg_hdr, Q81_FCMAC1_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fcmac1_regs)), "FCMAC1 Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->fcmac1_regs[0], Q81_FCMAC1_REGS_ADDR, Q81_FCMAC_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->fcmac2_regs_seg_hdr, Q81_FCMAC2_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fcmac2_regs)), "FCMAC2 Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->fcmac2_regs[0], Q81_FCMAC2_REGS_ADDR, Q81_FCMAC_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->fc1_mbx_regs_seg_hdr, Q81_FC1_MBOX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fc1_mbx_regs)), "FC1 MBox Regs"); ret = qls_get_mpi_regs(ha, &mpi_dump->fc1_mbx_regs[0], Q81_FC1_MBX_REGS_ADDR, Q81_FC_MBX_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->ide_regs_seg_hdr, Q81_IDE_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + 
sizeof(mpi_dump->ide_regs)), "IDE Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->ide_regs[0], Q81_IDE_REGS_ADDR, Q81_IDE_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->nic1_mbx_regs_seg_hdr, Q81_NIC1_MBOX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic1_mbx_regs)), "NIC1 MBox Regs"); ret = qls_get_mpi_regs(ha, &mpi_dump->nic1_mbx_regs[0], Q81_NIC1_MBX_REGS_ADDR, Q81_NIC_MBX_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->smbus_regs_seg_hdr, Q81_SMBUS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->smbus_regs)), "SMBus Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->smbus_regs[0], Q81_SMBUS_REGS_ADDR, Q81_SMBUS_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->fc2_mbx_regs_seg_hdr, Q81_FC2_MBOX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fc2_mbx_regs)), "FC2 MBox Regs"); ret = qls_get_mpi_regs(ha, &mpi_dump->fc2_mbx_regs[0], Q81_FC2_MBX_REGS_ADDR, Q81_FC_MBX_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->nic2_mbx_regs_seg_hdr, Q81_NIC2_MBOX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic2_mbx_regs)), "NIC2 MBox Regs"); ret = qls_get_mpi_regs(ha, &mpi_dump->nic2_mbx_regs[0], Q81_NIC2_MBX_REGS_ADDR, Q81_NIC_MBX_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->i2c_regs_seg_hdr, Q81_I2C_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->i2c_regs)), "I2C Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->i2c_regs[0], Q81_I2C_REGS_ADDR, Q81_I2C_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->memc_regs_seg_hdr, Q81_MEMC_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->memc_regs)), "MEMC Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->memc_regs[0], Q81_MEMC_REGS_ADDR, Q81_MEMC_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->pbus_regs_seg_hdr, Q81_PBUS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->pbus_regs)), "PBUS Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->pbus_regs[0], Q81_PBUS_REGS_ADDR, Q81_PBUS_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->mde_regs_seg_hdr, Q81_MDE_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->mde_regs)), "MDE Registers"); ret = qls_get_mpi_regs(ha, &mpi_dump->mde_regs[0], Q81_MDE_REGS_ADDR, Q81_MDE_REGS_CNT); qls_mpid_seg_hdr(&mpi_dump->intr_states_seg_hdr, Q81_INTR_STATES_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->intr_states)), "INTR States"); qls_get_intr_states(ha, &mpi_dump->intr_states[0]); qls_mpid_seg_hdr(&mpi_dump->probe_dump_seg_hdr, Q81_PROBE_DUMP_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->probe_dump)), "Probe Dump"); qls_get_probe_dump(ha, &mpi_dump->probe_dump[0]); qls_mpid_seg_hdr(&mpi_dump->routing_reg_seg_hdr, Q81_ROUTING_INDEX_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->routing_regs)), "Routing Regs"); qls_get_ridx_registers(ha, &mpi_dump->routing_regs[0]); qls_mpid_seg_hdr(&mpi_dump->mac_prot_reg_seg_hdr, Q81_MAC_PROTOCOL_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->mac_prot_regs)), "MAC Prot Regs"); qls_get_mac_proto_regs(ha, &mpi_dump->mac_prot_regs[0]); qls_mpid_seg_hdr(&mpi_dump->ets_seg_hdr, Q81_ETS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->ets)), "ETS Registers"); ret = qls_get_ets_regs(ha, &mpi_dump->ets[0]); qls_mpid_seg_hdr(&mpi_dump->sem_regs_seg_hdr, Q81_SEM_REGS_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->sem_regs)), "Sem Registers"); for(i = 0; i < Q81_MAX_SEMAPHORE_FUNCTIONS ; i ++) { reg = Q81_CTL_PROC_ADDR_REG_BLOCK | (i << Q81_FUNCTION_SHIFT) | (Q81_CTL_SEMAPHORE >> 2); ret = qls_mpi_risc_rd_reg(ha, reg, &reg_val); mpi_dump->sem_regs[i] = reg_val; if (ret != 0) mpi_dump->sem_regs[i] = Q81_BAD_DATA; } ret = qls_unpause_mpi_risc(ha); if (ret)
printf("Failed RISC unpause. Status = 0x%.08x\n",ret); ret = qls_mpi_reset(ha); if (ret) printf("Failed RISC reset. Status = 0x%.08x\n",ret); WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, 0x80008000); qls_mpid_seg_hdr(&mpi_dump->memc_ram_seg_hdr, Q81_MEMC_RAM_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->memc_ram)), "MEMC RAM"); ret = qls_mbx_dump_risc_ram(ha, &mpi_dump->memc_ram[0], Q81_MEMC_RAM_ADDR, Q81_MEMC_RAM_CNT); if (ret) printf("Failed Dump of MEMC RAM. Status = 0x%.08x\n",ret); qls_mpid_seg_hdr(&mpi_dump->code_ram_seg_hdr, Q81_WCS_RAM_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->code_ram)), "WCS RAM"); ret = qls_mbx_dump_risc_ram(ha, &mpi_dump->memc_ram[0], Q81_CODE_RAM_ADDR, Q81_CODE_RAM_CNT); if (ret) printf("Failed Dump of CODE RAM. Status = 0x%.08x\n",ret); qls_mpid_seg_hdr(&mpi_dump->wqc1_seg_hdr, Q81_WQC1_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->wqc1)), "WQC 1"); qls_mpid_seg_hdr(&mpi_dump->wqc2_seg_hdr, Q81_WQC2_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->wqc2)), "WQC 2"); qls_mpid_seg_hdr(&mpi_dump->cqc1_seg_hdr, Q81_CQC1_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->cqc1)), "CQC 1"); qls_mpid_seg_hdr(&mpi_dump->cqc2_seg_hdr, Q81_CQC2_SEG_NUM, (sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->cqc2)), "CQC 2"); return 0; } Index: head/sys/dev/ral/rt2560reg.h =================================================================== --- head/sys/dev/ral/rt2560reg.h (revision 258779) +++ head/sys/dev/ral/rt2560reg.h (revision 258780) @@ -1,489 +1,489 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define RT2560_DEFAULT_RSSI_CORR 0x79 #define RT2560_NOISE_FLOOR -95 #define RT2560_TX_RING_COUNT 48 #define RT2560_ATIM_RING_COUNT 4 #define RT2560_PRIO_RING_COUNT 16 #define RT2560_BEACON_RING_COUNT 1 #define RT2560_RX_RING_COUNT 32 #define RT2560_TX_DESC_SIZE (sizeof (struct rt2560_tx_desc)) #define RT2560_RX_DESC_SIZE (sizeof (struct rt2560_rx_desc)) #define RT2560_MAX_SCATTER 1 /* * Control and status registers. 
*/ #define RT2560_CSR0 0x0000 /* ASIC version number */ #define RT2560_CSR1 0x0004 /* System control */ #define RT2560_CSR3 0x000c /* STA MAC address 0 */ #define RT2560_CSR4 0x0010 /* STA MAC address 1 */ #define RT2560_CSR5 0x0014 /* BSSID 0 */ #define RT2560_CSR6 0x0018 /* BSSID 1 */ #define RT2560_CSR7 0x001c /* Interrupt source */ #define RT2560_CSR8 0x0020 /* Interrupt mask */ #define RT2560_CSR9 0x0024 /* Maximum frame length */ #define RT2560_SECCSR0 0x0028 /* WEP control */ #define RT2560_CSR11 0x002c /* Back-off control */ #define RT2560_CSR12 0x0030 /* Synchronization configuration 0 */ #define RT2560_CSR13 0x0034 /* Synchronization configuration 1 */ #define RT2560_CSR14 0x0038 /* Synchronization control */ #define RT2560_CSR15 0x003c /* Synchronization status */ #define RT2560_CSR16 0x0040 /* TSF timer 0 */ #define RT2560_CSR17 0x0044 /* TSF timer 1 */ #define RT2560_CSR18 0x0048 /* IFS timer 0 */ #define RT2560_CSR19 0x004c /* IFS timer 1 */ #define RT2560_CSR20 0x0050 /* WAKEUP timer */ #define RT2560_CSR21 0x0054 /* EEPROM control */ #define RT2560_CSR22 0x0058 /* CFP control */ #define RT2560_TXCSR0 0x0060 /* TX control */ #define RT2560_TXCSR1 0x0064 /* TX configuration */ #define RT2560_TXCSR2 0x0068 /* TX descriptor configuration */ #define RT2560_TXCSR3 0x006c /* TX ring base address */ #define RT2560_TXCSR4 0x0070 /* TX ATIM ring base address */ #define RT2560_TXCSR5 0x0074 /* TX PRIO ring base address */ #define RT2560_TXCSR6 0x0078 /* Beacon base address */ #define RT2560_TXCSR7 0x007c /* AutoResponder control */ #define RT2560_RXCSR0 0x0080 /* RX control */ #define RT2560_RXCSR1 0x0084 /* RX descriptor configuration */ #define RT2560_RXCSR2 0x0088 /* RX ring base address */ #define RT2560_PCICSR 0x008c /* PCI control */ #define RT2560_RXCSR3 0x0090 /* BBP ID 0 */ #define RT2560_TXCSR9 0x0094 /* OFDM TX BBP */ #define RT2560_ARSP_PLCP_0 0x0098 /* Auto Responder PLCP address */ #define RT2560_ARSP_PLCP_1 0x009c /* Auto Responder Basic Rate mask */ #define RT2560_CNT0 0x00a0 /* FCS error counter */ #define RT2560_CNT1 0x00ac /* PLCP error counter */ #define RT2560_CNT2 0x00b0 /* Long error counter */ #define RT2560_CNT3 0x00b8 /* CCA false alarm counter */ #define RT2560_CNT4 0x00bc /* RX FIFO Overflow counter */ #define RT2560_CNT5 0x00c0 /* Tx FIFO Underrun counter */ #define RT2560_PWRCSR0 0x00c4 /* Power mode configuration */ #define RT2560_PSCSR0 0x00c8 /* Power state transition time */ #define RT2560_PSCSR1 0x00cc /* Power state transition time */ #define RT2560_PSCSR2 0x00d0 /* Power state transition time */ #define RT2560_PSCSR3 0x00d4 /* Power state transition time */ #define RT2560_PWRCSR1 0x00d8 /* Manual power control/status */ #define RT2560_TIMECSR 0x00dc /* Timer control */ #define RT2560_MACCSR0 0x00e0 /* MAC configuration */ #define RT2560_MACCSR1 0x00e4 /* MAC configuration */ #define RT2560_RALINKCSR 0x00e8 /* Ralink RX auto-reset BBCR */ #define RT2560_BCNCSR 0x00ec /* Beacon interval control */ #define RT2560_BBPCSR 0x00f0 /* BBP serial control */ #define RT2560_RFCSR 0x00f4 /* RF serial control */ #define RT2560_LEDCSR 0x00f8 /* LED control */ #define RT2560_SECCSR3 0x00fc /* XXX not documented */ #define RT2560_DMACSR0 0x0100 /* Current RX ring address */ #define RT2560_DMACSR1 0x0104 /* Current Tx ring address */ #define RT2560_DMACSR2 0x0104 /* Current Priority ring address */ #define RT2560_DMACSR3 0x0104 /* Current ATIM ring address */ #define RT2560_TXACKCSR0 0x0110 /* XXX not documented */ #define RT2560_GPIOCSR 0x0120 /* */ #define 
RT2560_BBBPPCSR 0x0124 /* BBP Pin Control */ #define RT2560_FIFOCSR0 0x0128 /* TX FIFO pointer */ #define RT2560_FIFOCSR1 0x012c /* RX FIFO pointer */ #define RT2560_BCNOCSR 0x0130 /* Beacon time offset */ #define RT2560_RLPWCSR 0x0134 /* RX_PE Low Width */ #define RT2560_TESTCSR 0x0138 /* Test Mode Select */ #define RT2560_PLCP1MCSR 0x013c /* Signal/Service/Length of ACK @1M */ #define RT2560_PLCP2MCSR 0x0140 /* Signal/Service/Length of ACK @2M */ #define RT2560_PLCP5p5MCSR 0x0144 /* Signal/Service/Length of ACK @5.5M */ #define RT2560_PLCP11MCSR 0x0148 /* Signal/Service/Length of ACK @11M */ #define RT2560_ACKPCTCSR 0x014c /* ACK/CTS payload consume time */ #define RT2560_ARTCSR1 0x0150 /* ACK/CTS payload consume time */ #define RT2560_ARTCSR2 0x0154 /* ACK/CTS payload consume time */ #define RT2560_SECCSR1 0x0158 /* WEP control */ #define RT2560_BBPCSR1 0x015c /* BBP TX Configuration */ /* possible flags for register RXCSR0 */ #define RT2560_DISABLE_RX (1 << 0) #define RT2560_DROP_CRC_ERROR (1 << 1) #define RT2560_DROP_PHY_ERROR (1 << 2) #define RT2560_DROP_CTL (1 << 3) #define RT2560_DROP_NOT_TO_ME (1 << 4) #define RT2560_DROP_TODS (1 << 5) #define RT2560_DROP_VERSION_ERROR (1 << 6) /* possible flags for register CSR1 */ #define RT2560_RESET_ASIC (1 << 0) #define RT2560_RESET_BBP (1 << 1) #define RT2560_HOST_READY (1 << 2) /* possible flags for register CSR14 */ #define RT2560_ENABLE_TSF (1 << 0) #define RT2560_ENABLE_TSF_SYNC(x) (((x) & 0x3) << 1) #define RT2560_ENABLE_TBCN (1 << 3) #define RT2560_ENABLE_BEACON_GENERATOR (1 << 6) /* possible flags for register CSR21 */ #define RT2560_C (1 << 1) #define RT2560_S (1 << 2) #define RT2560_D (1 << 3) #define RT2560_Q (1 << 4) #define RT2560_93C46 (1 << 5) #define RT2560_SHIFT_D 3 #define RT2560_SHIFT_Q 4 /* possible flags for register TXCSR0 */ #define RT2560_KICK_TX (1 << 0) #define RT2560_KICK_ATIM (1 << 1) #define RT2560_KICK_PRIO (1 << 2) #define RT2560_ABORT_TX (1 << 3) /* possible flags for register SECCSR0 */ #define RT2560_KICK_DECRYPT (1 << 0) /* possible flags for register SECCSR1 */ #define RT2560_KICK_ENCRYPT (1 << 0) /* possible flags for register CSR7 */ #define RT2560_BEACON_EXPIRE 0x00000001 #define RT2560_WAKEUP_EXPIRE 0x00000002 #define RT2560_ATIM_EXPIRE 0x00000004 #define RT2560_TX_DONE 0x00000008 #define RT2560_ATIM_DONE 0x00000010 #define RT2560_PRIO_DONE 0x00000020 #define RT2560_RX_DONE 0x00000040 #define RT2560_DECRYPTION_DONE 0x00000080 #define RT2560_ENCRYPTION_DONE 0x00000100 #define RT2560_INTR_MASK \ (~(RT2560_BEACON_EXPIRE | RT2560_WAKEUP_EXPIRE | RT2560_TX_DONE | \ RT2560_PRIO_DONE | RT2560_RX_DONE | RT2560_DECRYPTION_DONE | \ RT2560_ENCRYPTION_DONE)) /* Tx descriptor */ struct rt2560_tx_desc { uint32_t flags; #define RT2560_TX_BUSY (1 << 0) #define RT2560_TX_VALID (1 << 1) #define RT2560_TX_RESULT_MASK 0x0000001c #define RT2560_TX_SUCCESS (0 << 2) #define RT2560_TX_SUCCESS_RETRY (1 << 2) #define RT2560_TX_FAIL_RETRY (2 << 2) #define RT2560_TX_FAIL_INVALID (3 << 2) #define RT2560_TX_FAIL_OTHER (4 << 2) #define RT2560_TX_MORE_FRAG (1 << 8) #define RT2560_TX_ACK (1 << 9) #define RT2560_TX_TIMESTAMP (1 << 10) #define RT2560_TX_OFDM (1 << 11) #define RT2560_TX_CIPHER_BUSY (1 << 12) #define RT2560_TX_IFS_MASK 0x00006000 #define RT2560_TX_IFS_BACKOFF (0 << 13) #define RT2560_TX_IFS_SIFS (1 << 13) #define RT2560_TX_IFS_NEWBACKOFF (2 << 13) #define RT2560_TX_IFS_NONE (3 << 13) #define RT2560_TX_LONG_RETRY (1 << 15) #define RT2560_TX_CIPHER_MASK 0xe0000000 #define RT2560_TX_CIPHER_NONE (0 << 29) #define
RT2560_TX_CIPHER_WEP40 (1 << 29) #define RT2560_TX_CIPHER_WEP104 (2 << 29) #define RT2560_TX_CIPHER_TKIP (3 << 29) #define RT2560_TX_CIPHER_AES (4 << 29) #define RT2560_TX_RETRYCNT(v) (((v) >> 5) & 0x7) uint32_t physaddr; uint16_t wme; #define RT2560_LOGCWMAX(x) (((x) & 0xf) << 12) #define RT2560_LOGCWMIN(x) (((x) & 0xf) << 8) #define RT2560_AIFSN(x) (((x) & 0x3) << 6) #define RT2560_IVOFFSET(x) (((x) & 0x3f)) uint16_t reserved1; uint8_t plcp_signal; uint8_t plcp_service; #define RT2560_PLCP_LENGEXT 0x80 uint8_t plcp_length_lo; uint8_t plcp_length_hi; uint32_t iv; uint32_t eiv; uint8_t key[IEEE80211_KEYBUF_SIZE]; uint32_t reserved2[2]; } __packed; /* Rx descriptor */ struct rt2560_rx_desc { uint32_t flags; #define RT2560_RX_BUSY (1 << 0) #define RT2560_RX_CRC_ERROR (1 << 5) #define RT2560_RX_OFDM (1 << 6) #define RT2560_RX_PHY_ERROR (1 << 7) #define RT2560_RX_CIPHER_BUSY (1 << 8) #define RT2560_RX_ICV_ERROR (1 << 9) #define RT2560_RX_CIPHER_MASK 0xe0000000 #define RT2560_RX_CIPHER_NONE (0 << 29) #define RT2560_RX_CIPHER_WEP40 (1 << 29) #define RT2560_RX_CIPHER_WEP104 (2 << 29) #define RT2560_RX_CIPHER_TKIP (3 << 29) #define RT2560_RX_CIPHER_AES (4 << 29) uint32_t physaddr; uint8_t rate; uint8_t rssi; uint8_t ta[IEEE80211_ADDR_LEN]; uint32_t iv; uint32_t eiv; uint8_t key[IEEE80211_KEYBUF_SIZE]; uint32_t reserved[2]; } __packed; #define RAL_RF1 0 #define RAL_RF2 2 #define RAL_RF3 1 #define RAL_RF4 3 #define RT2560_RF1_AUTOTUNE 0x08000 #define RT2560_RF3_AUTOTUNE 0x00040 #define RT2560_BBP_BUSY (1 << 15) #define RT2560_BBP_WRITE (1 << 16) #define RT2560_RF_20BIT (20 << 24) -#define RT2560_RF_BUSY (1 << 31) +#define RT2560_RF_BUSY (1U << 31) #define RT2560_RF_2522 0x00 #define RT2560_RF_2523 0x01 #define RT2560_RF_2524 0x02 #define RT2560_RF_2525 0x03 #define RT2560_RF_2525E 0x04 #define RT2560_RF_2526 0x05 /* dual-band RF */ #define RT2560_RF_5222 0x10 #define RT2560_BBP_VERSION 0 #define RT2560_BBP_TX 2 #define RT2560_BBP_RX 14 #define RT2560_BBP_ANTA 0x00 #define RT2560_BBP_DIVERSITY 0x01 #define RT2560_BBP_ANTB 0x02 #define RT2560_BBP_ANTMASK 0x03 #define RT2560_BBP_FLIPIQ 0x04 #define RT2560_LED_MODE_DEFAULT 0 #define RT2560_LED_MODE_TXRX_ACTIVITY 1 #define RT2560_LED_MODE_SINGLE 2 #define RT2560_LED_MODE_ASUS 3 #define RT2560_JAPAN_FILTER 0x8 #define RT2560_EEPROM_DELAY 1 /* minimum hold time (microsecond) */ #define RT2560_EEPROM_CONFIG0 16 #define RT2560_EEPROM_BBP_BASE 19 #define RT2560_EEPROM_TXPOWER 35 #define RT2560_EEPROM_CALIBRATE 62 /* * control and status registers access macros */ #define RAL_READ(sc, reg) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) #define RAL_WRITE(sc, reg, val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) /* * EEPROM access macro */ #define RT2560_EEPROM_CTL(sc, val) do { \ RAL_WRITE((sc), RT2560_CSR21, (val)); \ DELAY(RT2560_EEPROM_DELAY); \ } while (/* CONSTCOND */0) /* * Default values for MAC registers; values taken from the reference driver. 
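*
* Note on the (1U << 31) spelling adopted above for RT2560_RF_BUSY (and for
* every other bit-31 flag touched by this change): with a 32-bit int,
* (1 << 31) overflows the signed type, and the resulting negative value
* sign-extends whenever the flag is widened, e.g. into a 64-bit variable.
* A minimal sketch, assuming an LP64 target:
*
*	uint64_t bad  = (1 << 31);	-- yields 0xffffffff80000000
*	uint64_t good = (1U << 31);	-- yields 0x0000000080000000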
*/ #define RT2560_DEF_MAC \ { RT2560_PSCSR0, 0x00020002 }, \ { RT2560_PSCSR1, 0x00000002 }, \ { RT2560_PSCSR2, 0x00020002 }, \ { RT2560_PSCSR3, 0x00000002 }, \ { RT2560_TIMECSR, 0x00003f21 }, \ { RT2560_CSR9, 0x00000780 }, \ { RT2560_CSR11, 0x07041483 }, \ { RT2560_CNT3, 0x00000000 }, \ { RT2560_TXCSR1, 0x07614562 }, \ { RT2560_ARSP_PLCP_0, 0x8c8d8b8a }, \ { RT2560_ACKPCTCSR, 0x7038140a }, \ { RT2560_ARTCSR1, 0x21212929 }, \ { RT2560_ARTCSR2, 0x1d1d1d1d }, \ { RT2560_RXCSR0, 0xffffffff }, \ { RT2560_RXCSR3, 0xb3aab3af }, \ { RT2560_PCICSR, 0x000003b8 }, \ { RT2560_PWRCSR0, 0x3f3b3100 }, \ { RT2560_GPIOCSR, 0x0000ff00 }, \ { RT2560_TESTCSR, 0x000000f0 }, \ { RT2560_PWRCSR1, 0x000001ff }, \ { RT2560_MACCSR0, 0x00213223 }, \ { RT2560_MACCSR1, 0x00235518 }, \ { RT2560_RLPWCSR, 0x00000040 }, \ { RT2560_RALINKCSR, 0x9a009a11 }, \ { RT2560_CSR7, 0xffffffff }, \ { RT2560_BBPCSR1, 0x82188200 }, \ { RT2560_TXACKCSR0, 0x00000020 }, \ { RT2560_SECCSR3, 0x0000e78f } /* * Default values for BBP registers; values taken from the reference driver. */ #define RT2560_DEF_BBP \ { 3, 0x02 }, \ { 4, 0x19 }, \ { 14, 0x1c }, \ { 15, 0x30 }, \ { 16, 0xac }, \ { 17, 0x48 }, \ { 18, 0x18 }, \ { 19, 0xff }, \ { 20, 0x1e }, \ { 21, 0x08 }, \ { 22, 0x08 }, \ { 23, 0x08 }, \ { 24, 0x80 }, \ { 25, 0x50 }, \ { 26, 0x08 }, \ { 27, 0x23 }, \ { 30, 0x10 }, \ { 31, 0x2b }, \ { 32, 0xb9 }, \ { 34, 0x12 }, \ { 35, 0x50 }, \ { 39, 0xc4 }, \ { 40, 0x02 }, \ { 41, 0x60 }, \ { 53, 0x10 }, \ { 54, 0x18 }, \ { 56, 0x08 }, \ { 57, 0x10 }, \ { 58, 0x08 }, \ { 61, 0x60 }, \ { 62, 0x10 }, \ { 75, 0xff } /* * Default values for RF register R2 indexed by channel numbers; values taken * from the reference driver. */ #define RT2560_RF2522_R2 \ { \ 0x307f6, 0x307fb, 0x30800, 0x30805, 0x3080a, 0x3080f, 0x30814, \ 0x30819, 0x3081e, 0x30823, 0x30828, 0x3082d, 0x30832, 0x3083e \ } #define RT2560_RF2523_R2 \ { \ 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, \ 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 \ } #define RT2560_RF2524_R2 \ { \ 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, \ 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 \ } #define RT2560_RF2525_R2 \ { \ 0x20327, 0x20328, 0x20329, 0x2032a, 0x2032b, 0x2032c, 0x2032d, \ 0x2032e, 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20346 \ } #define RT2560_RF2525_HI_R2 \ { \ 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20344, 0x20345, \ 0x20346, 0x20347, 0x20348, 0x20349, 0x2034a, 0x2034b, 0x2034e \ } #define RT2560_RF2525E_R2 \ { \ 0x2044d, 0x2044e, 0x2044f, 0x20460, 0x20461, 0x20462, 0x20463, \ 0x20464, 0x20465, 0x20466, 0x20467, 0x20468, 0x20469, 0x2046b \ } #define RT2560_RF2526_HI_R2 \ { \ 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d, 0x0022d, \ 0x0022e, 0x0022e, 0x0022f, 0x0022d, 0x00240, 0x00240, 0x00241 \ } #define RT2560_RF2526_R2 \ { \ 0x00226, 0x00227, 0x00227, 0x00228, 0x00228, 0x00229, 0x00229, \ 0x0022a, 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d \ } /* * For dual-band RF, RF registers R1 and R4 also depend on channel number; * values taken from the reference driver. 
*/ #define RT2560_RF5222 \ { 1, 0x08808, 0x0044d, 0x00282 }, \ { 2, 0x08808, 0x0044e, 0x00282 }, \ { 3, 0x08808, 0x0044f, 0x00282 }, \ { 4, 0x08808, 0x00460, 0x00282 }, \ { 5, 0x08808, 0x00461, 0x00282 }, \ { 6, 0x08808, 0x00462, 0x00282 }, \ { 7, 0x08808, 0x00463, 0x00282 }, \ { 8, 0x08808, 0x00464, 0x00282 }, \ { 9, 0x08808, 0x00465, 0x00282 }, \ { 10, 0x08808, 0x00466, 0x00282 }, \ { 11, 0x08808, 0x00467, 0x00282 }, \ { 12, 0x08808, 0x00468, 0x00282 }, \ { 13, 0x08808, 0x00469, 0x00282 }, \ { 14, 0x08808, 0x0046b, 0x00286 }, \ \ { 36, 0x08804, 0x06225, 0x00287 }, \ { 40, 0x08804, 0x06226, 0x00287 }, \ { 44, 0x08804, 0x06227, 0x00287 }, \ { 48, 0x08804, 0x06228, 0x00287 }, \ { 52, 0x08804, 0x06229, 0x00287 }, \ { 56, 0x08804, 0x0622a, 0x00287 }, \ { 60, 0x08804, 0x0622b, 0x00287 }, \ { 64, 0x08804, 0x0622c, 0x00287 }, \ \ { 100, 0x08804, 0x02200, 0x00283 }, \ { 104, 0x08804, 0x02201, 0x00283 }, \ { 108, 0x08804, 0x02202, 0x00283 }, \ { 112, 0x08804, 0x02203, 0x00283 }, \ { 116, 0x08804, 0x02204, 0x00283 }, \ { 120, 0x08804, 0x02205, 0x00283 }, \ { 124, 0x08804, 0x02206, 0x00283 }, \ { 128, 0x08804, 0x02207, 0x00283 }, \ { 132, 0x08804, 0x02208, 0x00283 }, \ { 136, 0x08804, 0x02209, 0x00283 }, \ { 140, 0x08804, 0x0220a, 0x00283 }, \ \ { 149, 0x08808, 0x02429, 0x00281 }, \ { 153, 0x08808, 0x0242b, 0x00281 }, \ { 157, 0x08808, 0x0242d, 0x00281 }, \ { 161, 0x08808, 0x0242f, 0x00281 } Index: head/sys/dev/ral/rt2661reg.h =================================================================== --- head/sys/dev/ral/rt2661reg.h (revision 258779) +++ head/sys/dev/ral/rt2661reg.h (revision 258780) @@ -1,488 +1,488 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define RT2661_NOISE_FLOOR -95 #define RT2661_TX_RING_COUNT 32 #define RT2661_MGT_RING_COUNT 32 #define RT2661_RX_RING_COUNT 64 #define RT2661_TX_DESC_SIZE (sizeof (struct rt2661_tx_desc)) #define RT2661_TX_DESC_WSIZE (RT2661_TX_DESC_SIZE / 4) #define RT2661_RX_DESC_SIZE (sizeof (struct rt2661_rx_desc)) #define RT2661_RX_DESC_WSIZE (RT2661_RX_DESC_SIZE / 4) #define RT2661_MAX_SCATTER 5 /* * Control and status registers. 
*/ #define RT2661_HOST_CMD_CSR 0x0008 #define RT2661_MCU_CNTL_CSR 0x000c #define RT2661_SOFT_RESET_CSR 0x0010 #define RT2661_MCU_INT_SOURCE_CSR 0x0014 #define RT2661_MCU_INT_MASK_CSR 0x0018 #define RT2661_PCI_USEC_CSR 0x001c #define RT2661_H2M_MAILBOX_CSR 0x2100 #define RT2661_M2H_CMD_DONE_CSR 0x2104 #define RT2661_HW_BEACON_BASE0 0x2c00 #define RT2661_MAC_CSR0 0x3000 #define RT2661_MAC_CSR1 0x3004 #define RT2661_MAC_CSR2 0x3008 #define RT2661_MAC_CSR3 0x300c #define RT2661_MAC_CSR4 0x3010 #define RT2661_MAC_CSR5 0x3014 #define RT2661_MAC_CSR6 0x3018 #define RT2661_MAC_CSR7 0x301c #define RT2661_MAC_CSR8 0x3020 #define RT2661_MAC_CSR9 0x3024 #define RT2661_MAC_CSR10 0x3028 #define RT2661_MAC_CSR11 0x302c #define RT2661_MAC_CSR12 0x3030 #define RT2661_MAC_CSR13 0x3034 #define RT2661_MAC_CSR14 0x3038 #define RT2661_MAC_CSR15 0x303c #define RT2661_TXRX_CSR0 0x3040 #define RT2661_TXRX_CSR1 0x3044 #define RT2661_TXRX_CSR2 0x3048 #define RT2661_TXRX_CSR3 0x304c #define RT2661_TXRX_CSR4 0x3050 #define RT2661_TXRX_CSR5 0x3054 #define RT2661_TXRX_CSR6 0x3058 #define RT2661_TXRX_CSR7 0x305c #define RT2661_TXRX_CSR8 0x3060 #define RT2661_TXRX_CSR9 0x3064 #define RT2661_TXRX_CSR10 0x3068 #define RT2661_TXRX_CSR11 0x306c #define RT2661_TXRX_CSR12 0x3070 #define RT2661_TXRX_CSR13 0x3074 #define RT2661_TXRX_CSR14 0x3078 #define RT2661_TXRX_CSR15 0x307c #define RT2661_PHY_CSR0 0x3080 #define RT2661_PHY_CSR1 0x3084 #define RT2661_PHY_CSR2 0x3088 #define RT2661_PHY_CSR3 0x308c #define RT2661_PHY_CSR4 0x3090 #define RT2661_PHY_CSR5 0x3094 #define RT2661_PHY_CSR6 0x3098 #define RT2661_PHY_CSR7 0x309c #define RT2661_SEC_CSR0 0x30a0 #define RT2661_SEC_CSR1 0x30a4 #define RT2661_SEC_CSR2 0x30a8 #define RT2661_SEC_CSR3 0x30ac #define RT2661_SEC_CSR4 0x30b0 #define RT2661_SEC_CSR5 0x30b4 #define RT2661_STA_CSR0 0x30c0 #define RT2661_STA_CSR1 0x30c4 #define RT2661_STA_CSR2 0x30c8 #define RT2661_STA_CSR3 0x30cc #define RT2661_STA_CSR4 0x30d0 #define RT2661_AC0_BASE_CSR 0x3400 #define RT2661_AC1_BASE_CSR 0x3404 #define RT2661_AC2_BASE_CSR 0x3408 #define RT2661_AC3_BASE_CSR 0x340c #define RT2661_MGT_BASE_CSR 0x3410 #define RT2661_TX_RING_CSR0 0x3418 #define RT2661_TX_RING_CSR1 0x341c #define RT2661_AIFSN_CSR 0x3420 #define RT2661_CWMIN_CSR 0x3424 #define RT2661_CWMAX_CSR 0x3428 #define RT2661_TX_DMA_DST_CSR 0x342c #define RT2661_TX_CNTL_CSR 0x3430 #define RT2661_LOAD_TX_RING_CSR 0x3434 #define RT2661_RX_BASE_CSR 0x3450 #define RT2661_RX_RING_CSR 0x3454 #define RT2661_RX_CNTL_CSR 0x3458 #define RT2661_PCI_CFG_CSR 0x3460 #define RT2661_INT_SOURCE_CSR 0x3468 #define RT2661_INT_MASK_CSR 0x346c #define RT2661_E2PROM_CSR 0x3470 #define RT2661_AC_TXOP_CSR0 0x3474 #define RT2661_AC_TXOP_CSR1 0x3478 #define RT2661_TEST_MODE_CSR 0x3484 #define RT2661_IO_CNTL_CSR 0x3498 #define RT2661_MCU_CODE_BASE 0x4000 /* possible flags for register HOST_CMD_CSR */ #define RT2661_KICK_CMD (1 << 7) /* Host to MCU (8051) command identifiers */ #define RT2661_MCU_CMD_SLEEP 0x30 #define RT2661_MCU_CMD_WAKEUP 0x31 #define RT2661_MCU_SET_LED 0x50 #define RT2661_MCU_SET_RSSI_LED 0x52 /* possible flags for register MCU_CNTL_CSR */ #define RT2661_MCU_SEL (1 << 0) #define RT2661_MCU_RESET (1 << 1) #define RT2661_MCU_READY (1 << 2) /* possible flags for register MCU_INT_SOURCE_CSR */ #define RT2661_MCU_CMD_DONE 0xff #define RT2661_MCU_WAKEUP (1 << 8) #define RT2661_MCU_BEACON_EXPIRE (1 << 9) /* possible flags for register H2M_MAILBOX_CSR */ #define RT2661_H2M_BUSY (1 << 24) #define RT2661_TOKEN_NO_INTR 0xff /* possible flags for register MAC_CSR5 */ 
#define RT2661_ONE_BSSID 3 /* possible flags for register TXRX_CSR0 */ /* Tx filter flags are in the low 16 bits */ #define RT2661_AUTO_TX_SEQ (1 << 15) /* Rx filter flags are in the high 16 bits */ #define RT2661_DISABLE_RX (1 << 16) #define RT2661_DROP_CRC_ERROR (1 << 17) #define RT2661_DROP_PHY_ERROR (1 << 18) #define RT2661_DROP_CTL (1 << 19) #define RT2661_DROP_NOT_TO_ME (1 << 20) #define RT2661_DROP_TODS (1 << 21) #define RT2661_DROP_VER_ERROR (1 << 22) #define RT2661_DROP_MULTICAST (1 << 23) #define RT2661_DROP_BROADCAST (1 << 24) #define RT2661_DROP_ACKCTS (1 << 25) /* possible flags for register TXRX_CSR4 */ #define RT2661_SHORT_PREAMBLE (1 << 19) #define RT2661_MRR_ENABLED (1 << 20) #define RT2661_MRR_CCK_FALLBACK (1 << 23) /* possible flags for register TXRX_CSR9 */ #define RT2661_TSF_TICKING (1 << 16) #define RT2661_TSF_MODE(x) (((x) & 0x3) << 17) /* TBTT stands for Target Beacon Transmission Time */ #define RT2661_ENABLE_TBTT (1 << 19) #define RT2661_GENERATE_BEACON (1 << 20) /* possible flags for register PHY_CSR0 */ #define RT2661_PA_PE_2GHZ (1 << 16) #define RT2661_PA_PE_5GHZ (1 << 17) /* possible flags for register PHY_CSR3 */ #define RT2661_BBP_READ (1 << 15) #define RT2661_BBP_BUSY (1 << 16) /* possible flags for register PHY_CSR4 */ #define RT2661_RF_21BIT (21 << 24) -#define RT2661_RF_BUSY (1 << 31) +#define RT2661_RF_BUSY (1U << 31) /* possible values for register STA_CSR4 */ #define RT2661_TX_STAT_VALID (1 << 0) #define RT2661_TX_RESULT(v) (((v) >> 1) & 0x7) #define RT2661_TX_RETRYCNT(v) (((v) >> 4) & 0xf) #define RT2661_TX_QID(v) (((v) >> 8) & 0xf) #define RT2661_TX_SUCCESS 0 #define RT2661_TX_RETRY_FAIL 6 /* possible flags for register TX_CNTL_CSR */ #define RT2661_KICK_MGT (1 << 4) /* possible flags for register INT_SOURCE_CSR */ #define RT2661_TX_DONE (1 << 0) #define RT2661_RX_DONE (1 << 1) #define RT2661_TX0_DMA_DONE (1 << 16) #define RT2661_TX1_DMA_DONE (1 << 17) #define RT2661_TX2_DMA_DONE (1 << 18) #define RT2661_TX3_DMA_DONE (1 << 19) #define RT2661_MGT_DONE (1 << 20) /* possible flags for register E2PROM_CSR */ #define RT2661_C (1 << 1) #define RT2661_S (1 << 2) #define RT2661_D (1 << 3) #define RT2661_Q (1 << 4) #define RT2661_93C46 (1 << 5) /* Tx descriptor */ struct rt2661_tx_desc { uint32_t flags; #define RT2661_TX_BUSY (1 << 0) #define RT2661_TX_VALID (1 << 1) #define RT2661_TX_MORE_FRAG (1 << 2) #define RT2661_TX_NEED_ACK (1 << 3) #define RT2661_TX_TIMESTAMP (1 << 4) #define RT2661_TX_OFDM (1 << 5) #define RT2661_TX_IFS (1 << 6) #define RT2661_TX_LONG_RETRY (1 << 7) #define RT2661_TX_BURST (1 << 28) uint16_t wme; #define RT2661_QID(v) (v) #define RT2661_AIFSN(v) ((v) << 4) #define RT2661_LOGCWMIN(v) ((v) << 8) #define RT2661_LOGCWMAX(v) ((v) << 12) uint16_t xflags; #define RT2661_TX_HWSEQ (1 << 12) uint8_t plcp_signal; uint8_t plcp_service; #define RT2661_PLCP_LENGEXT 0x80 uint8_t plcp_length_lo; uint8_t plcp_length_hi; uint32_t iv; uint32_t eiv; uint8_t offset; uint8_t qid; #define RT2661_QID_MGT 13 uint8_t txpower; #define RT2661_DEFAULT_TXPOWER 0 uint8_t reserved1; uint32_t addr[RT2661_MAX_SCATTER]; uint16_t len[RT2661_MAX_SCATTER]; uint16_t reserved2; } __packed; /* Rx descriptor */ struct rt2661_rx_desc { uint32_t flags; #define RT2661_RX_BUSY (1 << 0) #define RT2661_RX_DROP (1 << 1) #define RT2661_RX_CRC_ERROR (1 << 6) #define RT2661_RX_OFDM (1 << 7) #define RT2661_RX_PHY_ERROR (1 << 8) #define RT2661_RX_CIPHER_MASK 0x00000600 uint8_t rate; uint8_t rssi; uint8_t reserved1; uint8_t offset; uint32_t iv; uint32_t eiv; uint32_t reserved2; uint32_t 
physaddr; uint32_t reserved3[10]; } __packed; #define RAL_RF1 0 #define RAL_RF2 2 #define RAL_RF3 1 #define RAL_RF4 3 /* dual-band RF */ #define RT2661_RF_5225 1 #define RT2661_RF_5325 2 /* single-band RF */ #define RT2661_RF_2527 3 #define RT2661_RF_2529 4 #define RT2661_RX_DESC_BACK 4 #define RT2661_SMART_MODE (1 << 0) #define RT2661_BBPR94_DEFAULT 6 #define RT2661_SHIFT_D 3 #define RT2661_SHIFT_Q 4 #define RT2661_EEPROM_MAC01 0x02 #define RT2661_EEPROM_MAC23 0x03 #define RT2661_EEPROM_MAC45 0x04 #define RT2661_EEPROM_ANTENNA 0x10 #define RT2661_EEPROM_CONFIG2 0x11 #define RT2661_EEPROM_BBP_BASE 0x13 #define RT2661_EEPROM_TXPOWER 0x23 #define RT2661_EEPROM_FREQ_OFFSET 0x2f #define RT2661_EEPROM_RSSI_2GHZ_OFFSET 0x4d #define RT2661_EEPROM_RSSI_5GHZ_OFFSET 0x4e #define RT2661_EEPROM_DELAY 1 /* minimum hold time (microsecond) */ /* * control and status registers access macros */ #define RAL_READ(sc, reg) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) #define RAL_READ_REGION_4(sc, offset, datap, count) \ bus_space_read_region_4((sc)->sc_st, (sc)->sc_sh, (offset), \ (datap), (count)) #define RAL_WRITE(sc, reg, val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) #define RAL_WRITE_REGION_1(sc, offset, datap, count) \ bus_space_write_region_1((sc)->sc_st, (sc)->sc_sh, (offset), \ (datap), (count)) /* * EEPROM access macro */ #define RT2661_EEPROM_CTL(sc, val) do { \ RAL_WRITE((sc), RT2661_E2PROM_CSR, (val)); \ DELAY(RT2661_EEPROM_DELAY); \ } while (/* CONSTCOND */0) /* * Default values for MAC registers; values taken from the reference driver. */ #define RT2661_DEF_MAC \ { RT2661_TXRX_CSR0, 0x0000b032 }, \ { RT2661_TXRX_CSR1, 0x9eb39eb3 }, \ { RT2661_TXRX_CSR2, 0x8a8b8c8d }, \ { RT2661_TXRX_CSR3, 0x00858687 }, \ { RT2661_TXRX_CSR7, 0x2e31353b }, \ { RT2661_TXRX_CSR8, 0x2a2a2a2c }, \ { RT2661_TXRX_CSR15, 0x0000000f }, \ { RT2661_MAC_CSR6, 0x00000fff }, \ { RT2661_MAC_CSR8, 0x016c030a }, \ { RT2661_MAC_CSR10, 0x00000718 }, \ { RT2661_MAC_CSR12, 0x00000004 }, \ { RT2661_MAC_CSR13, 0x0000e000 }, \ { RT2661_SEC_CSR0, 0x00000000 }, \ { RT2661_SEC_CSR1, 0x00000000 }, \ { RT2661_SEC_CSR5, 0x00000000 }, \ { RT2661_PHY_CSR1, 0x000023b0 }, \ { RT2661_PHY_CSR5, 0x060a100c }, \ { RT2661_PHY_CSR6, 0x00080606 }, \ { RT2661_PHY_CSR7, 0x00000a08 }, \ { RT2661_PCI_CFG_CSR, 0x3cca4808 }, \ { RT2661_AIFSN_CSR, 0x00002273 }, \ { RT2661_CWMIN_CSR, 0x00002344 }, \ { RT2661_CWMAX_CSR, 0x000034aa }, \ { RT2661_TEST_MODE_CSR, 0x00000200 }, \ { RT2661_M2H_CMD_DONE_CSR, 0xffffffff } /* * Default values for BBP registers; values taken from the reference driver. */ #define RT2661_DEF_BBP \ { 3, 0x00 }, \ { 15, 0x30 }, \ { 17, 0x20 }, \ { 21, 0xc8 }, \ { 22, 0x38 }, \ { 23, 0x06 }, \ { 24, 0xfe }, \ { 25, 0x0a }, \ { 26, 0x0d }, \ { 34, 0x12 }, \ { 37, 0x07 }, \ { 39, 0xf8 }, \ { 41, 0x60 }, \ { 53, 0x10 }, \ { 54, 0x18 }, \ { 60, 0x10 }, \ { 61, 0x04 }, \ { 62, 0x04 }, \ { 75, 0xfe }, \ { 86, 0xfe }, \ { 88, 0xfe }, \ { 90, 0x0f }, \ { 99, 0x00 }, \ { 102, 0x16 }, \ { 107, 0x04 } /* * Default settings for RF registers; values taken from the reference driver. 
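*
* Each initializer in the RF5225 tables below reads as a
* { channel, R1, R2, R3, R4 } tuple (a hedged reading; cf. the
* channel-programming code in rt2661.c).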
*/ #define RT2661_RF5225_1 \ { 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 }, \ { 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 }, \ { 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 }, \ { 4, 0x00b33, 0x011e2, 0x1a014, 0x30287 }, \ { 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 }, \ { 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 }, \ { 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 }, \ { 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 }, \ { 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 }, \ { 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 }, \ { 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 }, \ { 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 }, \ { 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 }, \ { 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 }, \ \ { 36, 0x00b33, 0x01266, 0x26014, 0x30288 }, \ { 40, 0x00b33, 0x01268, 0x26014, 0x30280 }, \ { 44, 0x00b33, 0x01269, 0x26014, 0x30282 }, \ { 48, 0x00b33, 0x0126a, 0x26014, 0x30284 }, \ { 52, 0x00b33, 0x0126b, 0x26014, 0x30286 }, \ { 56, 0x00b33, 0x0126c, 0x26014, 0x30288 }, \ { 60, 0x00b33, 0x0126e, 0x26014, 0x30280 }, \ { 64, 0x00b33, 0x0126f, 0x26014, 0x30282 }, \ \ { 100, 0x00b33, 0x0128a, 0x2e014, 0x30280 }, \ { 104, 0x00b33, 0x0128b, 0x2e014, 0x30282 }, \ { 108, 0x00b33, 0x0128c, 0x2e014, 0x30284 }, \ { 112, 0x00b33, 0x0128d, 0x2e014, 0x30286 }, \ { 116, 0x00b33, 0x0128e, 0x2e014, 0x30288 }, \ { 120, 0x00b33, 0x012a0, 0x2e014, 0x30280 }, \ { 124, 0x00b33, 0x012a1, 0x2e014, 0x30282 }, \ { 128, 0x00b33, 0x012a2, 0x2e014, 0x30284 }, \ { 132, 0x00b33, 0x012a3, 0x2e014, 0x30286 }, \ { 136, 0x00b33, 0x012a4, 0x2e014, 0x30288 }, \ { 140, 0x00b33, 0x012a6, 0x2e014, 0x30280 }, \ \ { 149, 0x00b33, 0x012a8, 0x2e014, 0x30287 }, \ { 153, 0x00b33, 0x012a9, 0x2e014, 0x30289 }, \ { 157, 0x00b33, 0x012ab, 0x2e014, 0x30281 }, \ { 161, 0x00b33, 0x012ac, 0x2e014, 0x30283 }, \ { 165, 0x00b33, 0x012ad, 0x2e014, 0x30285 } #define RT2661_RF5225_2 \ { 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 }, \ { 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 }, \ { 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 }, \ { 4, 0x00b33, 0x011e2, 0x1a014, 0x30287 }, \ { 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 }, \ { 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 }, \ { 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 }, \ { 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 }, \ { 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 }, \ { 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 }, \ { 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 }, \ { 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 }, \ { 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 }, \ { 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 }, \ \ { 36, 0x00b35, 0x11206, 0x26014, 0x30280 }, \ { 40, 0x00b34, 0x111a0, 0x26014, 0x30280 }, \ { 44, 0x00b34, 0x111a1, 0x26014, 0x30286 }, \ { 48, 0x00b34, 0x111a3, 0x26014, 0x30282 }, \ { 52, 0x00b34, 0x111a4, 0x26014, 0x30288 }, \ { 56, 0x00b34, 0x111a6, 0x26014, 0x30284 }, \ { 60, 0x00b34, 0x111a8, 0x26014, 0x30280 }, \ { 64, 0x00b34, 0x111a9, 0x26014, 0x30286 }, \ \ { 100, 0x00b35, 0x11226, 0x2e014, 0x30280 }, \ { 104, 0x00b35, 0x11228, 0x2e014, 0x30280 }, \ { 108, 0x00b35, 0x1122a, 0x2e014, 0x30280 }, \ { 112, 0x00b35, 0x1122c, 0x2e014, 0x30280 }, \ { 116, 0x00b35, 0x1122e, 0x2e014, 0x30280 }, \ { 120, 0x00b34, 0x111c0, 0x2e014, 0x30280 }, \ { 124, 0x00b34, 0x111c1, 0x2e014, 0x30286 }, \ { 128, 0x00b34, 0x111c3, 0x2e014, 0x30282 }, \ { 132, 0x00b34, 0x111c4, 0x2e014, 0x30288 }, \ { 136, 0x00b34, 0x111c6, 0x2e014, 0x30284 }, \ { 140, 0x00b34, 0x111c8, 0x2e014, 0x30280 }, \ \ { 149, 0x00b34, 0x111cb, 0x2e014, 0x30286 }, \ { 153, 0x00b34, 0x111cd, 0x2e014, 0x30282 }, \ { 157, 0x00b35, 0x11242, 0x2e014, 0x30285 }, \ { 161, 0x00b35, 0x11244, 0x2e014, 0x30285 }, \ { 165, 0x00b35, 0x11246, 
0x2e014, 0x30285 } Index: head/sys/dev/ral/rt2860reg.h =================================================================== --- head/sys/dev/ral/rt2860reg.h (revision 258779) +++ head/sys/dev/ral/rt2860reg.h (revision 258780) @@ -1,1255 +1,1255 @@ /*- * Copyright (c) 2007 Damien Bergamini * Copyright (c) 2012 Bernhard Schmidt * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $OpenBSD: rt2860reg.h,v 1.30 2010/05/10 18:17:10 damien Exp $ * $FreeBSD$ */ #define RT2860_NOISE_FLOOR -95 /* PCI registers */ #define RT2860_PCI_CFG 0x0000 #define RT2860_PCI_EECTRL 0x0004 #define RT2860_PCI_MCUCTRL 0x0008 #define RT2860_PCI_SYSCTRL 0x000c #define RT2860_PCIE_JTAG 0x0010 #define RT3090_AUX_CTRL 0x010c #define RT3070_OPT_14 0x0114 /* SCH/DMA registers */ #define RT2860_INT_STATUS 0x0200 #define RT2860_INT_MASK 0x0204 #define RT2860_WPDMA_GLO_CFG 0x0208 #define RT2860_WPDMA_RST_IDX 0x020c #define RT2860_DELAY_INT_CFG 0x0210 #define RT2860_WMM_AIFSN_CFG 0x0214 #define RT2860_WMM_CWMIN_CFG 0x0218 #define RT2860_WMM_CWMAX_CFG 0x021c #define RT2860_WMM_TXOP0_CFG 0x0220 #define RT2860_WMM_TXOP1_CFG 0x0224 #define RT2860_GPIO_CTRL 0x0228 #define RT2860_MCU_CMD_REG 0x022c #define RT2860_TX_BASE_PTR(qid) (0x0230 + (qid) * 16) #define RT2860_TX_MAX_CNT(qid) (0x0234 + (qid) * 16) #define RT2860_TX_CTX_IDX(qid) (0x0238 + (qid) * 16) #define RT2860_TX_DTX_IDX(qid) (0x023c + (qid) * 16) #define RT2860_RX_BASE_PTR 0x0290 #define RT2860_RX_MAX_CNT 0x0294 #define RT2860_RX_CALC_IDX 0x0298 #define RT2860_FS_DRX_IDX 0x029c #define RT2860_USB_DMA_CFG 0x02a0 /* RT2870 only */ #define RT2860_US_CYC_CNT 0x02a4 /* PBF registers */ #define RT2860_SYS_CTRL 0x0400 #define RT2860_HOST_CMD 0x0404 #define RT2860_PBF_CFG 0x0408 #define RT2860_MAX_PCNT 0x040c #define RT2860_BUF_CTRL 0x0410 #define RT2860_MCU_INT_STA 0x0414 #define RT2860_MCU_INT_ENA 0x0418 #define RT2860_TXQ_IO(qid) (0x041c + (qid) * 4) #define RT2860_RX0Q_IO 0x0424 #define RT2860_BCN_OFFSET0 0x042c #define RT2860_BCN_OFFSET1 0x0430 #define RT2860_TXRXQ_STA 0x0434 #define RT2860_TXRXQ_PCNT 0x0438 #define RT2860_PBF_DBG 0x043c #define RT2860_CAP_CTRL 0x0440 /* RT3070 registers */ #define RT3070_RF_CSR_CFG 0x0500 #define RT3070_EFUSE_CTRL 0x0580 #define RT3070_EFUSE_DATA0 0x0590 #define RT3070_EFUSE_DATA1 0x0594 #define RT3070_EFUSE_DATA2 0x0598 #define RT3070_EFUSE_DATA3 0x059c #define RT3090_OSC_CTRL 0x05a4 #define RT3070_LDO_CFG0 0x05d4 #define RT3070_GPIO_SWITCH 0x05dc /* MAC registers */ #define RT2860_ASIC_VER_ID 0x1000 #define RT2860_MAC_SYS_CTRL 0x1004 #define RT2860_MAC_ADDR_DW0 0x1008 #define RT2860_MAC_ADDR_DW1 0x100c #define RT2860_MAC_BSSID_DW0 0x1010 #define RT2860_MAC_BSSID_DW1 0x1014 #define RT2860_MAX_LEN_CFG 0x1018 #define RT2860_BBP_CSR_CFG 0x101c #define RT2860_RF_CSR_CFG0 0x1020 #define RT2860_RF_CSR_CFG1 0x1024 #define RT2860_RF_CSR_CFG2 0x1028 #define RT2860_LED_CFG 0x102c /* undocumented 
registers */ #define RT2860_DEBUG 0x10f4 /* MAC Timing control registers */ #define RT2860_XIFS_TIME_CFG 0x1100 #define RT2860_BKOFF_SLOT_CFG 0x1104 #define RT2860_NAV_TIME_CFG 0x1108 #define RT2860_CH_TIME_CFG 0x110c #define RT2860_PBF_LIFE_TIMER 0x1110 #define RT2860_BCN_TIME_CFG 0x1114 #define RT2860_TBTT_SYNC_CFG 0x1118 #define RT2860_TSF_TIMER_DW0 0x111c #define RT2860_TSF_TIMER_DW1 0x1120 #define RT2860_TBTT_TIMER 0x1124 #define RT2860_INT_TIMER_CFG 0x1128 #define RT2860_INT_TIMER_EN 0x112c #define RT2860_CH_IDLE_TIME 0x1130 /* MAC Power Save configuration registers */ #define RT2860_MAC_STATUS_REG 0x1200 #define RT2860_PWR_PIN_CFG 0x1204 #define RT2860_AUTO_WAKEUP_CFG 0x1208 /* MAC TX configuration registers */ #define RT2860_EDCA_AC_CFG(aci) (0x1300 + (aci) * 4) #define RT2860_EDCA_TID_AC_MAP 0x1310 #define RT2860_TX_PWR_CFG(ridx) (0x1314 + (ridx) * 4) #define RT2860_TX_PIN_CFG 0x1328 #define RT2860_TX_BAND_CFG 0x132c #define RT2860_TX_SW_CFG0 0x1330 #define RT2860_TX_SW_CFG1 0x1334 #define RT2860_TX_SW_CFG2 0x1338 #define RT2860_TXOP_THRES_CFG 0x133c #define RT2860_TXOP_CTRL_CFG 0x1340 #define RT2860_TX_RTS_CFG 0x1344 #define RT2860_TX_TIMEOUT_CFG 0x1348 #define RT2860_TX_RTY_CFG 0x134c #define RT2860_TX_LINK_CFG 0x1350 #define RT2860_HT_FBK_CFG0 0x1354 #define RT2860_HT_FBK_CFG1 0x1358 #define RT2860_LG_FBK_CFG0 0x135c #define RT2860_LG_FBK_CFG1 0x1360 #define RT2860_CCK_PROT_CFG 0x1364 #define RT2860_OFDM_PROT_CFG 0x1368 #define RT2860_MM20_PROT_CFG 0x136c #define RT2860_MM40_PROT_CFG 0x1370 #define RT2860_GF20_PROT_CFG 0x1374 #define RT2860_GF40_PROT_CFG 0x1378 #define RT2860_EXP_CTS_TIME 0x137c #define RT2860_EXP_ACK_TIME 0x1380 /* MAC RX configuration registers */ #define RT2860_RX_FILTR_CFG 0x1400 #define RT2860_AUTO_RSP_CFG 0x1404 #define RT2860_LEGACY_BASIC_RATE 0x1408 #define RT2860_HT_BASIC_RATE 0x140c #define RT2860_HT_CTRL_CFG 0x1410 #define RT2860_SIFS_COST_CFG 0x1414 #define RT2860_RX_PARSER_CFG 0x1418 /* MAC Security configuration registers */ #define RT2860_TX_SEC_CNT0 0x1500 #define RT2860_RX_SEC_CNT0 0x1504 #define RT2860_CCMP_FC_MUTE 0x1508 /* MAC HCCA/PSMP configuration registers */ #define RT2860_TXOP_HLDR_ADDR0 0x1600 #define RT2860_TXOP_HLDR_ADDR1 0x1604 #define RT2860_TXOP_HLDR_ET 0x1608 #define RT2860_QOS_CFPOLL_RA_DW0 0x160c #define RT2860_QOS_CFPOLL_A1_DW1 0x1610 #define RT2860_QOS_CFPOLL_QC 0x1614 /* MAC Statistics Counters */ #define RT2860_RX_STA_CNT0 0x1700 #define RT2860_RX_STA_CNT1 0x1704 #define RT2860_RX_STA_CNT2 0x1708 #define RT2860_TX_STA_CNT0 0x170c #define RT2860_TX_STA_CNT1 0x1710 #define RT2860_TX_STA_CNT2 0x1714 #define RT2860_TX_STAT_FIFO 0x1718 /* RX WCID search table */ #define RT2860_WCID_ENTRY(wcid) (0x1800 + (wcid) * 8) #define RT2860_FW_BASE 0x2000 #define RT2870_FW_BASE 0x3000 /* Pair-wise key table */ #define RT2860_PKEY(wcid) (0x4000 + (wcid) * 32) /* IV/EIV table */ #define RT2860_IVEIV(wcid) (0x6000 + (wcid) * 8) /* WCID attribute table */ #define RT2860_WCID_ATTR(wcid) (0x6800 + (wcid) * 4) /* Shared Key Table */ #define RT2860_SKEY(vap, kidx) (0x6c00 + (vap) * 128 + (kidx) * 32) /* Shared Key Mode */ #define RT2860_SKEY_MODE_0_7 0x7000 #define RT2860_SKEY_MODE_8_15 0x7004 #define RT2860_SKEY_MODE_16_23 0x7008 #define RT2860_SKEY_MODE_24_31 0x700c /* Shared Memory between MCU and host */ #define RT2860_H2M_MAILBOX 0x7010 #define RT2860_H2M_MAILBOX_CID 0x7014 #define RT2860_H2M_MAILBOX_STATUS 0x701c #define RT2860_H2M_BBPAGENT 0x7028 #define RT2860_BCN_BASE(vap) (0x7800 + (vap) * 512) /* possible flags for RT2860_PCI_CFG */ 
#define RT2860_PCI_CFG_USB (1 << 17) #define RT2860_PCI_CFG_PCI (1 << 16) /* possible flags for register RT2860_PCI_EECTRL */ #define RT2860_C (1 << 0) #define RT2860_S (1 << 1) #define RT2860_D (1 << 2) #define RT2860_SHIFT_D 2 #define RT2860_Q (1 << 3) #define RT2860_SHIFT_Q 3 /* possible flags for registers INT_STATUS/INT_MASK */ #define RT2860_TX_COHERENT (1 << 17) #define RT2860_RX_COHERENT (1 << 16) #define RT2860_MAC_INT_4 (1 << 15) #define RT2860_MAC_INT_3 (1 << 14) #define RT2860_MAC_INT_2 (1 << 13) #define RT2860_MAC_INT_1 (1 << 12) #define RT2860_MAC_INT_0 (1 << 11) #define RT2860_TX_RX_COHERENT (1 << 10) #define RT2860_MCU_CMD_INT (1 << 9) #define RT2860_TX_DONE_INT5 (1 << 8) #define RT2860_TX_DONE_INT4 (1 << 7) #define RT2860_TX_DONE_INT3 (1 << 6) #define RT2860_TX_DONE_INT2 (1 << 5) #define RT2860_TX_DONE_INT1 (1 << 4) #define RT2860_TX_DONE_INT0 (1 << 3) #define RT2860_RX_DONE_INT (1 << 2) #define RT2860_TX_DLY_INT (1 << 1) #define RT2860_RX_DLY_INT (1 << 0) /* possible flags for register WPDMA_GLO_CFG */ #define RT2860_HDR_SEG_LEN_SHIFT 8 #define RT2860_BIG_ENDIAN (1 << 7) #define RT2860_TX_WB_DDONE (1 << 6) #define RT2860_WPDMA_BT_SIZE_SHIFT 4 #define RT2860_WPDMA_BT_SIZE16 0 #define RT2860_WPDMA_BT_SIZE32 1 #define RT2860_WPDMA_BT_SIZE64 2 #define RT2860_WPDMA_BT_SIZE128 3 #define RT2860_RX_DMA_BUSY (1 << 3) #define RT2860_RX_DMA_EN (1 << 2) #define RT2860_TX_DMA_BUSY (1 << 1) #define RT2860_TX_DMA_EN (1 << 0) /* possible flags for register DELAY_INT_CFG */ -#define RT2860_TXDLY_INT_EN (1 << 31) +#define RT2860_TXDLY_INT_EN (1U << 31) #define RT2860_TXMAX_PINT_SHIFT 24 #define RT2860_TXMAX_PTIME_SHIFT 16 #define RT2860_RXDLY_INT_EN (1 << 15) #define RT2860_RXMAX_PINT_SHIFT 8 #define RT2860_RXMAX_PTIME_SHIFT 0 /* possible flags for register GPIO_CTRL */ #define RT2860_GPIO_D_SHIFT 8 #define RT2860_GPIO_O_SHIFT 0 /* possible flags for register USB_DMA_CFG */ -#define RT2860_USB_TX_BUSY (1 << 31) +#define RT2860_USB_TX_BUSY (1U << 31) #define RT2860_USB_RX_BUSY (1 << 30) #define RT2860_USB_EPOUT_VLD_SHIFT 24 #define RT2860_USB_TX_EN (1 << 23) #define RT2860_USB_RX_EN (1 << 22) #define RT2860_USB_RX_AGG_EN (1 << 21) #define RT2860_USB_TXOP_HALT (1 << 20) #define RT2860_USB_TX_CLEAR (1 << 19) #define RT2860_USB_PHY_WD_EN (1 << 16) #define RT2860_USB_PHY_MAN_RST (1 << 15) #define RT2860_USB_RX_AGG_LMT(x) ((x) << 8) /* in unit of 1KB */ #define RT2860_USB_RX_AGG_TO(x) ((x) & 0xff) /* in unit of 33ns */ /* possible flags for register US_CYC_CNT */ #define RT2860_TEST_EN (1 << 24) #define RT2860_TEST_SEL_SHIFT 16 #define RT2860_BT_MODE_EN (1 << 8) #define RT2860_US_CYC_CNT_SHIFT 0 /* possible flags for register SYS_CTRL */ #define RT2860_HST_PM_SEL (1 << 16) #define RT2860_CAP_MODE (1 << 14) #define RT2860_PME_OEN (1 << 13) #define RT2860_CLKSELECT (1 << 12) #define RT2860_PBF_CLK_EN (1 << 11) #define RT2860_MAC_CLK_EN (1 << 10) #define RT2860_DMA_CLK_EN (1 << 9) #define RT2860_MCU_READY (1 << 7) #define RT2860_ASY_RESET (1 << 4) #define RT2860_PBF_RESET (1 << 3) #define RT2860_MAC_RESET (1 << 2) #define RT2860_DMA_RESET (1 << 1) #define RT2860_MCU_RESET (1 << 0) /* possible values for register HOST_CMD */ #define RT2860_MCU_CMD_SLEEP 0x30 #define RT2860_MCU_CMD_WAKEUP 0x31 #define RT2860_MCU_CMD_LEDS 0x50 #define RT2860_MCU_CMD_LED_RSSI 0x51 #define RT2860_MCU_CMD_LED1 0x52 #define RT2860_MCU_CMD_LED2 0x53 #define RT2860_MCU_CMD_LED3 0x54 #define RT2860_MCU_CMD_RFRESET 0x72 #define RT2860_MCU_CMD_ANTSEL 0x73 #define RT2860_MCU_CMD_BBP 0x80 #define RT2860_MCU_CMD_PSLEVEL 0x83 /* 
possible flags for register PBF_CFG */ #define RT2860_TX1Q_NUM_SHIFT 21 #define RT2860_TX2Q_NUM_SHIFT 16 #define RT2860_NULL0_MODE (1 << 15) #define RT2860_NULL1_MODE (1 << 14) #define RT2860_RX_DROP_MODE (1 << 13) #define RT2860_TX0Q_MANUAL (1 << 12) #define RT2860_TX1Q_MANUAL (1 << 11) #define RT2860_TX2Q_MANUAL (1 << 10) #define RT2860_RX0Q_MANUAL (1 << 9) #define RT2860_HCCA_EN (1 << 8) #define RT2860_TX0Q_EN (1 << 4) #define RT2860_TX1Q_EN (1 << 3) #define RT2860_TX2Q_EN (1 << 2) #define RT2860_RX0Q_EN (1 << 1) /* possible flags for register BUF_CTRL */ #define RT2860_WRITE_TXQ(qid) (1 << (11 - (qid))) #define RT2860_NULL0_KICK (1 << 7) #define RT2860_NULL1_KICK (1 << 6) #define RT2860_BUF_RESET (1 << 5) #define RT2860_READ_TXQ(qid) (1 << (3 - (qid))) #define RT2860_READ_RX0Q (1 << 0) /* possible flags for registers MCU_INT_STA/MCU_INT_ENA */ #define RT2860_MCU_MAC_INT_8 (1 << 24) #define RT2860_MCU_MAC_INT_7 (1 << 23) #define RT2860_MCU_MAC_INT_6 (1 << 22) #define RT2860_MCU_MAC_INT_4 (1 << 20) #define RT2860_MCU_MAC_INT_3 (1 << 19) #define RT2860_MCU_MAC_INT_2 (1 << 18) #define RT2860_MCU_MAC_INT_1 (1 << 17) #define RT2860_MCU_MAC_INT_0 (1 << 16) #define RT2860_DTX0_INT (1 << 11) #define RT2860_DTX1_INT (1 << 10) #define RT2860_DTX2_INT (1 << 9) #define RT2860_DRX0_INT (1 << 8) #define RT2860_HCMD_INT (1 << 7) #define RT2860_N0TX_INT (1 << 6) #define RT2860_N1TX_INT (1 << 5) #define RT2860_BCNTX_INT (1 << 4) #define RT2860_MTX0_INT (1 << 3) #define RT2860_MTX1_INT (1 << 2) #define RT2860_MTX2_INT (1 << 1) #define RT2860_MRX0_INT (1 << 0) /* possible flags for register TXRXQ_PCNT */ #define RT2860_RX0Q_PCNT_MASK 0xff000000 #define RT2860_TX2Q_PCNT_MASK 0x00ff0000 #define RT2860_TX1Q_PCNT_MASK 0x0000ff00 #define RT2860_TX0Q_PCNT_MASK 0x000000ff /* possible flags for register CAP_CTRL */ -#define RT2860_CAP_ADC_FEQ (1 << 31) +#define RT2860_CAP_ADC_FEQ (1U << 31) #define RT2860_CAP_START (1 << 30) #define RT2860_MAN_TRIG (1 << 29) #define RT2860_TRIG_OFFSET_SHIFT 16 #define RT2860_START_ADDR_SHIFT 0 /* possible flags for register RF_CSR_CFG */ #define RT3070_RF_KICK (1 << 17) #define RT3070_RF_WRITE (1 << 16) /* possible flags for register EFUSE_CTRL */ -#define RT3070_SEL_EFUSE (1 << 31) +#define RT3070_SEL_EFUSE (1U << 31) #define RT3070_EFSROM_KICK (1 << 30) #define RT3070_EFSROM_AIN_MASK 0x03ff0000 #define RT3070_EFSROM_AIN_SHIFT 16 #define RT3070_EFSROM_MODE_MASK 0x000000c0 #define RT3070_EFUSE_AOUT_MASK 0x0000003f /* possible flags for register MAC_SYS_CTRL */ #define RT2860_RX_TS_EN (1 << 7) #define RT2860_WLAN_HALT_EN (1 << 6) #define RT2860_PBF_LOOP_EN (1 << 5) #define RT2860_CONT_TX_TEST (1 << 4) #define RT2860_MAC_RX_EN (1 << 3) #define RT2860_MAC_TX_EN (1 << 2) #define RT2860_BBP_HRST (1 << 1) #define RT2860_MAC_SRST (1 << 0) /* possible flags for register MAC_BSSID_DW1 */ #define RT2860_MULTI_BCN_NUM_SHIFT 18 #define RT2860_MULTI_BSSID_MODE_SHIFT 16 /* possible flags for register MAX_LEN_CFG */ #define RT2860_MIN_MPDU_LEN_SHIFT 16 #define RT2860_MAX_PSDU_LEN_SHIFT 12 #define RT2860_MAX_PSDU_LEN8K 0 #define RT2860_MAX_PSDU_LEN16K 1 #define RT2860_MAX_PSDU_LEN32K 2 #define RT2860_MAX_PSDU_LEN64K 3 #define RT2860_MAX_MPDU_LEN_SHIFT 0 /* possible flags for registers BBP_CSR_CFG/H2M_BBPAGENT */ #define RT2860_BBP_RW_PARALLEL (1 << 19) #define RT2860_BBP_PAR_DUR_112_5 (1 << 18) #define RT2860_BBP_CSR_KICK (1 << 17) #define RT2860_BBP_CSR_READ (1 << 16) #define RT2860_BBP_ADDR_SHIFT 8 #define RT2860_BBP_DATA_SHIFT 0 /* possible flags for register RF_CSR_CFG0 */ -#define
RT2860_RF_REG_CTRL (1 << 31) +#define RT2860_RF_REG_CTRL (1U << 31) #define RT2860_RF_LE_SEL1 (1 << 30) #define RT2860_RF_LE_STBY (1 << 29) #define RT2860_RF_REG_WIDTH_SHIFT 24 #define RT2860_RF_REG_0_SHIFT 0 /* possible flags for register RF_CSR_CFG1 */ #define RT2860_RF_DUR_5 (1 << 24) #define RT2860_RF_REG_1_SHIFT 0 /* possible flags for register LED_CFG */ #define RT2860_LED_POL (1 << 30) #define RT2860_Y_LED_MODE_SHIFT 28 #define RT2860_G_LED_MODE_SHIFT 26 #define RT2860_R_LED_MODE_SHIFT 24 #define RT2860_LED_MODE_OFF 0 #define RT2860_LED_MODE_BLINK_TX 1 #define RT2860_LED_MODE_SLOW_BLINK 2 #define RT2860_LED_MODE_ON 3 #define RT2860_SLOW_BLK_TIME_SHIFT 16 #define RT2860_LED_OFF_TIME_SHIFT 8 #define RT2860_LED_ON_TIME_SHIFT 0 /* possible flags for register XIFS_TIME_CFG */ #define RT2860_BB_RXEND_EN (1 << 29) #define RT2860_EIFS_TIME_SHIFT 20 #define RT2860_OFDM_XIFS_TIME_SHIFT 16 #define RT2860_OFDM_SIFS_TIME_SHIFT 8 #define RT2860_CCK_SIFS_TIME_SHIFT 0 /* possible flags for register BKOFF_SLOT_CFG */ #define RT2860_CC_DELAY_TIME_SHIFT 8 #define RT2860_SLOT_TIME 0 /* possible flags for register NAV_TIME_CFG */ -#define RT2860_NAV_UPD (1 << 31) +#define RT2860_NAV_UPD (1U << 31) #define RT2860_NAV_UPD_VAL_SHIFT 16 #define RT2860_NAV_CLR_EN (1 << 15) #define RT2860_NAV_TIMER_SHIFT 0 /* possible flags for register CH_TIME_CFG */ #define RT2860_EIFS_AS_CH_BUSY (1 << 4) #define RT2860_NAV_AS_CH_BUSY (1 << 3) #define RT2860_RX_AS_CH_BUSY (1 << 2) #define RT2860_TX_AS_CH_BUSY (1 << 1) #define RT2860_CH_STA_TIMER_EN (1 << 0) /* possible values for register BCN_TIME_CFG */ #define RT2860_TSF_INS_COMP_SHIFT 24 #define RT2860_BCN_TX_EN (1 << 20) #define RT2860_TBTT_TIMER_EN (1 << 19) #define RT2860_TSF_SYNC_MODE_SHIFT 17 #define RT2860_TSF_SYNC_MODE_DIS 0 #define RT2860_TSF_SYNC_MODE_STA 1 #define RT2860_TSF_SYNC_MODE_IBSS 2 #define RT2860_TSF_SYNC_MODE_HOSTAP 3 #define RT2860_TSF_TIMER_EN (1 << 16) #define RT2860_BCN_INTVAL_SHIFT 0 /* possible flags for register TBTT_SYNC_CFG */ #define RT2860_BCN_CWMIN_SHIFT 20 #define RT2860_BCN_AIFSN_SHIFT 16 #define RT2860_BCN_EXP_WIN_SHIFT 8 #define RT2860_TBTT_ADJUST_SHIFT 0 /* possible flags for register INT_TIMER_CFG */ #define RT2860_GP_TIMER_SHIFT 16 #define RT2860_PRE_TBTT_TIMER_SHIFT 0 /* possible flags for register INT_TIMER_EN */ #define RT2860_GP_TIMER_EN (1 << 1) #define RT2860_PRE_TBTT_INT_EN (1 << 0) /* possible flags for register MAC_STATUS_REG */ #define RT2860_RX_STATUS_BUSY (1 << 1) #define RT2860_TX_STATUS_BUSY (1 << 0) /* possible flags for register PWR_PIN_CFG */ #define RT2860_IO_ADDA_PD (1 << 3) #define RT2860_IO_PLL_PD (1 << 2) #define RT2860_IO_RA_PE (1 << 1) #define RT2860_IO_RF_PE (1 << 0) /* possible flags for register AUTO_WAKEUP_CFG */ #define RT2860_AUTO_WAKEUP_EN (1 << 15) #define RT2860_SLEEP_TBTT_NUM_SHIFT 8 #define RT2860_WAKEUP_LEAD_TIME_SHIFT 0 /* possible flags for register TX_PIN_CFG */ -#define RT3593_LNA_PE_G2_POL (1 << 31) +#define RT3593_LNA_PE_G2_POL (1U << 31) #define RT3593_LNA_PE_A2_POL (1 << 30) #define RT3593_LNA_PE_G2_EN (1 << 29) #define RT3593_LNA_PE_A2_EN (1 << 28) #define RT3593_LNA_PE2_EN (RT3593_LNA_PE_A2_EN | RT3593_LNA_PE_G2_EN) #define RT3593_PA_PE_G2_POL (1 << 27) #define RT3593_PA_PE_A2_POL (1 << 26) #define RT3593_PA_PE_G2_EN (1 << 25) #define RT3593_PA_PE_A2_EN (1 << 24) #define RT2860_TRSW_POL (1 << 19) #define RT2860_TRSW_EN (1 << 18) #define RT2860_RFTR_POL (1 << 17) #define RT2860_RFTR_EN (1 << 16) #define RT2860_LNA_PE_G1_POL (1 << 15) #define RT2860_LNA_PE_A1_POL (1 << 14) #define 
RT2860_LNA_PE_G0_POL (1 << 13) #define RT2860_LNA_PE_A0_POL (1 << 12) #define RT2860_LNA_PE_G1_EN (1 << 11) #define RT2860_LNA_PE_A1_EN (1 << 10) #define RT2860_LNA_PE1_EN (RT2860_LNA_PE_A1_EN | RT2860_LNA_PE_G1_EN) #define RT2860_LNA_PE_G0_EN (1 << 9) #define RT2860_LNA_PE_A0_EN (1 << 8) #define RT2860_LNA_PE0_EN (RT2860_LNA_PE_A0_EN | RT2860_LNA_PE_G0_EN) #define RT2860_PA_PE_G1_POL (1 << 7) #define RT2860_PA_PE_A1_POL (1 << 6) #define RT2860_PA_PE_G0_POL (1 << 5) #define RT2860_PA_PE_A0_POL (1 << 4) #define RT2860_PA_PE_G1_EN (1 << 3) #define RT2860_PA_PE_A1_EN (1 << 2) #define RT2860_PA_PE_G0_EN (1 << 1) #define RT2860_PA_PE_A0_EN (1 << 0) /* possible flags for register TX_BAND_CFG */ #define RT2860_5G_BAND_SEL_N (1 << 2) #define RT2860_5G_BAND_SEL_P (1 << 1) #define RT2860_TX_BAND_SEL (1 << 0) /* possible flags for register TX_SW_CFG0 */ #define RT2860_DLY_RFTR_EN_SHIFT 24 #define RT2860_DLY_TRSW_EN_SHIFT 16 #define RT2860_DLY_PAPE_EN_SHIFT 8 #define RT2860_DLY_TXPE_EN_SHIFT 0 /* possible flags for register TX_SW_CFG1 */ #define RT2860_DLY_RFTR_DIS_SHIFT 16 #define RT2860_DLY_TRSW_DIS_SHIFT 8 #define RT2860_DLY_PAPE_DIS_SHIFT 0 /* possible flags for register TX_SW_CFG2 */ #define RT2860_DLY_LNA_EN_SHIFT 24 #define RT2860_DLY_LNA_DIS_SHIFT 16 #define RT2860_DLY_DAC_EN_SHIFT 8 #define RT2860_DLY_DAC_DIS_SHIFT 0 /* possible flags for register TXOP_THRES_CFG */ #define RT2860_TXOP_REM_THRES_SHIFT 24 #define RT2860_CF_END_THRES_SHIFT 16 #define RT2860_RDG_IN_THRES 8 #define RT2860_RDG_OUT_THRES 0 /* possible flags for register TXOP_CTRL_CFG */ #define RT2860_EXT_CW_MIN_SHIFT 16 #define RT2860_EXT_CCA_DLY_SHIFT 8 #define RT2860_EXT_CCA_EN (1 << 7) #define RT2860_LSIG_TXOP_EN (1 << 6) #define RT2860_TXOP_TRUN_EN_MIMOPS (1 << 4) #define RT2860_TXOP_TRUN_EN_TXOP (1 << 3) #define RT2860_TXOP_TRUN_EN_RATE (1 << 2) #define RT2860_TXOP_TRUN_EN_AC (1 << 1) #define RT2860_TXOP_TRUN_EN_TIMEOUT (1 << 0) /* possible flags for register TX_RTS_CFG */ #define RT2860_RTS_FBK_EN (1 << 24) #define RT2860_RTS_THRES_SHIFT 8 #define RT2860_RTS_RTY_LIMIT_SHIFT 0 /* possible flags for register TX_TIMEOUT_CFG */ #define RT2860_TXOP_TIMEOUT_SHIFT 16 #define RT2860_RX_ACK_TIMEOUT_SHIFT 8 #define RT2860_MPDU_LIFE_TIME_SHIFT 4 /* possible flags for register TX_RTY_CFG */ #define RT2860_TX_AUTOFB_EN (1 << 30) #define RT2860_AGG_RTY_MODE_TIMER (1 << 29) #define RT2860_NAG_RTY_MODE_TIMER (1 << 28) #define RT2860_LONG_RTY_THRES_SHIFT 16 #define RT2860_LONG_RTY_LIMIT_SHIFT 8 #define RT2860_SHORT_RTY_LIMIT_SHIFT 0 /* possible flags for register TX_LINK_CFG */ #define RT2860_REMOTE_MFS_SHIFT 24 #define RT2860_REMOTE_MFB_SHIFT 16 #define RT2860_TX_CFACK_EN (1 << 12) #define RT2860_TX_RDG_EN (1 << 11) #define RT2860_TX_MRQ_EN (1 << 10) #define RT2860_REMOTE_UMFS_EN (1 << 9) #define RT2860_TX_MFB_EN (1 << 8) #define RT2860_REMOTE_MFB_LT_SHIFT 0 /* possible flags for registers *_PROT_CFG */ #define RT2860_RTSTH_EN (1 << 26) #define RT2860_TXOP_ALLOW_GF40 (1 << 25) #define RT2860_TXOP_ALLOW_GF20 (1 << 24) #define RT2860_TXOP_ALLOW_MM40 (1 << 23) #define RT2860_TXOP_ALLOW_MM20 (1 << 22) #define RT2860_TXOP_ALLOW_OFDM (1 << 21) #define RT2860_TXOP_ALLOW_CCK (1 << 20) #define RT2860_TXOP_ALLOW_ALL (0x3f << 20) #define RT2860_PROT_NAV_SHORT (1 << 18) #define RT2860_PROT_NAV_LONG (2 << 18) #define RT2860_PROT_CTRL_RTS_CTS (1 << 16) #define RT2860_PROT_CTRL_CTS (2 << 16) /* possible flags for registers EXP_{CTS,ACK}_TIME */ #define RT2860_EXP_OFDM_TIME_SHIFT 16 #define RT2860_EXP_CCK_TIME_SHIFT 0 /* possible flags for register
RX_FILTR_CFG */ #define RT2860_DROP_CTRL_RSV (1 << 16) #define RT2860_DROP_BAR (1 << 15) #define RT2860_DROP_BA (1 << 14) #define RT2860_DROP_PSPOLL (1 << 13) #define RT2860_DROP_RTS (1 << 12) #define RT2860_DROP_CTS (1 << 11) #define RT2860_DROP_ACK (1 << 10) #define RT2860_DROP_CFEND (1 << 9) #define RT2860_DROP_CFACK (1 << 8) #define RT2860_DROP_DUPL (1 << 7) #define RT2860_DROP_BC (1 << 6) #define RT2860_DROP_MC (1 << 5) #define RT2860_DROP_VER_ERR (1 << 4) #define RT2860_DROP_NOT_MYBSS (1 << 3) #define RT2860_DROP_UC_NOME (1 << 2) #define RT2860_DROP_PHY_ERR (1 << 1) #define RT2860_DROP_CRC_ERR (1 << 0) /* possible flags for register AUTO_RSP_CFG */ #define RT2860_CTRL_PWR_BIT (1 << 7) #define RT2860_BAC_ACK_POLICY (1 << 6) #define RT2860_CCK_SHORT_EN (1 << 4) #define RT2860_CTS_40M_REF_EN (1 << 3) #define RT2860_CTS_40M_MODE_EN (1 << 2) #define RT2860_BAC_ACKPOLICY_EN (1 << 1) #define RT2860_AUTO_RSP_EN (1 << 0) /* possible flags for register SIFS_COST_CFG */ #define RT2860_OFDM_SIFS_COST_SHIFT 8 #define RT2860_CCK_SIFS_COST_SHIFT 0 /* possible flags for register TXOP_HLDR_ET */ #define RT2860_TXOP_ETM1_EN (1 << 25) #define RT2860_TXOP_ETM0_EN (1 << 24) #define RT2860_TXOP_ETM_THRES_SHIFT 16 #define RT2860_TXOP_ETO_EN (1 << 8) #define RT2860_TXOP_ETO_THRES_SHIFT 1 #define RT2860_PER_RX_RST_EN (1 << 0) /* possible flags for register TX_STAT_FIFO */ #define RT2860_TXQ_MCS_SHIFT 16 #define RT2860_TXQ_WCID_SHIFT 8 #define RT2860_TXQ_ACKREQ (1 << 7) #define RT2860_TXQ_AGG (1 << 6) #define RT2860_TXQ_OK (1 << 5) #define RT2860_TXQ_PID_SHIFT 1 #define RT2860_TXQ_VLD (1 << 0) /* possible flags for register WCID_ATTR */ #define RT2860_MODE_NOSEC 0 #define RT2860_MODE_WEP40 1 #define RT2860_MODE_WEP104 2 #define RT2860_MODE_TKIP 3 #define RT2860_MODE_AES_CCMP 4 #define RT2860_MODE_CKIP40 5 #define RT2860_MODE_CKIP104 6 #define RT2860_MODE_CKIP128 7 #define RT2860_RX_PKEY_EN (1 << 0) /* possible flags for register H2M_MAILBOX */ #define RT2860_H2M_BUSY (1 << 24) #define RT2860_TOKEN_NO_INTR 0xff /* possible flags for MCU command RT2860_MCU_CMD_LEDS */ #define RT2860_LED_RADIO (1 << 13) #define RT2860_LED_LINK_2GHZ (1 << 14) #define RT2860_LED_LINK_5GHZ (1 << 15) /* possible flags for RT3020 RF register 1 */ #define RT3070_RF_BLOCK (1 << 0) #define RT3070_RX0_PD (1 << 2) #define RT3070_TX0_PD (1 << 3) #define RT3070_RX1_PD (1 << 4) #define RT3070_TX1_PD (1 << 5) #define RT3070_RX2_PD (1 << 6) #define RT3070_TX2_PD (1 << 7) /* possible flags for RT3020 RF register 7 */ #define RT3070_TUNE (1 << 0) /* possible flags for RT3020 RF register 15 */ #define RT3070_TX_LO2 (1 << 3) /* possible flags for RT3020 RF register 17 */ #define RT3070_TX_LO1 (1 << 3) /* possible flags for RT3020 RF register 20 */ #define RT3070_RX_LO1 (1 << 3) /* possible flags for RT3020 RF register 21 */ #define RT3070_RX_LO2 (1 << 3) #define RT3070_RX_CTB (1 << 7) /* possible flags for RT3020 RF register 22 */ #define RT3070_BB_LOOPBACK (1 << 0) /* possible flags for RT3053 RF register 1 */ #define RT3593_VCO (1 << 0) /* possible flags for RT3053 RF register 2 */ #define RT3593_RESCAL (1 << 7) /* possible flags for RT3053 RF register 3 */ #define RT3593_VCOCAL (1 << 7) /* possible flags for RT3053 RF register 6 */ #define RT3593_VCO_IC (1 << 6) /* possible flags for RT3053 RF register 20 */ #define RT3593_LDO_PLL_VC_MASK 0x0e #define RT3593_LDO_RF_VC_MASK 0xe0 /* possible flags for RT3053 RF register 22 */ #define RT3593_CP_IC_MASK 0xe0 #define RT3593_CP_IC_SHIFT 5 /* possible flags for RT3053 RF register 46 */ #define 
RT3593_RX_CTB (1 << 5) #define RT3090_DEF_LNA 10 /* RT2860 TX descriptor */ struct rt2860_txd { uint32_t sdp0; /* Segment Data Pointer 0 */ uint16_t sdl1; /* Segment Data Length 1 */ #define RT2860_TX_BURST (1 << 15) #define RT2860_TX_LS1 (1 << 14) /* SDP1 is the last segment */ uint16_t sdl0; /* Segment Data Length 0 */ #define RT2860_TX_DDONE (1 << 15) #define RT2860_TX_LS0 (1 << 14) /* SDP0 is the last segment */ uint32_t sdp1; /* Segment Data Pointer 1 */ uint8_t reserved[3]; uint8_t flags; #define RT2860_TX_QSEL_SHIFT 1 #define RT2860_TX_QSEL_MGMT (0 << 1) #define RT2860_TX_QSEL_HCCA (1 << 1) #define RT2860_TX_QSEL_EDCA (2 << 1) #define RT2860_TX_WIV (1 << 0) } __packed; /* RT2870 TX descriptor */ struct rt2870_txd { uint16_t len; uint8_t pad; uint8_t flags; } __packed; /* TX Wireless Information */ struct rt2860_txwi { uint8_t flags; #define RT2860_TX_MPDU_DSITY_SHIFT 5 #define RT2860_TX_AMPDU (1 << 4) #define RT2860_TX_TS (1 << 3) #define RT2860_TX_CFACK (1 << 2) #define RT2860_TX_MMPS (1 << 1) #define RT2860_TX_FRAG (1 << 0) uint8_t txop; #define RT2860_TX_TXOP_HT 0 #define RT2860_TX_TXOP_PIFS 1 #define RT2860_TX_TXOP_SIFS 2 #define RT2860_TX_TXOP_BACKOFF 3 uint16_t phy; #define RT2860_PHY_MODE 0xc000 #define RT2860_PHY_CCK (0 << 14) #define RT2860_PHY_OFDM (1 << 14) #define RT2860_PHY_HT (2 << 14) #define RT2860_PHY_HT_GF (3 << 14) #define RT2860_PHY_SGI (1 << 8) #define RT2860_PHY_BW40 (1 << 7) #define RT2860_PHY_MCS 0x7f #define RT2860_PHY_SHPRE (1 << 3) uint8_t xflags; #define RT2860_TX_BAWINSIZE_SHIFT 2 #define RT2860_TX_NSEQ (1 << 1) #define RT2860_TX_ACK (1 << 0) uint8_t wcid; /* Wireless Client ID */ uint16_t len; #define RT2860_TX_PID_SHIFT 12 uint32_t iv; uint32_t eiv; } __packed; /* RT2860 RX descriptor */ struct rt2860_rxd { uint32_t sdp0; uint16_t sdl1; /* unused */ uint16_t sdl0; #define RT2860_RX_DDONE (1 << 15) #define RT2860_RX_LS0 (1 << 14) uint32_t sdp1; /* unused */ uint32_t flags; #define RT2860_RX_DEC (1 << 16) #define RT2860_RX_AMPDU (1 << 15) #define RT2860_RX_L2PAD (1 << 14) #define RT2860_RX_RSSI (1 << 13) #define RT2860_RX_HTC (1 << 12) #define RT2860_RX_AMSDU (1 << 11) #define RT2860_RX_MICERR (1 << 10) #define RT2860_RX_ICVERR (1 << 9) #define RT2860_RX_CRCERR (1 << 8) #define RT2860_RX_MYBSS (1 << 7) #define RT2860_RX_BC (1 << 6) #define RT2860_RX_MC (1 << 5) #define RT2860_RX_UC2ME (1 << 4) #define RT2860_RX_FRAG (1 << 3) #define RT2860_RX_NULL (1 << 2) #define RT2860_RX_DATA (1 << 1) #define RT2860_RX_BA (1 << 0) } __packed; /* RT2870 RX descriptor */ struct rt2870_rxd { /* single 32-bit field */ uint32_t flags; } __packed; /* RX Wireless Information */ struct rt2860_rxwi { uint8_t wcid; uint8_t keyidx; #define RT2860_RX_UDF_SHIFT 5 #define RT2860_RX_BSS_IDX_SHIFT 2 uint16_t len; #define RT2860_RX_TID_SHIFT 12 uint16_t seq; uint16_t phy; uint8_t rssi[3]; uint8_t reserved1; uint8_t snr[2]; uint16_t reserved2; } __packed; /* first DMA segment contains TXWI + 802.11 header + 32-bit padding */ #define RT2860_TXWI_DMASZ \ (sizeof (struct rt2860_txwi) + \ sizeof (struct ieee80211_frame) + 6 + \ sizeof (uint16_t)) #define RT2860_RF1 0 #define RT2860_RF2 2 #define RT2860_RF3 1 #define RT2860_RF4 3 #define RT2860_RF_2820 1 /* 2T3R */ #define RT2860_RF_2850 2 /* dual-band 2T3R */ #define RT2860_RF_2720 3 /* 1T2R */ #define RT2860_RF_2750 4 /* dual-band 1T2R */ #define RT3070_RF_3020 5 /* 1T1R */ #define RT3070_RF_2020 6 /* b/g */ #define RT3070_RF_3021 7 /* 1T2R */ #define RT3070_RF_3022 8 /* 2T2R */ #define RT3070_RF_3052 9 /* dual-band 2T2R */ #define 
RT3070_RF_3320 11 /* 1T1R */ #define RT3070_RF_3053 13 /* dual-band 3T3R */ /* USB commands for RT2870 only */ #define RT2870_RESET 1 #define RT2870_WRITE_2 2 #define RT2870_WRITE_REGION_1 6 #define RT2870_READ_REGION_1 7 #define RT2870_EEPROM_READ 9 #define RT2860_EEPROM_DELAY 1 /* minimum hold time (microsecond) */ #define RT2860_EEPROM_VERSION 0x01 #define RT2860_EEPROM_MAC01 0x02 #define RT2860_EEPROM_MAC23 0x03 #define RT2860_EEPROM_MAC45 0x04 #define RT2860_EEPROM_PCIE_PSLEVEL 0x11 #define RT2860_EEPROM_REV 0x12 #define RT2860_EEPROM_ANTENNA 0x1a #define RT2860_EEPROM_CONFIG 0x1b #define RT2860_EEPROM_COUNTRY 0x1c #define RT2860_EEPROM_FREQ_LEDS 0x1d #define RT2860_EEPROM_LED1 0x1e #define RT2860_EEPROM_LED2 0x1f #define RT2860_EEPROM_LED3 0x20 #define RT2860_EEPROM_LNA 0x22 #define RT2860_EEPROM_RSSI1_2GHZ 0x23 #define RT2860_EEPROM_RSSI2_2GHZ 0x24 #define RT2860_EEPROM_RSSI1_5GHZ 0x25 #define RT2860_EEPROM_RSSI2_5GHZ 0x26 #define RT2860_EEPROM_DELTAPWR 0x28 #define RT2860_EEPROM_PWR2GHZ_BASE1 0x29 #define RT2860_EEPROM_PWR2GHZ_BASE2 0x30 #define RT2860_EEPROM_TSSI1_2GHZ 0x37 #define RT2860_EEPROM_TSSI2_2GHZ 0x38 #define RT2860_EEPROM_TSSI3_2GHZ 0x39 #define RT2860_EEPROM_TSSI4_2GHZ 0x3a #define RT2860_EEPROM_TSSI5_2GHZ 0x3b #define RT2860_EEPROM_PWR5GHZ_BASE1 0x3c #define RT2860_EEPROM_PWR5GHZ_BASE2 0x53 #define RT2860_EEPROM_TSSI1_5GHZ 0x6a #define RT2860_EEPROM_TSSI2_5GHZ 0x6b #define RT2860_EEPROM_TSSI3_5GHZ 0x6c #define RT2860_EEPROM_TSSI4_5GHZ 0x6d #define RT2860_EEPROM_TSSI5_5GHZ 0x6e #define RT2860_EEPROM_RPWR 0x6f #define RT2860_EEPROM_BBP_BASE 0x78 #define RT3071_EEPROM_RF_BASE 0x82 #define RT2860_RIDX_CCK1 0 #define RT2860_RIDX_CCK11 3 #define RT2860_RIDX_OFDM6 4 #define RT2860_RIDX_MAX 11 static const struct rt2860_rate { uint8_t rate; uint8_t mcs; enum ieee80211_phytype phy; uint8_t ctl_ridx; uint16_t sp_ack_dur; uint16_t lp_ack_dur; } rt2860_rates[] = { { 2, 0, IEEE80211_T_DS, 0, 314, 314 }, { 4, 1, IEEE80211_T_DS, 1, 258, 162 }, { 11, 2, IEEE80211_T_DS, 2, 223, 127 }, { 22, 3, IEEE80211_T_DS, 3, 213, 117 }, { 12, 0, IEEE80211_T_OFDM, 4, 60, 60 }, { 18, 1, IEEE80211_T_OFDM, 4, 52, 52 }, { 24, 2, IEEE80211_T_OFDM, 6, 48, 48 }, { 36, 3, IEEE80211_T_OFDM, 6, 44, 44 }, { 48, 4, IEEE80211_T_OFDM, 8, 44, 44 }, { 72, 5, IEEE80211_T_OFDM, 8, 40, 40 }, { 96, 6, IEEE80211_T_OFDM, 8, 40, 40 }, { 108, 7, IEEE80211_T_OFDM, 8, 40, 40 } }; /* * Control and status registers access macros. */ #define RAL_READ(sc, reg) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) #define RAL_WRITE(sc, reg, val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) #define RAL_BARRIER_WRITE(sc) \ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, 0x1800, \ BUS_SPACE_BARRIER_WRITE) #define RAL_BARRIER_READ_WRITE(sc) \ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, 0x1800, \ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE) #define RAL_WRITE_REGION_1(sc, offset, datap, count) \ bus_space_write_region_1((sc)->sc_st, (sc)->sc_sh, (offset), \ (datap), (count)) #define RAL_SET_REGION_4(sc, offset, val, count) \ bus_space_set_region_4((sc)->sc_st, (sc)->sc_sh, (offset), \ (val), (count)) /* * EEPROM access macro. */ #define RT2860_EEPROM_CTL(sc, val) do { \ RAL_WRITE((sc), RT2860_PCI_EECTRL, (val)); \ RAL_BARRIER_READ_WRITE((sc)); \ DELAY(RT2860_EEPROM_DELAY); \ } while (/* CONSTCOND */0) /* * Default values for MAC registers; values taken from the reference driver. 
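*
* Each entry is a { register, value } pair. At initialization time the
* driver presumably expands the macro into a table and programs the
* registers in order; a minimal sketch (local names assumed, not taken
* from this header):
*
*	static const struct { uint32_t reg, val; } def_mac[] = { RT2860_DEF_MAC };
*	for (i = 0; i < nitems(def_mac); i++)
*		RAL_WRITE(sc, def_mac[i].reg, def_mac[i].val);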
*/ #define RT2860_DEF_MAC \ { RT2860_BCN_OFFSET0, 0xf8f0e8e0 }, \ { RT2860_LEGACY_BASIC_RATE, 0x0000013f }, \ { RT2860_HT_BASIC_RATE, 0x00008003 }, \ { RT2860_MAC_SYS_CTRL, 0x00000000 }, \ { RT2860_BKOFF_SLOT_CFG, 0x00000209 }, \ { RT2860_TX_SW_CFG0, 0x00000000 }, \ { RT2860_TX_SW_CFG1, 0x00080606 }, \ { RT2860_TX_LINK_CFG, 0x00001020 }, \ { RT2860_TX_TIMEOUT_CFG, 0x000a2090 }, \ { RT2860_LED_CFG, 0x7f031e46 }, \ { RT2860_WMM_AIFSN_CFG, 0x00002273 }, \ { RT2860_WMM_CWMIN_CFG, 0x00002344 }, \ { RT2860_WMM_CWMAX_CFG, 0x000034aa }, \ { RT2860_MAX_PCNT, 0x1f3fbf9f }, \ { RT2860_TX_RTY_CFG, 0x47d01f0f }, \ { RT2860_AUTO_RSP_CFG, 0x00000013 }, \ { RT2860_CCK_PROT_CFG, 0x05740003 }, \ { RT2860_OFDM_PROT_CFG, 0x05740003 }, \ { RT2860_GF20_PROT_CFG, 0x01744004 }, \ { RT2860_GF40_PROT_CFG, 0x03f44084 }, \ { RT2860_MM20_PROT_CFG, 0x01744004 }, \ { RT2860_MM40_PROT_CFG, 0x03f54084 }, \ { RT2860_TXOP_CTRL_CFG, 0x0000583f }, \ { RT2860_TXOP_HLDR_ET, 0x00000002 }, \ { RT2860_TX_RTS_CFG, 0x00092b20 }, \ { RT2860_EXP_ACK_TIME, 0x002400ca }, \ { RT2860_XIFS_TIME_CFG, 0x33a41010 }, \ { RT2860_PWR_PIN_CFG, 0x00000003 } /* XXX only a few registers differ from above, try to merge? */ #define RT2870_DEF_MAC \ { RT2860_BCN_OFFSET0, 0xf8f0e8e0 }, \ { RT2860_LEGACY_BASIC_RATE, 0x0000013f }, \ { RT2860_HT_BASIC_RATE, 0x00008003 }, \ { RT2860_MAC_SYS_CTRL, 0x00000000 }, \ { RT2860_BKOFF_SLOT_CFG, 0x00000209 }, \ { RT2860_TX_SW_CFG0, 0x00000000 }, \ { RT2860_TX_SW_CFG1, 0x00080606 }, \ { RT2860_TX_LINK_CFG, 0x00001020 }, \ { RT2860_TX_TIMEOUT_CFG, 0x000a2090 }, \ { RT2860_LED_CFG, 0x7f031e46 }, \ { RT2860_WMM_AIFSN_CFG, 0x00002273 }, \ { RT2860_WMM_CWMIN_CFG, 0x00002344 }, \ { RT2860_WMM_CWMAX_CFG, 0x000034aa }, \ { RT2860_MAX_PCNT, 0x1f3fbf9f }, \ { RT2860_TX_RTY_CFG, 0x47d01f0f }, \ { RT2860_AUTO_RSP_CFG, 0x00000013 }, \ { RT2860_CCK_PROT_CFG, 0x05740003 }, \ { RT2860_OFDM_PROT_CFG, 0x05740003 }, \ { RT2860_PBF_CFG, 0x00f40006 }, \ { RT2860_WPDMA_GLO_CFG, 0x00000030 }, \ { RT2860_GF20_PROT_CFG, 0x01744004 }, \ { RT2860_GF40_PROT_CFG, 0x03f44084 }, \ { RT2860_MM20_PROT_CFG, 0x01744004 }, \ { RT2860_MM40_PROT_CFG, 0x03f44084 }, \ { RT2860_TXOP_CTRL_CFG, 0x0000583f }, \ { RT2860_TXOP_HLDR_ET, 0x00000002 }, \ { RT2860_TX_RTS_CFG, 0x00092b20 }, \ { RT2860_EXP_ACK_TIME, 0x002400ca }, \ { RT2860_XIFS_TIME_CFG, 0x33a41010 }, \ { RT2860_PWR_PIN_CFG, 0x00000003 } /* * Default values for BBP registers; values taken from the reference driver. */ #define RT2860_DEF_BBP \ { 65, 0x2c }, \ { 66, 0x38 }, \ { 69, 0x12 }, \ { 70, 0x0a }, \ { 73, 0x10 }, \ { 81, 0x37 }, \ { 82, 0x62 }, \ { 83, 0x6a }, \ { 84, 0x99 }, \ { 86, 0x00 }, \ { 91, 0x04 }, \ { 92, 0x00 }, \ { 103, 0x00 }, \ { 105, 0x05 }, \ { 106, 0x35 } /* * Default settings for RF registers; values derived from the reference driver. 
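*
* RT2860_RF2850 entries below read as { channel, R1, R2, R3, R4 } tuples,
* while RT3070_RF3052 entries appear to be per-channel { N, R, K }
* synthesizer parameters (a hedged reading of the reference driver).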
*/ #define RT2860_RF2850 \ { 1, 0x100bb3, 0x1301e1, 0x05a014, 0x001402 }, \ { 2, 0x100bb3, 0x1301e1, 0x05a014, 0x001407 }, \ { 3, 0x100bb3, 0x1301e2, 0x05a014, 0x001402 }, \ { 4, 0x100bb3, 0x1301e2, 0x05a014, 0x001407 }, \ { 5, 0x100bb3, 0x1301e3, 0x05a014, 0x001402 }, \ { 6, 0x100bb3, 0x1301e3, 0x05a014, 0x001407 }, \ { 7, 0x100bb3, 0x1301e4, 0x05a014, 0x001402 }, \ { 8, 0x100bb3, 0x1301e4, 0x05a014, 0x001407 }, \ { 9, 0x100bb3, 0x1301e5, 0x05a014, 0x001402 }, \ { 10, 0x100bb3, 0x1301e5, 0x05a014, 0x001407 }, \ { 11, 0x100bb3, 0x1301e6, 0x05a014, 0x001402 }, \ { 12, 0x100bb3, 0x1301e6, 0x05a014, 0x001407 }, \ { 13, 0x100bb3, 0x1301e7, 0x05a014, 0x001402 }, \ { 14, 0x100bb3, 0x1301e8, 0x05a014, 0x001404 }, \ { 36, 0x100bb3, 0x130266, 0x056014, 0x001408 }, \ { 38, 0x100bb3, 0x130267, 0x056014, 0x001404 }, \ { 40, 0x100bb2, 0x1301a0, 0x056014, 0x001400 }, \ { 44, 0x100bb2, 0x1301a0, 0x056014, 0x001408 }, \ { 46, 0x100bb2, 0x1301a1, 0x056014, 0x001402 }, \ { 48, 0x100bb2, 0x1301a1, 0x056014, 0x001406 }, \ { 52, 0x100bb2, 0x1301a2, 0x056014, 0x001404 }, \ { 54, 0x100bb2, 0x1301a2, 0x056014, 0x001408 }, \ { 56, 0x100bb2, 0x1301a3, 0x056014, 0x001402 }, \ { 60, 0x100bb2, 0x1301a4, 0x056014, 0x001400 }, \ { 62, 0x100bb2, 0x1301a4, 0x056014, 0x001404 }, \ { 64, 0x100bb2, 0x1301a4, 0x056014, 0x001408 }, \ { 100, 0x100bb2, 0x1301ac, 0x05e014, 0x001400 }, \ { 102, 0x100bb2, 0x1701ac, 0x15e014, 0x001404 }, \ { 104, 0x100bb2, 0x1701ac, 0x15e014, 0x001408 }, \ { 108, 0x100bb3, 0x17028c, 0x15e014, 0x001404 }, \ { 110, 0x100bb3, 0x13028d, 0x05e014, 0x001400 }, \ { 112, 0x100bb3, 0x13028d, 0x05e014, 0x001406 }, \ { 116, 0x100bb3, 0x13028e, 0x05e014, 0x001408 }, \ { 118, 0x100bb3, 0x13028f, 0x05e014, 0x001404 }, \ { 120, 0x100bb1, 0x1300e0, 0x05e014, 0x001400 }, \ { 124, 0x100bb1, 0x1300e0, 0x05e014, 0x001404 }, \ { 126, 0x100bb1, 0x1300e0, 0x05e014, 0x001406 }, \ { 128, 0x100bb1, 0x1300e0, 0x05e014, 0x001408 }, \ { 132, 0x100bb1, 0x1300e1, 0x05e014, 0x001402 }, \ { 134, 0x100bb1, 0x1300e1, 0x05e014, 0x001404 }, \ { 136, 0x100bb1, 0x1300e1, 0x05e014, 0x001406 }, \ { 140, 0x100bb1, 0x1300e2, 0x05e014, 0x001400 }, \ { 149, 0x100bb1, 0x1300e2, 0x05e014, 0x001409 }, \ { 151, 0x100bb1, 0x1300e3, 0x05e014, 0x001401 }, \ { 153, 0x100bb1, 0x1300e3, 0x05e014, 0x001403 }, \ { 157, 0x100bb1, 0x1300e3, 0x05e014, 0x001407 }, \ { 159, 0x100bb1, 0x1300e3, 0x05e014, 0x001409 }, \ { 161, 0x100bb1, 0x1300e4, 0x05e014, 0x001401 }, \ { 165, 0x100bb1, 0x1300e4, 0x05e014, 0x001405 }, \ { 167, 0x100bb1, 0x1300f4, 0x05e014, 0x001407 }, \ { 169, 0x100bb1, 0x1300f4, 0x05e014, 0x001409 }, \ { 171, 0x100bb1, 0x1300f5, 0x05e014, 0x001401 }, \ { 173, 0x100bb1, 0x1300f5, 0x05e014, 0x001403 } #define RT3070_RF3052 \ { 0xf1, 2, 2 }, \ { 0xf1, 2, 7 }, \ { 0xf2, 2, 2 }, \ { 0xf2, 2, 7 }, \ { 0xf3, 2, 2 }, \ { 0xf3, 2, 7 }, \ { 0xf4, 2, 2 }, \ { 0xf4, 2, 7 }, \ { 0xf5, 2, 2 }, \ { 0xf5, 2, 7 }, \ { 0xf6, 2, 2 }, \ { 0xf6, 2, 7 }, \ { 0xf7, 2, 2 }, \ { 0xf8, 2, 4 }, \ { 0x56, 0, 4 }, \ { 0x56, 0, 6 }, \ { 0x56, 0, 8 }, \ { 0x57, 0, 0 }, \ { 0x57, 0, 2 }, \ { 0x57, 0, 4 }, \ { 0x57, 0, 8 }, \ { 0x57, 0, 10 }, \ { 0x58, 0, 0 }, \ { 0x58, 0, 4 }, \ { 0x58, 0, 6 }, \ { 0x58, 0, 8 }, \ { 0x5b, 0, 8 }, \ { 0x5b, 0, 10 }, \ { 0x5c, 0, 0 }, \ { 0x5c, 0, 4 }, \ { 0x5c, 0, 6 }, \ { 0x5c, 0, 8 }, \ { 0x5d, 0, 0 }, \ { 0x5d, 0, 2 }, \ { 0x5d, 0, 4 }, \ { 0x5d, 0, 8 }, \ { 0x5d, 0, 10 }, \ { 0x5e, 0, 0 }, \ { 0x5e, 0, 4 }, \ { 0x5e, 0, 6 }, \ { 0x5e, 0, 8 }, \ { 0x5f, 0, 0 }, \ { 0x5f, 0, 9 }, \ { 0x5f, 0, 11 }, \ { 0x60, 0, 1 }, \ { 0x60, 0, 5 }, \ { 
0x60, 0, 7 }, \ { 0x60, 0, 9 }, \ { 0x61, 0, 1 }, \ { 0x61, 0, 3 }, \ { 0x61, 0, 5 }, \ { 0x61, 0, 7 }, \ { 0x61, 0, 9 } #define RT3070_DEF_RF \ { 4, 0x40 }, \ { 5, 0x03 }, \ { 6, 0x02 }, \ { 7, 0x70 }, \ { 9, 0x0f }, \ { 10, 0x41 }, \ { 11, 0x21 }, \ { 12, 0x7b }, \ { 14, 0x90 }, \ { 15, 0x58 }, \ { 16, 0xb3 }, \ { 17, 0x92 }, \ { 18, 0x2c }, \ { 19, 0x02 }, \ { 20, 0xba }, \ { 21, 0xdb }, \ { 24, 0x16 }, \ { 25, 0x01 }, \ { 29, 0x1f } #define RT3572_DEF_RF \ { 0, 0x70 }, \ { 1, 0x81 }, \ { 2, 0xf1 }, \ { 3, 0x02 }, \ { 4, 0x4c }, \ { 5, 0x05 }, \ { 6, 0x4a }, \ { 7, 0xd8 }, \ { 9, 0xc3 }, \ { 10, 0xf1 }, \ { 11, 0xb9 }, \ { 12, 0x70 }, \ { 13, 0x65 }, \ { 14, 0xa0 }, \ { 15, 0x53 }, \ { 16, 0x4c }, \ { 17, 0x23 }, \ { 18, 0xac }, \ { 19, 0x93 }, \ { 20, 0xb3 }, \ { 21, 0xd0 }, \ { 22, 0x00 }, \ { 23, 0x3c }, \ { 24, 0x16 }, \ { 25, 0x15 }, \ { 26, 0x85 }, \ { 27, 0x00 }, \ { 28, 0x00 }, \ { 29, 0x9b }, \ { 30, 0x09 }, \ { 31, 0x10 } Index: head/sys/dev/sound/pci/hda/hdaa.h =================================================================== --- head/sys/dev/sound/pci/hda/hdaa.h (revision 258779) +++ head/sys/dev/sound/pci/hda/hdaa.h (revision 258780) @@ -1,274 +1,274 @@ /*- * Copyright (c) 2006 Stephane E. Potvin * Copyright (c) 2006 Ariff Abdullah * Copyright (c) 2008-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Intel High Definition Audio (Audio function quirks) driver for FreeBSD.
*/ #ifndef _HDAA_QUIRKS_H_ #define _HDAA_QUIRKS_H_ #define HDAA_GPIO_SHIFT(n) (n * 3) #define HDAA_GPIO_MASK(n) (0x7 << (n * 3)) #define HDAA_GPIO_KEEP(n) (0x0 << (n * 3)) #define HDAA_GPIO_SET(n) (0x1 << (n * 3)) #define HDAA_GPIO_CLEAR(n) (0x2 << (n * 3)) #define HDAA_GPIO_DISABLE(n) (0x3 << (n * 3)) #define HDAA_GPIO_INPUT(n) (0x4 << (n * 3)) /* 9 - 25 = anything else */ #define HDAA_QUIRK_SOFTPCMVOL (1 << 9) #define HDAA_QUIRK_FIXEDRATE (1 << 10) #define HDAA_QUIRK_FORCESTEREO (1 << 11) #define HDAA_QUIRK_EAPDINV (1 << 12) #define HDAA_QUIRK_SENSEINV (1 << 14) /* 26 - 31 = vrefs */ #define HDAA_QUIRK_IVREF50 (1 << 26) #define HDAA_QUIRK_IVREF80 (1 << 27) #define HDAA_QUIRK_IVREF100 (1 << 28) #define HDAA_QUIRK_OVREF50 (1 << 29) #define HDAA_QUIRK_OVREF80 (1 << 30) -#define HDAA_QUIRK_OVREF100 (1 << 31) +#define HDAA_QUIRK_OVREF100 (1U << 31) #define HDAA_QUIRK_IVREF (HDAA_QUIRK_IVREF50 | HDAA_QUIRK_IVREF80 | \ HDAA_QUIRK_IVREF100) #define HDAA_QUIRK_OVREF (HDAA_QUIRK_OVREF50 | HDAA_QUIRK_OVREF80 | \ HDAA_QUIRK_OVREF100) #define HDAA_QUIRK_VREF (HDAA_QUIRK_IVREF | HDAA_QUIRK_OVREF) #define HDAA_AMP_VOL_DEFAULT (-1) #define HDAA_AMP_MUTE_DEFAULT (0xffffffff) #define HDAA_AMP_MUTE_NONE (0) #define HDAA_AMP_MUTE_LEFT (1 << 0) #define HDAA_AMP_MUTE_RIGHT (1 << 1) #define HDAA_AMP_MUTE_ALL (HDAA_AMP_MUTE_LEFT | HDAA_AMP_MUTE_RIGHT) #define HDAA_AMP_LEFT_MUTED(v) ((v) & (HDAA_AMP_MUTE_LEFT)) #define HDAA_AMP_RIGHT_MUTED(v) (((v) & HDAA_AMP_MUTE_RIGHT) >> 1) /* Widget in playback receiving signal from recording. */ #define HDAA_ADC_MONITOR (1 << 0) /* Input mixer widget needs volume control as destination. */ #define HDAA_IMIX_AS_DST (2 << 0) #define HDAA_CTL_OUT 1 #define HDAA_CTL_IN 2 #define HDA_MAX_CONNS 32 #define HDA_MAX_NAMELEN 32 struct hdaa_audio_as; struct hdaa_audio_ctl; struct hdaa_chan; struct hdaa_devinfo; struct hdaa_pcm_devinfo; struct hdaa_widget; struct hdaa_widget { nid_t nid; int type; int enable; int nconns, selconn; int waspin; uint32_t pflags; int bindas; int bindseqmask; int ossdev; uint32_t ossmask; int unsol; nid_t conns[HDA_MAX_CONNS]; u_char connsenable[HDA_MAX_CONNS]; char name[HDA_MAX_NAMELEN]; uint8_t *eld; int eld_len; struct hdaa_devinfo *devinfo; struct { uint32_t widget_cap; uint32_t outamp_cap; uint32_t inamp_cap; uint32_t supp_stream_formats; uint32_t supp_pcm_size_rate; uint32_t eapdbtl; } param; union { struct { uint32_t config; uint32_t original; uint32_t newconf; uint32_t cap; uint32_t ctrl; int connected; } pin; struct { uint8_t stripecap; } conv; } wclass; }; struct hdaa_audio_ctl { struct hdaa_widget *widget, *childwidget; int enable; int index, dir, ndir; int mute, step, size, offset; int left, right, forcemute; uint32_t muted; uint32_t ossmask; /* OSS devices that may affect control. */ int devleft[SOUND_MIXER_NRDEVICES]; /* Left ampl in 1/4dB. */ int devright[SOUND_MIXER_NRDEVICES]; /* Right ampl in 1/4dB. */ int devmute[SOUND_MIXER_NRDEVICES]; /* Mutes per OSS device. */ }; /* Association is a group of pins bound for some special function. */ struct hdaa_audio_as { u_char enable; u_char index; u_char dir; u_char pincnt; u_char fakeredir; u_char digital; uint16_t pinset; nid_t hpredir; nid_t pins[16]; nid_t dacs[2][16]; int num_chans; int chans[2]; int location; /* Pins location, if all have the same */ int mixed; /* Mixed/multiplexed recording, not multichannel. 
*/ struct hdaa_pcm_devinfo *pdevinfo; }; struct hdaa_pcm_devinfo { device_t dev; struct hdaa_devinfo *devinfo; struct snd_mixer *mixer; int index; int registered; int playas, recas; u_char left[SOUND_MIXER_NRDEVICES]; u_char right[SOUND_MIXER_NRDEVICES]; int minamp[SOUND_MIXER_NRDEVICES]; /* Minimal amps in 1/4dB. */ int maxamp[SOUND_MIXER_NRDEVICES]; /* Maximal amps in 1/4dB. */ int chan_size; int chan_blkcnt; u_char digital; uint32_t ossmask; /* Mask of supported OSS devices. */ uint32_t recsrc; /* Mask of supported OSS sources. */ int autorecsrc; }; struct hdaa_devinfo { device_t dev; struct mtx *lock; nid_t nid; nid_t startnode, endnode; uint32_t outamp_cap; uint32_t inamp_cap; uint32_t supp_stream_formats; uint32_t supp_pcm_size_rate; uint32_t gpio_cap; uint32_t quirks; uint32_t newquirks; uint32_t gpio; uint32_t newgpio; uint32_t gpo; uint32_t newgpo; int nodecnt; int ctlcnt; int ascnt; int num_devs; int num_chans; struct hdaa_widget *widget; struct hdaa_audio_ctl *ctl; struct hdaa_audio_as *as; struct hdaa_pcm_devinfo *devs; struct hdaa_chan *chans; struct callout poll_jack; int poll_ival; }; #define HDAA_CHN_RUNNING 0x00000001 #define HDAA_CHN_SUSPEND 0x00000002 struct hdaa_chan { struct snd_dbuf *b; struct pcm_channel *c; struct pcmchan_caps caps; struct hdaa_devinfo *devinfo; struct hdaa_pcm_devinfo *pdevinfo; uint32_t spd, fmt, fmtlist[32], pcmrates[16]; uint32_t supp_stream_formats, supp_pcm_size_rate; uint32_t blkcnt, blksz; uint32_t *dmapos; uint32_t flags; int dir; int off; int sid; int bit16, bit32; int channels; /* Number of audio channels. */ int as; /* Number of association. */ int asindex; /* Index within association. */ nid_t io[16]; uint8_t stripecap; /* AND of stripecap of all ios. */ uint8_t stripectl; /* stripe to use to all ios. */ }; #define MINQDB(ctl) \ ((0 - (ctl)->offset) * ((ctl)->size + 1)) #define MAXQDB(ctl) \ (((ctl)->step - (ctl)->offset) * ((ctl)->size + 1)) #define RANGEQDB(ctl) \ ((ctl)->step * ((ctl)->size + 1)) #define VAL2QDB(ctl, val) \ (((ctl)->size + 1) * ((int)(val) - (ctl)->offset)) #define QDB2VAL(ctl, qdb) \ imax(imin((((qdb) + (ctl)->size / 2 * ((qdb) > 0 ? 1 : -1)) / \ ((ctl)->size + 1) + (ctl)->offset), (ctl)->step), 0) #define hdaa_codec_id(devinfo) \ (((uint32_t)hda_get_vendor_id(devinfo->dev) << 16) + \ hda_get_device_id(devinfo->dev)) #define hdaa_card_id(devinfo) \ (((uint32_t)hda_get_subdevice_id(devinfo->dev) << 16) + \ hda_get_subvendor_id(devinfo->dev)) struct hdaa_widget *hdaa_widget_get(struct hdaa_devinfo *, nid_t); uint32_t hdaa_widget_pin_patch(uint32_t config, const char *str); uint32_t hdaa_gpio_patch(uint32_t gpio, const char *str); void hdaa_patch(struct hdaa_devinfo *devinfo); void hdaa_patch_direct(struct hdaa_devinfo *devinfo); #endif Index: head/sys/dev/usb/controller/ehci.h =================================================================== --- head/sys/dev/usb/controller/ehci.h (revision 258779) +++ head/sys/dev/usb/controller/ehci.h (revision 258780) @@ -1,450 +1,450 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _EHCI_H_ #define _EHCI_H_ #define EHCI_MAX_DEVICES MIN(USB_MAX_DEVICES, 128) /* * Alignment NOTE: structures must be aligned so that the hardware can index * without performing addition. */ #define EHCI_FRAMELIST_ALIGN 0x1000 /* bytes */ #define EHCI_FRAMELIST_COUNT 1024 /* units */ #define EHCI_VIRTUAL_FRAMELIST_COUNT 128 /* units */ #if ((8*EHCI_VIRTUAL_FRAMELIST_COUNT) < USB_MAX_HS_ISOC_FRAMES_PER_XFER) #error "maximum number of high-speed isochronous frames is higher than supported!" #endif #if (EHCI_VIRTUAL_FRAMELIST_COUNT < USB_MAX_FS_ISOC_FRAMES_PER_XFER) #error "maximum number of full-speed isochronous frames is higher than supported!" #endif /* Link types */ #define EHCI_LINK_TERMINATE 0x00000001 #define EHCI_LINK_TYPE(x) ((x) & 0x00000006) #define EHCI_LINK_ITD 0x0 #define EHCI_LINK_QH 0x2 #define EHCI_LINK_SITD 0x4 #define EHCI_LINK_FSTN 0x6 #define EHCI_LINK_ADDR(x) ((x) &~ 0x1f) /* Structures alignment (bytes) */ #define EHCI_ITD_ALIGN 128 #define EHCI_SITD_ALIGN 64 #define EHCI_QTD_ALIGN 64 #define EHCI_QH_ALIGN 128 #define EHCI_FSTN_ALIGN 32 /* Data buffers are divided into one or more pages */ #define EHCI_PAGE_SIZE 0x1000 #if ((USB_PAGE_SIZE < EHCI_PAGE_SIZE) || (EHCI_PAGE_SIZE == 0) || \ (USB_PAGE_SIZE < EHCI_ITD_ALIGN) || (EHCI_ITD_ALIGN == 0) || \ (USB_PAGE_SIZE < EHCI_SITD_ALIGN) || (EHCI_SITD_ALIGN == 0) || \ (USB_PAGE_SIZE < EHCI_QTD_ALIGN) || (EHCI_QTD_ALIGN == 0) || \ (USB_PAGE_SIZE < EHCI_QH_ALIGN) || (EHCI_QH_ALIGN == 0) || \ (USB_PAGE_SIZE < EHCI_FSTN_ALIGN) || (EHCI_FSTN_ALIGN == 0)) #error "Invalid USB page size!" #endif /* * Isochronous Transfer Descriptor. This descriptor is used for high speed * transfers only. 
*/ struct ehci_itd { volatile uint32_t itd_next; volatile uint32_t itd_status[8]; #define EHCI_ITD_SET_LEN(x) ((x) << 16) #define EHCI_ITD_GET_LEN(x) (((x) >> 16) & 0xFFF) #define EHCI_ITD_IOC (1 << 15) #define EHCI_ITD_SET_PG(x) ((x) << 12) #define EHCI_ITD_GET_PG(x) (((x) >> 12) & 0x7) #define EHCI_ITD_SET_OFFS(x) (x) #define EHCI_ITD_GET_OFFS(x) (((x) >> 0) & 0xFFF) -#define EHCI_ITD_ACTIVE (1 << 31) +#define EHCI_ITD_ACTIVE (1U << 31) #define EHCI_ITD_DATABUFERR (1 << 30) #define EHCI_ITD_BABBLE (1 << 29) #define EHCI_ITD_XACTERR (1 << 28) volatile uint32_t itd_bp[7]; /* itd_bp[0] */ #define EHCI_ITD_SET_ADDR(x) (x) #define EHCI_ITD_GET_ADDR(x) (((x) >> 0) & 0x7F) #define EHCI_ITD_SET_ENDPT(x) ((x) << 8) #define EHCI_ITD_GET_ENDPT(x) (((x) >> 8) & 0xF) /* itd_bp[1] */ #define EHCI_ITD_SET_DIR_IN (1 << 11) #define EHCI_ITD_SET_DIR_OUT (0 << 11) #define EHCI_ITD_SET_MPL(x) (x) #define EHCI_ITD_GET_MPL(x) (((x) >> 0) & 0x7FF) volatile uint32_t itd_bp_hi[7]; /* * Extra information needed: */ uint32_t itd_self; struct ehci_itd *next; struct ehci_itd *prev; struct ehci_itd *obj_next; struct usb_page_cache *page_cache; } __aligned(EHCI_ITD_ALIGN); typedef struct ehci_itd ehci_itd_t; /* * Split Transaction Isochronous Transfer Descriptor. This descriptor is used * for full speed transfers only. */ struct ehci_sitd { volatile uint32_t sitd_next; volatile uint32_t sitd_portaddr; #define EHCI_SITD_SET_DIR_OUT (0 << 31) -#define EHCI_SITD_SET_DIR_IN (1 << 31) +#define EHCI_SITD_SET_DIR_IN (1U << 31) #define EHCI_SITD_SET_ADDR(x) (x) #define EHCI_SITD_GET_ADDR(x) ((x) & 0x7F) #define EHCI_SITD_SET_ENDPT(x) ((x) << 8) #define EHCI_SITD_GET_ENDPT(x) (((x) >> 8) & 0xF) #define EHCI_SITD_GET_DIR(x) ((x) >> 31) #define EHCI_SITD_SET_PORT(x) ((x) << 24) #define EHCI_SITD_GET_PORT(x) (((x) >> 24) & 0x7F) #define EHCI_SITD_SET_HUBA(x) ((x) << 16) #define EHCI_SITD_GET_HUBA(x) (((x) >> 16) & 0x7F) volatile uint32_t sitd_mask; #define EHCI_SITD_SET_SMASK(x) (x) #define EHCI_SITD_SET_CMASK(x) ((x) << 8) volatile uint32_t sitd_status; #define EHCI_SITD_COMPLETE_SPLIT (1<<1) #define EHCI_SITD_START_SPLIT (0<<1) #define EHCI_SITD_MISSED_MICRO_FRAME (1<<2) #define EHCI_SITD_XACTERR (1<<3) #define EHCI_SITD_BABBLE (1<<4) #define EHCI_SITD_DATABUFERR (1<<5) #define EHCI_SITD_ERROR (1<<6) #define EHCI_SITD_ACTIVE (1<<7) #define EHCI_SITD_IOC (1<<31) #define EHCI_SITD_SET_LEN(len) ((len)<<16) #define EHCI_SITD_GET_LEN(x) (((x)>>16) & 0x3FF) volatile uint32_t sitd_bp[2]; volatile uint32_t sitd_back; volatile uint32_t sitd_bp_hi[2]; /* * Extra information needed: */ uint32_t sitd_self; struct ehci_sitd *next; struct ehci_sitd *prev; struct ehci_sitd *obj_next; struct usb_page_cache *page_cache; } __aligned(EHCI_SITD_ALIGN); typedef struct ehci_sitd ehci_sitd_t; /* Queue Element Transfer Descriptor */ struct ehci_qtd { volatile uint32_t qtd_next; volatile uint32_t qtd_altnext; volatile uint32_t qtd_status; #define EHCI_QTD_GET_STATUS(x) (((x) >> 0) & 0xff) #define EHCI_QTD_SET_STATUS(x) ((x) << 0) #define EHCI_QTD_ACTIVE 0x80 #define EHCI_QTD_HALTED 0x40 #define EHCI_QTD_BUFERR 0x20 #define EHCI_QTD_BABBLE 0x10 #define EHCI_QTD_XACTERR 0x08 #define EHCI_QTD_MISSEDMICRO 0x04 #define EHCI_QTD_SPLITXSTATE 0x02 #define EHCI_QTD_PINGSTATE 0x01 #define EHCI_QTD_STATERRS 0x74 #define EHCI_QTD_GET_PID(x) (((x) >> 8) & 0x3) #define EHCI_QTD_SET_PID(x) ((x) << 8) #define EHCI_QTD_PID_OUT 0x0 #define EHCI_QTD_PID_IN 0x1 #define EHCI_QTD_PID_SETUP 0x2 #define EHCI_QTD_GET_CERR(x) (((x) >> 10) & 0x3) #define EHCI_QTD_SET_CERR(x) 
((x) << 10) #define EHCI_QTD_GET_C_PAGE(x) (((x) >> 12) & 0x7) #define EHCI_QTD_SET_C_PAGE(x) ((x) << 12) #define EHCI_QTD_GET_IOC(x) (((x) >> 15) & 0x1) #define EHCI_QTD_IOC 0x00008000 #define EHCI_QTD_GET_BYTES(x) (((x) >> 16) & 0x7fff) #define EHCI_QTD_SET_BYTES(x) ((x) << 16) #define EHCI_QTD_GET_TOGGLE(x) (((x) >> 31) & 0x1) #define EHCI_QTD_SET_TOGGLE(x) ((x) << 31) #define EHCI_QTD_TOGGLE_MASK 0x80000000 #define EHCI_QTD_NBUFFERS 5 #define EHCI_QTD_PAYLOAD_MAX ((EHCI_QTD_NBUFFERS-1)*EHCI_PAGE_SIZE) volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS]; volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS]; /* * Extra information needed: */ struct ehci_qtd *alt_next; struct ehci_qtd *obj_next; struct usb_page_cache *page_cache; uint32_t qtd_self; uint16_t len; } __aligned(EHCI_QTD_ALIGN); typedef struct ehci_qtd ehci_qtd_t; /* Queue Head Sub Structure */ struct ehci_qh_sub { volatile uint32_t qtd_next; volatile uint32_t qtd_altnext; volatile uint32_t qtd_status; volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS]; volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS]; } __aligned(4); /* Queue Head */ struct ehci_qh { volatile uint32_t qh_link; volatile uint32_t qh_endp; #define EHCI_QH_GET_ADDR(x) (((x) >> 0) & 0x7f) /* endpoint addr */ #define EHCI_QH_SET_ADDR(x) (x) #define EHCI_QH_ADDRMASK 0x0000007f #define EHCI_QH_GET_INACT(x) (((x) >> 7) & 0x01) /* inactivate on next */ #define EHCI_QH_INACT 0x00000080 #define EHCI_QH_GET_ENDPT(x) (((x) >> 8) & 0x0f) /* endpoint no */ #define EHCI_QH_SET_ENDPT(x) ((x) << 8) #define EHCI_QH_GET_EPS(x) (((x) >> 12) & 0x03) /* endpoint speed */ #define EHCI_QH_SET_EPS(x) ((x) << 12) #define EHCI_QH_SPEED_FULL 0x0 #define EHCI_QH_SPEED_LOW 0x1 #define EHCI_QH_SPEED_HIGH 0x2 #define EHCI_QH_GET_DTC(x) (((x) >> 14) & 0x01) /* data toggle control */ #define EHCI_QH_DTC 0x00004000 #define EHCI_QH_GET_HRECL(x) (((x) >> 15) & 0x01) /* head of reclamation */ #define EHCI_QH_HRECL 0x00008000 #define EHCI_QH_GET_MPL(x) (((x) >> 16) & 0x7ff) /* max packet len */ #define EHCI_QH_SET_MPL(x) ((x) << 16) #define EHCI_QH_MPLMASK 0x07ff0000 #define EHCI_QH_GET_CTL(x) (((x) >> 27) & 0x01) /* control endpoint */ #define EHCI_QH_CTL 0x08000000 #define EHCI_QH_GET_NRL(x) (((x) >> 28) & 0x0f) /* NAK reload */ #define EHCI_QH_SET_NRL(x) ((x) << 28) volatile uint32_t qh_endphub; #define EHCI_QH_GET_SMASK(x) (((x) >> 0) & 0xff) /* intr sched mask */ #define EHCI_QH_SET_SMASK(x) ((x) << 0) #define EHCI_QH_GET_CMASK(x) (((x) >> 8) & 0xff) /* split completion mask */ #define EHCI_QH_SET_CMASK(x) ((x) << 8) #define EHCI_QH_GET_HUBA(x) (((x) >> 16) & 0x7f) /* hub address */ #define EHCI_QH_SET_HUBA(x) ((x) << 16) #define EHCI_QH_GET_PORT(x) (((x) >> 23) & 0x7f) /* hub port */ #define EHCI_QH_SET_PORT(x) ((x) << 23) #define EHCI_QH_GET_MULT(x) (((x) >> 30) & 0x03) /* pipe multiplier */ #define EHCI_QH_SET_MULT(x) ((x) << 30) volatile uint32_t qh_curqtd; struct ehci_qh_sub qh_qtd; /* * Extra information needed: */ struct ehci_qh *next; struct ehci_qh *prev; struct ehci_qh *obj_next; struct usb_page_cache *page_cache; uint32_t qh_self; } __aligned(EHCI_QH_ALIGN); typedef struct ehci_qh ehci_qh_t; /* Periodic Frame Span Traversal Node */ struct ehci_fstn { volatile uint32_t fstn_link; volatile uint32_t fstn_back; } __aligned(EHCI_FSTN_ALIGN); typedef struct ehci_fstn ehci_fstn_t; struct ehci_hw_softc { struct usb_page_cache pframes_pc; struct usb_page_cache terminate_pc; struct usb_page_cache async_start_pc; struct usb_page_cache intr_start_pc[EHCI_VIRTUAL_FRAMELIST_COUNT]; struct usb_page_cache 
isoc_hs_start_pc[EHCI_VIRTUAL_FRAMELIST_COUNT]; struct usb_page_cache isoc_fs_start_pc[EHCI_VIRTUAL_FRAMELIST_COUNT]; struct usb_page pframes_pg; struct usb_page terminate_pg; struct usb_page async_start_pg; struct usb_page intr_start_pg[EHCI_VIRTUAL_FRAMELIST_COUNT]; struct usb_page isoc_hs_start_pg[EHCI_VIRTUAL_FRAMELIST_COUNT]; struct usb_page isoc_fs_start_pg[EHCI_VIRTUAL_FRAMELIST_COUNT]; }; struct ehci_config_desc { struct usb_config_descriptor confd; struct usb_interface_descriptor ifcd; struct usb_endpoint_descriptor endpd; } __packed; union ehci_hub_desc { struct usb_status stat; struct usb_port_status ps; struct usb_hub_descriptor hubd; uint8_t temp[128]; }; typedef struct ehci_softc { struct ehci_hw_softc sc_hw; struct usb_bus sc_bus; /* base device */ struct usb_callout sc_tmo_pcd; struct usb_callout sc_tmo_poll; union ehci_hub_desc sc_hub_desc; struct usb_device *sc_devices[EHCI_MAX_DEVICES]; struct resource *sc_io_res; struct resource *sc_irq_res; struct ehci_qh *sc_async_p_last; struct ehci_qh *sc_intr_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]; struct ehci_sitd *sc_isoc_fs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]; struct ehci_itd *sc_isoc_hs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]; void *sc_intr_hdl; bus_size_t sc_io_size; bus_space_tag_t sc_io_tag; bus_space_handle_t sc_io_hdl; uint32_t sc_terminate_self; /* TD short packet termination pointer */ uint32_t sc_eintrs; uint16_t sc_intr_stat[EHCI_VIRTUAL_FRAMELIST_COUNT]; uint16_t sc_id_vendor; /* vendor ID for root hub */ uint16_t sc_flags; /* chip specific flags */ #define EHCI_SCFLG_SETMODE 0x0001 /* set bridge mode again after init */ #define EHCI_SCFLG_FORCESPEED 0x0002 /* force speed */ #define EHCI_SCFLG_NORESTERM 0x0004 /* don't terminate reset sequence */ #define EHCI_SCFLG_BIGEDESC 0x0008 /* big-endian byte order descriptors */ #define EHCI_SCFLG_BIGEMMIO 0x0010 /* big-endian byte order MMIO */ #define EHCI_SCFLG_TT 0x0020 /* transaction translator present */ #define EHCI_SCFLG_LOSTINTRBUG 0x0040 /* workaround for VIA / ATI chipsets */ #define EHCI_SCFLG_IAADBUG 0x0080 /* workaround for nVidia chipsets */ #define EHCI_SCFLG_DONTRESET 0x0100 /* don't reset ctrl. in ehci_init() */ #define EHCI_SCFLG_DONEINIT 0x1000 /* ehci_init() has been called. 
*/ uint8_t sc_offs; /* offset to operational registers */ uint8_t sc_doorbell_disable; /* set on doorbell failure */ uint8_t sc_noport; uint8_t sc_addr; /* device address */ uint8_t sc_conf; /* device configuration */ uint8_t sc_isreset; uint8_t sc_hub_idata[8]; char sc_vendor[16]; /* vendor string for root hub */ } ehci_softc_t; #define EREAD1(sc, a) bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (a)) #define EREAD2(sc, a) bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (a)) #define EREAD4(sc, a) bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (a)) #define EWRITE1(sc, a, x) \ bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), (x)) #define EWRITE2(sc, a, x) \ bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), (x)) #define EWRITE4(sc, a, x) \ bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), (x)) #define EOREAD1(sc, a) \ bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a)) #define EOREAD2(sc, a) \ bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a)) #define EOREAD4(sc, a) \ bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a)) #define EOWRITE1(sc, a, x) \ bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), (x)) #define EOWRITE2(sc, a, x) \ bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), (x)) #define EOWRITE4(sc, a, x) \ bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), (x)) #ifdef USB_EHCI_BIG_ENDIAN_DESC /* * Handle byte order conversion between host and ``host controller''. * Typically the latter is little-endian but some controllers require * big-endian in which case we may need to manually swap. */ static __inline uint32_t htohc32(const struct ehci_softc *sc, const uint32_t v) { return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? htobe32(v) : htole32(v); } static __inline uint16_t htohc16(const struct ehci_softc *sc, const uint16_t v) { return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? htobe16(v) : htole16(v); } static __inline uint32_t hc32toh(const struct ehci_softc *sc, const uint32_t v) { return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? be32toh(v) : le32toh(v); } static __inline uint16_t hc16toh(const struct ehci_softc *sc, const uint16_t v) { return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? be16toh(v) : le16toh(v); } #else /* * Normal little-endian only conversion routines. */ static __inline uint32_t htohc32(const struct ehci_softc *sc, const uint32_t v) { return htole32(v); } static __inline uint16_t htohc16(const struct ehci_softc *sc, const uint16_t v) { return htole16(v); } static __inline uint32_t hc32toh(const struct ehci_softc *sc, const uint32_t v) { return le32toh(v); } static __inline uint16_t hc16toh(const struct ehci_softc *sc, const uint16_t v) { return le16toh(v); } #endif usb_bus_mem_cb_t ehci_iterate_hw_softc; usb_error_t ehci_reset(ehci_softc_t *sc); usb_error_t ehci_init(ehci_softc_t *sc); void ehci_detach(struct ehci_softc *sc); void ehci_interrupt(ehci_softc_t *sc); #endif /* _EHCI_H_ */ Index: head/sys/dev/usb/wlan/if_rumreg.h =================================================================== --- head/sys/dev/usb/wlan/if_rumreg.h (revision 258779) +++ head/sys/dev/usb/wlan/if_rumreg.h (revision 258780) @@ -1,235 +1,235 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 Damien Bergamini * Copyright (c) 2006 Niall O'Higgins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define RT2573_NOISE_FLOOR -95 #define RT2573_TX_DESC_SIZE (sizeof (struct rum_tx_desc)) #define RT2573_RX_DESC_SIZE (sizeof (struct rum_rx_desc)) #define RT2573_CONFIG_NO 1 #define RT2573_IFACE_INDEX 0 #define RT2573_MCU_CNTL 0x01 #define RT2573_WRITE_MAC 0x02 #define RT2573_READ_MAC 0x03 #define RT2573_WRITE_MULTI_MAC 0x06 #define RT2573_READ_MULTI_MAC 0x07 #define RT2573_READ_EEPROM 0x09 #define RT2573_WRITE_LED 0x0a /* * Control and status registers. */ #define RT2573_AIFSN_CSR 0x0400 #define RT2573_CWMIN_CSR 0x0404 #define RT2573_CWMAX_CSR 0x0408 #define RT2573_MCU_CODE_BASE 0x0800 #define RT2573_HW_BEACON_BASE0 0x2400 #define RT2573_MAC_CSR0 0x3000 #define RT2573_MAC_CSR1 0x3004 #define RT2573_MAC_CSR2 0x3008 #define RT2573_MAC_CSR3 0x300c #define RT2573_MAC_CSR4 0x3010 #define RT2573_MAC_CSR5 0x3014 #define RT2573_MAC_CSR6 0x3018 #define RT2573_MAC_CSR7 0x301c #define RT2573_MAC_CSR8 0x3020 #define RT2573_MAC_CSR9 0x3024 #define RT2573_MAC_CSR10 0x3028 #define RT2573_MAC_CSR11 0x302c #define RT2573_MAC_CSR12 0x3030 #define RT2573_MAC_CSR13 0x3034 #define RT2573_MAC_CSR14 0x3038 #define RT2573_MAC_CSR15 0x303c #define RT2573_TXRX_CSR0 0x3040 #define RT2573_TXRX_CSR1 0x3044 #define RT2573_TXRX_CSR2 0x3048 #define RT2573_TXRX_CSR3 0x304c #define RT2573_TXRX_CSR4 0x3050 #define RT2573_TXRX_CSR5 0x3054 #define RT2573_TXRX_CSR6 0x3058 #define RT2573_TXRX_CSR7 0x305c #define RT2573_TXRX_CSR8 0x3060 #define RT2573_TXRX_CSR9 0x3064 #define RT2573_TXRX_CSR10 0x3068 #define RT2573_TXRX_CSR11 0x306c #define RT2573_TXRX_CSR12 0x3070 #define RT2573_TXRX_CSR13 0x3074 #define RT2573_TXRX_CSR14 0x3078 #define RT2573_TXRX_CSR15 0x307c #define RT2573_PHY_CSR0 0x3080 #define RT2573_PHY_CSR1 0x3084 #define RT2573_PHY_CSR2 0x3088 #define RT2573_PHY_CSR3 0x308c #define RT2573_PHY_CSR4 0x3090 #define RT2573_PHY_CSR5 0x3094 #define RT2573_PHY_CSR6 0x3098 #define RT2573_PHY_CSR7 0x309c #define RT2573_SEC_CSR0 0x30a0 #define RT2573_SEC_CSR1 0x30a4 #define RT2573_SEC_CSR2 0x30a8 #define RT2573_SEC_CSR3 0x30ac #define RT2573_SEC_CSR4 0x30b0 #define RT2573_SEC_CSR5 0x30b4 #define RT2573_STA_CSR0 0x30c0 #define RT2573_STA_CSR1 0x30c4 #define RT2573_STA_CSR2 0x30c8 #define RT2573_STA_CSR3 0x30cc #define RT2573_STA_CSR4 0x30d0 #define RT2573_STA_CSR5 0x30d4 /* possible flags for register RT2573_MAC_CSR1 */ #define RT2573_RESET_ASIC (1 << 0) #define RT2573_RESET_BBP (1 << 1) #define RT2573_HOST_READY (1 << 2) /* possible flags for register MAC_CSR5 */ #define RT2573_ONE_BSSID 3 /* possible flags for register TXRX_CSR0 */ /* Tx filter flags are in the low 16 bits */ #define RT2573_AUTO_TX_SEQ (1 << 15) /* Rx filter flags are in the high 16 bits */ #define RT2573_DISABLE_RX (1 << 16) #define RT2573_DROP_CRC_ERROR (1 << 17) #define RT2573_DROP_PHY_ERROR (1 << 18) #define RT2573_DROP_CTL (1 << 19) #define RT2573_DROP_NOT_TO_ME (1 << 20) #define RT2573_DROP_TODS (1 << 21) #define RT2573_DROP_VER_ERROR (1 << 22) #define RT2573_DROP_MULTICAST (1 << 23) #define RT2573_DROP_BROADCAST (1 << 24) #define RT2573_DROP_ACKCTS (1 << 25) 
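The substantive change this revision makes throughout these register headers is mechanical: every bit-31 flag, such as RT2573_RF_BUSY further below, is rewritten from (1 << 31) to (1U << 31). Left-shifting the signed constant 1 into the sign bit of a 32-bit int is undefined behavior in C99 and later, and even where it happens to produce INT_MIN, that value sign-extends when widened, which is wrong for an unsigned 32-bit device register. A minimal standalone sketch of the difference follows; the RF_BUSY macro here is a hypothetical stand-in for the driver flags, not part of this commit:

	#include <inttypes.h>
	#include <stdio.h>

	#define RF_BUSY	(1U << 31)	/* hypothetical stand-in; well defined: 0x80000000 */

	int
	main(void)
	{
		uint32_t reg = 0;
		uint64_t wide;

		reg |= RF_BUSY;		/* set the busy bit, as a driver poll loop would */
		printf("reg  = 0x%08" PRIx32 "\n", reg);

		/*
		 * Widening is safe with the unsigned form: wide becomes
		 * 0x0000000080000000.  The old signed (1 << 31) would have
		 * sign-extended here instead.
		 */
		wide = RF_BUSY;
		printf("wide = 0x%016" PRIx64 "\n", wide);
		return (0);
	}

With the old signed form, the second printf would show 0xffffffff80000000 on common LP64 platforms, since -2147483648 sign-extends during the conversion to uint64_t.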
/* possible flags for register TXRX_CSR4 */ #define RT2573_SHORT_PREAMBLE (1 << 18) #define RT2573_MRR_ENABLED (1 << 19) #define RT2573_MRR_CCK_FALLBACK (1 << 22) /* possible flags for register TXRX_CSR9 */ #define RT2573_TSF_TICKING (1 << 16) #define RT2573_TSF_MODE(x) (((x) & 0x3) << 17) /* TBTT stands for Target Beacon Transmission Time */ #define RT2573_ENABLE_TBTT (1 << 19) #define RT2573_GENERATE_BEACON (1 << 20) /* possible flags for register PHY_CSR0 */ #define RT2573_PA_PE_2GHZ (1 << 16) #define RT2573_PA_PE_5GHZ (1 << 17) /* possible flags for register PHY_CSR3 */ #define RT2573_BBP_READ (1 << 15) #define RT2573_BBP_BUSY (1 << 16) /* possible flags for register PHY_CSR4 */ #define RT2573_RF_20BIT (20 << 24) -#define RT2573_RF_BUSY (1 << 31) +#define RT2573_RF_BUSY (1U << 31) /* LED values */ #define RT2573_LED_RADIO (1 << 8) #define RT2573_LED_G (1 << 9) #define RT2573_LED_A (1 << 10) #define RT2573_LED_ON 0x1e1e #define RT2573_LED_OFF 0x0 #define RT2573_MCU_RUN (1 << 3) #define RT2573_SMART_MODE (1 << 0) #define RT2573_BBPR94_DEFAULT 6 #define RT2573_BBP_WRITE (1 << 15) /* dual-band RF */ #define RT2573_RF_5226 1 #define RT2573_RF_5225 3 /* single-band RF */ #define RT2573_RF_2528 2 #define RT2573_RF_2527 4 #define RT2573_BBP_VERSION 0 struct rum_tx_desc { uint32_t flags; #define RT2573_TX_BURST (1 << 0) #define RT2573_TX_VALID (1 << 1) #define RT2573_TX_MORE_FRAG (1 << 2) #define RT2573_TX_NEED_ACK (1 << 3) #define RT2573_TX_TIMESTAMP (1 << 4) #define RT2573_TX_OFDM (1 << 5) #define RT2573_TX_IFS_SIFS (1 << 6) #define RT2573_TX_LONG_RETRY (1 << 7) uint16_t wme; #define RT2573_QID(v) (v) #define RT2573_AIFSN(v) ((v) << 4) #define RT2573_LOGCWMIN(v) ((v) << 8) #define RT2573_LOGCWMAX(v) ((v) << 12) uint16_t xflags; #define RT2573_TX_HWSEQ (1 << 12) uint8_t plcp_signal; uint8_t plcp_service; #define RT2573_PLCP_LENGEXT 0x80 uint8_t plcp_length_lo; uint8_t plcp_length_hi; uint32_t iv; uint32_t eiv; uint8_t offset; uint8_t qid; uint8_t txpower; #define RT2573_DEFAULT_TXPOWER 0 uint8_t reserved; } __packed; struct rum_rx_desc { uint32_t flags; #define RT2573_RX_BUSY (1 << 0) #define RT2573_RX_DROP (1 << 1) #define RT2573_RX_CRC_ERROR (1 << 6) #define RT2573_RX_OFDM (1 << 7) uint8_t rate; uint8_t rssi; uint8_t reserved1; uint8_t offset; uint32_t iv; uint32_t eiv; uint32_t reserved2[2]; } __packed; #define RT2573_RF1 0 #define RT2573_RF2 2 #define RT2573_RF3 1 #define RT2573_RF4 3 #define RT2573_EEPROM_MACBBP 0x0000 #define RT2573_EEPROM_ADDRESS 0x0004 #define RT2573_EEPROM_ANTENNA 0x0020 #define RT2573_EEPROM_CONFIG2 0x0022 #define RT2573_EEPROM_BBP_BASE 0x0026 #define RT2573_EEPROM_TXPOWER 0x0046 #define RT2573_EEPROM_FREQ_OFFSET 0x005e #define RT2573_EEPROM_RSSI_2GHZ_OFFSET 0x009a #define RT2573_EEPROM_RSSI_5GHZ_OFFSET 0x009c Index: head/sys/dev/usb/wlan/if_runreg.h =================================================================== --- head/sys/dev/usb/wlan/if_runreg.h (revision 258779) +++ head/sys/dev/usb/wlan/if_runreg.h (revision 258780) @@ -1,1327 +1,1327 @@ /* $OpenBSD: rt2860reg.h,v 1.19 2009/05/18 19:25:07 damien Exp $ */ /*- * Copyright (c) 2007 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $FreeBSD$ */ #ifndef _IF_RUNREG_H_ #define _IF_RUNREG_H_ /* PCI registers */ #define RT2860_PCI_CFG 0x0000 #define RT2860_PCI_EECTRL 0x0004 #define RT2860_PCI_MCUCTRL 0x0008 #define RT2860_PCI_SYSCTRL 0x000c #define RT2860_PCIE_JTAG 0x0010 #define RT2860_CONFIG_NO 1 #define RT2860_IFACE_INDEX 0 #define RT3070_OPT_14 0x0114 /* SCH/DMA registers */ #define RT2860_INT_STATUS 0x0200 #define RT2860_INT_MASK 0x0204 #define RT2860_WPDMA_GLO_CFG 0x0208 #define RT2860_WPDMA_RST_IDX 0x020c #define RT2860_DELAY_INT_CFG 0x0210 #define RT2860_WMM_AIFSN_CFG 0x0214 #define RT2860_WMM_CWMIN_CFG 0x0218 #define RT2860_WMM_CWMAX_CFG 0x021c #define RT2860_WMM_TXOP0_CFG 0x0220 #define RT2860_WMM_TXOP1_CFG 0x0224 #define RT2860_GPIO_CTRL 0x0228 #define RT2860_MCU_CMD_REG 0x022c #define RT2860_TX_BASE_PTR(qid) (0x0230 + (qid) * 16) #define RT2860_TX_MAX_CNT(qid) (0x0234 + (qid) * 16) #define RT2860_TX_CTX_IDX(qid) (0x0238 + (qid) * 16) #define RT2860_TX_DTX_IDX(qid) (0x023c + (qid) * 16) #define RT2860_RX_BASE_PTR 0x0290 #define RT2860_RX_MAX_CNT 0x0294 #define RT2860_RX_CALC_IDX 0x0298 #define RT2860_FS_DRX_IDX 0x029c #define RT2860_USB_DMA_CFG 0x02a0 /* RT2870 only */ #define RT2860_US_CYC_CNT 0x02a4 /* PBF registers */ #define RT2860_SYS_CTRL 0x0400 #define RT2860_HOST_CMD 0x0404 #define RT2860_PBF_CFG 0x0408 #define RT2860_MAX_PCNT 0x040c #define RT2860_BUF_CTRL 0x0410 #define RT2860_MCU_INT_STA 0x0414 #define RT2860_MCU_INT_ENA 0x0418 #define RT2860_TXQ_IO(qid) (0x041c + (qid) * 4) #define RT2860_RX0Q_IO 0x0424 #define RT2860_BCN_OFFSET0 0x042c #define RT2860_BCN_OFFSET1 0x0430 #define RT2860_TXRXQ_STA 0x0434 #define RT2860_TXRXQ_PCNT 0x0438 #define RT2860_PBF_DBG 0x043c #define RT2860_CAP_CTRL 0x0440 /* RT3070 registers */ #define RT3070_RF_CSR_CFG 0x0500 #define RT3070_EFUSE_CTRL 0x0580 #define RT3070_EFUSE_DATA0 0x0590 #define RT3070_EFUSE_DATA1 0x0594 #define RT3070_EFUSE_DATA2 0x0598 #define RT3070_EFUSE_DATA3 0x059c #define RT3070_LDO_CFG0 0x05d4 #define RT3070_GPIO_SWITCH 0x05dc /* MAC registers */ #define RT2860_ASIC_VER_ID 0x1000 #define RT2860_MAC_SYS_CTRL 0x1004 #define RT2860_MAC_ADDR_DW0 0x1008 #define RT2860_MAC_ADDR_DW1 0x100c #define RT2860_MAC_BSSID_DW0 0x1010 #define RT2860_MAC_BSSID_DW1 0x1014 #define RT2860_MAX_LEN_CFG 0x1018 #define RT2860_BBP_CSR_CFG 0x101c #define RT2860_RF_CSR_CFG0 0x1020 #define RT2860_RF_CSR_CFG1 0x1024 #define RT2860_RF_CSR_CFG2 0x1028 #define RT2860_LED_CFG 0x102c /* undocumented registers */ #define RT2860_DEBUG 0x10f4 /* MAC Timing control registers */ #define RT2860_XIFS_TIME_CFG 0x1100 #define RT2860_BKOFF_SLOT_CFG 0x1104 #define RT2860_NAV_TIME_CFG 0x1108 #define RT2860_CH_TIME_CFG 0x110c #define RT2860_PBF_LIFE_TIMER 0x1110 #define RT2860_BCN_TIME_CFG 0x1114 #define RT2860_TBTT_SYNC_CFG 0x1118 #define RT2860_TSF_TIMER_DW0 0x111c #define RT2860_TSF_TIMER_DW1 0x1120 #define RT2860_TBTT_TIMER 0x1124 #define RT2860_INT_TIMER_CFG 0x1128 #define RT2860_INT_TIMER_EN 0x112c #define RT2860_CH_IDLE_TIME 0x1130 /* MAC Power Save configuration registers */ #define RT2860_MAC_STATUS_REG 0x1200 #define RT2860_PWR_PIN_CFG 0x1204 #define RT2860_AUTO_WAKEUP_CFG 0x1208 /* MAC TX configuration registers */ #define RT2860_EDCA_AC_CFG(aci) (0x1300 + 
(aci) * 4) #define RT2860_EDCA_TID_AC_MAP 0x1310 #define RT2860_TX_PWR_CFG(ridx) (0x1314 + (ridx) * 4) #define RT2860_TX_PIN_CFG 0x1328 #define RT2860_TX_BAND_CFG 0x132c #define RT2860_TX_SW_CFG0 0x1330 #define RT2860_TX_SW_CFG1 0x1334 #define RT2860_TX_SW_CFG2 0x1338 #define RT2860_TXOP_THRES_CFG 0x133c #define RT2860_TXOP_CTRL_CFG 0x1340 #define RT2860_TX_RTS_CFG 0x1344 #define RT2860_TX_TIMEOUT_CFG 0x1348 #define RT2860_TX_RTY_CFG 0x134c #define RT2860_TX_LINK_CFG 0x1350 #define RT2860_HT_FBK_CFG0 0x1354 #define RT2860_HT_FBK_CFG1 0x1358 #define RT2860_LG_FBK_CFG0 0x135c #define RT2860_LG_FBK_CFG1 0x1360 #define RT2860_CCK_PROT_CFG 0x1364 #define RT2860_OFDM_PROT_CFG 0x1368 #define RT2860_MM20_PROT_CFG 0x136c #define RT2860_MM40_PROT_CFG 0x1370 #define RT2860_GF20_PROT_CFG 0x1374 #define RT2860_GF40_PROT_CFG 0x1378 #define RT2860_EXP_CTS_TIME 0x137c #define RT2860_EXP_ACK_TIME 0x1380 /* MAC RX configuration registers */ #define RT2860_RX_FILTR_CFG 0x1400 #define RT2860_AUTO_RSP_CFG 0x1404 #define RT2860_LEGACY_BASIC_RATE 0x1408 #define RT2860_HT_BASIC_RATE 0x140c #define RT2860_HT_CTRL_CFG 0x1410 #define RT2860_SIFS_COST_CFG 0x1414 #define RT2860_RX_PARSER_CFG 0x1418 /* MAC Security configuration registers */ #define RT2860_TX_SEC_CNT0 0x1500 #define RT2860_RX_SEC_CNT0 0x1504 #define RT2860_CCMP_FC_MUTE 0x1508 /* MAC HCCA/PSMP configuration registers */ #define RT2860_TXOP_HLDR_ADDR0 0x1600 #define RT2860_TXOP_HLDR_ADDR1 0x1604 #define RT2860_TXOP_HLDR_ET 0x1608 #define RT2860_QOS_CFPOLL_RA_DW0 0x160c #define RT2860_QOS_CFPOLL_A1_DW1 0x1610 #define RT2860_QOS_CFPOLL_QC 0x1614 /* MAC Statistics Counters */ #define RT2860_RX_STA_CNT0 0x1700 #define RT2860_RX_STA_CNT1 0x1704 #define RT2860_RX_STA_CNT2 0x1708 #define RT2860_TX_STA_CNT0 0x170c #define RT2860_TX_STA_CNT1 0x1710 #define RT2860_TX_STA_CNT2 0x1714 #define RT2860_TX_STAT_FIFO 0x1718 /* RX WCID search table */ #define RT2860_WCID_ENTRY(wcid) (0x1800 + (wcid) * 8) #define RT2860_FW_BASE 0x2000 #define RT2870_FW_BASE 0x3000 /* Pair-wise key table */ #define RT2860_PKEY(wcid) (0x4000 + (wcid) * 32) /* IV/EIV table */ #define RT2860_IVEIV(wcid) (0x6000 + (wcid) * 8) /* WCID attribute table */ #define RT2860_WCID_ATTR(wcid) (0x6800 + (wcid) * 4) /* Shared Key Table */ #define RT2860_SKEY(vap, kidx) (0x6c00 + (vap) * 128 + (kidx) * 32) /* Shared Key Mode */ #define RT2860_SKEY_MODE_0_7 0x7000 #define RT2860_SKEY_MODE_8_15 0x7004 #define RT2860_SKEY_MODE_16_23 0x7008 #define RT2860_SKEY_MODE_24_31 0x700c /* Shared Memory between MCU and host */ #define RT2860_H2M_MAILBOX 0x7010 #define RT2860_H2M_MAILBOX_CID 0x7014 #define RT2860_H2M_MAILBOX_STATUS 0x701c #define RT2860_H2M_INTSRC 0x7024 #define RT2860_H2M_BBPAGENT 0x7028 #define RT2860_BCN_BASE(vap) (0x7800 + (vap) * 512) /* possible flags for register RT2860_PCI_EECTRL */ #define RT2860_C (1 << 0) #define RT2860_S (1 << 1) #define RT2860_D (1 << 2) #define RT2860_SHIFT_D 2 #define RT2860_Q (1 << 3) #define RT2860_SHIFT_Q 3 /* possible flags for registers INT_STATUS/INT_MASK */ #define RT2860_TX_COHERENT (1 << 17) #define RT2860_RX_COHERENT (1 << 16) #define RT2860_MAC_INT_4 (1 << 15) #define RT2860_MAC_INT_3 (1 << 14) #define RT2860_MAC_INT_2 (1 << 13) #define RT2860_MAC_INT_1 (1 << 12) #define RT2860_MAC_INT_0 (1 << 11) #define RT2860_TX_RX_COHERENT (1 << 10) #define RT2860_MCU_CMD_INT (1 << 9) #define RT2860_TX_DONE_INT5 (1 << 8) #define RT2860_TX_DONE_INT4 (1 << 7) #define RT2860_TX_DONE_INT3 (1 << 6) #define RT2860_TX_DONE_INT2 (1 << 5) #define RT2860_TX_DONE_INT1 (1 << 4) #define 
RT2860_TX_DONE_INT0 (1 << 3) #define RT2860_RX_DONE_INT (1 << 2) #define RT2860_TX_DLY_INT (1 << 1) #define RT2860_RX_DLY_INT (1 << 0) /* possible flags for register WPDMA_GLO_CFG */ #define RT2860_HDR_SEG_LEN_SHIFT 8 #define RT2860_BIG_ENDIAN (1 << 7) #define RT2860_TX_WB_DDONE (1 << 6) #define RT2860_WPDMA_BT_SIZE_SHIFT 4 #define RT2860_WPDMA_BT_SIZE16 0 #define RT2860_WPDMA_BT_SIZE32 1 #define RT2860_WPDMA_BT_SIZE64 2 #define RT2860_WPDMA_BT_SIZE128 3 #define RT2860_RX_DMA_BUSY (1 << 3) #define RT2860_RX_DMA_EN (1 << 2) #define RT2860_TX_DMA_BUSY (1 << 1) #define RT2860_TX_DMA_EN (1 << 0) /* possible flags for register DELAY_INT_CFG */ -#define RT2860_TXDLY_INT_EN (1 << 31) +#define RT2860_TXDLY_INT_EN (1U << 31) #define RT2860_TXMAX_PINT_SHIFT 24 #define RT2860_TXMAX_PTIME_SHIFT 16 #define RT2860_RXDLY_INT_EN (1 << 15) #define RT2860_RXMAX_PINT_SHIFT 8 #define RT2860_RXMAX_PTIME_SHIFT 0 /* possible flags for register GPIO_CTRL */ #define RT2860_GPIO_D_SHIFT 8 #define RT2860_GPIO_O_SHIFT 0 /* possible flags for register USB_DMA_CFG */ -#define RT2860_USB_TX_BUSY (1 << 31) +#define RT2860_USB_TX_BUSY (1U << 31) #define RT2860_USB_RX_BUSY (1 << 30) #define RT2860_USB_EPOUT_VLD_SHIFT 24 #define RT2860_USB_TX_EN (1 << 23) #define RT2860_USB_RX_EN (1 << 22) #define RT2860_USB_RX_AGG_EN (1 << 21) #define RT2860_USB_TXOP_HALT (1 << 20) #define RT2860_USB_TX_CLEAR (1 << 19) #define RT2860_USB_PHY_WD_EN (1 << 16) #define RT2860_USB_PHY_MAN_RST (1 << 15) #define RT2860_USB_RX_AGG_LMT(x) ((x) << 8) /* in unit of 1KB */ #define RT2860_USB_RX_AGG_TO(x) ((x) & 0xff) /* in unit of 33ns */ /* possible flags for register US_CYC_CNT */ #define RT2860_TEST_EN (1 << 24) #define RT2860_TEST_SEL_SHIFT 16 #define RT2860_BT_MODE_EN (1 << 8) #define RT2860_US_CYC_CNT_SHIFT 0 /* possible flags for register SYS_CTRL */ #define RT2860_HST_PM_SEL (1 << 16) #define RT2860_CAP_MODE (1 << 14) #define RT2860_PME_OEN (1 << 13) #define RT2860_CLKSELECT (1 << 12) #define RT2860_PBF_CLK_EN (1 << 11) #define RT2860_MAC_CLK_EN (1 << 10) #define RT2860_DMA_CLK_EN (1 << 9) #define RT2860_MCU_READY (1 << 7) #define RT2860_ASY_RESET (1 << 4) #define RT2860_PBF_RESET (1 << 3) #define RT2860_MAC_RESET (1 << 2) #define RT2860_DMA_RESET (1 << 1) #define RT2860_MCU_RESET (1 << 0) /* possible values for register HOST_CMD */ #define RT2860_MCU_CMD_SLEEP 0x30 #define RT2860_MCU_CMD_WAKEUP 0x31 #define RT2860_MCU_CMD_LEDS 0x50 #define RT2860_MCU_CMD_LED_RSSI 0x51 #define RT2860_MCU_CMD_LED1 0x52 #define RT2860_MCU_CMD_LED2 0x53 #define RT2860_MCU_CMD_LED3 0x54 #define RT2860_MCU_CMD_RFRESET 0x72 #define RT2860_MCU_CMD_ANTSEL 0x73 #define RT2860_MCU_CMD_BBP 0x80 #define RT2860_MCU_CMD_PSLEVEL 0x83 /* possible flags for register PBF_CFG */ #define RT2860_TX1Q_NUM_SHIFT 21 #define RT2860_TX2Q_NUM_SHIFT 16 #define RT2860_NULL0_MODE (1 << 15) #define RT2860_NULL1_MODE (1 << 14) #define RT2860_RX_DROP_MODE (1 << 13) #define RT2860_TX0Q_MANUAL (1 << 12) #define RT2860_TX1Q_MANUAL (1 << 11) #define RT2860_TX2Q_MANUAL (1 << 10) #define RT2860_RX0Q_MANUAL (1 << 9) #define RT2860_HCCA_EN (1 << 8) #define RT2860_TX0Q_EN (1 << 4) #define RT2860_TX1Q_EN (1 << 3) #define RT2860_TX2Q_EN (1 << 2) #define RT2860_RX0Q_EN (1 << 1) /* possible flags for register BUF_CTRL */ #define RT2860_WRITE_TXQ(qid) (1 << (11 - (qid))) #define RT2860_NULL0_KICK (1 << 7) #define RT2860_NULL1_KICK (1 << 6) #define RT2860_BUF_RESET (1 << 5) #define RT2860_READ_TXQ(qid) (1 << (3 - (qid))) #define RT2860_READ_RX0Q (1 << 0) /* possible flags for registers
MCU_INT_STA/MCU_INT_ENA */ #define RT2860_MCU_MAC_INT_8 (1 << 24) #define RT2860_MCU_MAC_INT_7 (1 << 23) #define RT2860_MCU_MAC_INT_6 (1 << 22) #define RT2860_MCU_MAC_INT_4 (1 << 20) #define RT2860_MCU_MAC_INT_3 (1 << 19) #define RT2860_MCU_MAC_INT_2 (1 << 18) #define RT2860_MCU_MAC_INT_1 (1 << 17) #define RT2860_MCU_MAC_INT_0 (1 << 16) #define RT2860_DTX0_INT (1 << 11) #define RT2860_DTX1_INT (1 << 10) #define RT2860_DTX2_INT (1 << 9) #define RT2860_DRX0_INT (1 << 8) #define RT2860_HCMD_INT (1 << 7) #define RT2860_N0TX_INT (1 << 6) #define RT2860_N1TX_INT (1 << 5) #define RT2860_BCNTX_INT (1 << 4) #define RT2860_MTX0_INT (1 << 3) #define RT2860_MTX1_INT (1 << 2) #define RT2860_MTX2_INT (1 << 1) #define RT2860_MRX0_INT (1 << 0) /* possible flags for register TXRXQ_PCNT */ #define RT2860_RX0Q_PCNT_MASK 0xff000000 #define RT2860_TX2Q_PCNT_MASK 0x00ff0000 #define RT2860_TX1Q_PCNT_MASK 0x0000ff00 #define RT2860_TX0Q_PCNT_MASK 0x000000ff /* possible flags for register CAP_CTRL */ -#define RT2860_CAP_ADC_FEQ (1 << 31) +#define RT2860_CAP_ADC_FEQ (1U << 31) #define RT2860_CAP_START (1 << 30) #define RT2860_MAN_TRIG (1 << 29) #define RT2860_TRIG_OFFSET_SHIFT 16 #define RT2860_START_ADDR_SHIFT 0 /* possible flags for register RF_CSR_CFG */ #define RT3070_RF_KICK (1 << 17) #define RT3070_RF_WRITE (1 << 16) /* possible flags for register EFUSE_CTRL */ -#define RT3070_SEL_EFUSE (1 << 31) +#define RT3070_SEL_EFUSE (1U << 31) #define RT3070_EFSROM_KICK (1 << 30) #define RT3070_EFSROM_AIN_MASK 0x03ff0000 #define RT3070_EFSROM_AIN_SHIFT 16 #define RT3070_EFSROM_MODE_MASK 0x000000c0 #define RT3070_EFUSE_AOUT_MASK 0x0000003f /* possible flags for register MAC_SYS_CTRL */ #define RT2860_RX_TS_EN (1 << 7) #define RT2860_WLAN_HALT_EN (1 << 6) #define RT2860_PBF_LOOP_EN (1 << 5) #define RT2860_CONT_TX_TEST (1 << 4) #define RT2860_MAC_RX_EN (1 << 3) #define RT2860_MAC_TX_EN (1 << 2) #define RT2860_BBP_HRST (1 << 1) #define RT2860_MAC_SRST (1 << 0) /* possible flags for register MAC_BSSID_DW1 */ #define RT2860_MULTI_BCN_NUM_SHIFT 18 #define RT2860_MULTI_BSSID_MODE_SHIFT 16 /* possible flags for register MAX_LEN_CFG */ #define RT2860_MIN_MPDU_LEN_SHIFT 16 #define RT2860_MAX_PSDU_LEN_SHIFT 12 #define RT2860_MAX_PSDU_LEN8K 0 #define RT2860_MAX_PSDU_LEN16K 1 #define RT2860_MAX_PSDU_LEN32K 2 #define RT2860_MAX_PSDU_LEN64K 3 #define RT2860_MAX_MPDU_LEN_SHIFT 0 /* possible flags for registers BBP_CSR_CFG/H2M_BBPAGENT */ #define RT2860_BBP_RW_PARALLEL (1 << 19) #define RT2860_BBP_PAR_DUR_112_5 (1 << 18) #define RT2860_BBP_CSR_KICK (1 << 17) #define RT2860_BBP_CSR_READ (1 << 16) #define RT2860_BBP_ADDR_SHIFT 8 #define RT2860_BBP_DATA_SHIFT 0 /* possible flags for register RF_CSR_CFG0 */ -#define RT2860_RF_REG_CTRL (1 << 31) +#define RT2860_RF_REG_CTRL (1U << 31) #define RT2860_RF_LE_SEL1 (1 << 30) #define RT2860_RF_LE_STBY (1 << 29) #define RT2860_RF_REG_WIDTH_SHIFT 24 #define RT2860_RF_REG_0_SHIFT 0 /* possible flags for register RF_CSR_CFG1 */ #define RT2860_RF_DUR_5 (1 << 24) #define RT2860_RF_REG_1_SHIFT 0 /* possible flags for register LED_CFG */ #define RT2860_LED_POL (1 << 30) #define RT2860_Y_LED_MODE_SHIFT 28 #define RT2860_G_LED_MODE_SHIFT 26 #define RT2860_R_LED_MODE_SHIFT 24 #define RT2860_LED_MODE_OFF 0 #define RT2860_LED_MODE_BLINK_TX 1 #define RT2860_LED_MODE_SLOW_BLINK 2 #define RT2860_LED_MODE_ON 3 #define RT2860_SLOW_BLK_TIME_SHIFT 16 #define RT2860_LED_OFF_TIME_SHIFT 8 #define RT2860_LED_ON_TIME_SHIFT 0 /* possible flags for register XIFS_TIME_CFG */ #define RT2860_BB_RXEND_EN (1 << 29) #define 
RT2860_EIFS_TIME_SHIFT 20 #define RT2860_OFDM_XIFS_TIME_SHIFT 16 #define RT2860_OFDM_SIFS_TIME_SHIFT 8 #define RT2860_CCK_SIFS_TIME_SHIFT 0 /* possible flags for register BKOFF_SLOT_CFG */ #define RT2860_CC_DELAY_TIME_SHIFT 8 #define RT2860_SLOT_TIME 0 /* possible flags for register NAV_TIME_CFG */ -#define RT2860_NAV_UPD (1 << 31) +#define RT2860_NAV_UPD (1U << 31) #define RT2860_NAV_UPD_VAL_SHIFT 16 #define RT2860_NAV_CLR_EN (1 << 15) #define RT2860_NAV_TIMER_SHIFT 0 /* possible flags for register CH_TIME_CFG */ #define RT2860_EIFS_AS_CH_BUSY (1 << 4) #define RT2860_NAV_AS_CH_BUSY (1 << 3) #define RT2860_RX_AS_CH_BUSY (1 << 2) #define RT2860_TX_AS_CH_BUSY (1 << 1) #define RT2860_CH_STA_TIMER_EN (1 << 0) /* possible values for register BCN_TIME_CFG */ #define RT2860_TSF_INS_COMP_SHIFT 24 #define RT2860_BCN_TX_EN (1 << 20) #define RT2860_TBTT_TIMER_EN (1 << 19) #define RT2860_TSF_SYNC_MODE_SHIFT 17 #define RT2860_TSF_SYNC_MODE_DIS 0 #define RT2860_TSF_SYNC_MODE_STA 1 #define RT2860_TSF_SYNC_MODE_IBSS 2 #define RT2860_TSF_SYNC_MODE_HOSTAP 3 #define RT2860_TSF_TIMER_EN (1 << 16) #define RT2860_BCN_INTVAL_SHIFT 0 /* possible flags for register TBTT_SYNC_CFG */ #define RT2860_BCN_CWMIN_SHIFT 20 #define RT2860_BCN_AIFSN_SHIFT 16 #define RT2860_BCN_EXP_WIN_SHIFT 8 #define RT2860_TBTT_ADJUST_SHIFT 0 /* possible flags for register INT_TIMER_CFG */ #define RT2860_GP_TIMER_SHIFT 16 #define RT2860_PRE_TBTT_TIMER_SHIFT 0 /* possible flags for register INT_TIMER_EN */ #define RT2860_GP_TIMER_EN (1 << 1) #define RT2860_PRE_TBTT_INT_EN (1 << 0) /* possible flags for register MAC_STATUS_REG */ #define RT2860_RX_STATUS_BUSY (1 << 1) #define RT2860_TX_STATUS_BUSY (1 << 0) /* possible flags for register PWR_PIN_CFG */ #define RT2860_IO_ADDA_PD (1 << 3) #define RT2860_IO_PLL_PD (1 << 2) #define RT2860_IO_RA_PE (1 << 1) #define RT2860_IO_RF_PE (1 << 0) /* possible flags for register AUTO_WAKEUP_CFG */ #define RT2860_AUTO_WAKEUP_EN (1 << 15) #define RT2860_SLEEP_TBTT_NUM_SHIFT 8 #define RT2860_WAKEUP_LEAD_TIME_SHIFT 0 /* possible flags for register TX_PIN_CFG */ #define RT2860_TRSW_POL (1 << 19) #define RT2860_TRSW_EN (1 << 18) #define RT2860_RFTR_POL (1 << 17) #define RT2860_RFTR_EN (1 << 16) #define RT2860_LNA_PE_G1_POL (1 << 15) #define RT2860_LNA_PE_A1_POL (1 << 14) #define RT2860_LNA_PE_G0_POL (1 << 13) #define RT2860_LNA_PE_A0_POL (1 << 12) #define RT2860_LNA_PE_G1_EN (1 << 11) #define RT2860_LNA_PE_A1_EN (1 << 10) #define RT2860_LNA_PE1_EN (RT2860_LNA_PE_A1_EN | RT2860_LNA_PE_G1_EN) #define RT2860_LNA_PE_G0_EN (1 << 9) #define RT2860_LNA_PE_A0_EN (1 << 8) #define RT2860_LNA_PE0_EN (RT2860_LNA_PE_A0_EN | RT2860_LNA_PE_G0_EN) #define RT2860_PA_PE_G1_POL (1 << 7) #define RT2860_PA_PE_A1_POL (1 << 6) #define RT2860_PA_PE_G0_POL (1 << 5) #define RT2860_PA_PE_A0_POL (1 << 4) #define RT2860_PA_PE_G1_EN (1 << 3) #define RT2860_PA_PE_A1_EN (1 << 2) #define RT2860_PA_PE_G0_EN (1 << 1) #define RT2860_PA_PE_A0_EN (1 << 0) /* possible flags for register TX_BAND_CFG */ #define RT2860_5G_BAND_SEL_N (1 << 2) #define RT2860_5G_BAND_SEL_P (1 << 1) #define RT2860_TX_BAND_SEL (1 << 0) /* possible flags for register TX_SW_CFG0 */ #define RT2860_DLY_RFTR_EN_SHIFT 24 #define RT2860_DLY_TRSW_EN_SHIFT 16 #define RT2860_DLY_PAPE_EN_SHIFT 8 #define RT2860_DLY_TXPE_EN_SHIFT 0 /* possible flags for register TX_SW_CFG1 */ #define RT2860_DLY_RFTR_DIS_SHIFT 16 #define RT2860_DLY_TRSW_DIS_SHIFT 8 #define RT2860_DLY_PAPE_DIS_SHIFT 0 /* possible flags for register TX_SW_CFG2 */ #define RT2860_DLY_LNA_EN_SHIFT 24 #define
RT2860_DLY_LNA_DIS_SHIFT 16 #define RT2860_DLY_DAC_EN_SHIFT 8 #define RT2860_DLY_DAC_DIS_SHIFT 0 /* possible flags for register TXOP_THRES_CFG */ #define RT2860_TXOP_REM_THRES_SHIFT 24 #define RT2860_CF_END_THRES_SHIFT 16 #define RT2860_RDG_IN_THRES 8 #define RT2860_RDG_OUT_THRES 0 /* possible flags for register TXOP_CTRL_CFG */ #define RT2860_EXT_CW_MIN_SHIFT 16 #define RT2860_EXT_CCA_DLY_SHIFT 8 #define RT2860_EXT_CCA_EN (1 << 7) #define RT2860_LSIG_TXOP_EN (1 << 6) #define RT2860_TXOP_TRUN_EN_MIMOPS (1 << 4) #define RT2860_TXOP_TRUN_EN_TXOP (1 << 3) #define RT2860_TXOP_TRUN_EN_RATE (1 << 2) #define RT2860_TXOP_TRUN_EN_AC (1 << 1) #define RT2860_TXOP_TRUN_EN_TIMEOUT (1 << 0) /* possible flags for register TX_RTS_CFG */ #define RT2860_RTS_FBK_EN (1 << 24) #define RT2860_RTS_THRES_SHIFT 8 #define RT2860_RTS_RTY_LIMIT_SHIFT 0 /* possible flags for register TX_TIMEOUT_CFG */ #define RT2860_TXOP_TIMEOUT_SHIFT 16 #define RT2860_RX_ACK_TIMEOUT_SHIFT 8 #define RT2860_MPDU_LIFE_TIME_SHIFT 4 /* possible flags for register TX_RTY_CFG */ #define RT2860_TX_AUTOFB_EN (1 << 30) #define RT2860_AGG_RTY_MODE_TIMER (1 << 29) #define RT2860_NAG_RTY_MODE_TIMER (1 << 28) #define RT2860_LONG_RTY_THRES_SHIFT 16 #define RT2860_LONG_RTY_LIMIT_SHIFT 8 #define RT2860_SHORT_RTY_LIMIT_SHIFT 0 /* possible flags for register TX_LINK_CFG */ #define RT2860_REMOTE_MFS_SHIFT 24 #define RT2860_REMOTE_MFB_SHIFT 16 #define RT2860_TX_CFACK_EN (1 << 12) #define RT2860_TX_RDG_EN (1 << 11) #define RT2860_TX_MRQ_EN (1 << 10) #define RT2860_REMOTE_UMFS_EN (1 << 9) #define RT2860_TX_MFB_EN (1 << 8) #define RT2860_REMOTE_MFB_LT_SHIFT 0 /* possible flags for registers *_PROT_CFG */ #define RT2860_RTSTH_EN (1 << 26) #define RT2860_TXOP_ALLOW_GF40 (1 << 25) #define RT2860_TXOP_ALLOW_GF20 (1 << 24) #define RT2860_TXOP_ALLOW_MM40 (1 << 23) #define RT2860_TXOP_ALLOW_MM20 (1 << 22) #define RT2860_TXOP_ALLOW_OFDM (1 << 21) #define RT2860_TXOP_ALLOW_CCK (1 << 20) #define RT2860_TXOP_ALLOW_ALL (0x3f << 20) #define RT2860_PROT_NAV_SHORT (1 << 18) #define RT2860_PROT_NAV_LONG (2 << 18) #define RT2860_PROT_CTRL_RTS_CTS (1 << 16) #define RT2860_PROT_CTRL_CTS (2 << 16) /* possible flags for registers EXP_{CTS,ACK}_TIME */ #define RT2860_EXP_OFDM_TIME_SHIFT 16 #define RT2860_EXP_CCK_TIME_SHIFT 0 /* possible flags for register RX_FILTR_CFG */ #define RT2860_DROP_CTRL_RSV (1 << 16) #define RT2860_DROP_BAR (1 << 15) #define RT2860_DROP_BA (1 << 14) #define RT2860_DROP_PSPOLL (1 << 13) #define RT2860_DROP_RTS (1 << 12) #define RT2860_DROP_CTS (1 << 11) #define RT2860_DROP_ACK (1 << 10) #define RT2860_DROP_CFEND (1 << 9) #define RT2860_DROP_CFACK (1 << 8) #define RT2860_DROP_DUPL (1 << 7) #define RT2860_DROP_BC (1 << 6) #define RT2860_DROP_MC (1 << 5) #define RT2860_DROP_VER_ERR (1 << 4) #define RT2860_DROP_NOT_MYBSS (1 << 3) #define RT2860_DROP_UC_NOME (1 << 2) #define RT2860_DROP_PHY_ERR (1 << 1) #define RT2860_DROP_CRC_ERR (1 << 0) /* possible flags for register AUTO_RSP_CFG */ #define RT2860_CTRL_PWR_BIT (1 << 7) #define RT2860_BAC_ACK_POLICY (1 << 6) #define RT2860_CCK_SHORT_EN (1 << 4) #define RT2860_CTS_40M_REF_EN (1 << 3) #define RT2860_CTS_40M_MODE_EN (1 << 2) #define RT2860_BAC_ACKPOLICY_EN (1 << 1) #define RT2860_AUTO_RSP_EN (1 << 0) /* possible flags for register SIFS_COST_CFG */ #define RT2860_OFDM_SIFS_COST_SHIFT 8 #define RT2860_CCK_SIFS_COST_SHIFT 0 /* possible flags for register TXOP_HLDR_ET */ #define RT2860_TXOP_ETM1_EN (1 << 25) #define RT2860_TXOP_ETM0_EN (1 << 24) #define RT2860_TXOP_ETM_THRES_SHIFT 16 #define RT2860_TXOP_ETO_EN (1 
<< 8) #define RT2860_TXOP_ETO_THRES_SHIFT 1 #define RT2860_PER_RX_RST_EN (1 << 0) /* possible flags for register TX_STAT_FIFO */ #define RT2860_TXQ_MCS_SHIFT 16 #define RT2860_TXQ_WCID_SHIFT 8 #define RT2860_TXQ_ACKREQ (1 << 7) #define RT2860_TXQ_AGG (1 << 6) #define RT2860_TXQ_OK (1 << 5) #define RT2860_TXQ_PID_SHIFT 1 #define RT2860_TXQ_VLD (1 << 0) /* possible flags for register WCID_ATTR */ #define RT2860_MODE_NOSEC 0 #define RT2860_MODE_WEP40 1 #define RT2860_MODE_WEP104 2 #define RT2860_MODE_TKIP 3 #define RT2860_MODE_AES_CCMP 4 #define RT2860_MODE_CKIP40 5 #define RT2860_MODE_CKIP104 6 #define RT2860_MODE_CKIP128 7 #define RT2860_RX_PKEY_EN (1 << 0) /* possible flags for register H2M_MAILBOX */ #define RT2860_H2M_BUSY (1 << 24) #define RT2860_TOKEN_NO_INTR 0xff /* possible flags for MCU command RT2860_MCU_CMD_LEDS */ #define RT2860_LED_RADIO (1 << 13) #define RT2860_LED_LINK_2GHZ (1 << 14) #define RT2860_LED_LINK_5GHZ (1 << 15) /* possible flags for RT3020 RF register 1 */ #define RT3070_RF_BLOCK (1 << 0) #define RT3070_PLL_PD (1 << 1) #define RT3070_RX0_PD (1 << 2) #define RT3070_TX0_PD (1 << 3) #define RT3070_RX1_PD (1 << 4) #define RT3070_TX1_PD (1 << 5) /* possible flags for RT3020 RF register 15 */ #define RT3070_TX_LO2 (1 << 3) /* possible flags for RT3020 RF register 17 */ #define RT3070_TX_LO1 (1 << 3) /* possible flags for RT3020 RF register 20 */ #define RT3070_RX_LO1 (1 << 3) /* possible flags for RT3020 RF register 21 */ #define RT3070_RX_LO2 (1 << 3) /* Possible flags for RT5390 RF register 3. */ #define RT5390_VCOCAL (1 << 7) /* Possible flags for RT5390 RF register 38. */ #define RT5390_RX_LO1 (1 << 5) /* Possible flags for RT5390 RF register 39. */ #define RT5390_RX_LO2 (1 << 7) /* RT2860 TX descriptor */ struct rt2860_txd { uint32_t sdp0; /* Segment Data Pointer 0 */ uint16_t sdl1; /* Segment Data Length 1 */ #define RT2860_TX_BURST (1 << 15) #define RT2860_TX_LS1 (1 << 14) /* SDP1 is the last segment */ uint16_t sdl0; /* Segment Data Length 0 */ #define RT2860_TX_DDONE (1 << 15) #define RT2860_TX_LS0 (1 << 14) /* SDP0 is the last segment */ uint32_t sdp1; /* Segment Data Pointer 1 */ uint8_t reserved[3]; uint8_t flags; #define RT2860_TX_QSEL_SHIFT 1 #define RT2860_TX_QSEL_MGMT (0 << 1) #define RT2860_TX_QSEL_HCCA (1 << 1) #define RT2860_TX_QSEL_EDCA (2 << 1) #define RT2860_TX_WIV (1 << 0) } __packed; /* RT2870 TX descriptor */ struct rt2870_txd { uint16_t len; uint8_t pad; uint8_t flags; } __packed; /* TX Wireless Information */ struct rt2860_txwi { uint8_t flags; #define RT2860_TX_MPDU_DSITY_SHIFT 5 #define RT2860_TX_AMPDU (1 << 4) #define RT2860_TX_TS (1 << 3) #define RT2860_TX_CFACK (1 << 2) #define RT2860_TX_MMPS (1 << 1) #define RT2860_TX_FRAG (1 << 0) uint8_t txop; #define RT2860_TX_TXOP_HT 0 #define RT2860_TX_TXOP_PIFS 1 #define RT2860_TX_TXOP_SIFS 2 #define RT2860_TX_TXOP_BACKOFF 3 uint16_t phy; #define RT2860_PHY_MODE 0xc000 #define RT2860_PHY_CCK (0 << 14) #define RT2860_PHY_OFDM (1 << 14) #define RT2860_PHY_HT (2 << 14) #define RT2860_PHY_HT_GF (3 << 14) #define RT2860_PHY_SGI (1 << 8) #define RT2860_PHY_BW40 (1 << 7) #define RT2860_PHY_MCS 0x7f #define RT2860_PHY_SHPRE (1 << 3) uint8_t xflags; #define RT2860_TX_BAWINSIZE_SHIFT 2 #define RT2860_TX_NSEQ (1 << 1) #define RT2860_TX_ACK (1 << 0) uint8_t wcid; /* Wireless Client ID */ uint16_t len; #define RT2860_TX_PID_SHIFT 12 uint32_t iv; uint32_t eiv; } __packed; /* RT2860 RX descriptor */ struct rt2860_rxd { uint32_t sdp0; uint16_t sdl1; /* unused */ uint16_t sdl0; #define RT2860_RX_DDONE (1 << 15) 
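/*
 * RT2860_TX_DDONE/RT2860_RX_DDONE above mark a descriptor that the
 * hardware has finished with.  These 16-bit flag fields are safe with
 * plain (1 << n), but for 32-bit flag words that use bit 31 the
 * expression 1 << 31 shifts into the sign bit of a 32-bit int, which
 * is undefined behaviour in C; hence the (1U << 31) spelling this
 * revision introduces for RAL_RF_BUSY, URTW_RX_FLAG_OWN and friends
 * in the files further down.  Illustrative only, not part of this
 * header:
 *
 *	uint32_t w = 0;
 *	w |= 1U << 31;	// well-defined: w == 0x80000000
 *	w |= 1 << 31;	// undefined behaviour: signed shift overflow
 */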
#define RT2860_RX_LS0 (1 << 14) uint32_t sdp1; /* unused */ uint32_t flags; #define RT2860_RX_DEC (1 << 16) #define RT2860_RX_AMPDU (1 << 15) #define RT2860_RX_L2PAD (1 << 14) #define RT2860_RX_RSSI (1 << 13) #define RT2860_RX_HTC (1 << 12) #define RT2860_RX_AMSDU (1 << 11) #define RT2860_RX_MICERR (1 << 10) #define RT2860_RX_ICVERR (1 << 9) #define RT2860_RX_CRCERR (1 << 8) #define RT2860_RX_MYBSS (1 << 7) #define RT2860_RX_BC (1 << 6) #define RT2860_RX_MC (1 << 5) #define RT2860_RX_UC2ME (1 << 4) #define RT2860_RX_FRAG (1 << 3) #define RT2860_RX_NULL (1 << 2) #define RT2860_RX_DATA (1 << 1) #define RT2860_RX_BA (1 << 0) } __packed; /* RT2870 RX descriptor */ struct rt2870_rxd { /* single 32-bit field */ uint32_t flags; } __packed; /* RX Wireless Information */ struct rt2860_rxwi { uint8_t wcid; uint8_t keyidx; #define RT2860_RX_UDF_SHIFT 5 #define RT2860_RX_BSS_IDX_SHIFT 2 uint16_t len; #define RT2860_RX_TID_SHIFT 12 uint16_t seq; uint16_t phy; uint8_t rssi[3]; uint8_t reserved1; uint8_t snr[2]; uint16_t reserved2; } __packed; /* first DMA segment contains TXWI + 802.11 header + 32-bit padding */ #define RT2860_TXWI_DMASZ \ (sizeof (struct rt2860_txwi) + \ sizeof (struct ieee80211_htframe) + \ sizeof (uint16_t)) #define RT2860_RF_2820 0x0001 /* 2T3R */ #define RT2860_RF_2850 0x0002 /* dual-band 2T3R */ #define RT2860_RF_2720 0x0003 /* 1T2R */ #define RT2860_RF_2750 0x0004 /* dual-band 1T2R */ #define RT3070_RF_3020 0x0005 /* 1T1R */ #define RT3070_RF_2020 0x0006 /* b/g */ #define RT3070_RF_3021 0x0007 /* 1T2R */ #define RT3070_RF_3022 0x0008 /* 2T2R */ #define RT3070_RF_3052 0x0009 /* dual-band 2T2R */ #define RT5390_RF_5370 0x5370 /* 1T1R */ #define RT5390_RF_5372 0x5372 /* 2T2R */ /* USB commands for RT2870 only */ #define RT2870_RESET 1 #define RT2870_WRITE_2 2 #define RT2870_WRITE_REGION_1 6 #define RT2870_READ_REGION_1 7 #define RT2870_EEPROM_READ 9 #define RT2860_EEPROM_DELAY 1 /* minimum hold time (microsecond) */ #define RT2860_EEPROM_VERSION 0x01 #define RT2860_EEPROM_MAC01 0x02 #define RT2860_EEPROM_MAC23 0x03 #define RT2860_EEPROM_MAC45 0x04 #define RT2860_EEPROM_PCIE_PSLEVEL 0x11 #define RT2860_EEPROM_REV 0x12 #define RT2860_EEPROM_ANTENNA 0x1a #define RT2860_EEPROM_CONFIG 0x1b #define RT2860_EEPROM_COUNTRY 0x1c #define RT2860_EEPROM_FREQ_LEDS 0x1d #define RT2860_EEPROM_LED1 0x1e #define RT2860_EEPROM_LED2 0x1f #define RT2860_EEPROM_LED3 0x20 #define RT2860_EEPROM_LNA 0x22 #define RT2860_EEPROM_RSSI1_2GHZ 0x23 #define RT2860_EEPROM_RSSI2_2GHZ 0x24 #define RT2860_EEPROM_RSSI1_5GHZ 0x25 #define RT2860_EEPROM_RSSI2_5GHZ 0x26 #define RT2860_EEPROM_DELTAPWR 0x28 #define RT2860_EEPROM_PWR2GHZ_BASE1 0x29 #define RT2860_EEPROM_PWR2GHZ_BASE2 0x30 #define RT2860_EEPROM_TSSI1_2GHZ 0x37 #define RT2860_EEPROM_TSSI2_2GHZ 0x38 #define RT2860_EEPROM_TSSI3_2GHZ 0x39 #define RT2860_EEPROM_TSSI4_2GHZ 0x3a #define RT2860_EEPROM_TSSI5_2GHZ 0x3b #define RT2860_EEPROM_PWR5GHZ_BASE1 0x3c #define RT2860_EEPROM_PWR5GHZ_BASE2 0x53 #define RT2860_EEPROM_TSSI1_5GHZ 0x6a #define RT2860_EEPROM_TSSI2_5GHZ 0x6b #define RT2860_EEPROM_TSSI3_5GHZ 0x6c #define RT2860_EEPROM_TSSI4_5GHZ 0x6d #define RT2860_EEPROM_TSSI5_5GHZ 0x6e #define RT2860_EEPROM_RPWR 0x6f #define RT2860_EEPROM_BBP_BASE 0x78 #define RT3071_EEPROM_RF_BASE 0x82 #define RT2860_RIDX_CCK1 0 #define RT2860_RIDX_CCK11 3 #define RT2860_RIDX_OFDM6 4 #define RT2860_RIDX_MAX 12 static const struct rt2860_rate { uint8_t rate; uint8_t mcs; enum ieee80211_phytype phy; uint8_t ctl_ridx; uint16_t sp_ack_dur; uint16_t lp_ack_dur; } rt2860_rates[] = { { 2, 0, 
IEEE80211_T_DS, 0, 314, 314 }, { 4, 1, IEEE80211_T_DS, 1, 258, 162 }, { 11, 2, IEEE80211_T_DS, 2, 223, 127 }, { 22, 3, IEEE80211_T_DS, 3, 213, 117 }, { 12, 0, IEEE80211_T_OFDM, 4, 60, 60 }, { 18, 1, IEEE80211_T_OFDM, 4, 52, 52 }, { 24, 2, IEEE80211_T_OFDM, 6, 48, 48 }, { 36, 3, IEEE80211_T_OFDM, 6, 44, 44 }, { 48, 4, IEEE80211_T_OFDM, 8, 44, 44 }, { 72, 5, IEEE80211_T_OFDM, 8, 40, 40 }, { 96, 6, IEEE80211_T_OFDM, 8, 40, 40 }, { 108, 7, IEEE80211_T_OFDM, 8, 40, 40 } }; /* * EEPROM access macro. */ #define RT2860_EEPROM_CTL(sc, val) do { \ RAL_WRITE((sc), RT2860_PCI_EECTRL, (val)); \ RAL_BARRIER_READ_WRITE((sc)); \ DELAY(RT2860_EEPROM_DELAY); \ } while (/* CONSTCOND */0) /* * Default values for MAC registers; values taken from the reference driver. */ #define RT2870_DEF_MAC \ { RT2860_BCN_OFFSET0, 0xf8f0e8e0 }, \ { RT2860_BCN_OFFSET1, 0x6f77d0c8 }, \ { RT2860_LEGACY_BASIC_RATE, 0x0000013f }, \ { RT2860_HT_BASIC_RATE, 0x00008003 }, \ { RT2860_MAC_SYS_CTRL, 0x00000000 }, \ { RT2860_BKOFF_SLOT_CFG, 0x00000209 }, \ { RT2860_TX_SW_CFG0, 0x00000000 }, \ { RT2860_TX_SW_CFG1, 0x00080606 }, \ { RT2860_TX_LINK_CFG, 0x00001020 }, \ { RT2860_TX_TIMEOUT_CFG, 0x000a2090 }, \ { RT2860_MAX_LEN_CFG, 0x00001f00 }, \ { RT2860_LED_CFG, 0x7f031e46 }, \ { RT2860_WMM_AIFSN_CFG, 0x00002273 }, \ { RT2860_WMM_CWMIN_CFG, 0x00002344 }, \ { RT2860_WMM_CWMAX_CFG, 0x000034aa }, \ { RT2860_MAX_PCNT, 0x1f3fbf9f }, \ { RT2860_TX_RTY_CFG, 0x47d01f0f }, \ { RT2860_AUTO_RSP_CFG, 0x00000013 }, \ { RT2860_CCK_PROT_CFG, 0x05740003 }, \ { RT2860_OFDM_PROT_CFG, 0x05740003 }, \ { RT2860_PBF_CFG, 0x00f40006 }, \ { RT2860_WPDMA_GLO_CFG, 0x00000030 }, \ { RT2860_GF20_PROT_CFG, 0x01744004 }, \ { RT2860_GF40_PROT_CFG, 0x03f44084 }, \ { RT2860_MM20_PROT_CFG, 0x01744004 }, \ { RT2860_MM40_PROT_CFG, 0x03f44084 }, \ { RT2860_TXOP_CTRL_CFG, 0x0000583f }, \ { RT2860_TXOP_HLDR_ET, 0x00000002 }, \ { RT2860_TX_RTS_CFG, 0x00092b20 }, \ { RT2860_EXP_ACK_TIME, 0x002400ca }, \ { RT2860_XIFS_TIME_CFG, 0x33a41010 }, \ { RT2860_PWR_PIN_CFG, 0x00000003 } /* * Default values for BBP registers; values taken from the reference driver. */ #define RT2860_DEF_BBP \ { 65, 0x2c }, \ { 66, 0x38 }, \ { 68, 0x0b }, \ { 69, 0x12 }, \ { 70, 0x0a }, \ { 73, 0x10 }, \ { 81, 0x37 }, \ { 82, 0x62 }, \ { 83, 0x6a }, \ { 84, 0x99 }, \ { 86, 0x00 }, \ { 91, 0x04 }, \ { 92, 0x00 }, \ { 103, 0x00 }, \ { 105, 0x05 }, \ { 106, 0x35 } #define RT5390_DEF_BBP \ { 31, 0x08 }, \ { 65, 0x2c }, \ { 66, 0x38 }, \ { 68, 0x0b }, \ { 69, 0x0d }, \ { 70, 0x06 }, \ { 73, 0x13 }, \ { 75, 0x46 }, \ { 76, 0x28 }, \ { 77, 0x59 }, \ { 81, 0x37 }, \ { 82, 0x62 }, \ { 83, 0x7a }, \ { 84, 0x9a }, \ { 86, 0x38 }, \ { 91, 0x04 }, \ { 92, 0x02 }, \ { 103, 0xc0 }, \ { 104, 0x92 }, \ { 105, 0x3c }, \ { 106, 0x03 }, \ { 128, 0x12 } /* * Default settings for RF registers; values derived from the reference driver. 
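 *
 * The { register, value } tables in this header (RT2870_DEF_MAC and the
 * RT2860_DEF_BBP/RT5390_DEF_BBP lists above, the RF lists below) are
 * meant to be walked in order at attach/init time.  A minimal sketch,
 * with run_write() standing in for whichever bus accessor the driver
 * actually uses (names illustrative only):
 *
 *	static const struct { uint32_t reg, val; } defs[] = { RT2870_DEF_MAC };
 *	size_t i;
 *
 *	for (i = 0; i < nitems(defs); i++)
 *		run_write(sc, defs[i].reg, defs[i].val);
 *
 * RT2860_EEPROM_CTL above similarly bundles the register write, the
 * read/write barrier and the RT2860_EEPROM_DELAY hold time required
 * between bit-banged EEPROM accesses.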
*/ #define RT2860_RF2850 \ { 1, 0x98402ecc, 0x984c0786, 0x9816b455, 0x9800510b }, \ { 2, 0x98402ecc, 0x984c0786, 0x98168a55, 0x9800519f }, \ { 3, 0x98402ecc, 0x984c078a, 0x98168a55, 0x9800518b }, \ { 4, 0x98402ecc, 0x984c078a, 0x98168a55, 0x9800519f }, \ { 5, 0x98402ecc, 0x984c078e, 0x98168a55, 0x9800518b }, \ { 6, 0x98402ecc, 0x984c078e, 0x98168a55, 0x9800519f }, \ { 7, 0x98402ecc, 0x984c0792, 0x98168a55, 0x9800518b }, \ { 8, 0x98402ecc, 0x984c0792, 0x98168a55, 0x9800519f }, \ { 9, 0x98402ecc, 0x984c0796, 0x98168a55, 0x9800518b }, \ { 10, 0x98402ecc, 0x984c0796, 0x98168a55, 0x9800519f }, \ { 11, 0x98402ecc, 0x984c079a, 0x98168a55, 0x9800518b }, \ { 12, 0x98402ecc, 0x984c079a, 0x98168a55, 0x9800519f }, \ { 13, 0x98402ecc, 0x984c079e, 0x98168a55, 0x9800518b }, \ { 14, 0x98402ecc, 0x984c07a2, 0x98168a55, 0x98005193 }, \ { 36, 0x98402ecc, 0x984c099a, 0x98158a55, 0x980ed1a3 }, \ { 38, 0x98402ecc, 0x984c099e, 0x98158a55, 0x980ed193 }, \ { 40, 0x98402ec8, 0x984c0682, 0x98158a55, 0x980ed183 }, \ { 44, 0x98402ec8, 0x984c0682, 0x98158a55, 0x980ed1a3 }, \ { 46, 0x98402ec8, 0x984c0686, 0x98158a55, 0x980ed18b }, \ { 48, 0x98402ec8, 0x984c0686, 0x98158a55, 0x980ed19b }, \ { 52, 0x98402ec8, 0x984c068a, 0x98158a55, 0x980ed193 }, \ { 54, 0x98402ec8, 0x984c068a, 0x98158a55, 0x980ed1a3 }, \ { 56, 0x98402ec8, 0x984c068e, 0x98158a55, 0x980ed18b }, \ { 60, 0x98402ec8, 0x984c0692, 0x98158a55, 0x980ed183 }, \ { 62, 0x98402ec8, 0x984c0692, 0x98158a55, 0x980ed193 }, \ { 64, 0x98402ec8, 0x984c0692, 0x98158a55, 0x980ed1a3 }, \ { 100, 0x98402ec8, 0x984c06b2, 0x98178a55, 0x980ed783 }, \ { 102, 0x98402ec8, 0x985c06b2, 0x98578a55, 0x980ed793 }, \ { 104, 0x98402ec8, 0x985c06b2, 0x98578a55, 0x980ed1a3 }, \ { 108, 0x98402ecc, 0x985c0a32, 0x98578a55, 0x980ed193 }, \ { 110, 0x98402ecc, 0x984c0a36, 0x98178a55, 0x980ed183 }, \ { 112, 0x98402ecc, 0x984c0a36, 0x98178a55, 0x980ed19b }, \ { 116, 0x98402ecc, 0x984c0a3a, 0x98178a55, 0x980ed1a3 }, \ { 118, 0x98402ecc, 0x984c0a3e, 0x98178a55, 0x980ed193 }, \ { 120, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed183 }, \ { 124, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed193 }, \ { 126, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed15b }, \ { 128, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed1a3 }, \ { 132, 0x98402ec4, 0x984c0386, 0x98178a55, 0x980ed18b }, \ { 134, 0x98402ec4, 0x984c0386, 0x98178a55, 0x980ed193 }, \ { 136, 0x98402ec4, 0x984c0386, 0x98178a55, 0x980ed19b }, \ { 140, 0x98402ec4, 0x984c038a, 0x98178a55, 0x980ed183 }, \ { 149, 0x98402ec4, 0x984c038a, 0x98178a55, 0x980ed1a7 }, \ { 151, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed187 }, \ { 153, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed18f }, \ { 157, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed19f }, \ { 159, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed1a7 }, \ { 161, 0x98402ec4, 0x984c0392, 0x98178a55, 0x980ed187 }, \ { 165, 0x98402ec4, 0x984c0392, 0x98178a55, 0x980ed197 }, \ { 167, 0x98402ec4, 0x984c03d2, 0x98179855, 0x9815531f }, \ { 169, 0x98402ec4, 0x984c03d2, 0x98179855, 0x98155327 }, \ { 171, 0x98402ec4, 0x984c03d6, 0x98179855, 0x98155307 }, \ { 173, 0x98402ec4, 0x984c03d6, 0x98179855, 0x9815530f }, \ { 184, 0x95002ccc, 0x9500491e, 0x9509be55, 0x950c0a0b }, \ { 188, 0x95002ccc, 0x95004922, 0x9509be55, 0x950c0a13 }, \ { 192, 0x95002ccc, 0x95004926, 0x9509be55, 0x950c0a1b }, \ { 196, 0x95002ccc, 0x9500492a, 0x9509be55, 0x950c0a23 }, \ { 208, 0x95002ccc, 0x9500493a, 0x9509be55, 0x950c0a13 }, \ { 212, 0x95002ccc, 0x9500493e, 0x9509be55, 0x950c0a1b }, \ { 216, 0x95002ccc, 0x95004982, 0x9509be55, 0x950c0a23 } #define RT3070_RF3052 \ { 
0xf1, 2, 2 }, \ { 0xf1, 2, 7 }, \ { 0xf2, 2, 2 }, \ { 0xf2, 2, 7 }, \ { 0xf3, 2, 2 }, \ { 0xf3, 2, 7 }, \ { 0xf4, 2, 2 }, \ { 0xf4, 2, 7 }, \ { 0xf5, 2, 2 }, \ { 0xf5, 2, 7 }, \ { 0xf6, 2, 2 }, \ { 0xf6, 2, 7 }, \ { 0xf7, 2, 2 }, \ { 0xf8, 2, 4 }, \ { 0x56, 0, 4 }, \ { 0x56, 0, 6 }, \ { 0x56, 0, 8 }, \ { 0x57, 0, 0 }, \ { 0x57, 0, 2 }, \ { 0x57, 0, 4 }, \ { 0x57, 0, 8 }, \ { 0x57, 0, 10 }, \ { 0x58, 0, 0 }, \ { 0x58, 0, 4 }, \ { 0x58, 0, 6 }, \ { 0x58, 0, 8 }, \ { 0x5b, 0, 8 }, \ { 0x5b, 0, 10 }, \ { 0x5c, 0, 0 }, \ { 0x5c, 0, 4 }, \ { 0x5c, 0, 6 }, \ { 0x5c, 0, 8 }, \ { 0x5d, 0, 0 }, \ { 0x5d, 0, 2 }, \ { 0x5d, 0, 4 }, \ { 0x5d, 0, 8 }, \ { 0x5d, 0, 10 }, \ { 0x5e, 0, 0 }, \ { 0x5e, 0, 4 }, \ { 0x5e, 0, 6 }, \ { 0x5e, 0, 8 }, \ { 0x5f, 0, 0 }, \ { 0x5f, 0, 9 }, \ { 0x5f, 0, 11 }, \ { 0x60, 0, 1 }, \ { 0x60, 0, 5 }, \ { 0x60, 0, 7 }, \ { 0x60, 0, 9 }, \ { 0x61, 0, 1 }, \ { 0x61, 0, 3 }, \ { 0x61, 0, 5 }, \ { 0x61, 0, 7 }, \ { 0x61, 0, 9 } #define RT3070_DEF_RF \ { 4, 0x40 }, \ { 5, 0x03 }, \ { 6, 0x02 }, \ { 7, 0x60 }, \ { 9, 0x0f }, \ { 10, 0x41 }, \ { 11, 0x21 }, \ { 12, 0x7b }, \ { 14, 0x90 }, \ { 15, 0x58 }, \ { 16, 0xb3 }, \ { 17, 0x92 }, \ { 18, 0x2c }, \ { 19, 0x02 }, \ { 20, 0xba }, \ { 21, 0xdb }, \ { 24, 0x16 }, \ { 25, 0x03 }, \ { 29, 0x1f } #define RT3572_DEF_RF \ { 0, 0x70 }, \ { 1, 0x81 }, \ { 2, 0xf1 }, \ { 3, 0x02 }, \ { 4, 0x4c }, \ { 5, 0x05 }, \ { 6, 0x4a }, \ { 7, 0xd8 }, \ { 9, 0xc3 }, \ { 10, 0xf1 }, \ { 11, 0xb9 }, \ { 12, 0x70 }, \ { 13, 0x65 }, \ { 14, 0xa0 }, \ { 15, 0x53 }, \ { 16, 0x4c }, \ { 17, 0x23 }, \ { 18, 0xac }, \ { 19, 0x93 }, \ { 20, 0xb3 }, \ { 21, 0xd0 }, \ { 22, 0x00 }, \ { 23, 0x3c }, \ { 24, 0x16 }, \ { 25, 0x15 }, \ { 26, 0x85 }, \ { 27, 0x00 }, \ { 28, 0x00 }, \ { 29, 0x9b }, \ { 30, 0x09 }, \ { 31, 0x10 } #define RT5390_DEF_RF \ { 1, 0x0f }, \ { 2, 0x80 }, \ { 3, 0x88 }, \ { 5, 0x10 }, \ { 6, 0xa0 }, \ { 7, 0x00 }, \ { 10, 0x53 }, \ { 11, 0x4a }, \ { 12, 0x46 }, \ { 13, 0x9f }, \ { 14, 0x00 }, \ { 15, 0x00 }, \ { 16, 0x00 }, \ { 18, 0x03 }, \ { 19, 0x00 }, \ { 20, 0x00 }, \ { 21, 0x00 }, \ { 22, 0x20 }, \ { 23, 0x00 }, \ { 24, 0x00 }, \ { 25, 0xc0 }, \ { 26, 0x00 }, \ { 27, 0x09 }, \ { 28, 0x00 }, \ { 29, 0x10 }, \ { 30, 0x10 }, \ { 31, 0x80 }, \ { 32, 0x80 }, \ { 33, 0x00 }, \ { 34, 0x07 }, \ { 35, 0x12 }, \ { 36, 0x00 }, \ { 37, 0x08 }, \ { 38, 0x85 }, \ { 39, 0x1b }, \ { 40, 0x0b }, \ { 41, 0xbb }, \ { 42, 0xd2 }, \ { 43, 0x9a }, \ { 44, 0x0e }, \ { 45, 0xa2 }, \ { 46, 0x7b }, \ { 47, 0x00 }, \ { 48, 0x10 }, \ { 49, 0x94 }, \ { 52, 0x38 }, \ { 53, 0x84 }, \ { 54, 0x78 }, \ { 55, 0x44 }, \ { 56, 0x22 }, \ { 57, 0x80 }, \ { 58, 0x7f }, \ { 59, 0x8f }, \ { 60, 0x45 }, \ { 61, 0xdd }, \ { 62, 0x00 }, \ { 63, 0x00 } #define RT5392_DEF_RF \ { 1, 0x17 }, \ { 3, 0x88 }, \ { 5, 0x10 }, \ { 6, 0xe0 }, \ { 7, 0x00 }, \ { 10, 0x53 }, \ { 11, 0x4a }, \ { 12, 0x46 }, \ { 13, 0x9f }, \ { 14, 0x00 }, \ { 15, 0x00 }, \ { 16, 0x00 }, \ { 18, 0x03 }, \ { 19, 0x4d }, \ { 20, 0x00 }, \ { 21, 0x8d }, \ { 22, 0x20 }, \ { 23, 0x0b }, \ { 24, 0x44 }, \ { 25, 0x80 }, \ { 26, 0x82 }, \ { 27, 0x09 }, \ { 28, 0x00 }, \ { 29, 0x10 }, \ { 30, 0x10 }, \ { 31, 0x80 }, \ { 32, 0x20 }, \ { 33, 0xc0 }, \ { 34, 0x07 }, \ { 35, 0x12 }, \ { 36, 0x00 }, \ { 37, 0x08 }, \ { 38, 0x89 }, \ { 39, 0x1b }, \ { 40, 0x0f }, \ { 41, 0xbb }, \ { 42, 0xd5 }, \ { 43, 0x9b }, \ { 44, 0x0e }, \ { 45, 0xa2 }, \ { 46, 0x73 }, \ { 47, 0x0c }, \ { 48, 0x10 }, \ { 49, 0x94 }, \ { 50, 0x94 }, \ { 51, 0x3a }, \ { 52, 0x48 }, \ { 53, 0x44 }, \ { 54, 0x38 }, \ { 55, 0x43 }, \ { 56, 0xa1 }, \ { 57, 0x00 
}, \ { 58, 0x39 }, \ { 59, 0x07 }, \ { 60, 0x45 }, \ { 61, 0x91 }, \ { 62, 0x39 }, \ { 63, 0x07 } union run_stats { uint32_t raw; struct { uint16_t fail; uint16_t pad; } error; struct { uint16_t success; uint16_t retry; } tx; } __aligned(4); #endif /* _IF_RUNREG_H_ */ Index: head/sys/dev/usb/wlan/if_uralreg.h =================================================================== --- head/sys/dev/usb/wlan/if_uralreg.h (revision 258779) +++ head/sys/dev/usb/wlan/if_uralreg.h (revision 258780) @@ -1,211 +1,211 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define RAL_NOISE_FLOOR -95 #define RAL_RSSI_CORR 120 #define RAL_RX_DESC_SIZE (sizeof (struct ural_rx_desc)) #define RAL_TX_DESC_SIZE (sizeof (struct ural_tx_desc)) #define RAL_FRAME_SIZE 0x780 /* NOTE: using 0x980 does not work */ #define RAL_CONFIG_NO 1 #define RAL_IFACE_INDEX 0 #define RAL_VENDOR_REQUEST 0x01 #define RAL_WRITE_MAC 0x02 #define RAL_READ_MAC 0x03 #define RAL_WRITE_MULTI_MAC 0x06 #define RAL_READ_MULTI_MAC 0x07 #define RAL_READ_EEPROM 0x09 /* * MAC registers. */ #define RAL_MAC_CSR0 0x0400 /* ASIC Version */ #define RAL_MAC_CSR1 0x0402 /* System control */ #define RAL_MAC_CSR2 0x0404 /* MAC addr0 */ #define RAL_MAC_CSR3 0x0406 /* MAC addr1 */ #define RAL_MAC_CSR4 0x0408 /* MAC addr2 */ #define RAL_MAC_CSR5 0x040a /* BSSID0 */ #define RAL_MAC_CSR6 0x040c /* BSSID1 */ #define RAL_MAC_CSR7 0x040e /* BSSID2 */ #define RAL_MAC_CSR8 0x0410 /* Max frame length */ #define RAL_MAC_CSR9 0x0412 /* Timer control */ #define RAL_MAC_CSR10 0x0414 /* Slot time */ #define RAL_MAC_CSR11 0x0416 /* IFS */ #define RAL_MAC_CSR12 0x0418 /* EIFS */ #define RAL_MAC_CSR13 0x041a /* Power mode0 */ #define RAL_MAC_CSR14 0x041c /* Power mode1 */ #define RAL_MAC_CSR15 0x041e /* Power saving transition0 */ #define RAL_MAC_CSR16 0x0420 /* Power saving transition1 */ #define RAL_MAC_CSR17 0x0422 /* Power state control */ #define RAL_MAC_CSR18 0x0424 /* Auto wake-up control */ #define RAL_MAC_CSR19 0x0426 /* GPIO control */ #define RAL_MAC_CSR20 0x0428 /* LED control0 */ #define RAL_MAC_CSR22 0x042c /* XXX not documented */ /* * Tx/Rx Registers. */ #define RAL_TXRX_CSR0 0x0440 /* Security control */ #define RAL_TXRX_CSR2 0x0444 /* Rx control */ #define RAL_TXRX_CSR5 0x044a /* CCK Tx BBP ID0 */ #define RAL_TXRX_CSR6 0x044c /* CCK Tx BBP ID1 */ #define RAL_TXRX_CSR7 0x044e /* OFDM Tx BBP ID0 */ #define RAL_TXRX_CSR8 0x0450 /* OFDM Tx BBP ID1 */ #define RAL_TXRX_CSR10 0x0454 /* Auto responder control */ #define RAL_TXRX_CSR11 0x0456 /* Auto responder basic rate */ #define RAL_TXRX_CSR18 0x0464 /* Beacon interval */ #define RAL_TXRX_CSR19 0x0466 /* Beacon/sync control */ #define RAL_TXRX_CSR20 0x0468 /* Beacon alignment */ #define RAL_TXRX_CSR21 0x046a /* XXX not documented */ /* * Security registers. 
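 *
 * Only the base of the shared-key area is named below.  Like every
 * other CSR in this file, it is reached over USB control transfers
 * built from the vendor requests defined above (RAL_WRITE_MAC,
 * RAL_READ_MAC, RAL_WRITE_MULTI_MAC, ...).  A rough, illustrative
 * sketch of a single-register write, assuming the stock FreeBSD USB
 * request plumbing:
 *
 *	usb_device_request_t req;
 *
 *	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
 *	req.bRequest = RAL_WRITE_MAC;
 *	USETW(req.wValue, val);
 *	USETW(req.wIndex, reg);
 *	USETW(req.wLength, 0);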
*/
#define RAL_SEC_CSR0		0x0480	/* Shared key 0, word 0 */

/*
 * PHY registers.
 */
#define RAL_PHY_CSR2		0x04c4	/* Tx MAC configuration */
#define RAL_PHY_CSR4		0x04c8	/* Interface configuration */
#define RAL_PHY_CSR5		0x04ca	/* BBP Pre-Tx CCK */
#define RAL_PHY_CSR6		0x04cc	/* BBP Pre-Tx OFDM */
#define RAL_PHY_CSR7		0x04ce	/* BBP serial control */
#define RAL_PHY_CSR8		0x04d0	/* BBP serial status */
#define RAL_PHY_CSR9		0x04d2	/* RF serial control0 */
#define RAL_PHY_CSR10		0x04d4	/* RF serial control1 */

/*
 * Statistics registers.
 */
#define RAL_STA_CSR0		0x04e0	/* FCS error */

#define RAL_DISABLE_RX		(1 << 0)
#define RAL_DROP_CRC		(1 << 1)
#define RAL_DROP_PHY		(1 << 2)
#define RAL_DROP_CTL		(1 << 3)
#define RAL_DROP_NOT_TO_ME	(1 << 4)
#define RAL_DROP_TODS		(1 << 5)
#define RAL_DROP_BAD_VERSION	(1 << 6)
#define RAL_DROP_MULTICAST	(1 << 9)
#define RAL_DROP_BROADCAST	(1 << 10)

#define RAL_SHORT_PREAMBLE	(1 << 2)

#define RAL_RESET_ASIC		(1 << 0)
#define RAL_RESET_BBP		(1 << 1)
#define RAL_HOST_READY		(1 << 2)

#define RAL_ENABLE_TSF			(1 << 0)
#define RAL_ENABLE_TSF_SYNC(x)		(((x) & 0x3) << 1)
#define RAL_ENABLE_TBCN			(1 << 3)
#define RAL_ENABLE_BEACON_GENERATOR	(1 << 4)

#define RAL_RF_AWAKE		(3 << 7)
#define RAL_BBP_AWAKE		(3 << 5)

#define RAL_BBP_WRITE		(1 << 15)
#define RAL_BBP_BUSY		(1 << 0)

#define RAL_RF1_AUTOTUNE	0x08000
#define RAL_RF3_AUTOTUNE	0x00040

#define RAL_RF_2522		0x00
#define RAL_RF_2523		0x01
#define RAL_RF_2524		0x02
#define RAL_RF_2525		0x03
#define RAL_RF_2525E		0x04
#define RAL_RF_2526		0x05
/* dual-band RF */
#define RAL_RF_5222		0x10

#define RAL_BBP_VERSION		0
#define RAL_BBP_TX		2
#define RAL_BBP_RX		14

#define RAL_BBP_ANTA		0x00
#define RAL_BBP_DIVERSITY	0x01
#define RAL_BBP_ANTB		0x02
#define RAL_BBP_ANTMASK		0x03
#define RAL_BBP_FLIPIQ		0x04

#define RAL_JAPAN_FILTER	0x08

struct ural_tx_desc {
	uint32_t	flags;
#define RAL_TX_RETRY(x)		((x) << 4)
#define RAL_TX_MORE_FRAG	(1 << 8)
#define RAL_TX_ACK		(1 << 9)
#define RAL_TX_TIMESTAMP	(1 << 10)
#define RAL_TX_OFDM		(1 << 11)
#define RAL_TX_NEWSEQ		(1 << 12)

#define RAL_TX_IFS_MASK		0x00006000
#define RAL_TX_IFS_BACKOFF	(0 << 13)
#define RAL_TX_IFS_SIFS		(1 << 13)
#define RAL_TX_IFS_NEWBACKOFF	(2 << 13)
#define RAL_TX_IFS_NONE		(3 << 13)

	uint16_t	wme;
#define RAL_LOGCWMAX(x)		(((x) & 0xf) << 12)
#define RAL_LOGCWMIN(x)		(((x) & 0xf) << 8)
#define RAL_AIFSN(x)		(((x) & 0x3) << 6)
#define RAL_IVOFFSET(x)		(((x) & 0x3f))

	uint16_t	reserved1;
	uint8_t		plcp_signal;
	uint8_t		plcp_service;
#define RAL_PLCP_LENGEXT	0x80

	uint8_t		plcp_length_lo;
	uint8_t		plcp_length_hi;
	uint32_t	iv;
	uint32_t	eiv;
} __packed;

struct ural_rx_desc {
	uint32_t	flags;
#define RAL_RX_CRC_ERROR	(1 << 5)
#define RAL_RX_OFDM		(1 << 6)
#define RAL_RX_PHY_ERROR	(1 << 7)

	uint8_t		rssi;
	uint8_t		rate;
	uint16_t	reserved;
	uint32_t	iv;
	uint32_t	eiv;
} __packed;

#define RAL_RF_LOBUSY		(1 << 15)
-#define RAL_RF_BUSY		(1 << 31)
+#define RAL_RF_BUSY		(1U << 31)
#define RAL_RF_20BIT		(20 << 24)

#define RAL_RF1			0
#define RAL_RF2			2
#define RAL_RF3			1
#define RAL_RF4			3

#define RAL_EEPROM_ADDRESS	0x0004
#define RAL_EEPROM_TXPOWER	0x003c
#define RAL_EEPROM_CONFIG0	0x0016
#define RAL_EEPROM_BBP_BASE	0x001c
Index: head/sys/dev/usb/wlan/if_urtwreg.h
===================================================================
--- head/sys/dev/usb/wlan/if_urtwreg.h	(revision 258779)
+++ head/sys/dev/usb/wlan/if_urtwreg.h	(revision 258780)
@@ -1,432 +1,432 @@
/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Weongyo Jeong
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define URTW_CONFIG_INDEX	0
#define URTW_IFACE_INDEX	0

/* for 8187 */
#define URTW_MAC0		0x0000	/* 1 byte */
#define URTW_MAC1		0x0001	/* 1 byte */
#define URTW_MAC2		0x0002	/* 1 byte */
#define URTW_MAC3		0x0003	/* 1 byte */
#define URTW_MAC4		0x0004	/* 1 byte */
#define URTW_MAC5		0x0005	/* 1 byte */
#define URTW_MAR		0x0008	/* 6 byte */
#define URTW_RXFIFO_CNT		0x0010	/* 1 byte */
#define URTW_TXFIFO_CNT		0x0012	/* 1 byte */
#define URTW_BQREQ		0x0013	/* 1 byte */
#define URTW_TSFT		0x0018	/* 6 byte */
#define URTW_TLPDA		0x0020	/* 4 byte */
#define URTW_TNPDA		0x0024	/* 4 byte */
#define URTW_THPDA		0x0028	/* 4 byte */
#define URTW_BRSR		0x002c	/* 2 byte */
#define URTW_BRSR_MBR_8185		(0x0fff)
#define URTW_8187B_EIFS		0x002d	/* 1 byte for 8187B */
#define URTW_BSSID		0x002e	/* 6 byte */
#define URTW_BRSR_8187B		0x0034	/* 2 byte for 8187B */
#define URTW_RESP_RATE		0x0034	/* 1 byte for 8187L */
#define URTW_RESP_MAX_RATE_SHIFT	(4)
#define URTW_RESP_MIN_RATE_SHIFT	(0)
#define URTW_EIFS		0x0035	/* 1 byte */
#define URTW_CMD		0x0037	/* 1 byte */
#define URTW_CMD_TX_ENABLE		(0x4)
#define URTW_CMD_RX_ENABLE		(0x8)
#define URTW_CMD_RST			(0x10)
#define URTW_INTR_MASK		0x003c	/* 2 byte */
#define URTW_INTR_STATUS	0x003e	/* 2 byte */
#define URTW_TX_CONF		0x0040	/* 4 byte */
#define URTW_TX_LOOPBACK_SHIFT		(17)
#define URTW_TX_LOOPBACK_NONE		(0 << URTW_TX_LOOPBACK_SHIFT)
#define URTW_TX_LOOPBACK_MAC		(1 << URTW_TX_LOOPBACK_SHIFT)
#define URTW_TX_LOOPBACK_BASEBAND	(2 << URTW_TX_LOOPBACK_SHIFT)
#define URTW_TX_LOOPBACK_CONTINUE	(3 << URTW_TX_LOOPBACK_SHIFT)
#define URTW_TX_LOOPBACK_MASK		(0x60000)
#define URTW_TX_DPRETRY_MASK		(0xff00)
#define URTW_TX_RTSRETRY_MASK		(0xff)
#define URTW_TX_DPRETRY_SHIFT		(0)
#define URTW_TX_RTSRETRY_SHIFT		(8)
#define URTW_TX_NOCRC			(0x10000)
#define URTW_TX_MXDMA_MASK		(0xe00000)
#define URTW_TX_MXDMA_1024		(6 << URTW_TX_MXDMA_SHIFT)
#define URTW_TX_MXDMA_2048		(7 << URTW_TX_MXDMA_SHIFT)
#define URTW_TX_MXDMA_SHIFT		(21)
#define URTW_TX_DISCW			(1 << 20)
#define URTW_TX_SWPLCPLEN		(1 << 24)
#define URTW_TX_R8187vD			(5 << 25)
#define URTW_TX_R8187vD_B		(6 << 25)
#define URTW_TX_HWMASK			(7 << 25)
#define URTW_TX_DISREQQSIZE		(1 << 28)
#define URTW_TX_HW_SEQNUM		(1 << 30)
-#define URTW_TX_CWMIN			(1 << 31)
+#define URTW_TX_CWMIN			(1U << 31)
#define URTW_TX_NOICV			(0x80000)
#define URTW_RX			0x0044	/* 4 byte */
#define URTW_RX_9356SEL			(1 << 6)
#define URTW_RX_FILTER_MASK						\
	(URTW_RX_FILTER_ALLMAC | URTW_RX_FILTER_NICMAC | URTW_RX_FILTER_MCAST | \
	URTW_RX_FILTER_BCAST | URTW_RX_FILTER_CRCERR | URTW_RX_FILTER_ICVERR | \
	URTW_RX_FILTER_DATA | URTW_RX_FILTER_CTL | URTW_RX_FILTER_MNG |	\
	(1 << 21) |							\
	URTW_RX_FILTER_PWR | URTW_RX_CHECK_BSSID)
#define URTW_RX_FILTER_ALLMAC		(0x00000001)
#define URTW_RX_FILTER_NICMAC		(0x00000002)
#define URTW_RX_FILTER_MCAST		(0x00000004)
#define URTW_RX_FILTER_BCAST		(0x00000008)
#define URTW_RX_FILTER_CRCERR		(0x00000020)
#define URTW_RX_FILTER_ICVERR		(0x00001000)
#define URTW_RX_FILTER_DATA		(0x00040000)
#define URTW_RX_FILTER_CTL		(0x00080000)
#define URTW_RX_FILTER_MNG		(0x00100000)
#define URTW_RX_FILTER_PWR		(0x00400000)
#define URTW_RX_CHECK_BSSID		(0x00800000)
#define URTW_RX_FIFO_THRESHOLD_MASK	((1 << 13) | (1 << 14) | (1 << 15))
#define URTW_RX_FIFO_THRESHOLD_SHIFT	(13)
#define URTW_RX_FIFO_THRESHOLD_128	(3)
#define URTW_RX_FIFO_THRESHOLD_256	(4)
#define URTW_RX_FIFO_THRESHOLD_512	(5)
#define URTW_RX_FIFO_THRESHOLD_1024	(6)
#define URTW_RX_FIFO_THRESHOLD_NONE	(7 << URTW_RX_FIFO_THRESHOLD_SHIFT)
#define URTW_RX_AUTORESETPHY		(1 << URTW_RX_AUTORESETPHY_SHIFT)
#define URTW_RX_AUTORESETPHY_SHIFT	(28)
#define URTW_MAX_RX_DMA_MASK		((1<<8) | (1<<9) | (1<<10))
#define URTW_MAX_RX_DMA_2048		(7 << URTW_MAX_RX_DMA_SHIFT)
#define URTW_MAX_RX_DMA_1024		(6)
#define URTW_MAX_RX_DMA_SHIFT		(10)
-#define URTW_RCR_ONLYERLPKT		(1 << 31)
+#define URTW_RCR_ONLYERLPKT		(1U << 31)
#define URTW_INT_TIMEOUT	0x0048	/* 4 byte */
#define URTW_INT_TBDA		0x004c	/* 4 byte */
#define URTW_EPROM_CMD		0x0050	/* 1 byte */
#define URTW_EPROM_CMD_NORMAL		(0x0)
#define URTW_EPROM_CMD_NORMAL_MODE	\
	(URTW_EPROM_CMD_NORMAL << URTW_EPROM_CMD_SHIFT)
#define URTW_EPROM_CMD_LOAD		(0x1)
#define URTW_EPROM_CMD_PROGRAM		(0x2)
#define URTW_EPROM_CMD_PROGRAM_MODE	\
	(URTW_EPROM_CMD_PROGRAM << URTW_EPROM_CMD_SHIFT)
#define URTW_EPROM_CMD_CONFIG		(0x3)
#define URTW_EPROM_CMD_SHIFT		(6)
#define URTW_EPROM_CMD_MASK		((1 << 7) | (1 << 6))
#define URTW_EPROM_READBIT		(0x1)
#define URTW_EPROM_WRITEBIT		(0x2)
#define URTW_EPROM_CK			(0x4)
#define URTW_EPROM_CS			(0x8)
#define URTW_CONFIG0		0x0051	/* 1 byte */
#define URTW_CONFIG1		0x0052	/* 1 byte */
#define URTW_CONFIG2		0x0053	/* 1 byte */
#define URTW_ANAPARAM		0x0054	/* 4 byte */
#define URTW_8225_ANAPARAM_ON		(0xa0000a59)
#define URTW_8225_ANAPARAM_OFF		(0xa00beb59)
#define URTW_8187B_8225_ANAPARAM_ON	(0x45090658)
#define URTW_8187B_8225_ANAPARAM_OFF	(0x55480658)
#define URTW_MSR		0x0058	/* 1 byte */
#define URTW_MSR_LINK_MASK		((1 << 2) | (1 << 3))
#define URTW_MSR_LINK_SHIFT		(2)
#define URTW_MSR_LINK_NONE		(0 << URTW_MSR_LINK_SHIFT)
#define URTW_MSR_LINK_ADHOC		(1 << URTW_MSR_LINK_SHIFT)
#define URTW_MSR_LINK_STA		(2 << URTW_MSR_LINK_SHIFT)
#define URTW_MSR_LINK_HOSTAP		(3 << URTW_MSR_LINK_SHIFT)
#define URTW_MSR_LINK_ENEDCA		(1 << 4)
#define URTW_CONFIG3		0x0059	/* 1 byte */
#define URTW_CONFIG3_ANAPARAM_WRITE	(0x40)
#define URTW_CONFIG3_GNT_SELECT		(0x80)
#define URTW_CONFIG3_ANAPARAM_W_SHIFT	(6)
#define URTW_CONFIG4		0x005a	/* 1 byte */
#define URTW_CONFIG4_VCOOFF		(1 << 7)
#define URTW_TESTR		0x005b	/* 1 byte */
#define URTW_PSR		0x005e	/* 1 byte */
#define URTW_SECURITY		0x005f	/* 1 byte */
#define URTW_ANAPARAM2		0x0060	/* 4 byte */
#define URTW_8225_ANAPARAM2_ON		(0x860c7312)
#define URTW_8225_ANAPARAM2_OFF		(0x840dec11)
#define URTW_8187B_8225_ANAPARAM2_ON	(0x727f3f52)
#define URTW_8187B_8225_ANAPARAM2_OFF	(0x72003f50)
#define URTW_BEACON_INTERVAL	0x0070	/* 2 byte */
#define URTW_ATIM_WND		0x0072	/* 2 byte */
#define URTW_BEACON_INTERVAL_TIME 0x0074	/* 2 byte */
#define URTW_ATIM_TR_ITV	0x0076	/* 2 byte */
#define URTW_PHY_DELAY		0x0078	/* 1 byte */
#define URTW_CARRIER_SCOUNT	0x0079	/* 1 byte */
#define URTW_PHY_MAGIC1		0x007c	/* 1 byte */
#define URTW_PHY_MAGIC2		0x007d	/* 1 byte */
#define URTW_PHY_MAGIC3		0x007e	/* 1 byte */
#define URTW_PHY_MAGIC4		0x007f	/* 1 byte */
#define URTW_RF_PINS_OUTPUT	0x0080	/* 2 byte */
#define URTW_RF_PINS_OUTPUT_MAGIC1	(0x3a0)
#define URTW_BB_HOST_BANG_CLK		(1 << 1)
#define URTW_BB_HOST_BANG_EN		(1 << 2)
#define URTW_BB_HOST_BANG_RW		(1 << 3)
#define URTW_RF_PINS_ENABLE	0x0082	/* 2 byte */
#define URTW_RF_PINS_SELECT	0x0084	/* 2 byte */
#define URTW_ADDR_MAGIC1	0x0085	/* broken? */
#define URTW_RF_PINS_INPUT	0x0086	/* 2 byte */
#define URTW_RF_PINS_MAGIC1		(0xfff3)
#define URTW_RF_PINS_MAGIC2		(0xfff0)
#define URTW_RF_PINS_MAGIC3		(0x0007)
#define URTW_RF_PINS_MAGIC4		(0xf)
#define URTW_RF_PINS_MAGIC5		(0x0080)
#define URTW_RF_PARA		0x0088	/* 4 byte */
#define URTW_RF_TIMING		0x008c	/* 4 byte */
#define URTW_GP_ENABLE		0x0090	/* 1 byte */
#define URTW_GP_ENABLE_DATA_MAGIC1	(0x1)
#define URTW_GPIO		0x0091	/* 1 byte */
#define URTW_GPIO_DATA_MAGIC1		(0x1)
#define URTW_HSSI_PARA		0x0094	/* 4 byte */
#define URTW_TX_AGC_CTL		0x009c	/* 1 byte */
#define URTW_TX_AGC_CTL_PERPACKET_GAIN	(0x1)
#define URTW_TX_AGC_CTL_PERPACKET_ANTSEL	(0x2)
#define URTW_TX_AGC_CTL_FEEDBACK_ANT	(0x4)
#define URTW_TX_GAIN_CCK	0x009d	/* 1 byte */
#define URTW_TX_GAIN_OFDM	0x009e	/* 1 byte */
#define URTW_TX_ANTENNA		0x009f	/* 1 byte */
#define URTW_WPA_CONFIG		0x00b0	/* 1 byte */
#define URTW_SIFS		0x00b4	/* 1 byte */
#define URTW_DIFS		0x00b5	/* 1 byte */
#define URTW_SLOT		0x00b6	/* 1 byte */
#define URTW_CW_CONF		0x00bc	/* 1 byte */
#define URTW_CW_CONF_PERPACKET_RETRY	(0x2)
#define URTW_CW_CONF_PERPACKET_CW	(0x1)
#define URTW_CW_VAL		0x00bd	/* 1 byte */
#define URTW_RATE_FALLBACK	0x00be	/* 1 byte */
#define URTW_RATE_FALLBACK_ENABLE	(0x80)
#define URTW_ACM_CONTROL	0x00bf	/* 1 byte */
#define URTW_CONFIG5		0x00d8	/* 1 byte */
#define URTW_TXDMA_POLLING	0x00d9	/* 1 byte */
#define URTW_CWR		0x00dc	/* 2 byte */
#define URTW_RETRY_CTR		0x00de	/* 1 byte */
#define URTW_INT_MIG		0x00e2	/* 2 byte */
#define URTW_RDSAR		0x00e4	/* 4 byte */
#define URTW_TID_AC_MAP		0x00e8	/* 2 byte */
#define URTW_ANAPARAM3		0x00ee	/* 1 byte */
#define URTW_8187B_8225_ANAPARAM3_ON	(0x0)
#define URTW_8187B_8225_ANAPARAM3_OFF	(0x0)
#define URTW_8187B_AC_VO	0x00f0	/* 4 byte for 8187B */
#define URTW_FEMR		0x00f4	/* 2 byte */
#define URTW_8187B_AC_VI	0x00f4	/* 4 byte for 8187B */
#define URTW_8187B_AC_BE	0x00f8	/* 4 byte for 8187B */
#define URTW_TALLY_CNT		0x00fa	/* 2 byte */
#define URTW_TALLY_SEL		0x00fc	/* 1 byte */
#define URTW_8187B_AC_BK	0x00fc	/* 4 byte for 8187B */
#define URTW_ADDR_MAGIC2	0x00fe	/* 2 byte */
#define URTW_ADDR_MAGIC3	0x00ff	/* 1 byte */

/* for 8225 */
#define URTW_8225_ADDR_0_MAGIC		0x0
#define URTW_8225_ADDR_0_DATA_MAGIC1	(0x1b7)
#define URTW_8225_ADDR_0_DATA_MAGIC2	(0x0b7)
#define URTW_8225_ADDR_0_DATA_MAGIC3	(0x127)
#define URTW_8225_ADDR_0_DATA_MAGIC4	(0x027)
#define URTW_8225_ADDR_0_DATA_MAGIC5	(0x22f)
#define URTW_8225_ADDR_0_DATA_MAGIC6	(0x2bf)
#define URTW_8225_ADDR_1_MAGIC		0x1
#define URTW_8225_ADDR_2_MAGIC		0x2
#define URTW_8225_ADDR_2_DATA_MAGIC1	(0xc4d)
#define URTW_8225_ADDR_2_DATA_MAGIC2	(0x44d)
#define URTW_8225_ADDR_3_MAGIC		0x3
#define URTW_8225_ADDR_3_DATA_MAGIC1	(0x2)
#define URTW_8225_ADDR_5_MAGIC		0x5
#define URTW_8225_ADDR_5_DATA_MAGIC1	(0x4)
#define URTW_8225_ADDR_6_MAGIC		0x6
#define URTW_8225_ADDR_6_DATA_MAGIC1	(0xe6)
#define URTW_8225_ADDR_6_DATA_MAGIC2	(0x80)
#define URTW_8225_ADDR_7_MAGIC		0x7
#define URTW_8225_ADDR_8_MAGIC		0x8
#define URTW_8225_ADDR_8_DATA_MAGIC1	(0x588)
#define URTW_8225_ADDR_9_MAGIC		0x9
#define URTW_8225_ADDR_9_DATA_MAGIC1	(0x700)
#define URTW_8225_ADDR_C_MAGIC		0xc
#define URTW_8225_ADDR_C_DATA_MAGIC1	(0x850)
#define URTW_8225_ADDR_C_DATA_MAGIC2	(0x050)

/* for EEPROM */
#define URTW_EPROM_CHANPLAN		0x03
#define URTW_EPROM_TXPW_BASE		0x05
#define URTW_EPROM_RFCHIPID		0x06
#define URTW_EPROM_RFCHIPID_RTL8225U	(5)
#define URTW_EPROM_RFCHIPID_RTL8225Z2	(6)
#define URTW_EPROM_MACADDR		0x07
#define URTW_EPROM_TXPW0		0x16
#define URTW_EPROM_TXPW2		0x1b
#define URTW_EPROM_TXPW1		0x3d
#define URTW_EPROM_SWREV		0x3f
#define URTW_EPROM_CID_MASK		(0xff)
#define URTW_EPROM_CID_RSVD0		(0x00)
#define URTW_EPROM_CID_RSVD1		(0xff)
#define URTW_EPROM_CID_ALPHA0		(0x01)
#define URTW_EPROM_CID_SERCOMM_PS	(0x02)
#define URTW_EPROM_CID_HW_LED		(0x03)

/* LED */
#define URTW_CID_DEFAULT		0
#define URTW_CID_8187_ALPHA0		1
#define URTW_CID_8187_SERCOMM_PS	2
#define URTW_CID_8187_HW_LED		3
#define URTW_SW_LED_MODE0		0
#define URTW_SW_LED_MODE1		1
#define URTW_SW_LED_MODE2		2
#define URTW_SW_LED_MODE3		3
#define URTW_HW_LED			4
#define URTW_LED_CTL_POWER_ON		0
#define URTW_LED_CTL_LINK		2
#define URTW_LED_CTL_TX			4
#define URTW_LED_PIN_GPIO0		0
#define URTW_LED_PIN_LED0		1
#define URTW_LED_PIN_LED1		2
#define URTW_LED_UNKNOWN		0
#define URTW_LED_ON			1
#define URTW_LED_OFF			2
#define URTW_LED_BLINK_NORMAL		3
#define URTW_LED_BLINK_SLOWLY		4
#define URTW_LED_POWER_ON_BLINK		5
#define URTW_LED_SCAN_BLINK		6
#define URTW_LED_NO_LINK_BLINK		7
#define URTW_LED_BLINK_CM3		8

/* for extra area */
#define URTW_EPROM_DISABLE		0
#define URTW_EPROM_ENABLE		1
#define URTW_EPROM_DELAY		10
#define URTW_8187_GETREGS_REQ		5
#define URTW_8187_SETREGS_REQ		5
#define URTW_8225_RF_MAX_SENS		6
#define URTW_8225_RF_DEF_SENS		4
#define URTW_DEFAULT_RTS_RETRY		7
#define URTW_DEFAULT_TX_RETRY		7
#define URTW_DEFAULT_RTS_THRESHOLD	2342U
#define URTW_ASIFS_TIME			10
#define URTW_ACKCTS_LEN			14	/* len for ACK and CTS */

struct urtw_8187b_rxhdr {
	uint32_t	flag;
#define URTW_RX_FLAG_LEN		/* 0 ~ 11 bits */
#define URTW_RX_FLAG_ICV_ERR		(1 << 12)
#define URTW_RX_FLAG_CRC32_ERR		(1 << 13)
#define URTW_RX_FLAG_PM			(1 << 14)
#define URTW_RX_FLAG_RX_ERR		(1 << 15)
#define URTW_RX_FLAG_BCAST		(1 << 16)
#define URTW_RX_FLAG_PAM		(1 << 17)
#define URTW_RX_FLAG_MCAST		(1 << 18)
#define URTW_RX_FLAG_QOS		(1 << 19)	/* only for RTL8187B */
#define URTW_RX_FLAG_RXRATE		/* 20 ~ 23 bits */
#define URTW_RX_FLAG_RXRATE_SHIFT	20
#define URTW_RX_FLAG_TRSW		(1 << 24)	/* only for RTL8187B */
#define URTW_RX_FLAG_SPLCP		(1 << 25)
#define URTW_RX_FLAG_FOF		(1 << 26)
#define URTW_RX_FLAG_DMA_FAIL		(1 << 27)
#define URTW_RX_FLAG_LAST		(1 << 28)
#define URTW_RX_FLAG_FIRST		(1 << 29)
#define URTW_RX_FLAG_EOR		(1 << 30)
-#define URTW_RX_FLAG_OWN		(1 << 31)
+#define URTW_RX_FLAG_OWN		(1U << 31)
	uint64_t	mactime;
	uint8_t		noise;
	uint8_t		rssi;
#define URTW_RX_RSSI			/* 0 ~ 6 bits */
#define URTW_RX_RSSI_MASK		0x3f
#define URTW_RX_ANTENNA			(1 << 7)
	uint8_t		agc;
	uint8_t		flag2;
#define URTW_RX_FLAG2_DECRYPTED		(1 << 0)
#define URTW_RX_FLAG2_WAKUP		(1 << 1)
#define URTW_RX_FLAG2_SHIFT		(1 << 2)
#define URTW_RX_FLAG2_RSVD0		/* 3 ~ 7 bits */
	uint16_t	flag3;
#define URTW_RX_FLAG3_NUMMCSI		/* 0 ~ 3 bits */
#define URTW_RX_FLAG3_SNR_L2E		/* 4 ~ 9 bits */
#define URTW_RX_FLAG3_CFO_BIAS		/* 10 ~ 15 bits */
	int8_t		pwdb;
	uint8_t		fot;
} __packed;

struct urtw_8187b_txhdr {
	uint32_t	flag;
#define URTW_TX_FLAG_PKTLEN		/* 0 ~ 11 bits */
#define URTW_TX_FLAG_RSVD0		/* 12 ~ 14 bits */
#define URTW_TX_FLAG_NO_ENC		(1 << 15)
#define URTW_TX_FLAG_SPLCP		(1 << 16)
#define URTW_TX_FLAG_MOREFRAG		(1 << 17)
#define URTW_TX_FLAG_CTS		(1 << 18)
#define URTW_TX_FLAG_RTSRATE		/* 19 ~ 22 bits */
#define URTW_TX_FLAG_RTSRATE_SHIFT	19
#define URTW_TX_FLAG_RTS		(1 << 23)
#define URTW_TX_FLAG_TXRATE		/* 24 ~ 27 bits */
#define URTW_TX_FLAG_TXRATE_SHIFT	24
#define URTW_TX_FLAG_LAST		(1 << 28)
#define URTW_TX_FLAG_FIRST		(1 << 29)
#define URTW_TX_FLAG_DMA		(1 << 30)
-#define URTW_TX_FLAG_OWN		(1 << 31)
+#define URTW_TX_FLAG_OWN		(1U << 31)
	uint16_t	rtsdur;
	uint16_t	len;
#define URTW_TX_LEN			/*
0 ~ 14 bits */ #define URTW_TX_LEN_EXT (1 << 15) uint32_t bufaddr; uint16_t flag1; #define URTW_TX_FLAG1_RXLEN /* 0 ~ 11 bits */ #define URTW_TX_FLAG1_RSVD0 /* 12 ~ 14 bits */ #define URTW_TX_FLAG1_MICCAL (1 << 15) uint16_t txdur; uint32_t nextdescaddr; uint8_t rtsagc; uint8_t retry; uint16_t flag2; #define URTW_TX_FLAG2_RTDB (1 << 0) #define URTW_TX_FLAG2_NOACM (1 << 1) #define URTW_TX_FLAG2_PIFS (1 << 2) #define URTW_TX_FLAG2_RSVD0 /* 3 ~ 6 bits */ #define URTW_TX_FLAG2_RTSRATEFALLBACK /* 7 ~ 10 bits */ #define URTW_TX_FLAG2_RATEFALLBACK /* 11 ~ 15 bits */ uint16_t delaybound; uint16_t flag3; #define URTW_TX_FLAG3_RSVD0 /* 0 ~ 3 bits */ #define URTW_TX_FLAG3_AGC /* 4 ~ 11 bits */ #define URTW_TX_FLAG3_ANTENNA (1 << 12) #define URTW_TX_FLAG3_SPC /* 13 ~ 14 bits */ #define URTW_TX_FLAG3_RSVD1 (1 << 15) uint32_t flag4; #define URTW_TX_FLAG4_LENADJUST /* 0 ~ 1 bits */ #define URTW_TX_FLAG4_RSVD0 (1 << 2) #define URTW_TX_FLAG4_TPCDESEN (1 << 3) #define URTW_TX_FLAG4_TPCPOLARITY /* 4 ~ 5 bits */ #define URTW_TX_FLAG4_TPCEN (1 << 6) #define URTW_TX_FLAG4_PTEN (1 << 7) #define URTW_TX_FLAG4_BCKEY /* 8 ~ 13 bits */ #define URTW_TX_FLAG4_ENBCKEY (1 << 14) #define URTW_TX_FLAG4_ENPMPD (1 << 15) #define URTW_TX_FLAG4_FRAGQSZ /* 16 ~ 31 bits */ } __packed; struct urtw_8187l_rxhdr { uint32_t flag; uint8_t noise; uint8_t rssi; #define URTW_RX_8187L_RSSI /* 0 ~ 6 bits */ #define URTW_RX_8187L_RSSI_MASK 0x3f #define URTW_RX_8187L_ANTENNA (1 << 7) uint8_t agc; uint8_t flag2; #define URTW_RX_8187L_DECRYPTED (1 << 0) #define URTW_RX_8187L_WAKEUP (1 << 1) #define URTW_RX_8187L_SHIFT (1 << 2) #define URTW_RX_8187L_RSVD0 /* 3 ~ 7 bits */ uint64_t mactime; } __packed; struct urtw_8187l_txhdr { uint32_t flag; uint16_t rtsdur; uint16_t len; uint32_t retry; } __packed; Index: head/sys/dev/usb/wlan/if_zydreg.h =================================================================== --- head/sys/dev/usb/wlan/if_zydreg.h (revision 258779) +++ head/sys/dev/usb/wlan/if_zydreg.h (revision 258780) @@ -1,1314 +1,1314 @@ /* $OpenBSD: if_zydreg.h,v 1.19 2006/11/30 19:28:07 damien Exp $ */ /* $NetBSD: if_zydreg.h,v 1.2 2007/06/16 11:18:45 kiyohara Exp $ */ /* $FreeBSD$ */ /*- * Copyright (c) 2006 by Damien Bergamini * Copyright (c) 2006 by Florian Stoehr * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * ZyDAS ZD1211/ZD1211B USB WLAN driver. 
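 *
 * A rough map of the address space defined below (the grouping is
 * inferred from the offsets themselves, not from vendor documentation):
 * PHY registers at 0x9000-0x93ff, control registers at 0x9400-0x95ff,
 * MAC registers at 0x9600-0x97ff with ZD1211B-only ones at 0x9b00,
 * firmware state upward of ZYD_FIRMWARE_START_ADDR (0xee00), and the
 * EEPROM image upward of ZYD_EEPROM_START_HEAD (0xf800).  The ZYD_FW_*
 * offsets further down are relative to the firmware base address
 * fetched from ZYD_FIRMWARE_BASE_ADDR.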
*/ #define ZYD_CR_GPI_EN 0x9418 #define ZYD_CR_RADIO_PD 0x942c #define ZYD_CR_RF2948_PD 0x942c #define ZYD_CR_EN_PS_MANUAL_AGC 0x943c #define ZYD_CR_CONFIG_PHILIPS 0x9440 #define ZYD_CR_I2C_WRITE 0x9444 #define ZYD_CR_SA2400_SER_RP 0x9448 #define ZYD_CR_RADIO_PE 0x9458 #define ZYD_CR_RST_BUS_MASTER 0x945c #define ZYD_CR_RFCFG 0x9464 #define ZYD_CR_HSTSCHG 0x946c #define ZYD_CR_PHY_ON 0x9474 #define ZYD_CR_RX_DELAY 0x9478 #define ZYD_CR_RX_PE_DELAY 0x947c #define ZYD_CR_GPIO_1 0x9490 #define ZYD_CR_GPIO_2 0x9494 #define ZYD_CR_EnZYD_CRyBufMux 0x94a8 #define ZYD_CR_PS_CTRL 0x9500 #define ZYD_CR_ADDA_PWR_DWN 0x9504 #define ZYD_CR_ADDA_MBIAS_WT 0x9508 #define ZYD_CR_INTERRUPT 0x9510 #define ZYD_CR_MAC_PS_STATE 0x950c #define ZYD_CR_ATIM_WND_PERIOD 0x951c #define ZYD_CR_BCN_INTERVAL 0x9520 #define ZYD_CR_PRE_TBTT 0x9524 /* * MAC registers. */ #define ZYD_MAC_MACADRL 0x9610 /* MAC address (low) */ #define ZYD_MAC_MACADRH 0x9614 /* MAC address (high) */ #define ZYD_MAC_BSSADRL 0x9618 /* BSS address (low) */ #define ZYD_MAC_BSSADRH 0x961c /* BSS address (high) */ #define ZYD_MAC_BCNCFG 0x9620 /* BCN configuration */ #define ZYD_MAC_GHTBL 0x9624 /* Group hash table (low) */ #define ZYD_MAC_GHTBH 0x9628 /* Group hash table (high) */ #define ZYD_MAC_RX_TIMEOUT 0x962c /* Rx timeout value */ #define ZYD_MAC_BAS_RATE 0x9630 /* Basic rate setting */ #define ZYD_MAC_MAN_RATE 0x9634 /* Mandatory rate setting */ #define ZYD_MAC_RTSCTSRATE 0x9638 /* RTS CTS rate */ #define ZYD_MAC_BACKOFF_PROTECT 0x963c /* Backoff protection */ #define ZYD_MAC_RX_THRESHOLD 0x9640 /* Rx threshold */ #define ZYD_MAC_TX_PE_CONTROL 0x9644 /* Tx_PE control */ #define ZYD_MAC_AFTER_PNP 0x9648 /* After PnP */ #define ZYD_MAC_RX_PE_DELAY 0x964c /* Rx_pe delay */ #define ZYD_MAC_RX_ADDR2_L 0x9650 /* RX address2 (low) */ #define ZYD_MAC_RX_ADDR2_H 0x9654 /* RX address2 (high) */ #define ZYD_MAC_SIFS_ACK_TIME 0x9658 /* Dynamic SIFS ack time */ #define ZYD_MAC_PHY_DELAY 0x9660 /* PHY delay */ #define ZYD_MAC_PHY_DELAY2 0x966c /* PHY delay */ #define ZYD_MAC_BCNFIFO 0x9670 /* Beacon FIFO I/O port */ #define ZYD_MAC_SNIFFER 0x9674 /* Sniffer on/off */ #define ZYD_MAC_ENCRYPTION_TYPE 0x9678 /* Encryption type */ #define ZYD_MAC_RETRY 0x967c /* Retry time */ #define ZYD_MAC_MISC 0x9680 /* Misc */ #define ZYD_MAC_STMACHINESTAT 0x9684 /* State machine status */ #define ZYD_MAC_TX_UNDERRUN_CNT 0x9688 /* TX underrun counter */ #define ZYD_MAC_RXFILTER 0x968c /* Send to host settings */ #define ZYD_MAC_ACK_EXT 0x9690 /* Acknowledge extension */ #define ZYD_MAC_BCNFIFOST 0x9694 /* BCN FIFO set and status */ #define ZYD_MAC_DIFS_EIFS_SIFS 0x9698 /* DIFS, EIFS & SIFS settings */ #define ZYD_MAC_RX_TIMEOUT_CNT 0x969c /* RX timeout count */ #define ZYD_MAC_RX_TOTAL_FRAME 0x96a0 /* RX total frame count */ #define ZYD_MAC_RX_CRC32_CNT 0x96a4 /* RX CRC32 frame count */ #define ZYD_MAC_RX_CRC16_CNT 0x96a8 /* RX CRC16 frame count */ #define ZYD_MAC_RX_UDEC 0x96ac /* RX unicast decr. error count */ #define ZYD_MAC_RX_OVERRUN_CNT 0x96b0 /* RX FIFO overrun count */ #define ZYD_MAC_RX_MDEC 0x96bc /* RX multicast decr. err. cnt. 
*/ #define ZYD_MAC_NAV_TCR 0x96c4 /* NAV timer count read */ #define ZYD_MAC_BACKOFF_ST_RD 0x96c8 /* Backoff status read */ #define ZYD_MAC_DM_RETRY_CNT_RD 0x96cc /* DM retry count read */ #define ZYD_MAC_RX_ACR 0x96d0 /* RX arbitration count read */ #define ZYD_MAC_TX_CCR 0x96d4 /* Tx complete count read */ #define ZYD_MAC_TCB_ADDR 0x96e8 /* Current PCI process TCP addr */ #define ZYD_MAC_RCB_ADDR 0x96ec /* Next RCB address */ #define ZYD_MAC_CONT_WIN_LIMIT 0x96f0 /* Contention window limit */ #define ZYD_MAC_TX_PKT 0x96f4 /* Tx total packet count read */ #define ZYD_MAC_DL_CTRL 0x96f8 /* Download control */ #define ZYD_MAC_CAM_MODE 0x9700 /* CAM: Continuous Access Mode */ #define ZYD_MACB_TXPWR_CTL1 0x9b00 #define ZYD_MACB_TXPWR_CTL2 0x9b04 #define ZYD_MACB_TXPWR_CTL3 0x9b08 #define ZYD_MACB_TXPWR_CTL4 0x9b0c #define ZYD_MACB_AIFS_CTL1 0x9b10 #define ZYD_MACB_AIFS_CTL2 0x9b14 #define ZYD_MACB_TXOP 0x9b20 #define ZYD_MACB_MAX_RETRY 0x9b28 /* * Miscellaneous registers. */ #define ZYD_FIRMWARE_START_ADDR 0xee00 #define ZYD_FIRMWARE_BASE_ADDR 0xee1d /* Firmware base address */ /* * EEPROM registers. */ #define ZYD_EEPROM_START_HEAD 0xf800 /* EEPROM start */ #define ZYD_EEPROM_SUBID 0xf817 #define ZYD_EEPROM_POD 0xf819 #define ZYD_EEPROM_MAC_ADDR_P1 0xf81b /* Part 1 of the MAC address */ #define ZYD_EEPROM_MAC_ADDR_P2 0xf81d /* Part 2 of the MAC address */ #define ZYD_EEPROM_PWR_CAL 0xf81f /* Calibration */ #define ZYD_EEPROM_PWR_INT 0xf827 /* Calibration */ #define ZYD_EEPROM_ALLOWEDCHAN 0xf82f /* Allowed CH mask, 1 bit each */ #define ZYD_EEPROM_DEVICE_VER 0xf837 /* Device version */ #define ZYD_EEPROM_PHY_REG 0xf83c /* PHY registers */ #define ZYD_EEPROM_36M_CAL 0xf83f /* Calibration */ #define ZYD_EEPROM_11A_INT 0xf847 /* Interpolation */ #define ZYD_EEPROM_48M_CAL 0xf84f /* Calibration */ #define ZYD_EEPROM_48M_INT 0xf857 /* Interpolation */ #define ZYD_EEPROM_54M_CAL 0xf85f /* Calibration */ #define ZYD_EEPROM_54M_INT 0xf867 /* Interpolation */ /* * Firmware registers offsets (relative to fwbase). */ #define ZYD_FW_FIRMWARE_REV 0x0000 /* Firmware version */ #define ZYD_FW_USB_SPEED 0x0001 /* USB speed (!=0 if highspeed) */ #define ZYD_FW_FIX_TX_RATE 0x0002 /* Fixed TX rate */ #define ZYD_FW_LINK_STATUS 0x0003 #define ZYD_FW_SOFT_RESET 0x0004 #define ZYD_FW_FLASH_CHK 0x0005 /* possible flags for register ZYD_FW_LINK_STATUS */ #define ZYD_LED1 (1 << 8) #define ZYD_LED2 (1 << 9) /* * RF IDs. */ #define ZYD_RF_UW2451 0x2 /* not supported yet */ #define ZYD_RF_UCHIP 0x3 /* not supported yet */ #define ZYD_RF_AL2230 0x4 #define ZYD_RF_AL7230B 0x5 #define ZYD_RF_THETA 0x6 /* not supported yet */ #define ZYD_RF_AL2210 0x7 #define ZYD_RF_MAXIM_NEW 0x8 #define ZYD_RF_GCT 0x9 #define ZYD_RF_AL2230S 0xa /* not supported yet */ #define ZYD_RF_RALINK 0xb /* not supported yet */ #define ZYD_RF_INTERSIL 0xc /* not supported yet */ #define ZYD_RF_RFMD 0xd #define ZYD_RF_MAXIM_NEW2 0xe #define ZYD_RF_PHILIPS 0xf /* not supported yet */ /* * PHY registers (8 bits, not documented). 
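 *
 * Apart from ZYD_CR4 through ZYD_CR8, which sit out of natural order,
 * the addresses follow a regular stride: ZYD_CRn lives at 0x9000 + 4 * n.
 * A computed form, were one ever wanted, would be the hypothetical
 *
 *	#define ZYD_CRN(n)	(0x9000 + 4 * (n))
 *
 * but the driver spells every register out explicitly.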
*/ #define ZYD_CR0 0x9000 #define ZYD_CR1 0x9004 #define ZYD_CR2 0x9008 #define ZYD_CR3 0x900c #define ZYD_CR5 0x9010 #define ZYD_CR6 0x9014 #define ZYD_CR7 0x9018 #define ZYD_CR8 0x901c #define ZYD_CR4 0x9020 #define ZYD_CR9 0x9024 #define ZYD_CR10 0x9028 #define ZYD_CR11 0x902c #define ZYD_CR12 0x9030 #define ZYD_CR13 0x9034 #define ZYD_CR14 0x9038 #define ZYD_CR15 0x903c #define ZYD_CR16 0x9040 #define ZYD_CR17 0x9044 #define ZYD_CR18 0x9048 #define ZYD_CR19 0x904c #define ZYD_CR20 0x9050 #define ZYD_CR21 0x9054 #define ZYD_CR22 0x9058 #define ZYD_CR23 0x905c #define ZYD_CR24 0x9060 #define ZYD_CR25 0x9064 #define ZYD_CR26 0x9068 #define ZYD_CR27 0x906c #define ZYD_CR28 0x9070 #define ZYD_CR29 0x9074 #define ZYD_CR30 0x9078 #define ZYD_CR31 0x907c #define ZYD_CR32 0x9080 #define ZYD_CR33 0x9084 #define ZYD_CR34 0x9088 #define ZYD_CR35 0x908c #define ZYD_CR36 0x9090 #define ZYD_CR37 0x9094 #define ZYD_CR38 0x9098 #define ZYD_CR39 0x909c #define ZYD_CR40 0x90a0 #define ZYD_CR41 0x90a4 #define ZYD_CR42 0x90a8 #define ZYD_CR43 0x90ac #define ZYD_CR44 0x90b0 #define ZYD_CR45 0x90b4 #define ZYD_CR46 0x90b8 #define ZYD_CR47 0x90bc #define ZYD_CR48 0x90c0 #define ZYD_CR49 0x90c4 #define ZYD_CR50 0x90c8 #define ZYD_CR51 0x90cc #define ZYD_CR52 0x90d0 #define ZYD_CR53 0x90d4 #define ZYD_CR54 0x90d8 #define ZYD_CR55 0x90dc #define ZYD_CR56 0x90e0 #define ZYD_CR57 0x90e4 #define ZYD_CR58 0x90e8 #define ZYD_CR59 0x90ec #define ZYD_CR60 0x90f0 #define ZYD_CR61 0x90f4 #define ZYD_CR62 0x90f8 #define ZYD_CR63 0x90fc #define ZYD_CR64 0x9100 #define ZYD_CR65 0x9104 #define ZYD_CR66 0x9108 #define ZYD_CR67 0x910c #define ZYD_CR68 0x9110 #define ZYD_CR69 0x9114 #define ZYD_CR70 0x9118 #define ZYD_CR71 0x911c #define ZYD_CR72 0x9120 #define ZYD_CR73 0x9124 #define ZYD_CR74 0x9128 #define ZYD_CR75 0x912c #define ZYD_CR76 0x9130 #define ZYD_CR77 0x9134 #define ZYD_CR78 0x9138 #define ZYD_CR79 0x913c #define ZYD_CR80 0x9140 #define ZYD_CR81 0x9144 #define ZYD_CR82 0x9148 #define ZYD_CR83 0x914c #define ZYD_CR84 0x9150 #define ZYD_CR85 0x9154 #define ZYD_CR86 0x9158 #define ZYD_CR87 0x915c #define ZYD_CR88 0x9160 #define ZYD_CR89 0x9164 #define ZYD_CR90 0x9168 #define ZYD_CR91 0x916c #define ZYD_CR92 0x9170 #define ZYD_CR93 0x9174 #define ZYD_CR94 0x9178 #define ZYD_CR95 0x917c #define ZYD_CR96 0x9180 #define ZYD_CR97 0x9184 #define ZYD_CR98 0x9188 #define ZYD_CR99 0x918c #define ZYD_CR100 0x9190 #define ZYD_CR101 0x9194 #define ZYD_CR102 0x9198 #define ZYD_CR103 0x919c #define ZYD_CR104 0x91a0 #define ZYD_CR105 0x91a4 #define ZYD_CR106 0x91a8 #define ZYD_CR107 0x91ac #define ZYD_CR108 0x91b0 #define ZYD_CR109 0x91b4 #define ZYD_CR110 0x91b8 #define ZYD_CR111 0x91bc #define ZYD_CR112 0x91c0 #define ZYD_CR113 0x91c4 #define ZYD_CR114 0x91c8 #define ZYD_CR115 0x91cc #define ZYD_CR116 0x91d0 #define ZYD_CR117 0x91d4 #define ZYD_CR118 0x91d8 #define ZYD_CR119 0x91dc #define ZYD_CR120 0x91e0 #define ZYD_CR121 0x91e4 #define ZYD_CR122 0x91e8 #define ZYD_CR123 0x91ec #define ZYD_CR124 0x91f0 #define ZYD_CR125 0x91f4 #define ZYD_CR126 0x91f8 #define ZYD_CR127 0x91fc #define ZYD_CR128 0x9200 #define ZYD_CR129 0x9204 #define ZYD_CR130 0x9208 #define ZYD_CR131 0x920c #define ZYD_CR132 0x9210 #define ZYD_CR133 0x9214 #define ZYD_CR134 0x9218 #define ZYD_CR135 0x921c #define ZYD_CR136 0x9220 #define ZYD_CR137 0x9224 #define ZYD_CR138 0x9228 #define ZYD_CR139 0x922c #define ZYD_CR140 0x9230 #define ZYD_CR141 0x9234 #define ZYD_CR142 0x9238 #define ZYD_CR143 0x923c #define ZYD_CR144 0x9240 #define ZYD_CR145 0x9244 #define 
ZYD_CR146 0x9248 #define ZYD_CR147 0x924c #define ZYD_CR148 0x9250 #define ZYD_CR149 0x9254 #define ZYD_CR150 0x9258 #define ZYD_CR151 0x925c #define ZYD_CR152 0x9260 #define ZYD_CR153 0x9264 #define ZYD_CR154 0x9268 #define ZYD_CR155 0x926c #define ZYD_CR156 0x9270 #define ZYD_CR157 0x9274 #define ZYD_CR158 0x9278 #define ZYD_CR159 0x927c #define ZYD_CR160 0x9280 #define ZYD_CR161 0x9284 #define ZYD_CR162 0x9288 #define ZYD_CR163 0x928c #define ZYD_CR164 0x9290 #define ZYD_CR165 0x9294 #define ZYD_CR166 0x9298 #define ZYD_CR167 0x929c #define ZYD_CR168 0x92a0 #define ZYD_CR169 0x92a4 #define ZYD_CR170 0x92a8 #define ZYD_CR171 0x92ac #define ZYD_CR172 0x92b0 #define ZYD_CR173 0x92b4 #define ZYD_CR174 0x92b8 #define ZYD_CR175 0x92bc #define ZYD_CR176 0x92c0 #define ZYD_CR177 0x92c4 #define ZYD_CR178 0x92c8 #define ZYD_CR179 0x92cc #define ZYD_CR180 0x92d0 #define ZYD_CR181 0x92d4 #define ZYD_CR182 0x92d8 #define ZYD_CR183 0x92dc #define ZYD_CR184 0x92e0 #define ZYD_CR185 0x92e4 #define ZYD_CR186 0x92e8 #define ZYD_CR187 0x92ec #define ZYD_CR188 0x92f0 #define ZYD_CR189 0x92f4 #define ZYD_CR190 0x92f8 #define ZYD_CR191 0x92fc #define ZYD_CR192 0x9300 #define ZYD_CR193 0x9304 #define ZYD_CR194 0x9308 #define ZYD_CR195 0x930c #define ZYD_CR196 0x9310 #define ZYD_CR197 0x9314 #define ZYD_CR198 0x9318 #define ZYD_CR199 0x931c #define ZYD_CR200 0x9320 #define ZYD_CR201 0x9324 #define ZYD_CR202 0x9328 #define ZYD_CR203 0x932c #define ZYD_CR204 0x9330 #define ZYD_CR205 0x9334 #define ZYD_CR206 0x9338 #define ZYD_CR207 0x933c #define ZYD_CR208 0x9340 #define ZYD_CR209 0x9344 #define ZYD_CR210 0x9348 #define ZYD_CR211 0x934c #define ZYD_CR212 0x9350 #define ZYD_CR213 0x9354 #define ZYD_CR214 0x9358 #define ZYD_CR215 0x935c #define ZYD_CR216 0x9360 #define ZYD_CR217 0x9364 #define ZYD_CR218 0x9368 #define ZYD_CR219 0x936c #define ZYD_CR220 0x9370 #define ZYD_CR221 0x9374 #define ZYD_CR222 0x9378 #define ZYD_CR223 0x937c #define ZYD_CR224 0x9380 #define ZYD_CR225 0x9384 #define ZYD_CR226 0x9388 #define ZYD_CR227 0x938c #define ZYD_CR228 0x9390 #define ZYD_CR229 0x9394 #define ZYD_CR230 0x9398 #define ZYD_CR231 0x939c #define ZYD_CR232 0x93a0 #define ZYD_CR233 0x93a4 #define ZYD_CR234 0x93a8 #define ZYD_CR235 0x93ac #define ZYD_CR236 0x93b0 #define ZYD_CR240 0x93c0 #define ZYD_CR241 0x93c4 #define ZYD_CR242 0x93c8 #define ZYD_CR243 0x93cc #define ZYD_CR244 0x93d0 #define ZYD_CR245 0x93d4 #define ZYD_CR251 0x93ec #define ZYD_CR252 0x93f0 #define ZYD_CR253 0x93f4 #define ZYD_CR254 0x93f8 #define ZYD_CR255 0x93fc /* copied nearly verbatim from the Linux driver rewrite */ #define ZYD_DEF_PHY \ { \ { ZYD_CR0, 0x0a }, { ZYD_CR1, 0x06 }, { ZYD_CR2, 0x26 }, \ { ZYD_CR3, 0x38 }, { ZYD_CR4, 0x80 }, { ZYD_CR9, 0xa0 }, \ { ZYD_CR10, 0x81 }, { ZYD_CR11, 0x00 }, { ZYD_CR12, 0x7f }, \ { ZYD_CR13, 0x8c }, { ZYD_CR14, 0x80 }, { ZYD_CR15, 0x3d }, \ { ZYD_CR16, 0x20 }, { ZYD_CR17, 0x1e }, { ZYD_CR18, 0x0a }, \ { ZYD_CR19, 0x48 }, { ZYD_CR20, 0x0c }, { ZYD_CR21, 0x0c }, \ { ZYD_CR22, 0x23 }, { ZYD_CR23, 0x90 }, { ZYD_CR24, 0x14 }, \ { ZYD_CR25, 0x40 }, { ZYD_CR26, 0x10 }, { ZYD_CR27, 0x19 }, \ { ZYD_CR28, 0x7f }, { ZYD_CR29, 0x80 }, { ZYD_CR30, 0x4b }, \ { ZYD_CR31, 0x60 }, { ZYD_CR32, 0x43 }, { ZYD_CR33, 0x08 }, \ { ZYD_CR34, 0x06 }, { ZYD_CR35, 0x0a }, { ZYD_CR36, 0x00 }, \ { ZYD_CR37, 0x00 }, { ZYD_CR38, 0x38 }, { ZYD_CR39, 0x0c }, \ { ZYD_CR40, 0x84 }, { ZYD_CR41, 0x2a }, { ZYD_CR42, 0x80 }, \ { ZYD_CR43, 0x10 }, { ZYD_CR44, 0x12 }, { ZYD_CR46, 0xff }, \ { ZYD_CR47, 0x1e }, { ZYD_CR48, 0x26 }, { ZYD_CR49, 0x5b }, \ { 
ZYD_CR64, 0xd0 }, { ZYD_CR65, 0x04 }, { ZYD_CR66, 0x58 }, \ { ZYD_CR67, 0xc9 }, { ZYD_CR68, 0x88 }, { ZYD_CR69, 0x41 }, \ { ZYD_CR70, 0x23 }, { ZYD_CR71, 0x10 }, { ZYD_CR72, 0xff }, \ { ZYD_CR73, 0x32 }, { ZYD_CR74, 0x30 }, { ZYD_CR75, 0x65 }, \ { ZYD_CR76, 0x41 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x30 }, \ { ZYD_CR79, 0x68 }, { ZYD_CR80, 0x64 }, { ZYD_CR81, 0x64 }, \ { ZYD_CR82, 0x00 }, { ZYD_CR83, 0x00 }, { ZYD_CR84, 0x00 }, \ { ZYD_CR85, 0x02 }, { ZYD_CR86, 0x00 }, { ZYD_CR87, 0x00 }, \ { ZYD_CR88, 0xff }, { ZYD_CR89, 0xfc }, { ZYD_CR90, 0x00 }, \ { ZYD_CR91, 0x00 }, { ZYD_CR92, 0x00 }, { ZYD_CR93, 0x08 }, \ { ZYD_CR94, 0x00 }, { ZYD_CR95, 0x00 }, { ZYD_CR96, 0xff }, \ { ZYD_CR97, 0xe7 }, { ZYD_CR98, 0x00 }, { ZYD_CR99, 0x00 }, \ { ZYD_CR100, 0x00 }, { ZYD_CR101, 0xae }, { ZYD_CR102, 0x02 }, \ { ZYD_CR103, 0x00 }, { ZYD_CR104, 0x03 }, { ZYD_CR105, 0x65 }, \ { ZYD_CR106, 0x04 }, { ZYD_CR107, 0x00 }, { ZYD_CR108, 0x0a }, \ { ZYD_CR109, 0xaa }, { ZYD_CR110, 0xaa }, { ZYD_CR111, 0x25 }, \ { ZYD_CR112, 0x25 }, { ZYD_CR113, 0x00 }, { ZYD_CR119, 0x1e }, \ { ZYD_CR125, 0x90 }, { ZYD_CR126, 0x00 }, { ZYD_CR127, 0x00 }, \ { ZYD_CR5, 0x00 }, { ZYD_CR6, 0x00 }, { ZYD_CR7, 0x00 }, \ { ZYD_CR8, 0x00 }, { ZYD_CR9, 0x20 }, { ZYD_CR12, 0xf0 }, \ { ZYD_CR20, 0x0e }, { ZYD_CR21, 0x0e }, { ZYD_CR27, 0x10 }, \ { ZYD_CR44, 0x33 }, { ZYD_CR47, 0x1E }, { ZYD_CR83, 0x24 }, \ { ZYD_CR84, 0x04 }, { ZYD_CR85, 0x00 }, { ZYD_CR86, 0x0C }, \ { ZYD_CR87, 0x12 }, { ZYD_CR88, 0x0C }, { ZYD_CR89, 0x00 }, \ { ZYD_CR90, 0x10 }, { ZYD_CR91, 0x08 }, { ZYD_CR93, 0x00 }, \ { ZYD_CR94, 0x01 }, { ZYD_CR95, 0x00 }, { ZYD_CR96, 0x50 }, \ { ZYD_CR97, 0x37 }, { ZYD_CR98, 0x35 }, { ZYD_CR101, 0x13 }, \ { ZYD_CR102, 0x27 }, { ZYD_CR103, 0x27 }, { ZYD_CR104, 0x18 }, \ { ZYD_CR105, 0x12 }, { ZYD_CR109, 0x27 }, { ZYD_CR110, 0x27 }, \ { ZYD_CR111, 0x27 }, { ZYD_CR112, 0x27 }, { ZYD_CR113, 0x27 }, \ { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x26 }, { ZYD_CR116, 0x24 }, \ { ZYD_CR117, 0xfc }, { ZYD_CR118, 0xfa }, { ZYD_CR120, 0x4f }, \ { ZYD_CR125, 0xaa }, { ZYD_CR127, 0x03 }, { ZYD_CR128, 0x14 }, \ { ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, { ZYD_CR131, 0x0C }, \ { ZYD_CR136, 0xdf }, { ZYD_CR137, 0x40 }, { ZYD_CR138, 0xa0 }, \ { ZYD_CR139, 0xb0 }, { ZYD_CR140, 0x99 }, { ZYD_CR141, 0x82 }, \ { ZYD_CR142, 0x54 }, { ZYD_CR143, 0x1c }, { ZYD_CR144, 0x6c }, \ { ZYD_CR147, 0x07 }, { ZYD_CR148, 0x4c }, { ZYD_CR149, 0x50 }, \ { ZYD_CR150, 0x0e }, { ZYD_CR151, 0x18 }, { ZYD_CR160, 0xfe }, \ { ZYD_CR161, 0xee }, { ZYD_CR162, 0xaa }, { ZYD_CR163, 0xfa }, \ { ZYD_CR164, 0xfa }, { ZYD_CR165, 0xea }, { ZYD_CR166, 0xbe }, \ { ZYD_CR167, 0xbe }, { ZYD_CR168, 0x6a }, { ZYD_CR169, 0xba }, \ { ZYD_CR170, 0xba }, { ZYD_CR171, 0xba }, { ZYD_CR204, 0x7d }, \ { ZYD_CR203, 0x30 }, { 0, 0} \ } #define ZYD_DEF_PHYB \ { \ { ZYD_CR0, 0x14 }, { ZYD_CR1, 0x06 }, { ZYD_CR2, 0x26 }, \ { ZYD_CR3, 0x38 }, { ZYD_CR4, 0x80 }, { ZYD_CR9, 0xe0 }, \ { ZYD_CR10, 0x81 }, { ZYD_CR11, 0x00 }, { ZYD_CR12, 0xf0 }, \ { ZYD_CR13, 0x8c }, { ZYD_CR14, 0x80 }, { ZYD_CR15, 0x3d }, \ { ZYD_CR16, 0x20 }, { ZYD_CR17, 0x1e }, { ZYD_CR18, 0x0a }, \ { ZYD_CR19, 0x48 }, { ZYD_CR20, 0x10 }, { ZYD_CR21, 0x0e }, \ { ZYD_CR22, 0x23 }, { ZYD_CR23, 0x90 }, { ZYD_CR24, 0x14 }, \ { ZYD_CR25, 0x40 }, { ZYD_CR26, 0x10 }, { ZYD_CR27, 0x10 }, \ { ZYD_CR28, 0x7f }, { ZYD_CR29, 0x80 }, { ZYD_CR30, 0x4b }, \ { ZYD_CR31, 0x60 }, { ZYD_CR32, 0x43 }, { ZYD_CR33, 0x08 }, \ { ZYD_CR34, 0x06 }, { ZYD_CR35, 0x0a }, { ZYD_CR36, 0x00 }, \ { ZYD_CR37, 0x00 }, { ZYD_CR38, 0x38 }, { ZYD_CR39, 0x0c }, \ { ZYD_CR40, 0x84 }, { 
ZYD_CR41, 0x2a }, { ZYD_CR42, 0x80 }, \ { ZYD_CR43, 0x10 }, { ZYD_CR44, 0x33 }, { ZYD_CR46, 0xff }, \ { ZYD_CR47, 0x1E }, { ZYD_CR48, 0x26 }, { ZYD_CR49, 0x5b }, \ { ZYD_CR64, 0xd0 }, { ZYD_CR65, 0x04 }, { ZYD_CR66, 0x58 }, \ { ZYD_CR67, 0xc9 }, { ZYD_CR68, 0x88 }, { ZYD_CR69, 0x41 }, \ { ZYD_CR70, 0x23 }, { ZYD_CR71, 0x10 }, { ZYD_CR72, 0xff }, \ { ZYD_CR73, 0x32 }, { ZYD_CR74, 0x30 }, { ZYD_CR75, 0x65 }, \ { ZYD_CR76, 0x41 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x30 }, \ { ZYD_CR79, 0xf0 }, { ZYD_CR80, 0x64 }, { ZYD_CR81, 0x64 }, \ { ZYD_CR82, 0x00 }, { ZYD_CR83, 0x24 }, { ZYD_CR84, 0x04 }, \ { ZYD_CR85, 0x00 }, { ZYD_CR86, 0x0c }, { ZYD_CR87, 0x12 }, \ { ZYD_CR88, 0x0c }, { ZYD_CR89, 0x00 }, { ZYD_CR90, 0x58 }, \ { ZYD_CR91, 0x04 }, { ZYD_CR92, 0x00 }, { ZYD_CR93, 0x00 }, \ { ZYD_CR94, 0x01 }, { ZYD_CR95, 0x20 }, { ZYD_CR96, 0x50 }, \ { ZYD_CR97, 0x37 }, { ZYD_CR98, 0x35 }, { ZYD_CR99, 0x00 }, \ { ZYD_CR100, 0x01 }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \ { ZYD_CR103, 0x27 }, { ZYD_CR104, 0x18 }, { ZYD_CR105, 0x12 }, \ { ZYD_CR106, 0x04 }, { ZYD_CR107, 0x00 }, { ZYD_CR108, 0x0a }, \ { ZYD_CR109, 0x27 }, { ZYD_CR110, 0x27 }, { ZYD_CR111, 0x27 }, \ { ZYD_CR112, 0x27 }, { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, \ { ZYD_CR115, 0x26 }, { ZYD_CR116, 0x24 }, { ZYD_CR117, 0xfc }, \ { ZYD_CR118, 0xfa }, { ZYD_CR119, 0x1e }, { ZYD_CR125, 0x90 }, \ { ZYD_CR126, 0x00 }, { ZYD_CR127, 0x00 }, { ZYD_CR128, 0x14 }, \ { ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, { ZYD_CR131, 0x0c }, \ { ZYD_CR136, 0xdf }, { ZYD_CR137, 0xa0 }, { ZYD_CR138, 0xa8 }, \ { ZYD_CR139, 0xb4 }, { ZYD_CR140, 0x98 }, { ZYD_CR141, 0x82 }, \ { ZYD_CR142, 0x53 }, { ZYD_CR143, 0x1c }, { ZYD_CR144, 0x6c }, \ { ZYD_CR147, 0x07 }, { ZYD_CR148, 0x40 }, { ZYD_CR149, 0x40 }, \ { ZYD_CR150, 0x14 }, { ZYD_CR151, 0x18 }, { ZYD_CR159, 0x70 }, \ { ZYD_CR160, 0xfe }, { ZYD_CR161, 0xee }, { ZYD_CR162, 0xaa }, \ { ZYD_CR163, 0xfa }, { ZYD_CR164, 0xfa }, { ZYD_CR165, 0xea }, \ { ZYD_CR166, 0xbe }, { ZYD_CR167, 0xbe }, { ZYD_CR168, 0x6a }, \ { ZYD_CR169, 0xba }, { ZYD_CR170, 0xba }, { ZYD_CR171, 0xba }, \ { ZYD_CR204, 0x7d }, { ZYD_CR203, 0x30 }, \ { 0, 0 } \ } #define ZYD_RFMD_PHY \ { \ { ZYD_CR2, 0x1e }, { ZYD_CR9, 0x20 }, { ZYD_CR10, 0x89 }, \ { ZYD_CR11, 0x00 }, { ZYD_CR15, 0xd0 }, { ZYD_CR17, 0x68 }, \ { ZYD_CR19, 0x4a }, { ZYD_CR20, 0x0c }, { ZYD_CR21, 0x0e }, \ { ZYD_CR23, 0x48 }, { ZYD_CR24, 0x14 }, { ZYD_CR26, 0x90 }, \ { ZYD_CR27, 0x30 }, { ZYD_CR29, 0x20 }, { ZYD_CR31, 0xb2 }, \ { ZYD_CR32, 0x43 }, { ZYD_CR33, 0x28 }, { ZYD_CR38, 0x30 }, \ { ZYD_CR34, 0x0f }, { ZYD_CR35, 0xf0 }, { ZYD_CR41, 0x2a }, \ { ZYD_CR46, 0x7f }, { ZYD_CR47, 0x1e }, { ZYD_CR51, 0xc5 }, \ { ZYD_CR52, 0xc5 }, { ZYD_CR53, 0xc5 }, { ZYD_CR79, 0x58 }, \ { ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR82, 0x00 }, \ { ZYD_CR83, 0x24 }, { ZYD_CR84, 0x04 }, { ZYD_CR85, 0x00 }, \ { ZYD_CR86, 0x10 }, { ZYD_CR87, 0x2a }, { ZYD_CR88, 0x10 }, \ { ZYD_CR89, 0x24 }, { ZYD_CR90, 0x18 }, { ZYD_CR91, 0x00 }, \ { ZYD_CR92, 0x0a }, { ZYD_CR93, 0x00 }, { ZYD_CR94, 0x01 }, \ { ZYD_CR95, 0x00 }, { ZYD_CR96, 0x40 }, { ZYD_CR97, 0x37 }, \ { ZYD_CR98, 0x05 }, { ZYD_CR99, 0x28 }, { ZYD_CR100, 0x00 }, \ { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, { ZYD_CR103, 0x27 }, \ { ZYD_CR104, 0x18 }, { ZYD_CR105, 0x12 }, { ZYD_CR106, 0x1a }, \ { ZYD_CR107, 0x24 }, { ZYD_CR108, 0x0a }, { ZYD_CR109, 0x13 }, \ { ZYD_CR110, 0x2f }, { ZYD_CR111, 0x27 }, { ZYD_CR112, 0x27 }, \ { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x40 }, \ { ZYD_CR116, 0x40 }, { ZYD_CR117, 0xf0 }, { ZYD_CR118, 0xf0 }, \ { 
ZYD_CR119, 0x16 }, { ZYD_CR122, 0x00 }, { ZYD_CR127, 0x03 }, \ { ZYD_CR131, 0x08 }, { ZYD_CR138, 0x28 }, { ZYD_CR148, 0x44 }, \ { ZYD_CR150, 0x10 }, { ZYD_CR169, 0xbb }, { ZYD_CR170, 0xbb } \ } #define ZYD_RFMD_RF \ { \ 0x000007, 0x07dd43, 0x080959, 0x0e6666, 0x116a57, 0x17dd43, \ 0x1819f9, 0x1e6666, 0x214554, 0x25e7fa, 0x27fffa, 0x294128, \ 0x2c0000, 0x300000, 0x340000, 0x381e0f, 0x6c180f \ } #define ZYD_RFMD_CHANTABLE \ { \ { 0x181979, 0x1e6666 }, \ { 0x181989, 0x1e6666 }, \ { 0x181999, 0x1e6666 }, \ { 0x1819a9, 0x1e6666 }, \ { 0x1819b9, 0x1e6666 }, \ { 0x1819c9, 0x1e6666 }, \ { 0x1819d9, 0x1e6666 }, \ { 0x1819e9, 0x1e6666 }, \ { 0x1819f9, 0x1e6666 }, \ { 0x181a09, 0x1e6666 }, \ { 0x181a19, 0x1e6666 }, \ { 0x181a29, 0x1e6666 }, \ { 0x181a39, 0x1e6666 }, \ { 0x181a60, 0x1c0000 } \ } #define ZYD_AL2230_PHY \ { \ { ZYD_CR15, 0x20 }, { ZYD_CR23, 0x40 }, { ZYD_CR24, 0x20 }, \ { ZYD_CR26, 0x11 }, { ZYD_CR28, 0x3e }, { ZYD_CR29, 0x00 }, \ { ZYD_CR44, 0x33 }, { ZYD_CR106, 0x2a }, { ZYD_CR107, 0x1a }, \ { ZYD_CR109, 0x09 }, { ZYD_CR110, 0x27 }, { ZYD_CR111, 0x2b }, \ { ZYD_CR112, 0x2b }, { ZYD_CR119, 0x0a }, { ZYD_CR10, 0x89 }, \ { ZYD_CR17, 0x28 }, { ZYD_CR26, 0x93 }, { ZYD_CR34, 0x30 }, \ { ZYD_CR35, 0x3e }, { ZYD_CR41, 0x24 }, { ZYD_CR44, 0x32 }, \ { ZYD_CR46, 0x96 }, { ZYD_CR47, 0x1e }, { ZYD_CR79, 0x58 }, \ { ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR87, 0x0a }, \ { ZYD_CR89, 0x04 }, { ZYD_CR92, 0x0a }, { ZYD_CR99, 0x28 }, \ { ZYD_CR100, 0x00 }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \ { ZYD_CR106, 0x24 }, { ZYD_CR107, 0x2a }, { ZYD_CR109, 0x09 }, \ { ZYD_CR110, 0x13 }, { ZYD_CR111, 0x1f }, { ZYD_CR112, 0x1f }, \ { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, \ { ZYD_CR116, 0x24 }, { ZYD_CR117, 0xf4 }, { ZYD_CR118, 0xfc }, \ { ZYD_CR119, 0x10 }, { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x77 }, \ { ZYD_CR122, 0xe0 }, { ZYD_CR137, 0x88 }, { ZYD_CR252, 0xff }, \ { ZYD_CR253, 0xff }, { ZYD_CR251, 0x2f }, { ZYD_CR251, 0x3f }, \ { ZYD_CR138, 0x28 }, { ZYD_CR203, 0x06 } \ } #define ZYD_AL2230_PHY_B \ { \ { ZYD_CR10, 0x89 }, { ZYD_CR15, 0x20 }, { ZYD_CR17, 0x2B }, \ { ZYD_CR23, 0x40 }, { ZYD_CR24, 0x20 }, { ZYD_CR26, 0x93 }, \ { ZYD_CR28, 0x3e }, { ZYD_CR29, 0x00 }, { ZYD_CR33, 0x28 }, \ { ZYD_CR34, 0x30 }, { ZYD_CR35, 0x3e }, { ZYD_CR41, 0x24 }, \ { ZYD_CR44, 0x32 }, { ZYD_CR46, 0x99 }, { ZYD_CR47, 0x1e }, \ { ZYD_CR48, 0x06 }, { ZYD_CR49, 0xf9 }, { ZYD_CR51, 0x01 }, \ { ZYD_CR52, 0x80 }, { ZYD_CR53, 0x7e }, { ZYD_CR65, 0x00 }, \ { ZYD_CR66, 0x00 }, { ZYD_CR67, 0x00 }, { ZYD_CR68, 0x00 }, \ { ZYD_CR69, 0x28 }, { ZYD_CR79, 0x58 }, { ZYD_CR80, 0x30 }, \ { ZYD_CR81, 0x30 }, { ZYD_CR87, 0x0a }, { ZYD_CR89, 0x04 }, \ { ZYD_CR91, 0x00 }, { ZYD_CR92, 0x0a }, { ZYD_CR98, 0x8d }, \ { ZYD_CR99, 0x00 }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \ { ZYD_CR106, 0x24 }, { ZYD_CR107, 0x2a }, { ZYD_CR109, 0x13 }, \ { ZYD_CR110, 0x1f }, { ZYD_CR111, 0x1f }, { ZYD_CR112, 0x1f }, \ { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x26 }, \ { ZYD_CR116, 0x24 }, { ZYD_CR117, 0xfa }, { ZYD_CR118, 0xfa }, \ { ZYD_CR119, 0x10 }, { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x6c }, \ { ZYD_CR122, 0xfc }, { ZYD_CR123, 0x57 }, { ZYD_CR125, 0xad }, \ { ZYD_CR126, 0x6c }, { ZYD_CR127, 0x03 }, { ZYD_CR137, 0x50 }, \ { ZYD_CR138, 0xa8 }, { ZYD_CR144, 0xac }, { ZYD_CR150, 0x0d }, \ { ZYD_CR252, 0x34 }, { ZYD_CR253, 0x34 } \ } #define ZYD_AL2230_PHY_PART1 \ { \ { ZYD_CR240, 0x57 }, { ZYD_CR9, 0xe0 } \ } #define ZYD_AL2230_PHY_PART2 \ { \ { ZYD_CR251, 0x2f }, { ZYD_CR251, 0x7f }, \ } #define ZYD_AL2230_PHY_PART3 \ { \ { 
ZYD_CR128, 0x14 }, { ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, \ } #define ZYD_AL2230S_PHY_INIT \ { \ { ZYD_CR47, 0x1e }, { ZYD_CR106, 0x22 }, { ZYD_CR107, 0x2a }, \ { ZYD_CR109, 0x13 }, { ZYD_CR118, 0xf8 }, { ZYD_CR119, 0x12 }, \ { ZYD_CR122, 0xe0 }, { ZYD_CR128, 0x10 }, { ZYD_CR129, 0x0e }, \ { ZYD_CR130, 0x10 } \ } #define ZYD_AL2230_PHY_FINI_PART1 \ { \ { ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR79, 0x58 }, \ { ZYD_CR12, 0xf0 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x58 }, \ { ZYD_CR203, 0x06 }, { ZYD_CR240, 0x80 }, \ } #define ZYD_AL2230_RF_PART1 \ { \ 0x03f790, 0x033331, 0x00000d, 0x0b3331, 0x03b812, 0x00fff3 \ } #define ZYD_AL2230_RF_PART2 \ { \ 0x000da4, 0x0f4dc5, 0x0805b6, 0x011687, 0x000688, 0x0403b9, \ 0x00dbba, 0x00099b, 0x0bdffc, 0x00000d, 0x00500f \ } #define ZYD_AL2230_RF_PART3 \ { \ 0x00d00f, 0x004c0f, 0x00540f, 0x00700f, 0x00500f \ } #define ZYD_AL2230_RF_B \ { \ 0x03f790, 0x033331, 0x00000d, 0x0b3331, 0x03b812, 0x00fff3, \ 0x0005a4, 0x0f4dc5, 0x0805b6, 0x0146c7, 0x000688, 0x0403b9, \ 0x00dbba, 0x00099b, 0x0bdffc, 0x00000d, 0x00580f \ } #define ZYD_AL2230_RF_B_PART1 \ { \ 0x8cccd0, 0x481dc0, 0xcfff00, 0x25a000 \ } #define ZYD_AL2230_RF_B_PART2 \ { \ 0x25a000, 0xa3b2f0, 0x6da010, 0xe36280, 0x116000, 0x9dc020, \ 0x5ddb00, 0xd99000, 0x3ffbd0, 0xb00000, 0xf01a00 \ } #define ZYD_AL2230_RF_B_PART3 \ { \ 0xf01b00, 0xf01e00, 0xf01a00 \ } #define ZYD_AL2230_CHANTABLE \ { \ { 0x03f790, 0x033331, 0x00000d }, \ { 0x03f790, 0x0b3331, 0x00000d }, \ { 0x03e790, 0x033331, 0x00000d }, \ { 0x03e790, 0x0b3331, 0x00000d }, \ { 0x03f7a0, 0x033331, 0x00000d }, \ { 0x03f7a0, 0x0b3331, 0x00000d }, \ { 0x03e7a0, 0x033331, 0x00000d }, \ { 0x03e7a0, 0x0b3331, 0x00000d }, \ { 0x03f7b0, 0x033331, 0x00000d }, \ { 0x03f7b0, 0x0b3331, 0x00000d }, \ { 0x03e7b0, 0x033331, 0x00000d }, \ { 0x03e7b0, 0x0b3331, 0x00000d }, \ { 0x03f7c0, 0x033331, 0x00000d }, \ { 0x03e7c0, 0x066661, 0x00000d } \ } #define ZYD_AL2230_CHANTABLE_B \ { \ { 0x09efc0, 0x8cccc0, 0xb00000 }, \ { 0x09efc0, 0x8cccd0, 0xb00000 }, \ { 0x09e7c0, 0x8cccc0, 0xb00000 }, \ { 0x09e7c0, 0x8cccd0, 0xb00000 }, \ { 0x05efc0, 0x8cccc0, 0xb00000 }, \ { 0x05efc0, 0x8cccd0, 0xb00000 }, \ { 0x05e7c0, 0x8cccc0, 0xb00000 }, \ { 0x05e7c0, 0x8cccd0, 0xb00000 }, \ { 0x0defc0, 0x8cccc0, 0xb00000 }, \ { 0x0defc0, 0x8cccd0, 0xb00000 }, \ { 0x0de7c0, 0x8cccc0, 0xb00000 }, \ { 0x0de7c0, 0x8cccd0, 0xb00000 }, \ { 0x03efc0, 0x8cccc0, 0xb00000 }, \ { 0x03e7c0, 0x866660, 0xb00000 } \ } #define ZYD_AL7230B_PHY_1 \ { \ { ZYD_CR240, 0x57 }, { ZYD_CR15, 0x20 }, { ZYD_CR23, 0x40 }, \ { ZYD_CR24, 0x20 }, { ZYD_CR26, 0x11 }, { ZYD_CR28, 0x3e }, \ { ZYD_CR29, 0x00 }, { ZYD_CR44, 0x33 }, { ZYD_CR106, 0x22 }, \ { ZYD_CR107, 0x1a }, { ZYD_CR109, 0x09 }, { ZYD_CR110, 0x27 }, \ { ZYD_CR111, 0x2b }, { ZYD_CR112, 0x2b }, { ZYD_CR119, 0x0a }, \ { ZYD_CR122, 0xfc }, { ZYD_CR10, 0x89 }, { ZYD_CR17, 0x28 }, \ { ZYD_CR26, 0x93 }, { ZYD_CR34, 0x30 }, { ZYD_CR35, 0x3e }, \ { ZYD_CR41, 0x24 }, { ZYD_CR44, 0x32 }, { ZYD_CR46, 0x96 }, \ { ZYD_CR47, 0x1e }, { ZYD_CR79, 0x58 }, { ZYD_CR80, 0x30 }, \ { ZYD_CR81, 0x30 }, { ZYD_CR87, 0x0a }, { ZYD_CR89, 0x04 }, \ { ZYD_CR92, 0x0a }, { ZYD_CR99, 0x28 }, { ZYD_CR100, 0x02 }, \ { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, { ZYD_CR106, 0x22 }, \ { ZYD_CR107, 0x3f }, { ZYD_CR109, 0x09 }, { ZYD_CR110, 0x1f }, \ { ZYD_CR111, 0x1f }, { ZYD_CR112, 0x1f }, { ZYD_CR113, 0x27 }, \ { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, { ZYD_CR116, 0x3f }, \ { ZYD_CR117, 0xfa }, { ZYD_CR118, 0xfc }, { ZYD_CR119, 0x10 }, \ { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x77 }, { ZYD_CR137, 
0x88 }, \ { ZYD_CR138, 0xa8 }, { ZYD_CR252, 0x34 }, { ZYD_CR253, 0x34 }, \ { ZYD_CR251, 0x2f } \ } #define ZYD_AL7230B_PHY_2 \ { \ { ZYD_CR251, 0x3f }, { ZYD_CR128, 0x14 }, { ZYD_CR129, 0x12 }, \ { ZYD_CR130, 0x10 }, { ZYD_CR38, 0x38 }, { ZYD_CR136, 0xdf } \ } #define ZYD_AL7230B_PHY_3 \ { \ { ZYD_CR203, 0x06 }, { ZYD_CR240, 0x80 } \ } #define ZYD_AL7230B_RF_1 \ { \ 0x09ec04, 0x8cccc8, 0x4ff821, 0xc5fbfc, 0x21ebfe, 0xafd401, \ 0x6cf56a, 0xe04073, 0x193d76, 0x9dd844, 0x500007, 0xd8c010, \ 0x3c9000, 0xbfffff, 0x700000, 0xf15d58 \ } #define ZYD_AL7230B_RF_2 \ { \ 0xf15d59, 0xf15d5c, 0xf15d58 \ } #define ZYD_AL7230B_RF_SETCHANNEL \ { \ 0x4ff821, 0xc5fbfc, 0x21ebfe, 0xafd401, 0x6cf56a, 0xe04073, \ 0x193d76, 0x9dd844, 0x500007, 0xd8c010, 0x3c9000, 0xf15d58 \ } #define ZYD_AL7230B_CHANTABLE \ { \ { 0x09ec00, 0x8cccc8 }, \ { 0x09ec00, 0x8cccd8 }, \ { 0x09ec00, 0x8cccc0 }, \ { 0x09ec00, 0x8cccd0 }, \ { 0x05ec00, 0x8cccc8 }, \ { 0x05ec00, 0x8cccd8 }, \ { 0x05ec00, 0x8cccc0 }, \ { 0x05ec00, 0x8cccd0 }, \ { 0x0dec00, 0x8cccc8 }, \ { 0x0dec00, 0x8cccd8 }, \ { 0x0dec00, 0x8cccc0 }, \ { 0x0dec00, 0x8cccd0 }, \ { 0x03ec00, 0x8cccc8 }, \ { 0x03ec00, 0x866660 } \ } #define ZYD_AL2210_PHY \ { \ { ZYD_CR9, 0xe0 }, { ZYD_CR10, 0x91 }, { ZYD_CR12, 0x90 }, \ { ZYD_CR15, 0xd0 }, { ZYD_CR16, 0x40 }, { ZYD_CR17, 0x58 }, \ { ZYD_CR18, 0x04 }, { ZYD_CR23, 0x66 }, { ZYD_CR24, 0x14 }, \ { ZYD_CR26, 0x90 }, { ZYD_CR31, 0x80 }, { ZYD_CR34, 0x06 }, \ { ZYD_CR35, 0x3e }, { ZYD_CR38, 0x38 }, { ZYD_CR46, 0x90 }, \ { ZYD_CR47, 0x1e }, { ZYD_CR64, 0x64 }, { ZYD_CR79, 0xb5 }, \ { ZYD_CR80, 0x38 }, { ZYD_CR81, 0x30 }, { ZYD_CR113, 0xc0 }, \ { ZYD_CR127, 0x03 } \ } #define ZYD_AL2210_RF \ { \ 0x2396c0, 0x00fcb1, 0x358132, 0x0108b3, 0xc77804, 0x456415, \ 0xff2226, 0x806667, 0x7860f8, 0xbb01c9, 0x00000a, 0x00000b \ } #define ZYD_AL2210_CHANTABLE \ { \ 0x0196c0, 0x019710, 0x019760, 0x0197b0, 0x019800, 0x019850, \ 0x0198a0, 0x0198f0, 0x019940, 0x019990, 0x0199e0, 0x019a30, \ 0x019a80, 0x019b40 \ } #define ZYD_GCT_PHY \ { \ { ZYD_CR10, 0x89 }, { ZYD_CR15, 0x20 }, { ZYD_CR17, 0x28 }, \ { ZYD_CR23, 0x38 }, { ZYD_CR24, 0x20 }, { ZYD_CR26, 0x93 }, \ { ZYD_CR27, 0x15 }, { ZYD_CR28, 0x3e }, { ZYD_CR29, 0x00 }, \ { ZYD_CR33, 0x28 }, { ZYD_CR34, 0x30 }, { ZYD_CR35, 0x43 }, \ { ZYD_CR41, 0x24 }, { ZYD_CR44, 0x32 }, { ZYD_CR46, 0x92 }, \ { ZYD_CR47, 0x1e }, { ZYD_CR48, 0x04 }, { ZYD_CR49, 0xfa }, \ { ZYD_CR79, 0x58 }, { ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, \ { ZYD_CR87, 0x0a }, { ZYD_CR89, 0x04 }, { ZYD_CR91, 0x00 }, \ { ZYD_CR92, 0x0a }, { ZYD_CR98, 0x8d }, { ZYD_CR99, 0x28 }, \ { ZYD_CR100, 0x02 }, { ZYD_CR101, 0x09 }, { ZYD_CR102, 0x27 }, \ { ZYD_CR106, 0x1c }, { ZYD_CR107, 0x1c }, { ZYD_CR109, 0x13 }, \ { ZYD_CR110, 0x1f }, { ZYD_CR111, 0x13 }, { ZYD_CR112, 0x1f }, \ { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x23 }, { ZYD_CR115, 0x24 }, \ { ZYD_CR116, 0x24 }, { ZYD_CR117, 0xfa }, { ZYD_CR118, 0xf0 }, \ { ZYD_CR119, 0x1a }, { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x1f }, \ { ZYD_CR122, 0xf0 }, { ZYD_CR123, 0x57 }, { ZYD_CR125, 0xad }, \ { ZYD_CR126, 0x6c }, { ZYD_CR127, 0x03 }, { ZYD_CR128, 0x14 }, \ { ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, { ZYD_CR137, 0x50 }, \ { ZYD_CR138, 0xa8 }, { ZYD_CR144, 0xac }, { ZYD_CR146, 0x20 }, \ { ZYD_CR252, 0xff }, { ZYD_CR253, 0xff } \ } #define ZYD_GCT_RF \ { \ 0x40002b, 0x519e4f, 0x6f81ad, 0x73fffe, 0x25f9c, 0x100047, \ 0x200999, 0x307602, 0x346063, \ } #define ZYD_GCT_VCO \ { \ { 0x664d, 0x604d, 0x6675, 0x6475, 0x6655, 0x6455, 0x6665 }, \ { 0x666d, 0x606d, 0x664d, 0x644d, 0x6675, 0x6475, 0x6655 }, \ { 0x665d, 0x605d, 
0x666d, 0x646d, 0x664d, 0x644d, 0x6675 }, \ { 0x667d, 0x607d, 0x665d, 0x645d, 0x666d, 0x646d, 0x664d }, \ { 0x6643, 0x6043, 0x667d, 0x647d, 0x665d, 0x645d, 0x666d }, \ { 0x6663, 0x6063, 0x6643, 0x6443, 0x667d, 0x647d, 0x665d }, \ { 0x6653, 0x6053, 0x6663, 0x6463, 0x6643, 0x6443, 0x667d }, \ { 0x6673, 0x6073, 0x6653, 0x6453, 0x6663, 0x6463, 0x6643 }, \ { 0x664b, 0x604b, 0x6673, 0x6473, 0x6653, 0x6453, 0x6663 }, \ { 0x666b, 0x606b, 0x664b, 0x644b, 0x6673, 0x6473, 0x6653 }, \ { 0x665b, 0x605b, 0x666b, 0x646b, 0x664b, 0x644b, 0x6673 } \ } #define ZYD_GCT_TXGAIN \ { \ 0x0e313, 0x0fb13, 0x0e093, 0x0f893, 0x0ea93, 0x1f093, 0x1f493, \ 0x1f693, 0x1f393, 0x1f35b, 0x1e6db, 0x1ff3f, 0x1ffff, 0x361d7, \ 0x37fbf, 0x3ff8b, 0x3ff33, 0x3fb3f, 0x3ffff \ } #define ZYD_GCT_CHANNEL_ACAL \ { \ 0x106847, 0x106847, 0x106867, 0x106867, 0x106867, 0x106867, \ 0x106857, 0x106857, 0x106857, 0x106857, 0x106877, 0x106877, \ 0x106877, 0x10684f \ } #define ZYD_GCT_CHANNEL_STD \ { \ 0x100047, 0x100047, 0x100067, 0x100067, 0x100067, 0x100067, \ 0x100057, 0x100057, 0x100057, 0x100057, 0x100077, 0x100077, \ 0x100077, 0x10004f \ } #define ZYD_GCT_CHANNEL_DIV \ { \ 0x200999, 0x20099b, 0x200998, 0x20099a, 0x200999, 0x20099b, \ 0x200998, 0x20099a, 0x200999, 0x20099b, 0x200998, 0x20099a, \ 0x200999, 0x200ccc \ } #define ZYD_MAXIM2_PHY \ { \ { ZYD_CR23, 0x40 }, { ZYD_CR15, 0x20 }, { ZYD_CR28, 0x3e }, \ { ZYD_CR29, 0x00 }, { ZYD_CR26, 0x11 }, { ZYD_CR44, 0x33 }, \ { ZYD_CR106, 0x2a }, { ZYD_CR107, 0x1a }, { ZYD_CR109, 0x2b }, \ { ZYD_CR110, 0x2b }, { ZYD_CR111, 0x2b }, { ZYD_CR112, 0x2b }, \ { ZYD_CR10, 0x89 }, { ZYD_CR17, 0x20 }, { ZYD_CR26, 0x93 }, \ { ZYD_CR34, 0x30 }, { ZYD_CR35, 0x40 }, { ZYD_CR41, 0x24 }, \ { ZYD_CR44, 0x32 }, { ZYD_CR46, 0x90 }, { ZYD_CR89, 0x18 }, \ { ZYD_CR92, 0x0a }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \ { ZYD_CR106, 0x20 }, { ZYD_CR107, 0x24 }, { ZYD_CR109, 0x09 }, \ { ZYD_CR110, 0x13 }, { ZYD_CR111, 0x13 }, { ZYD_CR112, 0x13 }, \ { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, \ { ZYD_CR116, 0x24 }, { ZYD_CR117, 0xf4 }, { ZYD_CR118, 0xfa }, \ { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x77 }, { ZYD_CR122, 0xfe }, \ { ZYD_CR10, 0x89 }, { ZYD_CR17, 0x20 }, { ZYD_CR26, 0x93 }, \ { ZYD_CR34, 0x30 }, { ZYD_CR35, 0x40 }, { ZYD_CR41, 0x24 }, \ { ZYD_CR44, 0x32 }, { ZYD_CR46, 0x90 }, { ZYD_CR79, 0x58 }, \ { ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR89, 0x18 }, \ { ZYD_CR92, 0x0a }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \ { ZYD_CR106, 0x20 }, { ZYD_CR107, 0x24 }, { ZYD_CR109, 0x09 }, \ { ZYD_CR110, 0x13 }, { ZYD_CR111, 0x13 }, { ZYD_CR112, 0x13 }, \ { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, \ { ZYD_CR116, 0x24 }, { ZYD_CR117, 0xf4 }, { ZYD_CR118, 0x00 }, \ { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x06 }, { ZYD_CR122, 0xfe } \ } #define ZYD_MAXIM2_RF \ { \ 0x33334, 0x10a03, 0x00400, 0x00ca1, 0x10072, 0x18645, 0x04006, \ 0x000a7, 0x08258, 0x03fc9, 0x0040a, 0x0000b, 0x0026c \ } #define ZYD_MAXIM2_CHANTABLE_F \ { \ 0x33334, 0x08884, 0x1ddd4, 0x33334, 0x08884, 0x1ddd4, 0x33334, \ 0x08884, 0x1ddd4, 0x33334, 0x08884, 0x1ddd4, 0x33334, 0x26664 \ } #define ZYD_MAXIM2_CHANTABLE \ { \ { 0x33334, 0x10a03 }, \ { 0x08884, 0x20a13 }, \ { 0x1ddd4, 0x30a13 }, \ { 0x33334, 0x10a13 }, \ { 0x08884, 0x20a23 }, \ { 0x1ddd4, 0x30a23 }, \ { 0x33334, 0x10a23 }, \ { 0x08884, 0x20a33 }, \ { 0x1ddd4, 0x30a33 }, \ { 0x33334, 0x10a33 }, \ { 0x08884, 0x20a43 }, \ { 0x1ddd4, 0x30a43 }, \ { 0x33334, 0x10a43 }, \ { 0x26664, 0x20a53 } \ } #define ZYD_TX_RATEDIV \ { \ 0x1, 0x2, 0xb, 0xb, 0x0, 0x0, 0x0, 0x0, 0x30, 
0x18, 0xc, 0x6, \ 0x36, 0x24, 0x12, 0x9 \ } /* * Control pipe requests. */ #define ZYD_DOWNLOADREQ 0x30 #define ZYD_DOWNLOADSTS 0x31 #define ZYD_READFWDATAREQ 0x32 /* possible values for register ZYD_CR_INTERRUPT */ #define ZYD_HWINT_MASK 0x004f0000 /* possible values for register ZYD_MAC_MISC */ #define ZYD_UNLOCK_PHY_REGS 0x80 /* possible values for register ZYD_MAC_ENCRYPTION_TYPE */ #define ZYD_ENC_SNIFFER 8 /* flags for register ZYD_MAC_RXFILTER */ #define ZYD_FILTER_ASS_REQ (1 << 0) #define ZYD_FILTER_ASS_RSP (1 << 1) #define ZYD_FILTER_REASS_REQ (1 << 2) #define ZYD_FILTER_REASS_RSP (1 << 3) #define ZYD_FILTER_PRB_REQ (1 << 4) #define ZYD_FILTER_PRB_RSP (1 << 5) #define ZYD_FILTER_BCN (1 << 8) #define ZYD_FILTER_ATIM (1 << 9) #define ZYD_FILTER_DEASS (1 << 10) #define ZYD_FILTER_AUTH (1 << 11) #define ZYD_FILTER_DEAUTH (1 << 12) #define ZYD_FILTER_PS_POLL (1 << 26) #define ZYD_FILTER_RTS (1 << 27) #define ZYD_FILTER_CTS (1 << 28) #define ZYD_FILTER_ACK (1 << 29) #define ZYD_FILTER_CFE (1 << 30) -#define ZYD_FILTER_CFE_A (1 << 31) +#define ZYD_FILTER_CFE_A (1U << 31) /* helpers for register ZYD_MAC_RXFILTER */ #define ZYD_FILTER_MONITOR 0xffffffff #define ZYD_FILTER_BSS \ (ZYD_FILTER_ASS_REQ | ZYD_FILTER_ASS_RSP | \ ZYD_FILTER_REASS_REQ | ZYD_FILTER_REASS_RSP | \ ZYD_FILTER_PRB_REQ | ZYD_FILTER_PRB_RSP | \ (0x3 << 6) | \ ZYD_FILTER_BCN | ZYD_FILTER_ATIM | ZYD_FILTER_DEASS | \ ZYD_FILTER_AUTH | ZYD_FILTER_DEAUTH | \ (0x7 << 13) | \ ZYD_FILTER_PS_POLL | ZYD_FILTER_ACK) #define ZYD_FILTER_HOSTAP \ (ZYD_FILTER_ASS_REQ | ZYD_FILTER_REASS_REQ | \ ZYD_FILTER_PRB_REQ | ZYD_FILTER_DEASS | ZYD_FILTER_AUTH | \ ZYD_FILTER_DEAUTH | ZYD_FILTER_PS_POLL) struct zyd_tx_desc { uint8_t phy; #define ZYD_TX_PHY_SIGNAL(x) ((x) & 0xf) #define ZYD_TX_PHY_OFDM (1 << 4) #define ZYD_TX_PHY_SHPREAMBLE (1 << 5) /* CCK */ #define ZYD_TX_PHY_5GHZ (1 << 5) /* OFDM */ uint16_t len; uint8_t flags; #define ZYD_TX_FLAG_BACKOFF (1 << 0) #define ZYD_TX_FLAG_MULTICAST (1 << 1) #define ZYD_TX_FLAG_TYPE(x) (((x) & 0x3) << 2) #define ZYD_TX_TYPE_DATA 0 #define ZYD_TX_TYPE_PS_POLL 1 #define ZYD_TX_TYPE_MGMT 2 #define ZYD_TX_TYPE_CTL 3 #define ZYD_TX_FLAG_WAKEUP (1 << 4) #define ZYD_TX_FLAG_RTS (1 << 5) #define ZYD_TX_FLAG_ENCRYPT (1 << 6) #define ZYD_TX_FLAG_CTS_TO_SELF (1 << 7) uint16_t pktlen; uint16_t plcp_length; uint8_t plcp_service; #define ZYD_PLCP_LENGEXT 0x80 uint16_t nextlen; } __packed; struct zyd_plcphdr { uint8_t signal; uint8_t reserved[2]; uint16_t service; /* unaligned! 
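(the 3-byte signal/reserved prefix puts this 16-bit field at offset 3, so on strict-alignment machines it must be fetched bytewise rather than with a plain 16-bit load; a hedged sketch, with `p' as an illustrative pointer name: uint16_t svc = le16dec(&p->service); — le16dec() comes from <sys/endian.h>, and this assumes the on-wire field is little-endian, as is usual for USB devices)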
*/ } __packed; struct zyd_rx_stat { uint8_t signal_cck; uint8_t rssi; uint8_t signal_ofdm; uint8_t cipher; #define ZYD_RX_CIPHER_WEP64 1 #define ZYD_RX_CIPHER_TKIP 2 #define ZYD_RX_CIPHER_AES 4 #define ZYD_RX_CIPHER_WEP128 5 #define ZYD_RX_CIPHER_WEP256 6 #define ZYD_RX_CIPHER_WEP \ (ZYD_RX_CIPHER_WEP64 | ZYD_RX_CIPHER_WEP128 | ZYD_RX_CIPHER_WEP256) uint8_t flags; #define ZYD_RX_OFDM (1 << 0) #define ZYD_RX_TIMEOUT (1 << 1) #define ZYD_RX_OVERRUN (1 << 2) #define ZYD_RX_DECRYPTERR (1 << 3) #define ZYD_RX_BADCRC32 (1 << 4) #define ZYD_RX_NOT2ME (1 << 5) #define ZYD_RX_BADCRC16 (1 << 6) #define ZYD_RX_ERROR (1 << 7) } __packed; /* this structure may be unaligned */ struct zyd_rx_desc { #define ZYD_MAX_RXFRAMECNT 3 uWord len[ZYD_MAX_RXFRAMECNT]; uWord tag; #define ZYD_TAG_MULTIFRAME 0x697e } __packed; /* I2C-like bus */ struct zyd_rfwrite_cmd { uint16_t code; uint16_t width; uint16_t bit[32]; #define ZYD_RF_IF_LE (1 << 1) #define ZYD_RF_CLK (1 << 2) #define ZYD_RF_DATA (1 << 3) } __packed; struct zyd_cmd { uint16_t code; #define ZYD_CMD_IOWR 0x0021 /* write HMAC or PHY register */ #define ZYD_CMD_IORD 0x0022 /* read HMAC or PHY register */ #define ZYD_CMD_RFCFG 0x0023 /* write RF register */ #define ZYD_NOTIF_IORD 0x9001 /* response for ZYD_CMD_IORD */ #define ZYD_NOTIF_MACINTR 0x9001 /* interrupt notification */ #define ZYD_NOTIF_RETRYSTATUS 0xa001 /* Tx retry notification */ uint8_t data[64]; } __packed; /* structure for command ZYD_CMD_IOWR */ struct zyd_pair { uint16_t reg; /* helper macros to read/write 32-bit registers */ #define ZYD_REG32_LO(reg) (reg) #define ZYD_REG32_HI(reg) \ ((reg) + ((((reg) & 0xf000) == 0x9000) ? 2 : 1)) uint16_t val; } __packed; /* structure for notification ZYD_NOTIF_RETRYSTATUS */ struct zyd_notif_retry { uint16_t rate; uint8_t macaddr[IEEE80211_ADDR_LEN]; uint16_t count; } __packed; #define ZYD_CONFIG_INDEX 0 #define ZYD_IFACE_INDEX 0 #define ZYD_INTR_TIMEOUT 1000 #define ZYD_TX_TIMEOUT 10000 #define ZYD_MAX_TXBUFSZ \ (sizeof(struct zyd_tx_desc) + MCLBYTES) #define ZYD_MIN_FRAGSZ \ (sizeof(struct zyd_plcphdr) + IEEE80211_MIN_LEN + \ sizeof(struct zyd_rx_stat)) #define ZYD_MIN_RXBUFSZ ZYD_MIN_FRAGSZ #define ZYX_MAX_RXBUFSZ \ ((sizeof (struct zyd_plcphdr) + IEEE80211_MAX_LEN + \ sizeof (struct zyd_rx_stat)) * ZYD_MAX_RXFRAMECNT + \ sizeof (struct zyd_rx_desc)) #define ZYD_TX_DESC_SIZE (sizeof (struct zyd_tx_desc)) #define ZYD_RX_LIST_CNT 1 #define ZYD_TX_LIST_CNT 5 #define ZYD_CMD_FLAG_READ (1 << 0) #define ZYD_CMD_FLAG_SENT (1 << 1) /* quickly determine if a given rate is CCK or OFDM */ #define ZYD_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) struct zyd_phy_pair { uint16_t reg; uint8_t val; }; struct zyd_mac_pair { uint16_t reg; uint32_t val; }; struct zyd_tx_data { STAILQ_ENTRY(zyd_tx_data) next; struct zyd_softc *sc; struct zyd_tx_desc desc; struct mbuf *m; struct ieee80211_node *ni; int rate; }; typedef STAILQ_HEAD(, zyd_tx_data) zyd_txdhead; struct zyd_rx_data { struct mbuf *m; int rssi; }; struct zyd_rx_radiotap_header { struct ieee80211_radiotap_header wr_ihdr; uint8_t wr_flags; uint8_t wr_rate; uint16_t wr_chan_freq; uint16_t wr_chan_flags; int8_t wr_antsignal; int8_t wr_antnoise; } __packed __aligned(8); #define ZYD_RX_RADIOTAP_PRESENT \ ((1 << IEEE80211_RADIOTAP_FLAGS) | \ (1 << IEEE80211_RADIOTAP_RATE) | \ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \ (1 << IEEE80211_RADIOTAP_CHANNEL)) struct zyd_tx_radiotap_header { struct ieee80211_radiotap_header wt_ihdr; uint8_t wt_flags; uint8_t wt_rate; uint16_t
wt_chan_freq; uint16_t wt_chan_flags; } __packed __aligned(8); #define ZYD_TX_RADIOTAP_PRESENT \ ((1 << IEEE80211_RADIOTAP_FLAGS) | \ (1 << IEEE80211_RADIOTAP_RATE) | \ (1 << IEEE80211_RADIOTAP_CHANNEL)) struct zyd_softc; /* forward declaration */ struct zyd_rf { /* RF methods */ int (*init)(struct zyd_rf *); int (*switch_radio)(struct zyd_rf *, int); int (*set_channel)(struct zyd_rf *, uint8_t); int (*bandedge6)(struct zyd_rf *, struct ieee80211_channel *); /* RF attributes */ struct zyd_softc *rf_sc; /* back-pointer */ int width; int idx; /* for GCT RF */ int update_pwr; }; struct zyd_rq { struct zyd_cmd *cmd; const uint16_t *idata; struct zyd_pair *odata; int ilen; int olen; int flags; STAILQ_ENTRY(zyd_rq) rq; }; struct zyd_vap { struct ieee80211vap vap; int (*newstate)(struct ieee80211vap *, enum ieee80211_state, int); }; #define ZYD_VAP(vap) ((struct zyd_vap *)(vap)) enum { ZYD_BULK_WR, ZYD_BULK_RD, ZYD_INTR_WR, ZYD_INTR_RD, ZYD_N_TRANSFER = 4, }; struct zyd_softc { struct ifnet *sc_ifp; device_t sc_dev; struct usb_device *sc_udev; struct usb_xfer *sc_xfer[ZYD_N_TRANSFER]; int sc_flags; #define ZYD_FLAG_FWLOADED (1 << 0) #define ZYD_FLAG_INITONCE (1 << 1) #define ZYD_FLAG_INITDONE (1 << 2) #define ZYD_FLAG_DETACHED (1 << 3) struct zyd_rf sc_rf; STAILQ_HEAD(, zyd_rq) sc_rtx; STAILQ_HEAD(, zyd_rq) sc_rqh; uint8_t sc_bssid[IEEE80211_ADDR_LEN]; uint16_t sc_fwbase; uint8_t sc_regdomain; uint8_t sc_macrev; uint16_t sc_fwrev; uint8_t sc_rfrev; uint8_t sc_parev; uint8_t sc_al2230s; uint8_t sc_bandedge6; uint8_t sc_newphy; uint8_t sc_cckgain; uint8_t sc_fix_cr157; uint8_t sc_ledtype; uint8_t sc_txled; uint32_t sc_atim_wnd; uint32_t sc_pre_tbtt; uint32_t sc_bcn_int; uint8_t sc_pwrcal[14]; uint8_t sc_pwrint[14]; uint8_t sc_ofdm36_cal[14]; uint8_t sc_ofdm48_cal[14]; uint8_t sc_ofdm54_cal[14]; struct mtx sc_mtx; struct zyd_tx_data tx_data[ZYD_TX_LIST_CNT]; zyd_txdhead tx_q; zyd_txdhead tx_free; int tx_nfree; struct zyd_rx_desc sc_rx_desc; struct zyd_rx_data sc_rx_data[ZYD_MAX_RXFRAMECNT]; int sc_rx_count; struct zyd_cmd sc_ibuf; struct zyd_rx_radiotap_header sc_rxtap; int sc_rxtap_len; struct zyd_tx_radiotap_header sc_txtap; int sc_txtap_len; }; #define ZYD_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define ZYD_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define ZYD_LOCK_ASSERT(sc, t) mtx_assert(&(sc)->sc_mtx, t) Index: head/sys/dev/wpi/if_wpireg.h =================================================================== --- head/sys/dev/wpi/if_wpireg.h (revision 258779) +++ head/sys/dev/wpi/if_wpireg.h (revision 258780) @@ -1,737 +1,737 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2006,2007 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define WPI_TX_RING_COUNT 256 #define WPI_CMD_RING_COUNT 256 #define WPI_RX_RING_COUNT 64 /* * Rings must be aligned on a 16K boundary.
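* In practice that means the ring memory is allocated through a DMA tag whose alignment parameter is WPI_RING_DMA_ALIGN below, e.g. a sketch (not necessarily the driver's exact call): bus_dma_tag_create(parent, WPI_RING_DMA_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &tag); which guarantees the ring base address has its low 14 bits clear.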
*/ #define WPI_RING_DMA_ALIGN 0x4000 /* maximum scatter/gather */ #define WPI_MAX_SCATTER 4 /* maximum Rx buffer size */ #define WPI_RBUF_SIZE ( 3 * 1024 ) /* XXX 3000 but must be aligned */ /* * Control and status registers. */ #define WPI_HWCONFIG 0x000 #define WPI_INTR 0x008 #define WPI_MASK 0x00c #define WPI_INTR_STATUS 0x010 #define WPI_GPIO_STATUS 0x018 #define WPI_RESET 0x020 #define WPI_GPIO_CTL 0x024 #define WPI_EEPROM_CTL 0x02c #define WPI_EEPROM_STATUS 0x030 #define WPI_UCODE_SET 0x058 #define WPI_UCODE_CLR 0x05c #define WPI_TEMPERATURE 0x060 #define WPI_CHICKEN 0x100 #define WPI_PLL_CTL 0x20c #define WPI_WRITE_MEM_ADDR 0x444 #define WPI_READ_MEM_ADDR 0x448 #define WPI_WRITE_MEM_DATA 0x44c #define WPI_READ_MEM_DATA 0x450 #define WPI_TX_WIDX 0x460 #define WPI_TX_CTL(qid) (0x940 + (qid) * 8) #define WPI_TX_BASE(qid) (0x944 + (qid) * 8) #define WPI_TX_DESC(qid) (0x980 + (qid) * 80) #define WPI_RX_CONFIG 0xc00 #define WPI_RX_BASE 0xc04 #define WPI_RX_WIDX 0xc20 #define WPI_RX_RIDX_PTR 0xc24 #define WPI_RX_CTL 0xcc0 #define WPI_RX_STATUS 0xcc4 #define WPI_TX_CONFIG(qid) (0xd00 + (qid) * 32) #define WPI_TX_CREDIT(qid) (0xd04 + (qid) * 32) #define WPI_TX_STATE(qid) (0xd08 + (qid) * 32) #define WPI_TX_BASE_PTR 0xe80 #define WPI_MSG_CONFIG 0xe88 #define WPI_TX_STATUS 0xe90 /* * NIC internal memory offsets. */ #define WPI_MEM_MODE 0x2e00 #define WPI_MEM_RA 0x2e04 #define WPI_MEM_TXCFG 0x2e10 #define WPI_MEM_MAGIC4 0x2e14 #define WPI_MEM_MAGIC5 0x2e20 #define WPI_MEM_BYPASS1 0x2e2c #define WPI_MEM_BYPASS2 0x2e30 #define WPI_MEM_CLOCK1 0x3004 #define WPI_MEM_CLOCK2 0x3008 #define WPI_MEM_POWER 0x300c #define WPI_MEM_PCIDEV 0x3010 #define WPI_MEM_HW_RADIO_OFF 0x3014 #define WPI_MEM_UCODE_CTL 0x3400 #define WPI_MEM_UCODE_SRC 0x3404 #define WPI_MEM_UCODE_DST 0x3408 #define WPI_MEM_UCODE_SIZE 0x340c #define WPI_MEM_UCODE_BASE 0x3800 #define WPI_MEM_TEXT_BASE 0x3490 #define WPI_MEM_TEXT_SIZE 0x3494 #define WPI_MEM_DATA_BASE 0x3498 #define WPI_MEM_DATA_SIZE 0x349c /* possible flags for register WPI_HWCONFIG */ #define WPI_HW_ALM_MB (1 << 8) #define WPI_HW_ALM_MM (1 << 9) #define WPI_HW_SKU_MRC (1 << 10) #define WPI_HW_REV_D (1 << 11) #define WPI_HW_TYPE_B (1 << 12) /* possible flags for registers WPI_READ_MEM_ADDR/WPI_WRITE_MEM_ADDR */ #define WPI_MEM_4 ((sizeof (uint32_t) - 1) << 24) /* possible values for WPI_MEM_UCODE_DST */ #define WPI_FW_TEXT 0x00000000 /* possible flags for WPI_GPIO_STATUS */ #define WPI_POWERED (1 << 9) /* possible flags for register WPI_RESET */ #define WPI_NEVO_RESET (1 << 0) #define WPI_SW_RESET (1 << 7) #define WPI_MASTER_DISABLED (1 << 8) #define WPI_STOP_MASTER (1 << 9) /* possible flags for register WPI_GPIO_CTL */ #define WPI_GPIO_CLOCK (1 << 0) #define WPI_GPIO_INIT (1 << 2) #define WPI_GPIO_MAC (1 << 3) #define WPI_GPIO_SLEEP (1 << 4) #define WPI_GPIO_PWR_STATUS 0x07000000 #define WPI_GPIO_PWR_SLEEP (4 << 24) /* possible flags for register WPI_CHICKEN */ #define WPI_CHICKEN_RXNOLOS (1 << 23) /* possible flags for register WPI_PLL_CTL */ #define WPI_PLL_INIT (1 << 24) /* possible flags for register WPI_UCODE_CLR */ #define WPI_RADIO_OFF (1 << 1) #define WPI_DISABLE_CMD (1 << 2) /* possible flags for WPI_RX_STATUS */ #define WPI_RX_IDLE (1 << 24) /* possible flags for register WPI_UC_CTL */ #define WPI_UC_ENABLE (1 << 30) -#define WPI_UC_RUN (1 << 31) +#define WPI_UC_RUN (1U << 31) /* possible flags for register WPI_INTR_CSR */ #define WPI_ALIVE_INTR (1 << 0) #define WPI_WAKEUP_INTR (1 << 1) #define WPI_SW_ERROR (1 << 25) #define WPI_TX_INTR (1 << 27) #define 
WPI_HW_ERROR (1 << 29) -#define WPI_RX_INTR (1 << 31) +#define WPI_RX_INTR (1U << 31) #define WPI_INTR_MASK \ (WPI_SW_ERROR | WPI_HW_ERROR | WPI_TX_INTR | WPI_RX_INTR | \ WPI_ALIVE_INTR | WPI_WAKEUP_INTR) /* possible flags for register WPI_TX_STATUS */ #define WPI_TX_IDLE(qid) (1 << ((qid) + 24) | 1 << ((qid) + 16)) /* possible flags for register WPI_EEPROM_CTL */ #define WPI_EEPROM_READY (1 << 0) /* possible flags for register WPI_EEPROM_STATUS */ #define WPI_EEPROM_VERSION 0x00000007 #define WPI_EEPROM_LOCKED 0x00000180 struct wpi_shared { uint32_t txbase[8]; uint32_t next; uint32_t reserved[2]; } __packed; #define WPI_MAX_SEG_LEN 65520 struct wpi_tx_desc { uint32_t flags; #define WPI_PAD32(x) (roundup2(x, 4) - (x)) struct { uint32_t addr; uint32_t len; } __attribute__((__packed__)) segs[WPI_MAX_SCATTER]; uint8_t reserved[28]; } __packed; struct wpi_tx_stat { uint8_t nrts; uint8_t ntries; uint8_t nkill; uint8_t rate; uint32_t duration; uint32_t status; } __packed; struct wpi_rx_desc { uint32_t len; uint8_t type; #define WPI_UC_READY 1 #define WPI_RX_DONE 27 #define WPI_TX_DONE 28 #define WPI_START_SCAN 130 #define WPI_SCAN_RESULTS 131 #define WPI_STOP_SCAN 132 #define WPI_STATE_CHANGED 161 #define WPI_MISSED_BEACON 162 uint8_t flags; uint8_t idx; uint8_t qid; } __packed; struct wpi_rx_stat { uint8_t len; #define WPI_STAT_MAXLEN 20 uint8_t id; uint8_t rssi; /* received signal strength */ #define WPI_RSSI_OFFSET 95 uint8_t agc; /* automatic gain control */ uint16_t signal; uint16_t noise; } __packed; struct wpi_rx_head { uint16_t chan; uint16_t flags; uint8_t reserved; uint8_t rate; uint16_t len; } __packed; struct wpi_rx_tail { uint32_t flags; #define WPI_RX_NO_CRC_ERR (1 << 0) #define WPI_RX_NO_OVFL_ERR (1 << 1) /* shortcut for the above */ #define WPI_RX_NOERROR (WPI_RX_NO_CRC_ERR | WPI_RX_NO_OVFL_ERR) uint64_t tstamp; uint32_t tbeacon; } __packed; struct wpi_tx_cmd { uint8_t code; #define WPI_CMD_CONFIGURE 16 #define WPI_CMD_ASSOCIATE 17 #define WPI_CMD_SET_WME 19 #define WPI_CMD_TSF 20 #define WPI_CMD_ADD_NODE 24 #define WPI_CMD_TX_DATA 28 #define WPI_CMD_MRR_SETUP 71 #define WPI_CMD_SET_LED 72 #define WPI_CMD_SET_POWER_MODE 119 #define WPI_CMD_SCAN 128 #define WPI_CMD_SET_BEACON 145 #define WPI_CMD_TXPOWER 151 #define WPI_CMD_BLUETOOTH 155 uint8_t flags; uint8_t idx; uint8_t qid; uint8_t data[360]; } __packed; /* structure for WPI_CMD_CONFIGURE */ struct wpi_config { uint8_t myaddr[IEEE80211_ADDR_LEN]; uint16_t reserved1; uint8_t bssid[IEEE80211_ADDR_LEN]; uint16_t reserved2; uint8_t wlap_bssid_addr[6]; uint16_t reserved3; uint8_t mode; #define WPI_MODE_HOSTAP 1 #define WPI_MODE_STA 3 #define WPI_MODE_IBSS 4 #define WPI_MODE_MONITOR 6 uint8_t air_propogation; uint16_t reserved4; uint8_t ofdm_mask; uint8_t cck_mask; uint16_t associd; uint32_t flags; #define WPI_CONFIG_24GHZ (1 << 0) #define WPI_CONFIG_CCK (1 << 1) #define WPI_CONFIG_AUTO (1 << 2) #define WPI_CONFIG_SHSLOT (1 << 4) #define WPI_CONFIG_SHPREAMBLE (1 << 5) #define WPI_CONFIG_NODIVERSITY (1 << 7) #define WPI_CONFIG_ANTENNA_A (1 << 8) #define WPI_CONFIG_ANTENNA_B (1 << 9) #define WPI_CONFIG_TSF (1 << 15) uint32_t filter; #define WPI_FILTER_PROMISC (1 << 0) #define WPI_FILTER_CTL (1 << 1) #define WPI_FILTER_MULTICAST (1 << 2) #define WPI_FILTER_NODECRYPT (1 << 3) #define WPI_FILTER_BSS (1 << 5) #define WPI_FILTER_BEACON (1 << 6) uint8_t chan; uint16_t reserved6; } __packed; /* structure for command WPI_CMD_ASSOCIATE */ struct wpi_assoc { uint32_t flags; uint32_t filter; uint8_t ofdm_mask; uint8_t cck_mask; uint16_t reserved; }
__packed; /* structure for command WPI_CMD_SET_WME */ struct wpi_wme_setup { uint32_t flags; struct { uint16_t cwmin; uint16_t cwmax; uint8_t aifsn; uint8_t reserved; uint16_t txop; } __packed ac[WME_NUM_AC]; } __packed; /* structure for command WPI_CMD_TSF */ struct wpi_cmd_tsf { uint64_t tstamp; uint16_t bintval; uint16_t atim; uint32_t binitval; uint16_t lintval; uint16_t reserved; } __packed; /* structure for WPI_CMD_ADD_NODE */ struct wpi_node_info { uint8_t control; #define WPI_NODE_UPDATE (1 << 0) uint8_t reserved1[3]; uint8_t bssid[IEEE80211_ADDR_LEN]; uint16_t reserved2; uint8_t id; #define WPI_ID_BSS 0 #define WPI_ID_BROADCAST 24 uint8_t flags; uint16_t reserved3; uint16_t key_flags; uint8_t tkip; uint8_t reserved4; uint16_t ttak[5]; uint16_t reserved5; uint8_t key[IEEE80211_KEYBUF_SIZE]; uint32_t action; #define WPI_ACTION_SET_RATE 4 uint32_t mask; uint16_t tid; uint8_t rate; uint8_t antenna; #define WPI_ANTENNA_A (1<<6) #define WPI_ANTENNA_B (1<<7) #define WPI_ANTENNA_BOTH (WPI_ANTENNA_A|WPI_ANTENNA_B) uint8_t add_imm; uint8_t del_imm; uint16_t add_imm_start; } __packed; /* structure for command WPI_CMD_TX_DATA */ struct wpi_cmd_data { uint16_t len; uint16_t lnext; uint32_t flags; #define WPI_TX_NEED_RTS (1 << 1) #define WPI_TX_NEED_CTS (1 << 2) #define WPI_TX_NEED_ACK (1 << 3) #define WPI_TX_FULL_TXOP (1 << 7) #define WPI_TX_BT_DISABLE (1 << 12) /* bluetooth coexistence */ #define WPI_TX_AUTO_SEQ (1 << 13) #define WPI_TX_INSERT_TSTAMP (1 << 16) uint8_t rate; uint8_t id; uint8_t tid; uint8_t security; uint8_t key[IEEE80211_KEYBUF_SIZE]; uint8_t tkip[IEEE80211_WEP_MICLEN]; uint32_t fnext; uint32_t lifetime; #define WPI_LIFETIME_INFINITE 0xffffffff uint8_t ofdm_mask; uint8_t cck_mask; uint8_t rts_ntries; uint8_t data_ntries; uint16_t timeout; uint16_t txop; struct ieee80211_frame wh; } __packed; /* structure for command WPI_CMD_SET_BEACON */ struct wpi_cmd_beacon { uint16_t len; uint16_t reserved1; uint32_t flags; /* same as wpi_cmd_data */ uint8_t rate; uint8_t id; uint8_t reserved2[30]; uint32_t lifetime; uint8_t ofdm_mask; uint8_t cck_mask; uint16_t reserved3[3]; uint16_t tim; uint8_t timsz; uint8_t reserved4; struct ieee80211_frame wh; } __packed; /* structure for notification WPI_MISSED_BEACON */ struct wpi_missed_beacon { uint32_t consecutive; uint32_t total; uint32_t expected; uint32_t received; } __packed; /* structure for WPI_CMD_MRR_SETUP */ struct wpi_mrr_setup { uint8_t which; #define WPI_MRR_CTL 0 #define WPI_MRR_DATA 1 uint8_t reserved[3]; struct { uint8_t signal; uint8_t flags; uint8_t ntries; uint8_t next; #define WPI_OFDM6 0 #define WPI_OFDM54 7 #define WPI_CCK1 8 #define WPI_CCK2 9 #define WPI_CCK11 11 } __attribute__((__packed__)) rates[WPI_CCK11 + 1]; } __packed; /* structure for WPI_CMD_SET_LED */ struct wpi_cmd_led { uint32_t unit; /* multiplier (in usecs) */ uint8_t which; #define WPI_LED_ACTIVITY 1 #define WPI_LED_LINK 2 uint8_t off; uint8_t on; uint8_t reserved; } __packed; /* structure for WPI_CMD_SET_POWER_MODE */ struct wpi_power { uint32_t flags; #define WPI_POWER_CAM 0 /* constantly awake mode */ uint32_t rx_timeout; uint32_t tx_timeout; uint32_t sleep[5]; } __packed; /* structure for command WPI_CMD_SCAN */ struct wpi_scan_hdr { uint16_t len; uint8_t reserved1; uint8_t nchan; uint16_t quiet; uint16_t threshold; uint16_t promotion; uint16_t reserved2; uint32_t maxtimeout; uint32_t suspend; uint32_t flags; uint32_t filter; struct { uint16_t len; uint16_t lnext; uint32_t flags; uint8_t rate; uint8_t id; uint8_t tid; uint8_t security; uint8_t 
key[IEEE80211_KEYBUF_SIZE]; uint8_t tkip[IEEE80211_WEP_MICLEN]; uint32_t fnext; uint32_t lifetime; uint8_t ofdm_mask; uint8_t cck_mask; uint8_t rts_ntries; uint8_t data_ntries; uint16_t timeout; uint16_t txop; } tx __attribute__((__packed__)); #define WPI_SCAN_MAX_ESSIDS 4 struct { uint8_t id; uint8_t esslen; uint8_t essid[32]; }scan_essids[WPI_SCAN_MAX_ESSIDS]; /* followed by probe request body */ /* followed by nchan x wpi_scan_chan */ } __packed; struct wpi_scan_chan { uint8_t flags; uint8_t chan; #define WPI_CHAN_ACTIVE (1 << 0) #define WPI_CHAN_DIRECT (1 << 1) uint8_t gain_radio; uint8_t gain_dsp; uint16_t active; /* msecs */ uint16_t passive; /* msecs */ } __packed; /* structure for WPI_CMD_BLUETOOTH */ struct wpi_bluetooth { uint8_t flags; uint8_t lead; uint8_t kill; uint8_t reserved; uint32_t ack; uint32_t cts; } __packed; /* structure for command WPI_CMD_TXPOWER */ struct wpi_cmd_txpower { uint8_t band; #define WPI_RATE_5GHZ 0 #define WPI_RATE_2GHZ 1 uint8_t reserved; uint16_t channel; #define WPI_RATE_MAPPING_COUNT 12 struct { uint8_t rate; uint8_t gain_radio; uint8_t gain_dsp; uint8_t reserved; } __packed rates [WPI_RATE_MAPPING_COUNT]; } __packed; #define WPI_FW_MAIN_TEXT_MAXSZ (80 * 1024 ) #define WPI_FW_MAIN_DATA_MAXSZ (32 * 1024 ) #define WPI_FW_INIT_TEXT_MAXSZ (80 * 1024 ) #define WPI_FW_INIT_DATA_MAXSZ (32 * 1024 ) #define WPI_FW_BOOT_TEXT_MAXSZ 1024 #define WPI_FW_UPDATED (1 << 31 ) /* firmware image header */ struct wpi_firmware_hdr { #define WPI_FW_MINVERSION 2144 uint32_t version; uint32_t rtextsz; uint32_t rdatasz; uint32_t itextsz; uint32_t idatasz; uint32_t btextsz; } __packed; /* structure for WPI_UC_READY notification */ struct wpi_ucode_info { uint32_t version; uint8_t revision[8]; uint8_t type; uint8_t subtype; uint16_t reserved; uint32_t logptr; uint32_t errorptr; uint32_t timestamp; uint32_t valid; } __packed; /* structure for WPI_START_SCAN notification */ struct wpi_start_scan { uint64_t tstamp; uint32_t tbeacon; uint8_t chan; uint8_t band; uint16_t reserved; uint32_t status; } __packed; /* structure for WPI_STOP_SCAN notification */ struct wpi_stop_scan { uint8_t nchan; uint8_t status; uint8_t reserved; uint8_t chan; uint64_t tsf; } __packed; #define WPI_EEPROM_MAC 0x015 #define WPI_EEPROM_REVISION 0x035 #define WPI_EEPROM_CAPABILITIES 0x045 #define WPI_EEPROM_TYPE 0x04a #define WPI_EEPROM_DOMAIN 0x060 #define WPI_EEPROM_BAND1 0x063 #define WPI_EEPROM_BAND2 0x072 #define WPI_EEPROM_BAND3 0x080 #define WPI_EEPROM_BAND4 0x08d #define WPI_EEPROM_BAND5 0x099 #define WPI_EEPROM_POWER_GRP 0x100 struct wpi_eeprom_chan { uint8_t flags; #define WPI_EEPROM_CHAN_VALID (1<<0) #define WPI_EEPROM_CHAN_IBSS (1<<1) #define WPI_EEPROM_CHAN_ACTIVE (1<<3) #define WPI_EEPROM_CHAN_RADAR (1<<4) int8_t maxpwr; } __packed; struct wpi_eeprom_sample { uint8_t index; int8_t power; uint16_t volt; }; #define WPI_POWER_GROUPS_COUNT 5 struct wpi_eeprom_group { struct wpi_eeprom_sample samples[5]; int32_t coef[5]; int32_t corr[5]; int8_t maxpwr; uint8_t chan; int16_t temp; } __packed; #define WPI_CHAN_BANDS_COUNT 5 #define WPI_MAX_CHAN_PER_BAND 14 static const struct wpi_chan_band { uint32_t addr; /* offset in EEPROM */ uint8_t nchan; uint8_t chan[WPI_MAX_CHAN_PER_BAND]; } wpi_bands[5] = { { WPI_EEPROM_BAND1, 14, { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }}, { WPI_EEPROM_BAND2, 13, { 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 }}, { WPI_EEPROM_BAND3, 12, { 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 }}, { WPI_EEPROM_BAND4, 11, { 100, 104, 108, 112, 116, 120, 124, 
128, 132, 136, 140 }}, { WPI_EEPROM_BAND5, 6, { 145, 149, 153, 157, 161, 165 }} }; #define WPI_MAX_PWR_INDEX 77 /* * RF Tx gain values from highest to lowest power (values obtained from * the reference driver.) */ static const uint8_t wpi_rf_gain_2ghz[WPI_MAX_PWR_INDEX + 1] = { 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xbb, 0xbb, 0xbb, 0xbb, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xd3, 0xd3, 0xb3, 0xb3, 0xb3, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x73, 0xeb, 0xeb, 0xeb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xab, 0xab, 0xab, 0x8b, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xc3, 0xc3, 0xc3, 0xc3, 0xa3, 0xa3, 0xa3, 0xa3, 0x83, 0x83, 0x83, 0x83, 0x63, 0x63, 0x63, 0x63, 0x43, 0x43, 0x43, 0x43, 0x23, 0x23, 0x23, 0x23, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t wpi_rf_gain_5ghz[WPI_MAX_PWR_INDEX + 1] = { 0xfb, 0xfb, 0xfb, 0xdb, 0xdb, 0xbb, 0xbb, 0x9b, 0x9b, 0x7b, 0x7b, 0x7b, 0x7b, 0x5b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x1b, 0x1b, 0x1b, 0x73, 0x73, 0x73, 0x53, 0x53, 0x53, 0x53, 0x53, 0x33, 0x33, 0x33, 0x33, 0x13, 0x13, 0x13, 0x13, 0x13, 0xab, 0xab, 0xab, 0x8b, 0x8b, 0x8b, 0x8b, 0x6b, 0x6b, 0x6b, 0x6b, 0x4b, 0x4b, 0x4b, 0x4b, 0x2b, 0x2b, 0x2b, 0x2b, 0x0b, 0x0b, 0x0b, 0x0b, 0x83, 0x83, 0x63, 0x63, 0x63, 0x63, 0x43, 0x43, 0x43, 0x43, 0x23, 0x23, 0x23, 0x23, 0x03 }; /* * DSP pre-DAC gain values from highest to lowest power (values obtained * from the reference driver.) */ static const uint8_t wpi_dsp_gain_2ghz[WPI_MAX_PWR_INDEX + 1] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7d, 0x6e, 0x69, 0x62, 0x7d, 0x73, 0x6c, 0x63, 0x77, 0x6f, 0x69, 0x61, 0x5c, 0x6a, 0x64, 0x78, 0x71, 0x6b, 0x7d, 0x77, 0x70, 0x6a, 0x65, 0x61, 0x5b, 0x6b, 0x79, 0x73, 0x6d, 0x7f, 0x79, 0x73, 0x6c, 0x66, 0x60, 0x5c, 0x6e, 0x68, 0x62, 0x74, 0x7d, 0x77, 0x71, 0x6b, 0x65, 0x60, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f }; static const uint8_t wpi_dsp_gain_5ghz[WPI_MAX_PWR_INDEX + 1] = { 0x7f, 0x78, 0x72, 0x77, 0x65, 0x71, 0x66, 0x72, 0x67, 0x75, 0x6b, 0x63, 0x5c, 0x6c, 0x7d, 0x76, 0x6d, 0x66, 0x60, 0x5a, 0x68, 0x62, 0x5c, 0x76, 0x6f, 0x68, 0x7e, 0x79, 0x71, 0x69, 0x63, 0x76, 0x6f, 0x68, 0x62, 0x74, 0x6d, 0x66, 0x62, 0x5d, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78 }; #define WPI_READ(sc, reg) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) #define WPI_WRITE(sc, reg, val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) #define WPI_WRITE_REGION_4(sc, offset, datap, count) \ bus_space_write_region_4((sc)->sc_st, (sc)->sc_sh, (offset), \ (datap), (count)) Index: head/sys/geom/raid/tr_raid1e.c =================================================================== --- head/sys/geom/raid/tr_raid1e.c (revision 258779) +++ head/sys/geom/raid/tr_raid1e.c (revision 258780) @@ -1,1250 +1,1250 @@ /*- * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "geom/raid/g_raid.h" #include "g_raid_tr_if.h" #define N 2 SYSCTL_DECL(_kern_geom_raid_raid1e); #define RAID1E_REBUILD_SLAB (1 << 20) /* One transaction in a rebuild */ static int g_raid1e_rebuild_slab = RAID1E_REBUILD_SLAB; TUNABLE_INT("kern.geom.raid.raid1e.rebuild_slab_size", &g_raid1e_rebuild_slab); SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_slab_size, CTLFLAG_RW, &g_raid1e_rebuild_slab, 0, "Amount of the disk to rebuild each read/write cycle of the rebuild."); #define RAID1E_REBUILD_FAIR_IO 20 /* use 1/x of the available I/O */ static int g_raid1e_rebuild_fair_io = RAID1E_REBUILD_FAIR_IO; TUNABLE_INT("kern.geom.raid.raid1e.rebuild_fair_io", &g_raid1e_rebuild_fair_io); SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_fair_io, CTLFLAG_RW, &g_raid1e_rebuild_fair_io, 0, "Fraction of the I/O bandwidth to use when disk busy for rebuild."); #define RAID1E_REBUILD_CLUSTER_IDLE 100 static int g_raid1e_rebuild_cluster_idle = RAID1E_REBUILD_CLUSTER_IDLE; TUNABLE_INT("kern.geom.raid.raid1e.rebuild_cluster_idle", &g_raid1e_rebuild_cluster_idle); SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RW, &g_raid1e_rebuild_cluster_idle, 0, "Number of slabs to do each time we trigger a rebuild cycle"); #define RAID1E_REBUILD_META_UPDATE 1024 /* update meta data every 1GB or so */ static int g_raid1e_rebuild_meta_update = RAID1E_REBUILD_META_UPDATE; TUNABLE_INT("kern.geom.raid.raid1e.rebuild_meta_update", &g_raid1e_rebuild_meta_update); SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_meta_update, CTLFLAG_RW, &g_raid1e_rebuild_meta_update, 0, "When to update the meta data."); static MALLOC_DEFINE(M_TR_RAID1E, "tr_raid1e_data", "GEOM_RAID RAID1E data"); #define TR_RAID1E_NONE 0 #define TR_RAID1E_REBUILD 1 #define TR_RAID1E_RESYNC 2 #define TR_RAID1E_F_DOING_SOME 0x1 #define TR_RAID1E_F_LOCKED 0x2 #define TR_RAID1E_F_ABORT 0x4 struct g_raid_tr_raid1e_object { struct g_raid_tr_object trso_base; int trso_starting; int trso_stopping; int trso_type; int trso_recover_slabs; /* slabs before rest */ int trso_fair_io; int trso_meta_update; int trso_flags; struct g_raid_subdisk *trso_failed_sd; /* like per volume */ void *trso_buffer; /* Buffer space */ off_t trso_lock_pos; /* Locked range start. */ off_t trso_lock_len; /* Locked range length.
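Both fields are virtual (volume) byte offsets: together they describe the [trso_lock_pos, trso_lock_pos + trso_lock_len) window that the rebuild keeps locked against regular writes while it copies a slab.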
*/ struct bio trso_bio; }; static g_raid_tr_taste_t g_raid_tr_taste_raid1e; static g_raid_tr_event_t g_raid_tr_event_raid1e; static g_raid_tr_start_t g_raid_tr_start_raid1e; static g_raid_tr_stop_t g_raid_tr_stop_raid1e; static g_raid_tr_iostart_t g_raid_tr_iostart_raid1e; static g_raid_tr_iodone_t g_raid_tr_iodone_raid1e; static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1e; static g_raid_tr_locked_t g_raid_tr_locked_raid1e; static g_raid_tr_idle_t g_raid_tr_idle_raid1e; static g_raid_tr_free_t g_raid_tr_free_raid1e; static kobj_method_t g_raid_tr_raid1e_methods[] = { KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_raid1e), KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_raid1e), KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_raid1e), KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid1e), KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid1e), KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid1e), KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1e), KOBJMETHOD(g_raid_tr_locked, g_raid_tr_locked_raid1e), KOBJMETHOD(g_raid_tr_idle, g_raid_tr_idle_raid1e), KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid1e), { 0, 0 } }; static struct g_raid_tr_class g_raid_tr_raid1e_class = { "RAID1E", g_raid_tr_raid1e_methods, sizeof(struct g_raid_tr_raid1e_object), .trc_enable = 1, .trc_priority = 200, .trc_accept_unmapped = 1 }; static void g_raid_tr_raid1e_rebuild_abort(struct g_raid_tr_object *tr); static void g_raid_tr_raid1e_maybe_rebuild(struct g_raid_tr_object *tr, struct g_raid_subdisk *sd); static int g_raid_tr_raid1e_select_read_disk(struct g_raid_volume *vol, int no, off_t off, off_t len, u_int mask); static inline void V2P(struct g_raid_volume *vol, off_t virt, int *disk, off_t *offset, off_t *start) { off_t nstrip; u_int strip_size; strip_size = vol->v_strip_size; /* Strip number. */ nstrip = virt / strip_size; /* Start position in strip. */ *start = virt % strip_size; /* Disk number. */ *disk = (nstrip * N) % vol->v_disks_count; /* Strip start position in disk. */ *offset = ((nstrip * N) / vol->v_disks_count) * strip_size; } static inline void P2V(struct g_raid_volume *vol, int disk, off_t offset, off_t *virt, int *copy) { off_t nstrip, start; u_int strip_size; strip_size = vol->v_strip_size; /* Start position in strip. */ start = offset % strip_size; /* Physical strip number. */ nstrip = (offset / strip_size) * vol->v_disks_count + disk; /* Number of physical strip (copy) inside virtual strip. */ *copy = nstrip % N; /* Offset in virtual space. 
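A worked example, assuming N = 2 copies, three disks and strip size s: disk 1 at physical offset 4s gives nstrip = 4 * 3 + 1 = 13, so *copy = 13 % 2 = 1 and *virt = (13 / 2) * s = 6s (plus the in-strip start); V2P() on virtual offset 6s returns disk 0 at offset 4s, the first of the two copies of that strip, so the two mappings are consistent inverses.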
*/ *virt = (nstrip / N) * strip_size + start; } static int g_raid_tr_taste_raid1e(struct g_raid_tr_object *tr, struct g_raid_volume *vol) { struct g_raid_tr_raid1e_object *trs; trs = (struct g_raid_tr_raid1e_object *)tr; if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1E || tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1EA) return (G_RAID_TR_TASTE_FAIL); trs->trso_starting = 1; return (G_RAID_TR_TASTE_SUCCEED); } static int g_raid_tr_update_state_raid1e_even(struct g_raid_volume *vol) { struct g_raid_softc *sc; struct g_raid_subdisk *sd, *bestsd, *worstsd; int i, j, state, sstate; sc = vol->v_softc; state = G_RAID_VOLUME_S_OPTIMAL; for (i = 0; i < vol->v_disks_count / N; i++) { bestsd = &vol->v_subdisks[i * N]; for (j = 1; j < N; j++) { sd = &vol->v_subdisks[i * N + j]; if (sd->sd_state > bestsd->sd_state) bestsd = sd; else if (sd->sd_state == bestsd->sd_state && (sd->sd_state == G_RAID_SUBDISK_S_REBUILD || sd->sd_state == G_RAID_SUBDISK_S_RESYNC) && sd->sd_rebuild_pos > bestsd->sd_rebuild_pos) bestsd = sd; } if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED && bestsd->sd_state != G_RAID_SUBDISK_S_ACTIVE) { /* We found reasonable candidate. */ G_RAID_DEBUG1(1, sc, "Promote subdisk %s:%d from %s to ACTIVE.", vol->v_name, bestsd->sd_pos, g_raid_subdisk_state2str(bestsd->sd_state)); g_raid_change_subdisk_state(bestsd, G_RAID_SUBDISK_S_ACTIVE); g_raid_write_metadata(sc, vol, bestsd, bestsd->sd_disk); } worstsd = &vol->v_subdisks[i * N]; for (j = 1; j < N; j++) { sd = &vol->v_subdisks[i * N + j]; if (sd->sd_state < worstsd->sd_state) worstsd = sd; } if (worstsd->sd_state == G_RAID_SUBDISK_S_ACTIVE) sstate = G_RAID_VOLUME_S_OPTIMAL; else if (worstsd->sd_state >= G_RAID_SUBDISK_S_STALE) sstate = G_RAID_VOLUME_S_SUBOPTIMAL; else if (bestsd->sd_state == G_RAID_SUBDISK_S_ACTIVE) sstate = G_RAID_VOLUME_S_DEGRADED; else sstate = G_RAID_VOLUME_S_BROKEN; if (sstate < state) state = sstate; } return (state); } static int g_raid_tr_update_state_raid1e_odd(struct g_raid_volume *vol) { struct g_raid_softc *sc; struct g_raid_subdisk *sd, *bestsd, *worstsd; int i, j, state, sstate; sc = vol->v_softc; if (g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE) == vol->v_disks_count) return (G_RAID_VOLUME_S_OPTIMAL); for (i = 0; i < vol->v_disks_count; i++) { sd = &vol->v_subdisks[i]; if (sd->sd_state == G_RAID_SUBDISK_S_UNINITIALIZED) { /* We found reasonable candidate. 
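— here an UNINITIALIZED subdisk; promoting it to STALE lets the per-position scan below count it as a (possibly outdated) copy of the data.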
*/ G_RAID_DEBUG1(1, sc, "Promote subdisk %s:%d from %s to STALE.", vol->v_name, sd->sd_pos, g_raid_subdisk_state2str(sd->sd_state)); g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_STALE); g_raid_write_metadata(sc, vol, sd, sd->sd_disk); } } state = G_RAID_VOLUME_S_OPTIMAL; for (i = 0; i < vol->v_disks_count; i++) { bestsd = &vol->v_subdisks[i]; worstsd = &vol->v_subdisks[i]; for (j = 1; j < N; j++) { sd = &vol->v_subdisks[(i + j) % vol->v_disks_count]; if (sd->sd_state > bestsd->sd_state) bestsd = sd; else if (sd->sd_state == bestsd->sd_state && (sd->sd_state == G_RAID_SUBDISK_S_REBUILD || sd->sd_state == G_RAID_SUBDISK_S_RESYNC) && sd->sd_rebuild_pos > bestsd->sd_rebuild_pos) bestsd = sd; if (sd->sd_state < worstsd->sd_state) worstsd = sd; } if (worstsd->sd_state == G_RAID_SUBDISK_S_ACTIVE) sstate = G_RAID_VOLUME_S_OPTIMAL; else if (worstsd->sd_state >= G_RAID_SUBDISK_S_STALE) sstate = G_RAID_VOLUME_S_SUBOPTIMAL; else if (bestsd->sd_state >= G_RAID_SUBDISK_S_STALE) sstate = G_RAID_VOLUME_S_DEGRADED; else sstate = G_RAID_VOLUME_S_BROKEN; if (sstate < state) state = sstate; } return (state); } static int g_raid_tr_update_state_raid1e(struct g_raid_volume *vol, struct g_raid_subdisk *sd) { struct g_raid_tr_raid1e_object *trs; struct g_raid_softc *sc; u_int s; sc = vol->v_softc; trs = (struct g_raid_tr_raid1e_object *)vol->v_tr; if (trs->trso_stopping && (trs->trso_flags & TR_RAID1E_F_DOING_SOME) == 0) s = G_RAID_VOLUME_S_STOPPED; else if (trs->trso_starting) s = G_RAID_VOLUME_S_STARTING; else { if ((vol->v_disks_count % N) == 0) s = g_raid_tr_update_state_raid1e_even(vol); else s = g_raid_tr_update_state_raid1e_odd(vol); } if (s != vol->v_state) { g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ? G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN, G_RAID_EVENT_VOLUME); g_raid_change_volume_state(vol, s); if (!trs->trso_starting && !trs->trso_stopping) g_raid_write_metadata(sc, vol, NULL, NULL); } if (!trs->trso_starting && !trs->trso_stopping) g_raid_tr_raid1e_maybe_rebuild(vol->v_tr, sd); return (0); } static void g_raid_tr_raid1e_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd, struct g_raid_disk *disk) { struct g_raid_volume *vol; vol = sd->sd_volume; /* * We don't fail the last disk in the pack, since it still has decent * data on it and that's better than failing the disk if it is the root * file system. * * XXX should this be controlled via a tunable? It makes sense for * the volume that has / on it. I can't think of a case where we'd * want the volume to go away on this kind of event. 
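* The test below implements this: when fewer data-bearing subdisks (ACTIVE, RESYNC, STALE or UNINITIALIZED) remain than v_disks_count and this subdisk itself still carries data, it is spared; only otherwise does control fall through to g_raid_fail_disk().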
*/ if ((g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE) + g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC) + g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) + g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED) < vol->v_disks_count) && (sd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED)) return; g_raid_fail_disk(sc, sd, disk); } static void g_raid_tr_raid1e_rebuild_done(struct g_raid_tr_raid1e_object *trs) { struct g_raid_volume *vol; struct g_raid_subdisk *sd; vol = trs->trso_base.tro_volume; sd = trs->trso_failed_sd; g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk); free(trs->trso_buffer, M_TR_RAID1E); trs->trso_buffer = NULL; trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME; trs->trso_type = TR_RAID1E_NONE; trs->trso_recover_slabs = 0; trs->trso_failed_sd = NULL; g_raid_tr_update_state_raid1e(vol, NULL); } static void g_raid_tr_raid1e_rebuild_finish(struct g_raid_tr_object *tr) { struct g_raid_tr_raid1e_object *trs; struct g_raid_subdisk *sd; trs = (struct g_raid_tr_raid1e_object *)tr; sd = trs->trso_failed_sd; G_RAID_DEBUG1(0, tr->tro_volume->v_softc, "Subdisk %s:%d-%s rebuild completed.", sd->sd_volume->v_name, sd->sd_pos, sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]"); g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE); sd->sd_rebuild_pos = 0; g_raid_tr_raid1e_rebuild_done(trs); } static void g_raid_tr_raid1e_rebuild_abort(struct g_raid_tr_object *tr) { struct g_raid_tr_raid1e_object *trs; struct g_raid_subdisk *sd; struct g_raid_volume *vol; vol = tr->tro_volume; trs = (struct g_raid_tr_raid1e_object *)tr; sd = trs->trso_failed_sd; if (trs->trso_flags & TR_RAID1E_F_DOING_SOME) { G_RAID_DEBUG1(1, vol->v_softc, "Subdisk %s:%d-%s rebuild is aborting.", sd->sd_volume->v_name, sd->sd_pos, sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]"); trs->trso_flags |= TR_RAID1E_F_ABORT; } else { G_RAID_DEBUG1(0, vol->v_softc, "Subdisk %s:%d-%s rebuild aborted.", sd->sd_volume->v_name, sd->sd_pos, sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]"); trs->trso_flags &= ~TR_RAID1E_F_ABORT; if (trs->trso_flags & TR_RAID1E_F_LOCKED) { trs->trso_flags &= ~TR_RAID1E_F_LOCKED; g_raid_unlock_range(tr->tro_volume, trs->trso_lock_pos, trs->trso_lock_len); } g_raid_tr_raid1e_rebuild_done(trs); } } static void g_raid_tr_raid1e_rebuild_some(struct g_raid_tr_object *tr) { struct g_raid_tr_raid1e_object *trs; struct g_raid_softc *sc; struct g_raid_volume *vol; struct g_raid_subdisk *sd; struct bio *bp; off_t len, virtual, vend, offset, start; int disk, copy, best; trs = (struct g_raid_tr_raid1e_object *)tr; if (trs->trso_flags & TR_RAID1E_F_DOING_SOME) return; vol = tr->tro_volume; sc = vol->v_softc; sd = trs->trso_failed_sd; while (1) { if (sd->sd_rebuild_pos >= sd->sd_size) { g_raid_tr_raid1e_rebuild_finish(tr); return; } /* Get virtual offset from physical rebuild position. */ P2V(vol, sd->sd_pos, sd->sd_rebuild_pos, &virtual, &copy); /* Get physical offset back to get first stripe position. */ V2P(vol, virtual, &disk, &offset, &start); /* Calculate contiguous data length. */ len = MIN(g_raid1e_rebuild_slab, sd->sd_size - sd->sd_rebuild_pos); if ((vol->v_disks_count % N) != 0) len = MIN(len, vol->v_strip_size - start); /* Find disk with most accurate data. */ best = g_raid_tr_raid1e_select_read_disk(vol, disk, offset + start, len, 0); if (best < 0) { /* There is no valid disk. */ g_raid_tr_raid1e_rebuild_abort(tr); return; } else if (best != copy) { /* Some other disk has better data. */ break; } /* We have the most accurate data. Skip the range.
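In that case the loop merely advances sd->sd_rebuild_pos by len and retries with the next slab; only when another copy is more up to date (best != copy) does it break out to queue the rebuild read below.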
*/ G_RAID_DEBUG1(3, sc, "Skipping rebuild for range %ju - %ju", sd->sd_rebuild_pos, sd->sd_rebuild_pos + len); sd->sd_rebuild_pos += len; } bp = &trs->trso_bio; memset(bp, 0, sizeof(*bp)); bp->bio_offset = offset + start + ((disk + best >= vol->v_disks_count) ? vol->v_strip_size : 0); bp->bio_length = len; bp->bio_data = trs->trso_buffer; bp->bio_cmd = BIO_READ; bp->bio_cflags = G_RAID_BIO_FLAG_SYNC; bp->bio_caller1 = &vol->v_subdisks[(disk + best) % vol->v_disks_count]; G_RAID_LOGREQ(3, bp, "Queueing rebuild read"); /* * If we are crossing a stripe boundary, correct the affected virtual * range we should lock. */ if (start + len > vol->v_strip_size) { P2V(vol, sd->sd_pos, sd->sd_rebuild_pos + len, &vend, &copy); len = vend - virtual; } trs->trso_flags |= TR_RAID1E_F_DOING_SOME; trs->trso_flags |= TR_RAID1E_F_LOCKED; trs->trso_lock_pos = virtual; trs->trso_lock_len = len; /* Lock callback starts I/O */ g_raid_lock_range(sd->sd_volume, virtual, len, NULL, bp); } static void g_raid_tr_raid1e_rebuild_start(struct g_raid_tr_object *tr) { struct g_raid_volume *vol; struct g_raid_tr_raid1e_object *trs; struct g_raid_subdisk *sd; vol = tr->tro_volume; trs = (struct g_raid_tr_raid1e_object *)tr; if (trs->trso_failed_sd) { G_RAID_DEBUG1(1, vol->v_softc, "Already rebuild in start rebuild. pos %jd\n", (intmax_t)trs->trso_failed_sd->sd_rebuild_pos); return; } sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC); if (sd == NULL) sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD); if (sd == NULL) { sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE); if (sd != NULL) { sd->sd_rebuild_pos = 0; g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_RESYNC); g_raid_write_metadata(vol->v_softc, vol, sd, NULL); } else { sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_UNINITIALIZED); if (sd == NULL) sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_NEW); if (sd != NULL) { sd->sd_rebuild_pos = 0; g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_REBUILD); g_raid_write_metadata(vol->v_softc, vol, sd, NULL); } } } if (sd == NULL) { G_RAID_DEBUG1(1, vol->v_softc, "No failed disk to rebuild. night night."); return; } trs->trso_failed_sd = sd; G_RAID_DEBUG1(0, vol->v_softc, "Subdisk %s:%d-%s rebuild start at %jd.", sd->sd_volume->v_name, sd->sd_pos, sd->sd_disk ?
g_raid_get_diskname(sd->sd_disk) : "[none]", trs->trso_failed_sd->sd_rebuild_pos); trs->trso_type = TR_RAID1E_REBUILD; trs->trso_buffer = malloc(g_raid1e_rebuild_slab, M_TR_RAID1E, M_WAITOK); trs->trso_meta_update = g_raid1e_rebuild_meta_update; g_raid_tr_raid1e_rebuild_some(tr); } static void g_raid_tr_raid1e_maybe_rebuild(struct g_raid_tr_object *tr, struct g_raid_subdisk *sd) { struct g_raid_volume *vol; struct g_raid_tr_raid1e_object *trs; int nr; vol = tr->tro_volume; trs = (struct g_raid_tr_raid1e_object *)tr; if (trs->trso_stopping) return; nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) + g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC); switch(trs->trso_type) { case TR_RAID1E_NONE: if (vol->v_state < G_RAID_VOLUME_S_DEGRADED) return; if (nr == 0) { nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) + g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) + g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED); if (nr == 0) return; } g_raid_tr_raid1e_rebuild_start(tr); break; case TR_RAID1E_REBUILD: if (vol->v_state < G_RAID_VOLUME_S_DEGRADED || nr == 0 || trs->trso_failed_sd == sd) g_raid_tr_raid1e_rebuild_abort(tr); break; case TR_RAID1E_RESYNC: break; } } static int g_raid_tr_event_raid1e(struct g_raid_tr_object *tr, struct g_raid_subdisk *sd, u_int event) { g_raid_tr_update_state_raid1e(tr->tro_volume, sd); return (0); } static int g_raid_tr_start_raid1e(struct g_raid_tr_object *tr) { struct g_raid_tr_raid1e_object *trs; struct g_raid_volume *vol; trs = (struct g_raid_tr_raid1e_object *)tr; vol = tr->tro_volume; trs->trso_starting = 0; g_raid_tr_update_state_raid1e(vol, NULL); return (0); } static int g_raid_tr_stop_raid1e(struct g_raid_tr_object *tr) { struct g_raid_tr_raid1e_object *trs; struct g_raid_volume *vol; trs = (struct g_raid_tr_raid1e_object *)tr; vol = tr->tro_volume; trs->trso_starting = 0; trs->trso_stopping = 1; g_raid_tr_update_state_raid1e(vol, NULL); return (0); } /* * Select the disk to read from. Take into account: subdisk state, running * error recovery, average disk load, head position and possible cache hits. */ #define ABS(x) (((x) >= 0) ? (x) : (-(x))) static int g_raid_tr_raid1e_select_read_disk(struct g_raid_volume *vol, int no, off_t off, off_t len, u_int mask) { struct g_raid_subdisk *sd; off_t offset; int i, best, prio, bestprio; best = -1; bestprio = INT_MAX; for (i = 0; i < N; i++) { sd = &vol->v_subdisks[(no + i) % vol->v_disks_count]; offset = off; if (no + i >= vol->v_disks_count) offset += vol->v_strip_size; prio = G_RAID_SUBDISK_LOAD(sd); if ((mask & (1 << sd->sd_pos)) != 0) continue; switch (sd->sd_state) { case G_RAID_SUBDISK_S_ACTIVE: break; case G_RAID_SUBDISK_S_RESYNC: if (offset + off < sd->sd_rebuild_pos) break; /* FALLTHROUGH */ case G_RAID_SUBDISK_S_STALE: prio += i << 24; break; case G_RAID_SUBDISK_S_REBUILD: if (offset + off < sd->sd_rebuild_pos) break; /* FALLTHROUGH */ default: continue; } prio += min(sd->sd_recovery, 255) << 16; /* If disk head is precisely in position - highly prefer it. */ if (G_RAID_SUBDISK_POS(sd) == offset) prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE; else /* If disk head is close to position - prefer it. 
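 *
 * Taken together, prio is layered: bits 24 and up penalize copies that
 * are stale or not yet reached by a running resync, bits 16-23 count
 * recovery operations in flight, and the low bits carry the load
 * average, from which the head-position bonus is subtracted.  The
 * subdisk with the lowest resulting prio wins.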
*/ if (ABS(G_RAID_SUBDISK_POS(sd) - offset) < G_RAID_SUBDISK_TRACK_SIZE) prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE; if (prio < bestprio) { bestprio = prio; best = i; } } return (best); } static void g_raid_tr_iostart_raid1e_read(struct g_raid_tr_object *tr, struct bio *bp) { struct g_raid_volume *vol; struct g_raid_subdisk *sd; struct bio_queue_head queue; struct bio *cbp; char *addr; off_t offset, start, length, remain; u_int no, strip_size; int best; vol = tr->tro_volume; if ((bp->bio_flags & BIO_UNMAPPED) != 0) addr = NULL; else addr = bp->bio_data; strip_size = vol->v_strip_size; V2P(vol, bp->bio_offset, &no, &offset, &start); remain = bp->bio_length; bioq_init(&queue); while (remain > 0) { length = MIN(strip_size - start, remain); best = g_raid_tr_raid1e_select_read_disk(vol, no, offset, length, 0); KASSERT(best >= 0, ("No readable disk in volume %s!", vol->v_name)); no += best; if (no >= vol->v_disks_count) { no -= vol->v_disks_count; offset += strip_size; } cbp = g_clone_bio(bp); if (cbp == NULL) goto failure; cbp->bio_offset = offset + start; cbp->bio_length = length; if ((bp->bio_flags & BIO_UNMAPPED) != 0) { cbp->bio_ma_offset += (uintptr_t)addr; cbp->bio_ma += cbp->bio_ma_offset / PAGE_SIZE; cbp->bio_ma_offset %= PAGE_SIZE; cbp->bio_ma_n = round_page(cbp->bio_ma_offset + cbp->bio_length) / PAGE_SIZE; } else cbp->bio_data = addr; cbp->bio_caller1 = &vol->v_subdisks[no]; bioq_insert_tail(&queue, cbp); no += N - best; if (no >= vol->v_disks_count) { no -= vol->v_disks_count; offset += strip_size; } remain -= length; addr += length; start = 0; } while ((cbp = bioq_takefirst(&queue)) != NULL) { sd = cbp->bio_caller1; cbp->bio_caller1 = NULL; g_raid_subdisk_iostart(sd, cbp); } return; failure: while ((cbp = bioq_takefirst(&queue)) != NULL) g_destroy_bio(cbp); if (bp->bio_error == 0) bp->bio_error = ENOMEM; g_raid_iodone(bp, bp->bio_error); } static void g_raid_tr_iostart_raid1e_write(struct g_raid_tr_object *tr, struct bio *bp) { struct g_raid_volume *vol; struct g_raid_subdisk *sd; struct bio_queue_head queue; struct bio *cbp; char *addr; off_t offset, start, length, remain; u_int no, strip_size; int i; vol = tr->tro_volume; if ((bp->bio_flags & BIO_UNMAPPED) != 0) addr = NULL; else addr = bp->bio_data; strip_size = vol->v_strip_size; V2P(vol, bp->bio_offset, &no, &offset, &start); remain = bp->bio_length; bioq_init(&queue); while (remain > 0) { length = MIN(strip_size - start, remain); for (i = 0; i < N; i++) { sd = &vol->v_subdisks[no]; switch (sd->sd_state) { case G_RAID_SUBDISK_S_ACTIVE: case G_RAID_SUBDISK_S_STALE: case G_RAID_SUBDISK_S_RESYNC: break; case G_RAID_SUBDISK_S_REBUILD: if (offset + start >= sd->sd_rebuild_pos) goto nextdisk; break; default: goto nextdisk; } cbp = g_clone_bio(bp); if (cbp == NULL) goto failure; cbp->bio_offset = offset + start; cbp->bio_length = length; if ((bp->bio_flags & BIO_UNMAPPED) != 0 && bp->bio_cmd != BIO_DELETE) { cbp->bio_ma_offset += (uintptr_t)addr; cbp->bio_ma += cbp->bio_ma_offset / PAGE_SIZE; cbp->bio_ma_offset %= PAGE_SIZE; cbp->bio_ma_n = round_page(cbp->bio_ma_offset + cbp->bio_length) / PAGE_SIZE; } else cbp->bio_data = addr; cbp->bio_caller1 = sd; bioq_insert_tail(&queue, cbp); nextdisk: if (++no >= vol->v_disks_count) { no = 0; offset += strip_size; } } remain -= length; if (bp->bio_cmd != BIO_DELETE) addr += length; start = 0; } while ((cbp = bioq_takefirst(&queue)) != NULL) { sd = cbp->bio_caller1; cbp->bio_caller1 = NULL; g_raid_subdisk_iostart(sd, cbp); } return; failure: while ((cbp = bioq_takefirst(&queue)) != NULL) 
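/* Undo the clones built so far, then fail the request with ENOMEM. */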
g_destroy_bio(cbp); if (bp->bio_error == 0) bp->bio_error = ENOMEM; g_raid_iodone(bp, bp->bio_error); } static void g_raid_tr_iostart_raid1e(struct g_raid_tr_object *tr, struct bio *bp) { struct g_raid_volume *vol; struct g_raid_tr_raid1e_object *trs; vol = tr->tro_volume; trs = (struct g_raid_tr_raid1e_object *)tr; if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL && vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL && vol->v_state != G_RAID_VOLUME_S_DEGRADED) { g_raid_iodone(bp, EIO); return; } /* * If we're rebuilding, squeeze in rebuild activity every so often, * even when the disk is busy. Be sure to only count real I/O * to the disk. All 'SPECIAL' I/O is traffic generated to the disk * by this module. */ if (trs->trso_failed_sd != NULL && !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) { /* Make this new or already running round short. */ trs->trso_recover_slabs = 0; if (--trs->trso_fair_io <= 0) { trs->trso_fair_io = g_raid1e_rebuild_fair_io; g_raid_tr_raid1e_rebuild_some(tr); } } switch (bp->bio_cmd) { case BIO_READ: g_raid_tr_iostart_raid1e_read(tr, bp); break; case BIO_WRITE: case BIO_DELETE: g_raid_tr_iostart_raid1e_write(tr, bp); break; case BIO_FLUSH: g_raid_tr_flush_common(tr, bp); break; default: KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)", bp->bio_cmd, vol->v_name)); break; } } static void g_raid_tr_iodone_raid1e(struct g_raid_tr_object *tr, struct g_raid_subdisk *sd, struct bio *bp) { struct bio *cbp; struct g_raid_subdisk *nsd; struct g_raid_volume *vol; struct bio *pbp; struct g_raid_tr_raid1e_object *trs; off_t virtual, offset, start; uintptr_t mask; int error, do_write, copy, disk, best; trs = (struct g_raid_tr_raid1e_object *)tr; vol = tr->tro_volume; if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) { if (trs->trso_type == TR_RAID1E_REBUILD) { nsd = trs->trso_failed_sd; if (bp->bio_cmd == BIO_READ) { /* Immediately abort rebuild, if requested. */ if (trs->trso_flags & TR_RAID1E_F_ABORT) { trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME; g_raid_tr_raid1e_rebuild_abort(tr); return; } /* On read error, skip and cross fingers. */ if (bp->bio_error != 0) { G_RAID_LOGREQ(0, bp, "Read error during rebuild (%d), " "possible data loss!", bp->bio_error); goto rebuild_round_done; } /* * The read operation finished, queue the * write and get out. */ G_RAID_LOGREQ(3, bp, "Rebuild read done: %d", bp->bio_error); bp->bio_cmd = BIO_WRITE; bp->bio_cflags = G_RAID_BIO_FLAG_SYNC; bp->bio_offset = nsd->sd_rebuild_pos; G_RAID_LOGREQ(3, bp, "Queueing rebuild write."); g_raid_subdisk_iostart(nsd, bp); } else { /* * The write operation just finished. Do * another. We keep cloning the master bio * since it has the right buffers allocated to * it.
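 *
 * Each rebuild round therefore follows the same read-then-write
 * pattern: read one slab from the most up-to-date copy, requeue the
 * same bio as a BIO_WRITE at the rebuild position, and on completion
 * either advance sd_rebuild_pos or abort.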
*/ G_RAID_LOGREQ(3, bp, "Rebuild write done: %d", bp->bio_error); if (bp->bio_error != 0 || trs->trso_flags & TR_RAID1E_F_ABORT) { if ((trs->trso_flags & TR_RAID1E_F_ABORT) == 0) { g_raid_tr_raid1e_fail_disk(sd->sd_softc, nsd, nsd->sd_disk); } trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME; g_raid_tr_raid1e_rebuild_abort(tr); return; } rebuild_round_done: trs->trso_flags &= ~TR_RAID1E_F_LOCKED; g_raid_unlock_range(tr->tro_volume, trs->trso_lock_pos, trs->trso_lock_len); nsd->sd_rebuild_pos += bp->bio_length; if (nsd->sd_rebuild_pos >= nsd->sd_size) { g_raid_tr_raid1e_rebuild_finish(tr); return; } /* Abort rebuild if we are stopping */ if (trs->trso_stopping) { trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME; g_raid_tr_raid1e_rebuild_abort(tr); return; } if (--trs->trso_meta_update <= 0) { g_raid_write_metadata(vol->v_softc, vol, nsd, nsd->sd_disk); trs->trso_meta_update = g_raid1e_rebuild_meta_update; /* Compensate short rebuild I/Os. */ if ((vol->v_disks_count % N) != 0 && vol->v_strip_size < g_raid1e_rebuild_slab) { trs->trso_meta_update *= g_raid1e_rebuild_slab; trs->trso_meta_update /= vol->v_strip_size; } } trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME; if (--trs->trso_recover_slabs <= 0) return; /* Run next rebuild iteration. */ g_raid_tr_raid1e_rebuild_some(tr); } } else if (trs->trso_type == TR_RAID1E_RESYNC) { /* * read good sd, read bad sd in parallel. when both * done, compare the buffers. write good to the bad * if different. do the next bit of work. */ panic("Somehow, we think we're doing a resync"); } return; } pbp = bp->bio_parent; pbp->bio_inbed++; mask = (intptr_t)bp->bio_caller2; if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) { /* * Read failed on first drive. Retry the read error on * another disk drive, if available, before erroring out the * read. */ sd->sd_disk->d_read_errs++; G_RAID_LOGREQ(0, bp, "Read error (%d), %d read errors total", bp->bio_error, sd->sd_disk->d_read_errs); /* * If there are too many read errors, we move to degraded. * XXX Do we want to FAIL the drive (eg, make the user redo * everything to get it back in sync), or just degrade the * drive, which kicks off a resync? */ do_write = 0; if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) g_raid_tr_raid1e_fail_disk(sd->sd_softc, sd, sd->sd_disk); else if (mask == 0) do_write = 1; /* Restore what we were doing. */ P2V(vol, sd->sd_pos, bp->bio_offset, &virtual, &copy); V2P(vol, virtual, &disk, &offset, &start); /* Find the other disk, and try to do the I/O to it. */ mask |= 1 << copy; best = g_raid_tr_raid1e_select_read_disk(vol, disk, offset, start, mask); if (best >= 0 && (cbp = g_clone_bio(pbp)) != NULL) { disk += best; if (disk >= vol->v_disks_count) { disk -= vol->v_disks_count; offset += vol->v_strip_size; } cbp->bio_offset = offset + start; cbp->bio_length = bp->bio_length; cbp->bio_data = bp->bio_data; cbp->bio_ma = bp->bio_ma; cbp->bio_ma_offset = bp->bio_ma_offset; cbp->bio_ma_n = bp->bio_ma_n; g_destroy_bio(bp); nsd = &vol->v_subdisks[disk]; G_RAID_LOGREQ(2, cbp, "Retrying read from %d", nsd->sd_pos); if (do_write) mask |= 1 << 31; - if ((mask & (1 << 31)) != 0) + if ((mask & (1U << 31)) != 0) sd->sd_recovery++; cbp->bio_caller2 = (void *)mask; if (do_write) { cbp->bio_caller1 = nsd; /* Lock callback starts I/O */ g_raid_lock_range(sd->sd_volume, virtual, cbp->bio_length, pbp, cbp); } else { g_raid_subdisk_iostart(nsd, cbp); } return; } /* * We can't retry. Return the original error by falling * through. This will happen when there's only one good disk.
* We don't need to fail the raid, since its actual state is * based on the state of the subdisks. */ G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it"); } if (bp->bio_cmd == BIO_READ && bp->bio_error == 0 && - (mask & (1 << 31)) != 0) { + (mask & (1U << 31)) != 0) { G_RAID_LOGREQ(3, bp, "Recovered data from other drive"); /* Restore what we were doing. */ P2V(vol, sd->sd_pos, bp->bio_offset, &virtual, &copy); V2P(vol, virtual, &disk, &offset, &start); /* Find best disk to write. */ best = g_raid_tr_raid1e_select_read_disk(vol, disk, offset, start, ~mask); if (best >= 0 && (cbp = g_clone_bio(pbp)) != NULL) { disk += best; if (disk >= vol->v_disks_count) { disk -= vol->v_disks_count; offset += vol->v_strip_size; } cbp->bio_offset = offset + start; cbp->bio_cmd = BIO_WRITE; cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP; cbp->bio_caller2 = (void *)mask; g_destroy_bio(bp); G_RAID_LOGREQ(2, cbp, "Attempting bad sector remap on failing drive."); g_raid_subdisk_iostart(&vol->v_subdisks[disk], cbp); return; } } - if ((mask & (1 << 31)) != 0) { + if ((mask & (1U << 31)) != 0) { /* * We're done with a recovery, mark the range as unlocked. * For any write errors, we aggressively fail the disk since * there was both a READ and a WRITE error at this location. * Both types of errors generally indicate the drive is on * the verge of total failure anyway. Better to stop trusting * it now. However, we need to reset error to 0 in that case * because we're not failing the original I/O which succeeded. */ /* Restore what we were doing. */ P2V(vol, sd->sd_pos, bp->bio_offset, &virtual, &copy); V2P(vol, virtual, &disk, &offset, &start); for (copy = 0; copy < N; copy++) { if ((mask & (1 << copy)) != 0) vol->v_subdisks[(disk + copy) % vol->v_disks_count].sd_recovery--; } if (bp->bio_cmd == BIO_WRITE && bp->bio_error) { G_RAID_LOGREQ(0, bp, "Remap write failed: " "failing subdisk."); g_raid_tr_raid1e_fail_disk(sd->sd_softc, sd, sd->sd_disk); bp->bio_error = 0; } G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error); g_raid_unlock_range(sd->sd_volume, virtual, bp->bio_length); } if (pbp->bio_cmd != BIO_READ) { if (pbp->bio_inbed == 1 || pbp->bio_error != 0) pbp->bio_error = bp->bio_error; if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) { G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk."); g_raid_tr_raid1e_fail_disk(sd->sd_softc, sd, sd->sd_disk); } error = pbp->bio_error; } else error = bp->bio_error; g_destroy_bio(bp); if (pbp->bio_children == pbp->bio_inbed) { pbp->bio_completed = pbp->bio_length; g_raid_iodone(pbp, error); } } static int g_raid_tr_kerneldump_raid1e(struct g_raid_tr_object *tr, void *virtual, vm_offset_t physical, off_t boffset, size_t blength) { struct g_raid_volume *vol; struct g_raid_subdisk *sd; struct bio_queue_head queue; char *addr; off_t offset, start, length, remain; u_int no, strip_size; int i, error; vol = tr->tro_volume; addr = virtual; strip_size = vol->v_strip_size; V2P(vol, boffset, &no, &offset, &start); remain = blength; bioq_init(&queue); while (remain > 0) { length = MIN(strip_size - start, remain); for (i = 0; i < N; i++) { sd = &vol->v_subdisks[no]; switch (sd->sd_state) { case G_RAID_SUBDISK_S_ACTIVE: case G_RAID_SUBDISK_S_STALE: case G_RAID_SUBDISK_S_RESYNC: break; case G_RAID_SUBDISK_S_REBUILD: if (offset + start >= sd->sd_rebuild_pos) goto nextdisk; break; default: goto nextdisk; } error = g_raid_subdisk_kerneldump(sd, addr, 0, offset + start, length); if (error != 0) return (error); nextdisk: if (++no >= vol->v_disks_count) { no = 0; offset += strip_size; } } remain -=
length; addr += length; start = 0; } return (0); } static int g_raid_tr_locked_raid1e(struct g_raid_tr_object *tr, void *argp) { struct bio *bp; struct g_raid_subdisk *sd; bp = (struct bio *)argp; sd = (struct g_raid_subdisk *)bp->bio_caller1; g_raid_subdisk_iostart(sd, bp); return (0); } static int g_raid_tr_idle_raid1e(struct g_raid_tr_object *tr) { struct g_raid_tr_raid1e_object *trs; struct g_raid_volume *vol; vol = tr->tro_volume; trs = (struct g_raid_tr_raid1e_object *)tr; trs->trso_fair_io = g_raid1e_rebuild_fair_io; trs->trso_recover_slabs = g_raid1e_rebuild_cluster_idle; /* Compensate short rebuild I/Os. */ if ((vol->v_disks_count % N) != 0 && vol->v_strip_size < g_raid1e_rebuild_slab) { trs->trso_recover_slabs *= g_raid1e_rebuild_slab; trs->trso_recover_slabs /= vol->v_strip_size; } if (trs->trso_type == TR_RAID1E_REBUILD) g_raid_tr_raid1e_rebuild_some(tr); return (0); } static int g_raid_tr_free_raid1e(struct g_raid_tr_object *tr) { struct g_raid_tr_raid1e_object *trs; trs = (struct g_raid_tr_raid1e_object *)tr; if (trs->trso_buffer != NULL) { free(trs->trso_buffer, M_TR_RAID1E); trs->trso_buffer = NULL; } return (0); } G_RAID_TR_DECLARE(raid1e, "RAID1E"); Index: head/sys/i386/pci/pci_cfgreg.c =================================================================== --- head/sys/i386/pci/pci_cfgreg.c (revision 258779) +++ head/sys/i386/pci/pci_cfgreg.c (revision 258780) @@ -1,729 +1,729 @@ /*- * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * Copyright (c) 2004, Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_xbox.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef XBOX #include #endif #define PRVERB(a) do { \ if (bootverbose) \ printf a ; \ } while(0) #define PCIE_CACHE 8 struct pcie_cfg_elem { TAILQ_ENTRY(pcie_cfg_elem) elem; vm_offset_t vapage; vm_paddr_t papage; }; enum { CFGMECH_NONE = 0, CFGMECH_1, CFGMECH_2, CFGMECH_PCIE, }; SYSCTL_DECL(_hw_pci); static TAILQ_HEAD(pcie_cfg_list, pcie_cfg_elem) pcie_list[MAXCPU]; static uint64_t pcie_base; static int pcie_minbus, pcie_maxbus; static uint32_t pcie_badslots; static int cfgmech; static int devmax; static struct mtx pcicfg_mtx; static int mcfg_enable = 1; TUNABLE_INT("hw.pci.mcfg", &mcfg_enable); SYSCTL_INT(_hw_pci, OID_AUTO, mcfg, CTLFLAG_RDTUN, &mcfg_enable, 0, "Enable support for PCI-e memory mapped config access"); static uint32_t pci_docfgregread(int bus, int slot, int func, int reg, int bytes); static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes); static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes); #ifndef XEN static int pcireg_cfgopen(void); #endif static int pciereg_cfgread(int bus, unsigned slot, unsigned func, unsigned reg, unsigned bytes); static void pciereg_cfgwrite(int bus, unsigned slot, unsigned func, unsigned reg, int data, unsigned bytes); /* * Some BIOS writers seem to want to ignore the spec and put * 0 in the intline rather than 255 to indicate none. Some use * numbers in the range 128-254 to indicate something strange and * apparently undocumented anywhere. Assume these are completely bogus * and map them to 255, which means "none". */ static __inline int pci_i386_map_intline(int line) { if (line == 0 || line >= 128) return (PCI_INVALID_IRQ); return (line); } #ifndef XEN static u_int16_t pcibios_get_version(void) { struct bios_regs args; if (PCIbios.ventry == 0) { PRVERB(("pcibios: No call entry point\n")); return (0); } args.eax = PCIBIOS_BIOS_PRESENT; if (bios32(&args, PCIbios.ventry, GSEL(GCODE_SEL, SEL_KPL))) { PRVERB(("pcibios: BIOS_PRESENT call failed\n")); return (0); } if (args.edx != 0x20494350) { PRVERB(("pcibios: BIOS_PRESENT didn't return 'PCI ' in edx\n")); return (0); } return (args.ebx & 0xffff); } #endif /* * Initialise access to PCI configuration space */ int pci_cfgregopen(void) { #ifdef XEN return (0); #else static int opened = 0; uint64_t pciebar; u_int16_t vid, did; u_int16_t v; if (opened) return (1); if (cfgmech == CFGMECH_NONE && pcireg_cfgopen() == 0) return (0); v = pcibios_get_version(); if (v > 0) PRVERB(("pcibios: BIOS version %x.%02x\n", (v & 0xff00) >> 8, v & 0xff)); mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; /* $PIR requires PCI BIOS 2.10 or greater. */ if (v >= 0x0210) pci_pir_open(); if (cfgmech == CFGMECH_PCIE) return (1); /* * Grope around in the PCI config space to see if this is a * chipset that is capable of doing memory-mapped config cycles. * This also implies that it can do PCIe extended config cycles. 
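 *
 * Concretely, the code below reads the host bridge's vendor and device
 * ID at bus 0, slot 0, function 0 and, for the chipsets it recognizes,
 * extracts the memory-mapped window base from a chipset-specific
 * register (a 16-bit value shifted left 16 bits on the Intel 7520/7320,
 * a full 32-bit register on the 915/925 family) before handing it to
 * pcie_cfgregopen().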
*/ /* Check for supported chipsets */ vid = pci_cfgregread(0, 0, 0, PCIR_VENDOR, 2); did = pci_cfgregread(0, 0, 0, PCIR_DEVICE, 2); switch (vid) { case 0x8086: switch (did) { case 0x3590: case 0x3592: /* Intel 7520 or 7320 */ pciebar = pci_cfgregread(0, 0, 0, 0xce, 2) << 16; pcie_cfgregopen(pciebar, 0, 255); break; case 0x2580: case 0x2584: case 0x2590: /* Intel 915, 925, or 915GM */ pciebar = pci_cfgregread(0, 0, 0, 0x48, 4); pcie_cfgregopen(pciebar, 0, 255); break; } } return(1); #endif } static uint32_t pci_docfgregread(int bus, int slot, int func, int reg, int bytes) { if (cfgmech == CFGMECH_PCIE && (bus >= pcie_minbus && bus <= pcie_maxbus) && (bus != 0 || !(1 << slot & pcie_badslots))) return (pciereg_cfgread(bus, slot, func, reg, bytes)); else return (pcireg_cfgread(bus, slot, func, reg, bytes)); } /* * Read configuration space register */ u_int32_t pci_cfgregread(int bus, int slot, int func, int reg, int bytes) { uint32_t line; /* * Some BIOS writers seem to want to ignore the spec and put * 0 in the intline rather than 255 to indicate none. The rest of * the code uses 255 as an invalid IRQ. */ if (reg == PCIR_INTLINE && bytes == 1) { line = pci_docfgregread(bus, slot, func, PCIR_INTLINE, 1); return (pci_i386_map_intline(line)); } return (pci_docfgregread(bus, slot, func, reg, bytes)); } /* * Write configuration space register */ void pci_cfgregwrite(int bus, int slot, int func, int reg, u_int32_t data, int bytes) { if (cfgmech == CFGMECH_PCIE && (bus >= pcie_minbus && bus <= pcie_maxbus) && (bus != 0 || !(1 << slot & pcie_badslots))) pciereg_cfgwrite(bus, slot, func, reg, data, bytes); else pcireg_cfgwrite(bus, slot, func, reg, data, bytes); } /* * Configuration space access using direct register operations */ /* enable configuration space accesses and return data port address */ static int pci_cfgenable(unsigned bus, unsigned slot, unsigned func, int reg, int bytes) { int dataport = 0; #ifdef XBOX if (arch_i386_is_xbox) { /* * The Xbox MCPX chipset is a derivative of the nForce 1 * chipset. It almost has the same bus layout; some devices * cannot be used, because they have been removed. */ /* * Devices 00:00.1 and 00:00.2 used to be memory controllers on * the nForce chipset, but on the Xbox, using them will lockup * the chipset. */ if (bus == 0 && slot == 0 && (func == 1 || func == 2)) return dataport; /* * Bus 1 only contains a VGA controller at 01:00.0. When you try * to probe beyond that device, you only get garbage, which * could cause lockups. */ if (bus == 1 && (slot != 0 || func != 0)) return dataport; /* * Bus 2 used to contain the AGP controller, but the Xbox MCPX * doesn't have one. Probing it can cause lockups. */ if (bus >= 2) return dataport; } #endif if (bus <= PCI_BUSMAX && slot < devmax && func <= PCI_FUNCMAX && (unsigned)reg <= PCI_REGMAX && bytes != 3 && (unsigned)bytes <= 4 && (reg & (bytes - 1)) == 0) { switch (cfgmech) { case CFGMECH_PCIE: case CFGMECH_1: - outl(CONF1_ADDR_PORT, (1 << 31) + outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11) | (func << 8) | (reg & ~0x03)); dataport = CONF1_DATA_PORT + (reg & 0x03); break; case CFGMECH_2: outb(CONF2_ENABLE_PORT, 0xf0 | (func << 1)); outb(CONF2_FORWARD_PORT, bus); dataport = 0xc000 | (slot << 8) | reg; break; } } return (dataport); } /* disable configuration space accesses */ static void pci_cfgdisable(void) { switch (cfgmech) { case CFGMECH_PCIE: case CFGMECH_1: /* * Do nothing for the config mechanism 1 case. 
* Writing a 0 to the address port can apparently * confuse some bridges and cause spurious * access failures. */ break; case CFGMECH_2: outb(CONF2_ENABLE_PORT, 0); break; } } static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes) { int data = -1; int port; mtx_lock_spin(&pcicfg_mtx); port = pci_cfgenable(bus, slot, func, reg, bytes); if (port != 0) { switch (bytes) { case 1: data = inb(port); break; case 2: data = inw(port); break; case 4: data = inl(port); break; } pci_cfgdisable(); } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes) { int port; mtx_lock_spin(&pcicfg_mtx); port = pci_cfgenable(bus, slot, func, reg, bytes); if (port != 0) { switch (bytes) { case 1: outb(port, data); break; case 2: outw(port, data); break; case 4: outl(port, data); break; } pci_cfgdisable(); } mtx_unlock_spin(&pcicfg_mtx); } #ifndef XEN /* check whether the configuration mechanism has been correctly identified */ static int pci_cfgcheck(int maxdev) { uint32_t id, class; uint8_t header; uint8_t device; int port; if (bootverbose) printf("pci_cfgcheck:\tdevice "); for (device = 0; device < maxdev; device++) { if (bootverbose) printf("%d ", device); port = pci_cfgenable(0, device, 0, 0, 4); id = inl(port); if (id == 0 || id == 0xffffffff) continue; port = pci_cfgenable(0, device, 0, 8, 4); class = inl(port) >> 8; if (bootverbose) printf("[class=%06x] ", class); if (class == 0 || (class & 0xf870ff) != 0) continue; port = pci_cfgenable(0, device, 0, 14, 1); header = inb(port); if (bootverbose) printf("[hdr=%02x] ", header); if ((header & 0x7e) != 0) continue; if (bootverbose) printf("is there (id=%08x)\n", id); pci_cfgdisable(); return (1); } if (bootverbose) printf("-- nothing found\n"); pci_cfgdisable(); return (0); } static int pcireg_cfgopen(void) { uint32_t mode1res, oldval1; uint8_t mode2res, oldval2; /* Check for type #1 first. */ oldval1 = inl(CONF1_ADDR_PORT); if (bootverbose) { printf("pci_open(1):\tmode 1 addr port (0x0cf8) is 0x%08x\n", oldval1); } cfgmech = CFGMECH_1; devmax = 32; outl(CONF1_ADDR_PORT, CONF1_ENABLE_CHK); DELAY(1); mode1res = inl(CONF1_ADDR_PORT); outl(CONF1_ADDR_PORT, oldval1); if (bootverbose) printf("pci_open(1a):\tmode1res=0x%08x (0x%08lx)\n", mode1res, CONF1_ENABLE_CHK); if (mode1res) { if (pci_cfgcheck(32)) return (cfgmech); } outl(CONF1_ADDR_PORT, CONF1_ENABLE_CHK1); mode1res = inl(CONF1_ADDR_PORT); outl(CONF1_ADDR_PORT, oldval1); if (bootverbose) printf("pci_open(1b):\tmode1res=0x%08x (0x%08lx)\n", mode1res, CONF1_ENABLE_CHK1); if ((mode1res & CONF1_ENABLE_MSK1) == CONF1_ENABLE_RES1) { if (pci_cfgcheck(32)) return (cfgmech); } /* Type #1 didn't work, so try type #2. */ oldval2 = inb(CONF2_ENABLE_PORT); if (bootverbose) { printf("pci_open(2):\tmode 2 enable port (0x0cf8) is 0x%02x\n", oldval2); } if ((oldval2 & 0xf0) == 0) { cfgmech = CFGMECH_2; devmax = 16; outb(CONF2_ENABLE_PORT, CONF2_ENABLE_CHK); mode2res = inb(CONF2_ENABLE_PORT); outb(CONF2_ENABLE_PORT, oldval2); if (bootverbose) printf("pci_open(2a):\tmode2res=0x%02x (0x%02x)\n", mode2res, CONF2_ENABLE_CHK); if (mode2res == CONF2_ENABLE_RES) { if (bootverbose) printf("pci_open(2a):\tnow trying mechanism 2\n"); if (pci_cfgcheck(16)) return (cfgmech); } } /* Nothing worked, so punt. 
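 *
 * With CFGMECH_NONE, devmax stays 0, so a later pci_cfgenable() never
 * matches any slot and config space reads simply return -1.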
*/ cfgmech = CFGMECH_NONE; devmax = 0; return (cfgmech); } int pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus) { struct pcie_cfg_list *pcielist; struct pcie_cfg_elem *pcie_array, *elem; #ifdef SMP struct pcpu *pc; #endif vm_offset_t va; uint32_t val1, val2; int i, slot; if (!mcfg_enable) return (0); if (minbus != 0) return (0); #ifndef PAE if (base >= 0x100000000) { if (bootverbose) printf( "PCI: Memory Mapped PCI configuration area base 0x%jx too high\n", (uintmax_t)base); return (0); } #endif if (bootverbose) printf("PCIe: Memory Mapped configuration base @ 0x%jx\n", (uintmax_t)base); #ifdef SMP STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) #endif { pcie_array = malloc(sizeof(struct pcie_cfg_elem) * PCIE_CACHE, M_DEVBUF, M_NOWAIT); if (pcie_array == NULL) return (0); va = kva_alloc(PCIE_CACHE * PAGE_SIZE); if (va == 0) { free(pcie_array, M_DEVBUF); return (0); } #ifdef SMP pcielist = &pcie_list[pc->pc_cpuid]; #else pcielist = &pcie_list[0]; #endif TAILQ_INIT(pcielist); for (i = 0; i < PCIE_CACHE; i++) { elem = &pcie_array[i]; elem->vapage = va + (i * PAGE_SIZE); elem->papage = 0; TAILQ_INSERT_HEAD(pcielist, elem, elem); } } pcie_base = base; pcie_minbus = minbus; pcie_maxbus = maxbus; cfgmech = CFGMECH_PCIE; devmax = 32; /* * On some AMD systems, some of the devices on bus 0 are * inaccessible using memory-mapped PCI config access. Walk * bus 0 looking for such devices. For these devices, we will * fall back to using type 1 config access instead. */ if (pci_cfgregopen() != 0) { for (slot = 0; slot <= PCI_SLOTMAX; slot++) { val1 = pcireg_cfgread(0, slot, 0, 0, 4); if (val1 == 0xffffffff) continue; val2 = pciereg_cfgread(0, slot, 0, 0, 4); if (val2 != val1) pcie_badslots |= (1 << slot); } } return (1); } #endif /* !XEN */ #define PCIE_PADDR(base, reg, bus, slot, func) \ ((base) + \ ((((bus) & 0xff) << 20) | \ (((slot) & 0x1f) << 15) | \ (((func) & 0x7) << 12) | \ ((reg) & 0xfff))) static __inline vm_offset_t pciereg_findaddr(int bus, unsigned slot, unsigned func, unsigned reg) { struct pcie_cfg_list *pcielist; struct pcie_cfg_elem *elem; vm_paddr_t pa, papage; pa = PCIE_PADDR(pcie_base, reg, bus, slot, func); papage = pa & ~PAGE_MASK; /* * Find an element in the cache that matches the physical page desired, * or create a new mapping from the least recently used element. * A very simple LRU algorithm is used here; does it need to be more * efficient? */ pcielist = &pcie_list[PCPU_GET(cpuid)]; TAILQ_FOREACH(elem, pcielist, elem) { if (elem->papage == papage) break; } if (elem == NULL) { elem = TAILQ_LAST(pcielist, pcie_cfg_list); if (elem->papage != 0) { pmap_kremove(elem->vapage); invlpg(elem->vapage); } pmap_kenter(elem->vapage, papage); elem->papage = papage; } if (elem != TAILQ_FIRST(pcielist)) { TAILQ_REMOVE(pcielist, elem, elem); TAILQ_INSERT_HEAD(pcielist, elem, elem); } return (elem->vapage | (pa & PAGE_MASK)); } /* * AMD BIOS and Kernel Developer's Guides for CPU families starting with 10h * have a requirement that all accesses to the memory mapped PCI configuration * space are done using AX class of registers. * Since other vendors do not currently have any contradicting requirements, * the AMD access pattern is applied universally.
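 *
 * In GNU C inline assembly that pinning is what the "=a"/"a"
 * constraints in the accessors below express: the data operand is
 * forced into %eax/%ax/%al instead of whatever register the compiler
 * would otherwise pick.  A minimal sketch of the pattern (hypothetical
 * helper, not part of this file):
 */

static __inline uint32_t
ax_read32(volatile uint32_t *p)
{
	uint32_t v;

	/* "=a" forces the load result through %eax. */
	__asm("movl %1, %0" : "=a" (v) : "m" (*p));
	return (v);
}

/*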
*/ static int pciereg_cfgread(int bus, unsigned slot, unsigned func, unsigned reg, unsigned bytes) { vm_offset_t va; int data = -1; if (bus < pcie_minbus || bus > pcie_maxbus || slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) return (-1); critical_enter(); va = pciereg_findaddr(bus, slot, func, reg); switch (bytes) { case 4: __asm("movl %1, %0" : "=a" (data) : "m" (*(volatile uint32_t *)va)); break; case 2: __asm("movzwl %1, %0" : "=a" (data) : "m" (*(volatile uint16_t *)va)); break; case 1: __asm("movzbl %1, %0" : "=a" (data) : "m" (*(volatile uint8_t *)va)); break; } critical_exit(); return (data); } static void pciereg_cfgwrite(int bus, unsigned slot, unsigned func, unsigned reg, int data, unsigned bytes) { vm_offset_t va; if (bus < pcie_minbus || bus > pcie_maxbus || slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) return; critical_enter(); va = pciereg_findaddr(bus, slot, func, reg); switch (bytes) { case 4: __asm("movl %1, %0" : "=m" (*(volatile uint32_t *)va) : "a" (data)); break; case 2: __asm("movw %1, %0" : "=m" (*(volatile uint16_t *)va) : "a" ((uint16_t)data)); break; case 1: __asm("movb %1, %0" : "=m" (*(volatile uint8_t *)va) : "a" ((uint8_t)data)); break; } critical_exit(); } Index: head/sys/mips/atheros/ar71xxreg.h =================================================================== --- head/sys/mips/atheros/ar71xxreg.h (revision 258779) +++ head/sys/mips/atheros/ar71xxreg.h (revision 258780) @@ -1,565 +1,565 @@ /*- * Copyright (c) 2009 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ #ifndef _AR71XX_REG_H_ #define _AR71XX_REG_H_ /* PCI region */ #define AR71XX_PCI_MEM_BASE 0x10000000 /* * The PCI mem window is 0x08000000 bytes long, but we exclude the * control region from the resource manager */ #define AR71XX_PCI_MEM_SIZE 0x07000000 #define AR71XX_PCI_IRQ_START 0 #define AR71XX_PCI_IRQ_END 2 #define AR71XX_PCI_NIRQS 3 /* * PCI device slots start from this number */ #define AR71XX_PCI_BASE_SLOT 17 /* PCI config registers */ #define AR71XX_PCI_LCONF_CMD 0x17010000 #define PCI_LCONF_CMD_READ 0x00000000 #define PCI_LCONF_CMD_WRITE 0x00010000 #define AR71XX_PCI_LCONF_WRITE_DATA 0x17010004 #define AR71XX_PCI_LCONF_READ_DATA 0x17010008 #define AR71XX_PCI_CONF_ADDR 0x1701000C #define AR71XX_PCI_CONF_CMD 0x17010010 #define PCI_CONF_CMD_READ 0x0000000A #define PCI_CONF_CMD_WRITE 0x0000000B #define AR71XX_PCI_CONF_WRITE_DATA 0x17010014 #define AR71XX_PCI_CONF_READ_DATA 0x17010018 #define AR71XX_PCI_ERROR 0x1701001C #define AR71XX_PCI_ERROR_ADDR 0x17010020 #define AR71XX_PCI_AHB_ERROR 0x17010024 #define AR71XX_PCI_AHB_ERROR_ADDR 0x17010028 /* APB region */ /* * The size is not really true: the actual APB window size is * 0x01000000, but it should handle OHCI memory as well * because this controller's interrupt is routed through * the APB. */ #define AR71XX_APB_BASE 0x18000000 #define AR71XX_APB_SIZE 0x06000000 /* DDR registers */ #define AR71XX_DDR_CONFIG 0x18000000 #define AR71XX_DDR_CONFIG2 0x18000004 #define AR71XX_DDR_MODE_REGISTER 0x18000008 #define AR71XX_DDR_EXT_MODE_REGISTER 0x1800000C #define AR71XX_DDR_CONTROL 0x18000010 #define AR71XX_DDR_REFRESH 0x18000014 #define AR71XX_DDR_RD_DATA_THIS_CYCLE 0x18000018 #define AR71XX_TAP_CONTROL0 0x1800001C #define AR71XX_TAP_CONTROL1 0x18000020 #define AR71XX_TAP_CONTROL2 0x18000024 #define AR71XX_TAP_CONTROL3 0x18000028 #define AR71XX_PCI_WINDOW0 0x1800007C #define AR71XX_PCI_WINDOW1 0x18000080 #define AR71XX_PCI_WINDOW2 0x18000084 #define AR71XX_PCI_WINDOW3 0x18000088 #define AR71XX_PCI_WINDOW4 0x1800008C #define AR71XX_PCI_WINDOW5 0x18000090 #define AR71XX_PCI_WINDOW6 0x18000094 #define AR71XX_PCI_WINDOW7 0x18000098 #define AR71XX_WB_FLUSH_GE0 0x1800009C #define AR71XX_WB_FLUSH_GE1 0x180000A0 #define AR71XX_WB_FLUSH_USB 0x180000A4 #define AR71XX_WB_FLUSH_PCI 0x180000A8 /* * Values for PCI_WINDOW_X registers */ #define PCI_WINDOW0_ADDR 0x10000000 #define PCI_WINDOW1_ADDR 0x11000000 #define PCI_WINDOW2_ADDR 0x12000000 #define PCI_WINDOW3_ADDR 0x13000000 #define PCI_WINDOW4_ADDR 0x14000000 #define PCI_WINDOW5_ADDR 0x15000000 #define PCI_WINDOW6_ADDR 0x16000000 #define PCI_WINDOW7_ADDR 0x17000000 /* This value enables access to PCI config registers */ #define PCI_WINDOW7_CONF_ADDR 0x07000000 #define AR71XX_UART_ADDR 0x18020000 #define AR71XX_USB_CTRL_FLADJ 0x18030000 #define USB_CTRL_FLADJ_HOST_SHIFT 12 #define USB_CTRL_FLADJ_A5_SHIFT 10 #define USB_CTRL_FLADJ_A4_SHIFT 8 #define USB_CTRL_FLADJ_A3_SHIFT 6 #define USB_CTRL_FLADJ_A2_SHIFT 4 #define USB_CTRL_FLADJ_A1_SHIFT 2 #define USB_CTRL_FLADJ_A0_SHIFT 0 #define AR71XX_USB_CTRL_CONFIG 0x18030004 #define USB_CTRL_CONFIG_OHCI_DES_SWAP (1 << 19) #define USB_CTRL_CONFIG_OHCI_BUF_SWAP (1 << 18) #define USB_CTRL_CONFIG_EHCI_DES_SWAP (1 << 17) #define USB_CTRL_CONFIG_EHCI_BUF_SWAP (1 << 16) #define USB_CTRL_CONFIG_DISABLE_XTL (1 << 13) #define USB_CTRL_CONFIG_OVERRIDE_XTL (1 << 12) #define USB_CTRL_CONFIG_CLK_SEL_SHIFT 4 #define USB_CTRL_CONFIG_CLK_SEL_MASK 3 #define USB_CTRL_CONFIG_CLK_SEL_12 0 #define USB_CTRL_CONFIG_CLK_SEL_24 1 #define USB_CTRL_CONFIG_CLK_SEL_48 2 #define
USB_CTRL_CONFIG_OVER_CURRENT_AS_GPIO (1 << 8) #define USB_CTRL_CONFIG_SS_SIMULATION_MODE (1 << 2) #define USB_CTRL_CONFIG_RESUME_UTMI_PLS_DIS (1 << 1) #define USB_CTRL_CONFIG_UTMI_BACKWARD_ENB (1 << 0) #define AR71XX_GPIO_BASE 0x18040000 #define AR71XX_GPIO_OE 0x00 #define AR71XX_GPIO_IN 0x04 #define AR71XX_GPIO_OUT 0x08 #define AR71XX_GPIO_SET 0x0c #define AR71XX_GPIO_CLEAR 0x10 #define AR71XX_GPIO_INT 0x14 #define AR71XX_GPIO_INT_TYPE 0x18 #define AR71XX_GPIO_INT_POLARITY 0x1c #define AR71XX_GPIO_INT_PENDING 0x20 #define AR71XX_GPIO_INT_MASK 0x24 #define AR71XX_GPIO_FUNCTION 0x28 #define GPIO_FUNC_STEREO_EN (1 << 17) #define GPIO_FUNC_SLIC_EN (1 << 16) #define GPIO_FUNC_SPI_CS2_EN (1 << 13) /* CS2 is shared with GPIO_1 */ #define GPIO_FUNC_SPI_CS1_EN (1 << 12) /* CS1 is shared with GPIO_0 */ #define GPIO_FUNC_UART_EN (1 << 8) #define GPIO_FUNC_USB_OC_EN (1 << 4) #define GPIO_FUNC_USB_CLK_EN (0) #define AR71XX_BASE_FREQ 40000000 #define AR71XX_PLL_CPU_BASE 0x18050000 #define AR71XX_PLL_CPU_CONFIG 0x18050000 -#define PLL_SW_UPDATE (1 << 31) +#define PLL_SW_UPDATE (1U << 31) #define PLL_LOCKED (1 << 30) #define PLL_AHB_DIV_SHIFT 20 #define PLL_AHB_DIV_MASK 7 #define PLL_DDR_DIV_SEL_SHIFT 18 #define PLL_DDR_DIV_SEL_MASK 3 #define PLL_CPU_DIV_SEL_SHIFT 16 #define PLL_CPU_DIV_SEL_MASK 3 #define PLL_LOOP_BW_SHIFT 12 #define PLL_LOOP_BW_MASK 0xf #define PLL_DIV_IN_SHIFT 10 #define PLL_DIV_IN_MASK 3 #define PLL_DIV_OUT_SHIFT 8 #define PLL_DIV_OUT_MASK 3 #define PLL_FB_SHIFT 3 #define PLL_FB_MASK 0x1f #define PLL_BYPASS (1 << 1) #define PLL_POWER_DOWN (1 << 0) #define AR71XX_PLL_SEC_CONFIG 0x18050004 #define AR71XX_PLL_ETH0_SHIFT 17 #define AR71XX_PLL_ETH1_SHIFT 19 #define AR71XX_PLL_CPU_CLK_CTRL 0x18050008 #define AR71XX_PLL_ETH_INT0_CLK 0x18050010 #define AR71XX_PLL_ETH_INT1_CLK 0x18050014 #define XPLL_ETH_INT_CLK_10 0x00991099 #define XPLL_ETH_INT_CLK_100 0x00441011 #define XPLL_ETH_INT_CLK_1000 0x13110000 #define XPLL_ETH_INT_CLK_1000_GMII 0x14110000 #define PLL_ETH_INT_CLK_10 0x00991099 #define PLL_ETH_INT_CLK_100 0x00001099 #define PLL_ETH_INT_CLK_1000 0x00110000 #define AR71XX_PLL_ETH_EXT_CLK 0x18050018 #define AR71XX_PLL_PCI_CLK 0x1805001C /* Reset block */ #define AR71XX_RST_BLOCK_BASE 0x18060000 #define AR71XX_RST_WDOG_CONTROL 0x18060008 -#define RST_WDOG_LAST (1 << 31) +#define RST_WDOG_LAST (1U << 31) #define RST_WDOG_ACTION_MASK 3 #define RST_WDOG_ACTION_RESET 3 #define RST_WDOG_ACTION_NMI 2 #define RST_WDOG_ACTION_GP_INTR 1 #define RST_WDOG_ACTION_NOACTION 0 #define AR71XX_RST_WDOG_TIMER 0x1806000C /* * APB interrupt status and mask register, and interrupt bit numbers for them. */ #define AR71XX_MISC_INTR_STATUS 0x18060010 #define AR71XX_MISC_INTR_MASK 0x18060014 #define MISC_INTR_TIMER 0 #define MISC_INTR_ERROR 1 #define MISC_INTR_GPIO 2 #define MISC_INTR_UART 3 #define MISC_INTR_WATCHDOG 4 #define MISC_INTR_PERF 5 #define MISC_INTR_OHCI 6 #define MISC_INTR_DMA 7 #define AR71XX_PCI_INTR_STATUS 0x18060018 #define AR71XX_PCI_INTR_MASK 0x1806001C #define PCI_INTR_CORE (1 << 4) #define AR71XX_RST_RESET 0x18060024 #define RST_RESET_FULL_CHIP (1 << 24) /* Same as pulling the reset pin */ #define RST_RESET_CPU_COLD (1 << 20) /* Cold reset */ #define RST_RESET_GE1_MAC (1 << 13) #define RST_RESET_GE1_PHY (1 << 12) #define RST_RESET_GE0_MAC (1 << 9) #define RST_RESET_GE0_PHY (1 << 8) #define RST_RESET_USB_OHCI_DLL (1 << 6) #define RST_RESET_USB_HOST (1 << 5) #define RST_RESET_USB_PHY (1 << 4) #define RST_RESET_PCI_BUS (1 << 1) #define RST_RESET_PCI_CORE (1 << 0) /* Chipset revision details */
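/*
 * For example, a (hypothetical) REV_ID value of 0x00a6 decodes with the
 * masks below as major 0x00a0 (AR71XX family), minor 0x00a6 & 0x3 = 2
 * (AR7161) and silicon revision (0x00a6 >> 2) & 0x3 = 1.
 */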
#define AR71XX_RST_RESET_REG_REV_ID 0x18060090 #define REV_ID_MAJOR_MASK 0xfff0 #define REV_ID_MAJOR_AR71XX 0x00a0 #define REV_ID_MAJOR_AR913X 0x00b0 #define REV_ID_MAJOR_AR7240 0x00c0 #define REV_ID_MAJOR_AR7241 0x0100 #define REV_ID_MAJOR_AR7242 0x1100 /* AR71XX chipset revision details */ #define AR71XX_REV_ID_MINOR_MASK 0x3 #define AR71XX_REV_ID_MINOR_AR7130 0x0 #define AR71XX_REV_ID_MINOR_AR7141 0x1 #define AR71XX_REV_ID_MINOR_AR7161 0x2 #define AR71XX_REV_ID_REVISION_MASK 0x3 #define AR71XX_REV_ID_REVISION_SHIFT 2 /* AR724X chipset revision details */ #define AR724X_REV_ID_REVISION_MASK 0x3 /* AR91XX chipset revision details */ #define AR91XX_REV_ID_MINOR_MASK 0x3 #define AR91XX_REV_ID_MINOR_AR9130 0x0 #define AR91XX_REV_ID_MINOR_AR9132 0x1 #define AR91XX_REV_ID_REVISION_MASK 0x3 #define AR91XX_REV_ID_REVISION_SHIFT 2 typedef enum { AR71XX_MII_MODE_NONE = 0, AR71XX_MII_MODE_GMII, AR71XX_MII_MODE_MII, AR71XX_MII_MODE_RGMII, AR71XX_MII_MODE_RMII, } ar71xx_mii_mode; /* * AR71xx MII control region */ #define AR71XX_MII0_CTRL 0x18070000 #define MII_CTRL_SPEED_SHIFT 4 #define MII_CTRL_SPEED_MASK 3 #define MII_CTRL_SPEED_10 0 #define MII_CTRL_SPEED_100 1 #define MII_CTRL_SPEED_1000 2 #define MII_CTRL_IF_MASK 3 #define MII_CTRL_IF_SHIFT 0 #define MII0_CTRL_IF_GMII 0 #define MII0_CTRL_IF_MII 1 #define MII0_CTRL_IF_RGMII 2 #define MII0_CTRL_IF_RMII 3 #define AR71XX_MII1_CTRL 0x18070004 #define MII1_CTRL_IF_RGMII 0 #define MII1_CTRL_IF_RMII 1 /* * GigE adapters region */ #define AR71XX_MAC0_BASE 0x19000000 #define AR71XX_MAC1_BASE 0x1A000000 #define AR71XX_MAC_CFG1 0x00 -#define MAC_CFG1_SOFT_RESET (1 << 31) +#define MAC_CFG1_SOFT_RESET (1U << 31) #define MAC_CFG1_SIMUL_RESET (1 << 30) #define MAC_CFG1_MAC_RX_BLOCK_RESET (1 << 19) #define MAC_CFG1_MAC_TX_BLOCK_RESET (1 << 18) #define MAC_CFG1_RX_FUNC_RESET (1 << 17) #define MAC_CFG1_TX_FUNC_RESET (1 << 16) #define MAC_CFG1_LOOPBACK (1 << 8) #define MAC_CFG1_RXFLOW_CTRL (1 << 5) #define MAC_CFG1_TXFLOW_CTRL (1 << 4) #define MAC_CFG1_SYNC_RX (1 << 3) #define MAC_CFG1_RX_ENABLE (1 << 2) #define MAC_CFG1_SYNC_TX (1 << 1) #define MAC_CFG1_TX_ENABLE (1 << 0) #define AR71XX_MAC_CFG2 0x04 #define MAC_CFG2_PREAMBLE_LEN_MASK 0xf #define MAC_CFG2_PREAMBLE_LEN_SHIFT 12 #define MAC_CFG2_IFACE_MODE_1000 (2 << 8) #define MAC_CFG2_IFACE_MODE_10_100 (1 << 8) #define MAC_CFG2_IFACE_MODE_SHIFT 8 #define MAC_CFG2_IFACE_MODE_MASK 3 #define MAC_CFG2_HUGE_FRAME (1 << 5) #define MAC_CFG2_LENGTH_FIELD (1 << 4) #define MAC_CFG2_ENABLE_PADCRC (1 << 2) #define MAC_CFG2_ENABLE_CRC (1 << 1) #define MAC_CFG2_FULL_DUPLEX (1 << 0) #define AR71XX_MAC_IFG 0x08 #define AR71XX_MAC_HDUPLEX 0x0C #define AR71XX_MAC_MAX_FRAME_LEN 0x10 #define AR71XX_MAC_MII_CFG 0x20 -#define MAC_MII_CFG_RESET (1 << 31) +#define MAC_MII_CFG_RESET (1U << 31) #define MAC_MII_CFG_SCAN_AUTO_INC (1 << 5) #define MAC_MII_CFG_PREAMBLE_SUP (1 << 4) #define MAC_MII_CFG_CLOCK_SELECT_MASK 0x7 #define MAC_MII_CFG_CLOCK_SELECT_MASK_AR933X 0xf #define MAC_MII_CFG_CLOCK_DIV_4 0 #define MAC_MII_CFG_CLOCK_DIV_6 2 #define MAC_MII_CFG_CLOCK_DIV_8 3 #define MAC_MII_CFG_CLOCK_DIV_10 4 #define MAC_MII_CFG_CLOCK_DIV_14 5 #define MAC_MII_CFG_CLOCK_DIV_20 6 #define MAC_MII_CFG_CLOCK_DIV_28 7 /* .. 
and the AR933x/AR934x extensions */ #define MAC_MII_CFG_CLOCK_DIV_34 8 #define MAC_MII_CFG_CLOCK_DIV_42 9 #define MAC_MII_CFG_CLOCK_DIV_50 10 #define MAC_MII_CFG_CLOCK_DIV_58 11 #define MAC_MII_CFG_CLOCK_DIV_66 12 #define MAC_MII_CFG_CLOCK_DIV_74 13 #define MAC_MII_CFG_CLOCK_DIV_82 14 #define MAC_MII_CFG_CLOCK_DIV_98 15 #define AR71XX_MAC_MII_CMD 0x24 #define MAC_MII_CMD_SCAN_CYCLE (1 << 1) #define MAC_MII_CMD_READ 1 #define MAC_MII_CMD_WRITE 0 #define AR71XX_MAC_MII_ADDR 0x28 #define MAC_MII_PHY_ADDR_SHIFT 8 #define MAC_MII_PHY_ADDR_MASK 0xff #define MAC_MII_REG_MASK 0x1f #define AR71XX_MAC_MII_CONTROL 0x2C #define MAC_MII_CONTROL_MASK 0xffff #define AR71XX_MAC_MII_STATUS 0x30 #define MAC_MII_STATUS_MASK 0xffff #define AR71XX_MAC_MII_INDICATOR 0x34 #define MAC_MII_INDICATOR_NOT_VALID (1 << 2) #define MAC_MII_INDICATOR_SCANNING (1 << 1) #define MAC_MII_INDICATOR_BUSY (1 << 0) #define AR71XX_MAC_IFCONTROL 0x38 #define MAC_IFCONTROL_SPEED (1 << 16) #define AR71XX_MAC_STA_ADDR1 0x40 #define AR71XX_MAC_STA_ADDR2 0x44 #define AR71XX_MAC_FIFO_CFG0 0x48 #define FIFO_CFG0_TX_FABRIC (1 << 4) #define FIFO_CFG0_TX_SYSTEM (1 << 3) #define FIFO_CFG0_RX_FABRIC (1 << 2) #define FIFO_CFG0_RX_SYSTEM (1 << 1) #define FIFO_CFG0_WATERMARK (1 << 0) #define FIFO_CFG0_ALL ((1 << 5) - 1) #define FIFO_CFG0_ENABLE_SHIFT 8 #define AR71XX_MAC_FIFO_CFG1 0x4C #define AR71XX_MAC_FIFO_CFG2 0x50 #define AR71XX_MAC_FIFO_TX_THRESHOLD 0x54 #define AR71XX_MAC_FIFO_RX_FILTMATCH 0x58 /* * These flags are applicable both to AR71XX_MAC_FIFO_RX_FILTMASK and * to AR71XX_MAC_FIFO_RX_FILTMATCH */ #define FIFO_RX_MATCH_UNICAST (1 << 17) #define FIFO_RX_MATCH_TRUNC_FRAME (1 << 16) #define FIFO_RX_MATCH_VLAN_TAG (1 << 15) #define FIFO_RX_MATCH_UNSUP_OPCODE (1 << 14) #define FIFO_RX_MATCH_PAUSE_FRAME (1 << 13) #define FIFO_RX_MATCH_CTRL_FRAME (1 << 12) #define FIFO_RX_MATCH_LONG_EVENT (1 << 11) #define FIFO_RX_MATCH_DRIBBLE_NIBBLE (1 << 10) #define FIFO_RX_MATCH_BCAST (1 << 9) #define FIFO_RX_MATCH_MCAST (1 << 8) #define FIFO_RX_MATCH_OK (1 << 7) #define FIFO_RX_MATCH_OORANGE (1 << 6) #define FIFO_RX_MATCH_LEN_MSMTCH (1 << 5) #define FIFO_RX_MATCH_CRC_ERROR (1 << 4) #define FIFO_RX_MATCH_CODE_ERROR (1 << 3) #define FIFO_RX_MATCH_FALSE_CARRIER (1 << 2) #define FIFO_RX_MATCH_RX_DV_EVENT (1 << 1) #define FIFO_RX_MATCH_DROP_EVENT (1 << 0) /* * Exclude unicast and truncated frames from matching */ #define FIFO_RX_FILTMATCH_DEFAULT \ (FIFO_RX_MATCH_VLAN_TAG | \ FIFO_RX_MATCH_UNSUP_OPCODE | \ FIFO_RX_MATCH_PAUSE_FRAME | \ FIFO_RX_MATCH_CTRL_FRAME | \ FIFO_RX_MATCH_LONG_EVENT | \ FIFO_RX_MATCH_DRIBBLE_NIBBLE | \ FIFO_RX_MATCH_BCAST | \ FIFO_RX_MATCH_MCAST | \ FIFO_RX_MATCH_OK | \ FIFO_RX_MATCH_OORANGE | \ FIFO_RX_MATCH_LEN_MSMTCH | \ FIFO_RX_MATCH_CRC_ERROR | \ FIFO_RX_MATCH_CODE_ERROR | \ FIFO_RX_MATCH_FALSE_CARRIER | \ FIFO_RX_MATCH_RX_DV_EVENT | \ FIFO_RX_MATCH_DROP_EVENT) #define AR71XX_MAC_FIFO_RX_FILTMASK 0x5C #define FIFO_RX_MASK_BYTE_MODE (1 << 19) #define FIFO_RX_MASK_NO_SHORT_FRAME (1 << 18) #define FIFO_RX_MASK_BIT17 (1 << 17) #define FIFO_RX_MASK_BIT16 (1 << 16) #define FIFO_RX_MASK_TRUNC_FRAME (1 << 15) #define FIFO_RX_MASK_LONG_EVENT (1 << 14) #define FIFO_RX_MASK_VLAN_TAG (1 << 13) #define FIFO_RX_MASK_UNSUP_OPCODE (1 << 12) #define FIFO_RX_MASK_PAUSE_FRAME (1 << 11) #define FIFO_RX_MASK_CTRL_FRAME (1 << 10) #define FIFO_RX_MASK_DRIBBLE_NIBBLE (1 << 9) #define FIFO_RX_MASK_BCAST (1 << 8) #define FIFO_RX_MASK_MCAST (1 << 7) #define FIFO_RX_MASK_OK (1 << 6) #define FIFO_RX_MASK_OORANGE (1 << 5) #define FIFO_RX_MASK_LEN_MSMTCH (1 << 4)
#define FIFO_RX_MASK_CODE_ERROR (1 << 3) #define FIFO_RX_MASK_FALSE_CARRIER (1 << 2) #define FIFO_RX_MASK_RX_DV_EVENT (1 << 1) #define FIFO_RX_MASK_DROP_EVENT (1 << 0) /* * Len. mismatch, unsup. opcode and short frame bits excluded */ #define FIFO_RX_FILTMASK_DEFAULT \ (FIFO_RX_MASK_NO_SHORT_FRAME | \ FIFO_RX_MASK_BIT17 | \ FIFO_RX_MASK_BIT16 | \ FIFO_RX_MASK_TRUNC_FRAME | \ FIFO_RX_MASK_LONG_EVENT | \ FIFO_RX_MASK_VLAN_TAG | \ FIFO_RX_MASK_PAUSE_FRAME | \ FIFO_RX_MASK_CTRL_FRAME | \ FIFO_RX_MASK_DRIBBLE_NIBBLE | \ FIFO_RX_MASK_BCAST | \ FIFO_RX_MASK_MCAST | \ FIFO_RX_MASK_OK | \ FIFO_RX_MASK_OORANGE | \ FIFO_RX_MASK_CODE_ERROR | \ FIFO_RX_MASK_FALSE_CARRIER | \ FIFO_RX_MASK_RX_DV_EVENT | \ FIFO_RX_MASK_DROP_EVENT) #define AR71XX_MAC_FIFO_RAM0 0x60 #define AR71XX_MAC_FIFO_RAM1 0x64 #define AR71XX_MAC_FIFO_RAM2 0x68 #define AR71XX_MAC_FIFO_RAM3 0x6C #define AR71XX_MAC_FIFO_RAM4 0x70 #define AR71XX_MAC_FIFO_RAM5 0x74 #define AR71XX_MAC_FIFO_RAM6 0x78 #define AR71XX_DMA_TX_CONTROL 0x180 #define DMA_TX_CONTROL_EN (1 << 0) #define AR71XX_DMA_TX_DESC 0x184 #define AR71XX_DMA_TX_STATUS 0x188 #define DMA_TX_STATUS_PCOUNT_MASK 0xff #define DMA_TX_STATUS_PCOUNT_SHIFT 16 #define DMA_TX_STATUS_BUS_ERROR (1 << 3) #define DMA_TX_STATUS_UNDERRUN (1 << 1) #define DMA_TX_STATUS_PKT_SENT (1 << 0) #define AR71XX_DMA_RX_CONTROL 0x18C #define DMA_RX_CONTROL_EN (1 << 0) #define AR71XX_DMA_RX_DESC 0x190 #define AR71XX_DMA_RX_STATUS 0x194 #define DMA_RX_STATUS_PCOUNT_MASK 0xff #define DMA_RX_STATUS_PCOUNT_SHIFT 16 #define DMA_RX_STATUS_BUS_ERROR (1 << 3) #define DMA_RX_STATUS_OVERFLOW (1 << 2) #define DMA_RX_STATUS_PKT_RECVD (1 << 0) #define AR71XX_DMA_INTR 0x198 #define AR71XX_DMA_INTR_STATUS 0x19C #define DMA_INTR_ALL ((1 << 8) - 1) #define DMA_INTR_RX_BUS_ERROR (1 << 7) #define DMA_INTR_RX_OVERFLOW (1 << 6) #define DMA_INTR_RX_PKT_RCVD (1 << 4) #define DMA_INTR_TX_BUS_ERROR (1 << 3) #define DMA_INTR_TX_UNDERRUN (1 << 1) #define DMA_INTR_TX_PKT_SENT (1 << 0) #define AR71XX_SPI_BASE 0x1f000000 #define AR71XX_SPI_FS 0x00 #define AR71XX_SPI_CTRL 0x04 #define SPI_CTRL_REMAP_DISABLE (1 << 6) #define SPI_CTRL_CLOCK_DIVIDER_MASK ((1 << 6) - 1) #define AR71XX_SPI_IO_CTRL 0x08 #define SPI_IO_CTRL_CS2 (1 << 18) #define SPI_IO_CTRL_CS1 (1 << 17) #define SPI_IO_CTRL_CS0 (1 << 16) #define SPI_IO_CTRL_CSMASK (7 << 16) #define SPI_IO_CTRL_CLK (1 << 8) #define SPI_IO_CTRL_DO 1 #define AR71XX_SPI_RDS 0x0C #define ATH_READ_REG(reg) \ *((volatile uint32_t *)MIPS_PHYS_TO_KSEG1((reg))) #define ATH_WRITE_REG(reg, val) \ *((volatile uint32_t *)MIPS_PHYS_TO_KSEG1((reg))) = (val) static inline void ar71xx_ddr_flush(uint32_t reg) { ATH_WRITE_REG(reg, 1); while ((ATH_READ_REG(reg) & 0x1)) ; ATH_WRITE_REG(reg, 1); while ((ATH_READ_REG(reg) & 0x1)) ; } static inline void ar71xx_write_pll(uint32_t cfg_reg, uint32_t pll_reg, uint32_t pll, uint32_t pll_reg_shift) { uint32_t sec_cfg; /* set PLL registers */ sec_cfg = ATH_READ_REG(cfg_reg); sec_cfg &= ~(3 << pll_reg_shift); sec_cfg |= (2 << pll_reg_shift); ATH_WRITE_REG(cfg_reg, sec_cfg); DELAY(100); ATH_WRITE_REG(pll_reg, pll); sec_cfg |= (3 << pll_reg_shift); ATH_WRITE_REG(cfg_reg, sec_cfg); DELAY(100); sec_cfg &= ~(3 << pll_reg_shift); ATH_WRITE_REG(cfg_reg, sec_cfg); DELAY(100); } #endif /* _AR71XX_REG_H_ */ Index: head/sys/mips/atheros/ar934xreg.h =================================================================== --- head/sys/mips/atheros/ar934xreg.h (revision 258779) +++ head/sys/mips/atheros/ar934xreg.h (revision 258780) @@ -1,211 +1,211 @@ /*- * Copyright (c) 2013 Adrian Chadd * All
rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __AR934X_REG_H__ #define __AR934X_REG_H__ #define AR934X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000) #define AR934X_GMAC_SIZE 0x14 #define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000) #define AR934X_WMAC_SIZE 0x20000 #define AR934X_EHCI_BASE 0x1b000000 #define AR934X_EHCI_SIZE 0x200 #define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000) #define AR934X_SRIF_SIZE 0x1000 /* AR934x GMAC configuration */ #define AR934X_GMAC_REG_ETH_CFG (AR934X_GMAC_BASE + 0x00) #define AR934X_ETH_CFG_RGMII_GMAC0 (1 << 0) #define AR934X_ETH_CFG_MII_GMAC0 (1 << 1) #define AR934X_ETH_CFG_GMII_GMAC0 (1 << 2) #define AR934X_ETH_CFG_MII_GMAC0_MASTER (1 << 3) #define AR934X_ETH_CFG_MII_GMAC0_SLAVE (1 << 4) #define AR934X_ETH_CFG_MII_GMAC0_ERR_EN (1 << 5) #define AR934X_ETH_CFG_SW_ONLY_MODE (1 << 6) #define AR934X_ETH_CFG_SW_PHY_SWAP (1 << 7) #define AR934X_ETH_CFG_SW_APB_ACCESS (1 << 9) #define AR934X_ETH_CFG_RMII_GMAC0 (1 << 10) #define AR933X_ETH_CFG_MII_CNTL_SPEED (1 << 11) #define AR934X_ETH_CFG_RMII_GMAC0_MASTER (1 << 12) #define AR934X_ETH_CFG_SW_ACC_MSB_FIRST (1 << 13) #define AR934X_DDR_REG_FLUSH_GE0 (AR71XX_APB_BASE + 0x9c) #define AR934X_DDR_REG_FLUSH_GE1 (AR71XX_APB_BASE + 0xa0) #define AR934X_DDR_REG_FLUSH_USB (AR71XX_APB_BASE + 0xa4) #define AR934X_DDR_REG_FLUSH_PCIE (AR71XX_APB_BASE + 0xa8) #define AR934X_DDR_REG_FLUSH_WMAC (AR71XX_APB_BASE + 0xac) #define AR934X_PLL_CPU_CONFIG_REG (AR71XX_PLL_CPU_BASE + 0x00) #define AR934X_PLL_DDR_CONFIG_REG (AR71XX_PLL_CPU_BASE + 0x04) #define AR934X_PLL_CPU_DDR_CLK_CTRL_REG (AR71XX_PLL_CPU_BASE + 0x08) #define AR934X_PLL_SWITCH_CLOCK_CONTROL_REG (AR71XX_PLL_CPU_BASE + 0x24) #define AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL (1 << 6) #define AR934X_PLL_ETH_XMII_CONTROL_REG (AR71XX_PLL_CPU_BASE + 0x2c) #define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0 #define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f #define AR934X_PLL_CPU_CONFIG_NINT_SHIFT 6 #define AR934X_PLL_CPU_CONFIG_NINT_MASK 0x3f #define AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT 12 #define AR934X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f #define AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19 #define AR934X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3 #define AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT 0 #define AR934X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff #define AR934X_PLL_DDR_CONFIG_NINT_SHIFT 10 
#define AR934X_PLL_DDR_CONFIG_NINT_MASK 0x3f #define AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT 16 #define AR934X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f #define AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23 #define AR934X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7 #define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS (1 << 2) #define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS (1 << 3) #define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS (1 << 4) #define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT 5 #define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK 0x1f #define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT 10 #define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK 0x1f #define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT 15 #define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK 0x1f #define AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL (1 << 20) #define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL (1 << 21) #define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL (1 << 24) #define AR934X_RESET_REG_RESET_MODULE (AR71XX_RST_BLOCK_BASE + 0x1c) #define AR934X_RESET_REG_BOOTSTRAP (AR71XX_RST_BLOCK_BASE + 0xb0) #define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS (AR71XX_RST_BLOCK_BASE + 0xac) -#define AR934X_RESET_HOST (1 << 31) +#define AR934X_RESET_HOST (1U << 31) #define AR934X_RESET_SLIC (1 << 30) #define AR934X_RESET_HDMA (1 << 29) #define AR934X_RESET_EXTERNAL (1 << 28) #define AR934X_RESET_RTC (1 << 27) #define AR934X_RESET_PCIE_EP_INT (1 << 26) #define AR934X_RESET_CHKSUM_ACC (1 << 25) #define AR934X_RESET_FULL_CHIP (1 << 24) #define AR934X_RESET_GE1_MDIO (1 << 23) #define AR934X_RESET_GE0_MDIO (1 << 22) #define AR934X_RESET_CPU_NMI (1 << 21) #define AR934X_RESET_CPU_COLD (1 << 20) #define AR934X_RESET_HOST_RESET_INT (1 << 19) #define AR934X_RESET_PCIE_EP (1 << 18) #define AR934X_RESET_UART1 (1 << 17) #define AR934X_RESET_DDR (1 << 16) #define AR934X_RESET_USB_PHY_PLL_PWD_EXT (1 << 15) #define AR934X_RESET_NANDF (1 << 14) #define AR934X_RESET_GE1_MAC (1 << 13) #define AR934X_RESET_ETH_SWITCH_ANALOG (1 << 12) #define AR934X_RESET_USB_PHY_ANALOG (1 << 11) #define AR934X_RESET_HOST_DMA_INT (1 << 10) #define AR934X_RESET_GE0_MAC (1 << 9) #define AR934X_RESET_ETH_SWITCH (1 << 8) #define AR934X_RESET_PCIE_PHY (1 << 7) #define AR934X_RESET_PCIE (1 << 6) #define AR934X_RESET_USB_HOST (1 << 5) #define AR934X_RESET_USB_PHY (1 << 4) #define AR934X_RESET_USBSUS_OVERRIDE (1 << 3) #define AR934X_RESET_LUT (1 << 2) #define AR934X_RESET_MBOX (1 << 1) #define AR934X_RESET_I2S (1 << 0) #define AR934X_BOOTSTRAP_SW_OPTION8 (1 << 23) #define AR934X_BOOTSTRAP_SW_OPTION7 (1 << 22) #define AR934X_BOOTSTRAP_SW_OPTION6 (1 << 21) #define AR934X_BOOTSTRAP_SW_OPTION5 (1 << 20) #define AR934X_BOOTSTRAP_SW_OPTION4 (1 << 19) #define AR934X_BOOTSTRAP_SW_OPTION3 (1 << 18) #define AR934X_BOOTSTRAP_SW_OPTION2 (1 << 17) #define AR934X_BOOTSTRAP_SW_OPTION1 (1 << 16) #define AR934X_BOOTSTRAP_USB_MODE_DEVICE (1 << 7) #define AR934X_BOOTSTRAP_PCIE_RC (1 << 6) #define AR934X_BOOTSTRAP_EJTAG_MODE (1 << 5) #define AR934X_BOOTSTRAP_REF_CLK_40 (1 << 4) #define AR934X_BOOTSTRAP_BOOT_FROM_SPI (1 << 2) #define AR934X_BOOTSTRAP_SDRAM_DISABLED (1 << 1) #define AR934X_BOOTSTRAP_DDR1 (1 << 0) #define AR934X_PCIE_WMAC_INT_WMAC_MISC (1 << 0) #define AR934X_PCIE_WMAC_INT_WMAC_TX (1 << 1) #define AR934X_PCIE_WMAC_INT_WMAC_RXLP (1 << 2) #define AR934X_PCIE_WMAC_INT_WMAC_RXHP (1 << 3) #define AR934X_PCIE_WMAC_INT_PCIE_RC (1 << 4) #define AR934X_PCIE_WMAC_INT_PCIE_RC0 (1 << 5) #define AR934X_PCIE_WMAC_INT_PCIE_RC1 (1 << 6) #define AR934X_PCIE_WMAC_INT_PCIE_RC2 (1 << 7) #define 
AR934X_PCIE_WMAC_INT_PCIE_RC3 (1 << 8) #define AR934X_PCIE_WMAC_INT_WMAC_ALL \ (AR934X_PCIE_WMAC_INT_WMAC_MISC | AR934X_PCIE_WMAC_INT_WMAC_TX | \ AR934X_PCIE_WMAC_INT_WMAC_RXLP | AR934X_PCIE_WMAC_INT_WMAC_RXHP) #define AR934X_PCIE_WMAC_INT_PCIE_ALL \ (AR934X_PCIE_WMAC_INT_PCIE_RC | AR934X_PCIE_WMAC_INT_PCIE_RC0 | \ AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \ AR934X_PCIE_WMAC_INT_PCIE_RC3) #define REV_ID_MAJOR_AR9341 0x0120 #define REV_ID_MAJOR_AR9342 0x1120 #define REV_ID_MAJOR_AR9344 0x2120 #define AR934X_REV_ID_REVISION_MASK 0xf /* * GPIO block */ #define AR934X_GPIO_REG_FUNC 0x6c #define AR934X_GPIO_COUNT 23 /* * SRIF block */ #define AR934X_SRIF_CPU_DPLL1_REG (AR934X_SRIF_BASE + 0x1c0) #define AR934X_SRIF_CPU_DPLL2_REG (AR934X_SRIF_BASE + 0x1c4) #define AR934X_SRIF_CPU_DPLL3_REG (AR934X_SRIF_BASE + 0x1c8) #define AR934X_SRIF_DDR_DPLL1_REG (AR934X_SRIF_BASE + 0x240) #define AR934X_SRIF_DDR_DPLL2_REG (AR934X_SRIF_BASE + 0x244) #define AR934X_SRIF_DDR_DPLL3_REG (AR934X_SRIF_BASE + 0x248) #define AR934X_SRIF_DPLL1_REFDIV_SHIFT 27 #define AR934X_SRIF_DPLL1_REFDIV_MASK 0x1f #define AR934X_SRIF_DPLL1_NINT_SHIFT 18 #define AR934X_SRIF_DPLL1_NINT_MASK 0x1ff #define AR934X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff #define AR934X_SRIF_DPLL2_LOCAL_PLL (1 << 30) #define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13 #define AR934X_SRIF_DPLL2_OUTDIV_MASK 0x7 /* XXX verify! */ #define AR934X_PLL_VAL_1000 0x16000000 #define AR934X_PLL_VAL_100 0x00000101 #define AR934X_PLL_VAL_10 0x00001616 #endif /* __AR934X_REG_H__ */ Index: head/sys/mips/atheros/if_argevar.h =================================================================== --- head/sys/mips/atheros/if_argevar.h (revision 258779) +++ head/sys/mips/atheros/if_argevar.h (revision 258780) @@ -1,181 +1,181 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef __IF_ARGEVAR_H__ #define __IF_ARGEVAR_H__ #define ARGE_NPHY 32 #define ARGE_TX_RING_COUNT 128 #define ARGE_RX_RING_COUNT 128 #define ARGE_RX_DMA_SIZE (ARGE_RX_RING_COUNT * sizeof(struct arge_desc)) #define ARGE_TX_DMA_SIZE (ARGE_TX_RING_COUNT * sizeof(struct arge_desc)) #define ARGE_MAXFRAGS 8 #define ARGE_RING_ALIGN sizeof(struct arge_desc) #define ARGE_RX_ALIGN sizeof(uint32_t) #define ARGE_TX_RING_ADDR(sc, i) \ ((sc)->arge_rdata.arge_tx_ring_paddr + sizeof(struct arge_desc) * (i)) #define ARGE_RX_RING_ADDR(sc, i) \ ((sc)->arge_rdata.arge_rx_ring_paddr + sizeof(struct arge_desc) * (i)) #define ARGE_INC(x,y) (x) = (((x) + 1) % (y)) #define ARGE_MII_TIMEOUT 1000 #define ARGE_LOCK(_sc) mtx_lock(&(_sc)->arge_mtx) #define ARGE_UNLOCK(_sc) mtx_unlock(&(_sc)->arge_mtx) #define ARGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->arge_mtx, MA_OWNED) /* * register space access macros */ #define ARGE_BARRIER_READ(sc) bus_barrier(sc->arge_res, 0, 0, \ BUS_SPACE_BARRIER_READ) #define ARGE_BARRIER_WRITE(sc) bus_barrier(sc->arge_res, 0, 0, \ BUS_SPACE_BARRIER_WRITE) #define ARGE_BARRIER_RW(sc) bus_barrier(sc->arge_res, 0, 0, \ BUS_SPACE_BARRIER_READ | \ BUS_SPACE_BARRIER_WRITE) #define ARGE_WRITE(sc, reg, val) do { \ bus_write_4(sc->arge_res, (reg), (val)); \ ARGE_BARRIER_WRITE((sc)); \ } while (0) #define ARGE_READ(sc, reg) bus_read_4(sc->arge_res, (reg)) #define ARGE_SET_BITS(sc, reg, bits) \ ARGE_WRITE(sc, reg, ARGE_READ(sc, (reg)) | (bits)) #define ARGE_CLEAR_BITS(sc, reg, bits) \ ARGE_WRITE(sc, reg, ARGE_READ(sc, (reg)) & ~(bits)) #define ARGE_MDIO_WRITE(_sc, _reg, _val) \ ARGE_WRITE((_sc), (_reg), (_val)) #define ARGE_MDIO_READ(_sc, _reg) \ ARGE_READ((_sc), (_reg)) #define ARGE_MDIO_BARRIER_READ(_sc) ARGE_BARRIER_READ(_sc) #define ARGE_MDIO_BARRIER_WRITE(_sc) ARGE_BARRIER_WRITE(_sc) #define ARGE_MDIO_BARRIER_RW(_sc) ARGE_BARRIER_RW(_sc) -#define ARGE_DESC_EMPTY (1 << 31) +#define ARGE_DESC_EMPTY (1U << 31) #define ARGE_DESC_MORE (1 << 24) #define ARGE_DESC_SIZE_MASK ((1 << 12) - 1) #define ARGE_DMASIZE(len) ((len) & ARGE_DESC_SIZE_MASK) struct arge_desc { uint32_t packet_addr; uint32_t packet_ctrl; uint32_t next_desc; uint32_t padding; }; struct arge_txdesc { struct mbuf *tx_m; bus_dmamap_t tx_dmamap; }; struct arge_rxdesc { struct mbuf *rx_m; bus_dmamap_t rx_dmamap; struct arge_desc *desc; }; struct arge_chain_data { bus_dma_tag_t arge_parent_tag; bus_dma_tag_t arge_tx_tag; struct arge_txdesc arge_txdesc[ARGE_TX_RING_COUNT]; bus_dma_tag_t arge_rx_tag; struct arge_rxdesc arge_rxdesc[ARGE_RX_RING_COUNT]; bus_dma_tag_t arge_tx_ring_tag; bus_dma_tag_t arge_rx_ring_tag; bus_dmamap_t arge_tx_ring_map; bus_dmamap_t arge_rx_ring_map; bus_dmamap_t arge_rx_sparemap; int arge_tx_prod; int arge_tx_cons; int arge_tx_cnt; int arge_rx_cons; }; struct arge_ring_data { struct arge_desc *arge_rx_ring; struct arge_desc *arge_tx_ring; bus_addr_t arge_rx_ring_paddr; bus_addr_t arge_tx_ring_paddr; }; /* * Allow PLL values to be overridden. 
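* The struct below carries one PLL word per link speed (pll_10, pll_100, pll_1000); the driver is assumed to program the value matching the negotiated media speed.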
*/ struct arge_pll_data { uint32_t pll_10; uint32_t pll_100; uint32_t pll_1000; }; struct arge_softc { struct ifnet *arge_ifp; /* interface info */ device_t arge_dev; struct ifmedia arge_ifmedia; /* * Media & duplex settings for multiPHY MAC */ uint32_t arge_media_type; uint32_t arge_duplex_mode; uint32_t arge_phymask; uint8_t arge_eaddr[ETHER_ADDR_LEN]; struct resource *arge_res; int arge_rid; struct resource *arge_irq; void *arge_intrhand; device_t arge_miibus; device_t arge_miiproxy; ar71xx_mii_mode arge_miicfg; struct arge_pll_data arge_pllcfg; bus_dma_tag_t arge_parent_tag; bus_dma_tag_t arge_tag; struct mtx arge_mtx; struct callout arge_stat_callout; struct task arge_link_task; struct arge_chain_data arge_cdata; struct arge_ring_data arge_rdata; int arge_link_status; int arge_detach; uint32_t arge_intr_status; int arge_mac_unit; int arge_if_flags; uint32_t arge_debug; uint32_t arge_mdiofreq; struct { uint32_t tx_pkts_unaligned; uint32_t tx_pkts_aligned; uint32_t rx_overflow; uint32_t tx_underflow; } stats; }; #endif /* __IF_ARGEVAR_H__ */ Index: head/sys/mips/malta/gt_pci.c =================================================================== --- head/sys/mips/malta/gt_pci.c (revision 258779) +++ head/sys/mips/malta/gt_pci.c (revision 258780) @@ -1,774 +1,774 @@ /* $NetBSD: gt_pci.c,v 1.4 2003/07/15 00:24:54 lukem Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * PCI configuration support for gt I/O Processor chip. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #define ICU_LEN 16 /* number of ISA IRQs */ /* * XXX: These defines are from NetBSD's . 
The respective file * in the FreeBSD src tree lacks some definitions. */ #define PIC_OCW1 1 #define PIC_OCW2 0 #define PIC_OCW3 0 #define OCW2_SELECT 0 #define OCW2_ILS(x) ((x) << 0) /* interrupt level select */ #define OCW3_POLL_IRQ(x) ((x) & 0x7f) #define OCW3_POLL_PENDING (1U << 7) /* * Galileo controller's registers are LE so convert them * to/from native byte order. We rely on the boot loader or emulator * to set the "swap bytes" configuration correctly for us */ #define GT_PCI_DATA(v) htole32((v)) #define GT_HOST_DATA(v) le32toh((v)) struct gt_pci_softc; struct gt_pci_intr_cookie { int irq; struct gt_pci_softc *sc; }; struct gt_pci_softc { device_t sc_dev; bus_space_tag_t sc_st; bus_space_handle_t sc_ioh_icu1; bus_space_handle_t sc_ioh_icu2; bus_space_handle_t sc_ioh_elcr; int sc_busno; struct rman sc_mem_rman; struct rman sc_io_rman; struct rman sc_irq_rman; unsigned long sc_mem; bus_space_handle_t sc_io; struct resource *sc_irq; struct intr_event *sc_eventstab[ICU_LEN]; struct gt_pci_intr_cookie sc_intr_cookies[ICU_LEN]; uint16_t sc_imask; uint16_t sc_elcr; uint16_t sc_reserved; void *sc_ih; }; static void gt_pci_set_icus(struct gt_pci_softc *); static int gt_pci_intr(void *v); static int gt_pci_probe(device_t); static int gt_pci_attach(device_t); static int gt_pci_activate_resource(device_t, device_t, int, int, struct resource *); static int gt_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int gt_pci_teardown_intr(device_t, device_t, struct resource *, void*); static int gt_pci_maxslots(device_t); static int gt_pci_conf_setup(struct gt_pci_softc *, int, int, int, int, uint32_t *); static uint32_t gt_pci_read_config(device_t, u_int, u_int, u_int, u_int, int); static void gt_pci_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin); static struct resource * gt_pci_alloc_resource(device_t, device_t, int, int *, u_long, u_long, u_long, u_int); static void gt_pci_mask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; /* Disable it, set trigger mode. */ sc->sc_imask |= (1 << irq); sc->sc_elcr |= (1 << irq); gt_pci_set_icus(sc); } static void gt_pci_unmask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; /* Enable it, set trigger mode. */ sc->sc_imask &= ~(1 << irq); sc->sc_elcr &= ~(1 << irq); gt_pci_set_icus(sc); } static void gt_pci_set_icus(struct gt_pci_softc *sc) { /* Enable the cascade IRQ (2) if 8-15 is enabled. 
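* The slave 8259 delivers IRQs 8-15 through input 2 of the master, so the cascade line is unmasked exactly when at least one of the high eight IRQs is unmasked.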
*/ if ((sc->sc_imask & 0xff00) != 0xff00) sc->sc_imask &= ~(1U << 2); else sc->sc_imask |= (1U << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW1, sc->sc_imask & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW1, (sc->sc_imask >> 8) & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); } static int gt_pci_intr(void *v) { struct gt_pci_softc *sc = v; struct intr_event *event; int irq; for (;;) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3); if ((irq & OCW3_POLL_PENDING) == 0) { return FILTER_HANDLED; } irq = OCW3_POLL_IRQ(irq); if (irq == 2) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3); if (irq & OCW3_POLL_PENDING) irq = OCW3_POLL_IRQ(irq) + 8; else irq = 2; } event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) continue; /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); /* XXX: Log stray IRQs */ /* Send a specific EOI to the 8259. */ if (irq > 7) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq & 7)); irq = 2; } bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq)); } return FILTER_HANDLED; } static int gt_pci_probe(device_t dev) { device_set_desc(dev, "GT64120 PCI bridge"); return (0); } static int gt_pci_attach(device_t dev) { uint32_t busno; struct gt_pci_softc *sc = device_get_softc(dev); int rid; busno = 0; sc->sc_dev = dev; sc->sc_busno = busno; sc->sc_st = mips_bus_space_generic; /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io = MIPS_PHYS_TO_KSEG1(MALTA_PCI0_IO_BASE); sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "GT64120 PCI I/O Ports"; /* * First 256 bytes are ISA's registers: e.g. i8259's * So do not use them for general purpose PCI I/O window */ if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, 0x100, 0xffff) != 0) { panic("gt_pci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem = MIPS_PHYS_TO_KSEG1(MALTA_PCIMEM1_BASE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "GT64120 PCI Memory"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem, sc->sc_mem + MALTA_PCIMEM1_SIZE) != 0) { panic("gt_pci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "GT64120 PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, 1, 31) != 0) panic("gt_pci_attach: failed to set up IRQ rman"); /* * Map the PIC/ELCR registers. */ #if 0 if (bus_space_map(sc->sc_st, 0x4d0, 2, 0, &sc->sc_ioh_elcr) != 0) device_printf(dev, "unable to map ELCR registers\n"); if (bus_space_map(sc->sc_st, IO_ICU1, 2, 0, &sc->sc_ioh_icu1) != 0) device_printf(dev, "unable to map ICU1 registers\n"); if (bus_space_map(sc->sc_st, IO_ICU2, 2, 0, &sc->sc_ioh_icu2) != 0) device_printf(dev, "unable to map ICU2 registers\n"); #else sc->sc_ioh_elcr = sc->sc_io + 0x4d0; sc->sc_ioh_icu1 = sc->sc_io + IO_ICU1; sc->sc_ioh_icu2 = sc->sc_io + IO_ICU2; #endif /* All interrupts default to "masked off". */ sc->sc_imask = 0xffff; /* All interrupts default to edge-triggered. */ sc->sc_elcr = 0; /* * Initialize the 8259s. 
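* Each controller gets the standard ICW1..ICW4 sequence: ICW1 (reset, expect four init words), ICW2 (vector base, left at 0 here), ICW3 (slave cascaded on line 2) and ICW4 (8086 mode).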
*/ /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, ICW1_RESET | ICW1_IC4); /* * XXX: values from NetBSD's */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_RR); /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, ICW1_RESET | ICW1_IC4); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_RR); /* * Default all interrupts to edge-triggered. */ bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); /* * Some ISA interrupts are reserved for devices that * we know are hard-wired to certain IRQs. */ sc->sc_reserved = (1U << 0) | /* timer */ (1U << 1) | /* keyboard controller (keyboard) */ (1U << 2) | /* PIC cascade */ (1U << 3) | /* COM 2 */ (1U << 4) | /* COM 1 */ (1U << 6) | /* floppy */ (1U << 7) | /* centronics */ (1U << 8) | /* RTC */ (1U << 9) | /* I2C */ (1U << 12) | /* keyboard controller (mouse) */ (1U << 14) | /* IDE primary */ (1U << 15); /* IDE secondary */ /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, MALTA_SOUTHBRIDGE_INTR, MALTA_SOUTHBRIDGE_INTR, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return ENXIO; } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, gt_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return ENXIO; } /* Initialize memory and i/o rmans. */ device_add_child(dev, "pci", busno); return (bus_generic_attach(dev)); } static int gt_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int gt_pci_conf_setup(struct gt_pci_softc *sc, int bus, int slot, int func, int reg, uint32_t *addr) { *addr = (bus << 16) | (slot << 11) | (func << 8) | reg; return (0); } static uint32_t gt_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t data; uint32_t addr; uint32_t shift, mask; if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return (uint32_t)(-1); /* Clear cause register bits. */ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); - GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1 << 31) | addr); + GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) data = GT_PCI_DATA(GT_REGVAL(GT_PCI0_CFG_DATA)); else data = GT_REGVAL(GT_PCI0_CFG_DATA); /* Check for master abort. 
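* A master or target abort on the configuration cycle latches GTIC_MASABORT0 / GTIC_TARABORT0 in the interrupt cause register; the read is then forced to all ones, the conventional result for a non-existent device.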
*/ if (GT_HOST_DATA(GT_REGVAL(GT_INTR_CAUSE)) & (GTIC_MASABORT0 | GTIC_TARABORT0)) data = (uint32_t) -1; switch(reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch(bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if(reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("gt_pci_read_config: wrong byte count"); break; } #if 0 printf("PCICONF_READ(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif return (data); } static void gt_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t addr; uint32_t reg_data; uint32_t shift, mask; if(bytes != 4) { reg_data = gt_pci_read_config(dev, bus, slot, func, reg, 4); shift = 8 * (reg & 3); switch(bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if(reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("gt_pci_write_config: wrong byte count"); break; } } if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return; /* The Galileo has problems accessing device 31. */ if (bus == 0 && slot == 31) return; /* XXX: no support for bus > 0 yet */ if (bus > 0) return; /* Clear cause register bits. */ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); - GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1 << 31) | addr); + GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) GT_REGVAL(GT_PCI0_CFG_DATA) = GT_PCI_DATA(data); else GT_REGVAL(GT_PCI0_CFG_DATA) = data; #if 0 printf("PCICONF_WRITE(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif } static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin) { int bus; int device; int func; /* struct gt_pci_softc *sc = device_get_softc(pcib); */ bus = pci_get_bus(dev); device = pci_get_slot(dev); func = pci_get_function(dev); /* * XXXMIPS: We need routing logic. This is just a stub. */ switch (device) { case 9: /* * PIIX4 IDE adapter. 
HW IRQ0 */ return 0; case 11: /* Ethernet */ return 10; default: device_printf(pcib, "no IRQ mapping for %d/%d/%d/%d\n", bus, device, func, pin); } return (0); } static int gt_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gt_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int gt_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct gt_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * gt_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct gt_pci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bh = sc->sc_mem; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bh = sc->sc_io; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { bh += (rman_get_start(rv)); rman_set_bustag(rv, gt_pci_bus_space); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); } static int gt_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_space_handle_t p; int error; if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int gt_pci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct gt_pci_softc *sc = device_get_softc(dev); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq >= ICU_LEN || irq == 2) panic("%s: bad irq or type", __func__); event = sc->sc_eventstab[irq]; sc->sc_intr_cookies[irq].irq = irq; sc->sc_intr_cookies[irq].sc = sc; if (event == NULL) { error = intr_event_create(&event, (void *)&sc->sc_intr_cookies[irq], 0, irq, gt_pci_mask_irq, gt_pci_unmask_irq, NULL, NULL, "gt_pci intr%d:", irq); if (error) return (error); sc->sc_eventstab[irq] = event; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); gt_pci_unmask_irq((void *)&sc->sc_intr_cookies[irq]); return 0; } static int gt_pci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct gt_pci_softc *sc = device_get_softc(dev); int irq; irq = rman_get_start(res); gt_pci_mask_irq((void *)&sc->sc_intr_cookies[irq]); return (intr_event_remove_handler(cookie)); } static device_method_t gt_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gt_pci_probe), DEVMETHOD(device_attach, gt_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, gt_read_ivar), DEVMETHOD(bus_write_ivar, gt_write_ivar), DEVMETHOD(bus_alloc_resource, gt_pci_alloc_resource), 
DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, gt_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, gt_pci_setup_intr), DEVMETHOD(bus_teardown_intr, gt_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, gt_pci_maxslots), DEVMETHOD(pcib_read_config, gt_pci_read_config), DEVMETHOD(pcib_write_config, gt_pci_write_config), DEVMETHOD(pcib_route_interrupt, gt_pci_route_interrupt), DEVMETHOD_END }; static driver_t gt_pci_driver = { "pcib", gt_pci_methods, sizeof(struct gt_pci_softc), }; static devclass_t gt_pci_devclass; DRIVER_MODULE(gt_pci, gt, gt_pci_driver, gt_pci_devclass, 0, 0); Index: head/sys/mips/nlm/dev/net/nae.c =================================================================== --- head/sys/mips/nlm/dev/net/nae.c (revision 258779) +++ head/sys/mips/nlm/dev/net/nae.c (revision 258780) @@ -1,1454 +1,1454 @@ /*- * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include void nlm_nae_flush_free_fifo(uint64_t nae_base, int nblocks) { uint32_t data, fifo_mask; fifo_mask = (1 << (4 * nblocks)) - 1; nlm_write_nae_reg(nae_base, NAE_RX_FREE_FIFO_POP, fifo_mask); do { data = nlm_read_nae_reg(nae_base, NAE_RX_FREE_FIFO_POP); } while (data != fifo_mask); nlm_write_nae_reg(nae_base, NAE_RX_FREE_FIFO_POP, 0); } void nlm_program_nae_parser_seq_fifo(uint64_t nae_base, int maxports, struct nae_port_config *cfg) { uint32_t val; int start = 0, size, i; for (i = 0; i < maxports; i++) { size = cfg[i].pseq_fifo_size; val = (((size & 0x1fff) << 17) | ((start & 0xfff) << 5) | (i & 0x1f)); nlm_write_nae_reg(nae_base, NAE_PARSER_SEQ_FIFO_CFG, val); start += size; } } void nlm_setup_rx_cal_cfg(uint64_t nae_base, int total_num_ports, struct nae_port_config *cfg) { int rx_slots = 0, port; int cal_len, cal = 0, last_free = 0; uint32_t val; for (port = 0; port < total_num_ports; port++) { if (cfg[port].rx_slots_reqd) rx_slots += cfg[port].rx_slots_reqd; if (rx_slots > MAX_CAL_SLOTS) { rx_slots = MAX_CAL_SLOTS; break; } } cal_len = rx_slots - 1; do { if (cal >= MAX_CAL_SLOTS) break; last_free = cal; for (port = 0; port < total_num_ports; port++) { if (cfg[port].rx_slots_reqd > 0) { val = (cal_len << 16) | (port << 8) | cal; nlm_write_nae_reg(nae_base, NAE_RX_IF_SLOT_CAL, val); cal++; cfg[port].rx_slots_reqd--; } } if (last_free == cal) break; } while (1); } void nlm_setup_tx_cal_cfg(uint64_t nae_base, int total_num_ports, struct nae_port_config *cfg) { int tx_slots = 0, port; int cal = 0, last_free = 0; uint32_t val; for (port = 0; port < total_num_ports; port++) { if (cfg[port].tx_slots_reqd) tx_slots += cfg[port].tx_slots_reqd; if (tx_slots > MAX_CAL_SLOTS) { tx_slots = MAX_CAL_SLOTS; break; } } nlm_write_nae_reg(nae_base, NAE_EGR_NIOR_CAL_LEN_REG, tx_slots - 1); do { if (cal >= MAX_CAL_SLOTS) break; last_free = cal; for (port = 0; port < total_num_ports; port++) { if (cfg[port].tx_slots_reqd > 0) { val = (port << 7) | (cal << 1) | 1; nlm_write_nae_reg(nae_base, NAE_EGR_NIOR_CRDT_CAL_PROG, val); cal++; cfg[port].tx_slots_reqd--; } } if (last_free == cal) break; } while (1); } void nlm_deflate_frin_fifo_carving(uint64_t nae_base, int total_num_ports) { const int minimum_size = 8; uint32_t value; int intf, start; for (intf = 0; intf < total_num_ports; intf++) { start = minimum_size * intf; value = (minimum_size << 20) | (start << 8) | (intf); nlm_write_nae_reg(nae_base, NAE_FREE_IN_FIFO_CFG, value); } } void nlm_reset_nae(int node) { uint64_t sysbase; uint64_t nae_base; uint64_t nae_pcibase; uint32_t rx_config; uint32_t bar0; int reset_bit; sysbase = nlm_get_sys_regbase(node); nae_base = nlm_get_nae_regbase(node); nae_pcibase = nlm_get_nae_pcibase(node); bar0 = nlm_read_pci_reg(nae_pcibase, XLP_PCI_CFGREG4); #if BYTE_ORDER == LITTLE_ENDIAN if (nlm_is_xlp8xx_ax()) { uint8_t val; /* membar fixup */ val = (bar0 >> 24) & 0xff; bar0 = (val << 24) | (val << 16) | (val << 8) | val; } #endif if (nlm_is_xlp3xx()) reset_bit = 6; else reset_bit = 9; /* Reset NAE */ nlm_write_sys_reg(sysbase, SYS_RESET, (1 << reset_bit)); /* XXXJC - 1s delay here may be too high */ DELAY(1000000); nlm_write_sys_reg(sysbase, SYS_RESET, (0 << reset_bit)); DELAY(1000000); rx_config = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); nlm_write_pci_reg(nae_pcibase, XLP_PCI_CFGREG4, bar0); } void nlm_setup_poe_class_config(uint64_t nae_base, int max_poe_classes, int num_contexts, int 
*poe_cl_tbl) { uint32_t val; int i, max_poe_class_ctxt_tbl_sz; max_poe_class_ctxt_tbl_sz = num_contexts/max_poe_classes; for (i = 0; i < max_poe_class_ctxt_tbl_sz; i++) { val = (poe_cl_tbl[(i/max_poe_classes) & 0x7] << 8) | i; nlm_write_nae_reg(nae_base, NAE_POE_CLASS_SETUP_CFG, val); } } void nlm_setup_vfbid_mapping(uint64_t nae_base) { uint32_t val; int dest_vc, vfbid; /* 127 is max vfbid */ for (vfbid = 127; vfbid >= 0; vfbid--) { dest_vc = nlm_get_vfbid_mapping(vfbid); if (dest_vc < 0) continue; val = (dest_vc << 16) | (vfbid << 4) | 1; nlm_write_nae_reg(nae_base, NAE_VFBID_DESTMAP_CMD, val); } } void nlm_setup_flow_crc_poly(uint64_t nae_base, uint32_t poly) { nlm_write_nae_reg(nae_base, NAE_FLOW_CRC16_POLY_CFG, poly); } void nlm_setup_iface_fifo_cfg(uint64_t nae_base, int maxports, struct nae_port_config *cfg) { uint32_t reg; int fifo_xoff_thresh = 12; int i, size; int cur_iface_start = 0; for (i = 0; i < maxports; i++) { size = cfg[i].iface_fifo_size; reg = ((fifo_xoff_thresh << 25) | ((size & 0x1ff) << 16) | ((cur_iface_start & 0xff) << 8) | (i & 0x1f)); nlm_write_nae_reg(nae_base, NAE_IFACE_FIFO_CFG, reg); cur_iface_start += size; } } void nlm_setup_rx_base_config(uint64_t nae_base, int maxports, struct nae_port_config *cfg) { int base = 0; uint32_t val; int i; int id; for (i = 0; i < (maxports/2); i++) { id = 0x12 + i; /* RX_IF_BASE_CONFIG0 */ val = (base & 0x3ff); base += cfg[(i * 2)].num_channels; val |= ((base & 0x3ff) << 16); base += cfg[(i * 2) + 1].num_channels; nlm_write_nae_reg(nae_base, NAE_REG(7, 0, id), val); } } void nlm_setup_rx_buf_config(uint64_t nae_base, int maxports, struct nae_port_config *cfg) { uint32_t val; int i, sz, k; int context = 0; int base = 0; for (i = 0; i < maxports; i++) { if (cfg[i].type == UNKNOWN) continue; for (k = 0; k < cfg[i].num_channels; k++) { /* write index (context num) */ nlm_write_nae_reg(nae_base, NAE_RXBUF_BASE_DPTH_ADDR, (context+k)); /* write value (rx buf sizes) */ sz = cfg[i].rxbuf_size; val = 0x80000000 | ((base << 2) & 0x3fff); /* base */ val |= (((sz << 2) & 0x3fff) << 16); /* size */ nlm_write_nae_reg(nae_base, NAE_RXBUF_BASE_DPTH, val); nlm_write_nae_reg(nae_base, NAE_RXBUF_BASE_DPTH, (0x7fffffff & val)); base += sz; } context += cfg[i].num_channels; } } void nlm_setup_freein_fifo_cfg(uint64_t nae_base, struct nae_port_config *cfg) { int size, i; uint32_t reg; int start = 0, maxbufpool; if (nlm_is_xlp8xx()) maxbufpool = MAX_FREE_FIFO_POOL_8XX; else maxbufpool = MAX_FREE_FIFO_POOL_3XX; for (i = 0; i < maxbufpool; i++) { /* Each entry represents 2 descs; hence division by 2 */ size = (cfg[i].num_free_descs / 2); if (size == 0) size = 8; reg = ((size & 0x3ff ) << 20) | /* fcSize */ ((start & 0x1ff) << 8) | /* fcStart */ (i & 0x1f); nlm_write_nae_reg(nae_base, NAE_FREE_IN_FIFO_CFG, reg); start += size; } } /* XXX function name */ int nlm_get_flow_mask(int num_ports) { const int max_bits = 5; /* up to 32 ports */ int i; /* Compute the number of bits needed to * represent all the ports; e.g. 18 ports need 5 bits, * since 2 << 4 = 32 is the first size that covers them. */ for (i = 0; i < max_bits; i++) { if (num_ports <= (2 << i)) return (i + 1); } return (max_bits); } void nlm_program_flow_cfg(uint64_t nae_base, int port, uint32_t cur_flow_base, uint32_t flow_mask) { uint32_t val; val = (cur_flow_base << 16) | port; val |= ((flow_mask & 0x1f) << 8); nlm_write_nae_reg(nae_base, NAE_FLOW_BASEMASK_CFG, val); } void xlp_ax_nae_lane_reset_txpll(uint64_t nae_base, int block, int lane_ctrl, int mode) { uint32_t val = 0, saved_data; int rext_sel = 0; val = PHY_LANE_CTRL_RST | PHY_LANE_CTRL_PWRDOWN | (mode << 
PHY_LANE_CTRL_PHYMODE_POS); /* set comma bypass for XAUI */ if (mode != PHYMODE_SGMII) val |= PHY_LANE_CTRL_BPC_XAUI; nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), val); if (lane_ctrl != 4) { rext_sel = (1 << 23); if (mode != PHYMODE_SGMII) rext_sel |= PHY_LANE_CTRL_BPC_XAUI; val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl)); val &= ~PHY_LANE_CTRL_RST; val |= rext_sel; /* Resetting PMA for non-zero lanes */ nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), val); DELAY(20000); /* 20 ms delay, XXXJC: needed? */ val |= PHY_LANE_CTRL_RST; nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), val); val = 0; } /* Come out of reset for TXPLL */ saved_data = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl)) & 0xFFC00000; nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), (0x66 << PHY_LANE_CTRL_ADDR_POS) | PHY_LANE_CTRL_CMD_READ | PHY_LANE_CTRL_CMD_START | PHY_LANE_CTRL_RST | rext_sel | val ); while (((val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl))) & PHY_LANE_CTRL_CMD_PENDING)); val &= 0xFF; /* set bit[4] to 0 */ val &= ~(1 << 4); nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), (0x66 << PHY_LANE_CTRL_ADDR_POS) | PHY_LANE_CTRL_CMD_WRITE | PHY_LANE_CTRL_CMD_START | (0x0 << 19) /* (0x4 << 19) */ | rext_sel | saved_data | val ); /* re-do */ nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), (0x66 << PHY_LANE_CTRL_ADDR_POS) | PHY_LANE_CTRL_CMD_WRITE | PHY_LANE_CTRL_CMD_START | (0x0 << 19) /* (0x4 << 19) */ | rext_sel | saved_data | val ); while (!((val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, (lane_ctrl - PHY_LANE_0_CTRL)))) & PHY_LANE_STAT_PCR)); /* Clear the Power Down bit */ val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl)); val &= ~((1 << 29) | (0x7ffff)); nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), (rext_sel | val)); } void xlp_nae_lane_reset_txpll(uint64_t nae_base, int block, int lane_ctrl, int mode) { uint32_t val = 0; int rext_sel = 0; if (lane_ctrl != 4) rext_sel = (1 << 23); val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl)); /* set comma bypass for XAUI */ if (mode != PHYMODE_SGMII) val |= PHY_LANE_CTRL_BPC_XAUI; val |= 0x100000; val |= (mode << PHY_LANE_CTRL_PHYMODE_POS); val &= ~(0x20000); nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), val); val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl)); val |= 0x40000000; nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), val); /* clear the power down bit */ val = nlm_read_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl)); val &= ~( (1 << 29) | (0x7ffff)); nlm_write_nae_reg(nae_base, NAE_REG(block, PHY, lane_ctrl), rext_sel | val); } void xlp_nae_config_lane_gmac(uint64_t nae_base, int cplx_mask) { int block, lane_ctrl; int cplx_lane_enable; int lane_enable = 0; cplx_lane_enable = LM_SGMII | (LM_SGMII << 4) | (LM_SGMII << 8) | (LM_SGMII << 12); /* Lane mode programming */ block = 7; /* Complexes 0, 1 */ if (cplx_mask & 0x1) lane_enable |= cplx_lane_enable; if (cplx_mask & 0x2) lane_enable |= (cplx_lane_enable << 16); if (lane_enable) { nlm_write_nae_reg(nae_base, NAE_REG(block, LANE_CFG, LANE_CFG_CPLX_0_1), lane_enable); lane_enable = 0; } /* Complexes 2, 3 */ if (cplx_mask & 0x4) lane_enable |= cplx_lane_enable; if (cplx_mask & 0x8) lane_enable |= (cplx_lane_enable << 16); nlm_write_nae_reg(nae_base, NAE_REG(block, LANE_CFG, LANE_CFG_CPLX_2_3), lane_enable); /* complex 4 */ /* XXXJC : fix duplicate code */ if (cplx_mask & 0x10) { nlm_write_nae_reg(nae_base, 
NAE_REG(block, LANE_CFG, LANE_CFG_CPLX_4), ((LM_SGMII << 4) | LM_SGMII)); for (lane_ctrl = PHY_LANE_0_CTRL; lane_ctrl <= PHY_LANE_1_CTRL; lane_ctrl++) { if (!nlm_is_xlp8xx_ax()) xlp_nae_lane_reset_txpll(nae_base, 4, lane_ctrl, PHYMODE_SGMII); else xlp_ax_nae_lane_reset_txpll(nae_base, 4, lane_ctrl, PHYMODE_SGMII); } } for (block = 0; block < 4; block++) { if ((cplx_mask & (1 << block)) == 0) continue; for (lane_ctrl = PHY_LANE_0_CTRL; lane_ctrl <= PHY_LANE_3_CTRL; lane_ctrl++) { if (!nlm_is_xlp8xx_ax()) xlp_nae_lane_reset_txpll(nae_base, block, lane_ctrl, PHYMODE_SGMII); else xlp_ax_nae_lane_reset_txpll(nae_base, block, lane_ctrl, PHYMODE_SGMII); } } } void config_egress_fifo_carvings(uint64_t nae_base, int hwport, int start_ctxt, int num_ctxts, int max_ctxts, struct nae_port_config *cfg) { static uint32_t cur_start[6] = {0, 0, 0, 0, 0, 0}; uint32_t data = 0; uint32_t start = 0, size, offset; int i, limit; limit = start_ctxt + num_ctxts; /* Stage 2 FIFO */ start = cur_start[0]; for (i = start_ctxt; i < limit; i++) { size = cfg[hwport].stg2_fifo_size / max_ctxts; if (size) offset = size - 1; else offset = size; if (offset > cfg[hwport].max_stg2_offset) offset = cfg[hwport].max_stg2_offset; data = offset << 23 | start << 11 | i << 1 | 1; nlm_write_nae_reg(nae_base, NAE_STG2_PMEM_PROG, data); start += size; } cur_start[0] = start; /* EH FIFO */ start = cur_start[1]; for (i = start_ctxt; i < limit; i++) { size = cfg[hwport].eh_fifo_size / max_ctxts; if (size) offset = size - 1; else offset = size ; if (offset > cfg[hwport].max_eh_offset) offset = cfg[hwport].max_eh_offset; data = offset << 23 | start << 11 | i << 1 | 1; nlm_write_nae_reg(nae_base, NAE_EH_PMEM_PROG, data); start += size; } cur_start[1] = start; /* FROUT FIFO */ start = cur_start[2]; for (i = start_ctxt; i < limit; i++) { size = cfg[hwport].frout_fifo_size / max_ctxts; if (size) offset = size - 1; else offset = size ; if (offset > cfg[hwport].max_frout_offset) offset = cfg[hwport].max_frout_offset; data = offset << 23 | start << 11 | i << 1 | 1; nlm_write_nae_reg(nae_base, NAE_FREE_PMEM_PROG, data); start += size; } cur_start[2] = start; /* MS FIFO */ start = cur_start[3]; for (i = start_ctxt; i < limit; i++) { size = cfg[hwport].ms_fifo_size / max_ctxts; if (size) offset = size - 1; else offset = size ; if (offset > cfg[hwport].max_ms_offset) offset = cfg[hwport].max_ms_offset; data = offset << 22 | /* FIXME in PRM */ start << 11 | i << 1 | 1; nlm_write_nae_reg(nae_base, NAE_STR_PMEM_CMD, data); start += size; } cur_start[3] = start; /* PKT FIFO */ start = cur_start[4]; for (i = start_ctxt; i < limit; i++) { size = cfg[hwport].pkt_fifo_size / max_ctxts; if (size) offset = size - 1; else offset = size ; if (offset > cfg[hwport].max_pmem_offset) offset = cfg[hwport].max_pmem_offset; nlm_write_nae_reg(nae_base, NAE_TX_PKT_PMEM_CMD1, offset); data = start << 11 | i << 1 | 1; nlm_write_nae_reg(nae_base, NAE_TX_PKT_PMEM_CMD0, data); start += size; } cur_start[4] = start; /* PKT LEN FIFO */ start = cur_start[5]; for (i = start_ctxt; i < limit; i++) { size = cfg[hwport].pktlen_fifo_size / max_ctxts; if (size) offset = size - 1; else offset = size ; data = offset << 22 | start << 11 | i << 1 | 1; nlm_write_nae_reg(nae_base, NAE_TX_PKTLEN_PMEM_CMD, data); start += size; } cur_start[5] = start; } void config_egress_fifo_credits(uint64_t nae_base, int hwport, int start_ctxt, int num_ctxts, int max_ctxts, struct nae_port_config *cfg) { uint32_t data, credit, max_credit; int i, limit; limit = start_ctxt + num_ctxts; /* Stage1 -> Stage2 */ 
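/* Each context gets an equal share of the port's configured credit, clamped to the corresponding FIFO depth (max offset + 1) carved in config_egress_fifo_carvings() above. */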
max_credit = cfg[hwport].max_stg2_offset + 1; for (i = start_ctxt; i < limit; i++) { credit = cfg[hwport].stg1_2_credit / max_ctxts; if (credit > max_credit) credit = max_credit; data = credit << 16 | i << 4 | 1; nlm_write_nae_reg(nae_base, NAE_STG1_STG2CRDT_CMD, data); } /* Stage2 -> EH */ max_credit = cfg[hwport].max_eh_offset + 1; for (i = start_ctxt; i < limit; i++) { credit = cfg[hwport].stg2_eh_credit / max_ctxts; if (credit > max_credit) credit = max_credit; data = credit << 16 | i << 4 | 1; nlm_write_nae_reg(nae_base, NAE_STG2_EHCRDT_CMD, data); } /* Stage2 -> Frout */ max_credit = cfg[hwport].max_frout_offset + 1; for (i = start_ctxt; i < limit; i++) { credit = cfg[hwport].stg2_frout_credit / max_ctxts; if (credit > max_credit) credit = max_credit; data = credit << 16 | i << 4 | 1; nlm_write_nae_reg(nae_base, NAE_EH_FREECRDT_CMD, data); } /* Stage2 -> MS */ max_credit = cfg[hwport].max_ms_offset + 1; for (i = start_ctxt; i < limit; i++) { credit = cfg[hwport].stg2_ms_credit / max_ctxts; if (credit > max_credit) credit = max_credit; data = credit << 16 | i << 4 | 1; nlm_write_nae_reg(nae_base, NAE_STG2_STRCRDT_CMD, data); } } void nlm_config_freein_fifo_uniq_cfg(uint64_t nae_base, int port, int nblock_free_desc) { uint32_t val; int size_in_clines; size_in_clines = (nblock_free_desc / NAE_CACHELINE_SIZE); val = (size_in_clines << 8) | (port & 0x1f); nlm_write_nae_reg(nae_base, NAE_FREEIN_FIFO_UNIQ_SZ_CFG, val); } /* XXXJC: redundant, see ucore_spray_config() */ void nlm_config_ucore_iface_mask_cfg(uint64_t nae_base, int port, int nblock_ucore_mask) { uint32_t val; val = ( 0x1U << 31) | ((nblock_ucore_mask & 0xffff) << 8) | (port & 0x1f); nlm_write_nae_reg(nae_base, NAE_UCORE_IFACEMASK_CFG, val); } int nlm_nae_init_netior(uint64_t nae_base, int nblocks) { uint32_t ctrl1, ctrl2, ctrl3; if (nblocks == 5) ctrl3 = 0x07 << 18; else ctrl3 = 0; switch (nblocks) { case 2: ctrl1 = 0xff; ctrl2 = 0x0707; break; case 4: case 5: ctrl1 = 0xfffff; ctrl2 = 0x07070707; break; default: printf("WARNING: unsupported blocks %d\n", nblocks); return (-1); } nlm_write_nae_reg(nae_base, NAE_LANE_CFG_SOFTRESET, 0); nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL3, ctrl3); nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL2, ctrl2); nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL1, ctrl1); nlm_write_nae_reg(nae_base, NAE_NETIOR_MISC_CTRL1, 0x0); return (0); } void nlm_nae_init_ingress(uint64_t nae_base, uint32_t desc_size) { uint32_t rx_cfg; uint32_t parser_threshold = 384; rx_cfg = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); rx_cfg &= ~(0x3 << 1); /* reset max message size */ rx_cfg &= ~(0xff << 4); /* clear freein desc cluster size */ rx_cfg &= ~(0x3f << 24); /* reset rx status mask */ /*XXX: why not 7f */ rx_cfg |= 1; /* rx enable */ rx_cfg |= (0x0 << 1); /* max message size */ rx_cfg |= (0x43 & 0x7f) << 24; /* rx status mask */ rx_cfg |= ((desc_size / 64) & 0xff) << 4; /* freein desc cluster size */ nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, rx_cfg); nlm_write_nae_reg(nae_base, NAE_PARSER_CONFIG, (parser_threshold & 0x3ff) | (((parser_threshold / desc_size) + 1) & 0xff) << 12 | (((parser_threshold / 64) % desc_size) & 0xff) << 20); /*nlm_write_nae_reg(nae_base, NAE_RX_FREE_FIFO_THRESH, 33);*/ } void nlm_nae_init_egress(uint64_t nae_base) { uint32_t tx_cfg; tx_cfg = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG); if (!nlm_is_xlp8xx_ax()) { nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, tx_cfg | 0x1 | /* tx enable */ 0x2 | /* tx ace */ 0x4 | /* tx compatible */ (1 << 3)); } else { nlm_write_nae_reg(nae_base, 
NAE_TX_CONFIG, tx_cfg | 0x1 | /* tx enable */ 0x2); /* tx ace */ } } uint32_t ucore_spray_config(uint32_t interface, uint32_t ucore_mask, int cmd) { return ((cmd & 0x1) << 31) | ((ucore_mask & 0xffff) << 8) | (interface & 0x1f); } void nlm_nae_init_ucore(uint64_t nae_base, int if_num, u_int ucore_mask) { uint32_t ucfg; ucfg = ucore_spray_config(if_num, ucore_mask, 1); /* 1 : write */ nlm_write_nae_reg(nae_base, NAE_UCORE_IFACEMASK_CFG, ucfg); } uint64_t nae_tx_desc(u_int type, u_int rdex, u_int fbid, u_int len, uint64_t addr) { return ((uint64_t)type << 62) | ((uint64_t)rdex << 61) | ((uint64_t)fbid << 54) | ((uint64_t)len << 40) | addr; } void nlm_setup_l2type(uint64_t nae_base, int hwport, uint32_t l2extlen, uint32_t l2extoff, uint32_t extra_hdrsize, uint32_t proto_offset, uint32_t fixed_hdroff, uint32_t l2proto) { uint32_t val; val = ((l2extlen & 0x3f) << 26) | ((l2extoff & 0x3f) << 20) | ((extra_hdrsize & 0x3f) << 14) | ((proto_offset & 0x3f) << 8) | ((fixed_hdroff & 0x3f) << 2) | (l2proto & 0x3); nlm_write_nae_reg(nae_base, (NAE_L2_TYPE_PORT0 + hwport), val); } void nlm_setup_l3ctable_mask(uint64_t nae_base, int hwport, uint32_t ptmask, uint32_t l3portmask) { uint32_t val; val = ((ptmask & 0x1) << 6) | ((l3portmask & 0x1) << 5) | (hwport & 0x1f); nlm_write_nae_reg(nae_base, NAE_L3_CTABLE_MASK0, val); } void nlm_setup_l3ctable_even(uint64_t nae_base, int entry, uint32_t l3hdroff, uint32_t ipcsum_en, uint32_t l4protooff, uint32_t l2proto, uint32_t eth_type) { uint32_t val; val = ((l3hdroff & 0x3f) << 26) | ((l4protooff & 0x3f) << 20) | ((ipcsum_en & 0x1) << 18) | ((l2proto & 0x3) << 16) | (eth_type & 0xffff); nlm_write_nae_reg(nae_base, (NAE_L3CTABLE0 + (entry * 2)), val); } void nlm_setup_l3ctable_odd(uint64_t nae_base, int entry, uint32_t l3off0, uint32_t l3len0, uint32_t l3off1, uint32_t l3len1, uint32_t l3off2, uint32_t l3len2) { uint32_t val; val = ((l3off0 & 0x3f) << 26) | ((l3len0 & 0x1f) << 21) | ((l3off1 & 0x3f) << 15) | ((l3len1 & 0x1f) << 10) | ((l3off2 & 0x3f) << 4) | (l3len2 & 0xf); nlm_write_nae_reg(nae_base, (NAE_L3CTABLE0 + ((entry * 2) + 1)), val); } void nlm_setup_l4ctable_even(uint64_t nae_base, int entry, uint32_t im, uint32_t l3cm, uint32_t l4pm, uint32_t port, uint32_t l3camaddr, uint32_t l4proto) { uint32_t val; val = ((im & 0x1) << 19) | ((l3cm & 0x1) << 18) | ((l4pm & 0x1) << 17) | ((port & 0x1f) << 12) | ((l3camaddr & 0xf) << 8) | (l4proto & 0xff); nlm_write_nae_reg(nae_base, (NAE_L4CTABLE0 + (entry * 2)), val); } void nlm_setup_l4ctable_odd(uint64_t nae_base, int entry, uint32_t l4off0, uint32_t l4len0, uint32_t l4off1, uint32_t l4len1) { uint32_t val; val = ((l4off0 & 0x3f) << 21) | ((l4len0 & 0xf) << 17) | ((l4off1 & 0x3f) << 11) | (l4len1 & 0xf); nlm_write_nae_reg(nae_base, (NAE_L4CTABLE0 + ((entry * 2) + 1)), val); } void nlm_enable_hardware_parser(uint64_t nae_base) { uint32_t val; val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); val |= (1 << 12); /* hardware parser enable */ nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val); /*********************************************** * program L3 CAM table ***********************************************/ /* * entry-0 is ipv4 MPLS type 1 label */ /* l3hdroff = 4 bytes, ether_type = 0x8847 for MPLS_type1 */ nlm_setup_l3ctable_even(nae_base, 0, 4, 1, 9, 1, 0x8847); /* l3off0 (8 bytes) -> l3len0 (1 byte) := ip proto * l3off1 (12 bytes) -> l3len1 (4 bytes) := src ip * l3off2 (16 bytes) -> l3len2 (4 bytes) := dst ip */ nlm_setup_l3ctable_odd(nae_base, 0, 9, 1, 12, 4, 16, 4); /* * entry-1 is for ethernet IPv4 packets */ 
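/* l3hdroff = 0 bytes, ether_type = 0x0800 for IPv4 */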
nlm_setup_l3ctable_even(nae_base, 1, 0, 1, 9, 1, 0x0800); /* l3off0 (8 bytes) -> l3len0 (1 byte) := ip proto * l3off1 (12 bytes) -> l3len1 (4 bytes) := src ip * l3off2 (16 bytes) -> l3len2 (4 bytes) := dst ip */ nlm_setup_l3ctable_odd(nae_base, 1, 9, 1, 12, 4, 16, 4); /* * entry-2 is for ethernet IPv6 packets */ nlm_setup_l3ctable_even(nae_base, 2, 0, 1, 6, 1, 0x86dd); /* l3off0 (6 bytes) -> l3len0 (1 byte) := next header (ip proto) * l3off1 (8 bytes) -> l3len1 (16 bytes) := src ip * l3off2 (24 bytes) -> l3len2 (16 bytes) := dst ip */ nlm_setup_l3ctable_odd(nae_base, 2, 6, 1, 8, 16, 24, 16); /* * entry-3 is for ethernet ARP packets */ nlm_setup_l3ctable_even(nae_base, 3, 0, 0, 9, 1, 0x0806); /* extract 30 bytes from packet start */ nlm_setup_l3ctable_odd(nae_base, 3, 0, 30, 0, 0, 0, 0); /* * entry-4 is for ethernet FCoE packets */ nlm_setup_l3ctable_even(nae_base, 4, 0, 0, 9, 1, 0x8906); /* FCoE packet consists of 4 byte start-of-frame, * and 24 bytes of frame header, followed by * 64 bytes of optional-header (ESP, network..), * 2048 bytes of payload, 36 bytes of optional * "fill bytes" or ESP trailer, 4 bytes of CRC, * and 4 bytes of end-of-frame * We extract the first 4 + 24 = 28 bytes */ nlm_setup_l3ctable_odd(nae_base, 4, 0, 28, 0, 0, 0, 0); /* * entry-5 is for vlan tagged frames (0x8100) */ nlm_setup_l3ctable_even(nae_base, 5, 0, 0, 9, 1, 0x8100); /* we extract 31 bytes from the payload */ nlm_setup_l3ctable_odd(nae_base, 5, 0, 31, 0, 0, 0, 0); /* * entry-6 is for ieee 802.1ad provider bridging * tagged frames (0x88a8) */ nlm_setup_l3ctable_even(nae_base, 6, 0, 0, 9, 1, 0x88a8); /* we extract 31 bytes from the payload */ nlm_setup_l3ctable_odd(nae_base, 6, 0, 31, 0, 0, 0, 0); /* * entry-7 is for Cisco's Q-in-Q tagged frames (0x9100) */ nlm_setup_l3ctable_even(nae_base, 7, 0, 0, 9, 1, 0x9100); /* we extract 31 bytes from the payload */ nlm_setup_l3ctable_odd(nae_base, 7, 0, 31, 0, 0, 0, 0); /* * entry-8 is for Ethernet Jumbo frames (0x8870) */ nlm_setup_l3ctable_even(nae_base, 8, 0, 0, 9, 1, 0x8870); /* we extract 31 bytes from the payload */ nlm_setup_l3ctable_odd(nae_base, 8, 0, 31, 0, 0, 0, 0); /* * entry-9 is for MPLS Multicast frames (0x8848) */ nlm_setup_l3ctable_even(nae_base, 9, 0, 0, 9, 1, 0x8848); /* we extract 31 bytes from the payload */ nlm_setup_l3ctable_odd(nae_base, 9, 0, 31, 0, 0, 0, 0); /* * entry-10 is for IEEE 802.1ae MAC Security frames (0x88e5) */ nlm_setup_l3ctable_even(nae_base, 10, 0, 0, 9, 1, 0x88e5); /* we extract 31 bytes from the payload */ nlm_setup_l3ctable_odd(nae_base, 10, 0, 31, 0, 0, 0, 0); /* * entry-11 is for PTP frames (0x88f7) */ nlm_setup_l3ctable_even(nae_base, 11, 0, 0, 9, 1, 0x88f7); /* PTP messages can be sent as UDP messages over * IPv4 or IPv6; and as a raw ethernet message * with ethertype 0x88f7. The message contents * are the same for UDP or ethernet based encapsulations. * The header is 34 bytes long, and we extract * it all out. */ nlm_setup_l3ctable_odd(nae_base, 11, 0, 31, 31, 2, 0, 0); /* * entry-12 is for ethernet Link Control Protocol (LCP) * used with PPPoE */ nlm_setup_l3ctable_even(nae_base, 12, 0, 0, 9, 1, 0xc021); /* LCP packet consists of 1 byte of code, 1 byte of * identifier and two bytes of length followed by * data (up to length bytes). 
* We extract 4 bytes from start of packet */ nlm_setup_l3ctable_odd(nae_base, 12, 0, 4, 0, 0, 0, 0); /* * entry-13 is for ethernet Link Quality Report (0xc025) * used with PPPoE */ nlm_setup_l3ctable_even(nae_base, 13, 0, 0, 9, 1, 0xc025); /* We extract 31 bytes from packet start */ nlm_setup_l3ctable_odd(nae_base, 13, 0, 31, 0, 0, 0, 0); /* * entry-14 is for PPPoE Session (0x8864) */ nlm_setup_l3ctable_even(nae_base, 14, 0, 0, 9, 1, 0x8864); /* We extract 31 bytes from packet start */ nlm_setup_l3ctable_odd(nae_base, 14, 0, 31, 0, 0, 0, 0); /* * entry-15 - default entry */ nlm_setup_l3ctable_even(nae_base, 15, 0, 0, 0, 0, 0x0000); /* We extract 31 bytes from packet start */ nlm_setup_l3ctable_odd(nae_base, 15, 0, 31, 0, 0, 0, 0); /*********************************************** * program L4 CAM table ***********************************************/ /* * entry-0 - tcp packets (0x6) */ nlm_setup_l4ctable_even(nae_base, 0, 0, 0, 1, 0, 0, 0x6); /* tcp header is 20 bytes without tcp options * We extract 20 bytes from tcp start */ nlm_setup_l4ctable_odd(nae_base, 0, 0, 15, 15, 5); /* * entry-1 - udp packets (0x11) */ nlm_setup_l4ctable_even(nae_base, 1, 0, 0, 1, 0, 0, 0x11); /* udp header is 8 bytes in size. * We extract 8 bytes from udp start */ nlm_setup_l4ctable_odd(nae_base, 1, 0, 8, 0, 0); /* * entry-2 - sctp packets (0x84) */ nlm_setup_l4ctable_even(nae_base, 2, 0, 0, 1, 0, 0, 0x84); /* sctp packets have a 12 byte generic header * and various chunks. * We extract 12 bytes from sctp start */ nlm_setup_l4ctable_odd(nae_base, 2, 0, 12, 0, 0); /* * entry-3 - RDP packets (0x1b) */ nlm_setup_l4ctable_even(nae_base, 3, 0, 0, 1, 0, 0, 0x1b); /* RDP packets have 18 bytes of generic header * before variable header starts. * We extract 18 bytes from rdp start */ nlm_setup_l4ctable_odd(nae_base, 3, 0, 15, 15, 3); /* * entry-4 - DCCP packets (0x21) */ nlm_setup_l4ctable_even(nae_base, 4, 0, 0, 1, 0, 0, 0x21); /* DCCP has two types of generic headers of * sizes 16 bytes and 12 bytes if X = 1. * We extract 16 bytes from dccp start */ nlm_setup_l4ctable_odd(nae_base, 4, 0, 15, 15, 1); /* * entry-5 - ipv6 encapsulated in ipv4 packets (0x29) */ nlm_setup_l4ctable_even(nae_base, 5, 0, 0, 1, 0, 0, 0x29); /* ipv4 header is 20 bytes excluding IP options. * We extract 20 bytes from IPv4 start */ nlm_setup_l4ctable_odd(nae_base, 5, 0, 15, 15, 5); /* * entry-6 - ip in ip encapsulation packets (0x04) */ nlm_setup_l4ctable_even(nae_base, 6, 0, 0, 1, 0, 0, 0x04); /* ipv4 header is 20 bytes excluding IP options. 
* We extract 20 bytes from ipv4 start */ nlm_setup_l4ctable_odd(nae_base, 6, 0, 15, 15, 5); /* * entry-7 - default entry (0x0) */ nlm_setup_l4ctable_even(nae_base, 7, 0, 0, 1, 0, 0, 0x0); /* We extract 20 bytes from packet start */ nlm_setup_l4ctable_odd(nae_base, 7, 0, 15, 15, 5); } void nlm_enable_hardware_parser_per_port(uint64_t nae_base, int block, int port) { int hwport = (block * 4) + (port & 0x3); /* program L2 and L3 header extraction for each port */ /* enable ethernet L2 mode on port */ nlm_setup_l2type(nae_base, hwport, 0, 0, 0, 0, 0, 1); /* l2proto and ethtype included in l3cam */ nlm_setup_l3ctable_mask(nae_base, hwport, 1, 0); } void nlm_prepad_enable(uint64_t nae_base, int size) { uint32_t val; val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); val |= (1 << 13); /* prepad enable */ val |= ((size & 0x3) << 22); /* prepad size */ nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val); } void nlm_setup_1588_timer(uint64_t nae_base, struct nae_port_config *cfg) { uint32_t hi, lo, val; hi = cfg[0].ieee1588_userval >> 32; lo = cfg[0].ieee1588_userval & 0xffffffff; nlm_write_nae_reg(nae_base, NAE_1588_PTP_USER_VALUE_HI, hi); nlm_write_nae_reg(nae_base, NAE_1588_PTP_USER_VALUE_LO, lo); hi = cfg[0].ieee1588_ptpoff >> 32; lo = cfg[0].ieee1588_ptpoff & 0xffffffff; nlm_write_nae_reg(nae_base, NAE_1588_PTP_OFFSET_HI, hi); nlm_write_nae_reg(nae_base, NAE_1588_PTP_OFFSET_LO, lo); hi = cfg[0].ieee1588_tmr1 >> 32; lo = cfg[0].ieee1588_tmr1 & 0xffffffff; nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR1_HI, hi); nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR1_LO, lo); hi = cfg[0].ieee1588_tmr2 >> 32; lo = cfg[0].ieee1588_tmr2 & 0xffffffff; nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR2_HI, hi); nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR2_LO, lo); hi = cfg[0].ieee1588_tmr3 >> 32; lo = cfg[0].ieee1588_tmr3 & 0xffffffff; nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR3_HI, hi); nlm_write_nae_reg(nae_base, NAE_1588_PTP_TMR3_LO, lo); nlm_write_nae_reg(nae_base, NAE_1588_PTP_INC_INTG, cfg[0].ieee1588_inc_intg); nlm_write_nae_reg(nae_base, NAE_1588_PTP_INC_NUM, cfg[0].ieee1588_inc_num); nlm_write_nae_reg(nae_base, NAE_1588_PTP_INC_DEN, cfg[0].ieee1588_inc_den); val = nlm_read_nae_reg(nae_base, NAE_1588_PTP_CONTROL); /* set and clear freq_mul = 1 */ nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val | (0x1 << 1)); nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val); /* set and clear load_user_val = 1 */ nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val | (0x1 << 6)); nlm_write_nae_reg(nae_base, NAE_1588_PTP_CONTROL, val); } void nlm_mac_enable(uint64_t nae_base, int nblock, int port_type, int port) { uint32_t mac_cfg1, xaui_cfg; uint32_t netwk_inf; int iface = port & 0x3; switch(port_type) { case SGMIIC: netwk_inf = nlm_read_nae_reg(nae_base, SGMII_NET_IFACE_CTRL(nblock, iface)); nlm_write_nae_reg(nae_base, SGMII_NET_IFACE_CTRL(nblock, iface), netwk_inf | (1 << 2)); /* enable tx */ mac_cfg1 = nlm_read_nae_reg(nae_base, SGMII_MAC_CONF1(nblock, iface)); nlm_write_nae_reg(nae_base, SGMII_MAC_CONF1(nblock, iface), mac_cfg1 | (1 << 2) | /* rx enable */ 1); /* tx enable */ break; case XAUIC: xaui_cfg = nlm_read_nae_reg(nae_base, XAUI_CONFIG1(nblock)); nlm_write_nae_reg(nae_base, XAUI_CONFIG1(nblock), xaui_cfg | XAUI_CONFIG_TFEN | XAUI_CONFIG_RFEN); break; case ILC: break; } } void nlm_mac_disable(uint64_t nae_base, int nblock, int port_type, int port) { uint32_t mac_cfg1, xaui_cfg; uint32_t netwk_inf; int iface = port & 0x3; switch(port_type) { case SGMIIC: mac_cfg1 = nlm_read_nae_reg(nae_base, 
SGMII_MAC_CONF1(nblock, iface)); nlm_write_nae_reg(nae_base, SGMII_MAC_CONF1(nblock, iface), mac_cfg1 & ~((1 << 2) | /* rx enable */ 1)); /* tx enable */ netwk_inf = nlm_read_nae_reg(nae_base, SGMII_NET_IFACE_CTRL(nblock, iface)); nlm_write_nae_reg(nae_base, SGMII_NET_IFACE_CTRL(nblock, iface), netwk_inf & ~(1 << 2)); /* enable tx */ break; case XAUIC: xaui_cfg = nlm_read_nae_reg(nae_base, XAUI_CONFIG1(nblock)); nlm_write_nae_reg(nae_base, XAUI_CONFIG1(nblock), xaui_cfg & ~(XAUI_CONFIG_TFEN | XAUI_CONFIG_RFEN)); break; case ILC: break; } } /* * Set IOR credits for the ports in ifmask to valmask */ static void nlm_nae_set_ior_credit(uint64_t nae_base, uint32_t ifmask, uint32_t valmask) { uint32_t tx_config, tx_ior_credit; tx_ior_credit = nlm_read_nae_reg(nae_base, NAE_TX_IORCRDT_INIT); tx_ior_credit &= ~ifmask; tx_ior_credit |= valmask; nlm_write_nae_reg(nae_base, NAE_TX_IORCRDT_INIT, tx_ior_credit); tx_config = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG); /* need to toggle these bits for credits to be loaded */ nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, tx_config | (TXINITIORCR(ifmask))); nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, tx_config & ~(TXINITIORCR(ifmask))); } int nlm_nae_open_if(uint64_t nae_base, int nblock, int port_type, int port, uint32_t desc_size) { uint32_t netwk_inf; uint32_t mac_cfg1, netior_ctrl3; int iface, iface_ctrl_reg, iface_ctrl3_reg, conf1_reg, conf2_reg; switch (port_type) { case XAUIC: netwk_inf = nlm_read_nae_reg(nae_base, XAUI_NETIOR_XGMAC_CTRL1(nblock)); netwk_inf |= (1 << NETIOR_XGMAC_STATS_CLR_POS); nlm_write_nae_reg(nae_base, XAUI_NETIOR_XGMAC_CTRL1(nblock), netwk_inf); nlm_nae_set_ior_credit(nae_base, 0xf << port, 0xf << port); break; case ILC: nlm_nae_set_ior_credit(nae_base, 0xff << port, 0xff << port); break; case SGMIIC: nlm_nae_set_ior_credit(nae_base, 0x1 << port, 0); /* * XXXJC: split this and merge to sgmii.c * some of this is duplicated from there. 
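
/*
 * The IEEE 1588 control writes above (freq_mul, load_user_val) and the
 * TXINITIORCR credit load in nlm_nae_set_ior_credit() follow the same
 * set-then-clear pulse idiom: the bit triggers an action and is not left
 * set.  A generic sketch of that idiom, using the register accessors seen
 * throughout this file (the helper itself is ours, not the driver's):
 */
static void
nae_reg_pulse(uint64_t nae_base, int reg, uint32_t bits)
{
	uint32_t val;

	val = nlm_read_nae_reg(nae_base, reg);
	nlm_write_nae_reg(nae_base, reg, val | bits);	/* assert */
	nlm_write_nae_reg(nae_base, reg, val & ~bits);	/* deassert */
}
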
*/ /* init phy id to access internal PCS */ iface = port & 0x3; iface_ctrl_reg = SGMII_NET_IFACE_CTRL(nblock, iface); conf1_reg = SGMII_MAC_CONF1(nblock, iface); conf2_reg = SGMII_MAC_CONF2(nblock, iface); netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg); netwk_inf &= 0x7ffffff; netwk_inf |= (port << 27); nlm_write_nae_reg(nae_base, iface_ctrl_reg, netwk_inf); /* Soft-reset sgmii port - set bit 11 to 0 */ netwk_inf &= 0xfffff7ff; nlm_write_nae_reg(nae_base, iface_ctrl_reg, netwk_inf); /* Reset Gmac */ mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg); nlm_write_nae_reg(nae_base, conf1_reg, mac_cfg1 | - (1 << 31) | /* soft reset */ + (1U << 31) | /* soft reset */ (1 << 2) | /* rx enable */ (1)); /* tx enable */ /* default to 1G */ nlm_write_nae_reg(nae_base, conf2_reg, (0x7 << 12) | /* interface preamble length */ (0x2 << 8) | /* interface mode */ (0x1 << 2) | /* pad crc enable */ (0x1)); /* full duplex */ /* clear gmac reset */ mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg); - nlm_write_nae_reg(nae_base, conf1_reg, mac_cfg1 & ~(1 << 31)); + nlm_write_nae_reg(nae_base, conf1_reg, mac_cfg1 & ~(1U << 31)); /* clear speed debug bit */ iface_ctrl3_reg = SGMII_NET_IFACE_CTRL3(nblock, iface); netior_ctrl3 = nlm_read_nae_reg(nae_base, iface_ctrl3_reg); nlm_write_nae_reg(nae_base, iface_ctrl3_reg, netior_ctrl3 & ~(1 << 6)); /* disable TX, RX for now */ mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg); nlm_write_nae_reg(nae_base, conf1_reg, mac_cfg1 & ~(0x5)); netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg); nlm_write_nae_reg(nae_base, iface_ctrl_reg, netwk_inf & ~(0x1 << 2)); /* clear stats counters */ netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg); nlm_write_nae_reg(nae_base, iface_ctrl_reg, netwk_inf | (1 << 15)); /* enable stats counters */ netwk_inf = nlm_read_nae_reg(nae_base, iface_ctrl_reg); nlm_write_nae_reg(nae_base, iface_ctrl_reg, (netwk_inf & ~(1 << 15)) | (1 << 16)); /* flow control? */ mac_cfg1 = nlm_read_nae_reg(nae_base, conf1_reg); nlm_write_nae_reg(nae_base, conf1_reg, mac_cfg1 | (0x3 << 4)); break; } nlm_nae_init_ingress(nae_base, desc_size); nlm_nae_init_egress(nae_base); return (0); } Index: head/sys/mips/nlm/xlp_machdep.c =================================================================== --- head/sys/mips/nlm/xlp_machdep.c (revision 258779) +++ head/sys/mips/nlm/xlp_machdep.c (revision 258780) @@ -1,737 +1,737 @@ /*- * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights * reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
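
/*
 * An aside on the mechanical change this revision makes: with 32-bit
 * int, 1 << 31 overflows the signed type (undefined behavior in C); on
 * common ABIs it produces INT_MIN and then sign-extends when combined
 * with 64-bit values, while 1U << 31 stays the intended single bit.  A
 * standalone demonstration:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t sext = (uint64_t)(1 << 31);	/* UB; typically 0xffffffff80000000 */
	uint64_t bit31 = (uint64_t)(1U << 31);	/* always 0x0000000080000000 */

	printf("signed: %#jx, unsigned: %#jx\n", (uintmax_t)sext,
	    (uintmax_t)bit31);
	return (0);
}
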
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * NETLOGIC_BSD */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include /* cninit() */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif /* 4KB static data area to keep a copy of the bootloader env until the dynamic kenv is set up */ char boot1_env[4096]; uint64_t xlp_cpu_frequency; uint64_t xlp_io_base = MIPS_PHYS_TO_DIRECT_UNCACHED(XLP_DEFAULT_IO_BASE); int xlp_ncores; int xlp_threads_per_core; uint32_t xlp_hw_thread_mask; int xlp_cpuid_to_hwtid[MAXCPU]; int xlp_hwtid_to_cpuid[MAXCPU]; uint64_t xlp_pic_base; static int xlp_mmuval; extern uint32_t _end; extern char XLPResetEntry[], XLPResetEntryEnd[]; static void xlp_setup_core(void) { uint64_t reg; reg = nlm_mfcr(LSU_DEFEATURE); /* Enable Unaligned and L2HPE */ reg |= (1 << 30) | (1 << 23); /* * Experimental: Enable SUE * Speculative Unmap Enable. Enable speculative L2 cache request for * unmapped access. */ reg |= (1ull << 31); /* Clear S1RCM - A0 errata */ reg &= ~0xeull; nlm_mtcr(LSU_DEFEATURE, reg); reg = nlm_mfcr(SCHED_DEFEATURE); /* Experimental: Disable BRU accepting ALU ops - A0 errata */ reg |= (1 << 24); nlm_mtcr(SCHED_DEFEATURE, reg); } static void xlp_setup_mmu(void) { uint32_t pagegrain; if (nlm_threadid() == 0) { nlm_setup_extended_pagemask(0); nlm_large_variable_tlb_en(1); nlm_extended_tlb_en(1); nlm_mmu_setup(0, 0, 0); } /* Enable no-read, no-exec, large-physical-address */ pagegrain = mips_rd_pagegrain(); - pagegrain |= (1 << 31) | /* RIE */ + pagegrain |= (1U << 31) | /* RIE */ (1 << 30) | /* XIE */ (1 << 29); /* ELPA */ mips_wr_pagegrain(pagegrain); } static void xlp_enable_blocks(void) { uint64_t sysbase; int i; for (i = 0; i < XLP_MAX_NODES; i++) { if (!nlm_dev_exists(XLP_IO_SYS_OFFSET(i))) continue; sysbase = nlm_get_sys_regbase(i); nlm_sys_enable_block(sysbase, DFS_DEVICE_RSA); } } static void xlp_parse_mmu_options(void) { uint64_t sysbase; uint32_t cpu_map = xlp_hw_thread_mask; uint32_t core0_thr_mask, core_thr_mask, cpu_rst_mask; int i, j, k; #ifdef SMP if (cpu_map == 0) cpu_map = 0xffffffff; #else /* Uniprocessor! 
*/ if (cpu_map == 0) cpu_map = 0x1; else if (cpu_map != 0x1) { printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n" "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map); cpu_map = 0x1; } #endif xlp_ncores = 1; core0_thr_mask = cpu_map & 0xf; switch (core0_thr_mask) { case 1: xlp_threads_per_core = 1; xlp_mmuval = 0; break; case 3: xlp_threads_per_core = 2; xlp_mmuval = 2; break; case 0xf: xlp_threads_per_core = 4; xlp_mmuval = 3; break; default: goto unsupp; } /* Try to find the enabled cores from SYS block */ sysbase = nlm_get_sys_regbase(0); cpu_rst_mask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET) & 0xff; /* XLP 416 does not report this correctly, fix */ if (nlm_processor_id() == CHIP_PROCESSOR_ID_XLP_416) cpu_rst_mask = 0xe; /* Take out cores which do not exist on chip */ for (i = 1; i < XLP_MAX_CORES; i++) { if ((cpu_rst_mask & (1 << i)) == 0) cpu_map &= ~(0xfu << (4 * i)); } /* Verify other cores' CPU masks */ for (i = 1; i < XLP_MAX_CORES; i++) { core_thr_mask = (cpu_map >> (4 * i)) & 0xf; if (core_thr_mask == 0) continue; if (core_thr_mask != core0_thr_mask) goto unsupp; xlp_ncores++; } xlp_hw_thread_mask = cpu_map; /* setup hardware processor id to cpu id mapping */ for (i = 0; i< MAXCPU; i++) xlp_cpuid_to_hwtid[i] = xlp_hwtid_to_cpuid[i] = -1; for (i = 0, k = 0; i < XLP_MAX_CORES; i++) { if (((cpu_map >> (i * 4)) & 0xf) == 0) continue; for (j = 0; j < xlp_threads_per_core; j++) { xlp_cpuid_to_hwtid[k] = i * 4 + j; xlp_hwtid_to_cpuid[i * 4 + j] = k; k++; } } return; unsupp: printf("ERROR : Unsupported CPU mask [use 1,2 or 4 threads per core].\n" "\tcore0 thread mask [%lx], boot cpu mask [%lx].\n", (u_long)core0_thr_mask, (u_long)cpu_map); panic("Invalid CPU mask - halting.\n"); return; } /* Parse cmd line args as env - copied from ar71xx */ static void xlp_parse_bootargs(char *cmdline) { char *n, *v; while ((v = strsep(&cmdline, " \n")) != NULL) { if (*v == '\0') continue; if (*v == '-') { while (*v != '\0') { v++; switch (*v) { case 'a': boothowto |= RB_ASKNAME; break; case 'd': boothowto |= RB_KDB; break; case 'g': boothowto |= RB_GDB; break; case 's': boothowto |= RB_SINGLE; break; case 'v': boothowto |= RB_VERBOSE; break; } } } else { n = strsep(&v, "="); if (v == NULL) setenv(n, "1"); else setenv(n, v); } } } #ifdef FDT static void xlp_bootargs_init(__register_t arg) { char buf[2048]; /* early stack is big enough */ void *dtbp; phandle_t chosen; ihandle_t mask; dtbp = (void *)(intptr_t)arg; #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not passed as argument try * to use the statically embedded one. 
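*/

/*
 * A worked example of the cpuid <-> hwtid mapping built in
 * xlp_parse_mmu_options() above, rerun standalone for a sample cpu_map
 * of 0x3333 (four cores with threads 0-1 enabled in each); the map value
 * is made up for illustration:
 */
#include <stdio.h>

int
main(void)
{
	unsigned int cpu_map = 0x3333;
	int cpuid_to_hwtid[32];
	int i, j, k;

	for (i = 0, k = 0; i < 8; i++) {
		if (((cpu_map >> (i * 4)) & 0xf) == 0)
			continue;
		for (j = 0; j < 2; j++)		/* 2 threads per core */
			cpuid_to_hwtid[k++] = i * 4 + j;
	}
	for (i = 0; i < k; i++)
		printf("cpu%d -> hwtid %d\n", i, cpuid_to_hwtid[i]);
	/* cpu0->0, cpu1->1, cpu2->4, cpu3->5, cpu4->8, cpu5->9, ... */
	return (0);
}
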
if (dtbp == NULL) dtbp = &fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) while (1); if (OF_init((void *)dtbp) != 0) while (1); if (fdt_immr_addr(xlp_io_base) != 0) while (1); OF_interpret("perform-fixup", 0); chosen = OF_finddevice("/chosen"); if (OF_getprop(chosen, "cpumask", &mask, sizeof(mask)) != -1) { xlp_hw_thread_mask = mask; } if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1) xlp_parse_bootargs(buf); } #else /* * arg is a pointer to the environment block, the format of the block is * a=xyz\0b=pqr\0\0 */ static void xlp_bootargs_init(__register_t arg) { char buf[2048]; /* early stack is big enough */ char *p, *v, *n; uint32_t mask; /* * provide backward compat for passing cpu mask as arg */ if (arg & 1) { xlp_hw_thread_mask = arg; return; } p = (void *)(intptr_t)arg; while (*p != '\0') { strlcpy(buf, p, sizeof(buf)); v = buf; n = strsep(&v, "="); if (v == NULL) setenv(n, "1"); else setenv(n, v); p += strlen(p) + 1; } /* CPU mask can be passed through env */ if (getenv_uint("cpumask", &mask) != 0) xlp_hw_thread_mask = mask; /* command line argument */ v = getenv("bootargs"); if (v != NULL) { strlcpy(buf, v, sizeof(buf)); xlp_parse_bootargs(buf); freeenv(v); } } #endif static void mips_init(void) { init_param1(); init_param2(physmem); mips_cpu_init(); cpuinfo.cache_coherent_dma = TRUE; pmap_bootstrap(); mips_proc0_init(); mutex_init(); #ifdef DDB kdb_init(); if (boothowto & RB_KDB) { kdb_enter("Boot flags requested debugger", NULL); } #endif } unsigned int platform_get_timecount(struct timecounter *tc __unused) { uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER); return (unsigned int)~count; } static void xlp_pic_init(void) { struct timecounter pic_timecounter = { platform_get_timecount, /* get_timecount */ 0, /* no poll_pps */ ~0U, /* counter_mask */ XLP_IO_CLK, /* frequency */ "XLRPIC", /* name */ 2000, /* quality (adjusted in code) */ }; int i; int maxirt; xlp_pic_base = nlm_get_pic_regbase(0); /* TODO: Add other nodes */ maxirt = nlm_read_reg(nlm_get_pic_pcibase(nlm_nodeid()), XLP_PCI_DEVINFO_REG0); printf("Initializing PIC...@%jx %d IRTs\n", (uintmax_t)xlp_pic_base, maxirt); /* Bind all PIC irqs to cpu 0 */ for (i = 0; i < maxirt; i++) nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0, 1, 0, 0x1); nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0); platform_timecounter = &pic_timecounter; } #if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64_BIT */ #ifdef XLP_SIM #define XLP_MEM_LIM 0x200000000ULL #else #define XLP_MEM_LIM 0x10000000000ULL #endif #else #define XLP_MEM_LIM 0xfffff000UL #endif static vm_paddr_t xlp_mem_excl[] = { 0, 0, /* for kernel image region, see xlp_mem_init */ 0x0c000000, 0x14000000, /* uboot area, cms queue and other stuff */ 0x1fc00000, 0x1fd00000, /* reset vec */ 0x1e000000, 0x1e200000, /* poe buffers */ }; static int mem_exclude_add(vm_paddr_t *avail, vm_paddr_t mstart, vm_paddr_t mend) { int nreg = sizeof(xlp_mem_excl)/sizeof(xlp_mem_excl[0]); int i, pos; pos = 0; for (i = 0; i < nreg; i += 2) { if (mstart > xlp_mem_excl[i + 1]) continue; if (mstart < xlp_mem_excl[i]) { avail[pos++] = mstart; if (mend < xlp_mem_excl[i]) avail[pos++] = mend; else avail[pos++] = xlp_mem_excl[i]; } mstart = xlp_mem_excl[i + 1]; if (mend <= mstart) break; } if (mstart < mend) { avail[pos++] = mstart; avail[pos++] = mend; } return (pos); } static void xlp_mem_init(void) { vm_paddr_t physsz, tmp; uint64_t bridgebase, base, lim, val; int i, j, k, n; /* update kernel image area in exclude regions */ tmp = 
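
/*
 * The non-FDT xlp_bootargs_init() above walks a doubly-NUL-terminated
 * environment block of the form "a=xyz\0b=pqr\0\0".  The same walk as a
 * standalone userland program (setenv() replaced by printf(); strsep()
 * and strlcpy() as in libc):
 */
#include <stdio.h>
#include <string.h>

static void
parse_env_block(const char *p)
{
	char buf[128], *v, *n;

	while (*p != '\0') {
		strlcpy(buf, p, sizeof(buf));
		v = buf;
		n = strsep(&v, "=");
		printf("%s = %s\n", n, v != NULL ? v : "1");
		p += strlen(p) + 1;
	}
}

int
main(void)
{
	parse_env_block("a=xyz\0b=pqr\0");	/* literal adds the final NUL */
	return (0);
}
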
(vm_paddr_t)MIPS_KSEG0_TO_PHYS(&_end); tmp = round_page(tmp) + 0x20000; /* round up */ xlp_mem_excl[1] = tmp; printf("Memory (from DRAM BARs):\n"); bridgebase = nlm_get_bridge_regbase(0); /* TODO: Add other nodes */ physsz = 0; for (i = 0, j = 0; i < 8; i++) { val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i)); val = (val >> 12) & 0xfffff; base = val << 20; val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i)); val = (val >> 12) & 0xfffff; if (val == 0) /* BAR not enabled */ continue; lim = (val + 1) << 20; printf(" BAR %d: %#jx - %#jx : ", i, (intmax_t)base, (intmax_t)lim); if (lim <= base) { printf("\tskipped - malformed %#jx -> %#jx\n", (intmax_t)base, (intmax_t)lim); continue; } else if (base >= XLP_MEM_LIM) { printf(" skipped - outside usable limit %#jx.\n", (intmax_t)XLP_MEM_LIM); continue; } else if (lim >= XLP_MEM_LIM) { lim = XLP_MEM_LIM; printf(" truncated to %#jx.\n", (intmax_t)XLP_MEM_LIM); } else printf(" usable\n"); /* exclude unusable regions from BAR and add rest */ n = mem_exclude_add(&phys_avail[j], base, lim); for (k = j; k < j + n; k += 2) { physsz += phys_avail[k + 1] - phys_avail[k]; printf("\tMem[%d]: %#jx - %#jx\n", k/2, (intmax_t)phys_avail[k], (intmax_t)phys_avail[k+1]); } j = k; } /* setup final entry with 0 */ phys_avail[j] = phys_avail[j + 1] = 0; /* copy phys_avail to dump_avail */ for (i = 0; i <= j + 1; i++) dump_avail[i] = phys_avail[i]; realmem = physmem = btoc(physsz); } void platform_start(__register_t a0 __unused, __register_t a1 __unused, __register_t a2 __unused, __register_t a3 __unused) { /* Initialize pcpu stuff */ mips_pcpu0_init(); /* initialize console so that we have printf */ boothowto |= (RB_SERIAL | RB_MULTIPLE); /* Use multiple consoles */ init_static_kenv(boot1_env, sizeof(boot1_env)); xlp_bootargs_init(a0); /* clockrate used by delay, so initialize it here */ xlp_cpu_frequency = xlp_get_cpu_frequency(0, 0); cpu_clock = xlp_cpu_frequency / 1000000; mips_timer_early_init(xlp_cpu_frequency); /* Init console please */ cninit(); /* Early core init and fixes for errata */ xlp_setup_core(); xlp_parse_mmu_options(); xlp_mem_init(); bcopy(XLPResetEntry, (void *)MIPS_RESET_EXC_VEC, XLPResetEntryEnd - XLPResetEntry); #ifdef SMP /* * We will enable the other threads in core 0 here * so that the TLB and cache info is correct when * mips_init runs */ xlp_enable_threads(xlp_mmuval); #endif /* setup for the startup core */ xlp_setup_mmu(); xlp_enable_blocks(); /* Read/Guess/setup board information */ nlm_board_info_setup(); /* MIPS generic init */ mips_init(); /* * XLP specific post initialization * initialize other on chip stuff */ xlp_pic_init(); mips_timer_init_params(xlp_cpu_frequency, 0); } void platform_cpu_init() { } void platform_reset(void) { uint64_t sysbase = nlm_get_sys_regbase(0); nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1); for( ; ; ) __asm __volatile("wait"); } #ifdef SMP /* * XLP threads are started simultaneously when we enable threads, this will * ensure that the threads are blocked in platform_init_ap, until they are * ready to proceed to smp_init_secondary() */ static volatile int thr_unblock[4]; int platform_start_ap(int cpuid) { uint32_t coremask, val; uint64_t sysbase = nlm_get_sys_regbase(0); int hwtid = xlp_cpuid_to_hwtid[cpuid]; int core, thr; core = hwtid / 4; thr = hwtid % 4; if (thr == 0) { /* First thread in core, do core wake up */ coremask = 1u << core; /* Enable core clock */ val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL); val &= ~coremask; nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val); /* 
Remove CPU Reset */ val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET); val &= ~coremask & 0xff; nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val); if (bootverbose) printf("Waking up core %d ...", core); /* Poll for CPU to mark itself coherent */ do { val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE); } while ((val & coremask) != 0); if (bootverbose) printf("Done\n"); } else { /* otherwise release the threads stuck in platform_init_ap */ thr_unblock[thr] = 1; } return (0); } void platform_init_ap(int cpuid) { uint32_t stat; int thr; /* The first thread has to setup the MMU and enable other threads */ thr = nlm_threadid(); if (thr == 0) { xlp_setup_core(); xlp_enable_threads(xlp_mmuval); } else { /* * FIXME busy wait here eats too many cycles, especially * in the core 0 while bootup */ while (thr_unblock[thr] == 0) __asm__ __volatile__ ("nop;nop;nop;nop"); thr_unblock[thr] = 0; } xlp_setup_mmu(); stat = mips_rd_status(); KASSERT((stat & MIPS_SR_INT_IE) == 0, ("Interrupts enabled in %s!", __func__)); stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT; mips_wr_status(stat); nlm_write_c0_eimr(0ull); xlp_enable_irq(IRQ_IPI); xlp_enable_irq(IRQ_TIMER); xlp_enable_irq(IRQ_MSGRING); return; } int platform_ipi_intrnum(void) { return (IRQ_IPI); } void platform_ipi_send(int cpuid) { nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid], platform_ipi_intrnum(), 0); } void platform_ipi_clear(void) { } int platform_processor_id(void) { return (xlp_hwtid_to_cpuid[nlm_cpuid()]); } void platform_cpu_mask(cpuset_t *mask) { int i, s; CPU_ZERO(mask); s = xlp_ncores * xlp_threads_per_core; for (i = 0; i < s; i++) CPU_SET(i, mask); } struct cpu_group * platform_smp_topo() { return (smp_topo_2level(CG_SHARE_L2, xlp_ncores, CG_SHARE_L1, xlp_threads_per_core, CG_FLAG_THREAD)); } #endif Index: head/sys/mips/rmi/pic.h =================================================================== --- head/sys/mips/rmi/pic.h (revision 258779) +++ head/sys/mips/rmi/pic.h (revision 258780) @@ -1,272 +1,272 @@ /*- * Copyright (c) 2003-2009 RMI Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of RMI Corporation, nor the names of its contributors, * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
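
/*
 * The thr_unblock[] handshake above is a minimal spin-flag protocol: the
 * thread running platform_start_ap() stores 1, and the sibling thread
 * spinning in platform_init_ap() proceeds once it observes the store and
 * clears the flag again.  The same handshake sketched with C11 atomics,
 * which make the intended ordering explicit (the kernel code relies on
 * volatile instead):
 */
#include <stdatomic.h>

static atomic_int unblock_flag[4];

static void
thr_unblock_set(int thr)	/* waking side */
{
	atomic_store_explicit(&unblock_flag[thr], 1, memory_order_release);
}

static void
thr_unblock_wait(int thr)	/* blocked side */
{
	while (atomic_load_explicit(&unblock_flag[thr],
	    memory_order_acquire) == 0)
		;	/* spin */
	atomic_store_explicit(&unblock_flag[thr], 0, memory_order_relaxed);
}
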
* * RMI_BSD * $FreeBSD$ */ #ifndef _RMI_PIC_H_ #define _RMI_PIC_H_ #include #include #include #include #define PIC_IRT_WD_INDEX 0 #define PIC_IRT_TIMER_INDEX(i) (1 + (i)) #define PIC_IRT_UART_0_INDEX 9 #define PIC_IRT_UART_1_INDEX 10 #define PIC_IRT_I2C_0_INDEX 11 #define PIC_IRT_I2C_1_INDEX 12 #define PIC_IRT_PCMCIA_INDEX 13 #define PIC_IRT_GPIO_INDEX 14 #define PIC_IRT_HYPER_INDEX 15 #define PIC_IRT_PCIX_INDEX 16 #define PIC_IRT_GMAC0_INDEX 17 #define PIC_IRT_GMAC1_INDEX 18 #define PIC_IRT_GMAC2_INDEX 19 #define PIC_IRT_GMAC3_INDEX 20 #define PIC_IRT_XGS0_INDEX 21 #define PIC_IRT_XGS1_INDEX 22 #define PIC_IRT_HYPER_FATAL_INDEX 23 #define PIC_IRT_PCIX_FATAL_INDEX 24 #define PIC_IRT_BRIDGE_AERR_INDEX 25 #define PIC_IRT_BRIDGE_BERR_INDEX 26 #define PIC_IRT_BRIDGE_TB_INDEX 27 #define PIC_IRT_BRIDGE_AERR_NMI_INDEX 28 /* numbering for XLS */ #define PIC_IRT_BRIDGE_ERR_INDEX 25 #define PIC_IRT_PCIE_LINK0_INDEX 26 #define PIC_IRT_PCIE_LINK1_INDEX 27 #define PIC_IRT_PCIE_LINK2_INDEX 23 #define PIC_IRT_PCIE_LINK3_INDEX 24 #define PIC_IRT_PCIE_B0_LINK2_INDEX 28 #define PIC_IRT_PCIE_B0_LINK3_INDEX 29 #define PIC_IRT_PCIE_INT_INDEX 28 #define PIC_IRT_PCIE_FATAL_INDEX 29 #define PIC_IRT_GPIO_B_INDEX 30 #define PIC_IRT_USB_INDEX 31 #define PIC_NUM_IRTS 32 #define PIC_CLOCK_TIMER 7 #define PIC_CTRL 0x00 #define PIC_IPI 0x04 #define PIC_INT_ACK 0x06 #define WD_MAX_VAL_0 0x08 #define WD_MAX_VAL_1 0x09 #define WD_MASK_0 0x0a #define WD_MASK_1 0x0b #define WD_HEARBEAT_0 0x0c #define WD_HEARBEAT_1 0x0d #define PIC_IRT_0_BASE 0x40 #define PIC_IRT_1_BASE 0x80 #define PIC_TIMER_MAXVAL_0_BASE 0x100 #define PIC_TIMER_MAXVAL_1_BASE 0x110 #define PIC_TIMER_COUNT_0_BASE 0x120 #define PIC_TIMER_COUNT_1_BASE 0x130 #define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr)) #define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr)) #define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i)) #define PIC_TIMER_MAXVAL_1(i) (PIC_TIMER_MAXVAL_1_BASE + (i)) #define PIC_TIMER_COUNT_0(i) (PIC_TIMER_COUNT_0_BASE + (i)) #define PIC_TIMER_COUNT_1(i) (PIC_TIMER_COUNT_1_BASE + (i)) #define PIC_TIMER_HZ 66000000U /* * We use a simple mapping from PIC interrupts to CPU IRQs. * The PIC interrupts 0-31 are mapped to CPU IRQs 8-39; 
* this leaves the lower 0-7 for the cpu interrupts (like * count/compare, msgrng) and 40-63 for IPIs */ #define PIC_IRQ_BASE 8 #define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i)) #define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE) #define PIC_WD_IRQ (PIC_IRQ_BASE + PIC_IRT_WD_INDEX) #define PIC_TIMER_IRQ(i) (PIC_IRQ_BASE + PIC_IRT_TIMER_INDEX(i)) #define PIC_CLOCK_IRQ PIC_TIMER_IRQ(PIC_CLOCK_TIMER) #define PIC_UART_0_IRQ (PIC_IRQ_BASE + PIC_IRT_UART_0_INDEX) #define PIC_UART_1_IRQ (PIC_IRQ_BASE + PIC_IRT_UART_1_INDEX) #define PIC_I2C_0_IRQ (PIC_IRQ_BASE + PIC_IRT_I2C_0_INDEX) #define PIC_I2C_1_IRQ (PIC_IRQ_BASE + PIC_IRT_I2C_1_INDEX) #define PIC_PCMCIA_IRQ (PIC_IRQ_BASE + PIC_IRT_PCMCIA_INDEX) #define PIC_GPIO_IRQ (PIC_IRQ_BASE + PIC_IRT_GPIO_INDEX) #define PIC_HYPER_IRQ (PIC_IRQ_BASE + PIC_IRT_HYPER_INDEX) #define PIC_PCIX_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIX_INDEX) #define PIC_GMAC_0_IRQ (PIC_IRQ_BASE + PIC_IRT_GMAC0_INDEX) #define PIC_GMAC_1_IRQ (PIC_IRQ_BASE + PIC_IRT_GMAC1_INDEX) #define PIC_GMAC_2_IRQ (PIC_IRQ_BASE + PIC_IRT_GMAC2_INDEX) #define PIC_GMAC_3_IRQ (PIC_IRQ_BASE + PIC_IRT_GMAC3_INDEX) #define PIC_XGS_0_IRQ (PIC_IRQ_BASE + PIC_IRT_XGS0_INDEX) #define PIC_XGS_1_IRQ (PIC_IRQ_BASE + PIC_IRT_XGS1_INDEX) #define PIC_HYPER_FATAL_IRQ (PIC_IRQ_BASE + PIC_IRT_HYPER_FATAL_INDEX) #define PIC_PCIX_FATAL_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIX_FATAL_INDEX) #define PIC_BRIDGE_AERR_IRQ (PIC_IRQ_BASE + PIC_IRT_BRIDGE_AERR_INDEX) #define PIC_BRIDGE_BERR_IRQ (PIC_IRQ_BASE + PIC_IRT_BRIDGE_BERR_INDEX) #define PIC_BRIDGE_TB_IRQ (PIC_IRQ_BASE + PIC_IRT_BRIDGE_TB_INDEX) #define PIC_BRIDGE_AERR_NMI_IRQ (PIC_IRQ_BASE + PIC_IRT_BRIDGE_AERR_NMI_INDEX) #define PIC_BRIDGE_ERR_IRQ (PIC_IRQ_BASE + PIC_IRT_BRIDGE_ERR_INDEX) #define PIC_PCIE_LINK0_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_LINK0_INDEX) #define PIC_PCIE_LINK1_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_LINK1_INDEX) #define PIC_PCIE_LINK2_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_LINK2_INDEX) #define PIC_PCIE_LINK3_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_LINK3_INDEX) #define PIC_PCIE_B0_LINK2_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_B0_LINK2_INDEX) #define PIC_PCIE_B0_LINK3_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_B0_LINK3_INDEX) #define PIC_PCIE_INT_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_INT_INDEX) #define PIC_PCIE_FATAL_IRQ (PIC_IRQ_BASE + PIC_IRT_PCIE_FATAL_INDEX) #define PIC_GPIO_B_IRQ (PIC_IRQ_BASE + PIC_IRT_GPIO_B_INDEX) #define PIC_USB_IRQ (PIC_IRQ_BASE + PIC_IRT_USB_INDEX) #define PIC_IRQ_IS_PICINTR(irq) ((irq) >= PIC_IRQ_BASE && \ (irq) < PIC_IRQ_BASE + PIC_NUM_IRTS) #define PIC_IS_EDGE_TRIGGERED(i) ((i) >= PIC_IRT_TIMER_INDEX(0) && \ (i) <= PIC_IRT_TIMER_INDEX(7)) extern struct mtx xlr_pic_lock; static __inline uint32_t pic_read_control(void) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); uint32_t reg; mtx_lock_spin(&xlr_pic_lock); reg = xlr_read_reg(mmio, PIC_CTRL); mtx_unlock_spin(&xlr_pic_lock); return (reg); } static __inline void pic_write_control(uint32_t control) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); mtx_lock_spin(&xlr_pic_lock); xlr_write_reg(mmio, PIC_CTRL, control); mtx_unlock_spin(&xlr_pic_lock); } static __inline void pic_update_control(uint32_t control) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); mtx_lock_spin(&xlr_pic_lock); xlr_write_reg(mmio, PIC_CTRL, (control | xlr_read_reg(mmio, PIC_CTRL))); mtx_unlock_spin(&xlr_pic_lock); } static __inline void pic_ack(int picintr) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); xlr_write_reg(mmio, PIC_INT_ACK, 1U << picintr); } static __inline void pic_send_ipi(int cpu, int ipi) { xlr_reg_t *mmio = 
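
/*
 * A few sanity checks of the interrupt <-> IRQ mapping macros above; the
 * values follow directly from PIC_IRQ_BASE = 8 and PIC_NUM_IRTS = 32
 * (C11 _Static_assert, added here for illustration only):
 */
_Static_assert(PIC_INTR_TO_IRQ(0) == 8, "first PIC interrupt -> IRQ 8");
_Static_assert(PIC_INTR_TO_IRQ(PIC_NUM_IRTS - 1) == 39, "last -> IRQ 39");
_Static_assert(PIC_IRQ_TO_INTR(PIC_CLOCK_IRQ) ==
    PIC_IRT_TIMER_INDEX(PIC_CLOCK_TIMER), "IRQ <-> interrupt round trip");
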
xlr_io_mmio(XLR_IO_PIC_OFFSET); int tid, pid; tid = cpu & 0x3; pid = (cpu >> 2) & 0x7; xlr_write_reg(mmio, PIC_IPI, (pid << 20) | (tid << 16) | ipi); } static __inline void pic_setup_intr(int picintr, int irq, uint32_t cpumask, int level) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); mtx_lock_spin(&xlr_pic_lock); xlr_write_reg(mmio, PIC_IRT_0(picintr), cpumask); - xlr_write_reg(mmio, PIC_IRT_1(picintr), ((1 << 31) | (level << 30) | + xlr_write_reg(mmio, PIC_IRT_1(picintr), ((1U << 31) | (level << 30) | (1 << 6) | irq)); mtx_unlock_spin(&xlr_pic_lock); } static __inline void pic_init_timer(int timer) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); uint32_t val; mtx_lock_spin(&xlr_pic_lock); val = xlr_read_reg(mmio, PIC_CTRL); val |= (1 << (8 + timer)); xlr_write_reg(mmio, PIC_CTRL, val); mtx_unlock_spin(&xlr_pic_lock); } static __inline void pic_set_timer(int timer, uint64_t maxval) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); xlr_write_reg(mmio, PIC_TIMER_MAXVAL_0(timer), (maxval & 0xffffffff)); xlr_write_reg(mmio, PIC_TIMER_MAXVAL_1(timer), (maxval >> 32) & 0xffffffff); } static __inline uint32_t pic_timer_count32(int timer) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); return (xlr_read_reg(mmio, PIC_TIMER_COUNT_0(timer))); } /* * The timer can wrap 32 bits between the two reads, so we * need additional logic to detect that. */ static __inline uint64_t pic_timer_count(int timer) { xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET); uint32_t tu1, tu2, tl; tu1 = xlr_read_reg(mmio, PIC_TIMER_COUNT_1(timer)); tl = xlr_read_reg(mmio, PIC_TIMER_COUNT_0(timer)); tu2 = xlr_read_reg(mmio, PIC_TIMER_COUNT_1(timer)); if (tu2 != tu1) tl = xlr_read_reg(mmio, PIC_TIMER_COUNT_0(timer)); return (((uint64_t)tu2 << 32) | tl); } #endif /* _RMI_PIC_H_ */ Index: head/sys/ofed/drivers/infiniband/hw/mlx4/qp.c =================================================================== --- head/sys/ofed/drivers/infiniband/hw/mlx4/qp.c (revision 258779) +++ head/sys/ofed/drivers/infiniband/hw/mlx4/qp.c (revision 258780) @@ -1,3588 +1,3588 @@ /* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
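
/*
 * pic_timer_count() above shows the standard wrap-safe way to read a
 * 64-bit counter exposed as two 32-bit registers: read upper, lower,
 * upper again, and re-read the lower word if the upper changed in
 * between.  The same pattern for any split counter (generic sketch;
 * read_hi/read_lo stand in for the register accessors):
 */
#include <stdint.h>

static inline uint64_t
read_split_counter(uint32_t (*read_hi)(void), uint32_t (*read_lo)(void))
{
	uint32_t hi1, hi2, lo;

	hi1 = read_hi();
	lo = read_lo();
	hi2 = read_hi();
	if (hi2 != hi1)
		lo = read_lo();	/* low word wrapped; re-read to match hi2 */
	return (((uint64_t)hi2 << 32) | lo);
}
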
*/ #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __linux__ #define asm __asm #endif #include "mlx4_ib.h" #include "user.h" enum { MLX4_IB_ACK_REQ_FREQ = 8, }; enum { MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, MLX4_IB_LINK_TYPE_IB = 0, MLX4_IB_LINK_TYPE_ETH = 1 }; enum { /* * Largest possible UD header: send with GRH and immediate * data plus 18 bytes for an Ethernet header with VLAN/802.1Q * tag. (LRH would only use 8 bytes, so Ethernet is the * biggest case) */ MLX4_IB_UD_HEADER_SIZE = 82, MLX4_IB_LSO_HEADER_SPARE = 128, }; enum { MLX4_IB_IBOE_ETHERTYPE = 0x8915 }; struct mlx4_ib_sqp { struct mlx4_ib_qp qp; int pkey_index; u32 qkey; u32 send_psn; struct ib_ud_header ud_header; u8 header_buf[MLX4_IB_UD_HEADER_SIZE]; }; enum { MLX4_IB_MIN_SQ_STRIDE = 6, MLX4_IB_CACHE_LINE_SIZE = 64, }; enum { MLX4_RAW_QP_MTU = 7, MLX4_RAW_QP_MSGMAX = 31, }; static const __be32 mlx4_ib_opcode[] = { [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), }; #ifndef wc_wmb #if defined(__i386__) #define wc_wmb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory") #elif defined(__x86_64__) #define wc_wmb() asm volatile("sfence" ::: "memory") #elif defined(__ia64__) #define wc_wmb() asm volatile("fwb" ::: "memory") #else #define wc_wmb() wmb() #endif #endif static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) { return container_of(mqp, struct mlx4_ib_sqp, qp); } static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { if (!mlx4_is_master(dev->dev)) return 0; return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX; } static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { int proxy_sqp = 0; int real_sqp = 0; int i; /* PPF or Native -- real SQP */ real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); if (real_sqp) return 1; /* VF or PF -- proxy SQP */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { proxy_sqp = 1; break; } } } return proxy_sqp; } /* used for INIT/CLOSE port logic */ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { int proxy_qp0 = 0; int real_qp0 = 0; int i; /* PPF or Native -- real QP0 */ real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); if (real_qp0) return 1; /* VF or PF -- proxy QP0 */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { if (qp->mqp.qpn == 
dev->dev->caps.qp0_proxy[i]) { proxy_qp0 = 1; break; } } } return proxy_qp0; } static void *get_wqe(struct mlx4_ib_qp *qp, int offset) { return mlx4_buf_offset(&qp->buf, offset); } static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); } static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); } /* * Stamp a SQ WQE so that it is invalid if prefetched by marking the * first four bytes of every 64 byte chunk with * 0x7FFFFFFF | (invalid_ownership_value << 31). * * When the max work request size is less than or equal to the WQE * basic block size, as an optimization, we can stamp all WQEs with * 0xffffffff, and skip the very first chunk of each WQE. */ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) { __be32 *wqe; int i; int s; int ind; void *buf; __be32 stamp; struct mlx4_wqe_ctrl_seg *ctrl; if (qp->sq_max_wqes_per_wr > 1) { s = roundup(size, 1U << qp->sq.wqe_shift); for (i = 0; i < s; i += 64) { ind = (i >> qp->sq.wqe_shift) + n; stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : cpu_to_be32(0xffffffff); buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); *wqe = stamp; } } else { ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); s = (ctrl->fence_size & 0x3f) << 4; for (i = 64; i < s; i += 64) { wqe = buf + i; *wqe = cpu_to_be32(0xffffffff); } } } static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) { struct mlx4_wqe_ctrl_seg *ctrl; struct mlx4_wqe_inline_seg *inl; void *wqe; int s; ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); s = sizeof(struct mlx4_wqe_ctrl_seg); if (qp->ibqp.qp_type == IB_QPT_UD) { struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl; struct mlx4_av *av = (struct mlx4_av *)dgram->av; memset(dgram, 0, sizeof *dgram); av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); s += sizeof(struct mlx4_wqe_datagram_seg); } /* Pad the remainder of the WQE with an inline data segment. */ if (size > s) { inl = wqe + s; inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl)); } ctrl->srcrb_flags = 0; ctrl->fence_size = size / 16; /* * Make sure descriptor is fully written before setting ownership bit * (because HW can start executing as soon as we do). */ wmb(); ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) | - (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); + (n & qp->sq.wqe_cnt ? 
cpu_to_be32(1U << 31) : 0); stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); } /* Post NOP WQE to prevent wrap-around in the middle of WR */ static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) { unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); if (unlikely(s < qp->sq_max_wqes_per_wr)) { post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); ind += s; } return ind; } static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) { struct ib_event event; struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; if (type == MLX4_EVENT_TYPE_PATH_MIG) to_mibqp(qp)->port = to_mibqp(qp)->alt_port; if (ibqp->event_handler) { event.device = ibqp->device; event.element.qp = ibqp; switch (type) { case MLX4_EVENT_TYPE_PATH_MIG: event.event = IB_EVENT_PATH_MIG; break; case MLX4_EVENT_TYPE_COMM_EST: event.event = IB_EVENT_COMM_EST; break; case MLX4_EVENT_TYPE_SQ_DRAINED: event.event = IB_EVENT_SQ_DRAINED; break; case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: event.event = IB_EVENT_QP_LAST_WQE_REACHED; break; case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: event.event = IB_EVENT_QP_FATAL; break; case MLX4_EVENT_TYPE_PATH_MIG_FAILED: event.event = IB_EVENT_PATH_MIG_ERR; break; case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: event.event = IB_EVENT_QP_REQ_ERR; break; case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: event.event = IB_EVENT_QP_ACCESS_ERR; break; default: pr_warn("Unexpected event type %d " "on QP %06x\n", type, qp->qpn); return; } ibqp->event_handler(&event, ibqp->qp_context); } } static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) { /* * UD WQEs must have a datagram segment. * RC and UC WQEs might have a remote address segment. * MLX WQEs need two extra inline data segments (for the UD * header and space for the ICRC). */ switch (type) { case MLX4_IB_QPT_UD: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg) + ((flags & MLX4_IB_QP_LSO) ? 
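
/*
 * Back in stamp_send_wqe()/post_nop_wqe() above: sq.wqe_cnt is a power
 * of two, so (index & wqe_cnt) flips each time the producer wraps the
 * send ring, and that parity, shifted into bit 31, is the ownership bit
 * the hardware compares against.  A compact model of the trick
 * (illustration only):
 */
#include <stdint.h>

static inline uint32_t
sq_owner_bit(unsigned int index, unsigned int wqe_cnt /* power of 2 */)
{
	/* 0 on even passes through the ring, bit 31 on odd passes */
	return ((index & wqe_cnt) ? (1U << 31) : 0);
}
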
MLX4_IB_LSO_HEADER_SPARE : 0); case MLX4_IB_QPT_PROXY_SMI_OWNER: case MLX4_IB_QPT_PROXY_SMI: case MLX4_IB_QPT_PROXY_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg) + 64; case MLX4_IB_QPT_TUN_SMI_OWNER: case MLX4_IB_QPT_TUN_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg); case MLX4_IB_QPT_UC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_RC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_masked_atomic_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + ALIGN(MLX4_IB_UD_HEADER_SIZE + DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE, MLX4_INLINE_ALIGN) * sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)) + ALIGN(4 + sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)); default: return sizeof (struct mlx4_wqe_ctrl_seg); } } static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) { /* Sanity check RQ size before proceeding */ if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) return -EINVAL; if (!has_rq) { if (cap->max_recv_wr) return -EINVAL; qp->rq.wqe_cnt = qp->rq.max_gs = 0; } else { /* HW requires >= 1 RQ entry with >= 1 gather entry */ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) return -EINVAL; qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); } /* leave userspace return values as they were, so as not to break ABI */ if (is_user) { cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; cap->max_recv_sge = qp->rq.max_gs; } else { cap->max_recv_wr = qp->rq.max_post = min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); cap->max_recv_sge = min(qp->rq.max_gs, min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)); } return 0; } static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) { int s; /* Sanity check SQ size before proceeding */ if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || cap->max_inline_data + send_wqe_overhead(type, qp->flags) + sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) return -EINVAL; /* * For MLX transport we need 2 extra S/G entries: * one for the header and one for the checksum at the end */ if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI || type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) && cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) return -EINVAL; s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + send_wqe_overhead(type, qp->flags); if (s > dev->dev->caps.max_sq_desc_sz) return -EINVAL; /* * Hermon supports shrinking WQEs, such that a single work * request can include multiple units of 1 << wqe_shift. This * way, work requests can differ in size, and do not have to * be a power of 2 in size, saving memory and speeding up send * WR posting. 
Unfortunately, if we do this then the * wqe_index field in CQEs can't be used to look up the WR ID * anymore, so we do this only if selective signaling is off. * * Further, on 32-bit platforms, we can't use vmap() to make * the QP buffer virtually contiguous. Thus we have to use * constant-sized WRs to make sure a WR is always fully within * a single page-sized chunk. * * Finally, we use NOP work requests to pad the end of the * work queue, to avoid wrap-around in the middle of WR. We * set NEC bit to avoid getting completions with error for * these NOP WRs, but since NEC is only supported starting * with firmware 2.2.232, we use constant-sized WRs for older * firmware. * * And, since MLX QPs only support SEND, we use constant-sized * WRs in this case. * * We look for the smallest value of wqe_shift such that the * resulting number of wqes does not exceed device * capabilities. * * We set WQE size to at least 64 bytes, this way stamping * invalidates each WQE. */ if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC && qp->sq_signal_bits && BITS_PER_LONG == 64 && type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI && !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) qp->sq.wqe_shift = ilog2(64); else qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); for (;;) { qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); /* * We need to leave 2 KB + 1 WR of headroom in the SQ to * allow HW to prefetch. */ qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * qp->sq_max_wqes_per_wr + qp->sq_spare_wqes); if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) break; if (qp->sq_max_wqes_per_wr <= 1) return -EINVAL; ++qp->sq.wqe_shift; } qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - send_wqe_overhead(type, qp->flags)) / sizeof (struct mlx4_wqe_data_seg); qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); if (qp->rq.wqe_shift > qp->sq.wqe_shift) { qp->rq.offset = 0; qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; } else { qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; qp->sq.offset = 0; } cap->max_send_wr = qp->sq.max_post = (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; cap->max_send_sge = min(qp->sq.max_gs, min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)); qp->max_inline_data = cap->max_inline_data; return 0; } static int set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) { /* Sanity check SQ size before proceeding */ if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes || ucmd->log_sq_stride > ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) || ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) return -EINVAL; qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; qp->sq.wqe_shift = ucmd->log_sq_stride; qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); return 0; } static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) { int i; qp->sqp_proxy_rcv = kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt, GFP_KERNEL); if (!qp->sqp_proxy_rcv) return -ENOMEM; for (i = 0; i < qp->rq.wqe_cnt; i++) { qp->sqp_proxy_rcv[i].addr = kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr), GFP_KERNEL); if (!qp->sqp_proxy_rcv[i].addr) goto err; qp->sqp_proxy_rcv[i].map = ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, sizeof (struct mlx4_ib_proxy_sqp_hdr), 
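
/*
 * A standalone rerun of the wqe_shift search loop in set_kernel_sq_size()
 * above, with made-up numbers (a 208-byte WQE, 4096 work requests,
 * max_wqes = 16384); unlike the driver, this sketch simply stops rather
 * than failing when wqes-per-WR reaches 1:
 */
#include <stdio.h>

static unsigned int
round_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return (r);
}

int
main(void)
{
	unsigned int s = 208, max_send_wr = 4096, max_wqes = 16384;
	unsigned int shift, per_wr, spare, cnt;

	for (shift = 6;; shift++) {	/* 1 << 6 = 64-byte basic block */
		per_wr = (s + (1U << shift) - 1) >> shift; /* DIV_ROUND_UP */
		spare = (2048 >> shift) + per_wr; /* 2 KB + 1 WR headroom */
		cnt = round_pow2(max_send_wr * per_wr + spare);
		if (cnt <= max_wqes || per_wr <= 1)
			break;
	}
	printf("wqe_shift=%u wqes_per_wr=%u wqe_cnt=%u\n", shift, per_wr, cnt);
	return (0);
}
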
DMA_FROM_DEVICE); } return 0; err: while (i > 0) { --i; ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); kfree(qp->sqp_proxy_rcv[i].addr); } kfree(qp->sqp_proxy_rcv); qp->sqp_proxy_rcv = NULL; return -ENOMEM; } static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) { int i; for (i = 0; i < qp->rq.wqe_cnt; i++) { ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); kfree(qp->sqp_proxy_rcv[i].addr); } kfree(qp->sqp_proxy_rcv); } static int qp_has_rq(struct ib_qp_init_attr *attr) { if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) return 0; return !attr->srq; } #ifdef __linux__ static int init_qpg_parent(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *pqp, struct ib_qp_init_attr *attr, int *qpn) { struct mlx4_ib_qpg_data *qpg_data; int tss_num, rss_num; int tss_align_num, rss_align_num; int tss_base, rss_base = 0; int err; /* Parent is part of the TSS range (in SW TSS ARP is sent via parent) */ tss_num = 1 + attr->parent_attrib.tss_child_count; tss_align_num = roundup_pow_of_two(tss_num); rss_num = attr->parent_attrib.rss_child_count; rss_align_num = roundup_pow_of_two(rss_num); if (rss_num > 1) { /* RSS is requested */ if (!(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS)) return -ENOSYS; if (rss_align_num > dev->dev->caps.max_rss_tbl_sz) return -EINVAL; /* We must work with power of two */ attr->parent_attrib.rss_child_count = rss_align_num; } qpg_data = kzalloc(sizeof *qpg_data, GFP_KERNEL); if (!qpg_data) return -ENOMEM; if(pqp->flags & MLX4_IB_QP_NETIF) err = mlx4_ib_steer_qp_alloc(dev, tss_align_num, &tss_base); else err = mlx4_qp_reserve_range(dev->dev, tss_align_num, tss_align_num, &tss_base, 1); if (err) goto err1; if (tss_num > 1) { u32 alloc = BITS_TO_LONGS(tss_align_num) * sizeof(long); qpg_data->tss_bitmap = kzalloc(alloc, GFP_KERNEL); if (qpg_data->tss_bitmap == NULL) { err = -ENOMEM; goto err2; } bitmap_fill(qpg_data->tss_bitmap, tss_num); /* Note parent takes first index */ clear_bit(0, qpg_data->tss_bitmap); } if (rss_num > 1) { u32 alloc = BITS_TO_LONGS(rss_align_num) * sizeof(long); err = mlx4_qp_reserve_range(dev->dev, rss_align_num, 1, &rss_base, 0); if (err) goto err3; qpg_data->rss_bitmap = kzalloc(alloc, GFP_KERNEL); if (qpg_data->rss_bitmap == NULL) { err = -ENOMEM; goto err4; } bitmap_fill(qpg_data->rss_bitmap, rss_align_num); } qpg_data->tss_child_count = attr->parent_attrib.tss_child_count; qpg_data->rss_child_count = attr->parent_attrib.rss_child_count; qpg_data->qpg_parent = pqp; qpg_data->qpg_tss_mask_sz = ilog2(tss_align_num); qpg_data->tss_qpn_base = tss_base; qpg_data->rss_qpn_base = rss_base; pqp->qpg_data = qpg_data; *qpn = tss_base; return 0; err4: mlx4_qp_release_range(dev->dev, rss_base, rss_align_num); err3: if (tss_num > 1) kfree(qpg_data->tss_bitmap); err2: if(pqp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_free(dev, tss_base, tss_align_num); else mlx4_qp_release_range(dev->dev, tss_base, tss_align_num); err1: kfree(qpg_data); return err; } static void free_qpg_parent(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *pqp) { struct mlx4_ib_qpg_data *qpg_data = pqp->qpg_data; int align_num; if (qpg_data->tss_child_count > 1) kfree(qpg_data->tss_bitmap); align_num = roundup_pow_of_two(1 + qpg_data->tss_child_count); if(pqp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_free(dev, qpg_data->tss_qpn_base, align_num); else mlx4_qp_release_range(dev->dev, qpg_data->tss_qpn_base, align_num); if (qpg_data->rss_child_count 
> 1) { kfree(qpg_data->rss_bitmap); align_num = roundup_pow_of_two(qpg_data->rss_child_count); mlx4_qp_release_range(dev->dev, qpg_data->rss_qpn_base, align_num); } kfree(qpg_data); } static int alloc_qpg_qpn(struct ib_qp_init_attr *init_attr, struct mlx4_ib_qp *pqp, int *qpn) { struct mlx4_ib_qp *mqp = to_mqp(init_attr->qpg_parent); struct mlx4_ib_qpg_data *qpg_data = mqp->qpg_data; u32 idx, old; switch (init_attr->qpg_type) { case IB_QPG_CHILD_TX: if (qpg_data->tss_child_count == 0) return -EINVAL; do { /* Parent took index 0 */ idx = find_first_bit(qpg_data->tss_bitmap, qpg_data->tss_child_count + 1); if (idx >= qpg_data->tss_child_count + 1) return -ENOMEM; old = test_and_clear_bit(idx, qpg_data->tss_bitmap); } while (old == 0); idx += qpg_data->tss_qpn_base; break; case IB_QPG_CHILD_RX: if (qpg_data->rss_child_count == 0) return -EINVAL; do { idx = find_first_bit(qpg_data->rss_bitmap, qpg_data->rss_child_count); if (idx >= qpg_data->rss_child_count) return -ENOMEM; old = test_and_clear_bit(idx, qpg_data->rss_bitmap); } while (old == 0); idx += qpg_data->rss_qpn_base; break; default: return -EINVAL; } pqp->qpg_data = qpg_data; *qpn = idx; return 0; } static void free_qpg_qpn(struct mlx4_ib_qp *mqp, int qpn) { struct mlx4_ib_qpg_data *qpg_data = mqp->qpg_data; switch (mqp->qpg_type) { case IB_QPG_CHILD_TX: /* Do range check */ qpn -= qpg_data->tss_qpn_base; set_bit(qpn, qpg_data->tss_bitmap); break; case IB_QPG_CHILD_RX: qpn -= qpg_data->rss_qpn_base; set_bit(qpn, qpg_data->rss_bitmap); break; default: /* error */ pr_warn("wrong qpg type (%d)\n", mqp->qpg_type); break; } } #endif static int alloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *attr, int *qpn) { int err = 0; switch (attr->qpg_type) { case IB_QPG_NONE: /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE * BlueFlame setup flow wrongly causes VLAN insertion. 
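
/*
 * alloc_qpg_qpn() above claims a QPN with a lock-free find-and-claim
 * loop: find_first_bit() picks a candidate and test_and_clear_bit()
 * claims it atomically, retrying if another thread won the race.  The
 * same pattern over a single word, sketched with C11 atomics and a
 * GCC/Clang builtin (illustration only):
 */
#include <stdatomic.h>

/* Returns the claimed bit index, or -1 if no bit is free. */
static int
claim_first_bit(atomic_ulong *map)
{
	unsigned long v, bit;

	for (;;) {
		v = atomic_load(map);
		if (v == 0)
			return (-1);
		bit = v & ~(v - 1);	/* lowest set bit */
		/* Claim by clearing; retry if someone beat us to it. */
		if (atomic_fetch_and(map, ~bit) & bit)
			return (__builtin_ctzl(bit));
	}
}
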
*/ if (attr->qp_type == IB_QPT_RAW_PACKET) { err = mlx4_qp_reserve_range(dev->dev, 1, 1, qpn, 1); } else { if(qp->flags & MLX4_IB_QP_NETIF) err = mlx4_ib_steer_qp_alloc(dev, 1, qpn); else err = mlx4_qp_reserve_range(dev->dev, 1, 1, qpn, 0); } break; case IB_QPG_PARENT: #ifdef __linux__ err = init_qpg_parent(dev, qp, attr, qpn); #endif break; case IB_QPG_CHILD_TX: case IB_QPG_CHILD_RX: #ifdef __linux__ err = alloc_qpg_qpn(attr, qp, qpn); #endif break; default: qp->qpg_type = IB_QPG_NONE; err = -EINVAL; break; } if (err) return err; qp->qpg_type = attr->qpg_type; return 0; } static void free_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, enum ib_qpg_type qpg_type, int qpn) { switch (qpg_type) { case IB_QPG_NONE: if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_free(dev, qpn, 1); else mlx4_qp_release_range(dev->dev, qpn, 1); break; case IB_QPG_PARENT: #ifdef __linux__ free_qpg_parent(dev, qp); #endif break; case IB_QPG_CHILD_TX: case IB_QPG_CHILD_RX: #ifdef __linux__ free_qpg_qpn(qp, qpn); #endif break; default: break; } } /* Revert allocation on create_qp_common */ static void unalloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *attr, int qpn) { free_qpn_common(dev, qp, attr->qpg_type, qpn); } static void release_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { free_qpn_common(dev, qp, qp->qpg_type, qp->mqp.qpn); } static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp) { int qpn; int err; struct mlx4_ib_sqp *sqp; struct mlx4_ib_qp *qp; enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; #ifndef __linux__ init_attr->qpg_type = IB_QPG_NONE; #endif /* When tunneling special qps, we use a plain UD qp */ if (sqpn) { if (mlx4_is_mfunc(dev->dev) && (!mlx4_is_master(dev->dev) || !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) { if (init_attr->qp_type == IB_QPT_GSI) qp_type = MLX4_IB_QPT_PROXY_GSI; else if (mlx4_is_master(dev->dev)) qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER; else qp_type = MLX4_IB_QPT_PROXY_SMI; } qpn = sqpn; /* add extra sg entry for tunneling */ init_attr->cap.max_recv_sge++; } else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) { struct mlx4_ib_qp_tunnel_init_attr *tnl_init = container_of(init_attr, struct mlx4_ib_qp_tunnel_init_attr, init_attr); if ((tnl_init->proxy_qp_type != IB_QPT_SMI && tnl_init->proxy_qp_type != IB_QPT_GSI) || !mlx4_is_master(dev->dev)) return -EINVAL; if (tnl_init->proxy_qp_type == IB_QPT_GSI) qp_type = MLX4_IB_QPT_TUN_GSI; else if (tnl_init->slave == mlx4_master_func_num(dev->dev)) qp_type = MLX4_IB_QPT_TUN_SMI_OWNER; else qp_type = MLX4_IB_QPT_TUN_SMI; /* we are definitely in the PPF here, since we are creating * tunnel QPs. base_tunnel_sqpn is therefore valid. 
*/ qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave + tnl_init->proxy_qp_type * 2 + tnl_init->port - 1; sqpn = qpn; } if (!*caller_qp) { if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI || (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL); if (!sqp) return -ENOMEM; qp = &sqp->qp; qp->pri.vid = qp->alt.vid = 0xFFFF; } else { qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL); if (!qp) return -ENOMEM; qp->pri.vid = qp->alt.vid = 0xFFFF; } } else qp = *caller_qp; qp->mlx4_ib_qp_type = qp_type; mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); INIT_LIST_HEAD(&qp->gid_list); INIT_LIST_HEAD(&qp->steering_rules); INIT_LIST_HEAD(&qp->rules_list); qp->state = IB_QPS_RESET; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); if (err) goto err; if (pd->uobject) { struct mlx4_ib_create_qp ucmd; int shift; int n; if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { err = -EFAULT; goto err; } qp->sq_no_prefetch = ucmd.sq_no_prefetch; err = set_user_sq_size(dev, qp, &ucmd); if (err) goto err; qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, qp->buf_size, 0, 0); if (IS_ERR(qp->umem)) { err = PTR_ERR(qp->umem); goto err; } n = ib_umem_page_count(qp->umem); shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); if (err) goto err_buf; err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); if (err) goto err_mtt; if (qp_has_rq(init_attr)) { err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), ucmd.db_addr, &qp->db); if (err) goto err_mtt; } } else { qp->sq_no_prefetch = 0; if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) qp->flags |= MLX4_IB_QP_LSO; if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP && dev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && !mlx4_is_mfunc(dev->dev)) qp->flags |= MLX4_IB_QP_NETIF; err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); if (err) goto err; if (qp_has_rq(init_attr)) { err = mlx4_db_alloc(dev->dev, &qp->db, 0); if (err) goto err; *qp->db.db = 0; } if (qp->max_inline_data) { err = mlx4_bf_alloc(dev->dev, &qp->bf, 0); if (err) { pr_debug("failed to allocate blue flame" " register (%d)", err); qp->bf.uar = &dev->priv_uar; } } else qp->bf.uar = &dev->priv_uar; if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) { err = -ENOMEM; goto err_db; } err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, &qp->mtt); if (err) goto err_buf; err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); if (err) goto err_mtt; qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL); qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL); if (!qp->sq.wrid || !qp->rq.wrid) { err = -ENOMEM; goto err_wrid; } } if (sqpn) { if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { if (alloc_proxy_bufs(pd->device, qp)) { err = -ENOMEM; goto err_wrid; } } } else { err = alloc_qpn_common(dev, qp, init_attr, &qpn); if (err) goto err_proxy; } err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); if (err) goto err_qpn; if (init_attr->qp_type == IB_QPT_XRC_TGT) qp->mqp.qpn |= 
(1 << 23); /* * Hardware wants QPN written in big-endian order (after * shifting) for send doorbell. Precompute this value to save * a little bit when posting sends. */ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); qp->mqp.event = mlx4_ib_qp_event; if (!*caller_qp) *caller_qp = qp; return 0; err_qpn: unalloc_qpn_common(dev, qp, init_attr, qpn); err_proxy: if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) free_proxy_bufs(pd->device, qp); err_wrid: if (pd->uobject) { if (qp_has_rq(init_attr)) mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); } else { kfree(qp->sq.wrid); kfree(qp->rq.wrid); } err_mtt: mlx4_mtt_cleanup(dev->dev, &qp->mtt); err_buf: if (pd->uobject) ib_umem_release(qp->umem); else mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); err_db: if (!pd->uobject && qp_has_rq(init_attr)) mlx4_db_free(dev->dev, &qp->db); if (qp->max_inline_data) mlx4_bf_free(dev->dev, &qp->bf); err: if (!*caller_qp) kfree(qp); return err; } static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state) { switch (state) { case IB_QPS_RESET: return MLX4_QP_STATE_RST; case IB_QPS_INIT: return MLX4_QP_STATE_INIT; case IB_QPS_RTR: return MLX4_QP_STATE_RTR; case IB_QPS_RTS: return MLX4_QP_STATE_RTS; case IB_QPS_SQD: return MLX4_QP_STATE_SQD; case IB_QPS_SQE: return MLX4_QP_STATE_SQER; case IB_QPS_ERR: return MLX4_QP_STATE_ERR; default: return -1; } } static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { if (send_cq == recv_cq) { spin_lock_irq(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { spin_lock_irq(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) __releases(&send_cq->lock) __releases(&recv_cq->lock) { if (send_cq == recv_cq) { __release(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else { spin_unlock(&send_cq->lock); spin_unlock_irq(&recv_cq->lock); } } static void del_gid_entries(struct mlx4_ib_qp *qp) { struct mlx4_ib_gid_entry *ge, *tmp; list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { list_del(&ge->list); kfree(ge); } } static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) { if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); else return to_mpd(qp->ibqp.pd); } static void get_cqs(struct mlx4_ib_qp *qp, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) { switch (qp->ibqp.qp_type) { case IB_QPT_XRC_TGT: *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); *recv_cq = *send_cq; break; case IB_QPT_XRC_INI: *send_cq = to_mcq(qp->ibqp.send_cq); *recv_cq = *send_cq; break; default: *send_cq = to_mcq(qp->ibqp.send_cq); *recv_cq = to_mcq(qp->ibqp.recv_cq); break; } } static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, int is_user) { struct mlx4_ib_cq *send_cq, *recv_cq; if (qp->state != IB_QPS_RESET) { if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) pr_warn("modify QP %06x to RESET failed.\n", qp->mqp.qpn); if (qp->pri.smac) { mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); qp->pri.smac = 0; } if (qp->alt.smac) { mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); qp->alt.smac = 0; } if 
(qp->pri.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); qp->pri.vid = 0xFFFF; qp->pri.candidate_vid = 0xFFFF; qp->pri.update_vid = 0; } if (qp->alt.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); qp->alt.vid = 0xFFFF; qp->alt.candidate_vid = 0xFFFF; qp->alt.update_vid = 0; } } get_cqs(qp, &send_cq, &recv_cq); mlx4_ib_lock_cqs(send_cq, recv_cq); if (!is_user) { __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (send_cq != recv_cq) __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); } mlx4_qp_remove(dev->dev, &qp->mqp); mlx4_ib_unlock_cqs(send_cq, recv_cq); mlx4_qp_free(dev->dev, &qp->mqp); if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) release_qpn_common(dev, qp); mlx4_mtt_cleanup(dev->dev, &qp->mtt); if (is_user) { if (qp->rq.wqe_cnt) mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), &qp->db); ib_umem_release(qp->umem); } else { kfree(qp->sq.wrid); kfree(qp->rq.wrid); if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) free_proxy_bufs(&dev->ib_dev, qp); mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); if (qp->max_inline_data) mlx4_bf_free(dev->dev, &qp->bf); if (qp->rq.wqe_cnt) mlx4_db_free(dev->dev, &qp->db); } del_gid_entries(qp); } static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr) { /* Native or PPF */ if (!mlx4_is_mfunc(dev->dev) || (mlx4_is_master(dev->dev) && attr->create_flags & MLX4_IB_SRIOV_SQP)) { return dev->dev->phys_caps.base_sqpn + (attr->qp_type == IB_QPT_SMI ? 0 : 2) + attr->port_num - 1; } /* PF or VF -- creating proxies */ if (attr->qp_type == IB_QPT_SMI) return dev->dev->caps.qp0_proxy[attr->port_num - 1]; else return dev->dev->caps.qp1_proxy[attr->port_num - 1]; } #ifdef __linux__ static int check_qpg_attr(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr) { if (attr->qpg_type == IB_QPG_NONE) return 0; if (attr->qp_type != IB_QPT_UD) return -EINVAL; if (attr->qpg_type == IB_QPG_PARENT) { if (attr->parent_attrib.tss_child_count == 1) return -EINVAL; /* Doesn't make sense */ if (attr->parent_attrib.rss_child_count == 1) return -EINVAL; /* Doesn't make sense */ if ((attr->parent_attrib.tss_child_count == 0) && (attr->parent_attrib.rss_child_count == 0)) /* Should be called with IB_QPG_NONE */ return -EINVAL; if (attr->parent_attrib.rss_child_count > 1) { int rss_align_num; if (!(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS)) return -ENOSYS; rss_align_num = roundup_pow_of_two( attr->parent_attrib.rss_child_count); if (rss_align_num > dev->dev->caps.max_rss_tbl_sz) return -EINVAL; } } else { struct mlx4_ib_qpg_data *qpg_data; if (attr->qpg_parent == NULL) return -EINVAL; if (IS_ERR(attr->qpg_parent)) return -EINVAL; qpg_data = to_mqp(attr->qpg_parent)->qpg_data; if (qpg_data == NULL) return -EINVAL; if (attr->qpg_type == IB_QPG_CHILD_TX && !qpg_data->tss_child_count) return -EINVAL; if (attr->qpg_type == IB_QPG_CHILD_RX && !qpg_data->rss_child_count) return -EINVAL; } return 0; } #endif #define RESERVED_FLAGS_MASK ((((unsigned int)IB_QP_CREATE_RESERVED_END - 1) | IB_QP_CREATE_RESERVED_END) \ & ~(IB_QP_CREATE_RESERVED_START - 1)) static enum mlx4_ib_qp_flags to_mlx4_ib_qp_flags(enum ib_qp_create_flags ib_qp_flags) { enum mlx4_ib_qp_flags mlx4_ib_qp_flags = 0; if (ib_qp_flags & IB_QP_CREATE_IPOIB_UD_LSO) mlx4_ib_qp_flags |= MLX4_IB_QP_LSO; if (ib_qp_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) mlx4_ib_qp_flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; if (ib_qp_flags &
IB_QP_CREATE_NETIF_QP) mlx4_ib_qp_flags |= MLX4_IB_QP_NETIF; /* reserved flags */ mlx4_ib_qp_flags |= (ib_qp_flags & RESERVED_FLAGS_MASK); return mlx4_ib_qp_flags; } struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct mlx4_ib_qp *qp = NULL; int err; u16 xrcdn = 0; enum mlx4_ib_qp_flags mlx4_qp_flags = to_mlx4_ib_qp_flags(init_attr->create_flags); struct ib_device *device; /* see ib_core::ib_create_qp same handling */ device = pd ? pd->device : init_attr->xrcd->device; /* * We only support LSO, vendor flag1, and multicast loopback blocking, * and only for kernel UD QPs. */ if (mlx4_qp_flags & ~(MLX4_IB_QP_LSO | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK | MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP | MLX4_IB_QP_NETIF)) return ERR_PTR(-EINVAL); if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { if (init_attr->qp_type != IB_QPT_UD) return ERR_PTR(-EINVAL); } if (init_attr->create_flags && (udata || ((mlx4_qp_flags & ~MLX4_IB_SRIOV_SQP) && init_attr->qp_type != IB_QPT_UD) || ((mlx4_qp_flags & MLX4_IB_SRIOV_SQP) && init_attr->qp_type > IB_QPT_GSI))) return ERR_PTR(-EINVAL); #ifdef __linux__ err = check_qpg_attr(to_mdev(device), init_attr); if (err) return ERR_PTR(err); #endif switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: pd = to_mxrcd(init_attr->xrcd)->pd; xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; /* fall through */ case IB_QPT_XRC_INI: if (!(to_mdev(device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return ERR_PTR(-ENOSYS); init_attr->recv_cq = init_attr->send_cq; /* fall through */ case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->pri.vid = qp->alt.vid = 0xFFFF; /* fall through */ case IB_QPT_UD: { err = create_qp_common(to_mdev(device), pd, init_attr, udata, 0, &qp); if (err) { kfree(qp); return ERR_PTR(err); } qp->ibqp.qp_num = qp->mqp.qpn; qp->xrcdn = xrcdn; break; } case IB_QPT_SMI: case IB_QPT_GSI: { /* Userspace is not allowed to create special QPs: */ if (udata) return ERR_PTR(-EINVAL); err = create_qp_common(to_mdev(device), pd, init_attr, udata, get_sqp_num(to_mdev(device), init_attr), &qp); if (err) return ERR_PTR(err); qp->port = init_attr->port_num; qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; break; } default: /* Don't support raw QPs */ return ERR_PTR(-EINVAL); } return &qp->ibqp; } int mlx4_ib_destroy_qp(struct ib_qp *qp) { struct mlx4_ib_dev *dev = to_mdev(qp->device); struct mlx4_ib_qp *mqp = to_mqp(qp); struct mlx4_ib_pd *pd; if (is_qp0(dev, mqp)) mlx4_CLOSE_PORT(dev->dev, mqp->port); pd = get_pd(mqp); destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); if (is_sqp(dev, mqp)) kfree(to_msqp(mqp)); else kfree(mqp); return 0; } static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type) { switch (type) { case MLX4_IB_QPT_RC: return MLX4_QP_ST_RC; case MLX4_IB_QPT_UC: return MLX4_QP_ST_UC; case MLX4_IB_QPT_UD: return MLX4_QP_ST_UD; case MLX4_IB_QPT_XRC_INI: case MLX4_IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC; case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: case MLX4_IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX; case MLX4_IB_QPT_PROXY_SMI_OWNER: case MLX4_IB_QPT_TUN_SMI_OWNER: return (mlx4_is_mfunc(dev->dev) ? MLX4_QP_ST_MLX : -1); case MLX4_IB_QPT_PROXY_SMI: case MLX4_IB_QPT_TUN_SMI: case MLX4_IB_QPT_PROXY_GSI: case MLX4_IB_QPT_TUN_GSI: return (mlx4_is_mfunc(dev->dev) ? 
MLX4_QP_ST_UD : -1); default: return -1; } } static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) { u8 dest_rd_atomic; u32 access_flags; u32 hw_access_flags = 0; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) dest_rd_atomic = attr->max_dest_rd_atomic; else dest_rd_atomic = qp->resp_depth; if (attr_mask & IB_QP_ACCESS_FLAGS) access_flags = attr->qp_access_flags; else access_flags = qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) hw_access_flags |= MLX4_QP_BIT_RRE; if (access_flags & IB_ACCESS_REMOTE_ATOMIC) hw_access_flags |= MLX4_QP_BIT_RAE; if (access_flags & IB_ACCESS_REMOTE_WRITE) hw_access_flags |= MLX4_QP_BIT_RWE; return cpu_to_be32(hw_access_flags); } static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) { if (attr_mask & IB_QP_PKEY_INDEX) sqp->pkey_index = attr->pkey_index; if (attr_mask & IB_QP_QKEY) sqp->qkey = attr->qkey; if (attr_mask & IB_QP_SQ_PSN) sqp->send_psn = attr->sq_psn; } static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port) { path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6); } static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, struct mlx4_ib_qp *qp, struct mlx4_qp_path *path, u8 port, int is_primary) { struct net_device *ndev; int err; int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_ETHERNET; u8 mac[6]; int is_mcast; u16 vlan_tag; int vidx; int smac_index; u64 u64_mac; u8 *smac; struct mlx4_roce_smac_vlan_info *smac_info; path->grh_mylmc = ah->src_path_bits & 0x7f; path->rlid = cpu_to_be16(ah->dlid); if (ah->static_rate) { path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET; while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && !(1 << path->static_rate & dev->dev->caps.stat_rate_support)) --path->static_rate; } else path->static_rate = 0; if (ah->ah_flags & IB_AH_GRH) { if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) { pr_err("sgid_index (%u) too large. max is %d\n", ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1); return -1; } path->grh_mylmc |= 1 << 7; path->mgid_index = ah->grh.sgid_index; path->hop_limit = ah->grh.hop_limit; path->tclass_flowlabel = cpu_to_be32((ah->grh.traffic_class << 20) | (ah->grh.flow_label)); memcpy(path->rgid, ah->grh.dgid.raw, 16); } if (is_eth) { if (!(ah->ah_flags & IB_AH_GRH)) return -1; path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((port - 1) << 6) | ((ah->sl & 7) << 3); if (is_primary) smac_info = &qp->pri; else smac_info = &qp->alt; vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]); if (vlan_tag < 0x1000) { if (smac_info->vid < 0x1000) { /* both valid vlan ids */ if (smac_info->vid != vlan_tag) { /* different VIDs. 
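The VLAN on this path has changed, so a new VLAN index is registered * here and recorded only as a candidate; the old one is torn down in * the update_vid handling at the end of __mlx4_ib_modify_qp once the * transition succeeds: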
unreg old and reg new */ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); if (err) return err; smac_info->candidate_vid = vlan_tag; smac_info->candidate_vlan_index = vidx; smac_info->candidate_vlan_port = port; smac_info->update_vid = 1; path->vlan_index = vidx; path->fl = 1 << 6; } else { path->vlan_index = smac_info->vlan_index; path->fl = 1 << 6; } } else { /* no current vlan tag in qp */ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); if (err) return err; smac_info->candidate_vid = vlan_tag; smac_info->candidate_vlan_index = vidx; smac_info->candidate_vlan_port = port; smac_info->update_vid = 1; path->vlan_index = vidx; path->fl = 1 << 6; } } else { /* have current vlan tag. unregister it at modify-qp success */ if (smac_info->vid < 0x1000) { smac_info->candidate_vid = 0xFFFF; smac_info->update_vid = 1; } } err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port); if (err) return err; /* get smac_index for RoCE use. * If no smac was yet assigned, register one. * If one was already assigned, but the new mac differs, * unregister the old one and register the new one. */ spin_lock(&dev->iboe.lock); ndev = dev->iboe.netdevs[port - 1]; if (ndev) { #ifdef __linux__ smac = ndev->dev_addr; /* fixme: cache this value */ #else smac = IF_LLADDR(ndev); /* fixme: cache this value */ #endif u64_mac = mlx4_mac_to_u64(smac); } else u64_mac = dev->dev->caps.def_mac[port]; spin_unlock(&dev->iboe.lock); if (!smac_info->smac || smac_info->smac != u64_mac) { /* register candidate now, unreg if needed, after success */ smac_index = mlx4_register_mac(dev->dev, port, u64_mac); if (smac_index >= 0) { smac_info->candidate_smac_index = smac_index; smac_info->candidate_smac = u64_mac; smac_info->candidate_smac_port = port; } else return -EINVAL; } else smac_index = smac_info->smac_index; memcpy(path->dmac, mac, 6); path->ackto = MLX4_IB_LINK_TYPE_ETH; /* put MAC table smac index for IBoE */ path->grh_mylmc = (u8) (smac_index) | 0x80 ; } else path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((port - 1) << 6) | ((ah->sl & 0xf) << 2); return 0; } static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { struct mlx4_ib_gid_entry *ge, *tmp; list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { ge->added = 1; ge->port = qp->port; } } } static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_qp_context *context) { struct net_device *ndev; u64 u64_mac; u8 *smac; int smac_index; ndev = dev->iboe.netdevs[qp->port - 1]; if (ndev) { #ifdef __linux__ smac = ndev->dev_addr; /* fixme: cache this value */ #else smac = IF_LLADDR(ndev); /* fixme: cache this value */ #endif u64_mac = mlx4_mac_to_u64(smac); } else u64_mac = dev->dev->caps.def_mac[qp->port]; context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); if (!qp->pri.smac) { smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); if (smac_index >= 0) { qp->pri.candidate_smac_index = smac_index; qp->pri.candidate_smac = u64_mac; qp->pri.candidate_smac_port = qp->port; context->pri_path.grh_mylmc = 0x80 | (u8) smac_index; } else return -ENOENT; } return 0; } static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_ib_pd *pd; struct mlx4_ib_cq *send_cq, *recv_cq; struct mlx4_qp_context *context; enum 
mlx4_qp_optpar optpar = 0; int sqd_event; int steer_qp = 0; int err = -EINVAL; int is_eth = -1; context = kzalloc(sizeof *context, GFP_KERNEL); if (!context) return -ENOMEM; context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); else { optpar |= MLX4_QP_OPTPAR_PM_STATE; switch (attr->path_mig_state) { case IB_MIG_MIGRATED: context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); break; case IB_MIG_REARM: context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11); break; case IB_MIG_ARMED: context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11); break; } } if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; else if (ibqp->qp_type == IB_QPT_RAW_PACKET) context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX; else if (ibqp->qp_type == IB_QPT_UD) { if (qp->flags & MLX4_IB_QP_LSO) context->mtu_msgmax = (IB_MTU_4096 << 5) | ilog2(dev->dev->caps.max_gso_sz); else context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; } else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { pr_err("path MTU (%u) is invalid\n", attr->path_mtu); goto out; } context->mtu_msgmax = (attr->path_mtu << 5) | ilog2(dev->dev->caps.max_msg_sz); } if (qp->rq.wqe_cnt) context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; context->rq_size_stride |= qp->rq.wqe_shift - 4; if (qp->sq.wqe_cnt) context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; context->sq_size_stride |= qp->sq.wqe_shift - 4; if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { context->sq_size_stride |= !!qp->sq_no_prefetch << 7; context->xrcd = cpu_to_be32((u32) qp->xrcdn); context->param3 |= cpu_to_be32(1 << 30); } if (qp->ibqp.uobject) context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); else context->usr_page = cpu_to_be32(qp->bf.uar->index); if (attr_mask & IB_QP_DEST_QPN) context->remote_qpn = cpu_to_be32(attr->dest_qp_num); if (attr_mask & IB_QP_PORT) { if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD && !(attr_mask & IB_QP_AV)) { mlx4_set_sched(&context->pri_path, attr->port_num); optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE; } } if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { if (dev->counters[qp->port - 1] != -1) { context->pri_path.counter_index = dev->counters[qp->port - 1]; optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX; } else context->pri_path.counter_index = 0xff; if (qp->flags & MLX4_IB_QP_NETIF && (qp->qpg_type == IB_QPG_NONE || qp->qpg_type == IB_QPG_PARENT)) { mlx4_ib_steer_qp_reg(dev, qp, 1); steer_qp = 1; } } if (attr_mask & IB_QP_PKEY_INDEX) { if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) context->pri_path.disable_pkey_check = 0x40; context->pri_path.pkey_index = attr->pkey_index; optpar |= MLX4_QP_OPTPAR_PKEY_INDEX; } if (attr_mask & IB_QP_AV) { if (mlx4_set_path(dev, &attr->ah_attr, qp, &context->pri_path, attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port, 1)) goto out; optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE); } if (attr_mask & IB_QP_TIMEOUT) { context->pri_path.ackto |= attr->timeout << 3; optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT; } if (attr_mask & IB_QP_ALT_PATH) { if (attr->alt_port_num == 0 || attr->alt_port_num > dev->dev->caps.num_ports) goto out; if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len[attr->alt_port_num]) goto out; if (mlx4_set_path(dev, &attr->alt_ah_attr, qp, &context->alt_path, attr->alt_port_num, 0)) goto out; context->alt_path.pkey_index = attr->alt_pkey_index; context->alt_path.ackto = attr->alt_timeout << 3; optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; } pd = get_pd(qp); get_cqs(qp, &send_cq, &recv_cq); context->pd = cpu_to_be32(pd->pdn); context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); /* Set "fast registration enabled" for all kernel QPs */ if (!qp->ibqp.uobject) context->params1 |= cpu_to_be32(1 << 11); if (attr_mask & IB_QP_RNR_RETRY) { context->params1 |= cpu_to_be32(attr->rnr_retry << 13); optpar |= MLX4_QP_OPTPAR_RNR_RETRY; } if (attr_mask & IB_QP_RETRY_CNT) { context->params1 |= cpu_to_be32(attr->retry_cnt << 16); optpar |= MLX4_QP_OPTPAR_RETRY_COUNT; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic) context->params1 |= cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); optpar |= MLX4_QP_OPTPAR_SRA_MAX; } if (attr_mask & IB_QP_SQ_PSN) context->next_send_psn = cpu_to_be32(attr->sq_psn); if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic) context->params2 |= cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); optpar |= MLX4_QP_OPTPAR_RRA_MAX; } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE; } if (attr_mask & IB_M_EXT_CLASS_1) context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER); /* for now we enable also sqe on send */ if (attr_mask & IB_M_EXT_CLASS_2) { context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_SQ); context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER); } if (attr_mask & IB_M_EXT_CLASS_3) context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_RQ); if (ibqp->srq) context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC); if (attr_mask & IB_QP_MIN_RNR_TIMER) { context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT; } if (attr_mask & IB_QP_RQ_PSN) context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ if (attr_mask & IB_QP_QKEY) { if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) context->qkey = cpu_to_be32(IB_QP_SET_QKEY); else { if (mlx4_is_mfunc(dev->dev) && !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && (attr->qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE) { pr_err("Cannot use reserved QKEY" " 0x%x (range 0xffff0000..0xffffffff" " is reserved)\n", attr->qkey); err = -EINVAL; goto out; } context->qkey = cpu_to_be32(attr->qkey); } optpar |= MLX4_QP_OPTPAR_Q_KEY; } if (ibqp->srq) context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->db_rec_addr = cpu_to_be64(qp->db.dma); if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR && (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type 
== IB_QPT_SMI || ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_RAW_PACKET)) { context->pri_path.sched_queue = (qp->port - 1) << 6; if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) { context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) context->pri_path.fl = 0x80; } else { if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) context->pri_path.fl = 0x80; context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; } is_eth = rdma_port_get_link_layer(&dev->ib_dev, qp->port) == IB_LINK_LAYER_ETHERNET; if (is_eth) { if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) context->pri_path.feup = 1 << 7; /* don't fsm */ /* handle smac_index */ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { err = handle_eth_ud_smac_index(dev, qp, context); if (err) return -EINVAL; } } } if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1; else sqd_event = 0; if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->rlkey |= (1 << 4); if ((attr_mask & IB_QP_GROUP_RSS) && (qp->qpg_data->rss_child_count > 1)) { struct mlx4_ib_qpg_data *qpg_data = qp->qpg_data; void *rss_context_base = &context->pri_path; struct mlx4_rss_context *rss_context = (struct mlx4_rss_context *) (rss_context_base + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH); context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); /* This should be tbl_sz_base_qpn */ rss_context->base_qpn = cpu_to_be32(qpg_data->rss_qpn_base | (ilog2(qpg_data->rss_child_count) << 24)); rss_context->default_qpn = cpu_to_be32(qpg_data->rss_qpn_base); /* This should be flags_hash_fn */ rss_context->flags = MLX4_RSS_TCP_IPV6 | MLX4_RSS_TCP_IPV4; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS) { rss_context->base_qpn_udp = rss_context->default_qpn; rss_context->flags |= MLX4_RSS_IPV6 | MLX4_RSS_IPV4 | MLX4_RSS_UDP_IPV6 | MLX4_RSS_UDP_IPV4; } if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC, 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD, 0x593D56D9, 0xF3253C06, 0x2ADC1FFC}; rss_context->hash_fn = MLX4_RSS_HASH_TOP; memcpy(rss_context->rss_key, rsskey, sizeof(rss_context->rss_key)); } else { rss_context->hash_fn = MLX4_RSS_HASH_XOR; memset(rss_context->rss_key, 0, sizeof(rss_context->rss_key)); } } /* * Before passing a kernel QP to the HW, make sure that the * ownership bits of the send queue are set and the SQ * headroom is stamped so that the hardware doesn't start * processing stale work requests. 
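* The ownership bit is the most significant bit of owner_opcode; it * is written below as 1U << 31 because shifting a signed 1 into the * sign bit is undefined behavior in C, even where the resulting bit * pattern is the intended one.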
*/ if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { struct mlx4_wqe_ctrl_seg *ctrl; int i; for (i = 0; i < qp->sq.wqe_cnt; ++i) { ctrl = get_send_wqe(qp, i); - ctrl->owner_opcode = cpu_to_be32(1 << 31); + ctrl->owner_opcode = cpu_to_be32(1U << 31); if (qp->sq_max_wqes_per_wr == 1) ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); } } err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), to_mlx4_state(new_state), context, optpar, sqd_event, &qp->mqp); if (err) goto out; qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->atomic_rd_en = attr->qp_access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->resp_depth = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_PORT) { qp->port = attr->port_num; update_mcg_macs(dev, qp); } if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; if (is_sqp(dev, qp)) store_sqp_attrs(to_msqp(qp), attr, attr_mask); /* Set 'ignore_cq_overrun' bits for collectives offload */ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { if (attr_mask & (IB_M_EXT_CLASS_2 | IB_M_EXT_CLASS_3)) { err = mlx4_ib_ignore_overrun_cq(ibqp->send_cq); if (err) { pr_err("Failed to set ignore CQ " "overrun for QP 0x%x's send CQ\n", ibqp->qp_num); goto out; } if (ibqp->recv_cq != ibqp->send_cq) { err = mlx4_ib_ignore_overrun_cq(ibqp->recv_cq); if (err) { pr_err("Failed to set ignore " "CQ overrun for QP 0x%x's recv " "CQ\n", ibqp->qp_num); goto out; } } } } /* * If we moved QP0 to RTR, bring the IB link up; if we moved * QP0 to RESET or ERROR, bring the link back down. */ if (is_qp0(dev, qp)) { if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) if (mlx4_INIT_PORT(dev->dev, qp->port)) pr_warn("INIT_PORT failed for port %d\n", qp->port); if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) mlx4_CLOSE_PORT(dev->dev, qp->port); } /* * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET) { if (!ibqp->uobject) { mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, ibqp->srq ? 
to_msrq(ibqp->srq) : NULL); if (send_cq != recv_cq) mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); qp->rq.head = 0; qp->rq.tail = 0; qp->sq.head = 0; qp->sq.tail = 0; qp->sq_next_wqe = 0; if (qp->rq.wqe_cnt) *qp->db.db = 0; if (qp->flags & MLX4_IB_QP_NETIF && (qp->qpg_type == IB_QPG_NONE || qp->qpg_type == IB_QPG_PARENT)) mlx4_ib_steer_qp_reg(dev, qp, 0); } if (qp->pri.smac) { mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); qp->pri.smac = 0; } if (qp->alt.smac) { mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); qp->alt.smac = 0; } if (qp->pri.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); qp->pri.vid = 0xFFFF; qp->pri.candidate_vid = 0xFFFF; qp->pri.update_vid = 0; } if (qp->alt.vid < 0x1000) { mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); qp->alt.vid = 0xFFFF; qp->alt.candidate_vid = 0xFFFF; qp->alt.update_vid = 0; } } out: if (err && steer_qp) mlx4_ib_steer_qp_reg(dev, qp, 0); kfree(context); if (qp->pri.candidate_smac) { if (err) mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); else { if (qp->pri.smac) { mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); } qp->pri.smac = qp->pri.candidate_smac; qp->pri.smac_index = qp->pri.candidate_smac_index; qp->pri.smac_port = qp->pri.candidate_smac_port; } qp->pri.candidate_smac = 0; qp->pri.candidate_smac_index = 0; qp->pri.candidate_smac_port = 0; } if (qp->alt.candidate_smac) { if (err) mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); else { if (qp->alt.smac) { mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); } qp->alt.smac = qp->alt.candidate_smac; qp->alt.smac_index = qp->alt.candidate_smac_index; qp->alt.smac_port = qp->alt.candidate_smac_port; } qp->alt.candidate_smac = 0; qp->alt.candidate_smac_index = 0; qp->alt.candidate_smac_port = 0; } if (qp->pri.update_vid) { if (err) { if (qp->pri.candidate_vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, qp->pri.candidate_vid); } else { if (qp->pri.vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); qp->pri.vid = qp->pri.candidate_vid; qp->pri.vlan_port = qp->pri.candidate_vlan_port; qp->pri.vlan_index = qp->pri.candidate_vlan_index; } qp->pri.candidate_vid = 0xFFFF; qp->pri.update_vid = 0; } if (qp->alt.update_vid) { if (err) { if (qp->alt.candidate_vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, qp->alt.candidate_vid); } else { if (qp->alt.vid < 0x1000) mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); qp->alt.vid = qp->alt.candidate_vid; qp->alt.vlan_port = qp->alt.candidate_vlan_port; qp->alt.vlan_index = qp->alt.candidate_vlan_index; } qp->alt.candidate_vid = 0xFFFF; qp->alt.update_vid = 0; } return err; } int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask & ~IB_M_QP_MOD_VEND_MASK)) { pr_debug("qpn 0x%x: invalid attribute mask specified " "for transition %d to %d. 
qp_type %d," " attr_mask 0x%x\n", ibqp->qp_num, cur_state, new_state, ibqp->qp_type, attr_mask); goto out; } if ((attr_mask & IB_M_QP_MOD_VEND_MASK) && !dev->dev->caps.sync_qp) { pr_err("extended verbs are not supported by %s\n", dev->ib_dev.name); goto out; } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->num_ports)) { pr_debug("qpn 0x%x: invalid port number (%d) specified " "for transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->port_num, cur_state, new_state, ibqp->qp_type); goto out; } if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) && (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) != IB_LINK_LAYER_ETHERNET)) goto out; if (attr_mask & IB_QP_PKEY_INDEX) { int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { pr_debug("qpn 0x%x: invalid pkey index (%d) specified " "for transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->pkey_index, cur_state, new_state, ibqp->qp_type); goto out; } } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " "Transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->max_rd_atomic, cur_state, new_state, ibqp->qp_type); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " "Transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, new_state, ibqp->qp_type); goto out; } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; goto out; } err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); out: mutex_unlock(&qp->mutex); return err; } static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); struct ib_device *ib_dev = &mdev->ib_dev; struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); u16 pkey; u32 qkey; int send_size; int header_size; int spc; int i; if (wr->opcode != IB_WR_SEND) return -EINVAL; send_size = 0; for (i = 0; i < wr->num_sge; ++i) send_size += wr->sg_list[i].length; /* for proxy-qp0 sends, need to add in size of tunnel header */ /* for tunnel-qp0 sends, tunnel header is already in s/g list */ if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) send_size += sizeof (struct mlx4_ib_tunnel_header); ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { sqp->ud_header.lrh.service_level = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; sqp->ud_header.lrh.destination_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); } mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); /* force loopback */ mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR); mlx->rlid = sqp->ud_header.lrh.destination_lid; sqp->ud_header.lrh.virtual_lane = 0; sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); else sqp->ud_header.bth.destination_qpn = 
cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) return -EINVAL; sqp->ud_header.deth.qkey = cpu_to_be32(qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); /* * Inline data segments may not cross a 64 byte boundary. If * our UD header is bigger than the space available up to the * next 64 byte boundary in the WQE, use two inline data * segments to hold the UD header. */ spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); if (header_size <= spc) { inl->byte_count = cpu_to_be32(1 << 31 | header_size); memcpy(inl + 1, sqp->header_buf, header_size); i = 1; } else { inl->byte_count = cpu_to_be32(1 << 31 | spc); memcpy(inl + 1, sqp->header_buf, spc); inl = (void *) (inl + 1) + spc; memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); /* * Need a barrier here to make sure all the data is * visible before the byte_count field is set. * Otherwise the HCA prefetcher could grab the 64-byte * chunk with this inline segment and get a valid (!= * 0xffffffff) byte count but stale data, and end up * generating a packet with bad headers. * * The first inline segment's byte_count field doesn't * need a barrier, because it comes after a * control/MLX segment and therefore is at an offset * of 16 mod 64. */ wmb(); inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc)); i = 2; } *mlx_seg_len = ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); return 0; } static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct ib_device *ib_dev = sqp->qp.ibqp.device; struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_ctrl_seg *ctrl = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); union ib_gid sgid; u16 pkey; int send_size; int header_size; int spc; int i; int is_eth; int is_vlan = 0; int is_grh; u16 vlan = 0; int err = 0; send_size = 0; for (i = 0; i < wr->num_sge; ++i) send_size += wr->sg_list[i].length; is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; is_grh = mlx4_ib_ah_grh_present(ah); if (is_eth) { if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { /* When multi-function is enabled, the ib_core gid * indexes don't necessarily match the hw ones, so * we must use our own cache */ err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, ah->av.ib.gid_index, &sgid.raw[0]); if (err) return err; } else { err = ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, ah->av.ib.gid_index, &sgid); if (err) return err; } vlan = rdma_get_vlan_id(&sgid); is_vlan = vlan < 0x1000; } ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header); if (!is_eth) { sqp->ud_header.lrh.service_level = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid; sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); } if (is_grh) { sqp->ud_header.grh.traffic_class = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; sqp->ud_header.grh.flow_label = ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; if (is_eth) 
memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); else { if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { /* When multi-function is enabled, the ib_core gid * indexes don't necessarily match the hw ones, so * we must use our own cache */ sqp->ud_header.grh.source_gid.global.subnet_prefix = to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. subnet_prefix; sqp->ud_header.grh.source_gid.global.interface_id = to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. guid_cache[ah->av.ib.gid_index]; } else ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid); } memcpy(sqp->ud_header.grh.destination_gid.raw, ah->av.ib.dgid, 16); } mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); if (!is_eth) { mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) | (sqp->ud_header.lrh.service_level << 8)); if (ah->av.ib.port_pd & cpu_to_be32(0x80000000)) mlx->flags |= cpu_to_be32(0x1); /* force loopback */ mlx->rlid = sqp->ud_header.lrh.destination_lid; } switch (wr->opcode) { case IB_WR_SEND: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; break; case IB_WR_SEND_WITH_IMM: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; sqp->ud_header.immediate_present = 1; sqp->ud_header.immediate_data = wr->ex.imm_data; break; default: return -EINVAL; } if (is_eth) { u8 smac[6]; struct in6_addr in6; u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; mlx->sched_prio = cpu_to_be16(pcp); memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); /* FIXME: cache smac value? */ memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2); memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); memcpy(&in6, sgid.raw, sizeof(in6)); rdma_get_ll_mac(&in6, smac); memcpy(sqp->ud_header.eth.smac_h, smac, 6); if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); if (!is_vlan) { sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); } else { sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); } } else { sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; } sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); else ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? sqp->qkey : wr->wr.ud.remote_qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); if (0) { pr_err("built UD header of size %d:\n", header_size); for (i = 0; i < header_size / 4; ++i) { if (i % 8 == 0) pr_err(" [%02x] ", i * 4); pr_cont(" %08x", be32_to_cpu(((__be32 *) sqp->header_buf)[i])); if ((i + 1) % 8 == 0) pr_cont("\n"); } pr_err("\n"); } /* * Inline data segments may not cross a 64 byte boundary. 
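* (The HCA fetches WQE contents in 64-byte chunks, so a segment that * straddled a chunk boundary could be read before it is completely * written; see also the barrier comment below.)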
If * our UD header is bigger than the space available up to the * next 64 byte boundary in the WQE, use two inline data * segments to hold the UD header. */ spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); if (header_size <= spc) { inl->byte_count = cpu_to_be32(1 << 31 | header_size); memcpy(inl + 1, sqp->header_buf, header_size); i = 1; } else { inl->byte_count = cpu_to_be32(1 << 31 | spc); memcpy(inl + 1, sqp->header_buf, spc); inl = (void *) (inl + 1) + spc; memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); /* * Need a barrier here to make sure all the data is * visible before the byte_count field is set. * Otherwise the HCA prefetcher could grab the 64-byte * chunk with this inline segment and get a valid (!= * 0xffffffff) byte count but stale data, and end up * generating a packet with bad headers. * * The first inline segment's byte_count field doesn't * need a barrier, because it comes after a * control/MLX segment and therefore is at an offset * of 16 mod 64. */ wmb(); inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc)); i = 2; } *mlx_seg_len = ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); return 0; } static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) { unsigned cur; struct mlx4_ib_cq *cq; cur = wq->head - wq->tail; if (likely(cur + nreq < wq->max_post)) return 0; cq = to_mcq(ib_cq); spin_lock(&cq->lock); cur = wq->head - wq->tail; spin_unlock(&cq->lock); return cur + nreq >= wq->max_post; } static __be32 convert_access(int acc) { return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC) : 0) | (acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) | (acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ) : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) | cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); } static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr) { struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); int i; for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i) mfrpl->mapped_page_list[i] = cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] | MLX4_MTT_FLAG_PRESENT); fseg->flags = convert_access(wr->wr.fast_reg.access_flags); fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey); fseg->buf_list = cpu_to_be64(mfrpl->map); fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length); fseg->offset = 0; /* XXX -- is this just for ZBVA? 
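* (ZBVA, zero-based virtual addressing, maps the region starting at * virtual offset zero; a hard-coded zero offset is presumably only * meaningful in that mode.)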
*/ fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift); fseg->reserved[0] = 0; fseg->reserved[1] = 0; } static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) { iseg->flags = 0; iseg->mem_key = cpu_to_be32(rkey); iseg->guest_id = 0; iseg->pa = 0; } static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, u64 remote_addr, u32 rkey) { rseg->raddr = cpu_to_be64(remote_addr); rseg->rkey = cpu_to_be32(rkey); rseg->reserved = 0; } static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr) { if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); } else { aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); aseg->compare = 0; } } static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, struct ib_send_wr *wr) { aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); } static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, struct ib_send_wr *wr) { memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); } static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, struct mlx4_wqe_datagram_seg *dseg, struct ib_send_wr *wr, enum ib_qp_type qpt) { union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; struct mlx4_av sqp_av = {0}; int port = *((u8 *) &av->ib.port_pd) & 0x3; /* force loopback */ sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000); sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */ sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel & cpu_to_be32(0xf0000000); memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); /* This function used only for sending on QP1 proxies */ dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]); /* Use QKEY from the QP context, which is set by master */ dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); } static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_wqe_inline_seg *inl = wqe; struct mlx4_ib_tunnel_header hdr; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); int spc; int i; memcpy(&hdr.av, &ah->av, sizeof hdr.av); hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index); hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); if (sizeof (hdr) <= spc) { memcpy(inl + 1, &hdr, sizeof (hdr)); wmb(); inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr)); i = 1; } else { memcpy(inl + 1, &hdr, spc); wmb(); inl->byte_count = cpu_to_be32(1 << 31 | spc); inl = (void *) (inl + 1) + spc; memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc); wmb(); inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc)); i = 2; } *mlx_seg_len = ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16); } static void set_mlx_icrc_seg(void *dseg) { u32 *t = dseg; struct mlx4_wqe_inline_seg *iseg = dseg; t[1] = 0; /* * Need a barrier here before 
writing the byte_count field to * make sure that all the data is visible before the * byte_count field is set. Otherwise, if the segment begins * a new cacheline, the HCA prefetcher could grab the 64-byte * chunk and get a valid (!= * 0xffffffff) byte count but * stale data, and end up sending the wrong data. */ wmb(); - iseg->byte_count = cpu_to_be32((1 << 31) | 4); + iseg->byte_count = cpu_to_be32((1U << 31) | 4); } static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) { dseg->lkey = cpu_to_be32(sg->lkey); dseg->addr = cpu_to_be64(sg->addr); /* * Need a barrier here before writing the byte_count field to * make sure that all the data is visible before the * byte_count field is set. Otherwise, if the segment begins * a new cacheline, the HCA prefetcher could grab the 64-byte * chunk and get a valid (!= * 0xffffffff) byte count but * stale data, and end up sending the wrong data. */ wmb(); dseg->byte_count = cpu_to_be32(sg->length); } static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) { dseg->byte_count = cpu_to_be32(sg->length); dseg->lkey = cpu_to_be32(sg->lkey); dseg->addr = cpu_to_be64(sg->addr); } static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) { unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) *blh = cpu_to_be32(1 << 6); if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && wr->num_sge > qp->sq.max_gs - (halign >> 4))) return -EINVAL; memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | wr->wr.ud.hlen); *lso_seg_len = halign; return 0; } static __be32 send_ieth(struct ib_send_wr *wr) { switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM: return wr->ex.imm_data; case IB_WR_SEND_WITH_INV: return cpu_to_be32(wr->ex.invalidate_rkey); default: return 0; } } static void add_zero_len_inline(void *wqe) { struct mlx4_wqe_inline_seg *inl = wqe; memset(wqe, 0, 16); - inl->byte_count = cpu_to_be32(1 << 31); + inl->byte_count = cpu_to_be32(1U << 31); } static int lay_inline_data(struct mlx4_ib_qp *qp, struct ib_send_wr *wr, void *wqe, int *sz) { struct mlx4_wqe_inline_seg *seg; void *addr; int len, seg_len; int num_seg; int off, to_copy; int i; int inl = 0; seg = wqe; wqe += sizeof *seg; off = ((unsigned long)wqe) & (unsigned long)(MLX4_INLINE_ALIGN - 1); num_seg = 0; seg_len = 0; for (i = 0; i < wr->num_sge; ++i) { addr = (void *) (unsigned long)(wr->sg_list[i].addr); len = wr->sg_list[i].length; inl += len; if (inl > qp->max_inline_data) { inl = 0; return -1; } while (len >= MLX4_INLINE_ALIGN - off) { to_copy = MLX4_INLINE_ALIGN - off; memcpy(wqe, addr, to_copy); len -= to_copy; wqe += to_copy; addr += to_copy; seg_len += to_copy; wmb(); /* see comment below */ seg->byte_count = htonl(MLX4_INLINE_SEG | seg_len); seg_len = 0; seg = wqe; wqe += sizeof *seg; off = sizeof *seg; ++num_seg; } memcpy(wqe, addr, len); wqe += len; seg_len += len; off += len; } if (seg_len) { ++num_seg; /* * Need a barrier here to make sure * all the data is visible before the * byte_count field is set. Otherwise * the HCA prefetcher could grab the * 64-byte chunk with this inline * segment and get a valid (!= * 0xffffffff) byte count but stale * data, and end up sending the wrong * data. 
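* The wmb() makes the payload stores visible to the device before * the byte_count store; a compiler-only barrier would not suffice * on weakly ordered CPUs.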
*/ wmb(); seg->byte_count = htonl(MLX4_INLINE_SEG | seg_len); } *sz = (inl + num_seg * sizeof *seg + 15) / 16; return 0; } /* * Avoid using memcpy() to copy to BlueFlame page, since memcpy() * implementations may use move-string-buffer assembler instructions, * which do not guarantee order of copying. */ static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) { __iowrite64_copy(dst, src, bytecnt / 8); } int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct mlx4_ib_qp *qp = to_mqp(ibqp); void *wqe; struct mlx4_wqe_ctrl_seg *uninitialized_var(ctrl); struct mlx4_wqe_data_seg *dseg; unsigned long flags; int nreq; int err = 0; unsigned ind; int uninitialized_var(stamp); int uninitialized_var(size); unsigned uninitialized_var(seglen); __be32 dummy; __be32 *lso_wqe; __be32 uninitialized_var(lso_hdr_sz); __be32 blh; int i; int inl = 0; spin_lock_irqsave(&qp->sq.lock, flags); ind = qp->sq_next_wqe; for (nreq = 0; wr; ++nreq, wr = wr->next) { lso_wqe = &dummy; blh = 0; if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { err = -ENOMEM; *bad_wr = wr; goto out; } if (unlikely(wr->num_sge > qp->sq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); *((u32 *) (&ctrl->vlan_tag)) = 0; qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; ctrl->srcrb_flags = (wr->send_flags & IB_SEND_SIGNALED ? cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) | (wr->send_flags & IB_SEND_SOLICITED ? cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) | ((wr->send_flags & IB_SEND_IP_CSUM) ? cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) | qp->sq_signal_bits; ctrl->imm = send_ieth(wr); wqe += sizeof *ctrl; size = sizeof *ctrl / 16; switch (qp->mlx4_ib_qp_type) { case MLX4_IB_QPT_RC: case MLX4_IB_QPT_UC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); set_atomic_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + sizeof (struct mlx4_wqe_atomic_seg)) / 16; break; case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); set_masked_atomic_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); size += (sizeof (struct mlx4_wqe_raddr_seg) + sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16; break; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); size += sizeof (struct mlx4_wqe_raddr_seg) / 16; break; case IB_WR_LOCAL_INV: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_local_inv_seg(wqe, wr->ex.invalidate_rkey); wqe += sizeof (struct mlx4_wqe_local_inval_seg); size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; break; case IB_WR_FAST_REG_MR: ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); set_fmr_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_fmr_seg); size += sizeof (struct mlx4_wqe_fmr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case MLX4_IB_QPT_TUN_SMI_OWNER: err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; } wqe += seglen; size += seglen / 16; break; case MLX4_IB_QPT_TUN_SMI: 
case MLX4_IB_QPT_TUN_GSI: /* this is a UD qp used in MAD responses to slaves. */ set_datagram_seg(wqe, wr); /* set the forced-loopback bit in the data seg av */ *(__be32 *) wqe |= cpu_to_be32(0x80000000); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; break; case MLX4_IB_QPT_UD: set_datagram_seg(wqe, wr); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; if (wr->opcode == IB_WR_LSO) { err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); if (unlikely(err)) { *bad_wr = wr; goto out; } lso_wqe = (__be32 *) wqe; wqe += seglen; size += seglen / 16; } break; case MLX4_IB_QPT_PROXY_SMI_OWNER: if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) { err = -ENOSYS; *bad_wr = wr; goto out; } err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; } wqe += seglen; size += seglen / 16; /* to start tunnel header on a cache-line boundary */ add_zero_len_inline(wqe); wqe += 16; size++; build_tunnel_header(wr, wqe, &seglen); wqe += seglen; size += seglen / 16; break; case MLX4_IB_QPT_PROXY_SMI: /* don't allow QP0 sends on guests */ err = -ENOSYS; *bad_wr = wr; goto out; case MLX4_IB_QPT_PROXY_GSI: /* If we are tunneling special qps, this is a UD qp. * In this case we first add a UD segment targeting * the tunnel qp, and then add a header with address * information */ set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type); wqe += sizeof (struct mlx4_wqe_datagram_seg); size += sizeof (struct mlx4_wqe_datagram_seg) / 16; build_tunnel_header(wr, wqe, &seglen); wqe += seglen; size += seglen / 16; break; case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); if (unlikely(err)) { *bad_wr = wr; goto out; } wqe += seglen; size += seglen / 16; break; default: break; } /* * Write data segments in reverse order, so as to * overwrite cacheline stamp last within each * cacheline. This avoids issues with WQE * prefetching. */ dseg = wqe; dseg += wr->num_sge - 1; /* Add one more inline data segment for ICRC for MLX sends */ if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) { set_mlx_icrc_seg(dseg + 1); size += sizeof (struct mlx4_wqe_data_seg) / 16; } if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { int sz; err = lay_inline_data(qp, wr, wqe, &sz); if (!err) { inl = 1; size += sz; } } else { size += wr->num_sge * (sizeof(struct mlx4_wqe_data_seg) / 16); for (i = wr->num_sge - 1; i >= 0; --i, --dseg) set_data_seg(dseg, wr->sg_list + i); } /* * Possibly overwrite stamping in cacheline with LSO * segment only after making sure all data segments * are written. */ wmb(); *lso_wqe = lso_hdr_sz; ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? MLX4_WQE_CTRL_FENCE : 0) | size; /* * Make sure descriptor is fully written before * setting ownership bit (because HW can start * executing as soon as we do). */ wmb(); if (wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { *bad_wr = wr; err = -EINVAL; goto out; } ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | - (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; + (ind & qp->sq.wqe_cnt ? 
cpu_to_be32(1U << 31) : 0) | blh; stamp = ind + qp->sq_spare_wqes; ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); /* * We can improve latency by not stamping the last * send queue WQE until after ringing the doorbell, so * only stamp here if there are still more WQEs to post. * * Same optimization applies to padding with NOP wqe * in case of WQE shrinking (used to prevent wrap-around * in the middle of WR). */ if (wr->next) { stamp_send_wqe(qp, stamp, size * 16); ind = pad_wraparound(qp, ind); } } out: if (nreq == 1 && inl && size > 1 && size < qp->bf.buf_size / 16) { ctrl->owner_opcode |= htonl((qp->sq_next_wqe & 0xffff) << 8); /* We set above doorbell_qpn bits to 0 as part of vlan * tag initialization, so |= should be correct. */ *(u32 *) (&ctrl->vlan_tag) |= qp->doorbell_qpn; /* * Make sure that descriptor is written to memory * before writing to BlueFlame page. */ wmb(); ++qp->sq.head; mlx4_bf_copy(qp->bf.reg + qp->bf.offset, (unsigned long *) ctrl, ALIGN(size * 16, 64)); wc_wmb(); qp->bf.offset ^= qp->bf.buf_size; } else if (nreq) { qp->sq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); writel(qp->doorbell_qpn, qp->bf.uar->map + MLX4_SEND_DOORBELL); /* * Make sure doorbells don't leak out of SQ spinlock * and reach the HCA out of order. */ mmiowb(); } if (likely(nreq)) { stamp_send_wqe(qp, stamp, size * 16); ind = pad_wraparound(qp, ind); qp->sq_next_wqe = ind; } spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_wqe_data_seg *scat; unsigned long flags; int err = 0; int nreq; int ind; int max_gs; int i; max_gs = qp->rq.max_gs; spin_lock_irqsave(&qp->rq.lock, flags); ind = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { err = -ENOMEM; *bad_wr = wr; goto out; } if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } scat = get_recv_wqe(qp, ind); if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { ib_dma_sync_single_for_device(ibqp->device, qp->sqp_proxy_rcv[ind].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); scat->byte_count = cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr)); /* use dma lkey from upper layer entry */ scat->lkey = cpu_to_be32(wr->sg_list->lkey); scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); scat++; max_gs--; } for (i = 0; i < wr->num_sge; ++i) __set_data_seg(scat + i, wr->sg_list + i); if (i < max_gs) { scat[i].byte_count = 0; scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); scat[i].addr = 0; } qp->rq.wrid[ind] = wr->wr_id; ind = (ind + 1) & (qp->rq.wqe_cnt - 1); } out: if (likely(nreq)) { qp->rq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. 
*/ wmb(); *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); } spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state) { switch (mlx4_state) { case MLX4_QP_STATE_RST: return IB_QPS_RESET; case MLX4_QP_STATE_INIT: return IB_QPS_INIT; case MLX4_QP_STATE_RTR: return IB_QPS_RTR; case MLX4_QP_STATE_RTS: return IB_QPS_RTS; case MLX4_QP_STATE_SQ_DRAINING: case MLX4_QP_STATE_SQD: return IB_QPS_SQD; case MLX4_QP_STATE_SQER: return IB_QPS_SQE; case MLX4_QP_STATE_ERR: return IB_QPS_ERR; default: return -1; } } static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state) { switch (mlx4_mig_state) { case MLX4_QP_PM_ARMED: return IB_MIG_ARMED; case MLX4_QP_PM_REARM: return IB_MIG_REARM; case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED; default: return -1; } } static int to_ib_qp_access_flags(int mlx4_flags) { int ib_flags = 0; if (mlx4_flags & MLX4_QP_BIT_RRE) ib_flags |= IB_ACCESS_REMOTE_READ; if (mlx4_flags & MLX4_QP_BIT_RWE) ib_flags |= IB_ACCESS_REMOTE_WRITE; if (mlx4_flags & MLX4_QP_BIT_RAE) ib_flags |= IB_ACCESS_REMOTE_ATOMIC; return ib_flags; } static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, struct mlx4_qp_path *path) { struct mlx4_dev *dev = ibdev->dev; int is_eth; memset(ib_ah_attr, 0, sizeof *ib_ah_attr); ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) return; is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) == IB_LINK_LAYER_ETHERNET; if (is_eth) ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) | ((path->sched_queue & 4) << 1); else ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf; ib_ah_attr->dlid = be16_to_cpu(path->rlid); ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f; ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? 
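/*
 * The wmb() calls above -- in set_data_seg(), lay_inline_data() and
 * before the doorbell-record store in mlx4_ib_post_recv() -- all follow
 * one publication pattern: write the payload first, issue a write
 * barrier, then store the one word the HCA polls (a byte_count or the
 * doorbell record).  A hedged sketch of that pattern, reusing the
 * kernel's wmb()/cpu_to_be32() as this file does (publish_count is a
 * hypothetical helper):
 */
static void publish_count(volatile __be32 *hw_word, u32 value)
{
	/* caller has already written the payload the HCA will fetch */
	wmb();			/* order payload before the polled word */
	*hw_word = cpu_to_be32(value);
}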
IB_AH_GRH : 0; if (ib_ah_attr->ah_flags) { ib_ah_attr->grh.sgid_index = path->mgid_index; ib_ah_attr->grh.hop_limit = path->hop_limit; ib_ah_attr->grh.traffic_class = (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; ib_ah_attr->grh.flow_label = be32_to_cpu(path->tclass_flowlabel) & 0xfffff; memcpy(ib_ah_attr->grh.dgid.raw, path->rgid, sizeof ib_ah_attr->grh.dgid.raw); } } int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_qp_context context; int mlx4_state; int err = 0; mutex_lock(&qp->mutex); if (qp->state == IB_QPS_RESET) { qp_attr->qp_state = IB_QPS_RESET; goto done; } err = mlx4_qp_query(dev->dev, &qp->mqp, &context); if (err) { err = -EINVAL; goto out; } mlx4_state = be32_to_cpu(context.flags) >> 28; qp->state = to_ib_qp_state(mlx4_state); qp_attr->qp_state = qp->state; qp_attr->path_mtu = context.mtu_msgmax >> 5; qp_attr->path_mig_state = to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3); qp_attr->qkey = be32_to_cpu(context.qkey); qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff; qp_attr->qp_access_flags = to_ib_qp_access_flags(be32_to_cpu(context.params2)); if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path); to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path); qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f; qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; } qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f; if (qp_attr->qp_state == IB_QPS_INIT) qp_attr->port_num = qp->port; else qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING; qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7); qp_attr->max_dest_rd_atomic = 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f; qp_attr->timeout = context.pri_path.ackto >> 3; qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7; qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7; qp_attr->alt_timeout = context.alt_path.ackto >> 3; done: qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; qp_attr->cap.max_recv_sge = qp->rq.max_gs; if (!ibqp->uobject) { qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; qp_attr->cap.max_send_sge = qp->sq.max_gs; } else { qp_attr->cap.max_send_wr = 0; qp_attr->cap.max_send_sge = 0; } /* * We don't support inline sends for kernel QPs (yet), and we * don't know what userspace's value should be. */ qp_attr->cap.max_inline_data = 0; qp_init_attr->cap = qp_attr->cap; qp_init_attr->create_flags = 0; if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; if (qp->flags & MLX4_IB_QP_LSO) qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; if (qp->flags & MLX4_IB_QP_NETIF) qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP; qp_init_attr->sq_sig_type = qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? 
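/*
 * mlx4_ib_query_qp() above decodes max_rd_atomic from a 3-bit log2
 * field: 1 << ((params1 >> 21) & 0x7).  The encode direction (visible
 * in the mthca modify path later in this diff) is fls(n - 1), so a
 * value round-trips to the next power of two.  Sketch with
 * hypothetical helpers, assuming the kernel's fls():
 */
static u8 rd_atomic_encode(u32 n)		/* n >= 1 */
{
	return fls(n - 1);			/* log2, rounded up */
}

static u32 rd_atomic_decode(u32 params1)
{
	return 1U << ((params1 >> 21) & 0x7);	/* field value 4 -> 16 */
}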
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; qp_init_attr->qpg_type = ibqp->qpg_type; if (ibqp->qpg_type == IB_QPG_PARENT) qp_init_attr->cap.qpg_tss_mask_sz = qp->qpg_data->qpg_tss_mask_sz; else qp_init_attr->cap.qpg_tss_mask_sz = 0; out: mutex_unlock(&qp->mutex); return err; } Index: head/sys/ofed/drivers/infiniband/hw/mthca/mthca_mcg.c =================================================================== --- head/sys/ofed/drivers/infiniband/hw/mthca/mthca_mcg.c (revision 258779) +++ head/sys/ofed/drivers/infiniband/hw/mthca/mthca_mcg.c (revision 258780) @@ -1,372 +1,372 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include #include "mthca_dev.h" #include "mthca_cmd.h" struct mthca_mgm { __be32 next_gid_index; u32 reserved[3]; u8 gid[16]; __be32 qp[MTHCA_QP_PER_MGM]; }; static const u8 zero_gid[16]; /* automatically initialized to 0 */ /* * Caller must hold MCG table semaphore. gid and mgm parameters must * be properly aligned for command interface. * * Returns 0 unless a firmware command error occurs. * * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 * and *mgm holds MGM entry. * * if GID is found in AMGM, *index = index in AMGM, *prev = index of * previous entry in hash chain and *mgm holds AMGM entry. * * If no AMGM exists for given gid, *index = -1, *prev = index of last * entry in hash chain and *mgm holds end of hash chain. 
*/ static int find_mgm(struct mthca_dev *dev, u8 *gid, struct mthca_mailbox *mgm_mailbox, u16 *hash, int *prev, int *index) { struct mthca_mailbox *mailbox; struct mthca_mgm *mgm = mgm_mailbox->buf; u8 *mgid; int err; u8 status; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return -ENOMEM; mgid = mailbox->buf; memcpy(mgid, gid, 16); err = mthca_MGID_HASH(dev, mailbox, hash, &status); if (err) goto out; if (status) { mthca_err(dev, "MGID_HASH returned status %02x\n", status); err = -EINVAL; goto out; } if (0) mthca_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); *index = *hash; *prev = -1; do { err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (!memcmp(mgm->gid, zero_gid, 16)) { if (*index != *hash) { mthca_err(dev, "Found zero MGID in AMGM.\n"); err = -EINVAL; } goto out; } if (!memcmp(mgm->gid, gid, 16)) goto out; *prev = *index; *index = be32_to_cpu(mgm->next_gid_index) >> 6; } while (*index); *index = -1; out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int index, prev; int link = 0; int i; int err; u8 status; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; if (index != -1) { if (!memcmp(mgm->gid, zero_gid, 16)) memcpy(mgm->gid, gid->raw, 16); } else { link = 1; index = mthca_alloc(&dev->mcg_table.alloc); if (index == -1) { mthca_err(dev, "No AMGM entries left\n"); err = -ENOMEM; goto out; } err = mthca_READ_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } memset(mgm, 0, sizeof *mgm); memcpy(mgm->gid, gid->raw, 16); } for (i = 0; i < MTHCA_QP_PER_MGM; ++i) - if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) { + if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1U << 31))) { mthca_dbg(dev, "QP %06x already a member of MGM\n", ibqp->qp_num); err = 0; goto out; - } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) { - mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); + } else if (!(mgm->qp[i] & cpu_to_be32(1U << 31))) { + mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1U << 31)); break; } if (i == MTHCA_QP_PER_MGM) { mthca_err(dev, "MGM at index %x is full.\n", index); err = -ENOMEM; goto out; } err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (!link) goto out; err = mthca_READ_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } mgm->next_gid_index = cpu_to_be32(index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; } out: if (err && link && index != -1) { BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = 
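/*
 * The mthca_multicast_attach() hunks above (and the detach loop below)
 * mark an occupied MGM qp[] slot by setting the MSB: a member entry is
 * cpu_to_be32(qpn | (1U << 31)), and a clear MSB means the slot is
 * free.  Sketch of the two tests the loops rely on (hypothetical
 * helper names):
 */
static inline int mgm_slot_busy(__be32 entry)
{
	return !!(entry & cpu_to_be32(1U << 31));
}

static inline __be32 mgm_slot_make(u32 qpn)
{
	return cpu_to_be32(qpn | (1U << 31));
}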
to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int prev, index; int i, loc; int err; u8 status; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; if (index == -1) { mthca_err(dev, "MGID %pI6 not found\n", gid->raw); err = -EINVAL; goto out; } for (loc = -1, i = 0; i < MTHCA_QP_PER_MGM; ++i) { - if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) + if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1U << 31))) loc = i; - if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) + if (!(mgm->qp[i] & cpu_to_be32(1U << 31))) break; } if (loc == -1) { mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num); err = -EINVAL; goto out; } mgm->qp[loc] = mgm->qp[i - 1]; mgm->qp[i - 1] = 0; err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (i != 1) goto out; if (prev == -1) { /* Remove entry from MGM */ int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6; if (amgm_index_to_free) { err = mthca_READ_MGM(dev, amgm_index_to_free, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } } else memset(mgm->gid, 0, 16); err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (amgm_index_to_free) { BUG_ON(amgm_index_to_free < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, amgm_index_to_free); } } else { /* Remove entry from AMGM */ int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; err = mthca_READ_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } mgm->next_gid_index = cpu_to_be32(curr_next_index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } out: mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_init_mcg_table(struct mthca_dev *dev) { int err; int table_size = dev->limits.num_mgms + dev->limits.num_amgms; err = mthca_alloc_init(&dev->mcg_table.alloc, table_size, table_size - 1, dev->limits.num_mgms); if (err) return err; mutex_init(&dev->mcg_table.mutex); return 0; } void mthca_cleanup_mcg_table(struct mthca_dev *dev) { mthca_alloc_cleanup(&dev->mcg_table.alloc); } Index: head/sys/ofed/drivers/infiniband/hw/mthca/mthca_qp.c =================================================================== --- head/sys/ofed/drivers/infiniband/hw/mthca/mthca_qp.c (revision 258779) +++ head/sys/ofed/drivers/infiniband/hw/mthca/mthca_qp.c (revision 258780) @@ -1,2332 +1,2332 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include #include #include #include #include #include #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" #include "mthca_wqe.h" enum { MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, MTHCA_ACK_REQ_FREQ = 10, MTHCA_FLIGHT_LIMIT = 9, MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ }; enum { MTHCA_QP_STATE_RST = 0, MTHCA_QP_STATE_INIT = 1, MTHCA_QP_STATE_RTR = 2, MTHCA_QP_STATE_RTS = 3, MTHCA_QP_STATE_SQE = 4, MTHCA_QP_STATE_SQD = 5, MTHCA_QP_STATE_ERR = 6, MTHCA_QP_STATE_DRAINING = 7 }; enum { MTHCA_QP_ST_RC = 0x0, MTHCA_QP_ST_UC = 0x1, MTHCA_QP_ST_RD = 0x2, MTHCA_QP_ST_UD = 0x3, MTHCA_QP_ST_MLX = 0x7 }; enum { MTHCA_QP_PM_MIGRATED = 0x3, MTHCA_QP_PM_ARMED = 0x0, MTHCA_QP_PM_REARM = 0x1 }; enum { /* qp_context flags */ MTHCA_QP_BIT_DE = 1 << 8, /* params1 */ MTHCA_QP_BIT_SRE = 1 << 15, MTHCA_QP_BIT_SWE = 1 << 14, MTHCA_QP_BIT_SAE = 1 << 13, MTHCA_QP_BIT_SIC = 1 << 4, MTHCA_QP_BIT_SSC = 1 << 3, /* params2 */ MTHCA_QP_BIT_RRE = 1 << 15, MTHCA_QP_BIT_RWE = 1 << 14, MTHCA_QP_BIT_RAE = 1 << 13, MTHCA_QP_BIT_RIC = 1 << 4, MTHCA_QP_BIT_RSC = 1 << 3 }; enum { MTHCA_SEND_DOORBELL_FENCE = 1 << 5 }; struct mthca_qp_path { __be32 port_pkey; u8 rnr_retry; u8 g_mylmc; __be16 rlid; u8 ackto; u8 mgid_index; u8 static_rate; u8 hop_limit; __be32 sl_tclass_flowlabel; u8 rgid[16]; } __attribute__((packed)); struct mthca_qp_context { __be32 flags; __be32 tavor_sched_queue; /* Reserved on Arbel */ u8 mtu_msgmax; u8 rq_size_stride; /* Reserved on Tavor */ u8 sq_size_stride; /* Reserved on Tavor */ u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ __be32 usr_page; __be32 local_qpn; __be32 remote_qpn; u32 reserved1[2]; struct mthca_qp_path pri_path; struct mthca_qp_path alt_path; __be32 rdd; __be32 pd; __be32 wqe_base; __be32 wqe_lkey; __be32 params1; __be32 reserved2; __be32 next_send_psn; __be32 cqn_snd; __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ __be32 snd_db_index; /* (debugging only entries) */ __be32 last_acked_psn; __be32 ssn; __be32 params2; __be32 rnr_nextrecvpsn; __be32 ra_buff_indx; __be32 cqn_rcv; __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ __be32 rcv_db_index; /* (debugging only entries) */ __be32 qkey; __be32 srqn; __be32 rmsn; __be16 
rq_wqe_counter; /* reserved on Tavor */ __be16 sq_wqe_counter; /* reserved on Tavor */ u32 reserved3[18]; } __attribute__((packed)); struct mthca_qp_param { __be32 opt_param_mask; u32 reserved1; struct mthca_qp_context context; u32 reserved2[62]; } __attribute__((packed)); enum { MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MTHCA_QP_OPTPAR_RRE = 1 << 1, MTHCA_QP_OPTPAR_RAE = 1 << 2, MTHCA_QP_OPTPAR_RWE = 1 << 3, MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 }; static const u8 mthca_opcode[] = { [IB_WR_SEND] = MTHCA_OPCODE_SEND, [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM, [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS, [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA, }; static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) { return qp->qpn >= dev->qp_table.sqp_start && qp->qpn <= dev->qp_table.sqp_start + 3; } static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) { return qp->qpn >= dev->qp_table.sqp_start && qp->qpn <= dev->qp_table.sqp_start + 1; } static void *get_recv_wqe(struct mthca_qp *qp, int n) { if (qp->is_direct) return qp->queue.direct.buf + (n << qp->rq.wqe_shift); else return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); } static void *get_send_wqe(struct mthca_qp *qp, int n) { if (qp->is_direct) return qp->queue.direct.buf + qp->send_wqe_offset + (n << qp->sq.wqe_shift); else return qp->queue.page_list[(qp->send_wqe_offset + (n << qp->sq.wqe_shift)) >> PAGE_SHIFT].buf + ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & (PAGE_SIZE - 1)); } static void mthca_wq_reset(struct mthca_wq *wq) { wq->next_ind = 0; wq->last_comp = wq->max - 1; wq->head = 0; wq->tail = 0; } void mthca_qp_event(struct mthca_dev *dev, u32 qpn, enum ib_event_type event_type) { struct mthca_qp *qp; struct ib_event event; spin_lock(&dev->qp_table.lock); qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); if (qp) ++qp->refcount; spin_unlock(&dev->qp_table.lock); if (!qp) { mthca_warn(dev, "Async event %d for bogus QP %08x\n", (int) event_type, qpn); return; } if (event_type == IB_EVENT_PATH_MIG) qp->port = qp->alt_port; event.device = &dev->ib_dev; event.event = event_type; event.element.qp = &qp->ibqp; if (qp->ibqp.event_handler) qp->ibqp.event_handler(&event, qp->ibqp.qp_context); spin_lock(&dev->qp_table.lock); if (!--qp->refcount) wake_up(&qp->wait); spin_unlock(&dev->qp_table.lock); } static int to_mthca_state(enum ib_qp_state ib_state) { switch (ib_state) { case IB_QPS_RESET: return MTHCA_QP_STATE_RST; case IB_QPS_INIT: return MTHCA_QP_STATE_INIT; case IB_QPS_RTR: return MTHCA_QP_STATE_RTR; case IB_QPS_RTS: return MTHCA_QP_STATE_RTS; case IB_QPS_SQD: return MTHCA_QP_STATE_SQD; case IB_QPS_SQE: return MTHCA_QP_STATE_SQE; case IB_QPS_ERR: return MTHCA_QP_STATE_ERR; default: return -1; } } enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; static int to_mthca_st(int transport) { switch (transport) { case RC: return 
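/*
 * get_recv_wqe()/get_send_wqe() above address WQEs purely by shifts:
 * every entry is 2^wqe_shift bytes, so entry n sits at byte offset
 * n << wqe_shift, which the indirect-buffer case further splits into a
 * page_list index (>> PAGE_SHIFT) and an in-page offset
 * (& (PAGE_SIZE - 1)).  Minimal sketch of the direct case
 * (hypothetical helper):
 */
static inline void *wqe_at(void *buf, int n, int wqe_shift)
{
	return (char *)buf + ((unsigned long)n << wqe_shift);
}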
MTHCA_QP_ST_RC; case UC: return MTHCA_QP_ST_UC; case UD: return MTHCA_QP_ST_UD; case RD: return MTHCA_QP_ST_RD; case MLX: return MTHCA_QP_ST_MLX; default: return -1; } } static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) { if (attr_mask & IB_QP_PKEY_INDEX) sqp->pkey_index = attr->pkey_index; if (attr_mask & IB_QP_QKEY) sqp->qkey = attr->qkey; if (attr_mask & IB_QP_SQ_PSN) sqp->send_psn = attr->sq_psn; } static void init_port(struct mthca_dev *dev, int port) { int err; u8 status; struct mthca_init_ib_param param; memset(¶m, 0, sizeof param); param.port_width = dev->limits.port_width_cap; param.vl_cap = dev->limits.vl_cap; param.mtu_cap = dev->limits.mtu_cap; param.gid_cap = dev->limits.gid_table_len; param.pkey_cap = dev->limits.pkey_table_len; err = mthca_INIT_IB(dev, ¶m, port, &status); if (err) mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); if (status) mthca_warn(dev, "INIT_IB returned status %02x.\n", status); } static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, int attr_mask) { u8 dest_rd_atomic; u32 access_flags; u32 hw_access_flags = 0; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) dest_rd_atomic = attr->max_dest_rd_atomic; else dest_rd_atomic = qp->resp_depth; if (attr_mask & IB_QP_ACCESS_FLAGS) access_flags = attr->qp_access_flags; else access_flags = qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) hw_access_flags |= MTHCA_QP_BIT_RRE; if (access_flags & IB_ACCESS_REMOTE_ATOMIC) hw_access_flags |= MTHCA_QP_BIT_RAE; if (access_flags & IB_ACCESS_REMOTE_WRITE) hw_access_flags |= MTHCA_QP_BIT_RWE; return cpu_to_be32(hw_access_flags); } static inline enum ib_qp_state to_ib_qp_state(int mthca_state) { switch (mthca_state) { case MTHCA_QP_STATE_RST: return IB_QPS_RESET; case MTHCA_QP_STATE_INIT: return IB_QPS_INIT; case MTHCA_QP_STATE_RTR: return IB_QPS_RTR; case MTHCA_QP_STATE_RTS: return IB_QPS_RTS; case MTHCA_QP_STATE_DRAINING: case MTHCA_QP_STATE_SQD: return IB_QPS_SQD; case MTHCA_QP_STATE_SQE: return IB_QPS_SQE; case MTHCA_QP_STATE_ERR: return IB_QPS_ERR; default: return -1; } } static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state) { switch (mthca_mig_state) { case 0: return IB_MIG_ARMED; case 1: return IB_MIG_REARM; case 3: return IB_MIG_MIGRATED; default: return -1; } } static int to_ib_qp_access_flags(int mthca_flags) { int ib_flags = 0; if (mthca_flags & MTHCA_QP_BIT_RRE) ib_flags |= IB_ACCESS_REMOTE_READ; if (mthca_flags & MTHCA_QP_BIT_RWE) ib_flags |= IB_ACCESS_REMOTE_WRITE; if (mthca_flags & MTHCA_QP_BIT_RAE) ib_flags |= IB_ACCESS_REMOTE_ATOMIC; return ib_flags; } static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr, struct mthca_qp_path *path) { memset(ib_ah_attr, 0, sizeof *ib_ah_attr); ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3; if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports) return; ib_ah_attr->dlid = be16_to_cpu(path->rlid); ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28; ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f; ib_ah_attr->static_rate = mthca_rate_to_ib(dev, path->static_rate & 0xf, ib_ah_attr->port_num); ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? 
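/*
 * One subtlety in get_hw_access_flags() above: when the QP has no
 * responder resources (dest_rd_atomic == 0), the requested flags are
 * masked down to IB_ACCESS_REMOTE_WRITE only, since remote reads and
 * atomics cannot be serviced without responder resources.  So
 * READ|WRITE with a zero depth yields just MTHCA_QP_BIT_RWE.  Sketch
 * (hypothetical helper):
 */
static u32 effective_access(u32 flags, u8 dest_rd_atomic)
{
	return dest_rd_atomic ? flags : (flags & IB_ACCESS_REMOTE_WRITE);
}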
IB_AH_GRH : 0; if (ib_ah_attr->ah_flags) { ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1); ib_ah_attr->grh.hop_limit = path->hop_limit; ib_ah_attr->grh.traffic_class = (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff; ib_ah_attr->grh.flow_label = be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff; memcpy(ib_ah_attr->grh.dgid.raw, path->rgid, sizeof ib_ah_attr->grh.dgid.raw); } } int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); int err = 0; struct mthca_mailbox *mailbox = NULL; struct mthca_qp_param *qp_param; struct mthca_qp_context *context; int mthca_state; u8 status; mutex_lock(&qp->mutex); if (qp->state == IB_QPS_RESET) { qp_attr->qp_state = IB_QPS_RESET; goto done; } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto out; } err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); if (err) goto out_mailbox; if (status) { mthca_warn(dev, "QUERY_QP returned status %02x\n", status); err = -EINVAL; goto out_mailbox; } qp_param = mailbox->buf; context = &qp_param->context; mthca_state = be32_to_cpu(context->flags) >> 28; qp->state = to_ib_qp_state(mthca_state); qp_attr->qp_state = qp->state; qp_attr->path_mtu = context->mtu_msgmax >> 5; qp_attr->path_mig_state = to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); qp_attr->qkey = be32_to_cpu(context->qkey); qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; qp_attr->qp_access_flags = to_ib_qp_access_flags(be32_to_cpu(context->params2)); if (qp->transport == RC || qp->transport == UC) { to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f; qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; } qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; qp_attr->port_num = (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING; qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); qp_attr->max_dest_rd_atomic = 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; qp_attr->timeout = context->pri_path.ackto >> 3; qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; qp_attr->alt_timeout = context->alt_path.ackto >> 3; done: qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_send_wr = qp->sq.max; qp_attr->cap.max_recv_wr = qp->rq.max; qp_attr->cap.max_send_sge = qp->sq.max_gs; qp_attr->cap.max_recv_sge = qp->rq.max_gs; qp_attr->cap.max_inline_data = qp->max_inline_data; qp_init_attr->cap = qp_attr->cap; out_mailbox: mthca_free_mailbox(dev, mailbox); out: mutex_unlock(&qp->mutex); return err; } static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah, struct mthca_qp_path *path, u8 port) { path->g_mylmc = ah->src_path_bits & 0x7f; path->rlid = cpu_to_be16(ah->dlid); path->static_rate = mthca_get_rate(dev, ah->static_rate, port); if (ah->ah_flags & IB_AH_GRH) { if 
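/*
 * mthca_path_set() below packs SL, traffic class and flow label into a
 * single big-endian word -- SL in bits 31:28, traffic class in 27:20,
 * flow label in 19:0 -- and to_ib_ah_attr() above unpacks it with the
 * same shifts.  Sketch with a hypothetical helper (the 0xfffff mask is
 * added here only for illustration):
 */
static __be32 pack_sl_tclass_fl(u8 sl, u8 tclass, u32 flow_label)
{
	return cpu_to_be32(((u32)sl << 28) | ((u32)tclass << 20) |
			   (flow_label & 0xfffff));
}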
(ah->grh.sgid_index >= dev->limits.gid_table_len) { mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n", ah->grh.sgid_index, dev->limits.gid_table_len-1); return -1; } path->g_mylmc |= 1 << 7; path->mgid_index = ah->grh.sgid_index; path->hop_limit = ah->grh.hop_limit; path->sl_tclass_flowlabel = cpu_to_be32((ah->sl << 28) | (ah->grh.traffic_class << 20) | (ah->grh.flow_label)); memcpy(path->rgid, ah->grh.dgid.raw, 16); } else path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28); return 0; } static int __mthca_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); struct mthca_mailbox *mailbox; struct mthca_qp_param *qp_param; struct mthca_qp_context *qp_context; u32 sqd_event = 0; u8 status; int err = -EINVAL; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto out; } qp_param = mailbox->buf; qp_context = &qp_param->context; memset(qp_param, 0, sizeof *qp_param); qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | (to_mthca_st(qp->transport) << 16)); qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); else { qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); switch (attr->path_mig_state) { case IB_MIG_MIGRATED: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); break; case IB_MIG_REARM: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); break; case IB_MIG_ARMED: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); break; } } /* leave tavor_sched_queue as 0 */ if (qp->transport == MLX || qp->transport == UD) qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) { mthca_dbg(dev, "path MTU (%u) is invalid\n", attr->path_mtu); goto out_mailbox; } qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; } if (mthca_is_memfree(dev)) { if (qp->rq.max) qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; if (qp->sq.max) qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; } /* leave arbel_sched_queue as 0 */ if (qp->ibqp.uobject) qp_context->usr_page = cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); else qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); qp_context->local_qpn = cpu_to_be32(qp->qpn); if (attr_mask & IB_QP_DEST_QPN) { qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); } if (qp->transport == MLX) qp_context->pri_path.port_pkey |= cpu_to_be32(qp->port << 24); else { if (attr_mask & IB_QP_PORT) { qp_context->pri_path.port_pkey |= cpu_to_be32(attr->port_num << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); } } if (attr_mask & IB_QP_PKEY_INDEX) { qp_context->pri_path.port_pkey |= cpu_to_be32(attr->pkey_index); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); } if (attr_mask & IB_QP_RNR_RETRY) { qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry = attr->rnr_retry << 5; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY | MTHCA_QP_OPTPAR_ALT_RNR_RETRY); } if (attr_mask & IB_QP_AV) { if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, attr_mask & IB_QP_PORT ? 
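/*
 * __mthca_modify_qp() handles every optional attribute the same way:
 * store the value in the context, then set the matching
 * MTHCA_QP_OPTPAR_* bit in opt_param_mask so the MODIFY_QP firmware
 * command applies only the attributes the caller actually passed.
 * Sketch of one such pair, mirroring the IB_QP_TIMEOUT handling below
 * (hypothetical helper):
 */
static void set_ack_timeout(struct mthca_qp_param *qp_param, u8 timeout)
{
	qp_param->context.pri_path.ackto = timeout << 3;
	qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
}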
attr->port_num : qp->port)) goto out_mailbox; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); } if (ibqp->qp_type == IB_QPT_RC && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { u8 sched_queue = ibqp->uobject ? 0x2 : 0x1; if (mthca_is_memfree(dev)) qp_context->rlkey_arbel_sched_queue |= sched_queue; else qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE); } if (attr_mask & IB_QP_TIMEOUT) { qp_context->pri_path.ackto = attr->timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); } if (attr_mask & IB_QP_ALT_PATH) { if (attr->alt_pkey_index >= dev->limits.pkey_table_len) { mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n", attr->alt_pkey_index, dev->limits.pkey_table_len-1); goto out_mailbox; } if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { mthca_dbg(dev, "Alternate port number (%u) is invalid\n", attr->alt_port_num); goto out_mailbox; } if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, attr->alt_ah_attr.port_num)) goto out_mailbox; qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | attr->alt_port_num << 24); qp_context->alt_path.ackto = attr->alt_timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH); } /* leave rdd as 0 */ qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | (MTHCA_FLIGHT_LIMIT << 24) | MTHCA_QP_BIT_SWE); if (qp->sq_policy == IB_SIGNAL_ALL_WR) qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); if (attr_mask & IB_QP_RETRY_CNT) { qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic) { qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SRE | MTHCA_QP_BIT_SAE); qp_context->params1 |= cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); } qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); } if (attr_mask & IB_QP_SQ_PSN) qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); if (mthca_is_memfree(dev)) { qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic) qp_context->params2 |= cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | MTHCA_QP_OPTPAR_RRE | MTHCA_QP_OPTPAR_RAE); } qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); if (ibqp->srq) qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); if (attr_mask & IB_QP_MIN_RNR_TIMER) { qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); } if (attr_mask & IB_QP_RQ_PSN) qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); qp_context->ra_buff_indx = cpu_to_be32(dev->qp_table.rdb_base + ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << dev->qp_table.rdb_shift)); qp_context->cqn_rcv 
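/*
 * Note that a few lines below, sqd_event is still assigned with the
 * signed expression "1 << 31" -- the same bit-31 shift this revision
 * rewrites as (1U << 31) elsewhere -- and it appears unchanged in this
 * hunk.  The well-defined spelling would be:
 *
 *	sqd_event = 1U << 31;
 */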
= cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); if (mthca_is_memfree(dev)) qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); if (attr_mask & IB_QP_QKEY) { qp_context->qkey = cpu_to_be32(attr->qkey); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); } if (ibqp->srq) qp_context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->srqn); if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1 << 31; err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, mailbox, sqd_event, &status); if (err) goto out_mailbox; if (status) { mthca_warn(dev, "modify QP %d->%d returned status %02x.\n", cur_state, new_state, status); err = -EINVAL; goto out_mailbox; } qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->atomic_rd_en = attr->qp_access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->resp_depth = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_PORT) qp->port = attr->port_num; if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; if (is_sqp(dev, qp)) store_attrs(to_msqp(qp), attr, attr_mask); /* * If we moved QP0 to RTR, bring the IB link up; if we moved * QP0 to RESET or ERROR, bring the link back down. */ if (is_qp0(dev, qp)) { if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) init_port(dev, qp->port); if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) mthca_CLOSE_IB(dev, qp->port, &status); } /* * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); mthca_wq_reset(&qp->sq); qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); mthca_wq_reset(&qp->rq); qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); if (mthca_is_memfree(dev)) { *qp->sq.db = 0; *qp->rq.db = 0; } } out_mailbox: mthca_free_mailbox(dev, mailbox); out: return err; } int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; mutex_lock(&qp->mutex); if (attr_mask & IB_QP_CUR_STATE) { cur_state = attr->cur_qp_state; } else { spin_lock_irq(&qp->sq.lock); spin_lock(&qp->rq.lock); cur_state = qp->state; spin_unlock(&qp->rq.lock); spin_unlock_irq(&qp->sq.lock); } new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { mthca_dbg(dev, "Bad QP transition (transport %d) " "%d->%d with attr 0x%08x\n", qp->transport, cur_state, new_state, attr_mask); goto out; } if ((attr_mask & IB_QP_PKEY_INDEX) && attr->pkey_index >= dev->limits.pkey_table_len) { mthca_dbg(dev, "P_Key index (%u) too large. 
max is %d\n", attr->pkey_index, dev->limits.pkey_table_len-1); goto out; } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); goto out; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", attr->max_rd_atomic, dev->limits.max_qp_init_rdma); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); goto out; } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; goto out; } err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); out: mutex_unlock(&qp->mutex); return err; } static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) { /* * Calculate the maximum size of WQE s/g segments, excluding * the next segment and other non-data segments. */ int max_data_size = desc_sz - sizeof (struct mthca_next_seg); switch (qp->transport) { case MLX: max_data_size -= 2 * sizeof (struct mthca_data_seg); break; case UD: if (mthca_is_memfree(dev)) max_data_size -= sizeof (struct mthca_arbel_ud_seg); else max_data_size -= sizeof (struct mthca_tavor_ud_seg); break; default: max_data_size -= sizeof (struct mthca_raddr_seg); break; } return max_data_size; } static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size) { /* We don't support inline data for kernel QPs (yet). */ return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0; } static void mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) { int max_data_size = mthca_max_data_size(dev, qp, min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift)); qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); qp->sq.max_gs = min_t(int, dev->limits.max_sg, max_data_size / sizeof (struct mthca_data_seg)); qp->rq.max_gs = min_t(int, dev->limits.max_sg, (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - sizeof (struct mthca_next_seg)) / sizeof (struct mthca_data_seg)); } /* * Allocate and register buffer for WQEs. qp->rq.max, sq.max, * rq.max_gs and sq.max_gs must all be assigned. * mthca_alloc_wqe_buf will calculate rq.wqe_shift and * sq.wqe_shift (as well as send_wqe_offset, is_direct, and * queue) */ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) { int size; int err = -ENOMEM; size = sizeof (struct mthca_next_seg) + qp->rq.max_gs * sizeof (struct mthca_data_seg); if (size > dev->limits.max_desc_sz) return -EINVAL; for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; qp->rq.wqe_shift++) ; /* nothing */ size = qp->sq.max_gs * sizeof (struct mthca_data_seg); switch (qp->transport) { case MLX: size += 2 * sizeof (struct mthca_data_seg); break; case UD: size += mthca_is_memfree(dev) ? sizeof (struct mthca_arbel_ud_seg) : sizeof (struct mthca_tavor_ud_seg); break; case UC: size += sizeof (struct mthca_raddr_seg); break; case RC: size += sizeof (struct mthca_raddr_seg); /* * An atomic op will require an atomic segment, a * remote address segment and one scatter entry. 
*/ size = max_t(int, size, sizeof (struct mthca_atomic_seg) + sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_data_seg)); break; default: break; } /* Make sure that we have enough space for a bind request */ size = max_t(int, size, sizeof (struct mthca_bind_seg)); size += sizeof (struct mthca_next_seg); if (size > dev->limits.max_desc_sz) return -EINVAL; for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; qp->sq.wqe_shift++) ; /* nothing */ qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, 1 << qp->sq.wqe_shift); /* * If this is a userspace QP, we don't actually have to * allocate anything. All we need is to calculate the WQE * sizes and the send_wqe_offset, so we're done now. */ if (pd->ibpd.uobject) return 0; size = PAGE_ALIGN(qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift)); qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), GFP_KERNEL); if (!qp->wrid) goto err_out; err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, &qp->queue, &qp->is_direct, pd, 0, &qp->mr); if (err) goto err_out; return 0; err_out: kfree(qp->wrid); return err; } static void mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) { mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift)), &qp->queue, qp->is_direct, &qp->mr); kfree(qp->wrid); } static int mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { int ret; if (mthca_is_memfree(dev)) { ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); if (ret) return ret; ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); if (ret) goto err_qpc; ret = mthca_table_get(dev, dev->qp_table.rdb_table, qp->qpn << dev->qp_table.rdb_shift); if (ret) goto err_eqpc; } return 0; err_eqpc: mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); err_qpc: mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); return ret; } static void mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { mthca_table_put(dev, dev->qp_table.rdb_table, qp->qpn << dev->qp_table.rdb_shift); mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); } static int mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { if (mthca_is_memfree(dev)) { qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, qp->qpn, &qp->rq.db); if (qp->rq.db_index < 0) return -ENOMEM; qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, qp->qpn, &qp->sq.db); if (qp->sq.db_index < 0) { mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); return -ENOMEM; } } return 0; } static void mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); } } static int mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp) { int ret; int i; struct mthca_next_seg *next; qp->refcount = 1; init_waitqueue_head(&qp->wait); mutex_init(&qp->mutex); qp->state = IB_QPS_RESET; qp->atomic_rd_en = 0; qp->resp_depth = 0; qp->sq_policy = send_policy; mthca_wq_reset(&qp->sq); mthca_wq_reset(&qp->rq); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); ret = mthca_map_memfree(dev, qp); if (ret) return ret; ret = mthca_alloc_wqe_buf(dev, pd, qp); if (ret) { mthca_unmap_memfree(dev, qp); return ret; } mthca_adjust_qp_caps(dev, pd, qp); /* * If this is a userspace QP, we're done now. 
The doorbells * will be allocated and buffers will be initialized in * userspace. */ if (pd->ibpd.uobject) return 0; ret = mthca_alloc_memfree(dev, qp); if (ret) { mthca_free_wqe_buf(dev, qp); mthca_unmap_memfree(dev, qp); return ret; } if (mthca_is_memfree(dev)) { struct mthca_data_seg *scatter; int size = (sizeof (struct mthca_next_seg) + qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; for (i = 0; i < qp->rq.max; ++i) { next = get_recv_wqe(qp, i); next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << qp->rq.wqe_shift); next->ee_nds = cpu_to_be32(size); for (scatter = (void *) (next + 1); (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); ++scatter) scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); } for (i = 0; i < qp->sq.max; ++i) { next = get_send_wqe(qp, i); next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << qp->sq.wqe_shift) + qp->send_wqe_offset); } } else { for (i = 0; i < qp->rq.max; ++i) { next = get_recv_wqe(qp, i); next->nda_op = htonl((((i + 1) % qp->rq.max) << qp->rq.wqe_shift) | 1); } } qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); return 0; } static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) { int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); u32 max_inline_data; /* Sanity check QP size before proceeding */ if (cap->max_send_wr > dev->limits.max_wqes || cap->max_recv_wr > dev->limits.max_wqes || cap->max_send_sge > dev->limits.max_sg || cap->max_recv_sge > dev->limits.max_sg) return -EINVAL; if (pd->ibpd.uobject && cap->max_inline_data > mthca_max_inline_data(pd, max_data_size)) return -EINVAL; max_inline_data = pd->ibpd.uobject ? cap->max_inline_data : 0; /* * For MLX transport we need 2 extra send gather entries: * one for the header and one for the checksum at the end */ if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) return -EINVAL; if (mthca_is_memfree(dev)) { qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0; qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0; } else { qp->rq.max = cap->max_recv_wr; qp->sq.max = cap->max_send_wr; } qp->rq.max_gs = cap->max_recv_sge; qp->sq.max_gs = max_t(int, cap->max_send_sge, ALIGN(max_inline_data + MTHCA_INLINE_HEADER_SIZE, MTHCA_INLINE_CHUNK_SIZE) / sizeof (struct mthca_data_seg)); return 0; } int mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp) { int err; switch (type) { case IB_QPT_RC: qp->transport = RC; break; case IB_QPT_UC: qp->transport = UC; break; case IB_QPT_UD: qp->transport = UD; break; default: return -EINVAL; } err = mthca_set_qp_size(dev, cap, pd, qp); if (err) return err; qp->qpn = mthca_alloc(&dev->qp_table.alloc); if (qp->qpn == -1) return -ENOMEM; /* initialize port to zero for error-catching. 
*/ qp->port = 0; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, send_policy, qp); if (err) { mthca_free(&dev->qp_table.alloc, qp->qpn); return err; } spin_lock_irq(&dev->qp_table.lock); mthca_array_set(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1), qp); spin_unlock_irq(&dev->qp_table.lock); return 0; } static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) { if (send_cq == recv_cq) spin_lock_irq(&send_cq->lock); else if (send_cq->cqn < recv_cq->cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { spin_lock_irq(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) { if (send_cq == recv_cq) spin_unlock_irq(&send_cq->lock); else if (send_cq->cqn < recv_cq->cqn) { spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else { spin_unlock(&send_cq->lock); spin_unlock_irq(&recv_cq->lock); } } int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, int port, struct mthca_sqp *sqp) { u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; int err; sqp->qp.transport = MLX; err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); if (err) return err; sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, &sqp->header_dma, GFP_KERNEL); if (!sqp->header_buf) return -ENOMEM; spin_lock_irq(&dev->qp_table.lock); if (mthca_array_get(&dev->qp_table.qp, mqpn)) err = -EBUSY; else mthca_array_set(&dev->qp_table.qp, mqpn, sqp); spin_unlock_irq(&dev->qp_table.lock); if (err) goto err_out; sqp->qp.port = port; sqp->qp.qpn = mqpn; sqp->qp.transport = MLX; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, send_policy, &sqp->qp); if (err) goto err_out_free; atomic_inc(&pd->sqp_count); return 0; err_out_free: /* * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, mqpn); spin_unlock(&dev->qp_table.lock); mthca_unlock_cqs(send_cq, recv_cq); err_out: dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, sqp->header_buf, sqp->header_dma); return err; } static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) { int c; spin_lock_irq(&dev->qp_table.lock); c = qp->refcount; spin_unlock_irq(&dev->qp_table.lock); return c; } void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) { u8 status; struct mthca_cq *send_cq; struct mthca_cq *recv_cq; send_cq = to_mcq(qp->ibqp.send_cq); recv_cq = to_mcq(qp->ibqp.recv_cq); /* * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1)); --qp->refcount; spin_unlock(&dev->qp_table.lock); mthca_unlock_cqs(send_cq, recv_cq); wait_event(qp->wait, !get_qp_refcount(dev, qp)); if (qp->state != IB_QPS_RESET) mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, NULL, 0, &status); /* * If this is a userspace QP, the buffers, MR, CQs and so on * will be cleaned up in userspace, so all we have to do is * unref the mem-free tables and free the QPN in our table. */ if (!qp->ibqp.uobject) { mthca_cq_clean(dev, recv_cq, qp->qpn, qp->ibqp.srq ? 
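/*
 * mthca_lock_cqs()/mthca_unlock_cqs() above prevent AB/BA deadlock by
 * imposing a total order on the two CQ locks: when the CQs differ, the
 * lower cqn is always locked first (the equal case takes one lock).
 * Generic sketch of the idiom, assuming the kernel spinlock API used
 * in this file (hypothetical helper):
 */
static void lock_pair_ordered(spinlock_t *a, u32 a_id,
			      spinlock_t *b, u32 b_id)
{
	if (a_id < b_id) {
		spin_lock_irq(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}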
to_msrq(qp->ibqp.srq) : NULL); if (send_cq != recv_cq) mthca_cq_clean(dev, send_cq, qp->qpn, NULL); mthca_free_memfree(dev, qp); mthca_free_wqe_buf(dev, qp); } mthca_unmap_memfree(dev, qp); if (is_sqp(dev, qp)) { atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); dma_free_coherent(&dev->pdev->dev, to_msqp(qp)->header_buf_size, to_msqp(qp)->header_buf, to_msqp(qp)->header_dma); } else mthca_free(&dev->qp_table.alloc, qp->qpn); } /* Create UD header for an MLX send and build a data segment for it */ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, int ind, struct ib_send_wr *wr, struct mthca_mlx_seg *mlx, struct mthca_data_seg *data) { int header_size; int err; u16 pkey; ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0, &sqp->ud_header); err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); if (err) return err; mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | (sqp->ud_header.lrh.service_level << 8)); mlx->rlid = sqp->ud_header.lrh.destination_lid; mlx->vcrc = 0; switch (wr->opcode) { case IB_WR_SEND: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; break; case IB_WR_SEND_WITH_IMM: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; sqp->ud_header.immediate_present = 1; sqp->ud_header.immediate_data = wr->ex.imm_data; break; default: return -EINVAL; } sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); else ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 
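/* IBA rule: a Q_Key whose most significant bit is set means "use the Q_Key from the QP context", hence the fallback to sqp->qkey */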
sqp->qkey : wr->wr.ud.remote_qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf + ind * MTHCA_UD_HEADER_SIZE); data->byte_count = cpu_to_be32(header_size); data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); data->addr = cpu_to_be64(sqp->header_dma + ind * MTHCA_UD_HEADER_SIZE); return 0; } static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, struct ib_cq *ib_cq) { unsigned cur; struct mthca_cq *cq; cur = wq->head - wq->tail; if (likely(cur + nreq < wq->max)) return 0; cq = to_mcq(ib_cq); spin_lock(&cq->lock); cur = wq->head - wq->tail; spin_unlock(&cq->lock); return cur + nreq >= wq->max; } static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, u64 remote_addr, u32 rkey) { rseg->raddr = cpu_to_be64(remote_addr); rseg->rkey = cpu_to_be32(rkey); rseg->reserved = 0; } static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, struct ib_send_wr *wr) { if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); } else { aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); aseg->compare = 0; } } static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, struct ib_send_wr *wr) { useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key); useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); } static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, struct ib_send_wr *wr) { memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); } int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); void *wqe; void *prev_wqe; unsigned long flags; int err = 0; int nreq; int i; int size; /* * f0 and size0 are only used if nreq != 0, and they will * always be initialized the first time through the main loop * before nreq is incremented. So nreq cannot become non-zero * without initializing f0 and size0, and they are in fact * never used uninitialized. */ int uninitialized_var(size0); u32 uninitialized_var(f0); int ind; u8 op0 = 0; spin_lock_irqsave(&qp->sq.lock, flags); /* XXX check that state is OK to post send */ ind = qp->sq.next_ind; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_err(dev, "SQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->sq.head, qp->sq.tail, qp->sq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_send_wqe(qp, ind); prev_wqe = qp->sq.last; qp->sq.last = wqe; ((struct mthca_next_seg *) wqe)->nda_op = 0; ((struct mthca_next_seg *) wqe)->ee_nds = 0; ((struct mthca_next_seg *) wqe)->flags = ((wr->send_flags & IB_SEND_SIGNALED) ? cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | ((wr->send_flags & IB_SEND_SOLICITED) ? 
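/* the solicited-event bit lets the remote end wake only consumers that armed their CQ with IB_CQ_SOLICITED */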
cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | cpu_to_be32(1); if (wr->opcode == IB_WR_SEND_WITH_IMM || wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; switch (qp->transport) { case RC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mthca_raddr_seg); set_atomic_seg(wqe, wr); wqe += sizeof (struct mthca_atomic_seg); size += (sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_atomic_seg)) / 16; break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_READ: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UC: switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UD: set_tavor_ud_seg(wqe, wr); wqe += sizeof (struct mthca_tavor_ud_seg); size += sizeof (struct mthca_tavor_ud_seg) / 16; break; case MLX: err = build_mlx_header(dev, to_msqp(qp), ind, wr, wqe - sizeof (struct mthca_next_seg), wqe); if (err) { *bad_wr = wr; goto out; } wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; break; } if (wr->num_sge > qp->sq.max_gs) { mthca_err(dev, "too many gathers\n"); err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } /* Add one more inline data segment for ICRC */ if (qp->transport == MLX) { ((struct mthca_data_seg *) wqe)->byte_count = - cpu_to_be32((1 << 31) | 4); + cpu_to_be32((1U << 31) | 4); ((u32 *) wqe)[1] = 0; wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind] = wr->wr_id; if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { mthca_err(dev, "opcode invalid\n"); err = -EINVAL; *bad_wr = wr; goto out; } ((struct mthca_next_seg *) prev_wqe)->nda_op = cpu_to_be32(((ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | mthca_opcode[wr->opcode]); wmb(); ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | ((wr->send_flags & IB_SEND_FENCE) ? MTHCA_NEXT_FENCE : 0)); if (!nreq) { size0 = size; op0 = mthca_opcode[wr->opcode]; f0 = wr->send_flags & IB_SEND_FENCE ? 
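/* f0 latches the fence flag of the first WR; it is OR-ed into the send doorbell below */
/*
 * Editor's note on the change in this hunk: (1 << 31) shifts into the
 * sign bit of a 32-bit signed int, which is undefined behaviour in
 * standard C; (1U << 31) performs the shift on an unsigned int and is
 * well defined.  A minimal standalone illustration (not driver code):
 *
 *     u32 icrc = (1U << 31) | 4;     yields 0x80000004 as intended
 *
 * The same one-character fix recurs in the Arbel send path and in
 * mlx4_QP_ATTACH() further down.
 */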
MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; if (unlikely(ind >= qp->sq.max)) ind -= qp->sq.max; } out: if (likely(nreq)) { wmb(); mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | f0 | op0, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); /* * Make sure doorbells don't leak out of SQ spinlock * and reach the HCA out of order: */ mmiowb(); } qp->sq.next_ind = ind; qp->sq.head += nreq; spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); unsigned long flags; int err = 0; int nreq; int i; int size; /* * size0 is only used if nreq != 0, and it will always be * initialized the first time through the main loop before * nreq is incremented. So nreq cannot become non-zero * without initializing size0, and it is in fact never used * uninitialized. */ int uninitialized_var(size0); int ind; void *wqe; void *prev_wqe; spin_lock_irqsave(&qp->rq.lock, flags); /* XXX check that state is OK to post receive */ ind = qp->rq.next_ind; for (nreq = 0; wr; wr = wr->next) { if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_err(dev, "RQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->rq.head, qp->rq.tail, qp->rq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_recv_wqe(qp, ind); prev_wqe = qp->rq.last; qp->rq.last = wqe; ((struct mthca_next_seg *) wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD); ((struct mthca_next_seg *) wqe)->flags = 0; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind + qp->sq.max] = wr->wr_id; ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD | size); if (!nreq) size0 = size; ++ind; if (unlikely(ind >= qp->rq.max)) ind -= qp->rq.max; ++nreq; if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { nreq = 0; wmb(); mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); qp->rq.next_ind = ind; qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; } } out: if (likely(nreq)) { wmb(); mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } qp->rq.next_ind = ind; qp->rq.head += nreq; /* * Make sure doorbells don't leak out of RQ spinlock and reach * the HCA out of order: */ mmiowb(); spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); u32 dbhi; void *wqe; void *prev_wqe; unsigned long flags; int err = 0; int nreq; int i; int size; /* * f0 and size0 are only used if nreq != 0, and they will * always be initialized the first time through the main loop * before nreq is incremented. So nreq cannot become non-zero * without initializing f0 and size0, and they are in fact * never used uninitialized. 
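 *
 * (uninitialized_var() only documents that this is deliberate; in
 * kernels of this vintage it expands to something like "x = x" so the
 * compiler's maybe-uninitialized warning stays quiet; that expansion
 * is an assumption worth checking against the local headers.)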
*/ int uninitialized_var(size0); u32 uninitialized_var(f0); int ind; u8 op0 = 0; spin_lock_irqsave(&qp->sq.lock, flags); /* XXX check that state is OK to post send */ ind = qp->sq.head & (qp->sq.max - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { nreq = 0; dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); /* * Make sure doorbell record is written before we * write MMIO send doorbell. */ wmb(); mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_err(dev, "SQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->sq.head, qp->sq.tail, qp->sq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_send_wqe(qp, ind); prev_wqe = qp->sq.last; qp->sq.last = wqe; ((struct mthca_next_seg *) wqe)->flags = ((wr->send_flags & IB_SEND_SIGNALED) ? cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | ((wr->send_flags & IB_SEND_SOLICITED) ? cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | ((wr->send_flags & IB_SEND_IP_CSUM) ? cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) | cpu_to_be32(1); if (wr->opcode == IB_WR_SEND_WITH_IMM || wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; switch (qp->transport) { case RC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mthca_raddr_seg); set_atomic_seg(wqe, wr); wqe += sizeof (struct mthca_atomic_seg); size += (sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_atomic_seg)) / 16; break; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UC: switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UD: set_arbel_ud_seg(wqe, wr); wqe += sizeof (struct mthca_arbel_ud_seg); size += sizeof (struct mthca_arbel_ud_seg) / 16; break; case MLX: err = build_mlx_header(dev, to_msqp(qp), ind, wr, wqe - sizeof (struct mthca_next_seg), wqe); if (err) { *bad_wr = wr; goto out; } wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; break; } if (wr->num_sge > qp->sq.max_gs) { mthca_err(dev, "too many gathers\n"); err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } /* Add one more inline data segment for ICRC */ if (qp->transport == MLX) { ((struct mthca_data_seg *) wqe)->byte_count = - cpu_to_be32((1 << 31) | 4); + cpu_to_be32((1U << 31) | 4); ((u32 *) wqe)[1] = 0; wqe += sizeof (struct mthca_data_seg); size += sizeof (struct 
mthca_data_seg) / 16; } qp->wrid[ind] = wr->wr_id; if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { mthca_err(dev, "opcode invalid\n"); err = -EINVAL; *bad_wr = wr; goto out; } ((struct mthca_next_seg *) prev_wqe)->nda_op = cpu_to_be32(((ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | mthca_opcode[wr->opcode]); wmb(); ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD | size | ((wr->send_flags & IB_SEND_FENCE) ? MTHCA_NEXT_FENCE : 0)); if (!nreq) { size0 = size; op0 = mthca_opcode[wr->opcode]; f0 = wr->send_flags & IB_SEND_FENCE ? MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; if (unlikely(ind >= qp->sq.max)) ind -= qp->sq.max; } out: if (likely(nreq)) { dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; qp->sq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); /* * Make sure doorbell record is written before we * write MMIO send doorbell. */ wmb(); mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } /* * Make sure doorbells don't leak out of SQ spinlock and reach * the HCA out of order: */ mmiowb(); spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); unsigned long flags; int err = 0; int nreq; int ind; int i; void *wqe; spin_lock_irqsave(&qp->rq.lock, flags); /* XXX check that state is OK to post receive */ ind = qp->rq.head & (qp->rq.max - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_err(dev, "RQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->rq.head, qp->rq.tail, qp->rq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_recv_wqe(qp, ind); ((struct mthca_next_seg *) wqe)->flags = 0; wqe += sizeof (struct mthca_next_seg); if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); } if (i < qp->rq.max_gs) mthca_set_data_seg_inval(wqe); qp->wrid[ind + qp->sq.max] = wr->wr_id; ++ind; if (unlikely(ind >= qp->rq.max)) ind -= qp->rq.max; } out: if (likely(nreq)) { qp->rq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); } spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, int index, int *dbd, __be32 *new_wqe) { struct mthca_next_seg *next; /* * For SRQs, all receive WQEs generate a CQE, so we're always * at the end of the doorbell chain. */ if (qp->ibqp.srq && !is_send) { *new_wqe = 0; return; } if (is_send) next = get_send_wqe(qp, index); else next = get_recv_wqe(qp, index); *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); if (next->ee_nds & cpu_to_be32(0x3f)) *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | (next->ee_nds & cpu_to_be32(0x3f)); else *new_wqe = 0; } int mthca_init_qp_table(struct mthca_dev *dev) { int err; u8 status; int i; spin_lock_init(&dev->qp_table.lock); /* * We reserve 2 extra QPs per port for the special QPs. The * special QP for port 1 has to be even, so round up. 
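 * As a worked example, (reserved_qps + 1) & ~1 is the smallest even
 * number >= reserved_qps: 16 stays 16, 17 becomes 18.  The four
 * special QPs then occupy sqp_start + 0..3 (SMI port 1, SMI port 2,
 * GSI port 1, GSI port 2), matching the mqpn formula
 * qpn * 2 + sqp_start + port - 1 used in mthca_alloc_sqp() above.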
*/ dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; err = mthca_alloc_init(&dev->qp_table.alloc, dev->limits.num_qps, (1 << 24) - 1, dev->qp_table.sqp_start + MTHCA_MAX_PORTS * 2); if (err) return err; err = mthca_array_init(&dev->qp_table.qp, dev->limits.num_qps); if (err) { mthca_alloc_cleanup(&dev->qp_table.alloc); return err; } for (i = 0; i < 2; ++i) { err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, dev->qp_table.sqp_start + i * 2, &status); if (err) goto err_out; if (status) { mthca_warn(dev, "CONF_SPECIAL_QP returned " "status %02x, aborting.\n", status); err = -EINVAL; goto err_out; } } return 0; err_out: for (i = 0; i < 2; ++i) mthca_CONF_SPECIAL_QP(dev, i, 0, &status); mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_alloc_cleanup(&dev->qp_table.alloc); return err; } void mthca_cleanup_qp_table(struct mthca_dev *dev) { int i; u8 status; for (i = 0; i < 2; ++i) mthca_CONF_SPECIAL_QP(dev, i, 0, &status); mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_alloc_cleanup(&dev->qp_table.alloc); } Index: head/sys/ofed/drivers/net/mlx4/mcg.c =================================================================== --- head/sys/ofed/drivers/net/mlx4/mcg.c (revision 258779) +++ head/sys/ofed/drivers/net/mlx4/mcg.c (revision 258780) @@ -1,1426 +1,1426 @@ /* * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include #include #include "mlx4.h" static const u8 zero_gid[16]; /* automatically initialized to 0 */ int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) { return 1 << dev->oper_log_mgm_entry_size; } int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) { return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); } static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, u32 size, u64 *reg_id) { u64 imm; int err = 0; err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; *reg_id = imm; return err; } static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) { int err = 0; err = mlx4_cmd(dev, regid, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); return err; } static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) { return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) { return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, struct mlx4_cmd_mailbox *mailbox) { u32 in_mod; in_mod = (u32) port << 16 | steer << 1; return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, u16 *hash, u8 op_mod) { u64 imm; int err; err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (!err) *hash = imm; return err; } static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) { struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1]; struct mlx4_promisc_qp *pqp; list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { if (pqp->qpn == qpn) return pqp; } /* not found */ return NULL; } /* * Add new entry to steering data structure. 
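 * (editor's note: "index" is the MGM/AMGM slot the caller has just
 * written for a brand-new multicast address.)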
* All promisc QPs should be added as well */ static int new_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { struct mlx4_steer *s_steer; struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; u32 members_count; struct mlx4_steer_index *new_entry; struct mlx4_promisc_qp *pqp; struct mlx4_promisc_qp *dqp = NULL; u32 prot; int err; s_steer = &mlx4_priv(dev)->steer[port - 1]; new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); if (!new_entry) return -ENOMEM; INIT_LIST_HEAD(&new_entry->duplicates); new_entry->index = index; list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); /* If the given qpn is also a promisc qp, * it should be inserted to duplicates list */ pqp = get_promisc_qp(dev, port, steer, qpn); if (pqp) { dqp = kmalloc(sizeof *dqp, GFP_KERNEL); if (!dqp) { err = -ENOMEM; goto out_alloc; } dqp->qpn = qpn; list_add_tail(&dqp->list, &new_entry->duplicates); } /* if no promisc qps for this vep, we are done */ if (list_empty(&s_steer->promisc_qps[steer])) return 0; /* now need to add all the promisc qps to the new * steering entry, as they should also receive the packets * destined to this address */ mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = -ENOMEM; goto out_alloc; } mgm = mailbox->buf; err = mlx4_READ_ENTRY(dev, index, mailbox); if (err) goto out_mailbox; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; prot = be32_to_cpu(mgm->members_count) >> 30; list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { /* don't add already existing qpn */ if (pqp->qpn == qpn) continue; if (members_count == dev->caps.num_qp_per_mgm) { /* out of space */ err = -ENOMEM; goto out_mailbox; } /* add the qpn */ mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); } /* update the qps count and update the entry with all the promisc qps*/ mgm->members_count = cpu_to_be32(members_count | (prot << 30)); err = mlx4_WRITE_ENTRY(dev, index, mailbox); out_mailbox: mlx4_free_cmd_mailbox(dev, mailbox); if (!err) return 0; out_alloc: if (dqp) { list_del(&dqp->list); kfree(dqp); } list_del(&new_entry->list); kfree(new_entry); return err; } /* update the data structures with existing steering entry */ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { struct mlx4_steer *s_steer; struct mlx4_steer_index *tmp_entry, *entry = NULL; struct mlx4_promisc_qp *pqp; struct mlx4_promisc_qp *dqp; s_steer = &mlx4_priv(dev)->steer[port - 1]; pqp = get_promisc_qp(dev, port, steer, qpn); if (!pqp) return 0; /* nothing to do */ list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { if (tmp_entry->index == index) { entry = tmp_entry; break; } } if (unlikely(!entry)) { mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); return -EINVAL; } /* the given qpn is listed as a promisc qpn * we need to add it as a duplicate to this entry * for future references */ list_for_each_entry(dqp, &entry->duplicates, list) { if (qpn == pqp->qpn) return 0; /* qp is already duplicated */ } /* add the qp as a duplicate on this index */ dqp = kmalloc(sizeof *dqp, GFP_KERNEL); if (!dqp) return -ENOMEM; dqp->qpn = qpn; list_add_tail(&dqp->list, &entry->duplicates); return 0; } /* Check whether a qpn is a duplicate on steering entry * If so, it should not be removed from mgm */ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn) { struct mlx4_steer *s_steer; 
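/*
 * Editor's sketch of the bookkeeping used below, reconstructed from
 * the code rather than from documentation:
 *
 *   struct mlx4_steer
 *       .steer_entries[steer]   list of struct mlx4_steer_index
 *                                   .index       MGM/AMGM slot
 *                                   .duplicates  promisc QPs that are
 *                                                also attached explicitly
 *       .promisc_qps[steer]     list of struct mlx4_promisc_qp (.qpn)
 */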
struct mlx4_steer_index *tmp_entry, *entry = NULL; struct mlx4_promisc_qp *dqp, *tmp_dqp; s_steer = &mlx4_priv(dev)->steer[port - 1]; /* if qp is not promisc, it cannot be duplicated */ if (!get_promisc_qp(dev, port, steer, qpn)) return false; /* The qp is a promisc qp, so it is a duplicate on this index * Find the index entry, and remove the duplicate */ list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { if (tmp_entry->index == index) { entry = tmp_entry; break; } } if (unlikely(!entry)) { mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); return false; } list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { if (dqp->qpn == qpn) { list_del(&dqp->list); kfree(dqp); } } return true; } /* If a steering entry contains only promisc QPs, it can be removed. */ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 tqpn) { struct mlx4_steer *s_steer; struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; struct mlx4_steer_index *entry = NULL, *tmp_entry; u32 qpn; u32 members_count; bool ret = false; int i; s_steer = &mlx4_priv(dev)->steer[port - 1]; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return false; mgm = mailbox->buf; if (mlx4_READ_ENTRY(dev, index, mailbox)) goto out; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; for (i = 0; i < members_count; i++) { qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { /* the qp is not promisc, the entry can't be removed */ goto out; } } /* All the qps currently registered for this entry are promiscuous; * check for duplicates */ ret = true; list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { if (entry->index == index) { if (list_empty(&entry->duplicates) || members_count == 1) { struct mlx4_promisc_qp *pqp, *tmp_pqp; /* * If there is only 1 entry in duplicates then * this is the QP we want to delete, going over * the list and deleting the entry.
*/ list_del(&entry->list); list_for_each_entry_safe(pqp, tmp_pqp, &entry->duplicates, list) { list_del(&pqp->list); kfree(pqp); } kfree(entry); } else { /* This entry contains duplicates so it shouldn't be removed */ ret = false; goto out; } } } out: mlx4_free_cmd_mailbox(dev, mailbox); return ret; } static int add_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) { struct mlx4_steer *s_steer; struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; struct mlx4_steer_index *entry; struct mlx4_promisc_qp *pqp; struct mlx4_promisc_qp *dqp; u32 members_count; u32 prot; int i; bool found; int err; struct mlx4_priv *priv = mlx4_priv(dev); s_steer = &mlx4_priv(dev)->steer[port - 1]; mutex_lock(&priv->mcg_table.mutex); if (get_promisc_qp(dev, port, steer, qpn)) { err = 0; /* Nothing to do, it already exists */ goto out_mutex; } pqp = kmalloc(sizeof *pqp, GFP_KERNEL); if (!pqp) { err = -ENOMEM; goto out_mutex; } pqp->qpn = qpn; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = -ENOMEM; goto out_alloc; } mgm = mailbox->buf; /* the promisc qp needs to be added to each one of the steering * entries; if it already exists there, it needs to be added as a duplicate * for that entry */ list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { err = mlx4_READ_ENTRY(dev, entry->index, mailbox); if (err) goto out_mailbox; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; prot = be32_to_cpu(mgm->members_count) >> 30; found = false; for (i = 0; i < members_count; i++) { if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { /* Entry already exists, add to duplicates */ dqp = kmalloc(sizeof *dqp, GFP_KERNEL); if (!dqp) { err = -ENOMEM; goto out_mailbox; } dqp->qpn = qpn; list_add_tail(&dqp->list, &entry->duplicates); found = true; } } if (!found) { /* Need to add the qpn to mgm */ if (members_count == dev->caps.num_qp_per_mgm) { /* entry is full */ err = -ENOMEM; goto out_mailbox; } mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); mgm->members_count = cpu_to_be32(members_count | (prot << 30)); err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); if (err) goto out_mailbox; } } /* add the new qpn to list of promisc qps */ list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); /* now need to add all the promisc qps to default entry */ memset(mgm, 0, sizeof *mgm); members_count = 0; list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { if (members_count == dev->caps.num_qp_per_mgm) { /* entry is full */ err = -ENOMEM; goto out_list; } mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); } mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); if (err) goto out_list; mlx4_free_cmd_mailbox(dev, mailbox); mutex_unlock(&priv->mcg_table.mutex); return 0; out_list: list_del(&pqp->list); out_mailbox: mlx4_free_cmd_mailbox(dev, mailbox); out_alloc: kfree(pqp); out_mutex: mutex_unlock(&priv->mcg_table.mutex); return err; } static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_steer *s_steer; struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; struct mlx4_steer_index *entry; struct mlx4_promisc_qp *pqp; struct mlx4_promisc_qp *dqp; u32 members_count; bool found; bool back_to_list = false; int i, loc = -1; int err; s_steer = &mlx4_priv(dev)->steer[port - 1]; mutex_lock(&priv->mcg_table.mutex); pqp = get_promisc_qp(dev, port, steer, qpn); if (unlikely(!pqp)) {
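/* not on the promisc list: warn but report success, since the end state is what the caller asked for */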
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); /* nothing to do */ err = 0; goto out_mutex; } /*remove from list of promisc qps */ list_del(&pqp->list); /* set the default entry not to include the removed one */ mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = -ENOMEM; back_to_list = true; goto out_list; } mgm = mailbox->buf; memset(mgm, 0, sizeof *mgm); members_count = 0; list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); if (err) goto out_mailbox; /* remove the qp from all the steering entries*/ list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { found = false; list_for_each_entry(dqp, &entry->duplicates, list) { if (dqp->qpn == qpn) { found = true; break; } } if (found) { /* a duplicate, no need to change the mgm, * only update the duplicates list */ list_del(&dqp->list); kfree(dqp); } else { err = mlx4_READ_ENTRY(dev, entry->index, mailbox); if (err) goto out_mailbox; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; for (i = 0; i < members_count; ++i) if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { loc = i; break; } if (loc < 0) { mlx4_err(dev, "QP %06x wasn't found in entry %d\n", qpn, entry->index); err = -EINVAL; goto out_mailbox; } /* copy the last QP in this MGM over removed QP */ mgm->qp[loc] = mgm->qp[members_count - 1]; mgm->qp[members_count - 1] = 0; mgm->members_count = cpu_to_be32(--members_count | (MLX4_PROT_ETH << 30)); err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); if (err) goto out_mailbox; } } out_mailbox: mlx4_free_cmd_mailbox(dev, mailbox); out_list: if (back_to_list) list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); else kfree(pqp); out_mutex: mutex_unlock(&priv->mcg_table.mutex); return err; } /* * Caller must hold MCG table semaphore. gid and mgm parameters must * be properly aligned for command interface. * * Returns 0 unless a firmware command error occurs. * * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 * and *mgm holds MGM entry. * * if GID is found in AMGM, *index = index in AMGM, *prev = index of * previous entry in hash chain and *mgm holds AMGM entry. * * If no AMGM exists for given gid, *index = -1, *prev = index of last * entry in hash chain and *mgm holds end of hash chain. */ static int find_entry(struct mlx4_dev *dev, u8 port, u8 *gid, enum mlx4_protocol prot, struct mlx4_cmd_mailbox *mgm_mailbox, int *prev, int *index) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm = mgm_mailbox->buf; u8 *mgid; int err; u16 hash; u8 op_mod = (prot == MLX4_PROT_ETH) ? 
!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return -ENOMEM; mgid = mailbox->buf; memcpy(mgid, gid, 16); err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); mlx4_free_cmd_mailbox(dev, mailbox); if (err) return err; if (0) mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); *index = hash; *prev = -1; do { err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); if (err) return err; if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { if (*index != hash) { mlx4_err(dev, "Found zero MGID in AMGM.\n"); err = -EINVAL; } return err; } if (!memcmp(mgm->gid, gid, 16) && be32_to_cpu(mgm->members_count) >> 30 == prot) return err; *prev = *index; *index = be32_to_cpu(mgm->next_gid_index) >> 6; } while (*index); *index = -1; return err; } static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, struct mlx4_net_trans_rule_hw_ctrl *hw) { static const u8 __promisc_mode[] = { [MLX4_FS_REGULAR] = 0x0, [MLX4_FS_ALL_DEFAULT] = 0x1, [MLX4_FS_MC_DEFAULT] = 0x3, [MLX4_FS_UC_SNIFFER] = 0x4, [MLX4_FS_MC_SNIFFER] = 0x5, }; u32 dw = 0; dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; dw |= ctrl->exclusive ? (1 << 2) : 0; dw |= ctrl->allow_loopback ? (1 << 3) : 0; dw |= __promisc_mode[ctrl->promisc_mode] << 8; dw |= ctrl->priority << 16; hw->ctrl = cpu_to_be32(dw); hw->port = ctrl->port; hw->qpn = cpu_to_be32(ctrl->qpn); } const u16 __sw_id_hw[] = { [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 }; static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, struct _rule_hw *rule_hw) { static const size_t __rule_hw_sz[] = { [MLX4_NET_TRANS_RULE_ID_ETH] = sizeof(struct mlx4_net_trans_rule_hw_eth), [MLX4_NET_TRANS_RULE_ID_IB] = sizeof(struct mlx4_net_trans_rule_hw_ib), [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, [MLX4_NET_TRANS_RULE_ID_IPV4] = sizeof(struct mlx4_net_trans_rule_hw_ipv4), [MLX4_NET_TRANS_RULE_ID_TCP] = sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), [MLX4_NET_TRANS_RULE_ID_UDP] = sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) }; if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { mlx4_err(dev, "Invalid network rule id. 
id = %d\n", spec->id); return -EINVAL; } memset(rule_hw, 0, __rule_hw_sz[spec->id]); rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); rule_hw->size = __rule_hw_sz[spec->id] >> 2; switch (spec->id) { case MLX4_NET_TRANS_RULE_ID_ETH: memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, ETH_ALEN); memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, ETH_ALEN); if (spec->eth.ether_type_enable) { rule_hw->eth.ether_type_enable = 1; rule_hw->eth.ether_type = spec->eth.ether_type; } rule_hw->eth.vlan_id = spec->eth.vlan_id; rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; break; case MLX4_NET_TRANS_RULE_ID_IB: rule_hw->ib.r_u_qpn = spec->ib.r_u_qpn; rule_hw->ib.qpn_mask = spec->ib.qpn_msk; memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); break; case MLX4_NET_TRANS_RULE_ID_IPV6: return -EOPNOTSUPP; case MLX4_NET_TRANS_RULE_ID_IPV4: rule_hw->ipv4.src_ip = spec->ipv4.src_ip; rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; break; case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; break; default: return -EINVAL; } return __rule_hw_sz[spec->id]; } static void mlx4_err_rule(struct mlx4_dev *dev, char *str, struct mlx4_net_trans_rule *rule) { #define BUF_SIZE 256 struct mlx4_spec_list *cur; char buf[BUF_SIZE]; int len = 0; mlx4_err(dev, "%s", str); len += snprintf(buf + len, BUF_SIZE - len, "port = %d prio = 0x%x qp = 0x%x ", rule->port, rule->priority, rule->qpn); list_for_each_entry(cur, &rule->list, list) { switch (cur->id) { case MLX4_NET_TRANS_RULE_ID_ETH: len += snprintf(buf + len, BUF_SIZE - len, "dmac = %pM ", &cur->eth.dst_mac); if (cur->eth.ether_type) len += snprintf(buf + len, BUF_SIZE - len, "ethertype = 0x%x ", be16_to_cpu(cur->eth.ether_type)); if (cur->eth.vlan_id) len += snprintf(buf + len, BUF_SIZE - len, "vlan-id = %d ", be16_to_cpu(cur->eth.vlan_id)); break; case MLX4_NET_TRANS_RULE_ID_IPV4: if (cur->ipv4.src_ip) len += snprintf(buf + len, BUF_SIZE - len, "src-ip = %pI4 ", &cur->ipv4.src_ip); if (cur->ipv4.dst_ip) len += snprintf(buf + len, BUF_SIZE - len, "dst-ip = %pI4 ", &cur->ipv4.dst_ip); break; case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: if (cur->tcp_udp.src_port) len += snprintf(buf + len, BUF_SIZE - len, "src-port = %d ", be16_to_cpu(cur->tcp_udp.src_port)); if (cur->tcp_udp.dst_port) len += snprintf(buf + len, BUF_SIZE - len, "dst-port = %d ", be16_to_cpu(cur->tcp_udp.dst_port)); break; case MLX4_NET_TRANS_RULE_ID_IB: len += snprintf(buf + len, BUF_SIZE - len, "dst-gid = %pI6\n", cur->ib.dst_gid); len += snprintf(buf + len, BUF_SIZE - len, "dst-gid-mask = %pI6\n", cur->ib.dst_gid_msk); break; case MLX4_NET_TRANS_RULE_ID_IPV6: break; default: break; } } len += snprintf(buf + len, BUF_SIZE - len, "\n"); mlx4_err(dev, "%s", buf); if (len >= BUF_SIZE) mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); } int mlx4_flow_attach(struct mlx4_dev *dev, struct mlx4_net_trans_rule *rule, u64 *reg_id) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_spec_list *cur; u32 size = 0; int ret; mailbox = 
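/* command mailboxes are DMA-able buffers handed to firmware; IS_ERR()/PTR_ERR() is the failure convention for their allocator */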
mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); trans_rule_ctrl_to_hw(rule, mailbox->buf); size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); list_for_each_entry(cur, &rule->list, list) { ret = parse_trans_rule(dev, cur, mailbox->buf + size); if (ret < 0) { mlx4_free_cmd_mailbox(dev, mailbox); return -EINVAL; } size += ret; } ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); if (ret == -ENOMEM) mlx4_err_rule(dev, "mcg table is full. Fail to register network rule.\n", rule); else if (ret) mlx4_err_rule(dev, "Fail to register network rule.\n", rule); mlx4_free_cmd_mailbox(dev, mailbox); return ret; } EXPORT_SYMBOL_GPL(mlx4_flow_attach); int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) { int err; err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); if (err) mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", reg_id); return err; } EXPORT_SYMBOL_GPL(mlx4_flow_detach); int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn) { int err; u64 in_param; in_param = ((u64) min_range_qpn) << 32; in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; err = mlx4_cmd(dev, in_param, 0, 0, MLX4_FLOW_STEERING_IB_UC_QP_RANGE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); return err; } EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; u32 members_count; int index, prev; int link = 0; int i; int err; u8 port = gid[5]; u8 new_entry = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&priv->mcg_table.mutex); err = find_entry(dev, port, gid, prot, mailbox, &prev, &index); if (err) goto out; if (index != -1) { if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { new_entry = 1; memcpy(mgm->gid, gid, 16); } } else { link = 1; index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); if (index == -1) { mlx4_err(dev, "No AMGM entries left\n"); err = -ENOMEM; goto out; } index += dev->caps.num_mgms; new_entry = 1; memset(mgm, 0, sizeof *mgm); memcpy(mgm->gid, gid, 16); } members_count = be32_to_cpu(mgm->members_count) & 0xffffff; if (members_count == dev->caps.num_qp_per_mgm) { mlx4_err(dev, "MGM at index %x is full.\n", index); err = -ENOMEM; goto out; } for (i = 0; i < members_count; ++i) if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); err = 0; goto out; } mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | (!!mlx4_blck_lb << MGM_BLCK_LB_BIT)); mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); err = mlx4_WRITE_ENTRY(dev, index, mailbox); if (err) goto out; if (!link) goto out; err = mlx4_READ_ENTRY(dev, prev, mailbox); if (err) goto out; mgm->next_gid_index = cpu_to_be32(index << 6); err = mlx4_WRITE_ENTRY(dev, prev, mailbox); if (err) goto out; if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) new_steering_entry(dev, port, steer, index, qp->qpn); else existing_steering_entry(dev, port, steer, index, qp->qpn); } out: if (err && link && index != -1) { if (index < dev->caps.num_mgms) mlx4_warn(dev, "Got AMGM index %d < %d", index, dev->caps.num_mgms); else 
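/* index >= num_mgms means the entry came from the AMGM bitmap, so return it there */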
mlx4_bitmap_free(&priv->mcg_table.bitmap, index - dev->caps.num_mgms); } mutex_unlock(&priv->mcg_table.mutex); mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot, enum mlx4_steer_type steer) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; u32 members_count; int prev, index; int i, loc = -1; int err; u8 port = gid[5]; bool removed_entry = false; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&priv->mcg_table.mutex); err = find_entry(dev, port, gid, prot, mailbox, &prev, &index); if (err) goto out; if (index == -1) { mlx4_err(dev, "MGID %pI6 not found\n", gid); err = -EINVAL; goto out; } /* if this qp is also a promisc qp, it shouldn't be removed */ if (prot == MLX4_PROT_ETH && check_duplicate_entry(dev, port, steer, index, qp->qpn)) goto out; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; for (i = 0; i < members_count; ++i) if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { loc = i; break; } if (loc == -1) { mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); err = -EINVAL; goto out; } /* copy the last QP in this MGM over removed QP */ mgm->qp[loc] = mgm->qp[members_count - 1]; mgm->qp[members_count - 1] = 0; mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); if (prot == MLX4_PROT_ETH) removed_entry = can_remove_steering_entry(dev, port, steer, index, qp->qpn); if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) { err = mlx4_WRITE_ENTRY(dev, index, mailbox); goto out; } /* We are going to delete the entry; members count should be 0 */ mgm->members_count = cpu_to_be32((u32) prot << 30); if (prev == -1) { /* Remove entry from MGM */ int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; if (amgm_index) { err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); if (err) goto out; } else memset(mgm->gid, 0, 16); err = mlx4_WRITE_ENTRY(dev, index, mailbox); if (err) goto out; if (amgm_index) { if (amgm_index < dev->caps.num_mgms) mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", index, amgm_index, dev->caps.num_mgms); else mlx4_bitmap_free(&priv->mcg_table.bitmap, amgm_index - dev->caps.num_mgms); } } else { /* Remove entry from AMGM */ int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; err = mlx4_READ_ENTRY(dev, prev, mailbox); if (err) goto out; mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); err = mlx4_WRITE_ENTRY(dev, prev, mailbox); if (err) goto out; if (index < dev->caps.num_mgms) mlx4_warn(dev, "entry %d had next AMGM index %d < %d", prev, index, dev->caps.num_mgms); else mlx4_bitmap_free(&priv->mcg_table.bitmap, index - dev->caps.num_mgms); } out: mutex_unlock(&priv->mcg_table.mutex); mlx4_free_cmd_mailbox(dev, mailbox); return err; } static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 attach, u8 block_loopback, enum mlx4_protocol prot) { struct mlx4_cmd_mailbox *mailbox; int err = 0; int qpn; if (!mlx4_is_mfunc(dev)) return -EBADF; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); memcpy(mailbox->buf, gid, 16); qpn = qp->qpn; qpn |= (prot << 28); if (attach && block_loopback) - qpn |= (1 << 31); + qpn |= (1U << 31); err = mlx4_cmd(dev, mailbox->dma, qpn, attach, MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); return err; } int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp
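/*
 * editor's note on the switch below: in MLX4_STEERING_MODE_A0 only
 * Ethernet returns early (A0 does no Ethernet steering); other
 * protocols fall through to the B0 handling on purpose.
 */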
*qp, u8 gid[16], u8 port, int block_mcast_loopback, enum mlx4_protocol prot, u64 *reg_id) { switch (dev->caps.steering_mode) { case MLX4_STEERING_MODE_A0: if (prot == MLX4_PROT_ETH) return 0; case MLX4_STEERING_MODE_B0: if (prot == MLX4_PROT_ETH) gid[7] |= (MLX4_MC_STEER << 1); if (mlx4_is_mfunc(dev)) return mlx4_QP_ATTACH(dev, qp, gid, 1, block_mcast_loopback, prot); return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, prot, MLX4_MC_STEER); case MLX4_STEERING_MODE_DEVICE_MANAGED: { struct mlx4_spec_list spec = { {NULL} }; __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); struct mlx4_net_trans_rule rule = { .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; rule.allow_loopback = !block_mcast_loopback; rule.port = port; rule.qpn = qp->qpn; INIT_LIST_HEAD(&rule.list); switch (prot) { case MLX4_PROT_ETH: spec.id = MLX4_NET_TRANS_RULE_ID_ETH; memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); break; case MLX4_PROT_IB_IPV6: spec.id = MLX4_NET_TRANS_RULE_ID_IB; memcpy(spec.ib.dst_gid, gid, 16); memset(&spec.ib.dst_gid_msk, 0xff, 16); break; default: return -EINVAL; } list_add_tail(&spec.list, &rule.list); return mlx4_flow_attach(dev, &rule, reg_id); } default: return -EINVAL; } } EXPORT_SYMBOL_GPL(mlx4_multicast_attach); int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot, u64 reg_id) { switch (dev->caps.steering_mode) { case MLX4_STEERING_MODE_A0: if (prot == MLX4_PROT_ETH) return 0; case MLX4_STEERING_MODE_B0: if (prot == MLX4_PROT_ETH) gid[7] |= (MLX4_MC_STEER << 1); if (mlx4_is_mfunc(dev)) return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER); case MLX4_STEERING_MODE_DEVICE_MANAGED: return mlx4_flow_detach(dev, reg_id); default: return -EINVAL; } } EXPORT_SYMBOL_GPL(mlx4_multicast_detach); int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, enum mlx4_net_trans_promisc_mode mode) { struct mlx4_net_trans_rule rule; u64 *regid_p; switch (mode) { case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: return -1; } if (*regid_p != 0) return -1; rule.promisc_mode = mode; rule.port = port; rule.qpn = qpn; INIT_LIST_HEAD(&rule.list); mlx4_err(dev, "going promisc on %x\n", port); return mlx4_flow_attach(dev, &rule, regid_p); } EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, enum mlx4_net_trans_promisc_mode mode) { int ret; u64 *regid_p; switch (mode) { case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: return -1; } if (*regid_p == 0) return -1; ret = mlx4_flow_detach(dev, *regid_p); if (ret == 0) *regid_p = 0; return ret; } EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) { if (prot == MLX4_PROT_ETH) gid[7] |= (MLX4_UC_STEER << 1); if (mlx4_is_mfunc(dev)) return mlx4_QP_ATTACH(dev, qp, gid, 1, block_mcast_loopback, prot); return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, prot, MLX4_UC_STEER); } EXPORT_SYMBOL_GPL(mlx4_unicast_attach); int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol 
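/* editor's note: the unicast attach/detach helpers below reuse the multicast MGM machinery, tagging gid[7] with MLX4_UC_STEER so the entry lands in the unicast steering table */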
prot) { if (prot == MLX4_PROT_ETH) gid[7] |= (MLX4_UC_STEER << 1); if (mlx4_is_mfunc(dev)) return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); } EXPORT_SYMBOL_GPL(mlx4_unicast_detach); int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { u32 qpn = (u32) vhcr->in_param & 0xffffffff; u8 port = vhcr->in_param >> 62; enum mlx4_steer_type steer = vhcr->in_modifier; /* Promiscuous unicast is not allowed in mfunc */ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) return 0; if (vhcr->op_modifier) return add_promisc_qp(dev, port, steer, qpn); else return remove_promisc_qp(dev, port, steer, qpn); } static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, enum mlx4_steer_type steer, u8 add, u8 port) { return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) { if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) { if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) { if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) { if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); } EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); int mlx4_init_mcg_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; /* No need for mcg_table when fw managed the mcg table*/ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) return 0; err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, dev->caps.num_amgms - 1, 0, 0); if (err) return err; mutex_init(&priv->mcg_table.mutex); return 0; } void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) { if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); } Index: head/sys/powerpc/fpu/fpu_emu.c =================================================================== --- head/sys/powerpc/fpu/fpu_emu.c (revision 258779) +++ head/sys/powerpc/fpu/fpu_emu.c (revision 258780) @@ -1,794 +1,794 @@ /* $NetBSD: fpu_emu.c,v 1.14 2005/12/11 12:18:42 christos Exp $ */ /* * Copyright 2001 Wasabi Systems, Inc. * All rights reserved. * * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)fpu.c 8.1 (Berkeley) 6/11/93 */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_hw, OID_AUTO, fpu_emu, CTLFLAG_RW, 0, "FPU emulator"); #define FPU_EMU_EVCNT_DECL(name) \ static u_int fpu_emu_evcnt_##name; \ SYSCTL_INT(_hw_fpu_emu, OID_AUTO, evcnt_##name, CTLFLAG_RD, \ &fpu_emu_evcnt_##name, 0, "") #define FPU_EMU_EVCNT_INCR(name) fpu_emu_evcnt_##name++ FPU_EMU_EVCNT_DECL(stfiwx); FPU_EMU_EVCNT_DECL(fpstore); FPU_EMU_EVCNT_DECL(fpload); FPU_EMU_EVCNT_DECL(fcmpu); FPU_EMU_EVCNT_DECL(frsp); FPU_EMU_EVCNT_DECL(fctiw); FPU_EMU_EVCNT_DECL(fcmpo); FPU_EMU_EVCNT_DECL(mtfsb1); FPU_EMU_EVCNT_DECL(fnegabs); FPU_EMU_EVCNT_DECL(mcrfs); FPU_EMU_EVCNT_DECL(mtfsb0); FPU_EMU_EVCNT_DECL(fmr); FPU_EMU_EVCNT_DECL(mtfsfi); FPU_EMU_EVCNT_DECL(fnabs); FPU_EMU_EVCNT_DECL(fabs); FPU_EMU_EVCNT_DECL(mffs); FPU_EMU_EVCNT_DECL(mtfsf); FPU_EMU_EVCNT_DECL(fctid); FPU_EMU_EVCNT_DECL(fcfid); FPU_EMU_EVCNT_DECL(fdiv); FPU_EMU_EVCNT_DECL(fsub); FPU_EMU_EVCNT_DECL(fadd); FPU_EMU_EVCNT_DECL(fsqrt); FPU_EMU_EVCNT_DECL(fsel); FPU_EMU_EVCNT_DECL(fpres); FPU_EMU_EVCNT_DECL(fmul); FPU_EMU_EVCNT_DECL(frsqrte); FPU_EMU_EVCNT_DECL(fmulsub); FPU_EMU_EVCNT_DECL(fmuladd); FPU_EMU_EVCNT_DECL(fnmsub); FPU_EMU_EVCNT_DECL(fnmadd); /* FPSR exception masks */ #define FPSR_EX_MSK (FPSCR_VX|FPSCR_OX|FPSCR_UX|FPSCR_ZX| \ FPSCR_XX|FPSCR_VXSNAN|FPSCR_VXISI|FPSCR_VXIDI| \ FPSCR_VXZDZ|FPSCR_VXIMZ|FPSCR_VXVC|FPSCR_VXSOFT|\ FPSCR_VXSQRT|FPSCR_VXCVI) #define FPSR_EX (FPSCR_VE|FPSCR_OE|FPSCR_UE|FPSCR_ZE|FPSCR_XE) #define FPSR_EXOP (FPSR_EX_MSK&(~FPSR_EX)) int fpe_debug = 0; #ifdef DEBUG vm_offset_t opc_disasm(vm_offset_t, int); /* * Dump a `fpn' structure. */ void fpu_dumpfpn(struct fpn *fp) { static const char *class[] = { "SNAN", "QNAN", "ZERO", "NUM", "INF" }; printf("%s %c.%x %x %x %xE%d", class[fp->fp_class + 2], fp->fp_sign ? '-' : ' ', fp->fp_mant[0], fp->fp_mant[1], fp->fp_mant[2], fp->fp_mant[3], fp->fp_exp); } #endif /* * fpu_execute returns the following error numbers (0 = no error): */ #define FPE 1 /* take a floating point exception */ #define NOTFPU 2 /* not an FPU instruction */ #define FAULT 3 /* * Emulate a floating-point instruction. * Return zero for success, else signal number. * (Typically: zero, SIGFPE, SIGILL, SIGSEGV) */ int fpu_emulate(struct trapframe *frame, struct fpreg *fpf) { static union instr insn; static struct fpemu fe; static int lastill = 0; int sig; /* initialize insn.is_datasize to tell it is *not* initialized */ fe.fe_fpstate = fpf; fe.fe_cx = 0; /* always set this (to avoid a warning) */ if (copyin((void *) (frame->srr0), &insn.i_int, sizeof (insn.i_int))) { #ifdef DEBUG printf("fpu_emulate: fault reading opcode\n"); #endif return SIGSEGV; } DPRINTF(FPE_EX, ("fpu_emulate: emulating insn %x at %p\n", insn.i_int, (void *)frame->srr0)); if ((insn.i_any.i_opcd == OPC_TWI) || ((insn.i_any.i_opcd == OPC_integer_31) && (insn.i_x.i_xo == OPC31_TW))) { /* Check for the two trap insns. 
*/ DPRINTF(FPE_EX, ("fpu_emulate: SIGTRAP\n")); return (SIGTRAP); } sig = 0; switch (fpu_execute(frame, &fe, &insn)) { case 0: DPRINTF(FPE_EX, ("fpu_emulate: success\n")); frame->srr0 += 4; break; case FPE: DPRINTF(FPE_EX, ("fpu_emulate: SIGFPE\n")); sig = SIGFPE; break; case FAULT: DPRINTF(FPE_EX, ("fpu_emulate: SIGSEGV\n")); sig = SIGSEGV; break; case NOTFPU: default: DPRINTF(FPE_EX, ("fpu_emulate: SIGILL\n")); #ifdef DEBUG if (fpe_debug & FPE_EX) { printf("fpu_emulate: illegal insn %x at %p:", insn.i_int, (void *) (frame->srr0)); opc_disasm(frame->srr0, insn.i_int); } #endif /* * XXXX retry an illegal insn once due to cache issues. */ if (lastill == frame->srr0) { sig = SIGILL; #ifdef DEBUG if (fpe_debug & FPE_EX) kdb_enter(KDB_WHY_UNSET, "illegal instruction"); #endif } lastill = frame->srr0; break; } return (sig); } /* * Execute an FPU instruction (one that runs entirely in the FPU; not * FBfcc or STF, for instance). On return, fe->fe_fs->fs_fsr will be * modified to reflect the setting the hardware would have left. * * Note that we do not catch all illegal opcodes, so you can, for instance, * multiply two integers this way. */ int fpu_execute(struct trapframe *tf, struct fpemu *fe, union instr *insn) { struct fpn *fp; union instr instr = *insn; int *a; vm_offset_t addr; int ra, rb, rc, rt, type, mask, fsr, cx, bf, setcr; unsigned int cond; struct fpreg *fs; /* Setup work. */ fp = NULL; fs = fe->fe_fpstate; fe->fe_fpscr = ((int *)&fs->fpscr)[1]; /* * On PowerPC all floating point values are stored in registers * as doubles, even when used for single precision operations. */ type = FTYPE_DBL; cond = instr.i_any.i_rc; setcr = 0; bf = 0; /* XXX gcc */ #if defined(DDB) && defined(DEBUG) if (fpe_debug & FPE_EX) { vm_offset_t loc = tf->srr0; printf("Trying to emulate: %p ", (void *)loc); opc_disasm(loc, instr.i_int); } #endif /* * `Decode' and execute instruction. */ if ((instr.i_any.i_opcd >= OPC_LFS && instr.i_any.i_opcd <= OPC_STFDU) || instr.i_any.i_opcd == OPC_integer_31) { /* * Handle load/store insns: * * Convert to/from single if needed, calculate addr, * and update index reg if needed. 
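 *
 * As a sketch of the D-form decode performed below (assuming the
 * standard PowerPC FP load/store primary opcodes, lfs = 48 through
 * stfdu = 55; the `dbl' name is illustrative only):
 *
 *	store  = opcd & 0x4;	// stfs/stfsu/stfd/stfdu
 *	dbl    = opcd & 0x2;	// double-precision forms
 *	update = opcd & 0x1;	// `u' forms write the EA back to ra
 *
 * e.g. stfdu has opcd 55 = 0b110111, a double-precision store
 * with update.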
*/ double buf; size_t size = sizeof(float); int store, update; cond = 0; /* ld/st never set condition codes */ if (instr.i_any.i_opcd == OPC_integer_31) { if (instr.i_x.i_xo == OPC31_STFIWX) { FPU_EMU_EVCNT_INCR(stfiwx); /* Store as integer */ ra = instr.i_x.i_ra; rb = instr.i_x.i_rb; DPRINTF(FPE_INSN, ("reg %d has %x reg %d has %x\n", ra, tf->fixreg[ra], rb, tf->fixreg[rb])); addr = tf->fixreg[rb]; if (ra != 0) addr += tf->fixreg[ra]; rt = instr.i_x.i_rt; a = (int *)&fs->fpreg[rt]; DPRINTF(FPE_INSN, ("fpu_execute: Store INT %x at %p\n", a[1], (void *)addr)); if (copyout(&a[1], (void *)addr, sizeof(int))) return (FAULT); return (0); } if ((instr.i_x.i_xo & OPC31_FPMASK) != OPC31_FPOP) /* Not an indexed FP load/store op */ return (NOTFPU); store = (instr.i_x.i_xo & 0x80); if (instr.i_x.i_xo & 0x40) size = sizeof(double); else type = FTYPE_SNG; update = (instr.i_x.i_xo & 0x20); /* calculate EA of load/store */ ra = instr.i_x.i_ra; rb = instr.i_x.i_rb; DPRINTF(FPE_INSN, ("reg %d has %x reg %d has %x\n", ra, tf->fixreg[ra], rb, tf->fixreg[rb])); addr = tf->fixreg[rb]; if (ra != 0) addr += tf->fixreg[ra]; rt = instr.i_x.i_rt; } else { store = instr.i_d.i_opcd & 0x4; if (instr.i_d.i_opcd & 0x2) size = sizeof(double); else type = FTYPE_SNG; update = instr.i_d.i_opcd & 0x1; /* calculate EA of load/store */ ra = instr.i_d.i_ra; addr = instr.i_d.i_d; DPRINTF(FPE_INSN, ("reg %d has %x displ %x\n", ra, tf->fixreg[ra], addr)); if (ra != 0) addr += tf->fixreg[ra]; rt = instr.i_d.i_rt; } if (update && ra == 0) return (NOTFPU); if (store) { /* Store */ FPU_EMU_EVCNT_INCR(fpstore); if (type != FTYPE_DBL) { DPRINTF(FPE_INSN, ("fpu_execute: Store SNG at %p\n", (void *)addr)); fpu_explode(fe, fp = &fe->fe_f1, FTYPE_DBL, rt); fpu_implode(fe, fp, type, (void *)&buf); if (copyout(&buf, (void *)addr, size)) return (FAULT); } else { DPRINTF(FPE_INSN, ("fpu_execute: Store DBL at %p\n", (void *)addr)); if (copyout(&fs->fpreg[rt], (void *)addr, size)) return (FAULT); } } else { /* Load */ FPU_EMU_EVCNT_INCR(fpload); DPRINTF(FPE_INSN, ("fpu_execute: Load from %p\n", (void *)addr)); if (copyin((const void *)addr, &fs->fpreg[rt], size)) return (FAULT); if (type != FTYPE_DBL) { fpu_explode(fe, fp = &fe->fe_f1, type, rt); fpu_implode(fe, fp, FTYPE_DBL, (u_int *)&fs->fpreg[rt]); } } if (update) tf->fixreg[ra] = addr; /* Complete. */ return (0); #ifdef notyet } else if (instr.i_any.i_opcd == OPC_load_st_62) { /* These are 64-bit extensions */ return (NOTFPU); #endif } else if (instr.i_any.i_opcd == OPC_sp_fp_59 || instr.i_any.i_opcd == OPC_dp_fp_63) { if (instr.i_any.i_opcd == OPC_dp_fp_63 && !(instr.i_a.i_xo & OPC63M_MASK)) { /* Format X */ rt = instr.i_x.i_rt; ra = instr.i_x.i_ra; rb = instr.i_x.i_rb; /* One of the special opcodes.... */ switch (instr.i_x.i_xo) { case OPC63_FCMPU: FPU_EMU_EVCNT_INCR(fcmpu); DPRINTF(FPE_INSN, ("fpu_execute: FCMPU\n")); rt >>= 2; fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rb); fpu_compare(fe, 0); /* Make sure we do the condition regs. */ cond = 0; /* N.B.: i_rs is already left shifted by two. */ bf = instr.i_x.i_rs & 0xfc; setcr = 1; break; case OPC63_FRSP: /* * Convert to single: * * PowerPC uses this to round a double * precision value to single precision, * but values in registers are always * stored in double precision format.
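 *
 * The code below therefore makes a round trip: fpu_implode()
 * rounds the value into the single-precision layout of the
 * destination register, and the fpu_explode() that follows
 * re-reads that rounded single, so the register again holds
 * a (now single-rounded) value in the usual double format.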
*/ FPU_EMU_EVCNT_INCR(frsp); DPRINTF(FPE_INSN, ("fpu_execute: FRSP\n")); fpu_explode(fe, fp = &fe->fe_f1, FTYPE_DBL, rb); fpu_implode(fe, fp, FTYPE_SNG, (u_int *)&fs->fpreg[rt]); fpu_explode(fe, fp = &fe->fe_f1, FTYPE_SNG, rt); type = FTYPE_DBL; break; case OPC63_FCTIW: case OPC63_FCTIWZ: FPU_EMU_EVCNT_INCR(fctiw); DPRINTF(FPE_INSN, ("fpu_execute: FCTIW\n")); fpu_explode(fe, fp = &fe->fe_f1, type, rb); type = FTYPE_INT; break; case OPC63_FCMPO: FPU_EMU_EVCNT_INCR(fcmpo); DPRINTF(FPE_INSN, ("fpu_execute: FCMPO\n")); rt >>= 2; fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rb); fpu_compare(fe, 1); /* Make sure we do the condition regs. */ cond = 0; /* N.B.: i_rs is already left shifted by two. */ bf = instr.i_x.i_rs & 0xfc; setcr = 1; break; case OPC63_MTFSB1: FPU_EMU_EVCNT_INCR(mtfsb1); DPRINTF(FPE_INSN, ("fpu_execute: MTFSB1\n")); fe->fe_fpscr |= (~(FPSCR_VX|FPSR_EX) & (1<<(31-rt))); break; case OPC63_FNEG: FPU_EMU_EVCNT_INCR(fnegabs); DPRINTF(FPE_INSN, ("fpu_execute: FNEGABS\n")); memcpy(&fs->fpreg[rt], &fs->fpreg[rb], sizeof(double)); a = (int *)&fs->fpreg[rt]; - *a ^= (1 << 31); + *a ^= (1U << 31); break; case OPC63_MCRFS: FPU_EMU_EVCNT_INCR(mcrfs); DPRINTF(FPE_INSN, ("fpu_execute: MCRFS\n")); cond = 0; rt &= 0x1c; ra &= 0x1c; /* Extract the bits we want */ mask = (fe->fe_fpscr >> (28 - ra)) & 0xf; /* Clear the bits we copied. */ fe->fe_cx = (FPSR_EX_MSK | (0xf << (28 - ra))); fe->fe_fpscr &= fe->fe_cx; /* Now shove them in the right part of cr */ tf->cr &= ~(0xf << (28 - rt)); tf->cr |= (mask << (28 - rt)); break; case OPC63_MTFSB0: FPU_EMU_EVCNT_INCR(mtfsb0); DPRINTF(FPE_INSN, ("fpu_execute: MTFSB0\n")); fe->fe_fpscr &= ((FPSCR_VX|FPSR_EX) & ~(1<<(31-rt))); break; case OPC63_FMR: FPU_EMU_EVCNT_INCR(fmr); DPRINTF(FPE_INSN, ("fpu_execute: FMR\n")); memcpy(&fs->fpreg[rt], &fs->fpreg[rb], sizeof(double)); break; case OPC63_MTFSFI: FPU_EMU_EVCNT_INCR(mtfsfi); DPRINTF(FPE_INSN, ("fpu_execute: MTFSFI\n")); rb >>= 1; rt &= 0x1c; /* Already left-shifted 4 */ fe->fe_cx = rb << (28 - rt); mask = 0xf<<(28 - rt); fe->fe_fpscr = (fe->fe_fpscr & ~mask) | fe->fe_cx; /* XXX weird stuff about OX, FX, FEX, and VX should be handled */ break; case OPC63_FNABS: FPU_EMU_EVCNT_INCR(fnabs); DPRINTF(FPE_INSN, ("fpu_execute: FNABS\n")); memcpy(&fs->fpreg[rt], &fs->fpreg[rb], sizeof(double)); a = (int *)&fs->fpreg[rt]; - *a |= (1 << 31); + *a |= (1U << 31); break; case OPC63_FABS: FPU_EMU_EVCNT_INCR(fabs); DPRINTF(FPE_INSN, ("fpu_execute: FABS\n")); memcpy(&fs->fpreg[rt], &fs->fpreg[rb], sizeof(double)); a = (int *)&fs->fpreg[rt]; - *a &= ~(1 << 31); + *a &= ~(1U << 31); break; case OPC63_MFFS: FPU_EMU_EVCNT_INCR(mffs); DPRINTF(FPE_INSN, ("fpu_execute: MFFS\n")); memcpy(&fs->fpreg[rt], &fs->fpscr, sizeof(fs->fpscr)); break; case OPC63_MTFSF: FPU_EMU_EVCNT_INCR(mtfsf); DPRINTF(FPE_INSN, ("fpu_execute: MTFSF\n")); if ((rt = instr.i_xfl.i_flm) == -1) mask = -1; else { mask = 0; /* Convert 1 bit -> 4 bits */ for (ra = 0; ra < 8; ra ++) if (rt & (1<<ra)) mask |= (0xf<<(4*ra)); } a = (int *)&fs->fpreg[rt]; fe->fe_cx = mask & a[1]; fe->fe_fpscr = (fe->fe_fpscr&~mask) | (fe->fe_cx); /* XXX weird stuff about OX, FX, FEX, and VX should be handled */ break; case OPC63_FCTID: case OPC63_FCTIDZ: FPU_EMU_EVCNT_INCR(fctid); DPRINTF(FPE_INSN, ("fpu_execute: FCTID\n")); fpu_explode(fe, fp = &fe->fe_f1, type, rb); type = FTYPE_LNG; break; case OPC63_FCFID: FPU_EMU_EVCNT_INCR(fcfid); DPRINTF(FPE_INSN, ("fpu_execute: FCFID\n")); type = FTYPE_LNG; fpu_explode(fe, fp = &fe->fe_f1, type, rb); type = FTYPE_DBL; break; default: return (NOTFPU); break; } }
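/*
 * Everything that is not one of the format X special cases above is
 * decoded as a format A arithmetic operation below.
 */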
else { /* Format A */ rt = instr.i_a.i_frt; ra = instr.i_a.i_fra; rb = instr.i_a.i_frb; rc = instr.i_a.i_frc; /* * All arithmetic operations work on registers, which * are stored as doubles. */ type = FTYPE_DBL; switch ((unsigned int)instr.i_a.i_xo) { case OPC59_FDIVS: FPU_EMU_EVCNT_INCR(fdiv); DPRINTF(FPE_INSN, ("fpu_execute: FDIV\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rb); fp = fpu_div(fe); break; case OPC59_FSUBS: FPU_EMU_EVCNT_INCR(fsub); DPRINTF(FPE_INSN, ("fpu_execute: FSUB\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rb); fp = fpu_sub(fe); break; case OPC59_FADDS: FPU_EMU_EVCNT_INCR(fadd); DPRINTF(FPE_INSN, ("fpu_execute: FADD\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rb); fp = fpu_add(fe); break; case OPC59_FSQRTS: FPU_EMU_EVCNT_INCR(fsqrt); DPRINTF(FPE_INSN, ("fpu_execute: FSQRT\n")); fpu_explode(fe, &fe->fe_f1, type, rb); fp = fpu_sqrt(fe); break; case OPC63M_FSEL: FPU_EMU_EVCNT_INCR(fsel); DPRINTF(FPE_INSN, ("fpu_execute: FSEL\n")); a = (int *)&fe->fe_fpstate->fpreg[ra]; if ((*a & 0x80000000) && (*a & 0x7fffffff)) /* fra < 0 */ rc = rb; DPRINTF(FPE_INSN, ("f%d => f%d\n", rc, rt)); memcpy(&fs->fpreg[rt], &fs->fpreg[rc], sizeof(double)); break; case OPC59_FRES: FPU_EMU_EVCNT_INCR(fpres); DPRINTF(FPE_INSN, ("fpu_execute: FPRES\n")); fpu_explode(fe, &fe->fe_f1, type, rb); fp = fpu_sqrt(fe); /* now we've gotta overwrite the dest reg */ *((int *)&fe->fe_fpstate->fpreg[rt]) = 1; fpu_explode(fe, &fe->fe_f1, FTYPE_INT, rt); fpu_div(fe); break; case OPC59_FMULS: FPU_EMU_EVCNT_INCR(fmul); DPRINTF(FPE_INSN, ("fpu_execute: FMUL\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rc); fp = fpu_mul(fe); break; case OPC63M_FRSQRTE: /* Reciprocal sqrt() estimate */ FPU_EMU_EVCNT_INCR(frsqrte); DPRINTF(FPE_INSN, ("fpu_execute: FRSQRTE\n")); fpu_explode(fe, &fe->fe_f1, type, rb); fp = fpu_sqrt(fe); fe->fe_f2 = *fp; /* now we've gotta overwrite the dest reg */ *((int *)&fe->fe_fpstate->fpreg[rt]) = 1; fpu_explode(fe, &fe->fe_f1, FTYPE_INT, rt); fpu_div(fe); break; case OPC59_FMSUBS: FPU_EMU_EVCNT_INCR(fmulsub); DPRINTF(FPE_INSN, ("fpu_execute: FMULSUB\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rc); fp = fpu_mul(fe); fe->fe_f1 = *fp; fpu_explode(fe, &fe->fe_f2, type, rb); fp = fpu_sub(fe); break; case OPC59_FMADDS: FPU_EMU_EVCNT_INCR(fmuladd); DPRINTF(FPE_INSN, ("fpu_execute: FMULADD\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rc); fp = fpu_mul(fe); fe->fe_f1 = *fp; fpu_explode(fe, &fe->fe_f2, type, rb); fp = fpu_add(fe); break; case OPC59_FNMSUBS: FPU_EMU_EVCNT_INCR(fnmsub); DPRINTF(FPE_INSN, ("fpu_execute: FNMSUB\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rc); fp = fpu_mul(fe); fe->fe_f1 = *fp; fpu_explode(fe, &fe->fe_f2, type, rb); fp = fpu_sub(fe); /* Negate */ fp->fp_sign ^= 1; break; case OPC59_FNMADDS: FPU_EMU_EVCNT_INCR(fnmadd); DPRINTF(FPE_INSN, ("fpu_execute: FNMADD\n")); fpu_explode(fe, &fe->fe_f1, type, ra); fpu_explode(fe, &fe->fe_f2, type, rc); fp = fpu_mul(fe); fe->fe_f1 = *fp; fpu_explode(fe, &fe->fe_f2, type, rb); fp = fpu_add(fe); /* Negate */ fp->fp_sign ^= 1; break; default: return (NOTFPU); break; } /* If the instruction was single precision, round */ if (!(instr.i_any.i_opcd & 0x4)) { fpu_implode(fe, fp, FTYPE_SNG, (u_int *)&fs->fpreg[rt]); fpu_explode(fe, fp = &fe->fe_f1, FTYPE_SNG, rt); } } } else { return (NOTFPU); } /* * ALU 
operation is complete. Collapse the result and then check * for exceptions. If we got any, and they are enabled, do not * alter the destination register, just stop with an exception. * Otherwise set new current exceptions and accrue. */ if (fp) fpu_implode(fe, fp, type, (u_int *)&fs->fpreg[rt]); cx = fe->fe_cx; fsr = fe->fe_fpscr; if (cx != 0) { fsr &= ~FPSCR_FX; if ((cx^fsr)&FPSR_EX_MSK) fsr |= FPSCR_FX; mask = fsr & FPSR_EX; mask <<= (25-3); if (cx & mask) fsr |= FPSCR_FEX; if (cx & FPSCR_FPRF) { /* Need to replace CC */ fsr &= ~FPSCR_FPRF; } if (cx & (FPSR_EXOP)) fsr |= FPSCR_VX; fsr |= cx; DPRINTF(FPE_INSN, ("fpu_execute: cx %x, fsr %x\n", cx, fsr)); } if (cond) { cond = fsr & 0xf0000000; /* Isolate condition codes */ cond >>= 28; /* Move fpu condition codes to cr[1] */ tf->cr &= (0x0f000000); tf->cr |= (cond<<24); DPRINTF(FPE_INSN, ("fpu_execute: cr[1] <= %x\n", cond)); } if (setcr) { cond = fsr & FPSCR_FPCC; /* Isolate condition codes */ cond <<= 16; /* Move fpu condition codes to cr[1] */ tf->cr &= ~(0xf0000000>>bf); tf->cr |= (cond>>bf); DPRINTF(FPE_INSN, ("fpu_execute: cr[%d] (cr=%x) <= %x\n", bf/4, tf->cr, cond)); } ((int *)&fs->fpscr)[1] = fsr; if (fsr & FPSCR_FEX) return(FPE); return (0); /* success */ } Index: head/sys/powerpc/fpu/fpu_sqrt.c =================================================================== --- head/sys/powerpc/fpu/fpu_sqrt.c (revision 258779) +++ head/sys/powerpc/fpu/fpu_sqrt.c (revision 258780) @@ -1,415 +1,415 @@ /* $NetBSD: fpu_sqrt.c,v 1.4 2005/12/11 12:18:42 christos Exp $ */ /* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)fpu_sqrt.c 8.1 (Berkeley) 6/11/93 */ /* * Perform an FPU square root (return sqrt(x)). */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include /* * Our task is to calculate the square root of a floating point number x0. * This number x normally has the form: * * exp * x = mant * 2 (where 1 <= mant < 2 and exp is an integer) * * This can be left as it stands, or the mantissa can be doubled and the * exponent decremented: * * exp-1 * x = (2 * mant) * 2 (where 2 <= 2 * mant < 4) * * If the exponent `exp' is even, the square root of the number is best * handled using the first form, and is by definition equal to: * * exp/2 * sqrt(x) = sqrt(mant) * 2 * * If exp is odd, on the other hand, it is convenient to use the second * form, giving: * * (exp-1)/2 * sqrt(x) = sqrt(2 * mant) * 2 * * In the first case, we have * * 1 <= mant < 2 * * and therefore * * sqrt(1) <= sqrt(mant) < sqrt(2) * * while in the second case we have * * 2 <= 2*mant < 4 * * and therefore * * sqrt(2) <= sqrt(2*mant) < sqrt(4) * * so that in any case, we are sure that * * sqrt(1) <= sqrt(n * mant) < sqrt(4), n = 1 or 2 * * or * * 1 <= sqrt(n * mant) < 2, n = 1 or 2. * * This root is therefore a properly formed mantissa for a floating * point number. The exponent of sqrt(x) is either exp/2 or (exp-1)/2 * as above. This leaves us with the problem of finding the square root * of a fixed-point number in the range [1..4). * * Though it may not be instantly obvious, the following square root * algorithm works for any integer x of an even number of bits, provided * that no overflows occur: * * let q = 0 * for k = NBITS-1 to 0 step -1 do -- for each digit in the answer... * x *= 2 -- multiply by radix, for next digit * if x >= 2q + 2^k then -- if adding 2^k does not * x -= 2q + 2^k -- exceed the correct root, * q += 2^k -- add 2^k and adjust x * fi * done * sqrt = q / 2^(NBITS/2) -- (and any remainder is in x) * * If NBITS is odd (so that k is initially even), we can just add another * zero bit at the top of x. Doing so means that q is not going to acquire * a 1 bit in the first trip around the loop (since x0 < 2^NBITS). If the * final value in x is not needed, or can be off by a factor of 2, this is * equivalent to moving the `x *= 2' step to the bottom of the loop: * * for k = NBITS-1 to 0 step -1 do if ... fi; x *= 2; done * * and the result q will then be sqrt(x0) * 2^floor(NBITS / 2). * (Since the algorithm is destructive on x, we will call x's initial * value, for which q is some power of two times its square root, x0.) * * If we insert a loop invariant y = 2q, we can then rewrite this using * C notation as: * * q = y = 0; x = x0; * for (k = NBITS; --k >= 0;) { * #if (NBITS is even) * x *= 2; * #endif * t = y + (1 << k); * if (x >= t) { * x -= t; * q += 1 << k; * y += 1 << (k + 1); * } * #if (NBITS is odd) * x *= 2; * #endif * } * * If x0 is fixed point, rather than an integer, we can simply alter the * scale factor between q and sqrt(x0). As it happens, we can easily arrange * for the scale factor to be 2**0 or 1, so that sqrt(x0) == q. * * In our case, however, x0 (and therefore x, y, q, and t) are multiword * integers, which adds some complication. But note that q is built one * bit at a time, from the top down, and is not used itself in the loop * (we use 2q as held in y instead). This means we can build our answer * in an integer, one word at a time, which saves a bit of work.
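 *
 * (As a self-contained illustration of the same digit-by-digit idea,
 * here is its usual single-word `restoring' form for a plain 32-bit
 * unsigned integer; this sketch is not the code used below, which
 * instead works on a four-word fixed-point mantissa. It returns
 * floor(sqrt(x)):
 *
 *	unsigned int
 *	isqrt(unsigned int x)
 *	{
 *		unsigned int q = 0, bit = 1U << 30;
 *
 *		while (bit > x)		// highest power of 4 <= x
 *			bit >>= 2;
 *		while (bit != 0) {
 *			if (x >= q + bit) {
 *				x -= q + bit;	// trial bit accepted
 *				q = (q >> 1) + bit;
 *			} else
 *				q >>= 1;	// trial bit rejected
 *			bit >>= 2;
 *		}
 *		return (q);
 *	}
 *
 * The remainder of this comment describes how the multiword version
 * below organizes the same loop.)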
Also, * since 1 << k is always a `new' bit in q, 1 << k and 1 << (k+1) are * `new' bits in y and we can set them with an `or' operation rather than * a full-blown multiword add. * * We are almost done, except for one snag. We must prove that none of our * intermediate calculations can overflow. We know that x0 is in [1..4) * and therefore the square root in q will be in [1..2), but what about x, * y, and t? * * We know that y = 2q at the beginning of each loop. (The relation only * fails temporarily while y and q are being updated.) Since q < 2, y < 4. * The sum in t can, in our case, be as much as y+(1<<1) = y+2 < 6. * Furthermore, we can prove with a bit of work that x never exceeds y by * more than 2, so that even after doubling, 0 <= x < 8. (This is left as * an exercise to the reader, mostly because I have become tired of working * on this comment.) * * If our floating point mantissas (which are of the form 1.frac) occupy * B+1 bits, our largest intermediary needs at most B+3 bits, or two extra. * In fact, we want even one more bit (for a carry, to avoid compares), or * three extra. There is a comment in fpu_emu.h reminding maintainers of * this, so we have some justification in assuming it. */ struct fpn * fpu_sqrt(struct fpemu *fe) { struct fpn *x = &fe->fe_f1; u_int bit, q, tt; u_int x0, x1, x2, x3; u_int y0, y1, y2, y3; u_int d0, d1, d2, d3; int e; FPU_DECL_CARRY; /* * Take care of special cases first. In order: * * sqrt(NaN) = NaN * sqrt(+0) = +0 * sqrt(-0) = -0 * sqrt(x < 0) = NaN (including sqrt(-Inf)) * sqrt(+Inf) = +Inf * * Then all that remains are numbers with mantissas in [1..2). */ DPRINTF(FPE_REG, ("fpu_sqrt:\n")); DUMPFPN(FPE_REG, x); DPRINTF(FPE_REG, ("=>\n")); if (ISNAN(x)) { fe->fe_cx |= FPSCR_VXSNAN; DUMPFPN(FPE_REG, x); return (x); } if (ISZERO(x)) { fe->fe_cx |= FPSCR_ZX; x->fp_class = FPC_INF; DUMPFPN(FPE_REG, x); return (x); } if (x->fp_sign) { return (fpu_newnan(fe)); } if (ISINF(x)) { fe->fe_cx |= FPSCR_VXSQRT; DUMPFPN(FPE_REG, 0); return (0); } /* * Calculate result exponent. As noted above, this may involve * doubling the mantissa. We will also need to double x each * time around the loop, so we define a macro for this here, and * we break out the multiword mantissa. */ #ifdef FPU_SHL1_BY_ADD #define DOUBLE_X { \ FPU_ADDS(x3, x3, x3); FPU_ADDCS(x2, x2, x2); \ FPU_ADDCS(x1, x1, x1); FPU_ADDC(x0, x0, x0); \ } #else #define DOUBLE_X { \ x0 = (x0 << 1) | (x1 >> 31); x1 = (x1 << 1) | (x2 >> 31); \ x2 = (x2 << 1) | (x3 >> 31); x3 <<= 1; \ } #endif #if (FP_NMANT & 1) != 0 # define ODD_DOUBLE DOUBLE_X # define EVEN_DOUBLE /* nothing */ #else # define ODD_DOUBLE /* nothing */ # define EVEN_DOUBLE DOUBLE_X #endif x0 = x->fp_mant[0]; x1 = x->fp_mant[1]; x2 = x->fp_mant[2]; x3 = x->fp_mant[3]; e = x->fp_exp; if (e & 1) /* exponent is odd; use sqrt(2mant) */ DOUBLE_X; /* THE FOLLOWING ASSUMES THAT RIGHT SHIFT DOES SIGN EXTENSION */ x->fp_exp = e >> 1; /* calculates (e&1 ? (e-1)/2 : e/2) */ /* * Now calculate the mantissa root. Since x is now in [1..4), * we know that the first trip around the loop will definitely * set the top bit in q, so we can do that manually and start * the loop at the next bit down instead. We must be sure to * double x correctly while doing the `known q=1.0'. * * We do this one mantissa-word at a time, as noted above, to - * save work. To avoid `(1 << 31) << 1', we also do the top bit + * save work. To avoid `(1U << 31) << 1', we also do the top bit * outside of each per-word loop.
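 *
 * (Within each per-word loop, the multiword compare-and-subtract of
 * `x - t' is done with the FPU_SUBS/FPU_SUBC(S) carry-chain macros
 * seen below. Conceptually, for a two-word value and 32-bit words,
 * that step is:
 *
 *	d1 = x1 - t1;			// low word
 *	d0 = x0 - t0 - (x1 < t1);	// high word, minus the borrow
 *
 * and the sign bit of d0 then decides whether x >= t.)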
* * The calculation `t = y + bit' breaks down into `t0 = y0, ..., * t3 = y3, t? |= bit' for the appropriate word. Since the bit * is always a `new' one, this means that three of the `t?'s are * just the corresponding `y?'; we use `#define's here for this. * The variable `tt' holds the actual `t?' variable. */ /* calculate q0 */ #define t0 tt bit = FP_1; EVEN_DOUBLE; /* if (x >= (t0 = y0 | bit)) { */ /* always true */ q = bit; x0 -= bit; y0 = bit << 1; /* } */ ODD_DOUBLE; while ((bit >>= 1) != 0) { /* for remaining bits in q0 */ EVEN_DOUBLE; t0 = y0 | bit; /* t = y + bit */ if (x0 >= t0) { /* if x >= t then */ x0 -= t0; /* x -= t */ q |= bit; /* q += bit */ y0 |= bit << 1; /* y += bit << 1 */ } ODD_DOUBLE; } x->fp_mant[0] = q; #undef t0 /* calculate q1. note (y0&1)==0. */ #define t0 y0 #define t1 tt q = 0; y1 = 0; bit = 1 << 31; EVEN_DOUBLE; t1 = bit; FPU_SUBS(d1, x1, t1); FPU_SUBC(d0, x0, t0); /* d = x - t */ if ((int)d0 >= 0) { /* if d >= 0 (i.e., x >= t) then */ x0 = d0, x1 = d1; /* x -= t */ q = bit; /* q += bit */ y0 |= 1; /* y += bit << 1 */ } ODD_DOUBLE; while ((bit >>= 1) != 0) { /* for remaining bits in q1 */ EVEN_DOUBLE; /* as before */ t1 = y1 | bit; FPU_SUBS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1; q |= bit; y1 |= bit << 1; } ODD_DOUBLE; } x->fp_mant[1] = q; #undef t1 /* calculate q2. note (y1&1)==0; y0 (aka t0) is fixed. */ #define t1 y1 #define t2 tt q = 0; y2 = 0; bit = 1 << 31; EVEN_DOUBLE; t2 = bit; FPU_SUBS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; q |= bit; y1 |= 1; /* now t1, y1 are set in concrete */ } ODD_DOUBLE; while ((bit >>= 1) != 0) { EVEN_DOUBLE; t2 = y2 | bit; FPU_SUBS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; q |= bit; y2 |= bit << 1; } ODD_DOUBLE; } x->fp_mant[2] = q; #undef t2 /* calculate q3. y0, t0, y1, t1 all fixed; y2, t2, almost done. */ #define t2 y2 #define t3 tt q = 0; y3 = 0; bit = 1 << 31; EVEN_DOUBLE; t3 = bit; FPU_SUBS(d3, x3, t3); FPU_SUBCS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); ODD_DOUBLE; if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; q |= bit; y2 |= 1; } while ((bit >>= 1) != 0) { EVEN_DOUBLE; t3 = y3 | bit; FPU_SUBS(d3, x3, t3); FPU_SUBCS(d2, x2, t2); FPU_SUBCS(d1, x1, t1); FPU_SUBC(d0, x0, t0); if ((int)d0 >= 0) { x0 = d0, x1 = d1, x2 = d2; q |= bit; y3 |= bit << 1; } ODD_DOUBLE; } x->fp_mant[3] = q; /* * The result, which includes guard and round bits, is exact iff * x is now zero; any nonzero bits in x represent sticky bits. */ x->fp_sticky = x0 | x1 | x2 | x3; DUMPFPN(FPE_REG, x); return (x); } Index: head/sys/powerpc/powermac/nvbl.c =================================================================== --- head/sys/powerpc/powermac/nvbl.c (revision 258779) +++ head/sys/powerpc/powermac/nvbl.c (revision 258780) @@ -1,199 +1,199 @@ /*- * Copyright (c) 2012 Justin Hibbits * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #define NVIDIA_BRIGHT_MIN (0x0ec) #define NVIDIA_BRIGHT_MAX (0x538) #define NVIDIA_BRIGHT_SCALE ((NVIDIA_BRIGHT_MAX - NVIDIA_BRIGHT_MIN)/100) /* nVidia's MMIO registers are at PCI BAR[0] */ #define NVIDIA_MMIO_PMC (0x0) #define NVIDIA_PMC_OFF (NVIDIA_MMIO_PMC + 0x10f0) #define NVIDIA_PMC_BL_SHIFT (16) -#define NVIDIA_PMC_BL_EN (1 << 31) +#define NVIDIA_PMC_BL_EN (1U << 31) struct nvbl_softc { device_t dev; struct resource *sc_memr; }; static void nvbl_identify(driver_t *driver, device_t parent); static int nvbl_probe(device_t dev); static int nvbl_attach(device_t dev); static int nvbl_setlevel(struct nvbl_softc *sc, int newlevel); static int nvbl_getlevel(struct nvbl_softc *sc); static int nvbl_sysctl(SYSCTL_HANDLER_ARGS); static device_method_t nvbl_methods[] = { /* Device interface */ DEVMETHOD(device_identify, nvbl_identify), DEVMETHOD(device_probe, nvbl_probe), DEVMETHOD(device_attach, nvbl_attach), {0, 0}, }; static driver_t nvbl_driver = { "backlight", nvbl_methods, sizeof(struct nvbl_softc) }; static devclass_t nvbl_devclass; DRIVER_MODULE(nvbl, vgapci, nvbl_driver, nvbl_devclass, 0, 0); static void nvbl_identify(driver_t *driver, device_t parent) { if (OF_finddevice("mac-io/backlight") == -1) return; if (device_find_child(parent, "backlight", -1) == NULL) device_add_child(parent, "backlight", -1); } static int nvbl_probe(device_t dev) { char control[8]; phandle_t handle; handle = OF_finddevice("mac-io/backlight"); if (handle == -1) return (ENXIO); if (OF_getprop(handle, "backlight-control", &control, sizeof(control)) < 0) return (ENXIO); if (strcmp(control, "mnca") != 0) return (ENXIO); device_set_desc(dev, "PowerBook backlight for nVidia graphics"); return (0); } static int nvbl_attach(device_t dev) { struct nvbl_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; int rid; sc = device_get_softc(dev); rid = 0x10; /* BAR[0], for the MMIO register */ sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_memr == NULL) { device_printf(dev, "Could not alloc mem resource!\n"); return (ENXIO); } /* Turn on big-endian mode */ if (!(bus_read_stream_4(sc->sc_memr, NVIDIA_MMIO_PMC + 4) & 0x01000001)) { bus_write_stream_4(sc->sc_memr, NVIDIA_MMIO_PMC + 4, 0x01000001); mb(); } ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "level", CTLTYPE_INT | CTLFLAG_RW, sc, 0, nvbl_sysctl, "I", "Backlight level (0-100)"); return (0); } static int nvbl_setlevel(struct nvbl_softc *sc, int newlevel) { uint32_t pmc_reg; if (newlevel > 100) newlevel = 100; if (newlevel < 0) newlevel = 0; if (newlevel > 0) newlevel = (newlevel * NVIDIA_BRIGHT_SCALE) 
+ NVIDIA_BRIGHT_MIN; pmc_reg = bus_read_stream_4(sc->sc_memr, NVIDIA_PMC_OFF) & 0xffff; pmc_reg |= NVIDIA_PMC_BL_EN | (newlevel << NVIDIA_PMC_BL_SHIFT); bus_write_stream_4(sc->sc_memr, NVIDIA_PMC_OFF, pmc_reg); return (0); } static int nvbl_getlevel(struct nvbl_softc *sc) { uint16_t level; level = bus_read_stream_2(sc->sc_memr, NVIDIA_PMC_OFF) & 0x7fff; if (level < NVIDIA_BRIGHT_MIN) return 0; level = (level - NVIDIA_BRIGHT_MIN) / NVIDIA_BRIGHT_SCALE; return (level); } static int nvbl_sysctl(SYSCTL_HANDLER_ARGS) { struct nvbl_softc *sc; int newlevel, error; sc = arg1; newlevel = nvbl_getlevel(sc); error = sysctl_handle_int(oidp, &newlevel, 0, req); if (error || !req->newptr) return (error); return (nvbl_setlevel(sc, newlevel)); } Index: head/sys/sys/consio.h =================================================================== --- head/sys/sys/consio.h (revision 258779) +++ head/sys/sys/consio.h (revision 258780) @@ -1,440 +1,440 @@ /*- * Copyright (c) 1991-1996 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_CONSIO_H_ #define _SYS_CONSIO_H_ #ifndef _KERNEL #include #endif #include /* * Console ioctl commands. Some commands are named as KDXXXX, GIO_XXX, and * PIO_XXX, rather than CONS_XXX, for historical and compatibility reasons. * Some other CONS_XXX commands work as wrappers around the frame buffer * ioctl commands FBIO_XXX. Do not try to change all these commands; * otherwise we shall have compatibility problems.
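 *
 * As a minimal usage sketch (assuming fd is an open vty descriptor),
 * a program switches the console into graphics mode and back with:
 *
 *	ioctl(fd, KDSETMODE, KD_GRAPHICS);
 *	...
 *	ioctl(fd, KDSETMODE, KD_TEXT);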
*/ /* get/set video mode */ #define KD_TEXT 0 /* set text mode restore fonts */ #define KD_TEXT0 0 /* ditto */ #define KD_GRAPHICS 1 /* set graphics mode */ #define KD_TEXT1 2 /* set text mode !restore fonts */ #define KD_PIXEL 3 /* set pixel mode */ #define KDGETMODE _IOR('K', 9, int) #define KDSETMODE _IOWINT('K', 10) /* set border color */ #define KDSBORDER _IOWINT('K', 13) /* set up raster(pixel) text mode */ struct _scr_size { int scr_size[3]; }; typedef struct _scr_size scr_size_t; #define KDRASTER _IOW('K', 100, scr_size_t) /* get/set screen char map */ struct _scrmap { char scrmap[256]; }; typedef struct _scrmap scrmap_t; #define GIO_SCRNMAP _IOR('k', 2, scrmap_t) #define PIO_SCRNMAP _IOW('k', 3, scrmap_t) /* get the current text attribute */ #define GIO_ATTR _IOR('a', 0, int) /* get the current text color */ #define GIO_COLOR _IOR('c', 0, int) /* get the adapter type (equivalent to FBIO_ADPTYPE) */ #define CONS_CURRENT _IOR('c', 1, int) /* get the current video mode (equivalent to FBIO_GETMODE) */ #define CONS_GET _IOR('c', 2, int) /* not supported? */ #define CONS_IO _IO('c', 3) /* set blank time interval */ #define CONS_BLANKTIME _IOW('c', 4, int) /* set/get the screen saver (these ioctls are currently a no-op) */ struct ssaver { #define MAXSSAVER 16 char name[MAXSSAVER]; int num; long time; }; typedef struct ssaver ssaver_t; #define CONS_SSAVER _IOW('c', 5, ssaver_t) #define CONS_GSAVER _IOWR('c', 6, ssaver_t) /* set the text cursor type (obsolete, see CONS_CURSORSHAPE below) */ /* #define CONS_BLINK_CURSOR (1 << 0) #define CONS_CHAR_CURSOR (1 << 1) */ #define CONS_CURSORTYPE _IOW('c', 7, int) /* set the bell type to audible or visual */ #define CONS_VISUAL_BELL (1 << 0) #define CONS_QUIET_BELL (1 << 1) #define CONS_BELLTYPE _IOW('c', 8, int) /* set the history (scroll back) buffer size (in lines) */ #define CONS_HISTORY _IOW('c', 9, int) /* clear the history (scroll back) buffer */ #define CONS_CLRHIST _IO('c', 10) /* mouse cursor ioctl */ struct mouse_data { int x; int y; int z; int buttons; }; typedef struct mouse_data mouse_data_t; struct mouse_mode { int mode; int signal; }; typedef struct mouse_mode mouse_mode_t; struct mouse_event { int id; /* one based */ int value; }; typedef struct mouse_event mouse_event_t; struct mouse_info { int operation; #define MOUSE_SHOW 0x01 #define MOUSE_HIDE 0x02 #define MOUSE_MOVEABS 0x03 #define MOUSE_MOVEREL 0x04 #define MOUSE_GETINFO 0x05 #define MOUSE_MODE 0x06 #define MOUSE_ACTION 0x07 #define MOUSE_MOTION_EVENT 0x08 #define MOUSE_BUTTON_EVENT 0x09 #define MOUSE_MOUSECHAR 0x0a union { mouse_data_t data; mouse_mode_t mode; mouse_event_t event; int mouse_char; } u; }; typedef struct mouse_info mouse_info_t; #define CONS_MOUSECTL _IOWR('c', 10, mouse_info_t) /* see if the vty has been idle */ #define CONS_IDLE _IOR('c', 11, int) /* set the screen saver mode */ #define CONS_NO_SAVER (-1) #define CONS_LKM_SAVER 0 #define CONS_USR_SAVER 1 #define CONS_SAVERMODE _IOW('c', 12, int) /* start the screen saver */ #define CONS_SAVERSTART _IOW('c', 13, int) /* set the text cursor shape (see also CONS_CURSORTYPE above) */ #define CONS_BLINK_CURSOR (1 << 0) #define CONS_CHAR_CURSOR (1 << 1) #define CONS_HIDDEN_CURSOR (1 << 2) #define CONS_CURSOR_ATTRS (CONS_BLINK_CURSOR | CONS_CHAR_CURSOR | \ CONS_HIDDEN_CURSOR) #define CONS_RESET_CURSOR (1 << 30) -#define CONS_LOCAL_CURSOR (1 << 31) +#define CONS_LOCAL_CURSOR (1U << 31) #define CONS_CURSOR_FLAGS (CONS_RESET_CURSOR | CONS_LOCAL_CURSOR) struct cshape { /* shape[0]: flags, shape[1]: base, shape[2]: height
*/ int shape[3]; }; #define CONS_GETCURSORSHAPE _IOWR('c', 14, struct cshape) #define CONS_SETCURSORSHAPE _IOW('c', 15, struct cshape) /* set/get font data */ struct fnt8 { char fnt8x8[8*256]; }; typedef struct fnt8 fnt8_t; struct fnt14 { char fnt8x14[14*256]; }; typedef struct fnt14 fnt14_t; struct fnt16 { char fnt8x16[16*256]; }; typedef struct fnt16 fnt16_t; #define PIO_FONT8x8 _IOW('c', 64, fnt8_t) #define GIO_FONT8x8 _IOR('c', 65, fnt8_t) #define PIO_FONT8x14 _IOW('c', 66, fnt14_t) #define GIO_FONT8x14 _IOR('c', 67, fnt14_t) #define PIO_FONT8x16 _IOW('c', 68, fnt16_t) #define GIO_FONT8x16 _IOR('c', 69, fnt16_t) /* get video mode information */ struct colors { char fore; char back; }; struct vid_info { short size; short m_num; u_short font_size; u_short mv_row, mv_col; u_short mv_rsz, mv_csz; u_short mv_hsz; struct colors mv_norm, mv_rev, mv_grfc; u_char mv_ovscan; u_char mk_keylock; }; typedef struct vid_info vid_info_t; #define CONS_GETINFO _IOWR('c', 73, vid_info_t) /* get version */ #define CONS_GETVERS _IOR('c', 74, int) /* get the video adapter index (equivalent to FBIO_ADAPTER) */ #define CONS_CURRENTADP _IOR('c', 100, int) /* get the video adapter information (equivalent to FBIO_ADPINFO) */ #define CONS_ADPINFO _IOWR('c', 101, video_adapter_info_t) /* get the video mode information (equivalent to FBIO_MODEINFO) */ #define CONS_MODEINFO _IOWR('c', 102, video_info_t) /* find a video mode (equivalent to FBIO_FINDMODE) */ #define CONS_FINDMODE _IOWR('c', 103, video_info_t) /* set the frame buffer window origin (equivalent to FBIO_SETWINORG) */ #define CONS_SETWINORG _IOWINT('c', 104) /* use the specified keyboard */ #define CONS_SETKBD _IOWINT('c', 110) /* release the current keyboard */ #define CONS_RELKBD _IO('c', 111) struct scrshot { int x; int y; int xsize; int ysize; u_int16_t* buf; }; typedef struct scrshot scrshot_t; /* Snapshot the current video buffer */ #define CONS_SCRSHOT _IOWR('c', 105, scrshot_t) /* get/set the current terminal emulator info. */ #define TI_NAME_LEN 32 #define TI_DESC_LEN 64 struct term_info { int ti_index; int ti_flags; u_char ti_name[TI_NAME_LEN]; u_char ti_desc[TI_DESC_LEN]; }; typedef struct term_info term_info_t; #define CONS_GETTERM _IOWR('c', 112, term_info_t) #define CONS_SETTERM _IOW('c', 113, term_info_t) /* * Vty switching ioctl commands. */ /* get the next available vty */ #define VT_OPENQRY _IOR('v', 1, int) /* set/get vty switching mode */ #ifndef _VT_MODE_DECLARED #define _VT_MODE_DECLARED struct vt_mode { char mode; #define VT_AUTO 0 /* switching is automatic */ #define VT_PROCESS 1 /* switching controlled by prog */ #define VT_KERNEL 255 /* switching controlled in kernel */ char waitv; /* not implemented yet SOS */ short relsig; short acqsig; short frsig; /* not implemented yet SOS */ }; typedef struct vt_mode vtmode_t; #endif /* !_VT_MODE_DECLARED */ #define VT_SETMODE _IOW('v', 2, vtmode_t) #define VT_GETMODE _IOR('v', 3, vtmode_t) /* acknowledge release or acquisition of a vty */ #define VT_FALSE 0 #define VT_TRUE 1 #define VT_ACKACQ 2 #define VT_RELDISP _IOWINT('v', 4) /* activate the specified vty */ #define VT_ACTIVATE _IOWINT('v', 5) /* wait until the specified vty is active */ #define VT_WAITACTIVE _IOWINT('v', 6) /* get the currently active vty */ #define VT_GETACTIVE _IOR('v', 7, int) /* get the index of the vty */ #define VT_GETINDEX _IOR('v', 8, int) /* prevent switching vtys */ #define VT_LOCKSWITCH _IOW('v', 9, int) /* * Video mode switching ioctl. See sys/fbio.h for mode numbers.
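 *
 * Each SW_* command selects the mode named by its suffix; a typical
 * (illustrative) call, assuming fd is an open vty descriptor, is:
 *
 *	ioctl(fd, SW_VGA_C80x25, NULL);		// 80x25 color text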
*/ #define SW_B40x25 _IO('S', M_B40x25) #define SW_C40x25 _IO('S', M_C40x25) #define SW_B80x25 _IO('S', M_B80x25) #define SW_C80x25 _IO('S', M_C80x25) #define SW_BG320 _IO('S', M_BG320) #define SW_CG320 _IO('S', M_CG320) #define SW_BG640 _IO('S', M_BG640) #define SW_EGAMONO80x25 _IO('S', M_EGAMONO80x25) #define SW_CG320_D _IO('S', M_CG320_D) #define SW_CG640_E _IO('S', M_CG640_E) #define SW_EGAMONOAPA _IO('S', M_EGAMONOAPA) #define SW_CG640x350 _IO('S', M_CG640x350) #define SW_ENH_MONOAPA2 _IO('S', M_ENHMONOAPA2) #define SW_ENH_CG640 _IO('S', M_ENH_CG640) #define SW_ENH_B40x25 _IO('S', M_ENH_B40x25) #define SW_ENH_C40x25 _IO('S', M_ENH_C40x25) #define SW_ENH_B80x25 _IO('S', M_ENH_B80x25) #define SW_ENH_C80x25 _IO('S', M_ENH_C80x25) #define SW_ENH_B80x43 _IO('S', M_ENH_B80x43) #define SW_ENH_C80x43 _IO('S', M_ENH_C80x43) #define SW_MCAMODE _IO('S', M_MCA_MODE) #define SW_VGA_C40x25 _IO('S', M_VGA_C40x25) #define SW_VGA_C80x25 _IO('S', M_VGA_C80x25) #define SW_VGA_C80x30 _IO('S', M_VGA_C80x30) #define SW_VGA_C80x50 _IO('S', M_VGA_C80x50) #define SW_VGA_C80x60 _IO('S', M_VGA_C80x60) #define SW_VGA_M80x25 _IO('S', M_VGA_M80x25) #define SW_VGA_M80x30 _IO('S', M_VGA_M80x30) #define SW_VGA_M80x50 _IO('S', M_VGA_M80x50) #define SW_VGA_M80x60 _IO('S', M_VGA_M80x60) #define SW_VGA11 _IO('S', M_VGA11) #define SW_BG640x480 _IO('S', M_VGA11) #define SW_VGA12 _IO('S', M_VGA12) #define SW_CG640x480 _IO('S', M_VGA12) #define SW_VGA13 _IO('S', M_VGA13) #define SW_VGA_CG320 _IO('S', M_VGA13) #define SW_VGA_CG640 _IO('S', M_VGA_CG640) #define SW_VGA_MODEX _IO('S', M_VGA_MODEX) #define SW_PC98_80x25 _IO('S', M_PC98_80x25) #define SW_PC98_80x30 _IO('S', M_PC98_80x30) #define SW_PC98_EGC640x400 _IO('S', M_PC98_EGC640x400) #define SW_PC98_PEGC640x400 _IO('S', M_PC98_PEGC640x400) #define SW_PC98_PEGC640x480 _IO('S', M_PC98_PEGC640x480) #define SW_VGA_C90x25 _IO('S', M_VGA_C90x25) #define SW_VGA_M90x25 _IO('S', M_VGA_M90x25) #define SW_VGA_C90x30 _IO('S', M_VGA_C90x30) #define SW_VGA_M90x30 _IO('S', M_VGA_M90x30) #define SW_VGA_C90x43 _IO('S', M_VGA_C90x43) #define SW_VGA_M90x43 _IO('S', M_VGA_M90x43) #define SW_VGA_C90x50 _IO('S', M_VGA_C90x50) #define SW_VGA_M90x50 _IO('S', M_VGA_M90x50) #define SW_VGA_C90x60 _IO('S', M_VGA_C90x60) #define SW_VGA_M90x60 _IO('S', M_VGA_M90x60) #define SW_TEXT_80x25 _IO('S', M_TEXT_80x25) #define SW_TEXT_80x30 _IO('S', M_TEXT_80x30) #define SW_TEXT_80x43 _IO('S', M_TEXT_80x43) #define SW_TEXT_80x50 _IO('S', M_TEXT_80x50) #define SW_TEXT_80x60 _IO('S', M_TEXT_80x60) #define SW_TEXT_132x25 _IO('S', M_TEXT_132x25) #define SW_TEXT_132x30 _IO('S', M_TEXT_132x30) #define SW_TEXT_132x43 _IO('S', M_TEXT_132x43) #define SW_TEXT_132x50 _IO('S', M_TEXT_132x50) #define SW_TEXT_132x60 _IO('S', M_TEXT_132x60) #define SW_VESA_CG640x400 _IO('V', M_VESA_CG640x400 - M_VESA_BASE) #define SW_VESA_CG640x480 _IO('V', M_VESA_CG640x480 - M_VESA_BASE) #define SW_VESA_800x600 _IO('V', M_VESA_800x600 - M_VESA_BASE) #define SW_VESA_CG800x600 _IO('V', M_VESA_CG800x600 - M_VESA_BASE) #define SW_VESA_1024x768 _IO('V', M_VESA_1024x768 - M_VESA_BASE) #define SW_VESA_CG1024x768 _IO('V', M_VESA_CG1024x768 - M_VESA_BASE) #define SW_VESA_1280x1024 _IO('V', M_VESA_1280x1024 - M_VESA_BASE) #define SW_VESA_CG1280x1024 _IO('V', M_VESA_CG1280x1024 - M_VESA_BASE) #define SW_VESA_C80x60 _IO('V', M_VESA_C80x60 - M_VESA_BASE) #define SW_VESA_C132x25 _IO('V', M_VESA_C132x25 - M_VESA_BASE) #define SW_VESA_C132x43 _IO('V', M_VESA_C132x43 - M_VESA_BASE) #define SW_VESA_C132x50 _IO('V', M_VESA_C132x50 - M_VESA_BASE) #define 
SW_VESA_C132x60 _IO('V', M_VESA_C132x60 - M_VESA_BASE) #define SW_VESA_32K_320 _IO('V', M_VESA_32K_320 - M_VESA_BASE) #define SW_VESA_64K_320 _IO('V', M_VESA_64K_320 - M_VESA_BASE) #define SW_VESA_FULL_320 _IO('V', M_VESA_FULL_320 - M_VESA_BASE) #define SW_VESA_32K_640 _IO('V', M_VESA_32K_640 - M_VESA_BASE) #define SW_VESA_64K_640 _IO('V', M_VESA_64K_640 - M_VESA_BASE) #define SW_VESA_FULL_640 _IO('V', M_VESA_FULL_640 - M_VESA_BASE) #define SW_VESA_32K_800 _IO('V', M_VESA_32K_800 - M_VESA_BASE) #define SW_VESA_64K_800 _IO('V', M_VESA_64K_800 - M_VESA_BASE) #define SW_VESA_FULL_800 _IO('V', M_VESA_FULL_800 - M_VESA_BASE) #define SW_VESA_32K_1024 _IO('V', M_VESA_32K_1024 - M_VESA_BASE) #define SW_VESA_64K_1024 _IO('V', M_VESA_64K_1024 - M_VESA_BASE) #define SW_VESA_FULL_1024 _IO('V', M_VESA_FULL_1024 - M_VESA_BASE) #define SW_VESA_32K_1280 _IO('V', M_VESA_32K_1280 - M_VESA_BASE) #define SW_VESA_64K_1280 _IO('V', M_VESA_64K_1280 - M_VESA_BASE) #define SW_VESA_FULL_1280 _IO('V', M_VESA_FULL_1280 - M_VESA_BASE) #endif /* !_SYS_CONSIO_H_ */ Index: head/sys/x86/iommu/intel_reg.h =================================================================== --- head/sys/x86/iommu/intel_reg.h (revision 258779) +++ head/sys/x86/iommu/intel_reg.h (revision 258780) @@ -1,330 +1,330 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef __X86_IOMMU_INTEL_REG_H #define __X86_IOMMU_INTEL_REG_H #define DMAR_PAGE_SIZE PAGE_SIZE #define DMAR_PAGE_MASK (DMAR_PAGE_SIZE - 1) #define DMAR_PAGE_SHIFT PAGE_SHIFT #define DMAR_NPTEPG (DMAR_PAGE_SIZE / sizeof(dmar_pte_t)) #define DMAR_NPTEPGSHIFT 9 #define DMAR_PTEMASK (DMAR_NPTEPG - 1) typedef struct dmar_root_entry { uint64_t r1; uint64_t r2; } dmar_root_entry_t; #define DMAR_ROOT_R1_P 1 /* Present */ #define DMAR_ROOT_R1_CTP_MASK 0xfffffffffffff000 /* Mask for Context-Entry Table Pointer */ #define DMAR_CTX_CNT (DMAR_PAGE_SIZE / sizeof(dmar_root_entry_t)) typedef struct dmar_ctx_entry { uint64_t ctx1; uint64_t ctx2; } dmar_ctx_entry_t; #define DMAR_CTX1_P 1 /* Present */ #define DMAR_CTX1_FPD 2 /* Fault Processing Disable */ /* Translation Type: */ #define DMAR_CTX1_T_UNTR 0 /* only Untranslated */ #define DMAR_CTX1_T_TR 4 /* both Untranslated and Translated */ #define DMAR_CTX1_T_PASS 8 /* Pass-Through */ #define DMAR_CTX1_ASR_MASK 0xfffffffffffff000 /* Mask for the Address Space Root */ #define DMAR_CTX2_AW_2LVL 0 /* 2-level page tables */ #define DMAR_CTX2_AW_3LVL 1 /* 3-level page tables */ #define DMAR_CTX2_AW_4LVL 2 /* 4-level page tables */ #define DMAR_CTX2_AW_5LVL 3 /* 5-level page tables */ #define DMAR_CTX2_AW_6LVL 4 /* 6-level page tables */ #define DMAR_CTX2_DID(x) ((x) << 8) /* Domain Identifier */ typedef struct dmar_pte { uint64_t pte; } dmar_pte_t; #define DMAR_PTE_R 1 /* Read */ #define DMAR_PTE_W (1 << 1) /* Write */ #define DMAR_PTE_SP (1 << 7) /* Super Page */ #define DMAR_PTE_SNP (1 << 11) /* Snoop Behaviour */ #define DMAR_PTE_ADDR_MASK 0xffffffffff000 /* Address Mask */ #define DMAR_PTE_TM (1ULL << 62) /* Transient Mapping */ /* Version register */ #define DMAR_VER_REG 0 #define DMAR_MAJOR_VER(x) (((x) >> 4) & 0xf) #define DMAR_MINOR_VER(x) ((x) & 0xf) /* Capabilities register */ #define DMAR_CAP_REG 0x8 #define DMAR_CAP_DRD (1ULL << 55) /* DMA Read Draining */ #define DMAR_CAP_DWD (1ULL << 54) /* DMA Write Draining */ #define DMAR_CAP_MAMV(x) ((u_int)(((x) >> 48) & 0x3f)) /* Maximum Address Mask */ #define DMAR_CAP_NFR(x) ((u_int)(((x) >> 40) & 0xff) + 1) /* Num of Fault-recording regs */ #define DMAR_CAP_PSI (1ULL << 39) /* Page Selective Invalidation */ #define DMAR_CAP_SPS(x) ((u_int)(((x) >> 34) & 0xf)) /* Super-Page Support */ #define DMAR_CAP_SPS_2M 0x1 #define DMAR_CAP_SPS_1G 0x2 #define DMAR_CAP_SPS_512G 0x4 #define DMAR_CAP_SPS_1T 0x8 #define DMAR_CAP_FRO(x) ((u_int)(((x) >> 24) & 0x1ff)) /* Fault-recording reg offset */ #define DMAR_CAP_ISOCH (1 << 23) /* Isochrony */ #define DMAR_CAP_ZLR (1 << 22) /* Zero-length reads */ #define DMAR_CAP_MGAW(x) ((u_int)(((x) >> 16) & 0x3f)) /* Max Guest Address Width */ #define DMAR_CAP_SAGAW(x) ((u_int)(((x) >> 8) & 0x1f)) /* Adjusted Guest Address Width */ #define DMAR_CAP_SAGAW_2LVL 0x01 #define DMAR_CAP_SAGAW_3LVL 0x02 #define DMAR_CAP_SAGAW_4LVL 0x04 #define DMAR_CAP_SAGAW_5LVL 0x08 #define DMAR_CAP_SAGAW_6LVL 0x10 #define DMAR_CAP_CM (1 << 7) /* Caching mode */ #define DMAR_CAP_PHMR (1 << 6) /* Protected High-mem Region */ #define DMAR_CAP_PLMR (1 << 5) /* Protected Low-mem Region */ #define DMAR_CAP_RWBF (1 << 4) /* Required Write-Buffer Flushing */ #define DMAR_CAP_AFL (1 << 3) /* Advanced Fault Logging */ #define DMAR_CAP_ND(x) ((u_int)((x) & 0x3)) /* Number of domains */ /* Extended Capabilities register */ #define DMAR_ECAP_REG 0x10 #define DMAR_ECAP_MHMV(x) ((u_int)(((x) >> 20) & 0xf)) /* Maximum Handle Mask Value */ #define DMAR_ECAP_IRO(x) ((u_int)(((x) >> 8) & 0x3ff)) /* 
IOTLB Register Offset */ #define DMAR_ECAP_SC (1 << 7) /* Snoop Control */ #define DMAR_ECAP_PT (1 << 6) /* Pass Through */ #define DMAR_ECAP_EIM (1 << 4) /* Extended Interrupt Mode */ #define DMAR_ECAP_IR (1 << 3) /* Interrupt Remapping */ #define DMAR_ECAP_DI (1 << 2) /* Device IOTLB */ #define DMAR_ECAP_QI (1 << 1) /* Queued Invalidation */ #define DMAR_ECAP_C (1 << 0) /* Coherency */ /* Global Command register */ #define DMAR_GCMD_REG 0x18 -#define DMAR_GCMD_TE (1 << 31) /* Translation Enable */ +#define DMAR_GCMD_TE (1U << 31) /* Translation Enable */ #define DMAR_GCMD_SRTP (1 << 30) /* Set Root Table Pointer */ #define DMAR_GCMD_SFL (1 << 29) /* Set Fault Log */ #define DMAR_GCMD_EAFL (1 << 28) /* Enable Advanced Fault Logging */ #define DMAR_GCMD_WBF (1 << 27) /* Write Buffer Flush */ #define DMAR_GCMD_QIE (1 << 26) /* Queued Invalidation Enable */ #define DMAR_GCMD_IRE (1 << 25) /* Interrupt Remapping Enable */ #define DMAR_GCMD_SIRTP (1 << 24) /* Set Interrupt Remap Table Pointer */ #define DMAR_GCMD_CFI (1 << 23) /* Compatibility Format Interrupt */ /* Global Status register */ #define DMAR_GSTS_REG 0x1c -#define DMAR_GSTS_TES (1 << 31) /* Translation Enable Status */ +#define DMAR_GSTS_TES (1U << 31) /* Translation Enable Status */ #define DMAR_GSTS_RTPS (1 << 30) /* Root Table Pointer Status */ #define DMAR_GSTS_FLS (1 << 29) /* Fault Log Status */ #define DMAR_GSTS_AFLS (1 << 28) /* Advanced Fault Logging Status */ #define DMAR_GSTS_WBFS (1 << 27) /* Write Buffer Flush Status */ #define DMAR_GSTS_QIES (1 << 26) /* Queued Invalidation Enable Status */ #define DMAR_GSTS_IRES (1 << 25) /* Interrupt Remapping Enable Status */ #define DMAR_GSTS_IRTPS (1 << 24) /* Interrupt Remapping Table Pointer Status */ #define DMAR_GSTS_CFIS (1 << 23) /* Compatibility Format Interrupt Status */ /* Root-Entry Table Address register */ #define DMAR_RTADDR_REG 0x20 /* Context Command register */ #define DMAR_CCMD_REG 0x28 #define DMAR_CCMD_ICC (1ULL << 63) /* Invalidate Context-Cache */ -#define DMAR_CCMD_ICC32 (1 << 31) +#define DMAR_CCMD_ICC32 (1U << 31) #define DMAR_CCMD_CIRG_MASK (0x3ULL << 61) /* Context Invalidation Request Granularity */ #define DMAR_CCMD_CIRG_GLOB (0x1ULL << 61) /* Global */ #define DMAR_CCMD_CIRG_DOM (0x2ULL << 61) /* Domain */ #define DMAR_CCMD_CIRG_DEV (0x3ULL << 61) /* Device */ #define DMAR_CCMD_CAIG(x) (((x) >> 59) & 0x3) /* Context Actual Invalidation Granularity */ #define DMAR_CCMD_CAIG_GLOB 0x1 /* Global */ #define DMAR_CCMD_CAIG_DOM 0x2 /* Domain */ #define DMAR_CCMD_CAIG_DEV 0x3 /* Device */ #define DMAR_CCMD_FM (0x3ULL << 32) /* Function Mask */ #define DMAR_CCMD_SID(x) (((x) & 0xffff) << 16) /* Source-ID */ #define DMAR_CCMD_DID(x) ((x) & 0xffff) /* Domain-ID */ /* Invalidate Address register */ #define DMAR_IVA_REG_OFF 0 #define DMAR_IVA_IH (1 << 6) /* Invalidation Hint */ #define DMAR_IVA_AM(x) ((x) & 0x1f) /* Address Mask */ #define DMAR_IVA_ADDR(x) ((x) & ~0xfffULL) /* Address */ /* IOTLB Invalidate register */ #define DMAR_IOTLB_REG_OFF 0x8 #define DMAR_IOTLB_IVT (1ULL << 63) /* Invalidate IOTLB */ -#define DMAR_IOTLB_IVT32 (1 << 31) +#define DMAR_IOTLB_IVT32 (1U << 31) #define DMAR_IOTLB_IIRG_MASK (0x3ULL << 60) /* Invalidation Request Granularity */ #define DMAR_IOTLB_IIRG_GLB (0x1ULL << 60) /* Global */ #define DMAR_IOTLB_IIRG_DOM (0x2ULL << 60) /* Domain-selective */ #define DMAR_IOTLB_IIRG_PAGE (0x3ULL << 60) /* Page-selective */ #define DMAR_IOTLB_IAIG_MASK (0x3ULL << 57) /* Actual Invalidation Granularity */ #define DMAR_IOTLB_IAIG_INVLD 0 /* Hw
detected error */ #define DMAR_IOTLB_IAIG_GLB (0x1ULL << 57) /* Global */ #define DMAR_IOTLB_IAIG_DOM (0x2ULL << 57) /* Domain-selective */ #define DMAR_IOTLB_IAIG_PAGE (0x3ULL << 57) /* Page-selective */ #define DMAR_IOTLB_DR (0x1ULL << 49) /* Drain Reads */ #define DMAR_IOTLB_DW (0x1ULL << 48) /* Drain Writes */ #define DMAR_IOTLB_DID(x) (((uint64_t)(x) & 0xffff) << 32) /* Domain Id */ /* Fault Status register */ #define DMAR_FSTS_REG 0x34 #define DMAR_FSTS_FRI(x) (((x) >> 8) & 0xff) /* Fault Record Index */ #define DMAR_FSTS_ITE (1 << 6) /* Invalidation Time-out */ #define DMAR_FSTS_ICE (1 << 5) /* Invalidation Completion */ #define DMAR_FSTS_IQE (1 << 4) /* Invalidation Queue */ #define DMAR_FSTS_APF (1 << 3) /* Advanced Pending Fault */ #define DMAR_FSTS_AFO (1 << 2) /* Advanced Fault Overflow */ #define DMAR_FSTS_PPF (1 << 1) /* Primary Pending Fault */ #define DMAR_FSTS_PFO 1 /* Fault Overflow */ /* Fault Event Control register */ #define DMAR_FECTL_REG 0x38 -#define DMAR_FECTL_IM (1 << 31) /* Interrupt Mask */ +#define DMAR_FECTL_IM (1U << 31) /* Interrupt Mask */ #define DMAR_FECTL_IP (1 << 30) /* Interrupt Pending */ /* Fault Event Data register */ #define DMAR_FEDATA_REG 0x3c /* Fault Event Address register */ #define DMAR_FEADDR_REG 0x40 /* Fault Event Upper Address register */ #define DMAR_FEUADDR_REG 0x44 /* Advanced Fault Log register */ #define DMAR_AFLOG_REG 0x58 /* Fault Recording Register, also usable for Advanced Fault Log records */ #define DMAR_FRCD2_F (1ULL << 63) /* Fault */ -#define DMAR_FRCD2_F32 (1 << 31) +#define DMAR_FRCD2_F32 (1U << 31) #define DMAR_FRCD2_T(x) ((int)((x >> 62) & 1)) /* Type */ #define DMAR_FRCD2_T_W 0 /* Write request */ #define DMAR_FRCD2_T_R 1 /* Read or AtomicOp */ #define DMAR_FRCD2_AT(x) ((int)((x >> 60) & 0x3)) /* Address Type */ #define DMAR_FRCD2_FR(x) ((int)((x >> 32) & 0xff)) /* Fault Reason */ #define DMAR_FRCD2_SID(x) ((int)(x & 0xffff)) /* Source Identifier */ #define DMAR_FRCS1_FI_MASK 0xffffffffff000 /* Fault Info, Address Mask */ /* Protected Memory Enable register */ #define DMAR_PMEN_REG 0x64 -#define DMAR_PMEN_EPM (1 << 31) /* Enable Protected Memory */ +#define DMAR_PMEN_EPM (1U << 31) /* Enable Protected Memory */ #define DMAR_PMEN_PRS 1 /* Protected Region Status */ /* Protected Low-Memory Base register */ #define DMAR_PLMBASE_REG 0x68 /* Protected Low-Memory Limit register */ #define DMAR_PLMLIMIT_REG 0x6c /* Protected High-Memory Base register */ #define DMAR_PHMBASE_REG 0x70 /* Protected High-Memory Limit register */ #define DMAR_PHMLIMIT_REG 0x78 /* Queued Invalidation Descriptors */ #define DMAR_IQ_DESCR_SZ_SHIFT 4 /* Shift for descriptor count to ring offset */ #define DMAR_IQ_DESCR_SZ (1 << DMAR_IQ_DESCR_SZ_SHIFT) /* Descriptor size */ #define DMAR_IQ_DESCR_CTX_INV 0x1 /* Context-cache Invalidate Descriptor */ #define DMAR_IQ_DESCR_CTX_GLOB (0x1 << 4) /* Granularity: Global */ #define DMAR_IQ_DESCR_CTX_DOM (0x2 << 4) /* Granularity: Domain */ #define DMAR_IQ_DESCR_CTX_DEV (0x3 << 4) /* Granularity: Device */ #define DMAR_IQ_DESCR_CTX_DID(x) (((uint32_t)(x)) << 16) /* Domain Id */ #define DMAR_IQ_DESCR_CTX_SRC(x) (((uint64_t)(x)) << 32) /* Source Id */ #define DMAR_IQ_DESCR_CTX_FM(x) (((uint64_t)(x)) << 48) /* Function Mask */ #define DMAR_IQ_DESCR_IOTLB_INV 0x2 /* IOTLB Invalidate Descriptor */ #define DMAR_IQ_DESCR_IOTLB_GLOB (0x1 << 4) /* Granularity: Global */ #define DMAR_IQ_DESCR_IOTLB_DOM (0x2 << 4) /* Granularity: Domain */ #define DMAR_IQ_DESCR_IOTLB_PAGE (0x3 << 4) /* Granularity: Page */ #define 
Index: head/usr.sbin/bluetooth/bthidd/kbd.c
===================================================================
--- head/usr.sbin/bluetooth/bthidd/kbd.c	(revision 258779)
+++ head/usr.sbin/bluetooth/bthidd/kbd.c	(revision 258780)
@@ -1,580 +1,580 @@
/*
 * kbd.c
 */

/*-
 * Copyright (c) 2006 Maksim Yevmenkin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: kbd.c,v 1.4 2006/09/07 21:06:53 max Exp $
 * $FreeBSD$
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "bthid_config.h"
#include "bthidd.h"
#include "kbd.h"

static void	kbd_write(bitstr_t *m, int32_t fb, int32_t make, int32_t fd);
static int32_t	kbd_xlate(int32_t code, int32_t make, int32_t *b,
		    int32_t const *eob);

/*
 * HID code to PS/2 set 1 code translation table.
 *
 * http://www.microsoft.com/whdc/device/input/Scancode.mspx
 *
 * The table only contains "make" (key pressed) codes.
 * The "break" (key released) code is generated as "make" | 0x80
 */

-#define E0PREFIX	(1 << 31)
+#define E0PREFIX	(1U << 31)
#define NOBREAK		(1 << 30)
#define CODEMASK	(~(E0PREFIX|NOBREAK))

static int32_t const	x[] =
{
/*==================================================*/
/* Name				HID code	Make	Break*/
/*==================================================*/
/* No Event			00 */	-1,	/* None */
/* Overrun Error		01 */	NOBREAK|0xFF,	/* None */
/* POST Fail			02 */	NOBREAK|0xFC,	/* None */
/* ErrorUndefined		03 */	-1,	/* Unassigned */
/* a A				04 */	0x1E,	/* 9E */
/* b B				05 */	0x30,	/* B0 */
/* c C				06 */	0x2E,	/* AE */
/* d D				07 */	0x20,	/* A0 */
/* e E				08 */	0x12,	/* 92 */
/* f F				09 */	0x21,	/* A1 */
/* g G				0A */	0x22,	/* A2 */
/* h H				0B */	0x23,	/* A3 */
/* i I				0C */	0x17,	/* 97 */
/* j J				0D */	0x24,	/* A4 */
/* k K				0E */	0x25,	/* A5 */
/* l L				0F */	0x26,	/* A6 */
/* m M				10 */	0x32,	/* B2 */
/* n N				11 */	0x31,	/* B1 */
/* o O				12 */	0x18,	/* 98 */
/* p P				13 */	0x19,	/* 99 */
/* q Q				14 */	0x10,	/* 90 */
/* r R				15 */	0x13,	/* 93 */
/* s S				16 */	0x1F,	/* 9F */
/* t T				17 */	0x14,	/* 94 */
/* u U				18 */	0x16,	/* 96 */
/* v V				19 */	0x2F,	/* AF */
/* w W				1A */	0x11,	/* 91 */
/* x X				1B */	0x2D,	/* AD */
/* y Y				1C */	0x15,	/* 95 */
/* z Z				1D */	0x2C,	/* AC */
/* 1 !				1E */	0x02,	/* 82 */
/* 2 @				1F */	0x03,	/* 83 */
/* 3 #				20 */	0x04,	/* 84 */
/* 4 $				21 */	0x05,	/* 85 */
/* 5 %				22 */	0x06,	/* 86 */
/* 6 ^				23 */	0x07,	/* 87 */
/* 7 &				24 */	0x08,	/* 88 */
/* 8 *				25 */	0x09,	/* 89 */
/* 9 (				26 */	0x0A,	/* 8A */
/* 0 )				27 */	0x0B,	/* 8B */
/* Return			28 */	0x1C,	/* 9C */
/* Escape			29 */	0x01,	/* 81 */
/* Backspace			2A */	0x0E,	/* 8E */
/* Tab				2B */	0x0F,	/* 8F */
/* Space			2C */	0x39,	/* B9 */
/* - _				2D */	0x0C,	/* 8C */
/* = +				2E */	0x0D,	/* 8D */
/* [ {				2F */	0x1A,	/* 9A */
/* ] }				30 */	0x1B,	/* 9B */
/* \ |				31 */	0x2B,	/* AB */
/* Europe 1			32 */	0x2B,	/* AB */
/* ; :				33 */	0x27,	/* A7 */
/* " '				34 */	0x28,	/* A8 */
/* ` ~				35 */	0x29,	/* A9 */
/* comma <			36 */	0x33,	/* B3 */
/* . >				37 */	0x34,	/* B4 */
/* / ?				38 */	0x35,	/* B5 */
/* Caps Lock			39 */	0x3A,	/* BA */
/* F1				3A */	0x3B,	/* BB */
/* F2				3B */	0x3C,	/* BC */
/* F3				3C */	0x3D,	/* BD */
/* F4				3D */	0x3E,	/* BE */
/* F5				3E */	0x3F,	/* BF */
/* F6				3F */	0x40,	/* C0 */
/* F7				40 */	0x41,	/* C1 */
/* F8				41 */	0x42,	/* C2 */
/* F9				42 */	0x43,	/* C3 */
/* F10				43 */	0x44,	/* C4 */
/* F11				44 */	0x57,	/* D7 */
/* F12				45 */	0x58,	/* D8 */
/* Print Screen			46 */	E0PREFIX|0x37,	/* E0 B7 */
/* Scroll Lock			47 */	0x46,	/* C6 */
#if 0
/* Break (Ctrl-Pause)		48 */	E0 46 E0 C6,	/* None */
/* Pause			48 */	E1 1D 45 E1 9D C5,	/* None */
#else
/* Break (Ctrl-Pause)/Pause	48 */	NOBREAK /* Special case */,	/* None */
#endif
/* Insert			49 */	E0PREFIX|0x52,	/* E0 D2 */
/* Home				4A */	E0PREFIX|0x47,	/* E0 C7 */
/* Page Up			4B */	E0PREFIX|0x49,	/* E0 C9 */
/* Delete			4C */	E0PREFIX|0x53,	/* E0 D3 */
/* End				4D */	E0PREFIX|0x4F,	/* E0 CF */
/* Page Down			4E */	E0PREFIX|0x51,	/* E0 D1 */
/* Right Arrow			4F */	E0PREFIX|0x4D,	/* E0 CD */
/* Left Arrow			50 */	E0PREFIX|0x4B,	/* E0 CB */
/* Down Arrow			51 */	E0PREFIX|0x50,	/* E0 D0 */
/* Up Arrow			52 */	E0PREFIX|0x48,	/* E0 C8 */
/* Num Lock			53 */	0x45,	/* C5 */
/* Keypad /			54 */	E0PREFIX|0x35,	/* E0 B5 */
/* Keypad *			55 */	0x37,	/* B7 */
/* Keypad -			56 */	0x4A,	/* CA */
/* Keypad +			57 */	0x4E,	/* CE */
/* Keypad Enter			58 */	E0PREFIX|0x1C,	/* E0 9C */
/* Keypad 1 End			59 */	0x4F,	/* CF */
/* Keypad 2 Down		5A */	0x50,	/* D0 */
/* Keypad 3 PageDn		5B */	0x51,	/* D1 */
/* Keypad 4 Left		5C */	0x4B,	/* CB */
/* Keypad 5			5D */	0x4C,	/* CC */
/* Keypad 6 Right		5E */	0x4D,	/* CD */
/* Keypad 7 Home		5F */	0x47,	/* C7 */
/* Keypad 8 Up			60 */	0x48,	/* C8 */
/* Keypad 9 PageUp		61 */	0x49,	/* C9 */
/* Keypad 0 Insert		62 */	0x52,	/* D2 */
/* Keypad . Delete		63 */	0x53,	/* D3 */
/* Europe 2			64 */	0x56,	/* D6 */
/* App				65 */	E0PREFIX|0x5D,	/* E0 DD */
/* Keyboard Power		66 */	E0PREFIX|0x5E,	/* E0 DE */
/* Keypad =			67 */	0x59,	/* D9 */
/* F13				68 */	0x64,	/* E4 */
/* F14				69 */	0x65,	/* E5 */
/* F15				6A */	0x66,	/* E6 */
/* F16				6B */	0x67,	/* E7 */
/* F17				6C */	0x68,	/* E8 */
/* F18				6D */	0x69,	/* E9 */
/* F19				6E */	0x6A,	/* EA */
/* F20				6F */	0x6B,	/* EB */
/* F21				70 */	0x6C,	/* EC */
/* F22				71 */	0x6D,	/* ED */
/* F23				72 */	0x6E,	/* EE */
/* F24				73 */	0x76,	/* F6 */
/* Keyboard Execute		74 */	-1,	/* Unassigned */
/* Keyboard Help		75 */	-1,	/* Unassigned */
/* Keyboard Menu		76 */	-1,	/* Unassigned */
/* Keyboard Select		77 */	-1,	/* Unassigned */
/* Keyboard Stop		78 */	-1,	/* Unassigned */
/* Keyboard Again		79 */	-1,	/* Unassigned */
/* Keyboard Undo		7A */	-1,	/* Unassigned */
/* Keyboard Cut			7B */	-1,	/* Unassigned */
/* Keyboard Copy		7C */	-1,	/* Unassigned */
/* Keyboard Paste		7D */	-1,	/* Unassigned */
/* Keyboard Find		7E */	-1,	/* Unassigned */
/* Keyboard Mute		7F */	-1,	/* Unassigned */
/* Keyboard Volume Up		80 */	-1,	/* Unassigned */
/* Keyboard Volume Dn		81 */	-1,	/* Unassigned */
/* Keyboard Locking Caps Lock	82 */	-1,	/* Unassigned */
/* Keyboard Locking Num Lock	83 */	-1,	/* Unassigned */
/* Keyboard Locking Scroll Lock	84 */	-1,	/* Unassigned */
/* Keypad comma			85 */	0x7E,	/* FE */
/* Keyboard Equal Sign		86 */	-1,	/* Unassigned */
/* Keyboard Int'l 1		87 */	0x73,	/* F3 */
/* Keyboard Int'l 2		88 */	0x70,	/* F0 */
/* Keyboard Int'l 3		89 */	0x7D,	/* FD */
/* Keyboard Int'l 4		8A */	0x79,	/* F9 */
/* Keyboard Int'l 5		8B */	0x7B,	/* FB */
/* Keyboard Int'l 6		8C */	0x5C,	/* DC */
/* Keyboard Int'l 7		8D */	-1,	/* Unassigned */
/* Keyboard Int'l 8		8E */	-1,	/* Unassigned */
/* Keyboard Int'l 9		8F */	-1,	/* Unassigned */
/* Keyboard Lang 1		90 */	NOBREAK|0xF2,	/* None */
/* Keyboard Lang 2		91 */	NOBREAK|0xF1,	/* None */
/* Keyboard Lang 3		92 */	0x78,	/* F8 */
/* Keyboard Lang 4		93 */	0x77,	/* F7 */
/* Keyboard Lang 5		94 */	0x76,	/* F6 */
/* Keyboard Lang 6		95 */	-1,	/* Unassigned */
/* Keyboard Lang 7		96 */	-1,	/* Unassigned */
/* Keyboard Lang 8		97 */	-1,	/* Unassigned */
/* Keyboard Lang 9		98 */	-1,	/* Unassigned */
/* Keyboard Alternate Erase	99 */	-1,	/* Unassigned */
/* Keyboard SysReq/Attention	9A */	-1,	/* Unassigned */
/* Keyboard Cancel		9B */	-1,	/* Unassigned */
/* Keyboard Clear		9C */	-1,	/* Unassigned */
/* Keyboard Prior		9D */	-1,	/* Unassigned */
/* Keyboard Return		9E */	-1,	/* Unassigned */
/* Keyboard Separator		9F */	-1,	/* Unassigned */
/* Keyboard Out			A0 */	-1,	/* Unassigned */
/* Keyboard Oper		A1 */	-1,	/* Unassigned */
/* Keyboard Clear/Again		A2 */	-1,	/* Unassigned */
/* Keyboard CrSel/Props		A3 */	-1,	/* Unassigned */
/* Keyboard ExSel		A4 */	-1,	/* Unassigned */
/* Reserved			A5 */	-1,	/* Reserved */
/* Reserved			A6 */	-1,	/* Reserved */
/* Reserved			A7 */	-1,	/* Reserved */
/* Reserved			A8 */	-1,	/* Reserved */
/* Reserved			A9 */	-1,	/* Reserved */
/* Reserved			AA */	-1,	/* Reserved */
/* Reserved			AB */	-1,	/* Reserved */
/* Reserved			AC */	-1,	/* Reserved */
/* Reserved			AD */	-1,	/* Reserved */
/* Reserved			AE */	-1,	/* Reserved */
/* Reserved			AF */	-1,	/* Reserved */
/* Reserved			B0 */	-1,	/* Reserved */
/* Reserved			B1 */	-1,	/* Reserved */
/* Reserved			B2 */	-1,	/* Reserved */
/* Reserved			B3 */	-1,	/* Reserved */
/* Reserved			B4 */	-1,	/* Reserved */
/* Reserved			B5 */	-1,	/* Reserved */
/* Reserved			B6 */	-1,	/* Reserved */
/* Reserved			B7 */	-1,	/* Reserved */
/* Reserved			B8 */	-1,	/* Reserved */
/* Reserved			B9 */	-1,	/* Reserved */
/* Reserved			BA */	-1,	/* Reserved */
/* Reserved			BB */	-1,	/* Reserved */
/* Reserved			BC */	-1,	/* Reserved */
/* Reserved			BD */	-1,	/* Reserved */
/* Reserved			BE */	-1,	/* Reserved */
/* Reserved			BF */	-1,	/* Reserved */
/* Reserved			C0 */	-1,	/* Reserved */
/* Reserved			C1 */	-1,	/* Reserved */
/* Reserved			C2 */	-1,	/* Reserved */
/* Reserved			C3 */	-1,	/* Reserved */
/* Reserved			C4 */	-1,	/* Reserved */
/* Reserved			C5 */	-1,	/* Reserved */
/* Reserved			C6 */	-1,	/* Reserved */
/* Reserved			C7 */	-1,	/* Reserved */
/* Reserved			C8 */	-1,	/* Reserved */
/* Reserved			C9 */	-1,	/* Reserved */
/* Reserved			CA */	-1,	/* Reserved */
/* Reserved			CB */	-1,	/* Reserved */
/* Reserved			CC */	-1,	/* Reserved */
/* Reserved			CD */	-1,	/* Reserved */
/* Reserved			CE */	-1,	/* Reserved */
/* Reserved			CF */	-1,	/* Reserved */
/* Reserved			D0 */	-1,	/* Reserved */
/* Reserved			D1 */	-1,	/* Reserved */
/* Reserved			D2 */	-1,	/* Reserved */
/* Reserved			D3 */	-1,	/* Reserved */
/* Reserved			D4 */	-1,	/* Reserved */
/* Reserved			D5 */	-1,	/* Reserved */
/* Reserved			D6 */	-1,	/* Reserved */
/* Reserved			D7 */	-1,	/* Reserved */
/* Reserved			D8 */	-1,	/* Reserved */
/* Reserved			D9 */	-1,	/* Reserved */
/* Reserved			DA */	-1,	/* Reserved */
/* Reserved			DB */	-1,	/* Reserved */
/* Reserved			DC */	-1,	/* Reserved */
/* Reserved			DD */	-1,	/* Reserved */
/* Reserved			DE */	-1,	/* Reserved */
/* Reserved			DF */	-1,	/* Reserved */
/* Left Control			E0 */	0x1D,	/* 9D */
/* Left Shift			E1 */	0x2A,	/* AA */
/* Left Alt			E2 */	0x38,	/* B8 */
/* Left GUI			E3 */	E0PREFIX|0x5B,	/* E0 DB */
/* Right Control		E4 */	E0PREFIX|0x1D,	/* E0 9D */
/* Right Shift			E5 */	0x36,	/* B6 */
/* Right Alt			E6 */	E0PREFIX|0x38,	/* E0 B8 */
/* Right GUI			E7 */	E0PREFIX|0x5C	/* E0 DC */
};

#define	xsize	((int32_t)(sizeof(x)/sizeof(x[0])))
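To make the table encoding concrete: a make code is emitted as stored (preceded by E0 when E0PREFIX is set), and the break code is the make code with bit 7 set, unless NOBREAK suppresses it. A small decoding sketch assuming <stdio.h> and the definitions above; dump_key() is a hypothetical helper, not part of kbd.c. For HID usage 0xE6 (Right Alt), x[0xe6] == (E0PREFIX|0x38), so it prints "E0 38 E0 B8":

static void
dump_key(int32_t code)
{
	int32_t	c = x[code];

	if (c == -1)
		return;					/* no translation */
	if (c & E0PREFIX)
		printf("E0 ");
	printf("%02X ", c & CODEMASK);			/* make */
	if (!(c & NOBREAK)) {
		if (c & E0PREFIX)
			printf("E0 ");
		printf("%02X", 0x80 | (c & CODEMASK));	/* break */
	}
	printf("\n");
}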
/*
 * Get a max HID keycode (aligned)
 */

int32_t
kbd_maxkey(void)
{
	return (xsize);
}

/*
 * Process keys
 */

int32_t
kbd_process_keys(bthid_session_p s)
{
	bitstr_t	diff[bitstr_size(xsize)];
	int32_t		f1, f2, i;

	assert(s != NULL);
	assert(s->srv != NULL);

	/* Check if the new keys have been pressed */
	bit_ffs(s->keys1, xsize, &f1);

	/* Check if old keys still pressed */
	bit_ffs(s->keys2, xsize, &f2);

	if (f1 == -1) {
		/* no new key pressed */
		if (f2 != -1) {
			/* release old keys */
			kbd_write(s->keys2, f2, 0, s->vkbd);
			memset(s->keys2, 0, bitstr_size(xsize));
		}

		return (0);
	}

	if (f2 == -1) {
		/* no old keys, but new keys pressed */
		assert(f1 != -1);

		memcpy(s->keys2, s->keys1, bitstr_size(xsize));
		kbd_write(s->keys1, f1, 1, s->vkbd);
		memset(s->keys1, 0, bitstr_size(xsize));

		return (0);
	}

	/* new keys got pressed, old keys got released */
	memset(diff, 0, bitstr_size(xsize));

	for (i = f2; i < xsize; i ++) {
		if (bit_test(s->keys2, i)) {
			if (!bit_test(s->keys1, i)) {
				bit_clear(s->keys2, i);
				bit_set(diff, i);
			}
		}
	}

	for (i = f1; i < xsize; i++) {
		if (bit_test(s->keys1, i)) {
			if (!bit_test(s->keys2, i))
				bit_set(s->keys2, i);
			else
				bit_clear(s->keys1, i);
		}
	}

	bit_ffs(diff, xsize, &f2);
	if (f2 > 0)
		kbd_write(diff, f2, 0, s->vkbd);

	bit_ffs(s->keys1, xsize, &f1);
	if (f1 > 0) {
		kbd_write(s->keys1, f1, 1, s->vkbd);
		memset(s->keys1, 0, bitstr_size(xsize));
	}

	return (0);
}
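kbd_process_keys() compares the previous report (keys2) with the new one (keys1): keys set only in keys2 were released, keys set only in keys1 were newly pressed, and keys set in both are still held and generate no traffic. A reduced sketch of the same set arithmetic on a single 64-bit mask (illustration only; the real code uses bitstr_t because xsize is far larger than 64):

#include <stdint.h>

struct key_events {
	uint64_t	released;	/* in old report, not in new */
	uint64_t	pressed;	/* in new report, not in old */
};

/* Same logic as kbd_process_keys(), collapsed onto one mask. */
static struct key_events
diff_reports(uint64_t old_keys, uint64_t new_keys)
{
	struct key_events	ev;

	ev.released = old_keys & ~new_keys;
	ev.pressed = new_keys & ~old_keys;
	return (ev);
}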
/*
 * Translate given keymap and write scancodes
 */

static void
kbd_write(bitstr_t *m, int32_t fb, int32_t make, int32_t fd)
{
	int32_t	i, *b, *eob, n, buf[64];

	b = buf;
	eob = b + sizeof(buf)/sizeof(buf[0]);
	i = fb;

	while (i < xsize) {
		if (bit_test(m, i)) {
			n = kbd_xlate(i, make, b, eob);
			if (n == -1) {
				write(fd, buf, (b - buf) * sizeof(buf[0]));
				b = buf;
				continue;
			}
			b += n;
		}
		i ++;
	}

	if (b != buf)
		write(fd, buf, (b - buf) * sizeof(buf[0]));
}

/*
 * Translate HID code into PS/2 code and put codes into buffer b.
 * Returns the number of codes put in b.  Returns -1 if the buffer
 * does not have enough space.
 */

#undef	PUT
#define	PUT(c, n, b, eob)	\
do {				\
	if ((b) >= (eob))	\
		return (-1);	\
	*(b) = (c);		\
	(b) ++;			\
	(n) ++;			\
} while (0)

static int32_t
kbd_xlate(int32_t code, int32_t make, int32_t *b, int32_t const *eob)
{
	int32_t	c, n;

	n = 0;

	if (code >= xsize)
		return (0); /* HID code is not in the table */

	/* Handle special case - Pause/Break */
	if (code == 0x48) {
		if (!make)
			return (0); /* No break code */

#if 0
XXX FIXME
		if (ctrl_is_pressed) {
			/* Break (Ctrl-Pause) */
			PUT(0xe0, n, b, eob);
			PUT(0x46, n, b, eob);
			PUT(0xe0, n, b, eob);
			PUT(0xc6, n, b, eob);
		} else {
			/* Pause */
			PUT(0xe1, n, b, eob);
			PUT(0x1d, n, b, eob);
			PUT(0x45, n, b, eob);
			PUT(0xe1, n, b, eob);
			PUT(0x9d, n, b, eob);
			PUT(0xc5, n, b, eob);
		}
#endif

		return (n);
	}

	if ((c = x[code]) == -1)
		return (0); /* HID code translation is not defined */

	if (make) {
		if (c & E0PREFIX)
			PUT(0xe0, n, b, eob);

		PUT((c & CODEMASK), n, b, eob);
	} else if (!(c & NOBREAK)) {
		if (c & E0PREFIX)
			PUT(0xe0, n, b, eob);

		PUT((0x80|(c & CODEMASK)), n, b, eob);
	}

	return (n);
}

/*
 * Process status change from vkbd(4)
 */

int32_t
kbd_status_changed(bthid_session_p s, uint8_t *data, int32_t len)
{
	vkbd_status_t	st;
	uint8_t		leds, report_id;
	hid_device_p	hid_device;
	hid_data_t	d;
	hid_item_t	h;

	assert(s != NULL);
	assert(len == sizeof(vkbd_status_t));

	memcpy(&st, data, sizeof(st));
	leds = 0;
	report_id = NO_REPORT_ID;

	hid_device = get_hid_device(&s->bdaddr);
	assert(hid_device != NULL);

	for (d = hid_start_parse(hid_device->desc, 1 << hid_output, -1);
	     hid_get_item(d, &h) > 0; ) {
		if (HID_PAGE(h.usage) == HUP_LEDS) {
			if (report_id == NO_REPORT_ID)
				report_id = h.report_ID;
			else if (h.report_ID != report_id)
				syslog(LOG_WARNING, "Output HID report IDs " \
					"for %s do not match: %d vs. %d. " \
					"Please report",
					bt_ntoa(&s->bdaddr, NULL),
					h.report_ID, report_id);

			switch(HID_USAGE(h.usage)) {
			case 0x01: /* Num Lock LED */
				if (st.leds & LED_NUM)
					hid_set_data(&leds, &h, 1);
				break;

			case 0x02: /* Caps Lock LED */
				if (st.leds & LED_CAP)
					hid_set_data(&leds, &h, 1);
				break;

			case 0x03: /* Scroll Lock LED */
				if (st.leds & LED_SCR)
					hid_set_data(&leds, &h, 1);
				break;

			/* XXX add other LEDs ? */
			}
		}
	}
	hid_end_parse(d);

	data[0] = 0xa2; /* DATA output (HID output report) */

	if (report_id != NO_REPORT_ID) {
		data[1] = report_id;
		data[2] = leds;
		len = 3;
	} else {
		data[1] = leds;
		len = 2;
	}

	write(s->intr, data, len);

	return (0);
}
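The payload kbd_status_changed() writes to the interrupt channel is a Bluetooth HID DATA transaction: the 0xa2 header byte carries the transaction type DATA (0xa) in the high nibble and the Output report type (0x2) in the low nibble, followed by the optional report ID and the LED bitmap. A sketch of the two possible layouts; build_led_report() and its names are hypothetical illustrations, not bthidd code:

#include <stddef.h>
#include <stdint.h>

static size_t
build_led_report(uint8_t *buf, uint8_t report_id, uint8_t leds,
    int have_report_id)
{
	size_t	len = 0;

	buf[len++] = 0xa2;		/* DATA | Output report */
	if (have_report_id)
		buf[len++] = report_id;	/* only when the descriptor uses one */
	buf[len++] = leds;		/* LED bitmap built by hid_set_data() */
	return (len);			/* 3 bytes with an ID, 2 without */
}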